Diffstat (limited to 'plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c')
-rw-r--r--  plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c | 221
1 file changed, 129 insertions(+), 92 deletions(-)
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
index 0be1af1a..9c115bdd 100644
--- a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,57 +13,63 @@
#include <sys/errno.h>
#include <t18x_ari.h>
-extern void nvg_set_request_data(uint64_t req, uint64_t data);
-extern void nvg_set_request(uint64_t req);
-extern uint64_t nvg_get_result(void);
-
-int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
+ int32_t ret = 0;
+
+ (void)ari_base;
+
/* check for allowed power state */
- if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
- state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+ if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
ERROR("%s: unknown cstate (%d)\n", __func__, state);
- return EINVAL;
- }
-
- /* time (TSC ticks) until the core is expected to get a wake event */
- nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
+ ret = EINVAL;
+ } else {
+ /* time (TSC ticks) until the core is expected to get a wake event */
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
- /* set the core cstate */
- write_actlr_el1(state);
+ /* set the core cstate */
+ write_actlr_el1(state);
+ }
- return 0;
+ return ret;
}
/*
* This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
* SYSTEM_CSTATE values.
*/
-int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
uint8_t update_wake_mask)
{
- uint64_t val = 0;
+ uint64_t val = 0ULL;
+
+ (void)ari_base;
/* update CLUSTER_CSTATE? */
- if (cluster)
- val |= (cluster & CLUSTER_CSTATE_MASK) |
+ if (cluster != 0U) {
+ val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
CLUSTER_CSTATE_UPDATE_BIT;
+ }
/* update CCPLEX_CSTATE? */
- if (ccplex)
- val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
+ if (ccplex != 0U) {
+ val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
CCPLEX_CSTATE_UPDATE_BIT;
+ }
/* update SYSTEM_CSTATE? */
- if (system)
- val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
- ((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+ if (system != 0U) {
+ val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+ (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
SYSTEM_CSTATE_UPDATE_BIT);
+ }
/* update wake mask value? */
- if (update_wake_mask)
+ if (update_wake_mask != 0U) {
val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+ }
/* set the wake mask */
val &= CSTATE_WAKE_MASK_CLEAR;
@@ -75,46 +81,60 @@ int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
return 0;
}
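As a side note, the request word built here packs the cluster, CCPLEX and system state fields together with their update bits before it is handed to the NVG channel. The standalone sketch below illustrates that packing; the mask, shift and update-bit values are illustrative placeholders, not the real t18x_ari.h definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder layout, for illustration only (not the real t18x_ari.h values) */
#define CLUSTER_CSTATE_MASK        0x7ULL
#define CLUSTER_CSTATE_UPDATE_BIT  (1ULL << 7)
#define CCPLEX_CSTATE_MASK         0x3ULL
#define CCPLEX_CSTATE_SHIFT        8
#define CCPLEX_CSTATE_UPDATE_BIT   (1ULL << 15)
#define SYSTEM_CSTATE_MASK         0xFULL
#define SYSTEM_CSTATE_SHIFT        16
#define SYSTEM_CSTATE_UPDATE_BIT   (1ULL << 23)

int main(void)
{
	uint32_t cluster = 6U, ccplex = 3U, system = 7U;
	uint64_t val = 0ULL;

	/* each field is masked, shifted into place and tagged with its update bit */
	val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) | CLUSTER_CSTATE_UPDATE_BIT;
	val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
	       CCPLEX_CSTATE_UPDATE_BIT;
	val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
	       SYSTEM_CSTATE_UPDATE_BIT;

	printf("cstate_info request = 0x%llx\n", (unsigned long long)val);
	return 0;
}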
-int nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
{
- /* sanity check crossover type */
- if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)
- return EINVAL;
+ int32_t ret = 0;
- /*
- * The crossover threshold limit types start from
- * TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7. The
- * command indices for updating the threshold can be generated
- * by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
- * command index.
- */
- nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 + type,
- (uint64_t)time);
+ (void)ari_base;
- return 0;
+ /* sanity check crossover type */
+ if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1) {
+ ret = EINVAL;
+ } else {
+ /*
+ * The crossover threshold limit types start from
+ * TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7.
+	 * The command indices for updating the threshold can be generated
+ * by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
+ * command index.
+ */
+ nvg_set_request_data((TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 +
+ (uint64_t)type), (uint64_t)time);
+ }
+
+ return ret;
}
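To make the additive indexing concrete: if the crossover base channel were, say, 11 (a made-up value; the real TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 is defined by the NVG interface headers), a crossover type of 2 would select command index 13. A minimal sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real constants live in t18x_ari.h */
#define NVG_CHANNEL_CROSSOVER_C1_C6_EXAMPLE  11ULL
#define CROSSOVER_TYPE_EXAMPLE               2U

int main(void)
{
	/* command index = base crossover channel + crossover type */
	uint64_t cmd = NVG_CHANNEL_CROSSOVER_C1_C6_EXAMPLE +
		       (uint64_t)CROSSOVER_TYPE_EXAMPLE;

	printf("crossover threshold command index = %llu\n",
	       (unsigned long long)cmd);
	return 0;
}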
uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
{
- /* sanity check state */
- if (state == 0)
- return EINVAL;
+ uint64_t ret;
- /*
- * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
- * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
- * reading the threshold can be generated by adding the type to
- * the NVG_CLEAR_CSTATE_STATS command index.
- */
- nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state);
+ (void)ari_base;
- return (int64_t)nvg_get_result();
+ /* sanity check state */
+ if (state == 0U) {
+ ret = EINVAL;
+ } else {
+ /*
+ * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
+ * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
+ * reading the threshold can be generated by adding the type to
+ * the NVG_CLEAR_CSTATE_STATS command index.
+ */
+ nvg_set_request((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+ (uint64_t)state));
+ ret = nvg_get_result();
+ }
+
+ return ret;
}
-int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
{
uint64_t val;
+ (void)ari_base;
+
/*
* The only difference between a CSTATE_STATS_WRITE and
* CSTATE_STATS_READ is the usage of the 63:32 in the request.
@@ -129,71 +149,88 @@ int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
* reading the threshold can be generated by adding the type to
* the NVG_CLEAR_CSTATE_STATS command index.
*/
- nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state, val);
+ nvg_set_request_data((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+ (uint64_t)state), val);
return 0;
}
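The stats commands use the same additive indexing (clear-stats base index plus the state value), and per the comment above, a write differs from a read only in that bits 63:32 of the request carry the stats value. A rough standalone sketch of the two request data words, using a placeholder shift rather than the real MCE definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder: the real shift for the stats field comes from the MCE headers */
#define STATS_VALUE_SHIFT_EXAMPLE	32

int main(void)
{
	uint32_t stats = 1000U;

	/* write request carries the stats value in bits 63:32 */
	uint64_t write_req = (uint64_t)stats << STATS_VALUE_SHIFT_EXAMPLE;
	/* read request leaves bits 63:32 clear */
	uint64_t read_req = 0ULL;

	printf("write request data = 0x%016llx\n", (unsigned long long)write_req);
	printf("read  request data = 0x%016llx\n", (unsigned long long)read_req);
	return 0;
}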
-int nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
+ (void)ari_base;
+ (void)state;
+ (void)wake_time;
+
/* This does not apply to the Denver cluster */
return 0;
}
-int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
uint64_t val;
+ int32_t ret;
+
+ (void)ari_base;
/* check for allowed power state */
- if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
- state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+ if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
ERROR("%s: unknown cstate (%d)\n", __func__, state);
- return EINVAL;
+ ret = EINVAL;
+ } else {
+ /*
+ * Request format -
+ * 63:32 = wake time
+ * 31:0 = C-state for this core
+ */
+ val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
+ ((uint64_t)state & MCE_SC7_ALLOWED_MASK);
+
+ /* issue command to check if SC7 is allowed */
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
+
+ /* 1 = SC7 allowed, 0 = SC7 not allowed */
+ ret = (nvg_get_result() != 0ULL) ? 1 : 0;
}
- /*
- * Request format -
- * 63:32 = wake time
- * 31:0 = C-state for this core
- */
- val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
- (state & MCE_SC7_ALLOWED_MASK);
-
- /* issue command to check if SC7 is allowed */
- nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
-
- /* 1 = SC7 allowed, 0 = SC7 not allowed */
- return !!nvg_get_result();
+ return ret;
}
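The SC7 query packs its two fields exactly as the comment describes: wake time in bits 63:32 and the core's C-state in bits 31:0. A small standalone sketch of that packing, with placeholder values standing in for MCE_SC7_WAKE_TIME_SHIFT and MCE_SC7_ALLOWED_MASK:

#include <stdint.h>
#include <stdio.h>

/* Placeholders for MCE_SC7_WAKE_TIME_SHIFT / MCE_SC7_ALLOWED_MASK */
#define SC7_WAKE_TIME_SHIFT_EXAMPLE	32
#define SC7_ALLOWED_MASK_EXAMPLE	0xFFFFFFFFULL

int main(void)
{
	uint32_t wake_time = 5000U;	/* TSC ticks until the expected wake event */
	uint32_t state = 7U;		/* C-state for this core */

	/* 63:32 = wake time, 31:0 = C-state for this core */
	uint64_t val = ((uint64_t)wake_time << SC7_WAKE_TIME_SHIFT_EXAMPLE) |
		       ((uint64_t)state & SC7_ALLOWED_MASK_EXAMPLE);

	printf("IS_SC7_ALLOWED request = 0x%016llx\n", (unsigned long long)val);
	return 0;
}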
-int nvg_online_core(uint32_t ari_base, uint32_t core)
+int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
{
- int cpu = read_mpidr() & MPIDR_CPU_MASK;
- int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+ uint64_t cpu = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
+ uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
+ (uint64_t)MIDR_IMPL_MASK;
+ int32_t ret = 0;
+
+ (void)ari_base;
/* sanity check code id */
- if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
+ if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) {
ERROR("%s: unsupported core id (%d)\n", __func__, core);
- return EINVAL;
+ ret = EINVAL;
+ } else {
+ /*
+ * The Denver cluster has 2 CPUs only - 0, 1.
+ */
+ if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
+ ERROR("%s: unknown core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /* get a core online */
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
+ ((uint64_t)core & MCE_CORE_ID_MASK));
+ }
}
- /*
- * The Denver cluster has 2 CPUs only - 0, 1.
- */
- if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
- ERROR("%s: unknown core id (%d)\n", __func__, core);
- return EINVAL;
- }
-
- /* get a core online */
- nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE, core & MCE_CORE_ID_MASK);
-
- return 0;
+ return ret;
}
-int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
{
- int val;
+ uint32_t val;
+
+ (void)ari_base;
/*
* If the enable bit is cleared, Auto-CC3 will be disabled by setting
@@ -207,9 +244,9 @@ int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable
*/
val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
- (enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
+ ((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
- nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, val);
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
return 0;
}
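The Auto-CC3 control word follows the same pattern: the frequency and voltage fields are masked and shifted into the request, and the enable flag sets a single control bit. A standalone illustration with made-up field widths (the real MCE_AUTO_CC3_* masks and shifts are defined by the MCE headers):

#include <stdint.h>
#include <stdio.h>

/* Made-up field layout for illustration; not the real MCE_AUTO_CC3_* values */
#define CC3_FREQ_MASK_EXAMPLE	0x1FFU
#define CC3_FREQ_SHIFT_EXAMPLE	0
#define CC3_VTG_MASK_EXAMPLE	0x7FU
#define CC3_VTG_SHIFT_EXAMPLE	16
#define CC3_ENABLE_BIT_EXAMPLE	(1U << 31)

int main(void)
{
	uint32_t freq = 100U, volt = 50U;
	uint8_t enable = 1U;

	/* frequency and voltage fields plus the enable bit in one control word */
	uint32_t val = ((freq & CC3_FREQ_MASK_EXAMPLE) << CC3_FREQ_SHIFT_EXAMPLE) |
		       ((volt & CC3_VTG_MASK_EXAMPLE) << CC3_VTG_SHIFT_EXAMPLE) |
		       ((enable != 0U) ? CC3_ENABLE_BIT_EXAMPLE : 0U);

	printf("CC3_CTRL request = 0x%08x\n", val);
	return 0;
}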