-rw-r--r--  bl31/aarch64/bl31_arch_setup.c      |  51
-rw-r--r--  bl31/aarch64/bl31_entrypoint.S      |  62
-rw-r--r--  bl31/bl31.mk                        |   1
-rw-r--r--  bl31/bl31_main.c                    |  11
-rw-r--r--  docs/rt-svc-writers-guide.md        |  11
-rw-r--r--  include/bl31/bl31.h                 |   4
-rw-r--r--  include/lib/aarch64/smcc_helpers.h  |  86
-rw-r--r--  include/lib/psci/psci.h             |  32
-rw-r--r--  include/lib/smcc.h (renamed from include/common/smcc_helpers.h) |  55
-rw-r--r--  include/services/std_svc.h          |   6
-rw-r--r--  lib/psci/aarch64/psci_entry.S       | 111
-rw-r--r--  lib/psci/aarch64/psci_helpers.S     |  26
-rw-r--r--  lib/psci/psci_common.c              |   2
-rw-r--r--  lib/psci/psci_lib.mk                |  29
-rw-r--r--  lib/psci/psci_main.c                |  51
-rw-r--r--  lib/psci/psci_on.c                  |   4
-rw-r--r--  lib/psci/psci_private.h             |   1
-rw-r--r--  lib/psci/psci_setup.c               |  39
-rw-r--r--  lib/psci/psci_suspend.c             |   1
-rw-r--r--  services/std_svc/std_svc_setup.c    |  18
20 files changed, 291 insertions(+), 310 deletions(-)
diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c
deleted file mode 100644
index 3deacbae..00000000
--- a/bl31/aarch64/bl31_arch_setup.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <bl31.h>
-#include <cpu_data.h>
-#include <platform.h>
-
-/*******************************************************************************
- * This duplicates what the primary cpu did after a cold boot in BL1. The same
- * needs to be done when a cpu is hotplugged in. This function could also over-
- * ride any EL3 setup done by BL1 as this code resides in rw memory.
- ******************************************************************************/
-void bl31_arch_setup(void)
-{
- /* Program the counter frequency */
- write_cntfrq_el0(plat_get_syscnt_freq2());
-
- /* Initialize the cpu_ops pointer. */
- init_cpu_ops();
-}
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 1c8eed9d..4c3a515a 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,9 +31,10 @@
#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
+#include <xlat_tables.h>
.globl bl31_entrypoint
-
+ .globl bl31_warm_entrypoint
/* -----------------------------------------------------
* bl31_entrypoint() is the cold boot entrypoint,
@@ -132,3 +133,60 @@ func bl31_entrypoint
b el3_exit
endfunc bl31_entrypoint
+
+ /* --------------------------------------------------------------------
+ * This CPU has been physically powered up. It is either resuming from
+ * suspend or has simply been turned on. In both cases, call the BL31
+ * warmboot entrypoint
+ * --------------------------------------------------------------------
+ */
+func bl31_warm_entrypoint
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+ * - Only when the platform bypasses the BL1/BL31 entrypoint by
+ * programming the reset address do we need to set the CPU endianness.
+ * In other cases, we assume this has been taken care by the
+ * entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _set_endian=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=runtime_exceptions
+
+ /* --------------------------------------------
+ * Enable the MMU with the DCache disabled. It
+ * is safe to use stacks allocated in normal
+ * memory as a result. All memory accesses are
+ * marked nGnRnE when the MMU is disabled. So
+ * all the stack writes will make it to memory.
+ * All memory accesses are marked Non-cacheable
+ * when the MMU is enabled but D$ is disabled.
+ * So used stack memory is guaranteed to be
+ * visible immediately after the MMU is enabled
+ * Enabling the DCache at the same time as the
+ * MMU can lead to speculatively fetched and
+ * possibly stale stack memory being read from
+ * other caches. This can lead to coherency
+ * issues.
+ * --------------------------------------------
+ */
+ mov x0, #DISABLE_DCACHE
+ bl bl31_plat_enable_mmu
+
+ bl psci_warmboot_entrypoint
+
+ b el3_exit
+endfunc bl31_warm_entrypoint
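
    For context, 'bl31_plat_enable_mmu' called above is a platform-provided
    hook. A minimal sketch (illustrative, not part of this patch) of what a
    port might implement, assuming the generic xlat_tables helper
    'enable_mmu_el3':

        #include <stdint.h>
        #include <xlat_tables.h>

        void bl31_plat_enable_mmu(uint32_t flags)
        {
                /* 'flags' carries DISABLE_DCACHE on the warm boot path */
                enable_mmu_el3(flags);
        }
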
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index dd3e4cf1..4de511b6 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -32,7 +32,6 @@ include lib/psci/psci_lib.mk
BL31_SOURCES += bl31/bl31_main.c \
bl31/interrupt_mgmt.c \
- bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 7f04d218..f95ef41a 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -59,6 +59,12 @@ static uint32_t next_image_type = NON_SECURE;
void bl31_lib_init(void)
{
cm_init();
+
+ /*
+ * Initialize the PSCI library here. This also does EL3 architectural
+ * setup.
+ */
+ psci_setup((uintptr_t)bl31_warm_entrypoint);
}
/*******************************************************************************
@@ -74,9 +80,6 @@ void bl31_main(void)
NOTICE("BL31: %s\n", version_string);
NOTICE("BL31: %s\n", build_message);
- /* Perform remaining generic architectural setup from EL3 */
- bl31_arch_setup();
-
/* Perform platform setup in BL31 */
bl31_platform_setup();
diff --git a/docs/rt-svc-writers-guide.md b/docs/rt-svc-writers-guide.md
index 7fe71851..4b811fea 100644
--- a/docs/rt-svc-writers-guide.md
+++ b/docs/rt-svc-writers-guide.md
@@ -113,7 +113,7 @@ initialization and call handler functions.
is also used for diagnostic purposes
* `_start` and `_end` values must be based on the `OEN_*` values defined in
- [`smcc_helpers.h`]
+ [`smcc.h`]
* `_type` must be one of `SMC_TYPE_FAST` or `SMC_TYPE_STD`
@@ -252,10 +252,9 @@ The handler is responsible for:
SMC_RET3(handle, x0, x1, x2);
SMC_RET4(handle, x0, x1, x2, x3);
-The `reserved` parameter to the handler is reserved for future use and can be
-ignored. The value returned by a SMC handler is also reserved for future use -
-completion of the handler function must always be via one of the `SMC_RETn()`
-macros.
+The `cookie` parameter to the handler is reserved for future use and can be
+ignored. The `handle` is returned by the SMC handler - completion of the
+handler function must always be via one of the `SMC_RETn()` macros.
NOTE: The PSCI and Test Secure-EL1 Payload Dispatcher services do not follow
all of the above requirements yet.
@@ -303,6 +302,6 @@ _Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved._
[`lib/psci`]: ../lib/psci
[`std_svc_setup.c`]: ../services/std_svc/std_svc_setup.c
[`runtime_svc.h`]: ../include/common/runtime_svc.h
-[`smcc_helpers.h`]: ../include/common/smcc_helpers.h
+[`smcc.h`]: ../include/lib/smcc.h
[PSCI]: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)"
[SMCCC]: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
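
    To make the convention described in the guide concrete, here is a minimal
    sketch of a runtime service handler that ignores 'cookie' and always
    completes through an SMC_RETn() macro on 'handle'. The service and its
    function ID are hypothetical.

        #include <runtime_svc.h>
        #include <smcc_helpers.h>

        /* Hypothetical function ID, for illustration only */
        #define MYSVC_VERSION_FID       0x8400ff00

        static uintptr_t mysvc_smc_handler(uint32_t smc_fid,
                                           u_register_t x1, u_register_t x2,
                                           u_register_t x3, u_register_t x4,
                                           void *cookie, void *handle,
                                           u_register_t flags)
        {
                /* 'cookie' is reserved for future use and ignored */
                switch (smc_fid) {
                case MYSVC_VERSION_FID:
                        /* Return major and minor version numbers in x0/x1 */
                        SMC_RET2(handle, 0, 1);
                default:
                        /* Completion always goes via an SMC_RETn() macro */
                        SMC_RET1(handle, SMC_UNK);
                }
        }
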
diff --git a/include/bl31/bl31.h b/include/bl31/bl31.h
index 96867b07..8352c498 100644
--- a/include/bl31/bl31.h
+++ b/include/bl31/bl31.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,11 +36,11 @@
/*******************************************************************************
* Function prototypes
******************************************************************************/
-void bl31_arch_setup(void);
void bl31_next_el_arch_setup(uint32_t security_state);
void bl31_set_next_image_type(uint32_t type);
uint32_t bl31_get_next_image_type(void);
void bl31_prepare_next_image_entry(void);
void bl31_register_bl32_init(int32_t (*)(void));
+void bl31_warm_entrypoint(void);
#endif /* __BL31_H__ */
diff --git a/include/lib/aarch64/smcc_helpers.h b/include/lib/aarch64/smcc_helpers.h
new file mode 100644
index 00000000..617a5bce
--- /dev/null
+++ b/include/lib/aarch64/smcc_helpers.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMCC_HELPERS_H__
+#define __SMCC_HELPERS_H__
+
+#include <smcc.h>
+
+#ifndef __ASSEMBLY__
+#include <context.h>
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h) { \
+ return (uint64_t) (_h); \
+}
+#define SMC_RET1(_h, _x0) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
+ SMC_RET0(_h); \
+}
+#define SMC_RET2(_h, _x0, _x1) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
+ SMC_RET1(_h, (_x0)); \
+}
+#define SMC_RET3(_h, _x0, _x1, _x2) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
+ SMC_RET2(_h, (_x0), (_x1)); \
+}
+#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
+ SMC_RET3(_h, (_x0), (_x1), (_x2)); \
+}
+
+/*
+ * Convenience macros to access general purpose registers using handle provided
+ * to SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_GP(_h, _g) \
+ read_ctx_reg(get_gpregs_ctx(_h), (_g))
+#define SMC_SET_GP(_h, _g, _v) \
+ write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v))
+
+/*
+ * Convenience macros to access EL3 context registers using handle provided to
+ * SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_EL3(_h, _e) \
+ read_ctx_reg(get_el3state_ctx(_h), (_e))
+#define SMC_SET_EL3(_h, _e, _v) \
+ write_ctx_reg(get_el3state_ctx(_h), (_e), (_v))
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid) \
+ SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
+ ((const uint32_t *) &(_uuid))[1], \
+ ((const uint32_t *) &(_uuid))[2], \
+ ((const uint32_t *) &(_uuid))[3])
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_HELPERS_H__ */
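
    A short usage sketch for the accessors above, with a hypothetical UUID.
    Note that SMC_UUID_RET() expands to SMC_RET4(handle, ...) using the
    literal identifier 'handle', so the context pointer passed to the handler
    must carry that name.

        #include <smcc_helpers.h>
        #include <uuid.h>

        /* Hypothetical service UUID, for illustration only */
        DEFINE_SVC_UUID(my_svc_uid,
                0x12345678, 0x4a5b, 0x4c6d, 0x01, 0x02,
                0x03, 0x04, 0x05, 0x06, 0x07, 0x08);

        static uint64_t mysvc_report_uid(void *handle)
        {
                /* Read the caller's original x1 from the saved GP registers */
                uint64_t arg1 = SMC_GET_GP(handle, CTX_GPREG_X1);

                /* Echo it back in x4 of the return context (exercises
                 * SMC_SET_GP), then report the UUID in x0-x3 */
                SMC_SET_GP(handle, CTX_GPREG_X4, arg1);
                SMC_UUID_RET(handle, my_svc_uid);
        }
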
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
index b6d6d4ea..c3e9ef7e 100644
--- a/include/lib/psci/psci.h
+++ b/include/lib/psci/psci.h
@@ -97,6 +97,12 @@
#define PSCI_NUM_CALLS 18
#endif
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK 0xffe0u
+#define PSCI_FID_VALUE 0u
+#define is_psci_fid(_fid) \
+ (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
/*******************************************************************************
* PSCI Migrate and friends
******************************************************************************/
@@ -326,9 +332,23 @@ int psci_migrate_info_type(void);
long psci_migrate_info_up_cpu(void);
int psci_features(unsigned int psci_fid);
void __dead2 psci_power_down_wfi(void);
-void psci_entrypoint(void);
-void psci_register_spd_pm_hook(const spd_pm_ops_t *);
-uintptr_t psci_smc_handler(uint32_t smc_fid,
+void psci_arch_setup(void);
+
+/*
+ * The below API is deprecated. This is now replaced by bl31_warmboot_entry in
+ * AArch64.
+ */
+void psci_entrypoint(void) __deprecated;
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/******************************************************************************
+ * PSCI Library Interfaces
+ *****************************************************************************/
+u_register_t psci_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
@@ -336,9 +356,9 @@ uintptr_t psci_smc_handler(uint32_t smc_fid,
void *cookie,
void *handle,
u_register_t flags);
-
-/* PSCI setup function */
-int psci_setup(void);
+int psci_setup(uintptr_t mailbox_ep);
+void psci_warmboot_entrypoint(void);
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
#endif /*__ASSEMBLY__*/
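
    The is_psci_fid() macro moved into this header keys purely off bits
    [15:5] of the function ID: all PSCI function numbers lie in 0x00-0x1f, so
    that field is zero for every PSCI call. A small sanity check, using the
    SMC64 PSCI_CPU_ON function ID 0xC4000003 from the PSCI specification:

        #include <assert.h>
        #include <psci.h>

        static void check_psci_fid_routing(void)
        {
                /* 0xC4000003 & 0xffe0 == 0: routed to the PSCI handler */
                assert(is_psci_fid(0xc4000003));

                /* Function number 0x40 falls outside the PSCI range */
                assert(!is_psci_fid(0xc4000040));
        }
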
diff --git a/include/common/smcc_helpers.h b/include/lib/smcc.h
index 6279cf23..c415ba10 100644
--- a/include/common/smcc_helpers.h
+++ b/include/lib/smcc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,8 +28,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __SMCC_HELPERS_H__
-#define __SMCC_HELPERS_H__
+#ifndef __SMCC_H__
+#define __SMCC_H__
/*******************************************************************************
* Bit definitions inside the function id as per the SMC calling convention
@@ -83,7 +83,6 @@
#ifndef __ASSEMBLY__
#include <cassert.h>
-#include <context.h>
#include <stdint.h>
/* Various flags passed to SMC handlers */
@@ -93,45 +92,6 @@
#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
-/* Convenience macros to return from SMC handler */
-#define SMC_RET0(_h) { \
- return (uint64_t) (_h); \
-}
-#define SMC_RET1(_h, _x0) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
- SMC_RET0(_h); \
-}
-#define SMC_RET2(_h, _x0, _x1) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
- SMC_RET1(_h, (_x0)); \
-}
-#define SMC_RET3(_h, _x0, _x1, _x2) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
- SMC_RET2(_h, (_x0), (_x1)); \
-}
-#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
- SMC_RET3(_h, (_x0), (_x1), (_x2)); \
-}
-
-/*
- * Convenience macros to access general purpose registers using handle provided
- * to SMC handler. These takes the offset values defined in context.h
- */
-#define SMC_GET_GP(_h, _g) \
- read_ctx_reg(get_gpregs_ctx(_h), (_g))
-#define SMC_SET_GP(_h, _g, _v) \
- write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v))
-
-/*
- * Convenience macros to access EL3 context registers using handle provided to
- * SMC handler. These takes the offset values defined in context.h
- */
-#define SMC_GET_EL3(_h, _e) \
- read_ctx_reg(get_el3state_ctx(_h), (_e))
-#define SMC_SET_EL3(_h, _e, _v) \
- write_ctx_reg(get_el3state_ctx(_h), (_e), (_v))
-
/* The macro below is used to identify a Standard Service SMC call */
#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
FUNCID_OEN_MASK) == OEN_STD_START)
@@ -154,12 +114,5 @@
{ _n0, _n1, _n2, _n3, _n4, _n5 } \
}
-/* Return a UUID in the SMC return registers */
-#define SMC_UUID_RET(_h, _uuid) \
- SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
- ((const uint32_t *) &(_uuid))[1], \
- ((const uint32_t *) &(_uuid))[2], \
- ((const uint32_t *) &(_uuid))[3])
-
#endif /*__ASSEMBLY__*/
-#endif /* __SMCC_HELPERS_H__ */
+#endif /* __SMCC_H__ */
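
    Similarly, the is_std_svc_call() macro kept in this header checks only
    the owning entity number in bits [29:24] of the function ID; per the SMC
    Calling Convention the Standard Service range is OEN 4, which is what
    OEN_STD_START names, so a PSCI SMC such as PSCI_VERSION (0x84000000)
    passes both checks. A sketch of the two-level routing this enables (the
    real dispatcher shape appears in std_svc_smc_handler further below):

        #include <psci.h>
        #include <smcc.h>

        static int routed_to_psci(uint32_t fid)
        {
                /* OEN selects the Standard Service dispatcher, then
                 * is_psci_fid() selects PSCI within it */
                return is_std_svc_call(fid) && is_psci_fid(fid);
        }

        /* routed_to_psci(0x84000000) evaluates to non-zero */
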
diff --git a/include/services/std_svc.h b/include/services/std_svc.h
index 49d79f8a..0feb2eae 100644
--- a/include/services/std_svc.h
+++ b/include/services/std_svc.h
@@ -42,10 +42,4 @@
#define STD_SVC_VERSION_MAJOR 0x0
#define STD_SVC_VERSION_MINOR 0x1
-/* The macros below are used to identify PSCI calls from the SMC function ID */
-#define PSCI_FID_MASK 0xffe0u
-#define PSCI_FID_VALUE 0u
-#define is_psci_fid(_fid) \
- (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
-
#endif /* __STD_SVC_H__ */
diff --git a/lib/psci/aarch64/psci_entry.S b/lib/psci/aarch64/psci_entry.S
deleted file mode 100644
index 646ebcf8..00000000
--- a/lib/psci/aarch64/psci_entry.S
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <el3_common_macros.S>
-#include <psci.h>
-#include <xlat_tables.h>
-
- .globl psci_entrypoint
- .globl psci_power_down_wfi
-
- /* --------------------------------------------------------------------
- * This CPU has been physically powered up. It is either resuming from
- * suspend or has simply been turned on. In both cases, call the power
- * on finisher.
- * --------------------------------------------------------------------
- */
-func psci_entrypoint
- /*
- * On the warm boot path, most of the EL3 initialisations performed by
- * 'el3_entrypoint_common' must be skipped:
- *
- * - Only when the platform bypasses the BL1/BL31 entrypoint by
- * programming the reset address do we need to set the CPU endianness.
- * In other cases, we assume this has been taken care by the
- * entrypoint code.
- *
- * - No need to determine the type of boot, we know it is a warm boot.
- *
- * - Do not try to distinguish between primary and secondary CPUs, this
- * notion only exists for a cold boot.
- *
- * - No need to initialise the memory or the C runtime environment,
- * it has been done once and for all on the cold boot path.
- */
- el3_entrypoint_common \
- _set_endian=PROGRAMMABLE_RESET_ADDRESS \
- _warm_boot_mailbox=0 \
- _secondary_cold_boot=0 \
- _init_memory=0 \
- _init_c_runtime=0 \
- _exception_vectors=runtime_exceptions
-
- /* --------------------------------------------
- * Enable the MMU with the DCache disabled. It
- * is safe to use stacks allocated in normal
- * memory as a result. All memory accesses are
- * marked nGnRnE when the MMU is disabled. So
- * all the stack writes will make it to memory.
- * All memory accesses are marked Non-cacheable
- * when the MMU is enabled but D$ is disabled.
- * So used stack memory is guaranteed to be
- * visible immediately after the MMU is enabled
- * Enabling the DCache at the same time as the
- * MMU can lead to speculatively fetched and
- * possibly stale stack memory being read from
- * other caches. This can lead to coherency
- * issues.
- * --------------------------------------------
- */
- mov x0, #DISABLE_DCACHE
- bl bl31_plat_enable_mmu
-
- bl psci_power_up_finish
-
- b el3_exit
-endfunc psci_entrypoint
-
- /* --------------------------------------------
- * This function is called to indicate to the
- * power controller that it is safe to power
- * down this cpu. It should not exit the wfi
- * and will be released from reset upon power
- * up. 'wfi_spill' is used to catch erroneous
- * exits from wfi.
- * --------------------------------------------
- */
-func psci_power_down_wfi
- dsb sy // ensure write buffer empty
- wfi
- bl plat_panic_handler
-endfunc psci_power_down_wfi
-
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index 87144dd9..ff250a06 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -35,6 +35,10 @@
.globl psci_do_pwrdown_cache_maintenance
.globl psci_do_pwrup_cache_maintenance
+ .globl psci_power_down_wfi
+#if !ERROR_DEPRECATED
+ .globl psci_entrypoint
+#endif
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(unsigned int power level);
@@ -152,3 +156,25 @@ func psci_do_pwrup_cache_maintenance
ldp x29, x30, [sp], #16
ret
endfunc psci_do_pwrup_cache_maintenance
+
+/* -----------------------------------------------------------------------
+ * void psci_power_down_wfi(void);
+ * This function is called to indicate to the power controller that it
+ * is safe to power down this cpu. It should not exit the wfi and will
+ * be released from reset upon power up.
+ * -----------------------------------------------------------------------
+ */
+func psci_power_down_wfi
+ dsb sy // ensure write buffer empty
+ wfi
+ bl plat_panic_handler
+endfunc psci_power_down_wfi
+
+/* -----------------------------------------------------------------------
+ * void psci_entrypoint(void);
+ * The deprecated entry point for PSCI on warm boot for AArch64.
+ * -----------------------------------------------------------------------
+ */
+func_deprecated psci_entrypoint
+ b bl31_warm_entrypoint
+endfunc_deprecated psci_entrypoint
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 2a0afb4c..c6fea5b0 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -683,7 +683,7 @@ int psci_validate_entry_point(entry_point_info_t *ep,
* code to enable the gic cpu interface and for a cluster it will enable
* coherency at the interconnect level in addition to gic cpu interface.
******************************************************************************/
-void psci_power_up_finish(void)
+void psci_warmboot_entrypoint(void)
{
unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
index 93c78329..662e14a2 100644
--- a/lib/psci/psci_lib.mk
+++ b/lib/psci/psci_lib.mk
@@ -28,21 +28,20 @@
# POSSIBILITY OF SUCH DAMAGE.
#
-PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
- lib/el3_runtime/aarch64/context.S \
- lib/el3_runtime/aarch64/cpu_data.S \
- lib/el3_runtime/aarch64/context_mgmt.c \
- lib/cpus/aarch64/cpu_helpers.S \
- lib/locks/exclusive/spinlock.S \
- lib/psci/psci_off.c \
- lib/psci/psci_on.c \
- lib/psci/psci_suspend.c \
- lib/psci/psci_common.c \
- lib/psci/psci_main.c \
- lib/psci/psci_setup.c \
- lib/psci/psci_system_off.c \
- lib/psci/aarch64/psci_entry.S \
- lib/psci/aarch64/psci_helpers.S \
+PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
+ lib/el3_runtime/aarch64/context.S \
+ lib/el3_runtime/aarch64/cpu_data.S \
+ lib/el3_runtime/aarch64/context_mgmt.c \
+ lib/cpus/aarch64/cpu_helpers.S \
+ lib/locks/exclusive/spinlock.S \
+ lib/psci/psci_off.c \
+ lib/psci/psci_on.c \
+ lib/psci/psci_suspend.c \
+ lib/psci/psci_common.c \
+ lib/psci/psci_main.c \
+ lib/psci/psci_setup.c \
+ lib/psci/psci_system_off.c \
+ lib/psci/aarch64/psci_helpers.S
ifeq (${USE_COHERENT_MEM}, 1)
PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
index 04ef10e7..d412be3c 100644
--- a/lib/psci/psci_main.c
+++ b/lib/psci/psci_main.c
@@ -33,8 +33,7 @@
#include <assert.h>
#include <debug.h>
#include <platform.h>
-#include <runtime_svc.h>
-#include <std_svc.h>
+#include <smcc.h>
#include <string.h>
#include "psci_private.h"
@@ -327,7 +326,7 @@ int psci_features(unsigned int psci_fid)
/*******************************************************************************
* PSCI top level handler for servicing SMCs.
******************************************************************************/
-uintptr_t psci_smc_handler(uint32_t smc_fid,
+u_register_t psci_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
@@ -337,11 +336,11 @@ uintptr_t psci_smc_handler(uint32_t smc_fid,
u_register_t flags)
{
if (is_caller_secure(flags))
- SMC_RET1(handle, SMC_UNK);
+ return SMC_UNK;
/* Check the fid against the capabilities */
if (!(psci_caps & define_psci_cap(smc_fid)))
- SMC_RET1(handle, SMC_UNK);
+ return SMC_UNK;
if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
/* 32-bit PSCI function, clear top parameter bits */
@@ -352,31 +351,31 @@ uintptr_t psci_smc_handler(uint32_t smc_fid,
switch (smc_fid) {
case PSCI_VERSION:
- SMC_RET1(handle, psci_version());
+ return psci_version();
case PSCI_CPU_OFF:
- SMC_RET1(handle, psci_cpu_off());
+ return psci_cpu_off();
case PSCI_CPU_SUSPEND_AARCH32:
- SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+ return psci_cpu_suspend(x1, x2, x3);
case PSCI_CPU_ON_AARCH32:
- SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+ return psci_cpu_on(x1, x2, x3);
case PSCI_AFFINITY_INFO_AARCH32:
- SMC_RET1(handle, psci_affinity_info(x1, x2));
+ return psci_affinity_info(x1, x2);
case PSCI_MIG_AARCH32:
- SMC_RET1(handle, psci_migrate(x1));
+ return psci_migrate(x1);
case PSCI_MIG_INFO_TYPE:
- SMC_RET1(handle, psci_migrate_info_type());
+ return psci_migrate_info_type();
case PSCI_MIG_INFO_UP_CPU_AARCH32:
- SMC_RET1(handle, psci_migrate_info_up_cpu());
+ return psci_migrate_info_up_cpu();
case PSCI_SYSTEM_SUSPEND_AARCH32:
- SMC_RET1(handle, psci_system_suspend(x1, x2));
+ return psci_system_suspend(x1, x2);
case PSCI_SYSTEM_OFF:
psci_system_off();
@@ -387,14 +386,14 @@ uintptr_t psci_smc_handler(uint32_t smc_fid,
/* We should never return from psci_system_reset() */
case PSCI_FEATURES:
- SMC_RET1(handle, psci_features(x1));
+ return psci_features(x1);
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH32:
- SMC_RET1(handle, psci_stat_residency(x1, x2));
+ return psci_stat_residency(x1, x2);
case PSCI_STAT_COUNT_AARCH32:
- SMC_RET1(handle, psci_stat_count(x1, x2));
+ return psci_stat_count(x1, x2);
#endif
default:
@@ -405,29 +404,29 @@ uintptr_t psci_smc_handler(uint32_t smc_fid,
switch (smc_fid) {
case PSCI_CPU_SUSPEND_AARCH64:
- SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+ return psci_cpu_suspend(x1, x2, x3);
case PSCI_CPU_ON_AARCH64:
- SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+ return psci_cpu_on(x1, x2, x3);
case PSCI_AFFINITY_INFO_AARCH64:
- SMC_RET1(handle, psci_affinity_info(x1, x2));
+ return psci_affinity_info(x1, x2);
case PSCI_MIG_AARCH64:
- SMC_RET1(handle, psci_migrate(x1));
+ return psci_migrate(x1);
case PSCI_MIG_INFO_UP_CPU_AARCH64:
- SMC_RET1(handle, psci_migrate_info_up_cpu());
+ return psci_migrate_info_up_cpu();
case PSCI_SYSTEM_SUSPEND_AARCH64:
- SMC_RET1(handle, psci_system_suspend(x1, x2));
+ return psci_system_suspend(x1, x2);
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH64:
- SMC_RET1(handle, psci_stat_residency(x1, x2));
+ return psci_stat_residency(x1, x2);
case PSCI_STAT_COUNT_AARCH64:
- SMC_RET1(handle, psci_stat_count(x1, x2));
+ return psci_stat_count(x1, x2);
#endif
default:
@@ -436,5 +435,5 @@ uintptr_t psci_smc_handler(uint32_t smc_fid,
}
WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
- SMC_RET1(handle, SMC_UNK);
+ return SMC_UNK;
}
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
index d4826ed8..f4bb7978 100644
--- a/lib/psci/psci_on.c
+++ b/lib/psci/psci_on.c
@@ -32,11 +32,9 @@
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
-#include <bl31.h>
#include <debug.h>
#include <context_mgmt.h>
#include <platform.h>
-#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
@@ -177,7 +175,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
* on have completed. Perform enough arch.initialization
* to run in the non-secure address space.
*/
- bl31_arch_setup();
+ psci_arch_setup();
/*
* Lock the CPU spin lock to make sure that the context initialization
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index f42ce551..49352144 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -192,7 +192,6 @@ int psci_validate_power_state(unsigned int power_state,
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
-void psci_power_up_finish(void);
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index fac0edec..d35e0001 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -184,15 +184,17 @@ static void populate_power_domain_tree(const unsigned char *topology)
}
/*******************************************************************************
- * This function initializes the power domain topology tree by querying the
- * platform. The power domain nodes higher than the CPU are populated in the
- * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
- * psci_cpu_pd_nodes[]. The platform exports its static topology map through the
+ * This function does the architectural setup and takes the warm boot
+ * entry-point `mailbox_ep` as an argument. The function also initializes the
+ * power domain topology tree by querying the platform. The power domain nodes
+ * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
+ * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
+ * exports its static topology map through the
* populate_power_domain_topology_tree() API. The algorithm populates the
* psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
- * topology map. On a platform that implements two clusters of 2 cpus each, and
- * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
- * like this:
+ * topology map. On a platform that implements two clusters of 2 cpus each,
+ * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
+ * look like this:
*
* ---------------------------------------------------
* | system node | cluster 0 node | cluster 1 node |
@@ -204,10 +206,13 @@ static void populate_power_domain_tree(const unsigned char *topology)
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
-int psci_setup(void)
+int psci_setup(uintptr_t mailbox_ep)
{
const unsigned char *topology_tree;
+ /* Do the Architectural initialization */
+ psci_arch_setup();
+
/* Query the topology map from the platform */
topology_tree = plat_get_power_domain_tree_desc();
@@ -229,8 +234,8 @@ int psci_setup(void)
*/
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
- plat_setup_psci_ops((uintptr_t)psci_entrypoint,
- &psci_plat_pm_ops);
+ assert(mailbox_ep);
+ plat_setup_psci_ops(mailbox_ep, &psci_plat_pm_ops);
assert(psci_plat_pm_ops);
/* Initialize the psci capability */
@@ -259,3 +264,17 @@ int psci_setup(void)
return 0;
}
+
+/*******************************************************************************
+ * This duplicates what the primary cpu did after a cold boot in BL1. The same
+ * needs to be done when a cpu is hotplugged in. This function could also over-
+ * ride any EL3 setup done by BL1 as this code resides in rw memory.
+ ******************************************************************************/
+void psci_arch_setup(void)
+{
+ /* Program the counter frequency */
+ write_cntfrq_el0(plat_get_syscnt_freq2());
+
+ /* Initialize the cpu_ops pointer. */
+ init_cpu_ops();
+}
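
    With this change, psci_setup() takes the warm boot entrypoint from its
    caller (BL31 passes bl31_warm_entrypoint) and forwards it to
    plat_setup_psci_ops(). A hypothetical platform hook showing where that
    address typically ends up; the mailbox address and the (mostly empty) pm
    ops are invented for illustration:

        #include <arch_helpers.h>
        #include <psci.h>
        #include <stdint.h>

        /* Invented mailbox address, for illustration only */
        #define PLAT_MAILBOX_BASE       0x04000000ULL

        static const plat_psci_ops_t plat_pm_ops = {
                /* platform power management handlers go here */
                .cpu_standby = NULL,
        };

        int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                                const plat_psci_ops_t **psci_ops)
        {
                /* Warm-booting CPUs are released from reset to this address */
                *(volatile uintptr_t *)PLAT_MAILBOX_BASE = sec_entrypoint;
                flush_dcache_range(PLAT_MAILBOX_BASE, sizeof(uintptr_t));

                *psci_ops = &plat_pm_ops;
                return 0;
        }
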
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index ee1ccef2..904a4e75 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -37,7 +37,6 @@
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
-#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 992a6005..06647e00 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -31,6 +31,7 @@
#include <debug.h>
#include <psci.h>
#include <runtime_svc.h>
+#include <smcc_helpers.h>
#include <std_svc.h>
#include <stdint.h>
#include <uuid.h>
@@ -40,16 +41,6 @@ DEFINE_SVC_UUID(arm_svc_uid,
0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2);
-/* Setup Standard Services */
-static int32_t std_svc_setup(void)
-{
- /*
- * PSCI is the only specification implemented as a Standard Service.
- * Invoke PSCI setup from here
- */
- return psci_setup();
-}
-
/*
* Top-level Standard Service SMC handler. This handler will in turn dispatch
* calls to PSCI SMC handler
@@ -68,8 +59,9 @@ uintptr_t std_svc_smc_handler(uint32_t smc_fid,
* value
*/
if (is_psci_fid(smc_fid)) {
- return psci_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
- handle, flags);
+ SMC_RET1(handle,
+ psci_smc_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags));
}
switch (smc_fid) {
@@ -101,6 +93,6 @@ DECLARE_RT_SVC(
OEN_STD_START,
OEN_STD_END,
SMC_TYPE_FAST,
- std_svc_setup,
+ NULL,
std_svc_smc_handler
);