author    Peng Fan <peng.fan@nxp.com>                     2017-09-18 16:31:18 +0800
committer Jérôme Forissier <jerome.forissier@linaro.org>  2017-09-18 14:52:32 +0200
commit    1295874a1bbb809d7505727674dbf795fb75e882 (patch)
tree      131ce31499a5e2adced7230a46d6d2f5430ea7e2 /core/arch/arm/plat-imx
parent    eedc47b4a630d3ac043ed93ae0399767436940b1 (diff)
core: arm: imx7d: add psci suspend support
Implement i.MX7D suspend/resume support. The first time the system enters
suspend, some one-time initialization is needed, such as copying the suspend
code and building the IRAM translation table. Since only 32K of on-chip RAM
is available for suspend/resume, code and data are kept together, section
mapping is used, and WXN is set to false.

Signed-off-by: Peng Fan <peng.fan@nxp.com>
Acked-by: Jens Wiklander <jens.wiklander@linaro.org>
Acked-by: Etienne Carriere <etienne.carriere@linaro.org>
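For context, the suspend path introduced below keeps everything that must survive the power cycle inside the 32K secure OCRAM: a struct imx7_pm_info header followed by a relocated copy of imx7_suspend(). The following is only a sketch of that layout and of the code copy done on the first suspend; copy_suspend_code() is a hypothetical helper, not part of this patch.

/*
 * Assumed OCRAM layout (see imx_pm.h and imx7_suspend_init() below):
 *
 *   TRUSTZONE_OCRAM_START + SUSPEND_OCRAM_OFFSET
 *   +----------------------------+
 *   | struct imx7_pm_info        |  register bases, DDRC tables, TTBRs
 *   +----------------------------+
 *   | copy of imx7_suspend()     |  runs with the IRAM translation table
 *   +----------------------------+  SUSPEND_OCRAM_SIZE (0x1000) in total
 */
#include <stdint.h>
#include <string.h>

static void copy_suspend_code(uint8_t *ocram_va, size_t pm_info_size,
			      const void *suspend_fn, size_t ocram_size)
{
	/* The code follows the pm_info header inside the same 4K area */
	memcpy(ocram_va + pm_info_size, suspend_fn,
	       ocram_size - pm_info_size);
}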
Diffstat (limited to 'core/arch/arm/plat-imx')
-rw-r--r--  core/arch/arm/plat-imx/conf.mk                 |   4
-rw-r--r--  core/arch/arm/plat-imx/imx_pm.h                | 142
-rw-r--r--  core/arch/arm/plat-imx/pm/imx7_suspend.c       |  89
-rw-r--r--  core/arch/arm/plat-imx/pm/pm-imx7.c            | 257
-rw-r--r--  core/arch/arm/plat-imx/pm/psci-suspend-imx7.S  | 718
-rw-r--r--  core/arch/arm/plat-imx/pm/psci.c               |  50
-rw-r--r--  core/arch/arm/plat-imx/sub.mk                  |   6
7 files changed, 1265 insertions(+), 1 deletion(-)
diff --git a/core/arch/arm/plat-imx/conf.mk b/core/arch/arm/plat-imx/conf.mk
index 8652929f..87af7a6c 100644
--- a/core/arch/arm/plat-imx/conf.mk
+++ b/core/arch/arm/plat-imx/conf.mk
@@ -78,4 +78,8 @@ $(call force,CFG_SECURE_TIME_SOURCE_REE,y)
CFG_BOOT_SECONDARY_REQUEST ?= y
endif
+ifeq ($(filter y, $(CFG_PSCI_ARM32)), y)
+CFG_HWSUPP_MEM_PERM_WXN = n
+endif
+
ta-targets = ta_arm32
diff --git a/core/arch/arm/plat-imx/imx_pm.h b/core/arch/arm/plat-imx/imx_pm.h
new file mode 100644
index 00000000..7a0283bd
--- /dev/null
+++ b/core/arch/arm/plat-imx/imx_pm.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __IMX_PM_H
+#define __IMX_PM_H
+
+#include <stdint.h>
+
+#define PM_INFO_MX7_M4_RESERVE0_OFF 0x0
+#define PM_INFO_MX7_M4_RESERVE1_OFF 0x4
+#define PM_INFO_MX7_M4_RESERVE2_OFF 0x8
+#define PM_INFO_MX7_PBASE_OFF 0xc
+#define PM_INFO_MX7_ENTRY_OFF 0x10
+#define PM_INFO_MX7_RESUME_ADDR_OFF 0x14
+#define PM_INFO_MX7_DDR_TYPE_OFF 0x18
+#define PM_INFO_MX7_SIZE_OFF 0x1c
+#define PM_INFO_MX7_DDRC_P_OFF 0x20
+#define PM_INFO_MX7_DDRC_V_OFF 0x24
+#define PM_INFO_MX7_DDRC_PHY_P_OFF 0x28
+#define PM_INFO_MX7_DDRC_PHY_V_OFF 0x2c
+#define PM_INFO_MX7_SRC_P_OFF 0x30
+#define PM_INFO_MX7_SRC_V_OFF 0x34
+#define PM_INFO_MX7_IOMUXC_GPR_P_OFF 0x38
+#define PM_INFO_MX7_IOMUXC_GPR_V_OFF 0x3c
+#define PM_INFO_MX7_CCM_P_OFF 0x40
+#define PM_INFO_MX7_CCM_V_OFF 0x44
+#define PM_INFO_MX7_GPC_P_OFF 0x48
+#define PM_INFO_MX7_GPC_V_OFF 0x4c
+#define PM_INFO_MX7_SNVS_P_OFF 0x50
+#define PM_INFO_MX7_SNVS_V_OFF 0x54
+#define PM_INFO_MX7_ANATOP_P_OFF 0x58
+#define PM_INFO_MX7_ANATOP_V_OFF 0x5c
+#define PM_INFO_MX7_LPSR_P_OFF 0x60
+#define PM_INFO_MX7_LPSR_V_OFF 0x64
+#define PM_INFO_MX7_GIC_DIST_P_OFF 0x68
+#define PM_INFO_MX7_GIC_DIST_V_OFF 0x6c
+#define PM_INFO_MX7_TTBR0_OFF 0x70
+#define PM_INFO_MX7_TTBR1_OFF 0x74
+#define PM_INFO_MX7_DDRC_REG_NUM_OFF 0x78
+#define PM_INFO_MX7_DDRC_REG_OFF 0x7C
+#define PM_INFO_MX7_DDRC_PHY_REG_NUM_OFF 0x17C
+#define PM_INFO_MX7_DDRC_PHY_REG_OFF 0x180
+
+#define MX7_DDRC_NUM 32
+#define MX7_DDRC_PHY_NUM 16
+
+
+#define SUSPEND_OCRAM_SIZE 0x1000
+#define LOWPOWER_IDLE_OCRAM_SIZE 0x1000
+
+#define SUSPEND_OCRAM_OFFSET 0x0
+#define LOWPOWER_IDLE_OCRAM_OFFSET 0x1000
+
+#ifndef ASM
+#include <sm/sm.h>
+
+struct imx7_pm_info {
+ uint32_t m4_reserve0;
+ uint32_t m4_reserve1;
+ uint32_t m4_reserve2;
+ paddr_t pa_base; /* pa of pm_info */
+ uintptr_t entry;
+ paddr_t tee_resume;
+ uint32_t ddr_type;
+ uint32_t pm_info_size;
+ paddr_t ddrc_pa_base;
+ vaddr_t ddrc_va_base;
+ paddr_t ddrc_phy_pa_base;
+ vaddr_t ddrc_phy_va_base;
+ paddr_t src_pa_base;
+ vaddr_t src_va_base;
+ paddr_t iomuxc_gpr_pa_base;
+ vaddr_t iomuxc_gpr_va_base;
+ paddr_t ccm_pa_base;
+ vaddr_t ccm_va_base;
+ paddr_t gpc_pa_base;
+ vaddr_t gpc_va_base;
+ paddr_t snvs_pa_base;
+ vaddr_t snvs_va_base;
+ paddr_t anatop_pa_base;
+ vaddr_t anatop_va_base;
+ paddr_t lpsr_pa_base;
+ vaddr_t lpsr_va_base;
+ paddr_t gic_pa_base;
+ vaddr_t gic_va_base;
+ uint32_t ttbr0;
+ uint32_t ttbr1;
+ uint32_t ddrc_num;
+ uint32_t ddrc_val[MX7_DDRC_NUM][2];
+ uint32_t ddrc_phy_num;
+ uint32_t ddrc_phy_val[MX7_DDRC_NUM][2];
+} __aligned(8);
+
+struct suspend_save_regs {
+ uint32_t irq[3];
+ uint32_t fiq[3];
+ uint32_t und[3];
+ uint32_t abt[3];
+ uint32_t mon[3];
+} __aligned(8);
+
+struct imx7_pm_data {
+ uint32_t ddr_type;
+ uint32_t ddrc_num;
+ uint32_t (*ddrc_offset)[2];
+ uint32_t ddrc_phy_num;
+ uint32_t (*ddrc_phy_offset)[2];
+};
+
+void imx7_suspend(struct imx7_pm_info *info);
+void imx7_resume(void);
+void ca7_cpu_resume(void);
+int imx7_suspend_init(void);
+int pm_imx7_iram_tbl_init(void);
+int imx7_cpu_suspend(uint32_t power_state, uintptr_t entry,
+ uint32_t context_id, struct sm_nsec_ctx *nsec);
+#endif
+
+#endif
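The PM_INFO_MX7_*_OFF values above are consumed by the resume assembly in psci-suspend-imx7.S, so they must match the layout of struct imx7_pm_info exactly (which assumes 32-bit paddr_t/vaddr_t on this Armv7 platform). A sketch of compile-time checks that would catch a drift; these asserts are not part of the patch:

#include <stddef.h>
#include <imx_pm.h>	/* struct imx7_pm_info and the *_OFF macros */

/* Sketch only: a few representative offset checks (C11 _Static_assert) */
_Static_assert(offsetof(struct imx7_pm_info, pa_base) ==
	       PM_INFO_MX7_PBASE_OFF, "pa_base offset mismatch");
_Static_assert(offsetof(struct imx7_pm_info, ttbr0) ==
	       PM_INFO_MX7_TTBR0_OFF, "ttbr0 offset mismatch");
_Static_assert(offsetof(struct imx7_pm_info, ddrc_val) ==
	       PM_INFO_MX7_DDRC_REG_OFF, "ddrc_val offset mismatch");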
diff --git a/core/arch/arm/plat-imx/pm/imx7_suspend.c b/core/arch/arm/plat-imx/pm/imx7_suspend.c
new file mode 100644
index 00000000..660e71f6
--- /dev/null
+++ b/core/arch/arm/plat-imx/pm/imx7_suspend.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2017 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arm.h>
+#include <arm32.h>
+#include <console.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <kernel/panic.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/generic_boot.h>
+#include <kernel/misc.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <sm/sm.h>
+#include <sm/pm.h>
+#include <sm/psci.h>
+#include <stdint.h>
+
+static int suspended_init;
+
+int imx7_cpu_suspend(uint32_t power_state __unused, uintptr_t entry,
+ uint32_t context_id __unused, struct sm_nsec_ctx *nsec)
+{
+ uint32_t suspend_ocram_base = core_mmu_get_va(TRUSTZONE_OCRAM_START +
+ SUSPEND_OCRAM_OFFSET,
+ MEM_AREA_TEE_COHERENT);
+ struct imx7_pm_info *p = (struct imx7_pm_info *)suspend_ocram_base;
+ int ret;
+
+ if (!suspended_init) {
+ imx7_suspend_init();
+ suspended_init = 1;
+ }
+
+ /* Store non-sec ctx regs */
+ sm_save_modes_regs(&nsec->mode_regs);
+
+ ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+ (suspend_ocram_base + sizeof(*p)));
+ /*
+	 * sm_pm_cpu_suspend() may return without actually suspending;
+	 * check its return value to decide whether registers must be restored.
+ */
+ if (ret < 0) {
+ DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+ return 0;
+ }
+
+ plat_cpu_reset_late();
+
+	/* Restore the non-secure banked mode registers */
+ sm_restore_modes_regs(&nsec->mode_regs);
+
+	/* Set the entry point for returning to Linux */
+ nsec->mon_lr = (uint32_t)entry;
+
+ main_init_gic();
+
+ DMSG("=== Back from Suspended ===\n");
+
+ return 0;
+}
diff --git a/core/arch/arm/plat-imx/pm/pm-imx7.c b/core/arch/arm/plat-imx/pm/pm-imx7.c
new file mode 100644
index 00000000..64cd54f8
--- /dev/null
+++ b/core/arch/arm/plat-imx/pm/pm-imx7.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <arm32.h>
+#include <console.h>
+#include <io.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <mmdc.h>
+#include <platform_config.h>
+#include <sm/pm.h>
+#include <sm/psci.h>
+#include <sm/sm.h>
+#include <string.h>
+
+paddr_t iram_tbl_phys_addr = -1UL;
+void *iram_tbl_virt_addr;
+
+#define READ_DATA_FROM_HARDWARE 0
+
+static uint32_t imx7d_ddrc_ddr3_setting[][2] = {
+ { 0x0, READ_DATA_FROM_HARDWARE },
+ { 0x1a0, READ_DATA_FROM_HARDWARE },
+ { 0x1a4, READ_DATA_FROM_HARDWARE },
+ { 0x1a8, READ_DATA_FROM_HARDWARE },
+ { 0x64, READ_DATA_FROM_HARDWARE },
+ { 0x490, READ_DATA_FROM_HARDWARE },
+ { 0xd0, READ_DATA_FROM_HARDWARE },
+ { 0xd4, READ_DATA_FROM_HARDWARE },
+ { 0xdc, READ_DATA_FROM_HARDWARE },
+ { 0xe0, READ_DATA_FROM_HARDWARE },
+ { 0xe4, READ_DATA_FROM_HARDWARE },
+ { 0xf4, READ_DATA_FROM_HARDWARE },
+ { 0x100, READ_DATA_FROM_HARDWARE },
+ { 0x104, READ_DATA_FROM_HARDWARE },
+ { 0x108, READ_DATA_FROM_HARDWARE },
+ { 0x10c, READ_DATA_FROM_HARDWARE },
+ { 0x110, READ_DATA_FROM_HARDWARE },
+ { 0x114, READ_DATA_FROM_HARDWARE },
+ { 0x120, READ_DATA_FROM_HARDWARE },
+ { 0x180, READ_DATA_FROM_HARDWARE },
+ { 0x190, READ_DATA_FROM_HARDWARE },
+ { 0x194, READ_DATA_FROM_HARDWARE },
+ { 0x200, READ_DATA_FROM_HARDWARE },
+ { 0x204, READ_DATA_FROM_HARDWARE },
+ { 0x214, READ_DATA_FROM_HARDWARE },
+ { 0x218, READ_DATA_FROM_HARDWARE },
+ { 0x240, READ_DATA_FROM_HARDWARE },
+ { 0x244, READ_DATA_FROM_HARDWARE },
+};
+
+static uint32_t imx7d_ddrc_phy_ddr3_setting[][2] = {
+ { 0x0, READ_DATA_FROM_HARDWARE },
+ { 0x4, READ_DATA_FROM_HARDWARE },
+ { 0x10, READ_DATA_FROM_HARDWARE },
+ { 0xb0, READ_DATA_FROM_HARDWARE },
+ { 0x9c, READ_DATA_FROM_HARDWARE },
+ { 0x7c, READ_DATA_FROM_HARDWARE },
+ { 0x80, READ_DATA_FROM_HARDWARE },
+ { 0x84, READ_DATA_FROM_HARDWARE },
+ { 0x88, READ_DATA_FROM_HARDWARE },
+ { 0x6c, READ_DATA_FROM_HARDWARE },
+ { 0x20, READ_DATA_FROM_HARDWARE },
+ { 0x30, READ_DATA_FROM_HARDWARE },
+ { 0x50, 0x01000010 },
+ { 0x50, 0x00000010 },
+ { 0xc0, 0x0e407304 },
+ { 0xc0, 0x0e447304 },
+ { 0xc0, 0x0e447306 },
+ { 0xc0, 0x0e447304 },
+ { 0xc0, 0x0e407306 },
+};
+
+static struct imx7_pm_data imx7d_pm_data_ddr3 = {
+ .ddrc_num = ARRAY_SIZE(imx7d_ddrc_ddr3_setting),
+ .ddrc_offset = imx7d_ddrc_ddr3_setting,
+ .ddrc_phy_num = ARRAY_SIZE(imx7d_ddrc_phy_ddr3_setting),
+ .ddrc_phy_offset = imx7d_ddrc_phy_ddr3_setting,
+};
+
+paddr_t phys_addr[] = {
+ AIPS1_BASE, AIPS2_BASE, AIPS3_BASE
+};
+
+int pm_imx7_iram_tbl_init(void)
+{
+ uint32_t i;
+ struct tee_mmap_region map;
+
+ /* iram mmu translation table already initialized */
+ if (iram_tbl_phys_addr != (-1UL))
+ return 0;
+
+ iram_tbl_phys_addr = TRUSTZONE_OCRAM_START + 16 * 1024;
+ iram_tbl_virt_addr = phys_to_virt(iram_tbl_phys_addr,
+ MEM_AREA_TEE_COHERENT);
+
+ /* 16KB */
+ memset(iram_tbl_virt_addr, 0, 16 * 1024);
+
+ for (i = 0; i < ARRAY_SIZE(phys_addr); i++) {
+ map.pa = phys_addr[i];
+ map.va = (vaddr_t)phys_to_virt(phys_addr[i], MEM_AREA_IO_SEC);
+ map.region_size = CORE_MMU_PGDIR_SIZE;
+ map.size = AIPS1_SIZE; /* 4M for AIPS1/2/3 */
+ map.type = MEM_AREA_IO_SEC;
+ map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
+ TEE_MATTR_GLOBAL | TEE_MATTR_SECURE |
+ (TEE_MATTR_CACHE_NONCACHE << TEE_MATTR_CACHE_SHIFT);
+ map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr);
+ }
+
+ /* Note IRAM_S_BASE is not 1M aligned, so take care */
+ map.pa = ROUNDDOWN(IRAM_S_BASE, CORE_MMU_PGDIR_SIZE);
+ map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_TEE_COHERENT);
+ map.region_size = CORE_MMU_PGDIR_SIZE;
+ map.size = CORE_MMU_DEVICE_SIZE;
+ map.type = MEM_AREA_TEE_COHERENT;
+ map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRWX | TEE_MATTR_GLOBAL |
+ TEE_MATTR_SECURE;
+ map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr);
+
+ map.pa = GIC_BASE;
+ map.va = (vaddr_t)phys_to_virt((paddr_t)GIC_BASE, MEM_AREA_IO_SEC);
+ map.region_size = CORE_MMU_PGDIR_SIZE;
+ map.size = CORE_MMU_DEVICE_SIZE;
+ map.type = MEM_AREA_TEE_COHERENT;
+ map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL |
+ TEE_MATTR_SECURE;
+ map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr);
+
+ return 0;
+}
+
+int imx7_suspend_init(void)
+{
+ uint32_t i;
+ uint32_t (*ddrc_offset_array)[2];
+ uint32_t (*ddrc_phy_offset_array)[2];
+ uint32_t suspend_ocram_base = core_mmu_get_va(TRUSTZONE_OCRAM_START +
+ SUSPEND_OCRAM_OFFSET,
+ MEM_AREA_TEE_COHERENT);
+ struct imx7_pm_info *p = (struct imx7_pm_info *)suspend_ocram_base;
+ struct imx7_pm_data *pm_data;
+
+ pm_imx7_iram_tbl_init();
+
+ dcache_op_level1(DCACHE_OP_CLEAN_INV);
+
+ p->pa_base = TRUSTZONE_OCRAM_START + SUSPEND_OCRAM_OFFSET;
+ p->tee_resume = virt_to_phys((void *)(vaddr_t)ca7_cpu_resume);
+ p->pm_info_size = sizeof(*p);
+ p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC);
+ p->ccm_pa_base = CCM_BASE;
+ p->ddrc_va_base = core_mmu_get_va(DDRC_BASE, MEM_AREA_IO_SEC);
+ p->ddrc_pa_base = DDRC_BASE;
+ p->ddrc_phy_va_base = core_mmu_get_va(DDRC_PHY_BASE, MEM_AREA_IO_SEC);
+ p->ddrc_phy_pa_base = DDRC_PHY_BASE;
+ p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC);
+ p->src_pa_base = SRC_BASE;
+ p->iomuxc_gpr_va_base = core_mmu_get_va(IOMUXC_GPR_BASE,
+ MEM_AREA_IO_SEC);
+ p->iomuxc_gpr_pa_base = IOMUXC_GPR_BASE;
+ p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC);
+ p->gpc_pa_base = GPC_BASE;
+ p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC);
+ p->anatop_pa_base = ANATOP_BASE;
+ p->snvs_va_base = core_mmu_get_va(SNVS_BASE, MEM_AREA_IO_SEC);
+ p->snvs_pa_base = SNVS_BASE;
+ p->lpsr_va_base = core_mmu_get_va(LPSR_BASE, MEM_AREA_IO_SEC);
+ p->lpsr_pa_base = LPSR_BASE;
+ p->gic_va_base = core_mmu_get_va(GIC_BASE, MEM_AREA_IO_SEC);
+ p->gic_pa_base = GIC_BASE;
+
+	/* TODO: LPSR is disabled for now */
+ write32(0, p->lpsr_va_base);
+
+ p->ddr_type = imx_get_ddr_type();
+ switch (p->ddr_type) {
+ case IMX_DDR_TYPE_DDR3:
+ pm_data = &imx7d_pm_data_ddr3;
+ break;
+ default:
+		panic("Unsupported DDR type\n");
+ break;
+ }
+
+ p->ddrc_num = pm_data->ddrc_num;
+ p->ddrc_phy_num = pm_data->ddrc_phy_num;
+ ddrc_offset_array = pm_data->ddrc_offset;
+ ddrc_phy_offset_array = pm_data->ddrc_phy_offset;
+
+ for (i = 0; i < p->ddrc_num; i++) {
+ p->ddrc_val[i][0] = ddrc_offset_array[i][0];
+ if (ddrc_offset_array[i][1] == READ_DATA_FROM_HARDWARE)
+ p->ddrc_val[i][1] = read32(p->ddrc_va_base +
+ ddrc_offset_array[i][0]);
+ else
+ p->ddrc_val[i][1] = ddrc_offset_array[i][1];
+
+ if (p->ddrc_val[i][0] == 0xd0)
+ p->ddrc_val[i][1] |= 0xc0000000;
+ }
+
+ /* initialize DDRC PHY settings */
+ for (i = 0; i < p->ddrc_phy_num; i++) {
+ p->ddrc_phy_val[i][0] = ddrc_phy_offset_array[i][0];
+ if (ddrc_phy_offset_array[i][1] == READ_DATA_FROM_HARDWARE)
+ p->ddrc_phy_val[i][1] =
+ read32(p->ddrc_phy_va_base +
+ ddrc_phy_offset_array[i][0]);
+ else
+ p->ddrc_phy_val[i][1] = ddrc_phy_offset_array[i][1];
+ }
+
+ memcpy((void *)(suspend_ocram_base + sizeof(*p)),
+ (void *)(vaddr_t)imx7_suspend, SUSPEND_OCRAM_SIZE - sizeof(*p));
+
+ dcache_clean_range((void *)suspend_ocram_base, SUSPEND_OCRAM_SIZE);
+
+ /*
+	 * Note: the IRAM is mapped as IO_SEC; if this is changed to a MEM
+	 * mapping, the cache needs to be flushed.
+ */
+ icache_inv_all();
+
+ return 0;
+}
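imx7_suspend_init() above snapshots the DDRC and PHY registers into pm_info, reading the live value for every READ_DATA_FROM_HARDWARE entry; the resume assembly then replays those pairs against the physical register base. Roughly, the replay loop at label 14: in psci-suspend-imx7.S corresponds to the C sketch below (not part of the patch; assumes OP-TEE's write32() and vaddr_t):

#include <io.h>
#include <imx_pm.h>

/* Sketch only: C equivalent of the DDRC restore loop in the resume asm */
static void ddrc_restore(struct imx7_pm_info *p, vaddr_t ddrc_base)
{
	uint32_t i;

	/* each entry is an (offset, value) pair captured at suspend time */
	for (i = 0; i < p->ddrc_num; i++)
		write32(p->ddrc_val[i][1], ddrc_base + p->ddrc_val[i][0]);
}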
diff --git a/core/arch/arm/plat-imx/pm/psci-suspend-imx7.S b/core/arch/arm/plat-imx/pm/psci-suspend-imx7.S
new file mode 100644
index 00000000..cb48a1b4
--- /dev/null
+++ b/core/arch/arm/plat-imx/pm/psci-suspend-imx7.S
@@ -0,0 +1,718 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <imx_pm.h>
+#include <imx-regs.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+#define MX7_SRC_GPR1 0x74
+#define MX7_SRC_GPR2 0x78
+#define GPC_PGC_C0 0x800
+#define GPC_PGC_FM 0xa00
+#define ANADIG_SNVS_MISC_CTRL 0x380
+#define ANADIG_SNVS_MISC_CTRL_SET 0x384
+#define ANADIG_SNVS_MISC_CTRL_CLR 0x388
+#define ANADIG_DIGPROG 0x800
+#define DDRC_STAT 0x4
+#define DDRC_PWRCTL 0x30
+#define DDRC_PSTAT 0x3fc
+#define DDRC_PCTRL_0 0x490
+#define DDRC_DFIMISC 0x1b0
+#define DDRC_SWCTL 0x320
+#define DDRC_SWSTAT 0x324
+#define DDRPHY_LP_CON0 0x18
+
+#define CCM_SNVS_LPCG 0x250
+#define MX7D_GPC_IMR1 0x30
+#define MX7D_GPC_IMR2 0x34
+#define MX7D_GPC_IMR3 0x38
+#define MX7D_GPC_IMR4 0x3c
+
+/*
+ * The code in this file is copied to coherent on-chip RAM and must not
+ * depend on any code/data in TEE memory (DDR).
+ */
+ .section .text.psci.suspend
+ .align 3
+
+ .macro disable_l1_dcache
+
+ /*
+ * flush L1 data cache before clearing SCTLR.C bit.
+ */
+ push {r0 - r10, lr}
+ ldr r1, =dcache_op_all
+ mov r0, #DCACHE_OP_CLEAN_INV
+ mov lr, pc
+ bx r1
+ pop {r0 - r10, lr}
+
+ /* disable d-cache */
+ read_sctlr r7
+ bic r7, r7, #SCTLR_C
+ write_sctlr r7
+ dsb
+ isb
+
+ push {r0 - r10, lr}
+ ldr r1, =dcache_op_all
+ mov r0, #DCACHE_OP_CLEAN_INV
+ mov lr, pc
+ bx r1
+ pop {r0 - r10, lr}
+
+ .endm
+
+ .macro store_ttbr
+
+ /* Store TTBR1 to pm_info->ttbr1 */
+ read_ttbr1 r7
+ str r7, [r0, #PM_INFO_MX7_TTBR1_OFF]
+
+	/* Store TTBR0 to pm_info->ttbr0 */
+ read_ttbr0 r7
+ str r7, [r0, #PM_INFO_MX7_TTBR0_OFF]
+
+ /* Disable Branch Prediction */
+ read_sctlr r6
+ bic r6, r6, #SCTLR_Z
+ write_sctlr r6
+
+ /* Flush the BTAC. */
+ write_bpiallis
+
+ ldr r6, =iram_tbl_phys_addr
+ ldr r6, [r6]
+ dsb
+ isb
+
+ /* Store the IRAM table in TTBR1/0 */
+ write_ttbr1 r6
+ write_ttbr0 r6
+
+ /* Read TTBCR and set PD0=1 */
+ read_ttbcr r6
+ orr r6, r6, #TTBCR_PD0
+ write_ttbcr r6
+
+ dsb
+ isb
+
+ /* flush the TLB */
+ write_tlbiallis
+ isb
+ write_tlbiall
+ isb
+
+ .endm
+
+ .macro restore_ttbr
+
+ /* Enable L1 data cache. */
+ read_sctlr r6
+ orr r6, r6, #SCTLR_C
+ write_sctlr r6
+
+ dsb
+ isb
+
+ /* Restore TTBCR */
+ /* Read TTBCR and set PD0=0 */
+ read_ttbcr r6
+ bic r6, r6, #TTBCR_PD0
+ write_ttbcr r6
+ dsb
+ isb
+
+ /* flush the TLB */
+ write_tlbiallis
+
+ /* Enable Branch Prediction */
+ read_sctlr r6
+ orr r6, r6, #SCTLR_Z
+ write_sctlr r6
+
+ /* Flush the Branch Target Address Cache (BTAC) */
+ write_bpiallis
+
+	/* Restore TTBR1/0 from the original values saved in pm_info */
+ ldr r7, [r0, #PM_INFO_MX7_TTBR1_OFF]
+ write_ttbr1 r7
+ ldr r7, [r0, #PM_INFO_MX7_TTBR0_OFF]
+ write_ttbr0 r7
+ isb
+
+ .endm
+
+ .macro ddrc_enter_self_refresh
+
+ ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+
+ /* let DDR out of self-refresh */
+ ldr r7, =0x0
+ str r7, [r11, #DDRC_PWRCTL]
+
+ /* wait rw port_busy clear */
+ ldr r6, =BIT32(16)
+ orr r6, r6, #0x1
+1:
+ ldr r7, [r11, #DDRC_PSTAT]
+ ands r7, r7, r6
+ bne 1b
+
+ /* enter self-refresh bit 5 */
+ ldr r7, =BIT32(5)
+ str r7, [r11, #DDRC_PWRCTL]
+
+ /* wait until self-refresh mode entered */
+2:
+ ldr r7, [r11, #DDRC_STAT]
+ and r7, r7, #0x3
+ cmp r7, #0x3
+ bne 2b
+3:
+ ldr r7, [r11, #DDRC_STAT]
+ ands r7, r7, #0x20
+ beq 3b
+
+ /* disable dram clk */
+ ldr r7, [r11, #DDRC_PWRCTL]
+ orr r7, r7, #BIT32(3)
+ str r7, [r11, #DDRC_PWRCTL]
+
+ .endm
+
+ .macro ddrc_exit_self_refresh
+
+ cmp r5, #0x0
+ ldreq r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+ ldrne r11, [r0, #PM_INFO_MX7_DDRC_P_OFF]
+
+ /* let DDR out of self-refresh */
+ ldr r7, =0x0
+ str r7, [r11, #DDRC_PWRCTL]
+
+	/* wait until self-refresh mode is exited */
+4:
+ ldr r7, [r11, #DDRC_STAT]
+ and r7, r7, #0x3
+ cmp r7, #0x3
+ beq 4b
+
+ /* enable auto self-refresh */
+ ldr r7, [r11, #DDRC_PWRCTL]
+ orr r7, r7, #BIT32(0)
+ str r7, [r11, #DDRC_PWRCTL]
+
+ .endm
+
+ .macro wait_delay
+5:
+ subs r6, r6, #0x1
+ bne 5b
+
+ .endm
+
+ .macro ddr_enter_retention
+
+ ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+
+	/* disable the AXI port */
+ ldr r7, =0x0
+ str r7, [r11, #DDRC_PCTRL_0]
+
+ /* wait rw port_busy clear */
+ ldr r6, =BIT32(16)
+ orr r6, r6, #0x1
+6:
+ ldr r7, [r11, #DDRC_PSTAT]
+ ands r7, r7, r6
+ bne 6b
+
+ ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+ /* enter self-refresh bit 5 */
+ ldr r7, =BIT32(5)
+ str r7, [r11, #DDRC_PWRCTL]
+
+ /* wait until self-refresh mode entered */
+7:
+ ldr r7, [r11, #DDRC_STAT]
+ and r7, r7, #0x3
+ cmp r7, #0x3
+ bne 7b
+8:
+ ldr r7, [r11, #DDRC_STAT]
+ ands r7, r7, #0x20
+ beq 8b
+
+ /* disable dram clk */
+ ldr r7, =BIT32(5)
+ orr r7, r7, #BIT32(3)
+ str r7, [r11, #DDRC_PWRCTL]
+
+ ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+ ldr r7, [r11, #ANADIG_DIGPROG]
+ and r7, r7, #0xff
+ cmp r7, #0x11
+ bne 10f
+
+ /* TO 1.1 */
+ ldr r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]
+ ldr r7, =0x38000000
+ str r7, [r11]
+
+	/* LPSR mode needs to use the TO1.0 flow, as the IOMUX loses power */
+ ldr r10, [r0, #PM_INFO_MX7_LPSR_V_OFF]
+ ldr r7, [r10]
+ cmp r7, #0x0
+ beq 11f
+10:
+ /* reset ddr_phy */
+ ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+ ldr r7, =0x0
+ str r7, [r11, #ANADIG_SNVS_MISC_CTRL]
+
+ /* delay 7 us */
+ ldr r6, =6000
+ wait_delay
+
+ ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFF]
+ ldr r6, =0x1000
+ ldr r7, [r11, r6]
+ orr r7, r7, #0x1
+ str r7, [r11, r6]
+11:
+ /* turn off ddr power */
+ ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+ ldr r7, =(0x1 << 29)
+ str r7, [r11, #ANADIG_SNVS_MISC_CTRL_SET]
+
+ ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFF]
+ ldr r6, =0x1000
+ ldr r7, [r11, r6]
+ orr r7, r7, #0x1
+ str r7, [r11, r6]
+
+ .endm
+
+ .macro ddr_exit_retention
+
+ cmp r5, #0x0
+ ldreq r1, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+ ldrne r1, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
+ ldreq r2, [r0, #PM_INFO_MX7_SRC_V_OFF]
+ ldrne r2, [r0, #PM_INFO_MX7_SRC_P_OFF]
+ ldreq r3, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+ ldrne r3, [r0, #PM_INFO_MX7_DDRC_P_OFF]
+ ldreq r4, [r0, #PM_INFO_MX7_DDRC_PHY_V_OFF]
+ ldrne r4, [r0, #PM_INFO_MX7_DDRC_PHY_P_OFF]
+ ldreq r10, [r0, #PM_INFO_MX7_CCM_V_OFF]
+ ldrne r10, [r0, #PM_INFO_MX7_CCM_P_OFF]
+ ldreq r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]
+ ldrne r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_P_OFF]
+
+ /* turn on ddr power */
+ ldr r7, =BIT32(29)
+ str r7, [r1, #ANADIG_SNVS_MISC_CTRL_CLR]
+
+ ldr r6, =50
+ wait_delay
+
+ /* clear ddr_phy reset */
+ ldr r6, =0x1000
+ ldr r7, [r2, r6]
+ orr r7, r7, #0x3
+ str r7, [r2, r6]
+ ldr r7, [r2, r6]
+ bic r7, r7, #0x1
+ str r7, [r2, r6]
+13:
+ ldr r6, [r0, #PM_INFO_MX7_DDRC_REG_NUM_OFF]
+ ldr r7, =PM_INFO_MX7_DDRC_REG_OFF
+ add r7, r7, r0
+14:
+ ldr r8, [r7], #0x4
+ ldr r9, [r7], #0x4
+ str r9, [r3, r8]
+ subs r6, r6, #0x1
+ bne 14b
+ ldr r7, =0x20
+ str r7, [r3, #DDRC_PWRCTL]
+ ldr r7, =0x0
+ str r7, [r3, #DDRC_DFIMISC]
+
+ /* do PHY, clear ddr_phy reset */
+ ldr r6, =0x1000
+ ldr r7, [r2, r6]
+ bic r7, r7, #0x2
+ str r7, [r2, r6]
+
+ ldr r7, [r1, #ANADIG_DIGPROG]
+ and r7, r7, #0xff
+ cmp r7, #0x11
+ bne 12f
+
+ /*
+ * TKT262940:
+	 * System hangs when RST is pressed while the DDR PAD is
+	 * in retention mode; fixed on TO1.1.
+ */
+ ldr r7, [r11]
+ bic r7, r7, #BIT32(27)
+ str r7, [r11]
+ ldr r7, [r11]
+ bic r7, r7, #BIT32(29)
+ str r7, [r11]
+12:
+ ldr r7, =BIT32(30)
+ str r7, [r1, #ANADIG_SNVS_MISC_CTRL_SET]
+
+ /* need to delay ~5mS */
+ ldr r6, =0x100000
+ wait_delay
+
+ ldr r6, [r0, #PM_INFO_MX7_DDRC_PHY_REG_NUM_OFF]
+ ldr r7, =PM_INFO_MX7_DDRC_PHY_REG_OFF
+ add r7, r7, r0
+
+15:
+ ldr r8, [r7], #0x4
+ ldr r9, [r7], #0x4
+ str r9, [r4, r8]
+ subs r6, r6, #0x1
+ bne 15b
+
+ ldr r7, =0x0
+ add r9, r10, #0x4000
+ str r7, [r9, #0x130]
+
+ ldr r7, =0x170
+ orr r7, r7, #0x8
+ str r7, [r11, #0x20]
+
+ ldr r7, =0x2
+ add r9, r10, #0x4000
+ str r7, [r9, #0x130]
+
+ ldr r7, =0xf
+ str r7, [r4, #DDRPHY_LP_CON0]
+
+ /* wait until self-refresh mode entered */
+16:
+ ldr r7, [r3, #DDRC_STAT]
+ and r7, r7, #0x3
+ cmp r7, #0x3
+ bne 16b
+ ldr r7, =0x0
+ str r7, [r3, #DDRC_SWCTL]
+ ldr r7, =0x1
+ str r7, [r3, #DDRC_DFIMISC]
+ ldr r7, =0x1
+ str r7, [r3, #DDRC_SWCTL]
+17:
+ ldr r7, [r3, #DDRC_SWSTAT]
+ and r7, r7, #0x1
+ cmp r7, #0x1
+ bne 17b
+18:
+ ldr r7, [r3, #DDRC_STAT]
+ and r7, r7, #0x20
+ cmp r7, #0x20
+ bne 18b
+
+ /* let DDR out of self-refresh */
+ ldr r7, =0x0
+ str r7, [r3, #DDRC_PWRCTL]
+19:
+ ldr r7, [r3, #DDRC_STAT]
+ and r7, r7, #0x30
+ cmp r7, #0x0
+ bne 19b
+
+20:
+ ldr r7, [r3, #DDRC_STAT]
+ and r7, r7, #0x3
+ cmp r7, #0x1
+ bne 20b
+
+ /* enable port */
+ ldr r7, =0x1
+ str r7, [r3, #DDRC_PCTRL_0]
+
+ /* enable auto self-refresh */
+ ldr r7, [r3, #DDRC_PWRCTL]
+ orr r7, r7, #(1 << 0)
+ str r7, [r3, #DDRC_PWRCTL]
+
+ .endm
+
+FUNC imx7_suspend, :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r4-r12}
+
+ /* make sure SNVS clk is enabled */
+ ldr r11, [r0, #PM_INFO_MX7_CCM_V_OFF]
+ add r11, r11, #0x4000
+ ldr r7, =0x3
+ str r7, [r11, #CCM_SNVS_LPCG]
+
+ /* check whether it is a standby mode */
+ ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+ ldr r7, [r11, #GPC_PGC_C0]
+ cmp r7, #0
+ beq ddr_only_self_refresh
+
+ /*
+	 * The address in r0 maps to the same VA in both the original and the
+	 * IRAM translation tables, so r0 needs no special handling here.
+ */
+ ldr r1, [r0, #PM_INFO_MX7_PBASE_OFF]
+ ldr r4, [r0, #PM_INFO_MX7_SIZE_OFF]
+
+ /*
+	 * Compute the physical resume address in IRAM
+	 * so it can be programmed into the SRC register.
+ */
+ ldr r6, =imx7_suspend
+ ldr r7, =resume
+ sub r7, r7, r6
+ add r8, r1, r4
+ add r9, r8, r7
+
+ ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFF]
+ /* store physical resume addr and pm_info address. */
+ str r9, [r11, #MX7_SRC_GPR1]
+ str r1, [r11, #MX7_SRC_GPR2]
+
+ disable_l1_dcache
+
+ store_ttbr
+
+ ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+ ldr r7, [r11, #GPC_PGC_FM]
+ cmp r7, #0
+ beq ddr_only_self_refresh
+
+ ddr_enter_retention
+ /* enter LPSR mode if resume addr is valid */
+ ldr r11, [r0, #PM_INFO_MX7_LPSR_V_OFF]
+ ldr r7, [r11]
+ cmp r7, #0x0
+ beq ddr_retention_enter_out
+
+ /* disable STOP mode before entering LPSR */
+ ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+ ldr r7, [r11]
+ bic r7, #0xf
+ str r7, [r11]
+
+ /* shut down vddsoc to enter lpsr mode */
+ ldr r11, [r0, #PM_INFO_MX7_SNVS_V_OFF]
+ ldr r7, [r11, #0x38]
+ orr r7, r7, #0x60
+ str r7, [r11, #0x38]
+ dsb
+wait_shutdown:
+ wfi
+ b wait_shutdown
+
+ddr_only_self_refresh:
+ ddrc_enter_self_refresh
+ b wfi
+ddr_retention_enter_out:
+ ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
+ ldr r7, =0x0
+ ldr r8, =0x1000
+ str r7, [r11, r8]
+
+ ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+ ldr r4, [r11, #MX7D_GPC_IMR1]
+ ldr r5, [r11, #MX7D_GPC_IMR2]
+ ldr r6, [r11, #MX7D_GPC_IMR3]
+ ldr r7, [r11, #MX7D_GPC_IMR4]
+
+ ldr r8, =0xffffffff
+ str r8, [r11, #MX7D_GPC_IMR1]
+ str r8, [r11, #MX7D_GPC_IMR2]
+ str r8, [r11, #MX7D_GPC_IMR3]
+ str r8, [r11, #MX7D_GPC_IMR4]
+
+ /*
+ * enable the RBC bypass counter here
+ * to hold off the interrupts. RBC counter
+ * = 8 (240us). With this setting, the latency
+ * from wakeup interrupt to ARM power up
+ * is ~250uS.
+ */
+ ldr r8, [r11, #0x14]
+ bic r8, r8, #(0x3f << 24)
+ orr r8, r8, #(0x8 << 24)
+ str r8, [r11, #0x14]
+
+ /* enable the counter. */
+ ldr r8, [r11, #0x14]
+ orr r8, r8, #(0x1 << 30)
+ str r8, [r11, #0x14]
+
+ /* unmask all the GPC interrupts. */
+ str r4, [r11, #MX7D_GPC_IMR1]
+ str r5, [r11, #MX7D_GPC_IMR2]
+ str r6, [r11, #MX7D_GPC_IMR3]
+ str r7, [r11, #MX7D_GPC_IMR4]
+
+ /*
+ * now delay for a short while (3usec)
+ * ARM is at 1GHz at this point
+ * so a short loop should be enough.
+ * this delay is required to ensure that
+ * the RBC counter can start counting in
+ * case an interrupt is already pending
+ * or in case an interrupt arrives just
+ * as ARM is about to assert DSM_request.
+ */
+ ldr r7, =2000
+rbc_loop:
+ subs r7, r7, #0x1
+ bne rbc_loop
+wfi:
+ dsb
+ /* Enter stop mode */
+ wfi
+
+ mov r5, #0x0
+
+ ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+ ldr r7, [r11, #GPC_PGC_FM]
+ cmp r7, #0
+ beq wfi_ddr_self_refresh_out
+
+ ddr_exit_retention
+ b wfi_ddr_retention_out
+wfi_ddr_self_refresh_out:
+ ddrc_exit_self_refresh
+wfi_ddr_retention_out:
+
+ /* check whether it is a standby mode */
+ ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+ ldr r7, [r11, #GPC_PGC_C0]
+ cmp r7, #0
+ beq standby_out
+
+ ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
+ ldr r7, =0x1
+ ldr r8, =0x1000
+ str r7, [r11, r8]
+
+ restore_ttbr
+standby_out:
+ pop {r4-r12}
+ /* return to suspend finish */
+ bx lr
+
+resume:
+ write_iciallu
+ write_bpiall
+ dsb
+ isb
+
+ mov r6, #(SCTLR_I | SCTLR_Z)
+ write_sctlr r6
+ isb
+
+ /*
+	 * After resume, the ROM leaves the core in SVC mode,
+	 * so we need to switch to Monitor mode.
+ */
+ cps #CPSR_MODE_MON
+
+ /* get physical resume address from pm_info. */
+ ldr lr, [r0, #PM_INFO_MX7_RESUME_ADDR_OFF]
+ /* clear core0's entry and parameter */
+ ldr r11, [r0, #PM_INFO_MX7_SRC_P_OFF]
+ mov r7, #0x0
+ str r7, [r11, #MX7_SRC_GPR1]
+ str r7, [r11, #MX7_SRC_GPR2]
+
+ mov r5, #0x1
+
+ ldr r11, [r0, #PM_INFO_MX7_GPC_P_OFF]
+ ldr r7, [r11, #GPC_PGC_FM]
+ cmp r7, #0
+ beq dsm_ddr_self_refresh_out
+
+ ddr_exit_retention
+ b dsm_ddr_retention_out
+dsm_ddr_self_refresh_out:
+ ddrc_exit_self_refresh
+dsm_ddr_retention_out:
+
+ bx lr
+UNWIND( .fnend)
+END_FUNC imx7_suspend
+
+FUNC ca7_cpu_resume, :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mov r0, #0 @ ; write the cache size selection register to be
+ write_csselr r0 @ ; sure we address the data cache
+ isb @ ; isb to sync the change to the cachesizeid reg
+
+_inv_dcache_off:
+ mov r0, #0 @ ; set way number to 0
+_inv_nextway:
+ mov r1, #0 @ ; set line number (=index) to 0
+_inv_nextline:
+ orr r2, r0, r1 @ ; construct way/index value
+ write_dcisw r2 @ ; invalidate data or unified cache line by set/way
+ add r1, r1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ cmp r1, #1 << LINE_FIELD_OVERFLOW @ ; overflow out of set field?
+ bne _inv_nextline
+ add r0, r0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ cmp r0, #0 @ ; overflow out of way field?
+ bne _inv_nextway
+
+ dsb @ ; synchronise
+ isb
+
+ /*
+ * No stack, scratch r0-r3
+	 * TODO: Use a dedicated configuration instead of plat_xxx,
+	 * since plat_xxx may change in the future and cannot be relied on.
+	 * The stack pointer needs careful handling.
+ */
+ blx plat_cpu_reset_early
+
+ b sm_pm_cpu_resume
+UNWIND( .fnend)
+END_FUNC ca7_cpu_resume
diff --git a/core/arch/arm/plat-imx/pm/psci.c b/core/arch/arm/plat-imx/pm/psci.c
index 220da0f7..a7de449e 100644
--- a/core/arch/arm/plat-imx/pm/psci.c
+++ b/core/arch/arm/plat-imx/pm/psci.c
@@ -30,6 +30,7 @@
#include <drivers/imx_uart.h>
#include <io.h>
#include <imx.h>
+#include <imx_pm.h>
#include <imx-regs.h>
#include <kernel/generic_boot.h>
#include <kernel/misc.h>
@@ -84,6 +85,8 @@ int psci_cpu_on(uint32_t core_idx, uint32_t entry,
val |= BIT32(SRC_SCR_CORE1_RST_OFFSET + (core_idx - 1));
write32(val, va + SRC_SCR);
+ imx_set_src_gpr(core_idx, 0);
+
return PSCI_RET_SUCCESS;
}
@@ -155,3 +158,50 @@ int psci_affinity_info(uint32_t affinity,
return PSCI_AFFINITY_LEVEL_OFF;
}
#endif
+
+__weak int imx7_cpu_suspend(uint32_t power_state __unused,
+ uintptr_t entry __unused,
+ uint32_t context_id __unused,
+ struct sm_nsec_ctx *nsec __unused)
+{
+ return 0;
+}
+
+int psci_cpu_suspend(uint32_t power_state,
+ uintptr_t entry, uint32_t context_id __unused,
+ struct sm_nsec_ctx *nsec)
+{
+ uint32_t id, type;
+ int ret = PSCI_RET_INVALID_PARAMETERS;
+
+ id = power_state & PSCI_POWER_STATE_ID_MASK;
+ type = (power_state & PSCI_POWER_STATE_TYPE_MASK) >>
+ PSCI_POWER_STATE_TYPE_SHIFT;
+
+ if ((type != PSCI_POWER_STATE_TYPE_POWER_DOWN) &&
+ (type != PSCI_POWER_STATE_TYPE_STANDBY)) {
+ DMSG("Not supported %x\n", type);
+ return ret;
+ }
+
+ /*
+ * ID 0 means suspend
+ * ID 1 means low power idle
+ * TODO: follow PSCI StateID sample encoding.
+ */
+ DMSG("ID = %d\n", id);
+ if (id == 1) {
+ /* Not supported now */
+ return ret;
+ } else if (id == 0) {
+ if (soc_is_imx7ds()) {
+ return imx7_cpu_suspend(power_state, entry,
+ context_id, nsec);
+ }
+ return ret;
+ }
+
+ DMSG("ID %d not supported\n", id);
+
+ return ret;
+}
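For reference, psci_cpu_suspend() above follows the PSCI 0.2 power_state layout, where the low 16 bits carry the state ID and bit 16 the state type. The snippet below sketches how a caller could encode the power-down suspend state this handler accepts; the constants are assumed from the PSCI 0.2 specification, not taken from this patch:

#include <stdint.h>

#define PSCI_POWER_STATE_TYPE_SHIFT		16	/* assumed PSCI 0.2 layout */
#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1

static uint32_t imx7_encode_suspend_state(void)
{
	uint32_t id = 0;	/* ID 0: suspend (see the comment above) */

	return (PSCI_POWER_STATE_TYPE_POWER_DOWN <<
		PSCI_POWER_STATE_TYPE_SHIFT) | id;
}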
diff --git a/core/arch/arm/plat-imx/sub.mk b/core/arch/arm/plat-imx/sub.mk
index 17fda6ad..a90bb622 100644
--- a/core/arch/arm/plat-imx/sub.mk
+++ b/core/arch/arm/plat-imx/sub.mk
@@ -4,7 +4,11 @@ srcs-y += main.c imx-common.c
srcs-$(CFG_MX6)$(CFG_MX7) += mmdc.c
srcs-$(CFG_PL310) += imx_pl310.c
-srcs-$(CFG_PSCI_ARM32) += pm/psci.c pm/gpcv2.c
+ifeq ($(CFG_PSCI_ARM32),y)
+srcs-y += pm/psci.c pm/gpcv2.c
+srcs-$(CFG_MX7) += pm/pm-imx7.c pm/psci-suspend-imx7.S pm/imx7_suspend.c
+endif
+
cflags-pm/psci.c-y += -Wno-suggest-attribute=noreturn
ifneq (,$(filter y, $(CFG_MX6Q) $(CFG_MX6D) $(CFG_MX6DL) $(CFG_MX6S)))