From 20c5ea4fc131dc45c2639653b5b7aeeb2d4d0d1e Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Fri, 4 Mar 2016 10:05:39 +0100
Subject: ARM: reintroduce ioremap_cached() for creating cached I/O mappings

The original ARM-only ioremap flavor 'ioremap_cached' has been renamed
to 'ioremap_cache' to align with other architectures, and subsequently
abused in generic code to map things like firmware tables in memory.
For that reason, there is currently an effort underway to deprecate
ioremap_cache, whose semantics are poorly defined, and which is typed
with an __iomem annotation that is inappropriate for mappings of
ordinary memory.

However, original users of ioremap_cached() used it in a context where
the I/O connotation is appropriate, and replacing those instances with
memremap() does not make sense. So let's revive ioremap_cached(), so
that we can change back those original users before we drop
ioremap_cache entirely in favor of memremap.

Cc: Russell King
Acked-by: Dan Williams
Signed-off-by: Ard Biesheuvel
---
 arch/arm/mm/ioremap.c | 4 ++++
 arch/arm/mm/nommu.c   | 4 ++++
 2 files changed, 8 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 66a978d05958..d5350f6af089 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -380,11 +380,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+	__alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
 
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1dd10936d68d..f24967e8ff7f 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -367,11 +367,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+	__alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				    __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
 
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
--
cgit v1.2.3


From 9ab9e4fce45379cb6a7dbf87cf8f8e6ba01853c2 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 22 Feb 2016 15:02:08 +0100
Subject: ARM: memremap: implement arch_memremap_wb()

The generic memremap() falls back to using ioremap_cache() to create
MEMREMAP_WB mappings if the requested region is not already covered by
the linear mapping, unless the architecture provides an implementation
of arch_memremap_wb().

Since ioremap_cache() is not appropriate on ARM to map memory with the
same attributes used for the linear mapping, implement arch_memremap_wb()
which does exactly that. Also, relax the WARN() check to allow
MT_MEMORY_RW mappings of pfn_valid() pages.
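For illustration only: a caller that previously used ioremap_cache() to
map, say, a firmware table would go through memremap() instead. A
minimal sketch, assuming hypothetical fw_tbl_phys/fw_tbl_size values
(not part of this patch):

	void *tbl = memremap(fw_tbl_phys, fw_tbl_size, MEMREMAP_WB);
	if (!tbl)
		return -ENOMEM;
	/* plain pointer: parse with ordinary loads/stores, no __iomem */
	memunmap(tbl);

Note that memremap() returns a plain void *, which is exactly the typing
that ioremap_cache() cannot provide.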
Cc: Russell King
Acked-by: Dan Williams
Signed-off-by: Ard Biesheuvel
---
 arch/arm/mm/ioremap.c | 12 ++++++++++--
 arch/arm/mm/nommu.c   |  5 +++++
 2 files changed, 15 insertions(+), 2 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d5350f6af089..ff0eed23ddf1 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	}
 
 	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 * Don't allow RAM to be mapped with mismatched attributes - this
+	 * causes problems with ARMv6+
 	 */
-	if (WARN_ON(pfn_valid(pfn)))
+	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
 		return NULL;
 
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -418,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 					__builtin_return_address(0));
 }
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (__force void *)arch_ioremap_caller(phys_addr, size,
+						   MT_MEMORY_RW,
+						   __builtin_return_address(0));
+}
+
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index f24967e8ff7f..ca6d8e8ca1f8 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -384,6 +384,11 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (void *)phys_addr;
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }
--
cgit v1.2.3


From b67dd2e9bdc0f79a3389e57c51189ed2c2127c00 Mon Sep 17 00:00:00 2001
From: Alexandre Courbot
Date: Mon, 7 Mar 2016 03:35:56 +0100
Subject: ARM: 8548/1: dma-mapping: remove arm_dma_set_mask()

arm_dma_set_mask() implements exactly the same behavior as the fallback
that dma_set_mask() takes if the set_dma_mask op is not set. Remove it
and use that fallback instead, as is already done for dma_get_mask().
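For reference, a simplified sketch of the generic dma_set_mask() at this
point (paraphrasing include/linux/dma-mapping.h, not part of this patch):

	static inline int dma_set_mask(struct device *dev, u64 mask)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		if (ops->set_dma_mask)
			return ops->set_dma_mask(dev, mask);

		/* fallback when the op is not provided */
		if (!dev->dma_mask || !dma_supported(dev, mask))
			return -EIO;
		*dev->dma_mask = mask;
		return 0;
	}

With .set_dma_mask removed from the ARM ops structures, the fallback
branch below the op check does exactly what arm_dma_set_mask() did.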
Signed-off-by: Alexandre Courbot
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 16 ----------------
 1 file changed, 16 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index deac58d5f1f7..bad0c6a4bb7b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -190,7 +190,6 @@ struct dma_map_ops arm_dma_ops = {
 	.sync_single_for_device	= arm_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
-	.set_dma_mask		= arm_dma_set_mask,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
@@ -209,7 +208,6 @@ struct dma_map_ops arm_coherent_dma_ops = {
 	.get_sgtable		= arm_dma_get_sgtable,
 	.map_page		= arm_coherent_dma_map_page,
 	.map_sg			= arm_dma_map_sg,
-	.set_dma_mask		= arm_dma_set_mask,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
@@ -1142,16 +1140,6 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int arm_dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
 static int __init dma_debug_do_init(void)
@@ -2005,8 +1993,6 @@ struct dma_map_ops iommu_ops = {
 	.unmap_sg		= arm_iommu_unmap_sg,
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
-
-	.set_dma_mask		= arm_dma_set_mask,
 };
 
 struct dma_map_ops iommu_coherent_ops = {
@@ -2020,8 +2006,6 @@ struct dma_map_ops iommu_coherent_ops = {
 
 	.map_sg		= arm_coherent_iommu_map_sg,
 	.unmap_sg	= arm_coherent_iommu_unmap_sg,
-
-	.set_dma_mask	= arm_dma_set_mask,
 };
 
 /**
--
cgit v1.2.3


From 981b6714dbd26609212536b9fed43e49db1459cf Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 15 Mar 2016 14:55:03 +0000
Subject: ARM: provide improved virt_to_idmap() functionality

For kexec, we need more functionality from the IDMAP system. We need
to be able to convert physical addresses to their identity mapped
versions as well as virtual addresses.

Convert the existing arch_virt_to_idmap() to deal with physical
addresses instead.

Acked-by: Santosh Shilimkar
Signed-off-by: Russell King
---
 arch/arm/mm/idmap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index bd274a05b8ff..c1a48f88764e 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -15,7 +15,7 @@
  * page tables.
  */
 pgd_t *idmap_pgd;
-unsigned long (*arch_virt_to_idmap)(unsigned long x);
+long long arch_phys_to_idmap_offset;
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
--
cgit v1.2.3


From 204932dfc87c236fc4fea8e6e4f03123853ea633 Mon Sep 17 00:00:00 2001
From: Brad Mouring
Date: Thu, 28 Apr 2016 17:00:52 +0100
Subject: ARM: 8569/1: pl2x0: Add OF control of cache power management

Add the ability to override the power management bits of L2C-310
controllers (dynamic clock gating and standby mode) through OF entries.
As the saved register is only applied when working on a supported
controller, it is safe to save the settings.

In order to maintain existing behavior, if the settings are not found
in the DT, the corresponding feature will be enabled.
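For example, a board DT could disable dynamic clock gating while leaving
standby mode enabled with something like the following (hypothetical
node and unit address, for illustration only):

	l2c: cache-controller@fffef000 {
		compatible = "arm,pl310-cache";
		reg = <0xfffef000 0x1000>;
		cache-unified;
		cache-level = <2>;
		arm,dynamic-clock-gating = <0>;	/* 0 disables the feature */
		arm,standby-mode = <1>;		/* non-zero keeps it enabled */
	};

Omitting either property leaves the corresponding feature enabled,
preserving the previous behavior.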
Signed-off-by: Brad Mouring
Signed-off-by: Russell King
---
 arch/arm/mm/cache-l2x0.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9f9d54271aad..c61996c256cc 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -647,11 +647,6 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
 		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
 	}
 
-	/* r3p0 or later has power control register */
-	if (rev >= L310_CACHE_ID_RTL_R3P0)
-		l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
-					   L310_STNDBY_MODE_EN;
-
 	/*
 	 * Always enable non-secure access to the lockdown registers -
 	 * we write to them as part of the L2C enable sequence so they
@@ -1141,6 +1136,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
 	u32 filter[2] = { 0, 0 };
 	u32 assoc;
 	u32 prefetch;
+	u32 power;
 	u32 val;
 	int ret;
 
@@ -1271,6 +1267,26 @@ static void __init l2c310_of_parse(const struct device_node *np,
 	}
 
 	l2x0_saved_regs.prefetch_ctrl = prefetch;
+
+	power = l2x0_saved_regs.pwr_ctrl |
+		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
+
+	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
+	if (!ret) {
+		if (!val)
+			power &= ~L310_DYNAMIC_CLK_GATING_EN;
+	} else if (ret != -EINVAL) {
+		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
+	}
+	ret = of_property_read_u32(np, "arm,standby-mode", &val);
+	if (!ret) {
+		if (!val)
+			power &= ~L310_STNDBY_MODE_EN;
+	} else if (ret != -EINVAL) {
+		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
+	}
+
+	l2x0_saved_regs.pwr_ctrl = power;
 }
 
 static const struct l2c_init_data of_l2c310_data __initconst = {
--
cgit v1.2.3


From 6427a840ff6aeaac36c59872b0b4b2040ed26c9b Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Tue, 26 Apr 2016 09:11:13 +0100
Subject: ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs

This outer cache allows the active ways to be controlled independently
for each CPU, but currently nothing is done for secondary CPUs. In
other words, all the ways are locked for secondary CPUs by default.
This commit fixes that so as to fully bring out the performance of
this outer cache.

There are two possible ways to achieve this:

[1] Each CPU initializes its own active ways. This can be done via the
    SSCLPDAWCR register. This is a banked register, so each CPU sees a
    different instance of it.

[2] The master CPU initializes active ways for all the CPUs. This is
    available via the SSCDAWCARMR(N) registers, where all instances of
    SSCLPDAWCR are mirrored. They are mapped at the address
    SSCDAWCARMR + 4 * N, where N is the CPU number.

The outer cache framework does not support a per-CPU init callback, so
this commit adopts [2]; the master CPU iterates over the possible CPUs,
setting up the SSCDAWCARMR(N) registers.
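In code terms, approach [2] amounts to the master CPU writing every
CPU's mirror register in one loop; a simplified sketch, with the mask
and base as set up by the patch below:

	unsigned int cpu;

	for_each_possible_cpu(cpu)
		writel_relaxed(active_way_mask,		   /* ways to enable */
			       way_ctrl_base + 4 * cpu);   /* SSCDAWCARMR(cpu) */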
Signed-off-by: Masahiro Yamada
Signed-off-by: Russell King
---
 arch/arm/mm/cache-uniphier.c | 26 ++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index a6fa7b73fbe0..c8e2f4947223 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -96,6 +96,7 @@ struct uniphier_cache_data {
 	void __iomem *ctrl_base;
 	void __iomem *rev_base;
 	void __iomem *op_base;
+	void __iomem *way_ctrl_base;
 	u32 way_present_mask;
 	u32 way_locked_mask;
 	u32 nsets;
@@ -256,10 +257,13 @@ static void __init __uniphier_cache_set_locked_ways(
 					struct uniphier_cache_data *data,
 					u32 way_mask)
 {
+	unsigned int cpu;
+
 	data->way_locked_mask = way_mask & data->way_present_mask;
 
-	writel_relaxed(~data->way_locked_mask & data->way_present_mask,
-		       data->ctrl_base + UNIPHIER_SSCLPDAWCR);
+	for_each_possible_cpu(cpu)
+		writel_relaxed(~data->way_locked_mask & data->way_present_mask,
+			       data->way_ctrl_base + 4 * cpu);
 }
 
 static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
@@ -459,6 +463,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
 		goto err;
 	}
 
+	data->way_ctrl_base = data->ctrl_base + 0xc00;
+
 	if (*cache_level == 2) {
 		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
 		/*
@@ -467,6 +473,22 @@ static int __init __uniphier_cache_init(struct device_node *np,
 		 */
 		if (revision <= 0x16)
 			data->range_op_max_size = (u32)1 << 22;
+
+		/*
+		 * Unfortunately, the offset address of the active way control
+		 * base varies from SoC to SoC.
+		 */
+		switch (revision) {
+		case 0x11:	/* sLD3 */
+			data->way_ctrl_base = data->ctrl_base + 0x870;
+			break;
+		case 0x12:	/* LD4 */
+		case 0x16:	/* sld8 */
+			data->way_ctrl_base = data->ctrl_base + 0x840;
+			break;
+		default:
+			break;
+		}
 	}
 
 	data->range_op_max_size -= data->line_size;
--
cgit v1.2.3