commit     a1c28b75a95808161cacbb3531c418abe248994e
tree       d481754b548989457710cd07bdd66df3dedd54f9
parent     a05a70db34ba24ca009e1c9cedaef26fd17d5470
parent     5632a9fbcd451892332d45553ce8b831d5143691
author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-20 10:01:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-20 10:01:38 -0700

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Changes included in this pull request:

   - revert pxa2xx-flash back to using ioremap_cached() and switch
     memremap() to use arch_memremap_wb()

   - remove pci=firmware command line argument handling

   - remove unnecessary arm_dma_set_mask() implementation, the generic
     implementation will do for ARM

   - removal of the ARM kallsyms "hack" to work around mode switching
     veneers and vectors located below PAGE_OFFSET

   - tidy up build system output a little

   - add L2 cache power management DT bindings

   - remove duplicated local_irq_disable() in reboot paths

   - handle AMBA primecell devices better at registration time with PM
     domains (needed for Samsung SoCs)

   - ARM specific preparation to support Keystone II kexec"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs
  ARM: 8570/2: Documentation: devicetree: Add PL310 PM bindings
  ARM: 8569/1: pl2x0: Add OF control of cache power management
  ARM: 8568/1: reboot: remove duplicated local_irq_disable()
  ARM: 8566/1: drivers: amba: properly handle devices with power domains
  ARM: provide arm_has_idmap_alias() helper
  ARM: kexec: remove 512MB restriction on kexec crashdump
  ARM: provide improved virt_to_idmap() functionality
  ARM: kexec: fix crashkernel= handling
  ARM: 8557/1: specify install, zinstall, and uinstall as PHONY targets
  ARM: 8562/1: suppress "include/generated/mach-types.h is up to date."
  ARM: 8553/1: kallsyms: remove --page-offset command line option
  ARM: 8552/1: kallsyms: remove special lower address limit for CONFIG_ARM
  ARM: 8555/1: kallsyms: ignore ARM mode switching veneers
  ARM: 8548/1: dma-mapping: remove arm_dma_set_mask()
  ARM: 8554/1: kernel: pci: remove pci=firmware command line parameter handling
  ARM: memremap: implement arch_memremap_wb()
  memremap: add arch specific hook for MEMREMAP_WB mappings
  mtd: pxa2xx-flash: switch back from memremap to ioremap_cached
  ARM: reintroduce ioremap_cached() for creating cached I/O mappings

-rw-r--r--  Documentation/devicetree/bindings/arm/l2c2x0.txt | 6
-rw-r--r--  Documentation/kdump/kdump.txt | 13
-rw-r--r--  Documentation/kernel-parameters.txt | 5
-rw-r--r--  arch/arm/boot/Makefile | 2
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arm/include/asm/io.h | 12
-rw-r--r--  arch/arm/include/asm/memory.h | 38
-rw-r--r--  arch/arm/kernel/bios32.c | 3
-rw-r--r--  arch/arm/kernel/reboot.c | 3
-rw-r--r--  arch/arm/kernel/setup.c | 26
-rw-r--r--  arch/arm/mach-keystone/keystone.c | 7
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 26
-rw-r--r--  arch/arm/mm/cache-uniphier.c | 26
-rw-r--r--  arch/arm/mm/dma-mapping.c | 16
-rw-r--r--  arch/arm/mm/idmap.c | 2
-rw-r--r--  arch/arm/mm/ioremap.c | 16
-rw-r--r--  arch/arm/mm/nommu.c | 9
-rw-r--r--  arch/arm/tools/Makefile | 11
-rw-r--r--  drivers/amba/bus.c | 100
-rw-r--r--  drivers/mtd/maps/pxa2xx-flash.c | 6
-rw-r--r--  kernel/memremap.c | 11
-rw-r--r--  scripts/kallsyms.c | 10
-rwxr-xr-x  scripts/link-vmlinux.sh | 4
23 files changed, 260 insertions(+), 94 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.txt b/Documentation/devicetree/bindings/arm/l2c2x0.txt
index fe0398c5c77b..c453ab5553cd 100644
--- a/Documentation/devicetree/bindings/arm/l2c2x0.txt
+++ b/Documentation/devicetree/bindings/arm/l2c2x0.txt
@@ -84,6 +84,12 @@ Optional properties:
- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
<1> (forcibly enable), property absent (retain settings set by
firmware)
+- arm,dynamic-clock-gating : L2 dynamic clock gating. Value: <0> (forcibly
+ disable), <1> (forcibly enable), property absent (OS specific behavior,
+ preferably retain firmware settings)
+- arm,standby-mode: L2 standby mode enable. Value <0> (forcibly disable),
+ <1> (forcibly enable), property absent (OS specific behavior,
+ preferably retain firmware settings)
Example:
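
(The binding's own devicetree example follows "Example:" in the full file and is unchanged.) As a rough illustration of the tri-state semantics above (property absent keeps the current setting, <0> forces the feature off, <1> forces it on), here is a hypothetical C consumer; the L310_* bit names are the ones used in the cache-l2x0.c hunk further down, while the helper itself is not part of this series:

    #include <linux/of.h>

    static void l2c310_apply_power_props(const struct device_node *np, u32 *power)
    {
        u32 val;

        /* absent property: leave *power untouched (retain current setting) */
        if (!of_property_read_u32(np, "arm,dynamic-clock-gating", &val)) {
            if (val)
                *power |= L310_DYNAMIC_CLK_GATING_EN;
            else
                *power &= ~L310_DYNAMIC_CLK_GATING_EN;
        }

        if (!of_property_read_u32(np, "arm,standby-mode", &val)) {
            if (val)
                *power |= L310_STNDBY_MODE_EN;
            else
                *power &= ~L310_STNDBY_MODE_EN;
        }
    }
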
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index bc4bd5a44b88..88ff63d5fde3 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -263,12 +263,6 @@ The syntax is:
crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
range=start-[end]
-Please note, on arm, the offset is required.
- crashkernel=<range1>:<size1>[,<range2>:<size2>,...]@offset
- range=start-[end]
-
- 'start' is inclusive and 'end' is exclusive.
-
For example:
crashkernel=512M-2G:64M,2G-:128M
@@ -307,10 +301,9 @@ Boot into System Kernel
on the memory consumption of the kdump system. In general this is not
dependent on the memory size of the production system.
- On arm, use "crashkernel=Y@X". Note that the start address of the kernel
- will be aligned to 128MiB (0x08000000), so if the start address is not then
- any space below the alignment point may be overwritten by the dump-capture kernel,
- which means it is possible that the vmcore is not that precise as expected.
+ On arm, the use of "crashkernel=Y@X" is no longer necessary; the
+ kernel will automatically locate the crash kernel image within the
+ first 512MB of RAM if X is not given.
Load the Dump-capture Kernel
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 18d7f5bea077..4e76a349b6f8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2959,11 +2959,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
for broken drivers that don't call it.
skip_isa_align [X86] do not align io start addr, so can
handle more pci cards
- firmware [ARM] Do not re-enumerate the bus but instead
- just use the configuration from the
- bootloader. This is currently used on
- IXP2000 systems where the bus has to be
- configured a certain way for adjunct CPUs.
noearly [X86] Don't do any early type 1 scanning.
This might help on some broken boards which
machine check when some devices' config space
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 48fab15cfc02..446705a4325a 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -88,7 +88,7 @@ $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
@$(kecho) ' Kernel: $@ is ready'
-PHONY += initrd
+PHONY += initrd install zinstall uinstall
initrd:
@test "$(INITRD_PHYS)" != "" || \
(echo This machine does not support INITRD; exit -1)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 02283eb2f5b2..a83570f10124 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -162,8 +162,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline void dma_mark_clean(void *addr, size_t size) { }
-extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
-
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 485982084fe9..781ef5fe235d 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -392,9 +392,18 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
#define ioremap_nocache ioremap
+/*
+ * Do not use ioremap_cache for mapping memory. Use memremap instead.
+ */
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache
+/*
+ * Do not use ioremap_cached in new code. Provided for the benefit of
+ * the pxa2xx-flash MTD driver only.
+ */
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);
+
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
@@ -402,6 +411,9 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
+#define arch_memremap_wb arch_memremap_wb
+
/*
* io{read,write}{16,32}be() macros
*/
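
A hedged sketch of the driver-facing consequence of the comments added above: ordinary RAM should be mapped with memremap() rather than ioremap_cache(), and with this series the MEMREMAP_WB case on ARM is served by arch_memremap_wb(). The buffer and its physical address are hypothetical:

    #include <linux/io.h>

    /* phys/size describe some RAM-backed buffer; values come from elsewhere */
    static void *map_shared_buffer(phys_addr_t phys, size_t size)
    {
        /*
         * Returns a plain (non-__iomem) pointer.  The write-back case is now
         * routed to arch_memremap_wb(), so the attributes match the kernel
         * linear map instead of MT_DEVICE_CACHED.  Release with memunmap().
         */
        return memremap(phys, size, MEMREMAP_WB);
    }
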
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 9427fd632552..31c07a2cc100 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -288,19 +288,43 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
-extern unsigned long (*arch_virt_to_idmap)(unsigned long x);
+extern long long arch_phys_to_idmap_offset;
/*
- * These are for systems that have a hardware interconnect supported alias of
- * physical memory for idmap purposes. Most cases should leave these
+ * These are for systems that have a hardware interconnect supported alias
+ * of physical memory for idmap purposes. Most cases should leave these
* untouched. Note: this can only return addresses less than 4GiB.
*/
+static inline bool arm_has_idmap_alias(void)
+{
+ return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
+}
+
+#define IDMAP_INVALID_ADDR ((u32)~0)
+
+static inline unsigned long phys_to_idmap(phys_addr_t addr)
+{
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
+ addr += arch_phys_to_idmap_offset;
+ if (addr > (u32)~0)
+ addr = IDMAP_INVALID_ADDR;
+ }
+ return addr;
+}
+
+static inline phys_addr_t idmap_to_phys(unsigned long idmap)
+{
+ phys_addr_t addr = idmap;
+
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
+ addr -= arch_phys_to_idmap_offset;
+
+ return addr;
+}
+
static inline unsigned long __virt_to_idmap(unsigned long x)
{
- if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
- return arch_virt_to_idmap(x);
- else
- return __virt_to_phys(x);
+ return phys_to_idmap(__virt_to_phys(x));
}
#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
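
To make the new helpers concrete, a small worked sketch; the offset value is made up (real platforms set arch_phys_to_idmap_offset from their pv_fixup code, as the Keystone change below does) and the 64-bit physical addresses assume LPAE:

    /* alias = phys - 0x7_8000_0000, purely illustrative */
    static void idmap_alias_example(void)
    {
        arch_phys_to_idmap_offset = -0x780000000LL;

        phys_to_idmap(0x800000000ULL);  /* 0x80000000: fits below 4GiB */
        phys_to_idmap(0x900000000ULL);  /* 0x180000000: too high, so IDMAP_INVALID_ADDR */
        idmap_to_phys(0x80000000);      /* back to 0x800000000 */
        arm_has_idmap_alias();          /* true while the offset is non-zero */
    }
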
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 066f7f9ba411..05e61a2eeabe 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -550,9 +550,6 @@ char * __init pcibios_setup(char *str)
if (!strcmp(str, "debug")) {
debug_pci = 1;
return NULL;
- } else if (!strcmp(str, "firmware")) {
- pci_add_flags(PCI_PROBE_ONLY);
- return NULL;
}
return str;
}
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 71a2ff9ec490..3fa867a2aae6 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -104,8 +104,6 @@ void machine_halt(void)
{
local_irq_disable();
smp_send_stop();
-
- local_irq_disable();
while (1);
}
@@ -150,6 +148,5 @@ void machine_restart(char *cmd)
/* Whoops - the platform was unable to reboot. Tell the user! */
printk("Reboot failed -- System halted\n");
- local_irq_disable();
while (1);
}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7d4e2850910c..7b5350060612 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -941,6 +941,12 @@ static int __init init_machine_late(void)
late_initcall(init_machine_late);
#ifdef CONFIG_KEXEC
+/*
+ * The crash region must be aligned to 128MB to avoid
+ * zImage relocating below the reserved region.
+ */
+#define CRASH_ALIGN (128 << 20)
+
static inline unsigned long long get_total_mem(void)
{
unsigned long total;
@@ -968,6 +974,26 @@ static void __init reserve_crashkernel(void)
if (ret)
return;
+ if (crash_base <= 0) {
+ unsigned long long crash_max = idmap_to_phys((u32)~0);
+ crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
+ crash_size, CRASH_ALIGN);
+ if (!crash_base) {
+ pr_err("crashkernel reservation failed - No suitable area found.\n");
+ return;
+ }
+ } else {
+ unsigned long long start;
+
+ start = memblock_find_in_range(crash_base,
+ crash_base + crash_size,
+ crash_size, SECTION_SIZE);
+ if (start != crash_base) {
+ pr_err("crashkernel reservation failed - memory is in use.\n");
+ return;
+ }
+ }
+
ret = memblock_reserve(crash_base, crash_size);
if (ret < 0) {
pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index e6b9cb1e6709..a33a296b00dc 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -63,11 +63,6 @@ static void __init keystone_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-static unsigned long keystone_virt_to_idmap(unsigned long x)
-{
- return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START;
-}
-
static long long __init keystone_pv_fixup(void)
{
long long offset;
@@ -91,7 +86,7 @@ static long long __init keystone_pv_fixup(void)
offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;
/* Populate the arch idmap hook */
- arch_virt_to_idmap = keystone_virt_to_idmap;
+ arch_phys_to_idmap_offset = -offset;
return offset;
}
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9f9d54271aad..c61996c256cc 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -647,11 +647,6 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
- /* r3p0 or later has power control register */
- if (rev >= L310_CACHE_ID_RTL_R3P0)
- l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
- L310_STNDBY_MODE_EN;
-
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
@@ -1141,6 +1136,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 filter[2] = { 0, 0 };
u32 assoc;
u32 prefetch;
+ u32 power;
u32 val;
int ret;
@@ -1271,6 +1267,26 @@ static void __init l2c310_of_parse(const struct device_node *np,
}
l2x0_saved_regs.prefetch_ctrl = prefetch;
+
+ power = l2x0_saved_regs.pwr_ctrl |
+ L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
+
+ ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
+ if (!ret) {
+ if (!val)
+ power &= ~L310_DYNAMIC_CLK_GATING_EN;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
+ }
+ ret = of_property_read_u32(np, "arm,standby-mode", &val);
+ if (!ret) {
+ if (!val)
+ power &= ~L310_STNDBY_MODE_EN;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
+ }
+
+ l2x0_saved_regs.pwr_ctrl = power;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index a6fa7b73fbe0..c8e2f4947223 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -96,6 +96,7 @@ struct uniphier_cache_data {
void __iomem *ctrl_base;
void __iomem *rev_base;
void __iomem *op_base;
+ void __iomem *way_ctrl_base;
u32 way_present_mask;
u32 way_locked_mask;
u32 nsets;
@@ -256,10 +257,13 @@ static void __init __uniphier_cache_set_locked_ways(
struct uniphier_cache_data *data,
u32 way_mask)
{
+ unsigned int cpu;
+
data->way_locked_mask = way_mask & data->way_present_mask;
- writel_relaxed(~data->way_locked_mask & data->way_present_mask,
- data->ctrl_base + UNIPHIER_SSCLPDAWCR);
+ for_each_possible_cpu(cpu)
+ writel_relaxed(~data->way_locked_mask & data->way_present_mask,
+ data->way_ctrl_base + 4 * cpu);
}
static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
@@ -459,6 +463,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
goto err;
}
+ data->way_ctrl_base = data->ctrl_base + 0xc00;
+
if (*cache_level == 2) {
u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
/*
@@ -467,6 +473,22 @@ static int __init __uniphier_cache_init(struct device_node *np,
*/
if (revision <= 0x16)
data->range_op_max_size = (u32)1 << 22;
+
+ /*
+ * Unfortunately, the offset of the active way control base
+ * varies from SoC to SoC.
+ */
+ switch (revision) {
+ case 0x11: /* sLD3 */
+ data->way_ctrl_base = data->ctrl_base + 0x870;
+ break;
+ case 0x12: /* LD4 */
+ case 0x16: /* sld8 */
+ data->way_ctrl_base = data->ctrl_base + 0x840;
+ break;
+ default:
+ break;
+ }
}
data->range_op_max_size -= data->line_size;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5c2ca062c3fa..ff7ed5697d3e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -190,7 +190,6 @@ struct dma_map_ops arm_dma_ops = {
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
- .set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -209,7 +208,6 @@ struct dma_map_ops arm_coherent_dma_ops = {
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
- .set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -1143,16 +1141,6 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
-int arm_dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
static int __init dma_debug_do_init(void)
@@ -2006,8 +1994,6 @@ struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
-
- .set_dma_mask = arm_dma_set_mask,
};
struct dma_map_ops iommu_coherent_ops = {
@@ -2021,8 +2007,6 @@ struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
-
- .set_dma_mask = arm_dma_set_mask,
};
/**
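
With arm_dma_set_mask() gone, drivers keep using the generic mask helpers unchanged; the core checks dma_supported() and stores the mask, which is all the removed function did. A hedged probe fragment (device and mask are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        /* no .set_dma_mask hook in arm_dma_ops is needed for this any more */
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }
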
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index bd274a05b8ff..c1a48f88764e 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -15,7 +15,7 @@
* page tables.
*/
pgd_t *idmap_pgd;
-unsigned long (*arch_virt_to_idmap)(unsigned long x);
+long long arch_phys_to_idmap_offset;
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 66a978d05958..ff0eed23ddf1 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
}
/*
- * Don't allow RAM to be mapped - this causes problems with ARMv6+
+ * Don't allow RAM to be mapped with mismatched attributes - this
+ * causes problems with ARMv6+
*/
- if (WARN_ON(pfn_valid(pfn)))
+ if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -380,11 +381,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+ __alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
@@ -414,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
__builtin_return_address(0));
}
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+ return (__force void *)arch_ioremap_caller(phys_addr, size,
+ MT_MEMORY_RW,
+ __builtin_return_address(0));
+}
+
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d5805e4bf2fc..2740967727e2 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -368,11 +368,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+ __alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
@@ -381,6 +385,11 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
}
EXPORT_SYMBOL(ioremap_wc);
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+ return (void *)phys_addr;
+}
+
void __iounmap(volatile void __iomem *addr)
{
}
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 32d05c8219dc..6e4cd1867a9f 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -4,7 +4,10 @@
# Copyright (C) 2001 Russell King
#
-include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
- @$(kecho) ' Generating $@'
- @mkdir -p $(dir $@)
- $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
+quiet_cmd_gen_mach = GEN $@
+ cmd_gen_mach = mkdir -p $(dir $@) && \
+ $(AWK) -f $(filter-out $(PHONY),$^) > $@ || \
+ { rm -f $@; /bin/false; }
+
+include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
+ $(call if_changed,gen_mach)
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f0099360039e..a5b5c87e2114 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -336,16 +336,7 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
-/**
- * amba_device_add - add a previously allocated AMBA device structure
- * @dev: AMBA device allocated by amba_device_alloc
- * @parent: resource parent for this devices resources
- *
- * Claim the resource, and read the device cell ID if not already
- * initialized. Register the AMBA device with the Linux device
- * manager.
- */
-int amba_device_add(struct amba_device *dev, struct resource *parent)
+static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
@@ -373,6 +364,12 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
goto err_release;
}
+ ret = dev_pm_domain_attach(&dev->dev, true);
+ if (ret == -EPROBE_DEFER) {
+ iounmap(tmp);
+ goto err_release;
+ }
+
ret = amba_get_enable_pclk(dev);
if (ret == 0) {
u32 pid, cid;
@@ -398,6 +395,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
}
iounmap(tmp);
+ dev_pm_domain_detach(&dev->dev, true);
if (ret)
goto err_release;
@@ -421,6 +419,88 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
err_out:
return ret;
}
+
+/*
+ * Registering an AMBA device requires reading its pid and cid registers.
+ * To do this, the device must be turned on (if it is part of a power domain)
+ * and have its clocks enabled. However, in some cases those resources might
+ * not be available yet. Returning -EPROBE_DEFER is not a solution here,
+ * because callers don't handle that special error code. Instead, such devices
+ * are added to a special list and their registration is retried from a
+ * periodic worker until all resources are available and registration succeeds.
+ */
+struct deferred_device {
+ struct amba_device *dev;
+ struct resource *parent;
+ struct list_head node;
+};
+
+static LIST_HEAD(deferred_devices);
+static DEFINE_MUTEX(deferred_devices_lock);
+
+static void amba_deferred_retry_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
+
+#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
+
+static void amba_deferred_retry_func(struct work_struct *dummy)
+{
+ struct deferred_device *ddev, *tmp;
+
+ mutex_lock(&deferred_devices_lock);
+
+ list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
+ int ret = amba_device_try_add(ddev->dev, ddev->parent);
+
+ if (ret == -EPROBE_DEFER)
+ continue;
+
+ list_del_init(&ddev->node);
+ kfree(ddev);
+ }
+
+ if (!list_empty(&deferred_devices))
+ schedule_delayed_work(&deferred_retry_work,
+ DEFERRED_DEVICE_TIMEOUT);
+
+ mutex_unlock(&deferred_devices_lock);
+}
+
+/**
+ * amba_device_add - add a previously allocated AMBA device structure
+ * @dev: AMBA device allocated by amba_device_alloc
+ * @parent: resource parent for this devices resources
+ *
+ * Claim the resource, and read the device cell ID if not already
+ * initialized. Register the AMBA device with the Linux device
+ * manager.
+ */
+int amba_device_add(struct amba_device *dev, struct resource *parent)
+{
+ int ret = amba_device_try_add(dev, parent);
+
+ if (ret == -EPROBE_DEFER) {
+ struct deferred_device *ddev;
+
+ ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return -ENOMEM;
+
+ ddev->dev = dev;
+ ddev->parent = parent;
+ ret = 0;
+
+ mutex_lock(&deferred_devices_lock);
+
+ if (list_empty(&deferred_devices))
+ schedule_delayed_work(&deferred_retry_work,
+ DEFERRED_DEVICE_TIMEOUT);
+ list_add_tail(&ddev->node, &deferred_devices);
+
+ mutex_unlock(&deferred_devices_lock);
+ }
+ return ret;
+}
EXPORT_SYMBOL_GPL(amba_device_add);
static struct amba_device *
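
The caller-visible effect, sketched with made-up names and addresses: amba_device_add() now absorbs -EPROBE_DEFER from the PM domain attach by parking the device on the deferred list above and retrying from the delayed work, so bus code does not have to handle the deferral itself:

    #include <linux/amba/bus.h>
    #include <linux/ioport.h>
    #include <linux/sizes.h>

    /* hypothetical primecell registration; the name and base are illustrative */
    static int example_register_primecell(void)
    {
        struct amba_device *adev;

        adev = amba_device_alloc("example.uart", 0x10009000, SZ_4K);
        if (!adev)
            return -ENOMEM;

        /*
         * If the power domain or apb_pclk is not ready yet this no longer
         * fails; the device is queued on deferred_devices and retried every
         * DEFERRED_DEVICE_TIMEOUT until its pid/cid registers can be read.
         */
        return amba_device_add(adev, &iomem_resource);
    }
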
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7497090e9900..2cde28ed95c9 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -71,8 +71,8 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
info->map.name);
return -ENOMEM;
}
- info->map.cached = memremap(info->map.phys, info->map.size,
- MEMREMAP_WB);
+ info->map.cached =
+ ioremap_cached(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
@@ -111,7 +111,7 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
map_destroy(info->mtd);
iounmap(info->map.virt);
if (info->map.cached)
- memunmap(info->map.cached);
+ iounmap(info->map.cached);
kfree(info);
return 0;
}
diff --git a/kernel/memremap.c b/kernel/memremap.c
index a6d382312e6f..017532193fb1 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -27,6 +27,13 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
}
#endif
+#ifndef arch_memremap_wb
+static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
+{
+ return (__force void *)ioremap_cache(offset, size);
+}
+#endif
+
static void *try_ram_remap(resource_size_t offset, size_t size)
{
unsigned long pfn = PHYS_PFN(offset);
@@ -34,7 +41,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
/* In the simple case just return the existing linear address */
if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
return __va(offset);
- return NULL; /* fallback to ioremap_cache */
+ return NULL; /* fallback to arch_memremap_wb */
}
/**
@@ -90,7 +97,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
if (is_ram == REGION_INTERSECTS)
addr = try_ram_remap(offset, size);
if (!addr)
- addr = ioremap_cache(offset, size);
+ addr = arch_memremap_wb(offset, size);
}
/*
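
Putting the pieces together, a condensed (non-verbatim) view of the MEMREMAP_WB path after this change; try_ram_remap() and arch_memremap_wb() are the names from this file, and the is_ram flag stands in for the region_intersects() check done by memremap():

    static void *memremap_wb_sketch(resource_size_t offset, size_t size, bool is_ram)
    {
        void *addr = NULL;

        if (is_ram)
            addr = try_ram_remap(offset, size);     /* reuse the linear map */
        if (!addr)
            addr = arch_memremap_wb(offset, size);  /* arch hook, or ioremap_cache() fallback */
        return addr;
    }
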
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 638b143ee60f..1f22a186c18c 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -63,7 +63,6 @@ static unsigned int table_size, table_cnt;
static int all_symbols = 0;
static int absolute_percpu = 0;
static char symbol_prefix_char = '\0';
-static unsigned long long kernel_start_addr = 0;
static int base_relative = 0;
int token_profit[0x10000];
@@ -223,15 +222,13 @@ static int symbol_valid(struct sym_entry *s)
static char *special_suffixes[] = {
"_veneer", /* arm */
+ "_from_arm", /* arm */
+ "_from_thumb", /* arm */
NULL };
int i;
char *sym_name = (char *)s->sym + 1;
-
- if (s->addr < kernel_start_addr)
- return 0;
-
/* skip prefix char */
if (symbol_prefix_char && *sym_name == symbol_prefix_char)
sym_name++;
@@ -765,9 +762,6 @@ int main(int argc, char **argv)
if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
p++;
symbol_prefix_char = *p;
- } else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
- const char *p = &argv[i][14];
- kernel_start_addr = strtoull(p, NULL, 16);
} else if (strcmp(argv[i], "--base-relative") == 0)
base_relative = 1;
else
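
The new suffixes feed the existing suffix filter in symbol_valid(); simplified to a standalone userspace C helper, the test amounts to:

    #include <string.h>

    /* drop ARM long-branch veneers and ARM/Thumb mode-switch stubs */
    static int is_arm_veneer_symbol(const char *name)
    {
        static const char * const suffixes[] =
            { "_veneer", "_from_arm", "_from_thumb", NULL };
        size_t len = strlen(name);
        int i;

        for (i = 0; suffixes[i]; i++) {
            size_t slen = strlen(suffixes[i]);

            if (len >= slen && !strcmp(name + len - slen, suffixes[i]))
                return 1;
        }
        return 0;
    }
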
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 49d61ade9425..f0f6d9d75435 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,10 +82,6 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols"
fi
- if [ -n "${CONFIG_ARM}" ] && [ -z "${CONFIG_XIP_KERNEL}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
- kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
- fi
-
if [ -n "${CONFIG_KALLSYMS_ABSOLUTE_PERCPU}" ]; then
kallsymopt="${kallsymopt} --absolute-percpu"
fi