Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        21
-rw-r--r--  arch/arm/mm/cache-v6.S     18
-rw-r--r--  arch/arm/mm/dma-mapping.c  18
3 files changed, 44 insertions, 13 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 346ae14824a5..101105e52610 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -735,6 +735,25 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
Forget about fast user space cmpxchg support.
It is just not possible.
+config DMA_CACHE_RWFO
+ bool "Enable read/write for ownership DMA cache maintenance"
+ depends on CPU_V6 && SMP
+ default y
+ help
+ The Snoop Control Unit on ARM11MPCore does not detect the
+ cache maintenance operations and the dma_{map,unmap}_area()
+ functions may leave stale cache entries on other CPUs. By
+ enabling this option, Read or Write For Ownership in the ARMv6
+ DMA cache maintenance functions is performed. These LDR/STR
+ instructions change the cache line state to shared or modified
+ so that the cache operation has the desired effect.
+
+ Note that the workaround is only valid on processors that do
+ not perform speculative loads into the D-cache. For such
+ processors, if cache maintenance operations are not broadcast
+ in hardware, other workarounds are needed (e.g. cache
+ maintenance broadcasting in software via FIQ).
+
config OUTER_CACHE
bool
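
The help text in the hunk above describes the read/write-for-ownership trick: because the ARM11MPCore SCU does not broadcast cache maintenance, each cache line is first touched with a load (and, where the line must end up Modified, a store) so it migrates into the local D-cache before the MCR operates on it. Below is a minimal C sketch of that per-line touch loop, using a hypothetical name (dma_rwfo_touch_range) rather than the kernel's actual assembly; the kernel performs the equivalent LDR/STR inside the v6_dma_*_range routines in cache-v6.S.

#include <stddef.h>

/* Illustration only.  D_CACHE_LINE_SIZE is 32 bytes on ARMv6. */
#define CACHE_LINE_SIZE 32

void dma_rwfo_touch_range(void *start, size_t size, int for_write)
{
	volatile char *p = start;
	volatile char *end = p + size;

	for (; p < end; p += CACHE_LINE_SIZE) {
		char tmp = *p;		/* read for ownership: line becomes Shared   */
		if (for_write)
			*p = tmp;	/* write for ownership: line becomes Modified */
	}
}
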
@@ -794,6 +813,8 @@ config ARM_L1_CACHE_SHIFT
config ARM_DMA_MEM_BUFFERABLE
bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+ depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
+ MACH_REALVIEW_PB11MP)
default y if CPU_V6 || CPU_V7
help
Historically, the kernel has used strongly ordered mappings to
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index e46ecd847138..86aa689ef1aa 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,8 +211,9 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
-#ifdef CONFIG_SMP
- str r0, [r0] @ write for ownership
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldr r2, [r0] @ read for ownership
+ str r2, [r0] @ write for ownership
#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
@@ -234,7 +235,7 @@ v6_dma_inv_range:
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
ldr r2, [r0] @ read for ownership
#endif
#ifdef HARVARD_CACHE
@@ -257,7 +258,7 @@ v6_dma_clean_range:
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
ldr r2, [r0] @ read for ownership
str r2, [r0] @ write for ownership
#endif
@@ -283,9 +284,13 @@ ENTRY(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
+#ifndef CONFIG_DMA_CACHE_RWFO
+ b v6_dma_clean_range
+#else
teq r2, #DMA_TO_DEVICE
beq v6_dma_clean_range
b v6_dma_flush_range
+#endif
ENDPROC(v6_dma_map_area)
/*
@@ -295,6 +300,11 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
+#ifndef CONFIG_DMA_CACHE_RWFO
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v6_dma_inv_range
+#endif
mov pc, lr
ENDPROC(v6_dma_unmap_area)
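
Taken together, the v6_dma_map_area/v6_dma_unmap_area hunks above replace the old CONFIG_SMP test with the new symbol and change the dispatch: with RWFO the map-time clean/invalidate/flush is sufficient and unmap does nothing, while without RWFO the map side only invalidates or cleans and the unmap side invalidates whenever the device may have written. A rough C rendering of that logic follows, with v6_dma_inv_range()/v6_dma_clean_range()/v6_dma_flush_range() standing in for the assembly labels of the same names.

/* Mirrors <linux/dma-direction.h>; values shown for illustration only. */
enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
			  DMA_FROM_DEVICE = 2 };

/* Stand-ins for the local assembly labels in cache-v6.S. */
extern void v6_dma_inv_range(const void *start, const void *end);
extern void v6_dma_clean_range(const void *start, const void *end);
extern void v6_dma_flush_range(const void *start, const void *end);

static void v6_dma_map_area_sketch(const void *start, unsigned long size,
				   enum dma_data_direction dir)
{
	const char *s = start;
	const char *e = s + size;		/* add r1, r1, r0 */

	if (dir == DMA_FROM_DEVICE) {
		v6_dma_inv_range(s, e);		/* device will write the buffer */
		return;
	}
#ifndef CONFIG_DMA_CACHE_RWFO
	/* Without RWFO, never flush at map time: cleaning is enough before
	 * the transfer, and the unmap path invalidates afterwards. */
	v6_dma_clean_range(s, e);
#else
	if (dir == DMA_TO_DEVICE)
		v6_dma_clean_range(s, e);
	else					/* DMA_BIDIRECTIONAL */
		v6_dma_flush_range(s, e);
#endif
}

static void v6_dma_unmap_area_sketch(const void *start, unsigned long size,
				     enum dma_data_direction dir)
{
#ifndef CONFIG_DMA_CACHE_RWFO
	/* Discard any lines refetched while the device owned the buffer. */
	if (dir != DMA_TO_DEVICE)
		v6_dma_inv_range(start, (const char *)start + size);
#endif
	/* With RWFO the map-time maintenance was already sufficient. */
}
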
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 13fa536d82e6..9e7742f0a102 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -24,15 +24,6 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
-/* Sanity check size */
-#if (CONSISTENT_DMA_SIZE % SZ_2M)
-#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
-#endif
-
-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
-
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = ISA_DMA_THRESHOLD;
@@ -123,6 +114,15 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
#ifdef CONFIG_MMU
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
+
+#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
*/
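
The dma-mapping.c hunks only move the CONSISTENT_* helpers (and the size sanity check) under CONFIG_MMU, where the page tables they describe are actually used; the arithmetic is unchanged. As a quick illustration of what they compute, here is a standalone sketch — the base, size and shift constants below are example values chosen for the demonstration, not definitions taken from this patch.

#include <stdio.h>

#define PAGE_SHIFT		12		/* 4 KiB pages */
#define PGDIR_SHIFT		21		/* each page table covers 2 MiB */
#define CONSISTENT_BASE		0xffc00000UL	/* example region base */
#define CONSISTENT_DMA_SIZE	(2UL << 20)	/* example: one 2 MiB window */

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES	(CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

int main(void)
{
	unsigned long vaddr = CONSISTENT_BASE + 0x5000;	/* 5 pages into the region */

	/* Which PTE slot maps this page, which 2 MiB table it lives in,
	 * and how many such tables cover the whole consistent region. */
	printf("page offset   = %lu\n", CONSISTENT_OFFSET(vaddr));	/* 5 */
	printf("pte table idx = %lu\n", CONSISTENT_PTE_INDEX(vaddr));	/* 0 */
	printf("num tables    = %lu\n", NUM_CONSISTENT_PTES);		/* 1 */
	return 0;
}
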