author    Stefan Reinauer <reinauer@chromium.org>    2012-12-02 04:49:50 +0000
committer Simon Glass <sjg@chromium.org>             2012-12-06 14:30:39 -0800
commit    095593c0301399ae834050e2008862451a72b8b0 (patch)
tree      6ebf30f48a3ec3db8111ecfb815044db5bcfa4a4 /arch
parent    9a7da182fa936d28658daadef83e0b8f7104487b (diff)
x86: Add basic cache operations
Add functions to enable/disable the data cache.

Signed-off-by: Stefan Reinauer <reinauer@chromium.org>
Signed-off-by: Simon Glass <sjg@chromium.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/cpu/cpu.c                   |  38
-rw-r--r--  arch/x86/cpu/interrupts.c            |  68
-rw-r--r--  arch/x86/include/asm/cache.h         |  16
-rw-r--r--  arch/x86/include/asm/control_regs.h  | 105
4 files changed, 155 insertions(+), 72 deletions(-)
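The new enable_caches()/disable_caches() hooks are weak aliases, so boards can override them. A minimal usage sketch (editorial, not part of the patch; it assumes only the functions this commit adds plus dcache_status() from common.h, and the helper name is made up for illustration):

#include <common.h>
#include <asm/cache.h>

/* Temporarily run some cache-sensitive code with the data cache off. */
static void run_with_caches_off(void (*fn)(void))
{
	int was_on = dcache_status();	/* non-zero while CR0.CD is clear */

	disable_caches();		/* sets CR0.CD/NW and flushes via wbinvd() */
	fn();
	if (was_on)
		enable_caches();	/* clears CR0.CD/NW and flushes again */
}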
diff --git a/arch/x86/cpu/cpu.c b/arch/x86/cpu/cpu.c
index fabfbd1bf7..315e87afeb 100644
--- a/arch/x86/cpu/cpu.c
+++ b/arch/x86/cpu/cpu.c
@@ -34,6 +34,7 @@
#include <common.h>
#include <command.h>
+#include <asm/control_regs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
@@ -147,16 +148,27 @@ int cpu_init_r(void) __attribute__((weak, alias("x86_cpu_init_r")));
void x86_enable_caches(void)
{
- const u32 nw_cd_rst = ~(X86_CR0_NW | X86_CR0_CD);
+ unsigned long cr0;
- /* turn on the cache and disable write through */
- asm("movl %%cr0, %%eax\n"
- "andl %0, %%eax\n"
- "movl %%eax, %%cr0\n"
- "wbinvd\n" : : "i" (nw_cd_rst) : "eax");
+ cr0 = read_cr0();
+ cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
+ write_cr0(cr0);
+ wbinvd();
}
void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));
+void x86_disable_caches(void)
+{
+ unsigned long cr0;
+
+ cr0 = read_cr0();
+ cr0 |= X86_CR0_NW | X86_CR0_CD;
+ wbinvd();
+ write_cr0(cr0);
+ wbinvd();
+}
+void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));
+
int x86_init_cache(void)
{
enable_caches();
@@ -200,3 +212,17 @@ void __reset_cpu(ulong addr)
generate_gpf(); /* start the show */
}
void reset_cpu(ulong addr) __attribute__((weak, alias("__reset_cpu")));
+
+int dcache_status(void)
+{
+ return !(read_cr0() & 0x40000000);
+}
+
+/* Define these functions to allow ehci-hcd to function */
+void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+}
+
+void invalidate_dcache_range(unsigned long start, unsigned long stop)
+{
+}
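For reference, the 0x40000000 tested in dcache_status() above is bit 30 of CR0, i.e. the CD (cache disable) flag. An equivalent using the named constant from <asm/processor-flags.h>, which the patch itself uses in x86_enable_caches(), would look like this (a sketch, not part of the commit; the function name is hypothetical):

#include <asm/control_regs.h>
#include <asm/processor-flags.h>

/* Non-zero when the data cache is enabled, i.e. CR0.CD is clear. */
static int dcache_status_by_name(void)
{
	return !(read_cr0() & X86_CR0_CD);	/* X86_CR0_CD == 0x40000000 */
}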
diff --git a/arch/x86/cpu/interrupts.c b/arch/x86/cpu/interrupts.c
index 43ec3f8b08..e7887152f7 100644
--- a/arch/x86/cpu/interrupts.c
+++ b/arch/x86/cpu/interrupts.c
@@ -28,6 +28,8 @@
*/
#include <common.h>
+#include <asm/cache.h>
+#include <asm/control_regs.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor-flags.h>
@@ -41,72 +43,6 @@
"pushl $"#x"\n" \
"jmp irq_common_entry\n"
-/*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialisation
- */
-static unsigned long __force_order;
-
-static inline unsigned long read_cr0(void)
-{
- unsigned long val;
- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long read_cr2(void)
-{
- unsigned long val;
- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long read_cr3(void)
-{
- unsigned long val;
- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long read_cr4(void)
-{
- unsigned long val;
- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long get_debugreg(int regno)
-{
- unsigned long val = 0; /* Damn you, gcc! */
-
- switch (regno) {
- case 0:
- asm("mov %%db0, %0" : "=r" (val));
- break;
- case 1:
- asm("mov %%db1, %0" : "=r" (val));
- break;
- case 2:
- asm("mov %%db2, %0" : "=r" (val));
- break;
- case 3:
- asm("mov %%db3, %0" : "=r" (val));
- break;
- case 6:
- asm("mov %%db6, %0" : "=r" (val));
- break;
- case 7:
- asm("mov %%db7, %0" : "=r" (val));
- break;
- default:
- val = 0;
- }
- return val;
-}
-
void dump_regs(struct irq_regs *regs)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 87c9e0be42..d4678d4916 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -32,4 +32,20 @@
#define ARCH_DMA_MINALIGN 64
#endif
+static inline void wbinvd(void)
+{
+ asm volatile ("wbinvd" : : : "memory");
+}
+
+static inline void invd(void)
+{
+ asm volatile("invd" : : : "memory");
+}
+
+/* Enable caches and write buffer */
+void enable_caches(void);
+
+/* Disable caches and write buffer */
+void disable_caches(void);
+
#endif /* __X86_CACHE_H__ */
diff --git a/arch/x86/include/asm/control_regs.h b/arch/x86/include/asm/control_regs.h
new file mode 100644
index 0000000000..aa8be30bac
--- /dev/null
+++ b/arch/x86/include/asm/control_regs.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2012 The Chromium OS Authors.
+ *
+ * (C) Copyright 2008-2011
+ * Graeme Russ, <graeme.russ@gmail.com>
+ *
+ * (C) Copyright 2002
+ * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
+ *
+ * Portions of this file are derived from the Linux kernel source
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __X86_CONTROL_REGS_H
+#define __X86_CONTROL_REGS_H
+
+/*
+ * The memory clobber prevents GCC from reordering the reads and writes
+ * of CR0.
+ */
+static inline unsigned long read_cr0(void)
+{
+ unsigned long val;
+
+ asm volatile ("movl %%cr0, %0" : "=r" (val) : : "memory");
+ return val;
+}
+
+static inline void write_cr0(unsigned long val)
+{
+ asm volatile ("movl %0, %%cr0" : : "r" (val) : "memory");
+}
+
+static inline unsigned long read_cr2(void)
+{
+ unsigned long val;
+
+ asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : : "memory");
+ return val;
+}
+
+static inline unsigned long read_cr3(void)
+{
+ unsigned long val;
+
+ asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : : "memory");
+ return val;
+}
+
+static inline unsigned long read_cr4(void)
+{
+ unsigned long val;
+
+ asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : : "memory");
+ return val;
+}
+
+static inline unsigned long get_debugreg(int regno)
+{
+ unsigned long val = 0; /* Damn you, gcc! */
+
+ switch (regno) {
+ case 0:
+ asm("mov %%db0, %0" : "=r" (val));
+ break;
+ case 1:
+ asm("mov %%db1, %0" : "=r" (val));
+ break;
+ case 2:
+ asm("mov %%db2, %0" : "=r" (val));
+ break;
+ case 3:
+ asm("mov %%db3, %0" : "=r" (val));
+ break;
+ case 6:
+ asm("mov %%db6, %0" : "=r" (val));
+ break;
+ case 7:
+ asm("mov %%db7, %0" : "=r" (val));
+ break;
+ default:
+ val = 0;
+ }
+ return val;
+}
+
+#endif
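As an illustration of the relocated accessors (assumed usage only, mirroring what dump_regs() in interrupts.c does with them; the function name is made up):

#include <common.h>
#include <asm/control_regs.h>

/* Print the control and debug registers via the accessors above. */
static void show_control_regs(void)
{
	printf("CR0: %08lx  CR2: %08lx  CR3: %08lx  CR4: %08lx\n",
	       read_cr0(), read_cr2(), read_cr3(), read_cr4());
	printf("DR7: %08lx\n", get_debugreg(7));
}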