Diffstat (limited to 'arch/x86/include/asm/cpu.h')
-rw-r--r--  arch/x86/include/asm/cpu.h  173
1 file changed, 169 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 6c6774af76..c8392915f1 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -1,16 +1,160 @@
/*
* Copyright (c) 2014 The Chromium OS Authors.
*
+ * Part of this file is adapted from coreboot
+ * src/arch/x86/include/arch/cpu.h and
+ * src/arch/x86/lib/cpu.c
+ *
* SPDX-License-Identifier: GPL-2.0+
*/
-#ifndef __X86_CPU_H
-#define __X86_CPU_H
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+enum {
+ X86_VENDOR_INVALID = 0,
+ X86_VENDOR_INTEL,
+ X86_VENDOR_CYRIX,
+ X86_VENDOR_AMD,
+ X86_VENDOR_UMC,
+ X86_VENDOR_NEXGEN,
+ X86_VENDOR_CENTAUR,
+ X86_VENDOR_RISE,
+ X86_VENDOR_TRANSMETA,
+ X86_VENDOR_NSC,
+ X86_VENDOR_SIS,
+ X86_VENDOR_ANY = 0xfe,
+ X86_VENDOR_UNKNOWN = 0xff
+};
+
+struct cpuid_result {
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+};
+
+/*
+ * Generic CPUID function
+ */
+static inline struct cpuid_result cpuid(int op)
+{
+ struct cpuid_result result;
+ asm volatile(
+ "mov %%ebx, %%edi;"
+ "cpuid;"
+ "mov %%ebx, %%esi;"
+ "mov %%edi, %%ebx;"
+ : "=a" (result.eax),
+ "=S" (result.ebx),
+ "=c" (result.ecx),
+ "=d" (result.edx)
+ : "0" (op)
+ : "edi");
+ return result;
+}
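
As a usage sketch (not part of this patch), the vendor identification string returned by CPUID leaf 0 is split across ebx, edx and ecx, in that order; the helper name and the use of the standard memcpy() below are illustrative assumptions only:

#include <string.h>

/* Hypothetical caller: assemble the 12-byte vendor string
 * ("GenuineIntel", "AuthenticAMD", ...) from CPUID leaf 0.
 */
static void example_read_vendor(char vendor[13])
{
	struct cpuid_result r = cpuid(0);

	memcpy(vendor + 0, &r.ebx, 4);
	memcpy(vendor + 4, &r.edx, 4);
	memcpy(vendor + 8, &r.ecx, 4);
	vendor[12] = '\0';
}
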
+
+/*
+ * Generic Extended CPUID function
+ */
+static inline struct cpuid_result cpuid_ext(int op, unsigned ecx)
+{
+ struct cpuid_result result;
+ asm volatile(
+ "mov %%ebx, %%edi;"
+ "cpuid;"
+ "mov %%ebx, %%esi;"
+ "mov %%edi, %%ebx;"
+ : "=a" (result.eax),
+ "=S" (result.ebx),
+ "=c" (result.ecx),
+ "=d" (result.edx)
+ : "0" (op), "2" (ecx)
+ : "edi");
+ return result;
+}
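
One place the sub-leaf argument matters is Intel's deterministic cache parameters leaf (CPUID leaf 4), where ecx selects which cache level to describe. A minimal sketch with an invented function name, not part of this patch:

/* Count caches by walking leaf-4 sub-leaves until the cache-type field
 * (EAX bits 4:0) reads 0, meaning "no more caches".
 */
static int example_count_caches(void)
{
	int index = 0;

	while ((cpuid_ext(4, index).eax & 0x1f) != 0)
		index++;

	return index;
}
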
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax;
+
+ __asm__("mov %%ebx, %%edi;"
+ "cpuid;"
+ "mov %%edi, %%ebx;"
+ : "=a" (eax)
+ : "0" (op)
+ : "ecx", "edx", "edi");
+ return eax;
+}
+
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx;
+
+ __asm__("mov %%ebx, %%edi;"
+ "cpuid;"
+ "mov %%ebx, %%esi;"
+ "mov %%edi, %%ebx;"
+ : "=a" (eax), "=S" (ebx)
+ : "0" (op)
+ : "ecx", "edx", "edi");
+ return ebx;
+}
+
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ecx;
+
+ __asm__("mov %%ebx, %%edi;"
+ "cpuid;"
+ "mov %%edi, %%ebx;"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "edx", "edi");
+ return ecx;
+}
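
A hedged example of the single-register helpers: feature bits from CPUID leaf 1 are reported in ecx and edx, e.g. SSE4.2 is ECX bit 20. The wrapper below is illustrative only:

/* Sketch: report whether SSE4.2 is advertised (CPUID.1:ECX bit 20) */
static int example_has_sse42(void)
{
	return (cpuid_ecx(1) >> 20) & 1;
}
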
+
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx;
+
+ __asm__("mov %%ebx, %%edi;"
+ "cpuid;"
+ "mov %%edi, %%ebx;"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "ecx", "edi");
+ return edx;
+}
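
Similarly, a 64-bit capability probe in the spirit of cpu_has_64bit() first checks that extended leaf 0x80000001 is implemented and then tests the long-mode bit (EDX bit 29). This is only a sketch with an invented name, not the patch's implementation:

static int example_has_long_mode(void)
{
	/* Make sure the extended leaf exists at all */
	if (cpuid_eax(0x80000000) < 0x80000001)
		return 0;

	return (cpuid_edx(0x80000001) >> 29) & 1;
}
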
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(uint32_t flag)
+{
+ uint32_t f1, f2;
+
+ asm(
+ "pushfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "movl %0,%1\n\t"
+ "xorl %2,%0\n\t"
+ "pushl %0\n\t"
+ "popfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "popfl\n\t"
+ : "=&r" (f1), "=&r" (f2)
+ : "ir" (flag));
+ return ((f1^f2) & flag) != 0;
+}
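
The usual caller of this helper tests whether the ID bit of EFLAGS (bit 21, mask 0x00200000) can be toggled, which is how pre-CPUID parts are told apart from CPUID-capable ones. A minimal sketch (the function name is an assumption, not part of this patch):

/* CPUID is available iff EFLAGS.ID (bit 21) is changeable */
static inline int example_have_cpuid(void)
{
	return flag_is_changeable_p(0x00200000);
}
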
+
- /**
+/**
* cpu_enable_paging_pae() - Enable PAE-paging
*
- * @pdpt: Value to set in cr3 (PDPT or PML4T)
+ * @cr3: Value to set in cr3 (PDPT or PML4T)
*/
void cpu_enable_paging_pae(ulong cr3);
@@ -27,6 +171,27 @@ void cpu_disable_paging_pae(void);
int cpu_has_64bit(void);
/**
+ * cpu_vendor_name() - Get CPU vendor name
+ *
+ * @vendor: CPU vendor enumeration number
+ *
+ * @return Pointer to the CPU vendor name string
+ */
+const char *cpu_vendor_name(int vendor);
+
+#define CPU_MAX_NAME_LEN 49
+
+/**
+ * cpu_get_name() - Get the name of the current cpu
+ *
+ * @name: Place to put name, which must be CPU_MAX_NAME_LEN bytes including
+ *	\0 terminator
+ * @return pointer to name, which will likely be a few bytes after the start
+ * of @name
+ */
+char *cpu_get_name(char *name);
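
A hedged usage sketch for the two lookup helpers above (printf() and the caller name are illustrative): the brand string read by cpu_get_name() may be left-padded with spaces, which is why the returned pointer can sit a few bytes into the caller's buffer rather than at its start.

static void example_print_cpu(int vendor)
{
	char buf[CPU_MAX_NAME_LEN];

	printf("Vendor: %s, model: %s\n",
	       cpu_vendor_name(vendor), cpu_get_name(buf));
}
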
+
+/**
* cpu_call64() - Jump to a 64-bit Linux kernel (internal function)
*
* The kernel is uncompressed and the 64-bit entry point is expected to be