author		Christoph Muellner <christoph.muellner@theobroma-systems.com>	2018-05-15 15:26:10 +0200
committer	Christoph Muellner <christoph.muellner@theobroma-systems.com>	2018-05-16 12:04:01 +0200
commit		477b9791f15ad66ac25e308530bcc6a2556da967 (patch)
tree		aaf88897fccea94d1c1d62b48d19264e282ef4e2
parent		9174f62bfbfe91215b9888272d8e8e0509f57ba2 (diff)
arm64: Use alternative framework for retpoline.
This reduces the impact of retpoline on non-affected processors. By default, the retpoline consists of 6 nop instructions. If an affected processor is detected (by a MIDR match), the nops are replaced with the actual retpoline sequence at boot time.

The processors that use this are Applied Micro's X-Gene1, X-Gene2 and X-Gene3, and Ampere Computing's eMAG1.

Signed-off-by: Christoph Muellner <christoph.muellner@theobroma-systems.com>
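For context: the arm64 alternatives framework records both instruction sequences at build time and rewrites the live kernel text during boot, once the capability bits have been established. A minimal sketch of the same mechanism from C, using the ALTERNATIVE() inline-asm helper from asm/alternative.h (the two instructions below are placeholders, not the sequences from this patch):

#include <asm/alternative.h>
#include <asm/cpucaps.h>

/*
 * Sketch only: the first instruction is what unaffected CPUs
 * execute; when ARM64_RETPOLINE is set, the framework patches
 * the second one in at boot. Both sequences must occupy the
 * same number of bytes, which is why the default retpoline
 * slot is padded with 6 nops.
 */
static inline void example_alternative(void)
{
	asm volatile(ALTERNATIVE("nop",		/* default */
				 "dsb sy",	/* patched in when capable */
				 ARM64_RETPOLINE));
}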
-rw-r--r--	arch/arm64/include/asm/cpucaps.h	3
-rw-r--r--	arch/arm64/include/asm/nospec-branch.h	5
-rw-r--r--	arch/arm64/kernel/cpu_errata.c		58
3 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index bc51b72fafd4..3573d330af27 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -48,7 +48,8 @@
 #define ARM64_HAS_CACHE_IDC		27
 #define ARM64_HAS_CACHE_DIC		28
 #define ARM64_HW_DBM			29
+#define ARM64_RETPOLINE			30
 
-#define ARM64_NCAPS			30
+#define ARM64_NCAPS			31
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/nospec-branch.h b/arch/arm64/include/asm/nospec-branch.h
index 463ec183f4ec..63db0e2b19a2 100644
--- a/arch/arm64/include/asm/nospec-branch.h
+++ b/arch/arm64/include/asm/nospec-branch.h
@@ -3,9 +3,13 @@
 #ifndef _ASM_ARM64_NOSPEC_BRANCH_H_
 #define _ASM_ARM64_NOSPEC_BRANCH_H_
 
+#include <asm/alternative.h>
+#include <asm/cpucaps.h>
+
 #ifdef __ASSEMBLY__
 
 .macro retpoline
+alternative_if ARM64_RETPOLINE
 	str	x30, [sp, #-16]!
 	bl	101f
 100:	//speculation trap
@@ -16,6 +20,7 @@
 	ret
 102:	//non-spec code
 	ldr	x30, [sp], #16
+alternative_else_nop_endif
 .endm
 
 .macro br_nospec reg
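On the C side, the same capability bit that keys the assembler alternative can be queried directly. A hedged sketch (the helper below is hypothetical and not part of this patch, assuming the standard cpus_have_const_cap() accessor from asm/cpufeature.h):

#include <asm/cpucaps.h>
#include <asm/cpufeature.h>

/*
 * Hypothetical helper: C code could check the same cpucap
 * that drives the patched retpoline, e.g. to skip extra
 * hardening work on unaffected systems.
 */
static inline bool indirect_branches_need_retpoline(void)
{
	return cpus_have_const_cap(ARM64_RETPOLINE);
}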
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a900befadfe8..ed32e1f7e67a 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -331,6 +331,54 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
 
 #endif
 
+#ifdef CONFIG_RETPOLINE
+
+static inline bool retp_compiler(void)
+{
+	return __is_defined(RETPOLINE);
+}
+
+/* The Spectre V2 mitigation variants */
+enum spectre_v2_mitigation {
+	SPECTRE_V2_NOT_AFFECTED,
+	SPECTRE_V2_RETPOLINE_MINIMAL,
+	SPECTRE_V2_RETPOLINE_GENERIC,
+};
+
+static const char *spectre_v2_strings[] = {
+	[SPECTRE_V2_NOT_AFFECTED] = "Not affected",
+	[SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full retpoline",
+};
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NOT_AFFECTED;
+
+static bool spectre_v2_bad_module;
+
+bool retpoline_module_ok(bool has_retpoline)
+{
+	if (has_retpoline)
+		return true;
+
+	pr_err("System may be vulnerable to spectre v2\n");
+	spectre_v2_bad_module = true;
+	return false;
+}
+
+static inline const char *spectre_v2_module_string(void)
+{
+	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
+}
+
+static void
+enable_retpoline(const struct arm64_cpu_capabilities *entry)
+{
+	spectre_v2_enabled = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+					       SPECTRE_V2_RETPOLINE_MINIMAL;
+}
+
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -487,6 +535,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 	},
 #endif
+#ifdef CONFIG_RETPOLINE
+	{
+		.desc = "speculative branch prediction using retpoline",
+		.capability = ARM64_RETPOLINE,
+		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+		.cpu_enable = enable_retpoline,
+		.matches = is_affected_midr_range,
+		.midr_range = MIDR_RANGE(MIDR_APM_POTENZA, 0, 0, 3, 1),
+	},
+#endif
 	{
 	}
 };
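For reference, this is roughly how a .midr_range entry such as MIDR_RANGE(MIDR_APM_POTENZA, 0, 0, 3, 1) is matched against the running CPU. A sketch under the assumption that the stock is_midr_in_range() helper from asm/cputype.h performs the comparison (the wrapper function itself is illustrative, not from this patch):

#include <asm/cputype.h>

/*
 * Illustrative only: MIDR_RANGE() packs a CPU model plus a
 * minimum and maximum variant/revision pair; at boot, the
 * capability scan reads MIDR_EL1 on each CPU and checks it
 * against every listed range.
 */
static bool cpu_in_affected_range(const struct midr_range *range)
{
	u32 midr = read_cpuid_id();	/* MIDR_EL1 of this CPU */

	return is_midr_in_range(midr, range);
}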