bugs.c
    // SPDX-License-Identifier: GPL-2.0
    /*
     *  Copyright (C) 1994  Linus Torvalds
     *
     *  Cyrix stuff, June 1998 by:
     *	- Rafael R. Reilova (moved everything from head.S),
     *        <rreilova@ececs.uc.edu>
     *	- Channing Corn (tests & fixes),
     *	- Andrew D. Balsa (code cleanup).
     */
    #include <linux/init.h>
    #include <linux/utsname.h>
    #include <linux/cpu.h>
    #include <linux/module.h>
    
    #include <asm/nospec-branch.h>
    #include <asm/cmdline.h>
    #include <asm/bugs.h>
    #include <asm/processor.h>
    #include <asm/processor-flags.h>
    #include <asm/fpu/internal.h>
    #include <asm/msr.h>
    #include <asm/paravirt.h>
    #include <asm/alternative.h>
    #include <asm/pgtable.h>
    #include <asm/set_memory.h>
    #include <asm/intel-family.h>
    
    static void __init spectre_v2_select_mitigation(void);
    
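    /*
     * check_bugs() is called once from start_kernel() during boot; it
     * identifies the boot CPU, selects the Spectre v2 mitigation and then
     * patches the alternatives.
     */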
    void __init check_bugs(void)
    {
    	identify_boot_cpu();
    
    	if (!IS_ENABLED(CONFIG_SMP)) {
    		pr_info("CPU: ");
    		print_cpu_info(&boot_cpu_data);
    	}
    
    	/* Select the proper spectre mitigation before patching alternatives */
    	spectre_v2_select_mitigation();
    
    #ifdef CONFIG_X86_32
    	/*
    	 * Check whether we are able to run this kernel safely on SMP.
    	 *
    	 * - i386 is no longer supported.
    	 * - In order to run on anything without a TSC, we need to be
    	 *   compiled for an i486.
    	 */
    	if (boot_cpu_data.x86 < 4)
    		panic("Kernel requires i486+ for 'invlpg' and other features");
    
    	init_utsname()->machine[1] =
    		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
    	alternative_instructions();
    
    	fpu__init_check_bugs();
    #else /* CONFIG_X86_64 */
    	alternative_instructions();
    
    	/*
    	 * Make sure the first 2MB area is not mapped by huge pages.
    	 * There are typically fixed-size MTRRs in there and overlapping
    	 * MTRRs into large pages causes slowdowns.
    	 *
    	 * Right now we don't do that with gbpages because there seems to be
    	 * very little benefit for that case.
    	 */
    	if (!direct_gbpages)
    		set_memory_4k((unsigned long)__va(0), 1);
    #endif
    }
    
    /* The kernel command line selection */
    enum spectre_v2_mitigation_cmd {
    	SPECTRE_V2_CMD_NONE,
    	SPECTRE_V2_CMD_AUTO,
    	SPECTRE_V2_CMD_FORCE,
    	SPECTRE_V2_CMD_RETPOLINE,
    	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
    	SPECTRE_V2_CMD_RETPOLINE_AMD,
    };
    
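    /* Strings reported via pr_info() and sysfs for each mitigation mode. */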
    static const char *spectre_v2_strings[] = {
    	[SPECTRE_V2_NONE]			= "Vulnerable",
    	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
    	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
    	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
    	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
    };
    
    #undef pr_fmt
    #define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
    
    static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
    static bool spectre_v2_bad_module;
    
    #ifdef RETPOLINE
    bool retpoline_module_ok(bool has_retpoline)
    {
    	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
    		return true;
    
    	pr_err("System may be vulnerable to spectre v2\n");
    	spectre_v2_bad_module = true;
    	return false;
    }
    #endif
    
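    /*
     * Print the given reason only when it is relevant: the "insecure"
     * variant prints only if the CPU is affected by Spectre v2, the
     * "secure" variant only if it is not.
     */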
    static void __init spec2_print_if_insecure(const char *reason)
    {
    	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
    		pr_info("%s\n", reason);
    }
    
    static void __init spec2_print_if_secure(const char *reason)
    {
    	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
    		pr_info("%s\n", reason);
    }
    
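    /*
     * True when the kernel was built with a retpoline-capable compiler
     * (RETPOLINE defined); this selects the "full" rather than the
     * "minimal" assembly-only retpoline modes below.
     */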
    static inline bool retp_compiler(void)
    {
    	return __is_defined(RETPOLINE);
    }
    
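    /* Exact-length compare of a parsed command line argument against an option. */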
    static inline bool match_option(const char *arg, int arglen, const char *opt)
    {
    	int len = strlen(opt);
    
    	return len == arglen && !strncmp(arg, opt, len);
    }
    
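    /*
     * Map "spectre_v2=" (off, on, retpoline, retpoline,amd,
     * retpoline,generic, auto) and "nospectre_v2" onto a mitigation
     * command. An unrecognized or absent value falls back to AUTO unless
     * "nospectre_v2" is also set.
     */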
    static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
    {
    	char arg[20];
    	int ret;
    
    	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
    				  sizeof(arg));
    	if (ret > 0)  {
    		if (match_option(arg, ret, "off")) {
    			goto disable;
    		} else if (match_option(arg, ret, "on")) {
    			spec2_print_if_secure("force enabled on command line.");
    			return SPECTRE_V2_CMD_FORCE;
    		} else if (match_option(arg, ret, "retpoline")) {
    			spec2_print_if_insecure("retpoline selected on command line.");
    			return SPECTRE_V2_CMD_RETPOLINE;
    		} else if (match_option(arg, ret, "retpoline,amd")) {
    			if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
    				pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
    				return SPECTRE_V2_CMD_AUTO;
    			}
    			spec2_print_if_insecure("AMD retpoline selected on command line.");
    			return SPECTRE_V2_CMD_RETPOLINE_AMD;
    		} else if (match_option(arg, ret, "retpoline,generic")) {
    			spec2_print_if_insecure("generic retpoline selected on command line.");
    			return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
    		} else if (match_option(arg, ret, "auto")) {
    			return SPECTRE_V2_CMD_AUTO;
    		}
    	}
    
    	if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
    		return SPECTRE_V2_CMD_AUTO;
    disable:
    	spec2_print_if_insecure("disabled on command line.");
    	return SPECTRE_V2_CMD_NONE;
    }
    
    /* Check for Skylake-like CPUs (for RSB handling) */
    static bool __init is_skylake_era(void)
    {
    	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
    	    boot_cpu_data.x86 == 6) {
    		switch (boot_cpu_data.x86_model) {
    		case INTEL_FAM6_SKYLAKE_MOBILE:
    		case INTEL_FAM6_SKYLAKE_DESKTOP:
    		case INTEL_FAM6_SKYLAKE_X:
    		case INTEL_FAM6_KABYLAKE_MOBILE:
    		case INTEL_FAM6_KABYLAKE_DESKTOP:
    			return true;
    		}
    	}
    	return false;
    }
    
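    /*
     * Pick the Spectre v2 mitigation based on the command line and the CPU,
     * then force the matching X86_FEATURE_RETPOLINE* capabilities so that
     * alternative_instructions() patches the retpoline thunks in.
     */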
    static void __init spectre_v2_select_mitigation(void)
    {
    	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
    	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
    
    	/*
    	 * If the CPU is not affected and the command line mode is NONE or AUTO
    	 * then nothing to do.
    	 */
    	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
    	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
    		return;
    
    	switch (cmd) {
    	case SPECTRE_V2_CMD_NONE:
    		return;
    
    	case SPECTRE_V2_CMD_FORCE:
    		/* FALLTHRU */
    	case SPECTRE_V2_CMD_AUTO:
    		goto retpoline_auto;
    
    	case SPECTRE_V2_CMD_RETPOLINE_AMD:
    		if (IS_ENABLED(CONFIG_RETPOLINE))
    			goto retpoline_amd;
    		break;
    	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
    		if (IS_ENABLED(CONFIG_RETPOLINE))
    			goto retpoline_generic;
    		break;
    	case SPECTRE_V2_CMD_RETPOLINE:
    		if (IS_ENABLED(CONFIG_RETPOLINE))
    			goto retpoline_auto;
    		break;
    	}
    	pr_err("kernel not compiled with retpoline; no mitigation available!");
    	return;
    
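    /*
     * The labels below are also jumped to directly from the command line
     * switch above. The AMD variant uses the LFENCE based retpoline only
     * when LFENCE is serializing; otherwise it falls back to the generic
     * retpoline.
     */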
    retpoline_auto:
    	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
    	retpoline_amd:
    		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
    			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
    			goto retpoline_generic;
    		}
    		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
    					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
    		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
    		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
    	} else {
    	retpoline_generic:
    		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
    					 SPECTRE_V2_RETPOLINE_MINIMAL;
    		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
    	}
    
    	spectre_v2_enabled = mode;
    	pr_info("%s\n", spectre_v2_strings[mode]);
    
    	/*
    	 * If neither SMEP nor KPTI is available, there is a risk of
    	 * hitting userspace addresses in the RSB after a context switch
    	 * from a shallow call stack to a deeper one. To prevent this fill
    	 * the entire RSB, even when using IBRS.
    	 *
    	 * Skylake era CPUs have a separate issue with *underflow* of the
    	 * RSB, where they will predict 'ret' targets from the generic BTB.
    	 * The proper mitigation for this is IBRS. If IBRS is not supported
    	 * or is deactivated in favour of retpolines, the RSB fill on context
    	 * switch is required.
    	 */
    	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
    	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
    		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
    		pr_info("Filling RSB on context switch\n");
    	}
    
    	/* Initialize Indirect Branch Prediction Barrier if supported */
    	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) ||
    	    boot_cpu_has(X86_FEATURE_AMD_PRED_CMD)) {
    		setup_force_cpu_cap(X86_FEATURE_IBPB);
    		pr_info("Enabling Indirect Branch Prediction Barrier\n");
    	}
    }
    
    #undef pr_fmt
    
    #ifdef CONFIG_SYSFS
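    /*
     * These back the /sys/devices/system/cpu/vulnerabilities/{meltdown,
     * spectre_v1,spectre_v2} files.
     */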
    ssize_t cpu_show_meltdown(struct device *dev,
    			  struct device_attribute *attr, char *buf)
    {
    	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
    		return sprintf(buf, "Not affected\n");
    	if (boot_cpu_has(X86_FEATURE_PTI))
    		return sprintf(buf, "Mitigation: PTI\n");
    	return sprintf(buf, "Vulnerable\n");
    }
    
    ssize_t cpu_show_spectre_v1(struct device *dev,
    			    struct device_attribute *attr, char *buf)
    {
    	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
    		return sprintf(buf, "Not affected\n");
    	return sprintf(buf, "Vulnerable\n");
    }
    
    ssize_t cpu_show_spectre_v2(struct device *dev,
    			    struct device_attribute *attr, char *buf)
    {
    	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
    		return sprintf(buf, "Not affected\n");
    
    	return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
    		       boot_cpu_has(X86_FEATURE_IBPB) ? ", IBPB" : "",
    		       spectre_v2_bad_module ? " - vulnerable module loaded" : "");
    }
    #endif
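
For reference, here is a minimal userspace sketch (not part of bugs.c) that reads the reports produced by the cpu_show_*() handlers above. It assumes the files live at the usual /sys/devices/system/cpu/vulnerabilities/ locations and only covers the three handlers defined in this file:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
    	/*
    	 * Files backed by cpu_show_meltdown(), cpu_show_spectre_v1() and
    	 * cpu_show_spectre_v2() respectively.
    	 */
    	static const char *const names[] = { "meltdown", "spectre_v1", "spectre_v2" };
    	char path[128], line[256];

    	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
    		snprintf(path, sizeof(path),
    			 "/sys/devices/system/cpu/vulnerabilities/%s", names[i]);
    		FILE *f = fopen(path, "r");
    		if (!f) {
    			printf("%s: <not present>\n", names[i]);
    			continue;
    		}
    		if (fgets(line, sizeof(line), f))
    			printf("%s: %s", names[i], line); /* line keeps its trailing '\n' */
    		fclose(f);
    	}
    	return 0;
    }

On an affected machine with retpolines enabled, the spectre_v2 entry would report one of the spectre_v2_strings[] values above, e.g. "Mitigation: Full generic retpoline".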