Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 2 | |
Arthur Heymans | 3838ede | 2021-05-31 16:10:05 +0200 | [diff] [blame] | 3 | #include <acpi/acpigen.h> |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 4 | #include <assert.h> |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 5 | #include <console/console.h> |
Marc Jones | 8b522db | 2020-10-12 11:58:46 -0600 | [diff] [blame] | 6 | #include <console/debug.h> |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 7 | #include <cpu/cpu.h> |
Marc Jones | c6a6e54 | 2020-11-02 11:59:16 -0700 | [diff] [blame] | 8 | #include <cpu/intel/common/common.h> |
Rocky Phagura | 17a798b | 2020-10-08 13:32:41 -0700 | [diff] [blame] | 9 | #include <cpu/intel/em64t101_save_state.h> |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 10 | #include <cpu/intel/microcode.h> |
Rocky Phagura | 17a798b | 2020-10-08 13:32:41 -0700 | [diff] [blame] | 11 | #include <cpu/intel/smm_reloc.h> |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 12 | #include <cpu/intel/turbo.h> |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 13 | #include <cpu/x86/mp.h> |
| 14 | #include <cpu/x86/mtrr.h> |
| 15 | #include <intelblocks/cpulib.h> |
| 16 | #include <intelblocks/mp_init.h> |
Marc Jones | 52e14f7 | 2021-03-11 14:49:19 -0700 | [diff] [blame] | 17 | #include <intelpch/lockdown.h> |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 18 | #include <soc/msr.h> |
Arthur Heymans | 3838ede | 2021-05-31 16:10:05 +0200 | [diff] [blame] | 19 | #include <soc/pci_devs.h> |
Marc Jones | 52e14f7 | 2021-03-11 14:49:19 -0700 | [diff] [blame] | 20 | #include <soc/pm.h> |
Rocky Phagura | 17a798b | 2020-10-08 13:32:41 -0700 | [diff] [blame] | 21 | #include <soc/smmrelocate.h> |
Arthur Heymans | 3838ede | 2021-05-31 16:10:05 +0200 | [diff] [blame] | 22 | #include <soc/soc_util.h> |
Marc Jones | 18960ce | 2020-11-02 12:41:12 -0700 | [diff] [blame] | 23 | #include <soc/util.h> |
Felix Held | d27ef5b | 2021-10-20 20:18:12 +0200 | [diff] [blame] | 24 | #include <types.h> |
Marc Jones | c6a6e54 | 2020-11-02 11:59:16 -0700 | [diff] [blame] | 25 | |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 26 | #include "chip.h" |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 27 | |
/* Microcode update blob found in CBFS; loaded on the BSP in mp_init_cpus(). */
static const void *microcode_patch;

/* Cached chip_info pointer, assigned in mp_init_cpus() for use by per-CPU
   callbacks (only cpu 0's device has chip_info populated — see comment there). */
static const config_t *chip_config = NULL;
| 31 | |
/* Report whether the SoC runs in IA_UNTRUSTED_MODE. Cooper Lake does not
   support that mode, so the answer is always "no". */
bool cpu_soc_is_in_untrusted_mode(void)
{
	return false;
}
| 37 | |
void cpu_soc_bios_done(void)
{
	/* No-op: IA_UNTRUSTED_MODE is not supported in Cooper Lake. */
}
| 42 | |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 43 | static void xeon_configure_mca(void) |
| 44 | { |
| 45 | msr_t msr; |
| 46 | struct cpuid_result cpuid_regs; |
| 47 | |
| 48 | /* |
| 49 | * Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE |
| 50 | * and CPUID.(EAX=1):EDX[14]==1 MCA |
| 51 | */ |
| 52 | cpuid_regs = cpuid(1); |
| 53 | if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14)) |
| 54 | return; |
| 55 | |
| 56 | msr = rdmsr(IA32_MCG_CAP); |
| 57 | if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) { |
| 58 | /* Enable all error logging */ |
| 59 | msr.lo = msr.hi = 0xffffffff; |
| 60 | wrmsr(IA32_MCG_CTL, msr); |
| 61 | } |
| 62 | |
| 63 | mca_configure(); |
| 64 | } |
| 65 | |
/*
 * On server platforms the FIT mechanism only applies microcode to the
 * BSP. Loading the update on the APs in parallel has been observed to
 * fail in roughly 10% of attempts, so request serialized loading.
 */
void get_microcode_info(const void **microcode, int *parallel)
{
	*parallel = 0;
	*microcode = intel_microcode_find();
}
| 76 | |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 77 | static void each_cpu_init(struct device *cpu) |
| 78 | { |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 79 | msr_t msr; |
| 80 | |
Arthur Heymans | 36e6f9b | 2022-10-27 15:11:05 +0200 | [diff] [blame] | 81 | printk(BIOS_SPEW, "%s dev: %s, cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n", |
| 82 | __func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id, |
| 83 | cpu->path.apic.package_id); |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 84 | |
Johnny Lin | 12bee2a | 2020-08-04 18:01:54 +0800 | [diff] [blame] | 85 | /* |
| 86 | * Set HWP base feature, EPP reg enumeration, lock thermal and msr |
| 87 | * This is package level MSR. Need to check if it updates correctly on |
| 88 | * multi-socket platform. |
| 89 | */ |
| 90 | msr = rdmsr(MSR_MISC_PWR_MGMT); |
| 91 | if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked skip update */ |
| 92 | msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR | |
| 93 | LOCK_THERM_INT); |
| 94 | wrmsr(MSR_MISC_PWR_MGMT, msr); |
| 95 | } |
| 96 | |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 97 | /* Enable Fast Strings */ |
| 98 | msr = rdmsr(IA32_MISC_ENABLE); |
| 99 | msr.lo |= FAST_STRINGS_ENABLE_BIT; |
| 100 | wrmsr(IA32_MISC_ENABLE, msr); |
| 101 | /* Enable Turbo */ |
| 102 | enable_turbo(); |
| 103 | |
| 104 | /* Enable speed step. */ |
| 105 | if (get_turbo_state() == TURBO_ENABLED) { |
| 106 | msr = rdmsr(IA32_MISC_ENABLE); |
| 107 | msr.lo |= SPEED_STEP_ENABLE_BIT; |
| 108 | wrmsr(IA32_MISC_ENABLE, msr); |
| 109 | } |
| 110 | |
| 111 | /* Clear out pending MCEs */ |
| 112 | xeon_configure_mca(); |
Christian Walter | abb3757 | 2020-09-30 13:48:29 +0200 | [diff] [blame] | 113 | |
| 114 | /* Enable Vmx */ |
| 115 | set_vmx_and_lock(); |
Arthur Heymans | 3838ede | 2021-05-31 16:10:05 +0200 | [diff] [blame] | 116 | set_aesni_lock(); |
| 117 | |
| 118 | /* The MSRs and CSRS have the same register layout. Use the CSRS bit definitions |
| 119 | Lock Turbo. Did FSP-S set this up??? */ |
| 120 | msr = rdmsr(MSR_TURBO_ACTIVATION_RATIO); |
| 121 | msr.lo |= (TURBO_ACTIVATION_RATIO_LOCK); |
| 122 | wrmsr(MSR_TURBO_ACTIVATION_RATIO, msr); |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 123 | } |
| 124 | |
/* Device operations attached to each CPU device matched below. */
static struct device_operations cpu_dev_ops = {
	.init = each_cpu_init,
};

/* CPUIDs this driver binds to: Cooper Lake-SP steppings A0 and A1. */
static const struct cpu_device_id cpu_table[] = {
	{X86_VENDOR_INTEL, CPUID_COOPERLAKE_SP_A0, CPUID_EXACT_MATCH_MASK },
	{X86_VENDOR_INTEL, CPUID_COOPERLAKE_SP_A1, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

/* Register the driver so each_cpu_init() runs on every matching CPU. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
| 139 | |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 140 | static void set_max_turbo_freq(void) |
| 141 | { |
| 142 | msr_t msr, perf_ctl; |
| 143 | |
| 144 | FUNC_ENTER(); |
| 145 | perf_ctl.hi = 0; |
| 146 | |
| 147 | /* Check for configurable TDP option */ |
| 148 | if (get_turbo_state() == TURBO_ENABLED) { |
| 149 | msr = rdmsr(MSR_TURBO_RATIO_LIMIT); |
| 150 | perf_ctl.lo = (msr.lo & 0xff) << 8; |
| 151 | } else if (cpu_config_tdp_levels()) { |
| 152 | /* Set to nominal TDP ratio */ |
| 153 | msr = rdmsr(MSR_CONFIG_TDP_NOMINAL); |
| 154 | perf_ctl.lo = (msr.lo & 0xff) << 8; |
| 155 | } else { |
| 156 | /* Platform Info bits 15:8 give max ratio */ |
| 157 | msr = rdmsr(MSR_PLATFORM_INFO); |
| 158 | perf_ctl.lo = msr.lo & 0xff00; |
| 159 | } |
| 160 | wrmsr(IA32_PERF_CTL, perf_ctl); |
| 161 | |
| 162 | printk(BIOS_DEBUG, "cpu: frequency set to %d\n", |
Jingle Hsu | a41b12c | 2020-08-11 20:48:45 +0800 | [diff] [blame] | 163 | ((perf_ctl.lo >> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ); |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 164 | FUNC_EXIT(); |
| 165 | } |
| 166 | |
/*
 * Do essential initialization tasks before APs can be fired up
 */
static void pre_mp_init(void)
{
	/* Detect and program MTRRs on the BSP, then sanity-check them. */
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
| 175 | |
| 176 | static int get_thread_count(void) |
| 177 | { |
| 178 | unsigned int num_phys = 0, num_virts = 0; |
| 179 | |
| 180 | cpu_read_topology(&num_phys, &num_virts); |
| 181 | printk(BIOS_SPEW, "Detected %u cores and %u threads\n", num_phys, num_virts); |
Andrey Petrov | 5d76958 | 2020-04-23 10:54:18 -0700 | [diff] [blame] | 182 | /* |
| 183 | * Currently we do not know a way to figure out how many CPUs we have total |
| 184 | * on multi-socketed. So we pretend all sockets are populated with CPUs with |
| 185 | * same thread/core fusing. |
| 186 | * TODO: properly figure out number of active sockets OR refactor MPinit code |
| 187 | * to remove requirements of having to know total number of CPUs in advance. |
| 188 | */ |
| 189 | return num_virts * CONFIG_MAX_SOCKET; |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 190 | } |
| 191 | |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 192 | static void post_mp_init(void) |
| 193 | { |
| 194 | /* Set Max Ratio */ |
| 195 | set_max_turbo_freq(); |
Kyösti Mälkki | 0778c86 | 2020-06-10 12:44:03 +0300 | [diff] [blame] | 196 | |
Marc Jones | 52e14f7 | 2021-03-11 14:49:19 -0700 | [diff] [blame] | 197 | if (CONFIG(HAVE_SMI_HANDLER)) { |
Rocky Phagura | 17a798b | 2020-10-08 13:32:41 -0700 | [diff] [blame] | 198 | global_smi_enable(); |
Marc Jones | 52e14f7 | 2021-03-11 14:49:19 -0700 | [diff] [blame] | 199 | if (get_lockdown_config() == CHIPSET_LOCKDOWN_COREBOOT) |
| 200 | pmc_lock_smi(); |
| 201 | } |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 202 | } |
| 203 | |
/* MP init callbacks consumed by mp_init_with_smm() in mp_init_cpus(). */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_thread_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
	.get_microcode_info = get_microcode_info,
	.post_mp_init = post_mp_init,
};
| 213 | |
Arthur Heymans | 829e8e6 | 2023-01-30 19:09:34 +0100 | [diff] [blame] | 214 | void mp_init_cpus(struct bus *bus) |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 215 | { |
| 216 | microcode_patch = intel_microcode_find(); |
| 217 | |
| 218 | if (!microcode_patch) |
| 219 | printk(BIOS_ERR, "microcode not found in CBFS!\n"); |
| 220 | |
| 221 | intel_microcode_load_unlocked(microcode_patch); |
| 222 | |
Felix Held | 4dd7d11 | 2021-10-20 23:31:43 +0200 | [diff] [blame] | 223 | /* TODO: Handle mp_init_with_smm failure? */ |
Arthur Heymans | 829e8e6 | 2023-01-30 19:09:34 +0100 | [diff] [blame] | 224 | mp_init_with_smm(bus, &mp_ops); |
Jonathan Zhang | b7cf7d3 | 2020-04-02 20:03:48 -0700 | [diff] [blame] | 225 | |
| 226 | /* |
| 227 | * chip_config is used in cpu device callback. Other than cpu 0, |
| 228 | * rest of the CPU devices do not have chip_info updated. |
| 229 | */ |
Arthur Heymans | 829e8e6 | 2023-01-30 19:09:34 +0100 | [diff] [blame] | 230 | chip_config = bus->dev->chip_info; |
Andrey Petrov | 8670e82 | 2020-03-30 12:25:06 -0700 | [diff] [blame] | 231 | } |