Patrick Georgi | ac95903 | 2020-05-05 22:49:26 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 2 | |
Furquan Shaikh | 76cedd2 | 2020-05-02 10:24:23 -0700 | [diff] [blame] | 3 | #include <acpi/acpi.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 4 | #include <assert.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 5 | #include <console/console.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 6 | #include "chip.h" |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 7 | #include <cpu/cpu.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 8 | #include <cpu/x86/mp.h> |
John Zhao | 3156934 | 2016-08-23 16:38:05 -0700 | [diff] [blame] | 9 | #include <cpu/intel/microcode.h> |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 10 | #include <cpu/intel/turbo.h> |
Michael Niewöhner | 6303243 | 2020-10-11 17:34:54 +0200 | [diff] [blame] | 11 | #include <cpu/intel/common/common.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 12 | #include <cpu/x86/msr.h> |
| 13 | #include <cpu/x86/mtrr.h> |
Kyösti Mälkki | b2a5f0b | 2019-08-04 19:54:32 +0300 | [diff] [blame] | 14 | #include <cpu/x86/smm.h> |
Kyösti Mälkki | e31ec29 | 2019-08-10 17:27:01 +0300 | [diff] [blame] | 15 | #include <cpu/intel/em64t100_save_state.h> |
Matt DeVillier | 4f0b2e0 | 2024-06-10 21:54:26 -0500 | [diff] [blame^] | 16 | #include <cpu/intel/microcode.h> |
Kyösti Mälkki | faf20d3 | 2019-08-14 05:41:41 +0300 | [diff] [blame] | 17 | #include <cpu/intel/smm_reloc.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 18 | #include <device/device.h> |
| 19 | #include <device/pci.h> |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 20 | #include <fsp/api.h> |
Barnali Sarkar | 66fe0c4 | 2017-05-23 18:17:14 +0530 | [diff] [blame] | 21 | #include <intelblocks/cpulib.h> |
Aaron Durbin | efc92a8 | 2017-06-08 10:54:59 -0500 | [diff] [blame] | 22 | #include <intelblocks/fast_spi.h> |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 23 | #include <intelblocks/mp_init.h> |
Barnali Sarkar | 66fe0c4 | 2017-05-23 18:17:14 +0530 | [diff] [blame] | 24 | #include <intelblocks/msr.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 25 | #include <intelblocks/sgx.h> |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 26 | #include <reg_script.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 27 | #include <soc/cpu.h> |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 28 | #include <soc/iomap.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 29 | #include <soc/pci_devs.h> |
Andrey Petrov | 3b63753 | 2016-11-30 17:39:16 -0800 | [diff] [blame] | 30 | #include <soc/pm.h> |
Felix Held | d27ef5b | 2021-10-20 20:18:12 +0200 | [diff] [blame] | 31 | #include <types.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 32 | |
/*
 * Per-core MSR programming applied by soc_core_init() through
 * reg_script_run().  On Gemini Lake the C-state/IO-trap/MWAIT writes
 * are compiled out; only the C1E disable is applied there.
 */
static const struct reg_script core_msr_script[] = {
#if !CONFIG(SOC_INTEL_GEMINILAKE)
	/* Enable C-state and IO/MWAIT redirect */
	REG_MSR_WRITE(MSR_PKG_CST_CONFIG_CONTROL,
		(PKG_C_STATE_LIMIT_C2_MASK | CORE_C_STATE_LIMIT_C10_MASK
		| IO_MWAIT_REDIRECT_MASK | CST_CFG_LOCK_MASK)),
	/* Power Management I/O base address for I/O trapping to C-states */
	REG_MSR_WRITE(MSR_PMG_IO_CAPTURE_BASE,
		(ACPI_PMIO_CST_REG | (PMG_IO_BASE_CST_RNG_BLK_SIZE << 16))),
	/* Disable support for MONITOR and MWAIT instructions */
	REG_MSR_RMW(IA32_MISC_ENABLE, ~MONITOR_MWAIT_DIS_MASK, 0),
#endif
	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~POWER_CTL_C1E_MASK, 0),
	REG_SCRIPT_END
};
| 49 | |
Subrata Banik | 56ab8e2 | 2022-01-07 13:40:19 +0000 | [diff] [blame] | 50 | bool cpu_soc_is_in_untrusted_mode(void) |
| 51 | { |
| 52 | msr_t msr; |
| 53 | |
| 54 | msr = rdmsr(MSR_POWER_MISC); |
| 55 | return !!(msr.lo & ENABLE_IA_UNTRUSTED); |
| 56 | } |
| 57 | |
Subrata Banik | 37a55d1 | 2022-05-30 18:11:12 +0000 | [diff] [blame] | 58 | void cpu_soc_bios_done(void) |
| 59 | { |
| 60 | msr_t msr; |
| 61 | |
| 62 | msr = rdmsr(MSR_POWER_MISC); |
| 63 | msr.lo |= ENABLE_IA_UNTRUSTED; |
| 64 | wrmsr(MSR_POWER_MISC, msr); |
| 65 | } |
| 66 | |
/*
 * Per-core initialization, invoked for the BSP and every AP.
 * Programs SGX PRMRRs, machine-check banks, the C-state/power MSR
 * script, lock bits and the P-state policy for the calling core.
 */
void soc_core_init(struct device *cpu)
{
	/* Configure Core PRMRR for SGX. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		prmrr_core_configure();

	/* Clear out pending MCEs */
	/* TODO(adurbin): Some of these banks are core vs package
	   scope. For now every CPU clears every bank. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE) || acpi_get_sleep_type() == ACPI_S5)
		mca_configure();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Lock AES-NI configuration so it cannot be changed at runtime. */
	set_aesni_lock();

	/* Set virtualization based on Kconfig option */
	set_vmx_and_lock();

	/*
	 * Enable ACPI PM timer emulation, which also lets microcode know
	 * location of ACPI_BASE_ADDRESS. This also enables other features
	 * implemented in microcode.
	 */
	enable_pm_timer_emulation();

	/* Set Max Non-Turbo ratio if RAPL is disabled. */
	if (CONFIG(SOC_INTEL_DISABLE_POWER_LIMITS)) {
		cpu_set_p_state_to_max_non_turbo_ratio();
		/* Disable speed step */
		cpu_set_eist(false);
	} else if (CONFIG(SOC_INTEL_SET_MIN_CLOCK_RATIO)) {
		cpu_set_p_state_to_min_clock_ratio();
		/* Disable speed step */
		cpu_set_eist(false);
	}
}
| 105 | |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 106 | #if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT) |
/* device_operations .init hook: forwards to the common per-core init. */
static void soc_init_core(struct device *cpu)
{
	soc_core_init(cpu);
}
| 111 | |
/* Device operations bound to each CPU device matched by cpu_table. */
static struct device_operations cpu_dev_ops = {
	.init = soc_init_core,
};
| 115 | |
/* CPUIDs this driver binds to: Apollo Lake and Gemini Lake steppings. */
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_R0, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};
| 125 | |
/* Register the CPU driver for the IDs listed in cpu_table. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 130 | #endif |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 131 | |
/*
 * MP and SMM loading initialization.
 */

/* SMM relocation parameters computed once in get_smm_info() and then
   consumed by every CPU's relocation_handler(). */
struct smm_relocation_attrs {
	uint32_t smbase;	/* Base of the SMM (TSEG) region. */
	uint32_t smrr_base;	/* SMRR base value, incl. memory type bits. */
	uint32_t smrr_mask;	/* SMRR mask value, incl. valid bit. */
};

static struct smm_relocation_attrs relo_attrs;
| 142 | |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 143 | /* |
| 144 | * Do essential initialization tasks before APs can be fired up. |
| 145 | * |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 146 | * IF (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) - |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 147 | * Skip Pre MP init MTRR programming, as MTRRs are mirrored from BSP, |
| 148 | * that are set prior to ramstage. |
| 149 | * Real MTRRs are programmed after resource allocation. |
| 150 | * |
| 151 | * Do FSP loading before MP Init to ensure that the FSP component stored in |
| 152 | * external stage cache in TSEG does not flush off due to SMM relocation |
| 153 | * during MP Init stage. |
| 154 | * |
| 155 | * ELSE - |
| 156 | * Enable MTRRs on the BSP. This creates the MTRR solution that the |
| 157 | * APs will use. Otherwise APs will try to apply the incomplete solution |
| 158 | * as the BSP is calculating it. |
| 159 | */ |
| 160 | static void pre_mp_init(void) |
| 161 | { |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 162 | if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) { |
Kyösti Mälkki | cc93c6e | 2021-01-09 22:53:52 +0200 | [diff] [blame] | 163 | fsps_load(); |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 164 | return; |
| 165 | } |
| 166 | x86_setup_mtrrs_with_detect(); |
| 167 | x86_mtrr_check(); |
| 168 | } |
| 169 | |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 170 | #if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT) |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 171 | static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt) |
| 172 | { |
| 173 | msr_t msr; |
| 174 | msr = rdmsr(MSR_CORE_THREAD_COUNT); |
| 175 | *num_virt = (msr.lo >> 0) & 0xffff; |
| 176 | *num_phys = (msr.lo >> 16) & 0xffff; |
| 177 | } |
| 178 | |
Aaron Durbin | e72b9d4 | 2016-05-03 15:56:24 -0500 | [diff] [blame] | 179 | /* Find CPU topology */ |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 180 | int get_cpu_count(void) |
Aaron Durbin | e72b9d4 | 2016-05-03 15:56:24 -0500 | [diff] [blame] | 181 | { |
| 182 | unsigned int num_virt_cores, num_phys_cores; |
| 183 | |
| 184 | read_cpu_topology(&num_phys_cores, &num_virt_cores); |
| 185 | |
| 186 | printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n", |
| 187 | num_phys_cores, num_virt_cores); |
| 188 | |
| 189 | return num_virt_cores; |
| 190 | } |
| 191 | |
/* mp_ops callback: hand the microcode patch to the MP framework and
   allow parallel loading on the APs. */
void get_microcode_info(const void **microcode, int *parallel)
{
	const void *patch = intel_microcode_find();

	/* Make sure BSP is using the microcode from cbfs */
	intel_microcode_load_unlocked(patch);

	*microcode = patch;
	*parallel = 1;
}
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 200 | #endif |
John Zhao | 3156934 | 2016-08-23 16:38:05 -0700 | [diff] [blame] | 201 | |
/*
 * mp_ops callback: report where the permanent SMM handler lives and the
 * per-CPU save state size, and cache the SMRR base/mask values that each
 * CPU programs later in relocation_handler().
 */
static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			size_t *smm_save_state_size)
{
	uintptr_t smm_base;
	size_t smm_size;
	uintptr_t handler_base;
	size_t handler_size;

	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);

	/* SMRR base: TSEG base with the WRBACK memory type in the low bits. */
	relo_attrs.smbase = smm_base;
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	/* SMRR mask covers the whole TSEG size and carries the valid bit. */
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	*perm_smbase = handler_base;
	*perm_smsize = handler_size;
	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}
| 226 | |
/*
 * mp_ops callback run by each CPU during its relocation SMI: program the
 * per-core SMRR from relo_attrs, then rewrite SMBASE in the current save
 * state so the next SMI lands at this CPU's staggered location.
 */
static void relocation_handler(int cpu, uintptr_t curr_smbase,
				uintptr_t staggered_smbase)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;
	/* Set up SMRR. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_MASK, smrr);
	/* Point this CPU's SMBASE at its staggered SMM area. */
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 242 | /* |
| 243 | * CPU initialization recipe |
| 244 | * |
| 245 | * Note that no microcode update is passed to the init function. CSE updates |
| 246 | * the microcode on all cores before releasing them from reset. That means that |
| 247 | * the BSP and all APs will come up with the same microcode revision. |
| 248 | */ |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 249 | |
| 250 | static void post_mp_init(void) |
| 251 | { |
Kyösti Mälkki | 040c531 | 2020-05-31 20:03:11 +0300 | [diff] [blame] | 252 | global_smi_enable(); |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 253 | |
Michael Niewöhner | 7736bfc | 2019-10-22 23:05:06 +0200 | [diff] [blame] | 254 | if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE)) |
Patrick Rudolph | 5ec97ce | 2019-07-26 14:47:32 +0200 | [diff] [blame] | 255 | mp_run_on_all_cpus(sgx_configure, NULL); |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 256 | } |
| 257 | |
/* Callbacks consumed by mp_init_with_smm() for MP and SMM bring-up. */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};
| 267 | |
Arthur Heymans | 829e8e6 | 2023-01-30 19:09:34 +0100 | [diff] [blame] | 268 | void mp_init_cpus(struct bus *cpu_bus) |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 269 | { |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 270 | /* Clear for take-off */ |
Felix Held | 4dd7d11 | 2021-10-20 23:31:43 +0200 | [diff] [blame] | 271 | /* TODO: Handle mp_init_with_smm failure? */ |
| 272 | mp_init_with_smm(cpu_bus, &mp_ops); |
Aaron Durbin | bf696f5 | 2016-11-10 20:04:19 -0600 | [diff] [blame] | 273 | |
Arthur Heymans | 94ab3a8 | 2023-03-20 23:00:36 +0100 | [diff] [blame] | 274 | /* MTRR setup happens later, so we're done here. */ |
| 275 | if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) |
| 276 | return; |
| 277 | |
Aaron Durbin | bf696f5 | 2016-11-10 20:04:19 -0600 | [diff] [blame] | 278 | /* Temporarily cache the memory-mapped boot media. */ |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 279 | if (CONFIG(BOOT_DEVICE_MEMORY_MAPPED) && |
| 280 | CONFIG(BOOT_DEVICE_SPI_FLASH)) |
Aaron Durbin | efc92a8 | 2017-06-08 10:54:59 -0500 | [diff] [blame] | 281 | fast_spi_cache_bios_region(); |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 282 | } |
Matt DeVillier | 4f0b2e0 | 2024-06-10 21:54:26 -0500 | [diff] [blame^] | 283 | |
| 284 | #if CONFIG(SOC_INTEL_GEMINILAKE) |
| 285 | int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id) |
| 286 | { |
| 287 | /* |
| 288 | * If PRMRR/SGX is supported the FIT microcode load will set the msr |
| 289 | * 0x08b with the Patch revision id one less than the id in the |
| 290 | * microcode binary. The PRMRR support is indicated in the MSR |
| 291 | * MTRRCAP[12]. If SGX is not enabled, check and avoid reloading the |
| 292 | * same microcode during CPU initialization. If SGX is enabled, as |
| 293 | * part of SGX BIOS initialization steps, the same microcode needs to |
| 294 | * be reloaded after the core PRMRR MSRs are programmed. |
| 295 | */ |
| 296 | const msr_t mtrr_cap = rdmsr(MTRR_CAP_MSR); |
| 297 | if (mtrr_cap.lo & MTRR_CAP_PRMRR) { |
| 298 | const msr_t prmrr_phys_base = rdmsr(MSR_PRMRR_PHYS_BASE); |
| 299 | if (prmrr_phys_base.raw) { |
| 300 | return 0; |
| 301 | } |
| 302 | } |
| 303 | return current_patch_id == new_patch_id - 1; |
| 304 | } |
| 305 | #endif |