/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <acpi/acpi.h>
#include <assert.h>
#include <console/console.h>
#include "chip.h"
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/turbo.h>
#include <cpu/intel/common/common.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <device/device.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <intelblocks/sgx.h>
#include <reg_script.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <types.h>

static const struct reg_script core_msr_script[] = {
#if !CONFIG(SOC_INTEL_GEMINILAKE)
	/* Enable C-state and IO/MWAIT redirect */
	REG_MSR_WRITE(MSR_PKG_CST_CONFIG_CONTROL,
		      (PKG_C_STATE_LIMIT_C2_MASK | CORE_C_STATE_LIMIT_C10_MASK
		       | IO_MWAIT_REDIRECT_MASK | CST_CFG_LOCK_MASK)),
	/* Power Management I/O base address for I/O trapping to C-states */
	REG_MSR_WRITE(MSR_PMG_IO_CAPTURE_BASE,
		      (ACPI_PMIO_CST_REG | (PMG_IO_BASE_CST_RNG_BLK_SIZE << 16))),
	/* Disable support for MONITOR and MWAIT instructions */
	REG_MSR_RMW(IA32_MISC_ENABLE, ~MONITOR_MWAIT_DIS_MASK, 0),
#endif
	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~POWER_CTL_C1E_MASK, 0),
	REG_SCRIPT_END
};
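
/*
 * Worked example for the MSR_PMG_IO_CAPTURE_BASE entry above, assuming the
 * usual Intel encoding (bits [15:0] = I/O address of the LVL_2 register,
 * bits [18:16] = C-state range code): with a hypothetical LVL_2 base of
 * 0x414 and a range code of 2, the MSR would read 0x00020414, and I/O
 * reads of LVL_2..LVL_4 would be trapped and redirected to the
 * corresponding MWAIT C-states.
 */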

bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}

void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_POWER_MISC, msr);
}
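
/*
 * Note (a sketch of the intent, not chipset documentation): both helpers
 * above operate on the ENABLE_IA_UNTRUSTED bit in MSR_POWER_MISC. It is set
 * once firmware hand-off is complete so the cores drop into "untrusted
 * mode", restricting security-sensitive operations from then on;
 * cpu_soc_is_in_untrusted_mode() lets later code detect that state.
 */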

void soc_core_init(struct device *cpu)
{
	/* Configure Core PRMRR for SGX. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		prmrr_core_configure();

	/* Clear out pending MCEs */
	/*
	 * TODO(adurbin): Some of these banks are core vs package
	 * scope. For now every CPU clears every bank.
	 */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE) || acpi_get_sleep_type() == ACPI_S5)
		mca_configure();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	set_aesni_lock();

	/* Set virtualization based on Kconfig option */
	set_vmx_and_lock();

	/*
	 * Enable ACPI PM timer emulation, which also lets microcode know the
	 * location of ACPI_BASE_ADDRESS. This also enables other features
	 * implemented in microcode.
	 */
	enable_pm_timer_emulation();

	/* Set Max Non-Turbo ratio if RAPL is disabled. */
	if (CONFIG(SOC_INTEL_DISABLE_POWER_LIMITS)) {
		cpu_set_p_state_to_max_non_turbo_ratio();
		/* Disable SpeedStep */
		cpu_set_eist(false);
	} else if (CONFIG(SOC_INTEL_SET_MIN_CLOCK_RATIO)) {
		cpu_set_p_state_to_min_clock_ratio();
		/* Disable SpeedStep */
		cpu_set_eist(false);
	}
}

#if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
static void soc_init_core(struct device *cpu)
{
	soc_core_init(cpu);
}

static struct device_operations cpu_dev_ops = {
	.init = soc_init_core,
};

static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_R0, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
#endif

/*
 * MP and SMM loading initialization.
 */
struct smm_relocation_attrs {
	uint32_t smbase;
	uint32_t smrr_base;
	uint32_t smrr_mask;
};

static struct smm_relocation_attrs relo_attrs;

/*
 * Do essential initialization tasks before APs can be fired up.
 *
 * IF (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) -
 * Skip pre-MP-init MTRR programming, as MTRRs are mirrored from the BSP
 * and were set prior to ramstage. Real MTRRs are programmed after
 * resource allocation.
 *
 * Load FSP before MP init to ensure that the FSP component stored in the
 * external stage cache in TSEG is not flushed out due to SMM relocation
 * during the MP init stage.
 *
 * ELSE -
 * Enable MTRRs on the BSP. This creates the MTRR solution that the APs
 * will use. Otherwise the APs would try to apply an incomplete solution
 * while the BSP is still calculating it.
 */
static void pre_mp_init(void)
{
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) {
		fsps_load();
		return;
	}
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

#if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt)
{
	msr_t msr;
	msr = rdmsr(MSR_CORE_THREAD_COUNT);
	*num_virt = (msr.lo >> 0) & 0xffff;
	*num_phys = (msr.lo >> 16) & 0xffff;
}
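
/*
 * Example (hypothetical SKU): a quad-core part without hyper-threading
 * reports 4 threads and 4 cores in MSR_CORE_THREAD_COUNT, so
 * get_cpu_count() below returns 4.
 */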

/* Find CPU topology */
int get_cpu_count(void)
{
	unsigned int num_virt_cores, num_phys_cores;

	read_cpu_topology(&num_phys_cores, &num_virt_cores);

	printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n",
	       num_phys_cores, num_virt_cores);

	return num_virt_cores;
}

void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 1;

	/* Make sure the BSP is using the microcode from CBFS */
	intel_microcode_load_unlocked(*microcode);
}
#endif

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	uintptr_t smm_base;
	size_t smm_size;
	uintptr_t handler_base;
	size_t handler_size;

	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);

	relo_attrs.smbase = smm_base;
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
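
	/*
	 * Worked example with hypothetical numbers: for an 8 MiB TSEG at
	 * 0x7f800000, smrr_base = 0x7f800006 (base | MTRR_TYPE_WRBACK) and
	 * smrr_mask = 0xff800800 (~(8 MiB - 1) masked to 4 KiB granularity,
	 * plus MTRR_PHYS_MASK_VALID), i.e. the same base/mask format used
	 * by the variable MTRRs.
	 */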

	*perm_smbase = handler_base;
	*perm_smsize = handler_size;
	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}

static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;

	/* Set up SMRR. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_MASK, smrr);

	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}
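
/*
 * A short note on the relocation scheme above: every CPU programs the same
 * SMRR base/mask pair, but each writes its own staggered SMBASE into its
 * save state, so subsequent SMIs re-enter each core at a per-CPU offset
 * inside TSEG rather than at the power-on default of 0x30000.
 */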

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static void post_mp_init(void)
{
	global_smi_enable();

	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		mp_run_on_all_cpus(sgx_configure, NULL);
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* Clear for take-off */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);

	/* MTRR setup happens later, so we're done here. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT))
		return;

	/* Temporarily cache the memory-mapped boot media. */
	if (CONFIG(BOOT_DEVICE_MEMORY_MAPPED) &&
	    CONFIG(BOOT_DEVICE_SPI_FLASH))
		fast_spi_cache_bios_region();
}
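
/*
 * A note on the caching step above (intent as assumed from the surrounding
 * code): fast_spi_cache_bios_region() covers the memory-mapped BIOS region
 * with a temporary cacheable range so code fetched from SPI flash during
 * and after AP bring-up is not read uncached; the real MTRR solution
 * computed after resource allocation supersedes it.
 */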

#if CONFIG(SOC_INTEL_GEMINILAKE)
int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
{
	/*
	 * If PRMRR/SGX is supported, the FIT microcode load will set MSR
	 * 0x08b (IA32_BIOS_SIGN_ID) to a patch revision ID one less than
	 * the ID in the microcode binary. PRMRR support is indicated in
	 * MSR MTRRCAP[12]. If SGX is not enabled, check for and avoid
	 * reloading the same microcode during CPU initialization. If SGX is
	 * enabled, the same microcode needs to be reloaded after the core
	 * PRMRR MSRs are programmed, as part of the SGX BIOS initialization
	 * steps.
	 */
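	/*
	 * Example with made-up revision IDs: if the binary in CBFS carries
	 * revision 0x32 and FIT already loaded it, IA32_BIOS_SIGN_ID reports
	 * 0x31, so the final check below skips the redundant reload; if the
	 * PRMRR base is already programmed for SGX, the function returns 0
	 * and the microcode is reloaded as required.
	 */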
	const msr_t mtrr_cap = rdmsr(MTRR_CAP_MSR);
	if (mtrr_cap.lo & MTRR_CAP_PRMRR) {
		const msr_t prmrr_phys_base = rdmsr(MSR_PRMRR_PHYS_BASE);
		if (prmrr_phys_base.raw) {
			return 0;
		}
	}
	return current_patch_id == new_patch_id - 1;
}
#endif