/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <acpi/acpi.h>
#include <assert.h>
#include <console/console.h>
#include "chip.h"
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/turbo.h>
#include <cpu/intel/common/common.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <device/device.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <intelblocks/sgx.h>
#include <reg_script.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <types.h>

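/*
 * Per-core MSR settings run from soc_core_init() via reg_script_run().
 * The C-state limit, I/O MWAIT redirect and MONITOR/MWAIT-disable entries
 * are skipped on Gemini Lake; the C1E disable applies to both SoCs.
 */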
static const struct reg_script core_msr_script[] = {
#if !CONFIG(SOC_INTEL_GEMINILAKE)
	/* Enable C-state and IO/MWAIT redirect */
	REG_MSR_WRITE(MSR_PKG_CST_CONFIG_CONTROL,
		(PKG_C_STATE_LIMIT_C2_MASK | CORE_C_STATE_LIMIT_C10_MASK
		| IO_MWAIT_REDIRECT_MASK | CST_CFG_LOCK_MASK)),
	/* Power Management I/O base address for I/O trapping to C-states */
	REG_MSR_WRITE(MSR_PMG_IO_CAPTURE_BASE,
		(ACPI_PMIO_CST_REG | (PMG_IO_BASE_CST_RNG_BLK_SIZE << 16))),
	/* Disable support for MONITOR and MWAIT instructions */
	REG_MSR_RMW(IA32_MISC_ENABLE, ~MONITOR_MWAIT_DIS_MASK, 0),
#endif
	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~POWER_CTL_C1E_MASK, 0),
	REG_SCRIPT_END
};

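/* Report whether the ENABLE_IA_UNTRUSTED bit has been set in MSR_POWER_MISC. */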
bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}

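/*
 * Called once BIOS CPU setup is done: set ENABLE_IA_UNTRUSTED in
 * MSR_POWER_MISC, putting the core into untrusted mode (as reported by
 * cpu_soc_is_in_untrusted_mode() above).
 */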
void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_POWER_MISC, msr);
}

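/* Per-core initialization; runs on each CPU brought up during MP init. */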
void soc_core_init(struct device *cpu)
{
	/* Configure Core PRMRR for SGX. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		prmrr_core_configure();

	/*
	 * Clear out pending MCEs.
	 * TODO(adurbin): Some of these banks are core vs package scope.
	 * For now every CPU clears every bank.
	 */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE) || acpi_get_sleep_type() == ACPI_S5)
		mca_configure();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	set_aesni_lock();

	/*
	 * Enable ACPI PM timer emulation, which also lets the microcode know
	 * the location of ACPI_BASE_ADDRESS. This also enables other features
	 * implemented in microcode.
	 */
	enable_pm_timer_emulation();

	/* Set Max Non-Turbo ratio if RAPL is disabled. */
	if (CONFIG(SOC_INTEL_DISABLE_POWER_LIMITS)) {
		cpu_set_p_state_to_max_non_turbo_ratio();
		/* Disable speed step */
		cpu_set_eist(false);
	} else if (CONFIG(SOC_INTEL_SET_MIN_CLOCK_RATIO)) {
		cpu_set_p_state_to_min_clock_ratio();
		/* Disable speed step */
		cpu_set_eist(false);
	}
}

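/*
 * The classic CPU device driver below is only built when coreboot itself
 * performs MP init. With SOC_INTEL_COMMON_BLOCK_CPU_MPINIT the FSP brings up
 * the cores instead (see apollolake_init_cpus() below, which returns early
 * in that case).
 */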
#if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
static void soc_init_core(struct device *cpu)
{
	soc_core_init(cpu);
}

static struct device_operations cpu_dev_ops = {
	.init = soc_init_core,
};

static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0 },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0 },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0 },
	{ X86_VENDOR_INTEL, CPUID_GLK_A0 },
	{ X86_VENDOR_INTEL, CPUID_GLK_B0 },
	{ X86_VENDOR_INTEL, CPUID_GLK_R0 },
	{ 0, 0 },
};

static const struct cpu_driver driver __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};
#endif

/*
 * MP and SMM loading initialization.
 */
struct smm_relocation_attrs {
	uint32_t smbase;
	uint32_t smrr_base;
	uint32_t smrr_mask;
};

static struct smm_relocation_attrs relo_attrs;

/*
 * Do essential initialization tasks before APs can be fired up.
 *
 * IF (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) -
 * Skip pre-MP-init MTRR programming: the MTRRs set prior to ramstage are
 * mirrored from the BSP. Real MTRRs are programmed after resource allocation.
 *
 * Load the FSP before MP init so that the FSP component stored in the
 * external stage cache in TSEG is not flushed out by SMM relocation during
 * the MP init stage.
 *
 * ELSE -
 * Enable MTRRs on the BSP. This creates the MTRR solution that the APs will
 * use. Otherwise the APs would try to apply an incomplete solution while the
 * BSP is still calculating it.
 */
static void pre_mp_init(void)
{
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) {
		fsps_load();
		return;
	}
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

#if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
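/*
 * Read the core/thread population from MSR_CORE_THREAD_COUNT: active threads
 * in bits 15:0, active cores in bits 31:16.
 */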
static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt)
{
	msr_t msr;
	msr = rdmsr(MSR_CORE_THREAD_COUNT);
	*num_virt = (msr.lo >> 0) & 0xffff;
	*num_phys = (msr.lo >> 16) & 0xffff;
}

/* Find CPU topology */
int get_cpu_count(void)
{
	unsigned int num_virt_cores, num_phys_cores;

	read_cpu_topology(&num_phys_cores, &num_virt_cores);

	printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n",
	       num_phys_cores, num_virt_cores);

	return num_virt_cores;
}

void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 1;

	/* Make sure BSP is using the microcode from cbfs */
	intel_microcode_load_unlocked(*microcode);
}
#endif

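/*
 * Fill in the SMM parameters for MP init: the permanent handler base/size
 * come from the TSEG handler subregion and the save state uses the em64t100
 * layout. SMBASE and the SMRR base/mask are cached in relo_attrs for use by
 * relocation_handler() below.
 */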
static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	uintptr_t smm_base;
	size_t smm_size;
	uintptr_t handler_base;
	size_t handler_size;

	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);

	relo_attrs.smbase = smm_base;
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	*perm_smbase = handler_base;
	*perm_smsize = handler_size;
	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}

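/*
 * Per-CPU SMM relocation: program the SMRR base/mask MSRs and point this
 * CPU's save state SMBASE at its staggered location.
 */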
static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;

	/* Set up SMRR. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_MASK, smrr);
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */

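/*
 * Runs after MP init is complete: enable SMIs and, if SGX is enabled,
 * configure it on every CPU.
 */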
static void post_mp_init(void)
{
	global_smi_enable();

	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		mp_run_on_all_cpus(sgx_configure, NULL);
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

void soc_init_cpus(struct bus *cpu_bus)
{
	/* Clear for take-off */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}

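/*
 * When FSP performs MP init this is a no-op; otherwise make sure the CPU
 * cluster device has a downstream link and hand that bus to soc_init_cpus().
 */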
void apollolake_init_cpus(struct device *dev)
{
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT))
		return;
	if (!dev->link_list)
		add_more_links(dev, 1);
	soc_init_cpus(dev->link_list);

	/* Temporarily cache the memory-mapped boot media. */
	if (CONFIG(BOOT_DEVICE_MEMORY_MAPPED) &&
	    CONFIG(BOOT_DEVICE_SPI_FLASH))
		fast_spi_cache_bios_region();
}