blob: 8491a3522c225a672711f348762aee6933332ff4 [file] [log] [blame]
Patrick Georgiac959032020-05-05 22:49:26 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Mariusz Szafranskia4041332017-08-02 17:28:17 +02002
3#include <console/console.h>
4#include <cpu/cpu.h>
Julien Viard de Galbert69c57e12018-03-07 14:19:03 +01005#include <cpu/x86/cr.h>
Dmitry Ponamorevccc27d22021-09-15 03:19:15 -07006#include <cpu/x86/lapic.h>
Mariusz Szafranskia4041332017-08-02 17:28:17 +02007#include <cpu/x86/mp.h>
8#include <cpu/x86/msr.h>
9#include <cpu/x86/mtrr.h>
Kyösti Mälkkib2a5f0b2019-08-04 19:54:32 +030010#include <cpu/x86/smm.h>
Kyösti Mälkkifaf20d32019-08-14 05:41:41 +030011#include <cpu/intel/smm_reloc.h>
Kyösti Mälkkie31ec292019-08-10 17:27:01 +030012#include <cpu/intel/em64t100_save_state.h>
Mariusz Szafranskia4041332017-08-02 17:28:17 +020013#include <cpu/intel/turbo.h>
Michael Niewöhner63032432020-10-11 17:34:54 +020014#include <cpu/intel/common/common.h>
Mariusz Szafranskia4041332017-08-02 17:28:17 +020015#include <device/device.h>
16#include <device/pci.h>
Julien Viard de Galbert15b570b2018-03-29 14:35:52 +020017#include <intelblocks/cpulib.h>
Angel Ponsf5dfe242021-11-03 15:50:06 +010018#include <lib.h>
Mariusz Szafranskia4041332017-08-02 17:28:17 +020019#include <soc/msr.h>
20#include <soc/cpu.h>
21#include <soc/iomap.h>
22#include <soc/smm.h>
23#include <soc/soc_util.h>
Felix Heldd27ef5b2021-10-20 20:18:12 +020024#include <types.h>
Mariusz Szafranskia4041332017-08-02 17:28:17 +020025
Subrata Banik56ab8e22022-01-07 13:40:19 +000026bool cpu_soc_is_in_untrusted_mode(void)
27{
28 msr_t msr;
29
30 msr = rdmsr(MSR_POWER_MISC);
31 return !!(msr.lo & ENABLE_IA_UNTRUSTED);
32}
33
Subrata Banik37a55d12022-05-30 18:11:12 +000034void cpu_soc_bios_done(void)
35{
36 msr_t msr;
37
38 msr = rdmsr(MSR_POWER_MISC);
39 msr.lo |= ENABLE_IA_UNTRUSTED;
40 wrmsr(MSR_POWER_MISC, msr);
41}
42
Mariusz Szafranskia4041332017-08-02 17:28:17 +020043static struct smm_relocation_attrs relo_attrs;
44
/*
 * Enable machine-check reporting on this core.
 *
 * Returns early if the CPU advertises neither MCE (CPUID.1:EDX[7]) nor
 * MCA (CPUID.1:EDX[14]). Otherwise, if a global control MSR is present,
 * enables all error-logging sources, configures/clears the MCA banks,
 * and finally sets CR4.MCE so machine-check exceptions are delivered.
 */
static void dnv_configure_mca(void)
{
	msr_t msr;
	struct cpuid_result cpuid_regs;

	/* Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
	 * and CPUID.(EAX=1):EDX[14]==1 MCA*/
	cpuid_regs = cpuid(1);
	if ((cpuid_regs.edx & (1<<7 | 1<<14)) != (1<<7 | 1<<14))
		return;

	/* IA32_MCG_CAP.CTL_P indicates whether IA32_MCG_CTL exists. */
	msr = rdmsr(IA32_MCG_CAP);
	if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
		/* Enable all error logging */
		msr.lo = msr.hi = 0xffffffff;
		wrmsr(IA32_MCG_CTL, msr);
	}

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	   of these banks are core vs package scope. For now every CPU clears
	   every bank. */
	mca_configure();

	/* TODO install a fallback MC handler for each core in case OS does
	   not provide one. Is it really needed? */

	/* Enable the machine check exception */
	write_cr4(read_cr4() | CR4_MCE);
}
74
Julien Viard de Galbert30651572018-03-08 16:26:41 +010075static void configure_thermal_core(void)
76{
77 msr_t msr;
78
79 /* Disable Thermal interrupts */
80 msr.lo = 0;
81 msr.hi = 0;
82 wrmsr(IA32_THERM_INTERRUPT, msr);
83 wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
84
85 msr = rdmsr(IA32_MISC_ENABLE);
86 msr.lo |= THERMAL_MONITOR_ENABLE_BIT; /* TM1/TM2/EMTTM enable */
87 wrmsr(IA32_MISC_ENABLE, msr);
88}
89
/*
 * Per-CPU init hook (cpu_dev_ops.init); runs on the BSP and every AP.
 * The parameter is unused — all configuration targets the executing CPU
 * via MSR/CR accesses.
 */
static void denverton_core_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_DEBUG, "Init Denverton-NS SoC cores.\n");

	/* Clear out pending MCEs */
	dnv_configure_mca();

	/* Configure Thermal Sensors */
	configure_thermal_core();

	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Lock the AES-NI enable state (common Intel CPU code). */
	set_aesni_lock();

	/* Enable Turbo */
	enable_turbo();

	/* Enable speed step. Always ON.*/
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= SPEED_STEP_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Emulate the ACPI PM timer so the OS sees a ticking timer. */
	enable_pm_timer_emulation();
}
119
/* Device operations bound to every CPU matched by cpu_table below. */
static struct device_operations cpu_dev_ops = {
	.init = denverton_core_init,
};
123
/* CPUID values this driver binds to; exact stepping match is required. */
static const struct cpu_device_id cpu_table[] = {
	/* Denverton-NS A0/A1 CPUID */
	{X86_VENDOR_INTEL, CPUID_DENVERTON_A0_A1, CPUID_EXACT_MATCH_MASK },
	/* Denverton-NS B0 CPUID */
	{X86_VENDOR_INTEL, CPUID_DENVERTON_B0, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};
131
/* Register the CPU driver with coreboot's device model. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
136
137/*
138 * MP and SMM loading initialization.
139 */
140
141static void relocation_handler(int cpu, uintptr_t curr_smbase,
142 uintptr_t staggered_smbase)
143{
144 msr_t smrr;
145 em64t100_smm_state_save_area_t *smm_state;
146 (void)cpu;
147
148 /* Set up SMRR. */
149 smrr.lo = relo_attrs.smrr_base;
150 smrr.hi = 0;
Arthur Heymanse750b38e2018-07-20 23:31:59 +0200151 wrmsr(IA32_SMRR_PHYS_BASE, smrr);
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200152 smrr.lo = relo_attrs.smrr_mask;
153 smrr.hi = 0;
Arthur Heymanse750b38e2018-07-20 23:31:59 +0200154 wrmsr(IA32_SMRR_PHYS_MASK, smrr);
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200155 smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
156 smm_state->smbase = staggered_smbase;
157}
158
/*
 * MP/SMM loader callback: report where SMM lives.
 *
 * Fills in the file-global relo_attrs (consumed by relocation_handler())
 * and returns the permanent handler region plus the per-CPU save state
 * size via the out-parameters.
 */
static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	uintptr_t smm_base;
	size_t smm_size;
	uintptr_t handler_base;
	size_t handler_size;

	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);

	/* SMRR base = region base with the WRBACK memory type in the low bits. */
	relo_attrs.smbase = smm_base;
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	/* SMRR mask covers the (power-of-two) SMM region and is marked valid.
	   NOTE(review): assumes smm_size is a power of two — the usual
	   smm_region() contract; confirm for this platform. */
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	*perm_smbase = handler_base;
	*perm_smsize = handler_size;
	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}
183
Angel Pons81beeae2021-11-03 16:03:45 +0100184static unsigned int detect_num_cpus_via_cpuid(void)
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200185{
Angel Pons81beeae2021-11-03 16:03:45 +0100186 unsigned int ecx = 0;
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200187
188 while (1) {
Angel Pons81beeae2021-11-03 16:03:45 +0100189 const struct cpuid_result leaf_b = cpuid_ext(0xb, ecx);
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200190
191 /* Processor doesn't have hyperthreading so just determine the
Angel Pons81beeae2021-11-03 16:03:45 +0100192 number of cores from level type (ecx[15:8] == 2). */
193 if ((leaf_b.ecx >> 8 & 0xff) == 2)
194 return leaf_b.ebx & 0xffff;
195
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200196 ecx++;
197 }
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200198}
199
Angel Ponsf5dfe242021-11-03 15:50:06 +0100200/* Assumes that FSP has already programmed the cores disabled register */
201static unsigned int detect_num_cpus_via_mch(void)
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200202{
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200203 /* Get Masks for Total Existing SOC Cores and Core Disable Mask */
Angel Ponsf5dfe242021-11-03 15:50:06 +0100204 const u32 core_exists_mask = MMIO32(DEFAULT_MCHBAR + MCH_BAR_CORE_EXISTS_MASK);
205 const u32 core_disable_mask = MMIO32(DEFAULT_MCHBAR + MCH_BAR_CORE_DISABLE_MASK);
206 const u32 active_cores_mask = ~core_disable_mask & core_exists_mask;
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200207
208 /* Calculate Number of Active Cores */
Angel Ponsf5dfe242021-11-03 15:50:06 +0100209 const unsigned int active_cores = popcnt(active_cores_mask);
210 const unsigned int total_cores = popcnt(core_exists_mask);
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200211
Angel Ponsf5dfe242021-11-03 15:50:06 +0100212 printk(BIOS_DEBUG, "Number of Active Cores: %u of %u total.\n",
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200213 active_cores, total_cores);
214
215 return active_cores;
216}
217
218/* Find CPU topology */
219int get_cpu_count(void)
220{
Angel Pons81beeae2021-11-03 16:03:45 +0100221 unsigned int num_cpus = detect_num_cpus_via_mch();
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200222
Angel Pons81beeae2021-11-03 16:03:45 +0100223 if (num_cpus == 0 || num_cpus > CONFIG_MAX_CPUS) {
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200224 num_cpus = detect_num_cpus_via_cpuid();
Angel Pons81beeae2021-11-03 16:03:45 +0100225 printk(BIOS_DEBUG, "Number of Cores (CPUID): %u.\n", num_cpus);
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200226 }
227 return num_cpus;
228}
229
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200230static void set_max_turbo_freq(void)
231{
232 msr_t msr, perf_ctl;
233
234 perf_ctl.hi = 0;
235
236 /* Check for configurable TDP option */
237 if (get_turbo_state() == TURBO_ENABLED) {
238 msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
239 perf_ctl.lo = (msr.lo & 0xff) << 8;
240
241 } else if (cpu_config_tdp_levels()) {
242 /* Set to nominal TDP ratio */
243 msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
244 perf_ctl.lo = (msr.lo & 0xff) << 8;
245
246 } else {
247 /* Platform Info bits 15:8 give max ratio */
248 msr = rdmsr(MSR_PLATFORM_INFO);
249 perf_ctl.lo = msr.lo & 0xff00;
250 }
251 wrmsr(IA32_PERF_CTL, perf_ctl);
252
253 printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
254 ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
255}
256
/*
 * Do essential initialization tasks before APs can be fired up
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	/* Detect and program the MTRR solution on the BSP, then sanity-check it. */
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
269
/* Runs after all APs are up and relocated: set the max ratio and open SMIs. */
static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	/*
	 * Now that all APs have been relocated as well as the BSP let SMIs
	 * start flowing.
	 */
	global_smi_enable();
}
281
/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};
297
/* Entry point from the devicetree CPU cluster: bring up all CPUs and SMM. */
void mp_init_cpus(struct bus *cpu_bus)
{
	/* Clear for take-off */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200303}