/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/cr.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/turbo.h>
#include <cpu/intel/common/common.h>
#include <device/device.h>
#include <device/pci.h>
#include <intelblocks/cpulib.h>

#include <soc/msr.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/smm.h>
#include <soc/soc_util.h>
#include <types.h>

static struct smm_relocation_attrs relo_attrs;

static void dnv_configure_mca(void)
{
	msr_t msr;
	struct cpuid_result cpuid_regs;

	/* Check the feature flags in CPUID.(EAX=1):EDX[7]==1 (MCE)
	 * and CPUID.(EAX=1):EDX[14]==1 (MCA). */
	cpuid_regs = cpuid(1);
	if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
		return;

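	/* MCG_CAP.CTL_P (bit 8) reports whether IA32_MCG_CTL is implemented. */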
	msr = rdmsr(IA32_MCG_CAP);
	if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
		/* Enable all error logging */
		msr.lo = msr.hi = 0xffffffff;
		wrmsr(IA32_MCG_CTL, msr);
	}

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs. package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	/* TODO: Install a fallback MC handler for each core in case the OS
	 * does not provide one. Is it really needed? */

	/* Enable the machine check exception */
	write_cr4(read_cr4() | CR4_MCE);
}

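/*
 * With the interrupt paths masked below, TM1/TM2/EMTTM still throttle the
 * core autonomously in hardware once enabled in IA32_MISC_ENABLE; firmware
 * only needs to switch the monitor on.
 */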
static void configure_thermal_core(void)
{
	msr_t msr;

	/* Disable thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= THERMAL_MONITOR_ENABLE_BIT; /* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);
}

static void denverton_core_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_DEBUG, "Init Denverton-NS SoC cores.\n");

	/* Clear out pending MCEs */
	dnv_configure_mca();

	/* Configure thermal sensors */
	configure_thermal_core();

	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	set_aesni_lock();

	/* Enable Turbo */
	enable_turbo();

	/* Enable Enhanced SpeedStep; it is always on for this SoC. */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= SPEED_STEP_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	enable_pm_timer_emulation();
}

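/*
 * Bind the per-core init routine to every Denverton-NS CPUID listed below;
 * the cpu_driver framework matches on vendor/device ID at enumeration time.
 */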
static struct device_operations cpu_dev_ops = {
	.init = denverton_core_init,
};

static const struct cpu_device_id cpu_table[] = {
	{X86_VENDOR_INTEL, CPUID_DENVERTON_A0_A1}, /* Denverton-NS A0/A1 CPUID */
	{X86_VENDOR_INTEL, CPUID_DENVERTON_B0}, /* Denverton-NS B0 CPUID */
	{0, 0},
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};

/*
 * MP and SMM loading initialization.
 */

static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;
	(void)cpu;

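	/*
	 * The SMRR pair mirrors the variable-MTRR layout: the base register
	 * carries the memory type (write-back) in its low byte and the mask
	 * register carries the valid bit. Both values are precomputed in
	 * get_smm_info() below.
	 */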
	/* Set up SMRR. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_MASK, smrr);
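	/*
	 * Point this CPU's save state SMBASE at its staggered copy; RSM
	 * latches the new value, so the next SMI enters the relocated
	 * handler.
	 */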
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	uintptr_t smm_base;
	size_t smm_size;
	uintptr_t handler_base;
	size_t handler_size;

	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);

	relo_attrs.smbase = smm_base;
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	*perm_smbase = handler_base;
	*perm_smsize = handler_size;
	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}

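/*
 * Walk CPUID leaf 0xB (extended topology) until the level type in ECX[15:8]
 * reports "core" (2); EBX[15:0] then gives the number of logical processors
 * at that level, which equals the core count here because Denverton-NS has
 * no hyperthreading. The loop relies on the processor always enumerating a
 * core level, which holds for parts that implement leaf 0xB.
 */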
static unsigned int detect_num_cpus_via_cpuid(void)
{
	unsigned int ecx = 0;

	while (1) {
		const struct cpuid_result leaf_b = cpuid_ext(0xb, ecx);

		/* The processor doesn't have hyperthreading, so just
		   determine the number of cores from the level type
		   (ecx[15:8] == 2). */
		if ((leaf_b.ecx >> 8 & 0xff) == 2)
			return leaf_b.ebx & 0xffff;

		ecx++;
	}
}

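/*
 * The MCH mirrors the fused/soft-disabled core state in MCHBAR registers,
 * so the active core count is the population count of
 * (exists_mask & ~disable_mask) over the low CONFIG_MAX_CPUS bits.
 */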
static int detect_num_cpus_via_mch(void)
{
	/* Assumes that FSP has already programmed the core disable register */
	u32 core_exists_mask, active_cores_mask;
	u32 core_disable_mask;
	int active_cores = 0, total_cores = 0;
	int counter = 0;

	/* Get Masks for Total Existing SOC Cores and Core Disable Mask */
	core_exists_mask = MMIO32(DEFAULT_MCHBAR + MCH_BAR_CORE_EXISTS_MASK);
	core_disable_mask = MMIO32(DEFAULT_MCHBAR + MCH_BAR_CORE_DISABLE_MASK);
	active_cores_mask = (~core_disable_mask) & core_exists_mask;

	/* Calculate Number of Active Cores */
	for (; counter < CONFIG_MAX_CPUS;
	     counter++, active_cores_mask >>= 1, core_exists_mask >>= 1) {
		active_cores += (active_cores_mask & CORE_BIT_MSK);
		total_cores += (core_exists_mask & CORE_BIT_MSK);
	}

	printk(BIOS_DEBUG, "Number of Active Cores: %d of %d total.\n",
	       active_cores, total_cores);

	return active_cores;
}

/* Find the CPU count: prefer the MCH registers, fall back to CPUID */
int get_cpu_count(void)
{
	unsigned int num_cpus = detect_num_cpus_via_mch();

	if (num_cpus == 0 || num_cpus > CONFIG_MAX_CPUS) {
		num_cpus = detect_num_cpus_via_cpuid();
		printk(BIOS_DEBUG, "Number of Cores (CPUID): %u.\n", num_cpus);
	}
	return num_cpus;
}

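/*
 * IA32_PERF_CTL bits 15:8 request the target ratio; the resulting core
 * frequency is that ratio times the bus clock (CPU_BCLK). Pick the highest
 * ratio the part allows: the turbo limit if turbo is enabled, else the
 * nominal config-TDP ratio, else the maximum non-turbo ratio.
 */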
static void set_max_turbo_freq(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for the configurable TDP option */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;

	} else if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;

	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

/*
 * Do essential initialization tasks before APs can be fired up.
 *
 * Prevent a race condition in the MTRR solution: enable MTRRs on the BSP,
 * which creates the MTRR solution that the APs will use. Otherwise the APs
 * would try to apply an incomplete solution while the BSP is still
 * calculating it.
 */
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static void post_mp_init(void)
{
	/* Set max ratio */
	set_max_turbo_freq();

	/*
	 * Now that the BSP and all APs have been relocated, let SMIs start
	 * flowing.
	 */
	global_smi_enable();
}

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. The CSE
 * updates the microcode on all cores before releasing them from reset, so
 * the BSP and all APs come up with the same microcode revision.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* Clear for take-off */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}
Mariusz Szafranskia4041332017-08-02 17:28:17 +0200293}