blob: 14535e2ee77bc381ea288b8b41f8f00f60cc6a63 [file] [log] [blame]
Andrey Petrov8670e822020-03-30 12:25:06 -07001/* SPDX-License-Identifier: GPL-2.0-only */
Andrey Petrov8670e822020-03-30 12:25:06 -07002
Arthur Heymans3838ede2021-05-31 16:10:05 +02003#include <acpi/acpigen.h>
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -07004#include <assert.h>
Andrey Petrov8670e822020-03-30 12:25:06 -07005#include <console/console.h>
Marc Jones8b522db2020-10-12 11:58:46 -06006#include <console/debug.h>
Andrey Petrov8670e822020-03-30 12:25:06 -07007#include <cpu/cpu.h>
Marc Jonesc6a6e542020-11-02 11:59:16 -07008#include <cpu/intel/common/common.h>
Rocky Phagura17a798b2020-10-08 13:32:41 -07009#include <cpu/intel/em64t101_save_state.h>
Andrey Petrov8670e822020-03-30 12:25:06 -070010#include <cpu/intel/microcode.h>
Rocky Phagura17a798b2020-10-08 13:32:41 -070011#include <cpu/intel/smm_reloc.h>
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -070012#include <cpu/intel/turbo.h>
Andrey Petrov8670e822020-03-30 12:25:06 -070013#include <cpu/x86/mp.h>
14#include <cpu/x86/mtrr.h>
15#include <intelblocks/cpulib.h>
16#include <intelblocks/mp_init.h>
Marc Jones52e14f72021-03-11 14:49:19 -070017#include <intelpch/lockdown.h>
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -070018#include <soc/msr.h>
Arthur Heymans3838ede2021-05-31 16:10:05 +020019#include <soc/pci_devs.h>
Marc Jones52e14f72021-03-11 14:49:19 -070020#include <soc/pm.h>
Rocky Phagura17a798b2020-10-08 13:32:41 -070021#include <soc/smmrelocate.h>
Arthur Heymans3838ede2021-05-31 16:10:05 +020022#include <soc/soc_util.h>
Marc Jones18960ce2020-11-02 12:41:12 -070023#include <soc/util.h>
Felix Heldd27ef5b2021-10-20 20:18:12 +020024#include <types.h>
Marc Jonesc6a6e542020-11-02 11:59:16 -070025
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -070026#include "chip.h"
Andrey Petrov8670e822020-03-30 12:25:06 -070027
28static const void *microcode_patch;
29
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -070030static const config_t *chip_config = NULL;
31
/*
 * Report whether the SoC runs with IA_UNTRUSTED_MODE asserted.
 * Always false here: IA_UNTRUSTED_MODE is not supported in Cooper Lake,
 * so the generic code paths that key off it are skipped.
 */
bool cpu_soc_is_in_untrusted_mode(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Cooper Lake */
	return false;
}
37
/*
 * Hook called when BIOS setup is complete; would normally set the
 * untrusted-mode/BIOS-done MSR. Intentionally a no-op on this SoC.
 */
void cpu_soc_bios_done(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Cooper Lake */
}
42
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -070043static void xeon_configure_mca(void)
44{
45 msr_t msr;
46 struct cpuid_result cpuid_regs;
47
48 /*
49 * Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
50 * and CPUID.(EAX=1):EDX[14]==1 MCA
51 */
52 cpuid_regs = cpuid(1);
53 if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
54 return;
55
56 msr = rdmsr(IA32_MCG_CAP);
57 if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
58 /* Enable all error logging */
59 msr.lo = msr.hi = 0xffffffff;
60 wrmsr(IA32_MCG_CTL, msr);
61 }
62
63 mca_configure();
64}
65
Arthur Heymans83318332021-01-18 20:00:35 +010066/*
67 * On server platforms the FIT mechanism only updates the microcode on
68 * the BSP. Loading MCU on AP in parallel seems to fail in 10% of the cases
69 * so do it serialized.
70 */
Andrey Petrov8670e822020-03-30 12:25:06 -070071void get_microcode_info(const void **microcode, int *parallel)
72{
Patrick Rudolph3fa23b82021-01-25 09:42:08 +010073 *microcode = intel_microcode_find();
Arthur Heymans83318332021-01-18 20:00:35 +010074 *parallel = 0;
Andrey Petrov8670e822020-03-30 12:25:06 -070075}
76
/*
 * Per-CPU init callback, invoked on every logical CPU via cpu_dev_ops.init.
 * Programs power-management, feature-enable and lock MSRs for this thread.
 */
static void each_cpu_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_SPEW, "%s dev: %s, cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n",
		__func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id,
		cpu->path.apic.package_id);

	/*
	 * Set HWP base feature, EPP reg enumeration, lock thermal and msr.
	 * This is a package level MSR. Need to check if it updates correctly on
	 * multi-socket platform.
	 */
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked skip update */
		msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR |
			LOCK_THERM_INT);
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}

	/* Enable Fast Strings (fast REP MOVSB/STOSB) */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);
	/* Enable Turbo */
	enable_turbo();

	/* Enable speed step, but only once turbo is confirmed enabled. */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(IA32_MISC_ENABLE);
		msr.lo |= SPEED_STEP_ENABLE_BIT;
		wrmsr(IA32_MISC_ENABLE, msr);
	}

	/* Configure machine-check banks and clear out pending MCEs */
	xeon_configure_mca();

	/* Enable VMX and lock the feature-control MSR */
	set_vmx_and_lock();
	/* Lock AES-NI configuration against later changes */
	set_aesni_lock();

	/* The MSRs and CSRS have the same register layout. Use the CSRS bit definitions.
	   Lock Turbo. NOTE(review): unclear whether FSP-S already sets this — confirm. */
	msr = rdmsr(MSR_TURBO_ACTIVATION_RATIO);
	msr.lo |= (TURBO_ACTIVATION_RATIO_LOCK);
	wrmsr(MSR_TURBO_ACTIVATION_RATIO, msr);
}
124
/* Device operations for each CPU device: only a per-CPU init hook is needed. */
static struct device_operations cpu_dev_ops = {
	.init = each_cpu_init,
};
128
/* CPUIDs this driver binds to: Cooper Lake-SP steppings A0 and A1 (exact match). */
static const struct cpu_device_id cpu_table[] = {
	{X86_VENDOR_INTEL, CPUID_COOPERLAKE_SP_A0, CPUID_EXACT_MATCH_MASK },
	{X86_VENDOR_INTEL, CPUID_COOPERLAKE_SP_A1, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};
134
/* Register the CPU driver so matching CPUs get cpu_dev_ops attached. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
139
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -0700140static void set_max_turbo_freq(void)
141{
142 msr_t msr, perf_ctl;
143
144 FUNC_ENTER();
145 perf_ctl.hi = 0;
146
147 /* Check for configurable TDP option */
148 if (get_turbo_state() == TURBO_ENABLED) {
149 msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
150 perf_ctl.lo = (msr.lo & 0xff) << 8;
151 } else if (cpu_config_tdp_levels()) {
152 /* Set to nominal TDP ratio */
153 msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
154 perf_ctl.lo = (msr.lo & 0xff) << 8;
155 } else {
156 /* Platform Info bits 15:8 give max ratio */
157 msr = rdmsr(MSR_PLATFORM_INFO);
158 perf_ctl.lo = msr.lo & 0xff00;
159 }
160 wrmsr(IA32_PERF_CTL, perf_ctl);
161
162 printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
Jingle Hsua41b12c2020-08-11 20:48:45 +0800163 ((perf_ctl.lo >> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ);
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -0700164 FUNC_EXIT();
165}
166
/*
 * Do essential initialization tasks before APs can be fired up:
 * detect and program MTRRs on the BSP, then sanity-check the result.
 */
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
175
176static int get_thread_count(void)
177{
178 unsigned int num_phys = 0, num_virts = 0;
179
180 cpu_read_topology(&num_phys, &num_virts);
181 printk(BIOS_SPEW, "Detected %u cores and %u threads\n", num_phys, num_virts);
Andrey Petrov5d769582020-04-23 10:54:18 -0700182 /*
183 * Currently we do not know a way to figure out how many CPUs we have total
184 * on multi-socketed. So we pretend all sockets are populated with CPUs with
185 * same thread/core fusing.
186 * TODO: properly figure out number of active sockets OR refactor MPinit code
187 * to remove requirements of having to know total number of CPUs in advance.
188 */
189 return num_virts * CONFIG_MAX_SOCKET;
Andrey Petrov8670e822020-03-30 12:25:06 -0700190}
191
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -0700192static void post_mp_init(void)
193{
194 /* Set Max Ratio */
195 set_max_turbo_freq();
Kyösti Mälkki0778c862020-06-10 12:44:03 +0300196
Marc Jones52e14f72021-03-11 14:49:19 -0700197 if (CONFIG(HAVE_SMI_HANDLER)) {
Rocky Phagura17a798b2020-10-08 13:32:41 -0700198 global_smi_enable();
Marc Jones52e14f72021-03-11 14:49:19 -0700199 if (get_lockdown_config() == CHIPSET_LOCKDOWN_COREBOOT)
200 pmc_lock_smi();
201 }
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -0700202}
203
/* MP/SMM init callbacks consumed by mp_init_with_smm(). */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_thread_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
	.get_microcode_info = get_microcode_info,
	.post_mp_init = post_mp_init,
};
213
Arthur Heymans829e8e62023-01-30 19:09:34 +0100214void mp_init_cpus(struct bus *bus)
Andrey Petrov8670e822020-03-30 12:25:06 -0700215{
216 microcode_patch = intel_microcode_find();
217
218 if (!microcode_patch)
219 printk(BIOS_ERR, "microcode not found in CBFS!\n");
220
221 intel_microcode_load_unlocked(microcode_patch);
222
Felix Held4dd7d112021-10-20 23:31:43 +0200223 /* TODO: Handle mp_init_with_smm failure? */
Arthur Heymans829e8e62023-01-30 19:09:34 +0100224 mp_init_with_smm(bus, &mp_ops);
Jonathan Zhangb7cf7d32020-04-02 20:03:48 -0700225
226 /*
227 * chip_config is used in cpu device callback. Other than cpu 0,
228 * rest of the CPU devices do not have chip_info updated.
229 */
Arthur Heymans829e8e62023-01-30 19:09:34 +0100230 chip_config = bus->dev->chip_info;
Andrey Petrov8670e822020-03-30 12:25:06 -0700231}