blob: 5014a1378cb3a68ac565b1d4a8891857d742d676 [file] [log] [blame]
Jonathan Zhang3ed903f2023-01-25 11:37:27 -08001/* SPDX-License-Identifier: GPL-2.0-only */
2
Jonathan Zhang3ed903f2023-01-25 11:37:27 -08003#include <console/debug.h>
Jonathan Zhang3ed903f2023-01-25 11:37:27 -08004#include <cpu/intel/common/common.h>
Jonathan Zhang3ed903f2023-01-25 11:37:27 -08005#include <cpu/intel/microcode.h>
6#include <cpu/intel/smm_reloc.h>
7#include <cpu/intel/turbo.h>
Jonathan Zhang3ed903f2023-01-25 11:37:27 -08008#include <cpu/x86/mp.h>
Patrick Rudolph96499842024-03-25 17:05:05 +01009#include <cpu/x86/topology.h>
Jonathan Zhang3ed903f2023-01-25 11:37:27 -080010#include <intelblocks/cpulib.h>
11#include <intelblocks/mp_init.h>
12#include <intelpch/lockdown.h>
13#include <soc/msr.h>
Jonathan Zhang3ed903f2023-01-25 11:37:27 -080014#include <soc/pm.h>
Jonathan Zhang3ed903f2023-01-25 11:37:27 -080015#include <soc/smmrelocate.h>
16#include <soc/util.h>
17
18#include "chip.h"
19
/* Microcode blob located in CBFS; loaded on the BSP in mp_init_cpus(). */
static const void *microcode_patch;

/*
 * Devicetree config for this SoC. Cached in mp_init_cpus() from the CPU
 * bus device, because only CPU 0's device has chip_info populated; the
 * per-CPU init callback reads turbo ratio settings from it.
 */
static const config_t *chip_config = NULL;
23
/*
 * Common-code hook: report whether the CPU runs in untrusted mode.
 * This SoC never does, so always return false.
 */
bool cpu_soc_is_in_untrusted_mode(void)
{
	return false;
}
28
/*
 * Common-code hook called when BIOS is done; no SoC-specific action
 * is required here, so this is intentionally a no-op.
 */
void cpu_soc_bios_done(void)
{
}
32
33static void xeon_configure_mca(void)
34{
35 msr_t msr;
36 struct cpuid_result cpuid_regs;
37
38 /*
39 * Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
40 * and CPUID.(EAX=1):EDX[14]==1 MCA
41 */
42 cpuid_regs = cpuid(1);
43 if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
44 return;
45
46 msr = rdmsr(IA32_MCG_CAP);
47 if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
48 /* Enable all error logging */
49 msr.lo = msr.hi = 0xffffffff;
50 wrmsr(IA32_MCG_CTL, msr);
51 }
52
53 mca_configure();
54}
55
56/*
57 * On server platforms the FIT mechanism only updates the microcode on
58 * the BSP. Loading MCU on AP in parallel seems to fail in 10% of the cases
59 * so do it serialized.
60 */
void get_microcode_info(const void **microcode, int *parallel)
{
	/* Serialized load: parallel MCU loading on APs is unreliable here. */
	*parallel = 0;
	*microcode = intel_microcode_find();
}
66
/*
 * Per-CPU init callback (runs on every logical CPU via cpu_dev_ops.init).
 * Programs power/thermal/turbo MSRs, locks the lockable ones, configures
 * MCA, and locks IA32_FEATURE_CONTROL.
 *
 * NOTE(review): most of the raw MSR values below are platform tuning
 * constants with no named macros; their meaning is taken from the adjacent
 * comments and cannot be verified from this file alone.
 */
static void each_cpu_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_SPEW, "%s dev: %s, cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n",
		__func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id,
		cpu->path.apic.package_id);

	/* Populate the node ID. It will be used as proximity ID. */
	set_cpu_node_id_leaf_1f_b(cpu);
	assert (cpu->path.apic.node_id < CONFIG_MAX_SOCKET);

	/*
	 * Enable PWR_PERF_PLTFRM_OVR and PROCHOT_LOCK.
	 * The value set by FSP is 20_005f, we set it to 1a_00a4_005b.
	 * PROCHOT_LOCK makes the write-once fields sticky until reset.
	 */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (0x16 << RESERVED1_SHIFT) | PWR_PERF_PLTFRM_OVR | PROCHOT_LOCK;
	msr.hi = 0x1a;
	wrmsr(MSR_POWER_CTL, msr);

	/* Set static, idle, dynamic load line impedance */
	msr = rdmsr(MSR_VR_MISC_CONFIG);
	msr.lo = 0x1a1a1a;
	wrmsr(MSR_VR_MISC_CONFIG, msr);

	/* Set current limitation */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	msr.lo = 0x1130;
	msr.lo |= CURRENT_LIMIT_LOCK;
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

	/*
	 * Set Turbo Ratio Limits from devicetree config.
	 * NOTE(review): chip_config is dereferenced without a NULL check;
	 * mp_init_cpus() assigns it before APs start, so this relies on
	 * bus->dev->chip_info being non-NULL — verify for new boards.
	 */
	msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT, msr);

	/* Set Turbo Ratio Limit Cores */
	msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);

	/* Set energy policy (platform tuning value) */
	msr = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
	msr.lo = 0x178fa038;
	wrmsr(MSR_ENERGY_PERF_BIAS_CONFIG, msr);

	/* Package power limits; raw tuning values, no named macros. */
	msr.hi = 0x158d20;
	msr.lo = 0x00158af0;
	wrmsr(PACKAGE_RAPL_LIMIT, msr);

	/*
	 * Set HWP base feature, EPP reg enumeration, lock thermal and msr
	 * This is package level MSR. Need to check if it updates correctly on
	 * multi-socket platform.
	 */
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked skip update */
		msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR
			| LOCK_THERM_INT);
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}

	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);
	/* Enable Turbo */
	enable_turbo();

	/* Enable speed step, but only if turbo actually came up. */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(IA32_MISC_ENABLE);
		msr.lo |= SPEED_STEP_ENABLE_BIT;
		wrmsr(IA32_MISC_ENABLE, msr);
	}

	/* Lock the supported Cstates */
	msr = rdmsr(MSR_PKG_CST_CONFIG_CONTROL);
	msr.lo |= CST_CFG_LOCK_MASK;
	wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* Disable all writes to overclocking limits MSR */
	msr = rdmsr(MSR_FLEX_RATIO);
	msr.lo |= MSR_FLEX_RATIO_OC_LOCK;
	wrmsr(MSR_FLEX_RATIO, msr);

	/* Lock Power Plane Limit MSR */
	msr = rdmsr(MSR_DRAM_PLANE_POWER_LIMIT);
	msr.hi |= MSR_HI_PP_PWR_LIM_LOCK;
	wrmsr(MSR_DRAM_PLANE_POWER_LIMIT, msr);

	/* Clear out pending MCEs */
	xeon_configure_mca();

	/* Enable Vmx */
	// set_vmx_and_lock();
	/* only lock. let vmx enable by FSP */
	set_feature_ctrl_lock();
}
167
/* Device operations bound to each CPU device matched by cpu_table below. */
static struct device_operations cpu_dev_ops = {
	.init = each_cpu_init,
};
171
/* CPUIDs this driver binds to: Sapphire Rapids SP steppings, exact match. */
static const struct cpu_device_id cpu_table[] = {
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_D, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E0, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E2, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E3, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E4, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_Ex, CPUID_EXACT_MATCH_MASK},
	CPU_TABLE_END
};
181
/* Register this CPU driver with the coreboot CPU driver framework. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
186
187static void set_max_turbo_freq(void)
188{
189 msr_t msr, perf_ctl;
190
191 FUNC_ENTER();
192 perf_ctl.hi = 0;
193
194 /* Check for configurable TDP option */
195 if (get_turbo_state() == TURBO_ENABLED) {
196 msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
197 perf_ctl.lo = (msr.lo & 0xff) << 8;
198 } else if (cpu_config_tdp_levels()) {
199 /* Set to nominal TDP ratio */
200 msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
201 perf_ctl.lo = (msr.lo & 0xff) << 8;
202 } else {
203 /* Platform Info bits 15:8 give max ratio */
204 msr = rdmsr(MSR_PLATFORM_INFO);
205 perf_ctl.lo = msr.lo & 0xff00;
206 }
207 wrmsr(IA32_PERF_CTL, perf_ctl);
208
209 printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
210 ((perf_ctl.lo >> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ);
211 FUNC_EXIT();
212}
213
214/*
215 * Do essential initialization tasks before APs can be fired up
216 */
217static void pre_mp_init(void)
218{
219 x86_setup_mtrrs_with_detect();
220 x86_mtrr_check();
221}
222
223static int get_thread_count(void)
224{
225 unsigned int num_phys = 0, num_virts = 0;
226
Shuo Liu13a3c3a2024-04-28 18:42:15 +0800227 /*
228 * This call calculates the thread count which is corresponding to num_virts
229 * (logical cores), while num_phys is corresponding to physical cores (in SMT
230 * system, one physical core has multiple threads, a.k.a. logical cores).
231 * Hence num_phys is not actually used.
232 */
Jonathan Zhang3ed903f2023-01-25 11:37:27 -0800233 cpu_read_topology(&num_phys, &num_virts);
234 printk(BIOS_SPEW, "Detected %u cores and %u threads\n", num_phys, num_virts);
235 return num_virts * soc_get_num_cpus();
236}
237
238static void post_mp_init(void)
239{
240 /* Set Max Ratio */
241 set_max_turbo_freq();
242
243 if (CONFIG(HAVE_SMI_HANDLER)) {
244 global_smi_enable();
245 if (get_lockdown_config() == CHIPSET_LOCKDOWN_COREBOOT)
246 pmc_lock_smi();
247 }
248}
249
/*
 * MP init callbacks consumed by mp_init_with_smm(). SMM relocation
 * hooks are compiled in only when an SMI handler is configured.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_thread_count,
#if CONFIG(HAVE_SMI_HANDLER)
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
#endif
	.get_microcode_info = get_microcode_info,
	.post_mp_init = post_mp_init,
};
261
262void mp_init_cpus(struct bus *bus)
263{
264 /*
265 * chip_config is used in cpu device callback. Other than cpu 0,
266 * rest of the CPU devices do not have chip_info updated.
267 */
268 chip_config = bus->dev->chip_info;
269
270 microcode_patch = intel_microcode_find();
Jonathan Zhang3ed903f2023-01-25 11:37:27 -0800271 intel_microcode_load_unlocked(microcode_patch);
272
273 if (mp_init_with_smm(bus, &mp_ops) < 0)
274 printk(BIOS_ERR, "MP initialization failure.\n");
Jonathan Zhang3ed903f2023-01-25 11:37:27 -0800275}