/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <console/debug.h>
#include <intelblocks/cpulib.h>
#include <cpu/cpu.h>
#include <cpu/intel/cpu_ids.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/turbo.h>
#include <soc/msr.h>
#include <soc/soc_util.h>
#include <soc/smmrelocate.h>
#include <soc/util.h>
#include <assert.h>
#include "chip.h"
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/em64t101_save_state.h>
#include <types.h>

static const config_t *chip_config = NULL;

bool cpu_soc_is_in_untrusted_mode(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Skylake */
	return false;
}

void cpu_soc_bios_done(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Skylake */
}

static void xeon_configure_mca(void)
{
	msr_t msr;
	struct cpuid_result cpuid_regs;

	/*
	 * Check the feature flags: CPUID.(EAX=1):EDX[7] == 1 (MCE)
	 * and CPUID.(EAX=1):EDX[14] == 1 (MCA).
	 */
	cpuid_regs = cpuid(1);
	if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
		return;

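	/*
	 * IA32_MCG_CAP[8] (MCG_CTL_P) advertises the presence of the
	 * IA32_MCG_CTL MSR; per the Intel SDM, writing all ones to it
	 * enables all global machine-check reporting features.
	 */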
	msr = rdmsr(IA32_MCG_CAP);
	if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
		/* Enable all error logging */
		msr.lo = msr.hi = 0xffffffff;
		wrmsr(IA32_MCG_CTL, msr);
	}

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	   of these banks are core vs package scope. For now every CPU clears
	   every bank. */
	mca_configure();
}

static void xeon_sp_core_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_INFO, "%s dev: %s, cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n",
	       __func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id,
	       cpu->path.apic.package_id);
	assert(chip_config);

	/* Set MSR_PKG_CST_CONFIG_CONTROL - scope per core */
	msr.hi = 0;
	msr.lo = (PKG_CSTATE_NO_LIMIT | CFG_LOCK_ENABLE);
	wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* Enable Energy Perf Bias Access, Dynamic switching and lock MSR */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (ENERGY_PERF_BIAS_ACCESS_ENABLE | PWR_PERF_TUNING_DYN_SWITCHING_ENABLE
		| PROCHOT_LOCK_ENABLE);
	wrmsr(MSR_POWER_CTL, msr);

	/* Set P-State ratio */
	msr = rdmsr(MSR_IA32_PERF_CTRL);
	msr.lo &= ~PSTATE_REQ_MASK;
	msr.lo |= (chip_config->pstate_req_ratio << PSTATE_REQ_SHIFT);
	wrmsr(MSR_IA32_PERF_CTRL, msr);

	/*
	 * Set the HWP base feature, EPP register enumeration, and lock the
	 * thermal and MSR bits.
	 * TODO: Set LOCK_MISC_PWR_MGMT_MSR; locking and then issuing wrmsr
	 * on every thread causes an unexpected exception, so only lock once.
	 * This is a package-level MSR. Need to check if it updates correctly
	 * on multi-socket platforms.
	 */
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* skip update if already locked */
		msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR |
			LOCK_THERM_INT);
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}

	/* TODO: MSR_VR_MISC_CONFIG */

	/* Set current limit lock */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	msr.lo |= CURRENT_LIMIT_LOCK;
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

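	/*
	 * On Skylake-SP these two MSRs form pairs of buckets: each byte of
	 * MSR_TURBO_RATIO_LIMIT holds a turbo ratio, and the corresponding
	 * byte of MSR_TURBO_RATIO_LIMIT_CORES holds the active-core count up
	 * to which that ratio applies (Intel SDM vol. 4).
	 */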
	/* Set Turbo Ratio Limits */
	msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT, msr);

	/* Set Turbo Ratio Limit Cores */
	msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);

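	/*
	 * MSR_TURBO_ACTIVATION_RATIO[7:0] defines the highest non-turbo
	 * ratio; P-state requests above this value are treated as turbo
	 * requests (per the Intel SDM).
	 */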
	/* Set Turbo Activation ratio */
	msr = rdmsr(MSR_TURBO_ACTIVATION_RATIO);
	msr.lo |= MAX_NON_TURBO_RATIO;
	wrmsr(MSR_TURBO_ACTIVATION_RATIO, msr);

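	/* IA32_MISC_ENABLE[0] enables fast-strings REP MOVS/STOS operation. */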
	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

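	/*
	 * IA32_ENERGY_PERF_BIAS takes a 4-bit hint, with 0 favoring maximum
	 * performance and 15 favoring maximum energy savings; the value is
	 * extracted from the energy-policy field of
	 * MSR_ENERGY_PERF_BIAS_CONFIG.
	 */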
	/* Set energy policy */
	msr_t msr1 = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
	msr.lo = (msr1.lo & EPB_ENERGY_POLICY_MASK) >> EPB_ENERGY_POLICY_SHIFT;
	msr.hi = 0;
	wrmsr(MSR_IA32_ENERGY_PERF_BIAS, msr);

	/* Enable Turbo */
	enable_turbo();

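	/*
	 * Turbo is only engaged when Enhanced Intel SpeedStep
	 * (IA32_MISC_ENABLE[16]) is also enabled, so turn EIST on whenever
	 * turbo came up enabled.
	 */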
	/* Enable speed step. */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(IA32_MISC_ENABLE);
		msr.lo |= SPEED_STEP_ENABLE_BIT;
		wrmsr(IA32_MISC_ENABLE, msr);
	}

	/* Configure MCA and clear out pending MCEs */
	xeon_configure_mca();
}

static struct device_operations cpu_dev_ops = {
	.init = xeon_sp_core_init,
};

static const struct cpu_device_id cpu_table[] = {
	/* Skylake-SP A0/A1 CPUID 0x506f0 */
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_A0_A1, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP B0 CPUID 0x506f1 */
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_B0, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP 4 CPUID 0x50654 */
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_4, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
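
/*
 * __cpu_driver places this driver in a linker-generated table that coreboot
 * matches against each core's CPUID during CPU enumeration, so
 * xeon_sp_core_init() runs on every core listed in cpu_table above.
 */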

#define CPU_BCLK 100

static void set_max_turbo_freq(void)
{
	msr_t msr, perf_ctl;

	FUNC_ENTER();
	perf_ctl.hi = 0;

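	/*
	 * IA32_PERF_CTL[15:8] holds the requested P-state ratio; the
	 * resulting core frequency is that ratio times CPU_BCLK (100 MHz).
	 */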
	/* Check for configurable TDP option */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "cpu: frequency set to %d MHz\n",
	       ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
	FUNC_EXIT();
}

/*
 * Do essential initialization tasks before APs can be fired up
 *
 * Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	printk(BIOS_DEBUG, "%s: entry\n", __func__);

	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	if (CONFIG(HAVE_SMI_HANDLER))
		global_smi_enable();
}

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_platform_thread_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *bus)
{
	FUNC_ENTER();

	/*
	 * This gets used in the CPU device callback. Other than CPU 0, the
	 * rest of the CPU devices do not have chip_info updated, so the
	 * global chip_config is used as a workaround.
	 */
	chip_config = bus->dev->chip_info;

	config_reset_cpl3_csrs();

	/* calls src/cpu/x86/mp_init.c */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(bus, &mp_ops);

	FUNC_EXIT();
}