/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <console/debug.h>
#include <intelblocks/cpulib.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/turbo.h>
#include <soc/msr.h>
#include <soc/cpu.h>
#include <soc/soc_util.h>
#include <soc/smmrelocate.h>
#include <soc/util.h>
#include <assert.h>
#include "chip.h"
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/em64t101_save_state.h>
#include <types.h>

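/* Devicetree config cached for the per-CPU init callback; see xeon_sp_init_cpus() */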
static const config_t *chip_config = NULL;

bool cpu_soc_is_in_untrusted_mode(void)
{
	/* IA_UNTRUSTED_MODE is not supported on Skylake-SP */
	return false;
}

void cpu_soc_bios_done(void)
{
	/* IA_UNTRUSTED_MODE is not supported on Skylake-SP */
}

static void xeon_configure_mca(void)
{
	msr_t msr;
	struct cpuid_result cpuid_regs;

	/* Check feature flags: CPUID.(EAX=1):EDX[7]==1 for MCE
	 * and CPUID.(EAX=1):EDX[14]==1 for MCA */
	cpuid_regs = cpuid(1);
	if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
		return;

	msr = rdmsr(IA32_MCG_CAP);
	if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
		/* Enable all error logging */
		msr.lo = msr.hi = 0xffffffff;
		wrmsr(IA32_MCG_CTL, msr);
	}

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	   of these banks are core vs package scope. For now every CPU clears
	   every bank. */
	mca_configure();
}

static void xeon_sp_core_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_INFO, "%s dev: %s, cpu: %lu, apic_id: 0x%x\n",
	       __func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id);
	assert(chip_config);

	/* Set MSR_PKG_CST_CONFIG_CONTROL - per-core scope */
	msr.hi = 0;
	msr.lo = (PKG_CSTATE_NO_LIMIT | CFG_LOCK_ENABLE);
	wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* Enable Energy Perf Bias Access, Dynamic switching and lock MSR */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (ENERGY_PERF_BIAS_ACCESS_ENABLE | PWR_PERF_TUNING_DYN_SWITCHING_ENABLE
		| PROCHOT_LOCK_ENABLE);
	wrmsr(MSR_POWER_CTL, msr);

	/* Set the P-state ratio requested by the devicetree */
	msr = rdmsr(MSR_IA32_PERF_CTRL);
	msr.lo &= ~PSTATE_REQ_MASK;
	msr.lo |= (chip_config->pstate_req_ratio << PSTATE_REQ_SHIFT);
	wrmsr(MSR_IA32_PERF_CTRL, msr);

	/*
	 * Set HWP base feature, EPP reg enumeration, lock thermal and msr.
	 * TODO: Setting LOCK_MISC_PWR_MGMT_MSR causes an unexpected exception
	 * if the MSR is locked and wrmsr is then issued on every thread.
	 * This is a package-level MSR. Need to check if it updates correctly
	 * on multi-socket platforms.
	 */
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* skip the update if already locked */
		msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR |
			LOCK_THERM_INT);
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}

	/* TODO: MSR_VR_MISC_CONFIG */

	/* Set current limit lock */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	msr.lo |= CURRENT_LIMIT_LOCK;
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

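	/*
	 * On Skylake-SP, each byte of MSR_TURBO_RATIO_LIMIT holds a turbo
	 * ratio limit, and the corresponding byte of
	 * MSR_TURBO_RATIO_LIMIT_CORES holds the active-core count at which
	 * that limit applies. Both 64-bit values come straight from the
	 * devicetree.
	 */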
	/* Set Turbo Ratio Limits */
	msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT, msr);

	/* Set Turbo Ratio Limit Cores */
	msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);

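	/*
	 * P-state requests above the turbo activation ratio are treated as
	 * turbo requests, so program it to the maximum non-turbo ratio.
	 */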
	/* Set Turbo Activation ratio */
	msr = rdmsr(MSR_TURBO_ACTIVATION_RATIO);
	msr.lo |= MAX_NON_TURBO_RATIO;
	wrmsr(MSR_TURBO_ACTIVATION_RATIO, msr);

	/* Enable Fast Strings (IA32_MISC_ENABLE[0]) */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	/*
	 * Set the energy policy: mirror the policy field from
	 * MSR_ENERGY_PERF_BIAS_CONFIG into IA32_ENERGY_PERF_BIAS
	 * (0 = maximum performance, 15 = maximum energy savings).
	 */
	msr_t msr1 = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
	msr.lo = (msr1.lo & EPB_ENERGY_POLICY_MASK) >> EPB_ENERGY_POLICY_SHIFT;
	msr.hi = 0;
	wrmsr(MSR_IA32_ENERGY_PERF_BIAS, msr);

	/* Enable Turbo */
	enable_turbo();

	/* Enable Enhanced SpeedStep (IA32_MISC_ENABLE[16]) */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(IA32_MISC_ENABLE);
		msr.lo |= SPEED_STEP_ENABLE_BIT;
		wrmsr(IA32_MISC_ENABLE, msr);
	}

	/* Clear out pending MCEs and configure MCA */
	xeon_configure_mca();
}

static struct device_operations cpu_dev_ops = {
	.init = xeon_sp_core_init,
};

static const struct cpu_device_id cpu_table[] = {
	/* Skylake-SP A0/A1 CPUID 0x506f0 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_A0_A1, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP B0 CPUID 0x506f1 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_B0, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP 4 CPUID 0x50654 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_4, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};

static void set_max_turbo_freq(void)
{
	msr_t msr, perf_ctl;

	FUNC_ENTER();
	perf_ctl.hi = 0;

	if (get_turbo_state() == TURBO_ENABLED) {
		/* Use the single-core turbo ratio from TURBO_RATIO_LIMIT[7:0] */
		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else if (cpu_config_tdp_levels()) {
		/* Configurable TDP is supported: set the nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give the max non-turbo ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
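	/* The requested ratio lands in IA32_PERF_CTL[15:8] */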
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "cpu: frequency set to %d MHz\n",
	       ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
	FUNC_EXIT();
}

/*
 * Do essential initialization tasks before APs can be fired up.
 *
 * Prevents a race condition in the MTRR solution: enabling MTRRs on the BSP
 * creates the MTRR solution that the APs will use. Otherwise the APs would
 * try to apply an incomplete solution while the BSP is still calculating it.
 */
static void pre_mp_init(void)
{
	printk(BIOS_DEBUG, "%s: entry\n", __func__);

	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	if (CONFIG(HAVE_SMI_HANDLER))
		global_smi_enable();
}

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_platform_thread_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

void xeon_sp_init_cpus(struct device *dev)
{
	FUNC_ENTER();

	/*
	 * chip_config is used in the CPU device init callback. Only the
	 * BSP's CPU device (cpu 0) has chip_info populated, so the global
	 * chip_config is used as a workaround for the remaining CPU devices.
	 */
	chip_config = dev->chip_info;

	config_reset_cpl3_csrs();

	/* Calls into src/cpu/x86/mp_init.c */
	if (mp_init_with_smm(dev->link_list, &mp_ops) != CB_SUCCESS)
		printk(BIOS_ERR, "MP initialization failure.\n");

	/* Update the NUMA domain for all CPU devices */
	xeonsp_init_cpu_config();

	FUNC_EXIT();
}