/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <console/debug.h>
#include <intelblocks/cpulib.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/turbo.h>
#include <soc/msr.h>
#include <soc/cpu.h>
#include <soc/soc_util.h>
#include <soc/smmrelocate.h>
#include <soc/util.h>
#include <assert.h>
#include "chip.h"
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/em64t101_save_state.h>
#include <types.h>

static const config_t *chip_config = NULL;

bool cpu_soc_is_in_untrusted_mode(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Skylake */
	return false;
}

void cpu_soc_bios_done(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Skylake */
}

static void xeon_configure_mca(void)
{
	msr_t msr;
	struct cpuid_result cpuid_regs;

	/*
	 * Check the feature flags in CPUID.(EAX=1):EDX[7]==1 (MCE)
	 * and CPUID.(EAX=1):EDX[14]==1 (MCA).
	 */
	cpuid_regs = cpuid(1);
	if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
		return;

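	/* IA32_MCG_CAP[8] (MCG_CTL_P) indicates that the IA32_MCG_CTL MSR is present. */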
	msr = rdmsr(IA32_MCG_CAP);
	if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
		/* Enable all error logging */
		msr.lo = msr.hi = 0xffffffff;
		wrmsr(IA32_MCG_CTL, msr);
	}

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	   of these banks are core vs package scope. For now every CPU clears
	   every bank. */
	mca_configure();
}

static void xeon_sp_core_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_INFO, "%s dev: %s, cpu: %lu, apic_id: 0x%x\n",
	       __func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id);
	assert(chip_config);

	/* Set MSR_PKG_CST_CONFIG_CONTROL - scope per core */
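	/* PKG_CSTATE_NO_LIMIT places no limit on package C-states and
	   CFG_LOCK_ENABLE locks this MSR until the next reset. */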
	msr.hi = 0;
	msr.lo = (PKG_CSTATE_NO_LIMIT | CFG_LOCK_ENABLE);
	wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* Enable Energy Perf Bias Access, Dynamic switching and lock MSR */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (ENERGY_PERF_BIAS_ACCESS_ENABLE | PWR_PERF_TUNING_DYN_SWITCHING_ENABLE
		   | PROCHOT_LOCK_ENABLE);
	wrmsr(MSR_POWER_CTL, msr);

	/* Set P-State ratio */
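	/* The requested ratio comes from the devicetree's pstate_req_ratio setting. */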
	msr = rdmsr(MSR_IA32_PERF_CTRL);
	msr.lo &= ~PSTATE_REQ_MASK;
	msr.lo |= (chip_config->pstate_req_ratio << PSTATE_REQ_SHIFT);
	wrmsr(MSR_IA32_PERF_CTRL, msr);

	/*
	 * Set HWP base feature, EPP reg enumeration, and lock the thermal
	 * interrupt settings and this MSR.
	 * TODO: Setting LOCK_MISC_PWR_MGMT_MSR and then issuing wrmsr on every
	 * thread causes an unexpected exception, so skip the write when the
	 * MSR is already locked.
	 * This is a package-level MSR. Need to check whether it updates
	 * correctly on multi-socket platforms.
	 */
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked, skip the update */
		msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR |
			  LOCK_THERM_INT);
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}

	/* TODO MSR_VR_MISC_CONFIG */

	/* Set current limit lock */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	msr.lo |= CURRENT_LIMIT_LOCK;
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

	/* Set Turbo Ratio Limits */
	msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT, msr);

	/* Set Turbo Ratio Limit Cores */
	msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);

	/* Set Turbo Activation ratio */
	msr = rdmsr(MSR_TURBO_ACTIVATION_RATIO);
	msr.lo |= MAX_NON_TURBO_RATIO;
	wrmsr(MSR_TURBO_ACTIVATION_RATIO, msr);

	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Set energy policy */
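	/* The policy value comes from MSR_ENERGY_PERF_BIAS_CONFIG and is
	   mirrored into IA32_ENERGY_PERF_BIAS for this thread. */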
	msr_t msr1 = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
	msr.lo = (msr1.lo & EPB_ENERGY_POLICY_MASK) >> EPB_ENERGY_POLICY_SHIFT;
	msr.hi = 0;
	wrmsr(MSR_IA32_ENERGY_PERF_BIAS, msr);

	/* Enable Turbo */
	enable_turbo();

	/* Enable SpeedStep */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(IA32_MISC_ENABLE);
		msr.lo |= SPEED_STEP_ENABLE_BIT;
		wrmsr(IA32_MISC_ENABLE, msr);
	}

	/* Configure MCA and clear out pending MCEs */
	xeon_configure_mca();
}

static struct device_operations cpu_dev_ops = {
	.init = xeon_sp_core_init,
};

static const struct cpu_device_id cpu_table[] = {
	/* Skylake-SP A0/A1 CPUID 0x506f0 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_A0_A1, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP B0 CPUID 0x506f1 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_B0, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP 4 CPUID 0x50654 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_4, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};

static void set_max_turbo_freq(void)
{
	msr_t msr, perf_ctl;

	FUNC_ENTER();
	perf_ctl.hi = 0;

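	/* IA32_PERF_CTL bits 15:8 hold the requested core ratio;
	   program the highest ratio available below. */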
	/* Check for configurable TDP option */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
	FUNC_EXIT();
}

/*
 * Do essential initialization tasks before APs can be fired up
 *
 * Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	printk(BIOS_DEBUG, "%s: entry\n", __func__);

	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	if (CONFIG(HAVE_SMI_HANDLER))
		global_smi_enable();
}

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_platform_thread_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

void xeon_sp_init_cpus(struct device *dev)
{
	FUNC_ENTER();

	/*
	 * This is used in the CPU device callbacks. Other than CPU 0, the
	 * remaining CPU devices do not have chip_info populated, so the
	 * global chip_config is used as a workaround.
	 */
	chip_config = dev->chip_info;

	config_reset_cpl3_csrs();

	/* calls src/cpu/x86/mp_init.c */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(dev->link_list, &mp_ops);

	/* Update NUMA domain for all CPU devices */
	xeonsp_init_cpu_config();

	FUNC_EXIT();
}