Angel Pons | 3bd1e3d | 2020-04-05 15:47:17 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 2 | |
| 3 | #include <console/console.h> |
| 4 | #include <device/device.h> |
| 5 | #include <device/pci.h> |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 6 | #include <cpu/x86/mtrr.h> |
| 7 | #include <cpu/x86/msr.h> |
| 8 | #include <cpu/x86/lapic.h> |
| 9 | #include <cpu/x86/mp.h> |
Nico Huber | 6275e34 | 2018-11-21 00:11:35 +0100 | [diff] [blame] | 10 | #include <cpu/intel/common/common.h> |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 11 | #include <cpu/intel/microcode.h> |
| 12 | #include <cpu/intel/speedstep.h> |
| 13 | #include <cpu/intel/turbo.h> |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 14 | #include <cpu/x86/name.h> |
Kyösti Mälkki | faf20d3 | 2019-08-14 05:41:41 +0300 | [diff] [blame] | 15 | #include <cpu/intel/smm_reloc.h> |
Barnali Sarkar | 0a203d1 | 2017-05-04 18:02:17 +0530 | [diff] [blame] | 16 | #include <intelblocks/cpulib.h> |
Aaron Durbin | 93d5f40 | 2017-06-08 11:00:23 -0500 | [diff] [blame] | 17 | #include <intelblocks/fast_spi.h> |
Barnali Sarkar | 7327386 | 2017-06-13 20:22:33 +0530 | [diff] [blame] | 18 | #include <intelblocks/mp_init.h> |
Pratik Prajapati | a04aa3d | 2017-06-12 23:02:36 -0700 | [diff] [blame] | 19 | #include <intelblocks/sgx.h> |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 20 | #include <soc/cpu.h> |
| 21 | #include <soc/msr.h> |
| 22 | #include <soc/pci_devs.h> |
| 23 | #include <soc/ramstage.h> |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 24 | #include <soc/systemagent.h> |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 25 | |
Elyes HAOUAS | c338507 | 2019-03-21 15:38:06 +0100 | [diff] [blame] | 26 | #include "chip.h" |
| 27 | |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 28 | static void configure_misc(void) |
| 29 | { |
Kyösti Mälkki | d5f645c | 2019-09-28 00:20:27 +0300 | [diff] [blame] | 30 | config_t *conf = config_of_soc(); |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 31 | msr_t msr; |
| 32 | |
| 33 | msr = rdmsr(IA32_MISC_ENABLE); |
Lee Leahy | 1d14b3e | 2015-05-12 18:23:27 -0700 | [diff] [blame] | 34 | msr.lo |= (1 << 0); /* Fast String enable */ |
| 35 | msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */ |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 36 | wrmsr(IA32_MISC_ENABLE, msr); |
| 37 | |
Matt Delco | 54e9894 | 2020-03-09 12:41:09 -0700 | [diff] [blame] | 38 | /* Set EIST status */ |
| 39 | cpu_set_eist(conf->eist_enable); |
| 40 | |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 41 | /* Disable Thermal interrupts */ |
| 42 | msr.lo = 0; |
| 43 | msr.hi = 0; |
| 44 | wrmsr(IA32_THERM_INTERRUPT, msr); |
| 45 | |
| 46 | /* Enable package critical interrupt only */ |
| 47 | msr.lo = 1 << 4; |
| 48 | msr.hi = 0; |
| 49 | wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr); |
Pratik Prajapati | 79cfcde | 2016-03-08 12:34:06 -0800 | [diff] [blame] | 50 | |
Pratik Prajapati | 79cfcde | 2016-03-08 12:34:06 -0800 | [diff] [blame] | 51 | msr = rdmsr(MSR_POWER_CTL); |
| 52 | msr.lo |= (1 << 0); /* Enable Bi-directional PROCHOT as an input*/ |
Matthew Garrett | 13e7a2f | 2019-07-19 17:02:07 -0700 | [diff] [blame] | 53 | msr.lo |= (1 << 18); /* Enable Energy/Performance Bias control */ |
Cole Nelson | 63b6fea | 2018-06-15 15:51:54 -0700 | [diff] [blame] | 54 | msr.lo &= ~POWER_CTL_C1E_MASK; /* Disable C1E */ |
Pratik Prajapati | 79cfcde | 2016-03-08 12:34:06 -0800 | [diff] [blame] | 55 | msr.lo |= (1 << 23); /* Lock it */ |
| 56 | wrmsr(MSR_POWER_CTL, msr); |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 57 | } |
| 58 | |
Subrata Banik | 481b364 | 2017-05-12 11:29:43 +0530 | [diff] [blame] | 59 | static void configure_c_states(void) |
| 60 | { |
| 61 | msr_t msr; |
| 62 | |
| 63 | /* C-state Interrupt Response Latency Control 0 - package C3 latency */ |
| 64 | msr.hi = 0; |
| 65 | msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT; |
| 66 | wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr); |
| 67 | |
| 68 | /* C-state Interrupt Response Latency Control 1 - package C6/C7 short */ |
| 69 | msr.hi = 0; |
| 70 | msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT; |
| 71 | wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr); |
| 72 | |
| 73 | /* C-state Interrupt Response Latency Control 2 - package C6/C7 long */ |
| 74 | msr.hi = 0; |
| 75 | msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT; |
| 76 | wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr); |
| 77 | |
| 78 | /* C-state Interrupt Response Latency Control 3 - package C8 */ |
| 79 | msr.hi = 0; |
| 80 | msr.lo = IRTL_VALID | IRTL_1024_NS | |
| 81 | C_STATE_LATENCY_CONTROL_3_LIMIT; |
| 82 | wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr); |
| 83 | |
| 84 | /* C-state Interrupt Response Latency Control 4 - package C9 */ |
| 85 | msr.hi = 0; |
| 86 | msr.lo = IRTL_VALID | IRTL_1024_NS | |
| 87 | C_STATE_LATENCY_CONTROL_4_LIMIT; |
| 88 | wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr); |
| 89 | |
| 90 | /* C-state Interrupt Response Latency Control 5 - package C10 */ |
| 91 | msr.hi = 0; |
| 92 | msr.lo = IRTL_VALID | IRTL_1024_NS | |
| 93 | C_STATE_LATENCY_CONTROL_5_LIMIT; |
| 94 | wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr); |
| 95 | } |
| 96 | |
/* All CPUs including BSP will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	/* Enable the local CPU apics */
	enable_lapic_tpr();
	setup_lapic();

	/* Configure c-state interrupt response time */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Lock AES-NI feature-enable MSR so firmware below us can't alter it */
	set_aesni_lock();

	/* Enable ACPI Timer Emulation via MSR 0x121 */
	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Enable Turbo */
	enable_turbo();

	/* Configure Core PRMRR for SGX. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		prmrr_core_configure();
}
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 134 | |
/* Per-CPU hook run during MP init: install this CPU's relocated SMM handler. */
static void per_cpu_smm_trigger(void)
{
	smm_relocate();
}
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 140 | |
/* mp_run_on_all_cpus() callback: enable VMX in IA32_FEATURE_CONTROL. */
static void vmx_configure(void *unused)
{
	set_feature_ctrl_vmx();
}
| 145 | |
/* mp_run_on_all_cpus() callback: set the IA32_FEATURE_CONTROL lock bit. */
static void fc_lock_configure(void *unused)
{
	set_feature_ctrl_lock();
}
| 150 | |
Aaron Durbin | 5822582 | 2016-05-03 17:45:59 -0500 | [diff] [blame] | 151 | static void post_mp_init(void) |
| 152 | { |
Patrick Rudolph | be207b1 | 2019-07-26 14:22:09 +0200 | [diff] [blame] | 153 | int ret = 0; |
| 154 | |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 155 | /* Set Max Ratio */ |
Barnali Sarkar | 0a203d1 | 2017-05-04 18:02:17 +0530 | [diff] [blame] | 156 | cpu_set_max_ratio(); |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 157 | |
Aaron Durbin | 5822582 | 2016-05-03 17:45:59 -0500 | [diff] [blame] | 158 | /* |
| 159 | * Now that all APs have been relocated as well as the BSP let SMIs |
| 160 | * start flowing. |
| 161 | */ |
Kyösti Mälkki | 040c531 | 2020-05-31 20:03:11 +0300 | [diff] [blame] | 162 | global_smi_enable_no_pwrbtn(); |
Aaron Durbin | 5822582 | 2016-05-03 17:45:59 -0500 | [diff] [blame] | 163 | |
| 164 | /* Lock down the SMRAM space. */ |
Kyösti Mälkki | b490562 | 2019-07-12 08:02:35 +0300 | [diff] [blame] | 165 | if (CONFIG(HAVE_SMI_HANDLER)) |
| 166 | smm_lock(); |
Pratik Prajapati | 7a357eb | 2017-08-14 12:18:38 -0700 | [diff] [blame] | 167 | |
Patrick Rudolph | 5ec97ce | 2019-07-26 14:47:32 +0200 | [diff] [blame] | 168 | ret |= mp_run_on_all_cpus(vmx_configure, NULL); |
Matt DeVillier | 969ef10 | 2018-03-21 20:47:52 -0500 | [diff] [blame] | 169 | |
Michael Niewöhner | 7736bfc | 2019-10-22 23:05:06 +0200 | [diff] [blame] | 170 | if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE)) |
Michael Niewöhner | 6e66d7b | 2019-10-08 12:00:24 +0200 | [diff] [blame] | 171 | ret |= mp_run_on_all_cpus(sgx_configure, NULL); |
Nico Huber | 6275e34 | 2018-11-21 00:11:35 +0100 | [diff] [blame] | 172 | |
Patrick Rudolph | 5ec97ce | 2019-07-26 14:47:32 +0200 | [diff] [blame] | 173 | ret |= mp_run_on_all_cpus(fc_lock_configure, NULL); |
Patrick Rudolph | be207b1 | 2019-07-26 14:22:09 +0200 | [diff] [blame] | 174 | |
| 175 | if (ret) |
| 176 | printk(BIOS_CRIT, "CRITICAL ERROR: MP post init failed\n"); |
Aaron Durbin | 5822582 | 2016-05-03 17:45:59 -0500 | [diff] [blame] | 177 | } |
| 178 | |
/* Hooks consumed by mp_init_with_smm() to bring up the APs. */
static const struct mp_ops mp_ops = {
	/*
	 * Skip Pre MP init MTRR programming as MTRRs are mirrored from BSP,
	 * that are set prior to ramstage.
	 * Real MTRRs programming are being done after resource allocation.
	 */
	.pre_mp_init = soc_fsp_load,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};
| 194 | |
Pratik Prajapati | 9cd6a26 | 2017-08-14 13:57:46 -0700 | [diff] [blame] | 195 | void soc_init_cpus(struct bus *cpu_bus) |
Aaron Durbin | 5822582 | 2016-05-03 17:45:59 -0500 | [diff] [blame] | 196 | { |
Lee Leahy | f4c4ab9 | 2017-03-16 17:08:03 -0700 | [diff] [blame] | 197 | if (mp_init_with_smm(cpu_bus, &mp_ops)) |
Aaron Durbin | 5822582 | 2016-05-03 17:45:59 -0500 | [diff] [blame] | 198 | printk(BIOS_ERR, "MP initialization failure.\n"); |
Sumeet Pawnikar | 9d2f3de | 2016-12-22 13:48:46 +0530 | [diff] [blame] | 199 | |
| 200 | /* Thermal throttle activation offset */ |
Sumeet R Pawnikar | 360684b | 2020-06-18 15:56:11 +0530 | [diff] [blame] | 201 | configure_tcc_thermal_target(); |
Lee Leahy | b000513 | 2015-05-12 18:19:47 -0700 | [diff] [blame] | 202 | } |
Rizwan Qureshi | a7ff453 | 2015-07-23 22:40:53 +0530 | [diff] [blame] | 203 | |
| 204 | int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id) |
| 205 | { |
Robbie Zhang | 7de0317 | 2017-02-21 14:00:31 -0800 | [diff] [blame] | 206 | msr_t msr1; |
| 207 | msr_t msr2; |
| 208 | |
| 209 | /* |
| 210 | * If PRMRR/SGX is supported the FIT microcode load will set the msr |
Rizwan Qureshi | a7ff453 | 2015-07-23 22:40:53 +0530 | [diff] [blame] | 211 | * 0x08b with the Patch revision id one less than the id in the |
| 212 | * microcode binary. The PRMRR support is indicated in the MSR |
Robbie Zhang | 7de0317 | 2017-02-21 14:00:31 -0800 | [diff] [blame] | 213 | * MTRRCAP[12]. If SGX is not enabled, check and avoid reloading the |
| 214 | * same microcode during CPU initialization. If SGX is enabled, as |
| 215 | * part of SGX BIOS initialization steps, the same microcode needs to |
| 216 | * be reloaded after the core PRMRR MSRs are programmed. |
Rizwan Qureshi | a7ff453 | 2015-07-23 22:40:53 +0530 | [diff] [blame] | 217 | */ |
Robbie Zhang | 7de0317 | 2017-02-21 14:00:31 -0800 | [diff] [blame] | 218 | msr1 = rdmsr(MTRR_CAP_MSR); |
Elyes HAOUAS | f212cf3 | 2018-12-18 10:24:55 +0100 | [diff] [blame] | 219 | msr2 = rdmsr(MSR_PRMRR_PHYS_BASE); |
Robbie Zhang | 7de0317 | 2017-02-21 14:00:31 -0800 | [diff] [blame] | 220 | if (msr2.lo && (current_patch_id == new_patch_id - 1)) |
| 221 | return 0; |
| 222 | else |
Kyösti Mälkki | eadd251 | 2020-06-11 09:52:45 +0300 | [diff] [blame] | 223 | return (msr1.lo & MTRR_CAP_PRMRR) && |
Robbie Zhang | 7de0317 | 2017-02-21 14:00:31 -0800 | [diff] [blame] | 224 | (current_patch_id == new_patch_id - 1); |
Rizwan Qureshi | a7ff453 | 2015-07-23 22:40:53 +0530 | [diff] [blame] | 225 | } |