Angel Pons | f23ae0b | 2020-04-02 23:48:12 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Matt DeVillier | ed6fe2f | 2016-12-14 16:12:43 -0600 | [diff] [blame] | 2 | |
Furquan Shaikh | 76cedd2 | 2020-05-02 10:24:23 -0700 | [diff] [blame] | 3 | #include <acpi/acpigen.h> |
Matt DeVillier | ed6fe2f | 2016-12-14 16:12:43 -0600 | [diff] [blame] | 4 | #include <console/console.h> |
Elyes Haouas | ad65e8c | 2022-10-31 14:02:13 +0100 | [diff] [blame] | 5 | #include <cpu/cpu.h> |
Michael Niewöhner | 8b4a938 | 2020-10-11 13:00:27 +0200 | [diff] [blame] | 6 | #include <cpu/intel/msr.h> |
Cliff Huang | 6277077 | 2022-03-07 18:39:56 -0800 | [diff] [blame] | 7 | #include <cpu/intel/turbo.h> |
Elyes Haouas | deb5645 | 2022-10-07 10:06:25 +0200 | [diff] [blame] | 8 | #include <cpu/x86/msr.h> |
| 9 | #include <types.h> |
| 10 | |
Matt DeVillier | ed6fe2f | 2016-12-14 16:12:43 -0600 | [diff] [blame] | 11 | #include "common.h" |
| 12 | |
Cliff Huang | 6277077 | 2022-03-07 18:39:56 -0800 | [diff] [blame] | 13 | #define CPUID_6_ECX_EPB (1 << 3) |
| 14 | #define CPUID_6_ENGERY_PERF_PREF (1 << 10) |
| 15 | #define CPUID_6_HWP (1 << 7) |
Michael Niewöhner | 062b92e | 2020-10-20 14:27:09 +0200 | [diff] [blame] | 16 | |
Jeremy Compostella | a6a5b25 | 2023-09-07 10:08:35 -0700 | [diff] [blame] | 17 | /* Structured Extended Feature Flags */ |
| 18 | #define CPUID_EXT_FEATURE_TME_SUPPORTED (1 << 13) |
| 19 | |
/*
 * Enable VMX according to the Kconfig policy, then lock IA32_FEATURE_CONTROL.
 * The order matters: once the lock bit is set the MSR can no longer be
 * written (set_feature_ctrl_vmx_arg() bails out early on a locked MSR).
 */
void set_vmx_and_lock(void)
{
	set_feature_ctrl_vmx();
	set_feature_ctrl_lock();
}
| 25 | |
/*
 * Program the VMX enable bits in IA32_FEATURE_CONTROL.
 *
 * @param enable  true to enable VMX (and, when SMX is supported, SMX/TXT
 *                operation as well); false to leave virtualization disabled.
 *
 * Does nothing if the CPU supports neither VMX nor SMX, or if the MSR is
 * already locked (writing a locked IA32_FEATURE_CONTROL raises a #GP fault).
 * The lock bit itself is handled separately by set_feature_ctrl_lock().
 */
void set_feature_ctrl_vmx_arg(bool enable)
{
	msr_t msr;
	uint32_t feature_flag;

	feature_flag = cpu_get_feature_flags_ecx();
	/* Check that the VMX is supported before reading or writing the MSR. */
	if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) {
		printk(BIOS_DEBUG, "CPU doesn't support VMX; exiting\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	/* Bit 0 is the lock bit; bit 2 is "enable VMX outside SMX" */
	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked; ");
		printk(BIOS_DEBUG, "VMX status: %s\n", msr.lo & (1 << 2) ?
			"enabled" : "disabled");
		/* IA32_FEATURE_CONTROL locked. If we set it again we get an
		 * illegal instruction
		 */
		return;
	}

	/* The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of VMX config setting.
	 */
	msr.hi = msr.lo = 0;

	if (enable) {
		/* Bit 2: enable VMX outside SMX operation */
		msr.lo |= (1 << 2);
		if (feature_flag & CPUID_SMX) {
			/* Bit 1: enable VMX inside SMX operation */
			msr.lo |= (1 << 1);
			if (CONFIG(INTEL_TXT)) {
				/* Enable GetSec and all GetSec leaves */
				msr.lo |= (0xff << 8);
			}
		}
	}

	wrmsr(IA32_FEATURE_CONTROL, msr);

	printk(BIOS_DEBUG, "VMX status: %s\n",
		enable ? "enabled" : "disabled");
}
Angel Pons | b998fd0 | 2022-01-31 17:38:06 +0100 | [diff] [blame] | 71 | |
/* Enable or disable VMX based on the board's ENABLE_VMX Kconfig choice. */
void set_feature_ctrl_vmx(void)
{
	set_feature_ctrl_vmx_arg(CONFIG(ENABLE_VMX));
}
| 76 | |
Matt DeVillier | f9aed65 | 2018-12-15 15:57:33 -0600 | [diff] [blame] | 77 | void set_feature_ctrl_lock(void) |
| 78 | { |
| 79 | msr_t msr; |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 80 | int lock = CONFIG(SET_IA32_FC_LOCK_BIT); |
Elyes HAOUAS | 909870a | 2019-01-07 20:39:14 +0100 | [diff] [blame] | 81 | uint32_t feature_flag = cpu_get_feature_flags_ecx(); |
| 82 | |
| 83 | /* Check if VMX is supported before reading or writing the MSR */ |
| 84 | if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) { |
| 85 | printk(BIOS_DEBUG, "Read IA32_FEATURE_CONTROL unsupported\n"); |
| 86 | return; |
| 87 | } |
Matt DeVillier | f9aed65 | 2018-12-15 15:57:33 -0600 | [diff] [blame] | 88 | |
| 89 | msr = rdmsr(IA32_FEATURE_CONTROL); |
| 90 | |
| 91 | if (msr.lo & (1 << 0)) { |
Matt DeVillier | 460c2c2 | 2019-02-20 12:25:44 -0600 | [diff] [blame] | 92 | printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked\n"); |
Matt DeVillier | f9aed65 | 2018-12-15 15:57:33 -0600 | [diff] [blame] | 93 | /* IA32_FEATURE_CONTROL locked. If we set it again we get an |
| 94 | * illegal instruction |
| 95 | */ |
| 96 | return; |
| 97 | } |
| 98 | |
Matt DeVillier | ed6fe2f | 2016-12-14 16:12:43 -0600 | [diff] [blame] | 99 | if (lock) { |
| 100 | /* Set lock bit */ |
| 101 | msr.lo |= (1 << 0); |
| 102 | wrmsr(IA32_FEATURE_CONTROL, msr); |
| 103 | } |
| 104 | |
Matt DeVillier | f9aed65 | 2018-12-15 15:57:33 -0600 | [diff] [blame] | 105 | printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL status: %s\n", |
Matt DeVillier | ed6fe2f | 2016-12-14 16:12:43 -0600 | [diff] [blame] | 106 | lock ? "locked" : "unlocked"); |
| 107 | } |
Matt Delco | 9557a34 | 2018-08-13 13:49:02 -0700 | [diff] [blame] | 108 | |
| 109 | /* |
| 110 | * Init cppc_config in a way that's appropriate for Intel |
| 111 | * processors with Intel Enhanced Speed Step Technology. |
| 112 | * NOTE: version 2 is expected to be the typical use case. |
| 113 | * For now this function 'punts' on version 3 and just |
| 114 | * populates the additional fields with 'unsupported'. |
| 115 | */ |
void cpu_init_cppc_config(struct cppc_config *config, u32 version)
{
	config->version = version;

	/*
	 * Version 1 entries: map the ACPI _CPC performance fields onto the
	 * HWP MSRs (IA32_HWP_CAPABILITIES/REQUEST/STATUS) and the APERF/MPERF
	 * counters. CPPC_REG_MSR(msr, shift, width) describes a bitfield.
	 */
	config->entries[CPPC_HIGHEST_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 0, 8);
	config->entries[CPPC_NOMINAL_PERF] = CPPC_REG_MSR(MSR_PLATFORM_INFO, 8, 8);
	config->entries[CPPC_LOWEST_NONL_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 16, 8);
	config->entries[CPPC_LOWEST_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 24, 8);
	config->entries[CPPC_GUARANTEED_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 8, 8);
	config->entries[CPPC_DESIRED_PERF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 16, 8);
	config->entries[CPPC_MIN_PERF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 0, 8);
	config->entries[CPPC_MAX_PERF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 8, 8);
	config->entries[CPPC_PERF_REDUCE_TOLERANCE] = CPPC_UNSUPPORTED;
	config->entries[CPPC_TIME_WINDOW] = CPPC_UNSUPPORTED;
	config->entries[CPPC_COUNTER_WRAP] = CPPC_UNSUPPORTED;
	config->entries[CPPC_REF_PERF_COUNTER] = CPPC_REG_MSR(IA32_MPERF, 0, 64);
	config->entries[CPPC_DELIVERED_PERF_COUNTER] = CPPC_REG_MSR(IA32_APERF, 0, 64);
	config->entries[CPPC_PERF_LIMITED] = CPPC_REG_MSR(IA32_HWP_STATUS, 2, 1);
	config->entries[CPPC_ENABLE] = CPPC_REG_MSR(IA32_PM_ENABLE, 0, 1);

	if (version < 2)
		return;

	/* Version 2 additions: autonomous-selection controls and EPP */
	config->entries[CPPC_AUTO_SELECT] = CPPC_DWORD(1);
	config->entries[CPPC_AUTO_ACTIVITY_WINDOW] = CPPC_REG_MSR(IA32_HWP_REQUEST, 32, 10);
	config->entries[CPPC_PERF_PREF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 24, 8);
	config->entries[CPPC_REF_PERF] = CPPC_UNSUPPORTED;

	if (version < 3)
		return;

	/* Version 3 additions: 'punt' and report frequencies as unsupported */
	config->entries[CPPC_LOWEST_FREQ] = CPPC_UNSUPPORTED;
	config->entries[CPPC_NOMINAL_FREQ] = CPPC_UNSUPPORTED;
}
Michael Niewöhner | 8b4a938 | 2020-10-11 13:00:27 +0200 | [diff] [blame] | 150 | |
Michael Niewöhner | 8b4a938 | 2020-10-11 13:00:27 +0200 | [diff] [blame] | 151 | void set_aesni_lock(void) |
| 152 | { |
| 153 | msr_t msr; |
| 154 | |
Michael Niewöhner | 2ffd219 | 2020-10-11 16:59:13 +0200 | [diff] [blame] | 155 | if (!CONFIG(SET_MSR_AESNI_LOCK_BIT)) |
| 156 | return; |
| 157 | |
Patrick Rudolph | 3e69c0a | 2020-10-21 19:00:04 +0200 | [diff] [blame] | 158 | if (!(cpu_get_feature_flags_ecx() & CPUID_AES)) |
Michael Niewöhner | 469a99b | 2020-10-11 16:15:04 +0200 | [diff] [blame] | 159 | return; |
| 160 | |
Michael Niewöhner | 8b4a938 | 2020-10-11 13:00:27 +0200 | [diff] [blame] | 161 | /* Only run once per core as specified in the MSR datasheet */ |
| 162 | if (intel_ht_sibling()) |
| 163 | return; |
| 164 | |
| 165 | msr = rdmsr(MSR_FEATURE_CONFIG); |
Michael Niewöhner | 13b9149 | 2020-10-11 15:56:21 +0200 | [diff] [blame] | 166 | if (msr.lo & AESNI_LOCK) |
| 167 | return; |
| 168 | |
| 169 | msr_set(MSR_FEATURE_CONFIG, AESNI_LOCK); |
Michael Niewöhner | 8b4a938 | 2020-10-11 13:00:27 +0200 | [diff] [blame] | 170 | } |
Michael Niewöhner | 10ae1cf | 2020-10-11 14:05:32 +0200 | [diff] [blame] | 171 | |
/*
 * Clear TPR_UPDATES_DISABLE in MSR_PIC_MSG_CONTROL so that task-priority
 * register updates are not suppressed.
 */
void enable_lapic_tpr(void)
{
	msr_unset(MSR_PIC_MSG_CONTROL, TPR_UPDATES_DISABLE);
}
| 176 | |
| 177 | void configure_dca_cap(void) |
| 178 | { |
Michael Niewöhner | 062b92e | 2020-10-20 14:27:09 +0200 | [diff] [blame] | 179 | if (cpu_get_feature_flags_ecx() & CPUID_DCA) |
| 180 | msr_set(IA32_PLATFORM_DCA_CAP, DCA_TYPE0_EN); |
Michael Niewöhner | 10ae1cf | 2020-10-11 14:05:32 +0200 | [diff] [blame] | 181 | } |
| 182 | |
| 183 | void set_energy_perf_bias(u8 policy) |
| 184 | { |
Michael Niewöhner | 062b92e | 2020-10-20 14:27:09 +0200 | [diff] [blame] | 185 | u8 epb = policy & ENERGY_POLICY_MASK; |
Michael Niewöhner | 10ae1cf | 2020-10-11 14:05:32 +0200 | [diff] [blame] | 186 | |
Michael Niewöhner | 062b92e | 2020-10-20 14:27:09 +0200 | [diff] [blame] | 187 | if (!(cpuid_ecx(6) & CPUID_6_ECX_EPB)) |
Michael Niewöhner | 10ae1cf | 2020-10-11 14:05:32 +0200 | [diff] [blame] | 188 | return; |
| 189 | |
Michael Niewöhner | 062b92e | 2020-10-20 14:27:09 +0200 | [diff] [blame] | 190 | msr_unset_and_set(IA32_ENERGY_PERF_BIAS, ENERGY_POLICY_MASK, epb); |
| 191 | printk(BIOS_DEBUG, "cpu: energy policy set to %u\n", epb); |
Michael Niewöhner | 10ae1cf | 2020-10-11 14:05:32 +0200 | [diff] [blame] | 192 | } |
Cliff Huang | 6277077 | 2022-03-07 18:39:56 -0800 | [diff] [blame] | 193 | |
| 194 | /* |
| 195 | * Check energy performance preference and HWP capabilities from Thermal and |
| 196 | * Power Management Leaf CPUID |
| 197 | */ |
| 198 | bool check_energy_perf_cap(void) |
| 199 | { |
| 200 | const u32 cap = cpuid_eax(CPUID_LEAF_PM); |
| 201 | if (!(cap & CPUID_6_ENGERY_PERF_PREF)) |
| 202 | return false; |
| 203 | if (!(cap & CPUID_6_HWP)) |
| 204 | return false; |
| 205 | return true; |
| 206 | } |
| 207 | |
| 208 | /* |
| 209 | * Instructs the CPU to use EPP hints. This means that any energy policies set |
| 210 | * up in `set_energy_perf_bias` will be ignored afterwards. |
| 211 | */ |
| 212 | void enable_energy_perf_pref(void) |
| 213 | { |
| 214 | msr_t msr = rdmsr(IA32_PM_ENABLE); |
| 215 | if (!(msr.lo & HWP_ENABLE)) { |
| 216 | /* Package-scoped MSR */ |
| 217 | printk(BIOS_DEBUG, "HWP_ENABLE: energy-perf preference in favor of energy-perf bias\n"); |
| 218 | msr_set(IA32_PM_ENABLE, HWP_ENABLE); |
| 219 | } |
| 220 | } |
| 221 | |
| 222 | /* |
| 223 | * Set the IA32_HWP_REQUEST Energy-Performance Preference bits on the logical |
| 224 | * thread. 0 is a hint to the HWP to prefer performance, and 255 is a hint to |
| 225 | * prefer energy efficiency. |
| 226 | * This function needs to be called when HWP_ENABLE is set. |
| 227 | */ |
void set_energy_perf_pref(u8 pref)
{
	/* Replace only the EPP bitfield of this thread's IA32_HWP_REQUEST */
	msr_unset_and_set(IA32_HWP_REQUEST, IA32_HWP_REQUEST_EPP_MASK,
		(uint64_t)pref << IA32_HWP_REQUEST_EPP_SHIFT);
}
Jeremy Compostella | a6a5b25 | 2023-09-07 10:08:35 -0700 | [diff] [blame] | 233 | |
| 234 | bool is_tme_supported(void) |
| 235 | { |
| 236 | struct cpuid_result cpuid_regs; |
| 237 | |
| 238 | cpuid_regs = cpuid_ext(CPUID_STRUCT_EXTENDED_FEATURE_FLAGS, 0x0); |
| 239 | return (cpuid_regs.ecx & CPUID_EXT_FEATURE_TME_SUPPORTED); |
| 240 | } |
Jeremy Compostella | 1eff77b | 2023-09-07 10:33:30 -0700 | [diff] [blame] | 241 | |
| 242 | /* |
| 243 | * Get number of address bits used by Total Memory Encryption (TME) |
| 244 | * |
| 245 | * Returns TME_ACTIVATE[MK_TME_KEYID_BITS] (MSR 0x982 Bits[32-35]). |
| 246 | * |
| 247 | * NOTE: This function should be called after MK-TME features has been |
| 248 | * configured in the MSRs according to the capabilities and platform |
| 249 | * configuration. For instance, after FSP-M. |
| 250 | */ |
static unsigned int get_tme_keyid_bits(void)
{
	msr_t msr;

	/* MK_TME_KEYID_BITS live in the high dword of TME_ACTIVATE */
	msr = rdmsr(MSR_TME_ACTIVATE);
	return msr.hi & TME_ACTIVATE_HI_KEYID_BITS_MASK;
}
| 258 | |
/*
 * Number of physical address bits reserved for MK-TME key IDs, or 0 when
 * TME is not supported. See the note above get_tme_keyid_bits() about when
 * it is valid to read TME_ACTIVATE.
 */
unsigned int get_reserved_phys_addr_bits(void)
{
	return is_tme_supported() ? get_tme_keyid_bits() : 0;
}