/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/msr.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/msr.h>
#include <types.h>

#include "common.h"

#define CPUID_6_ECX_EPB			(1 << 3)
#define CPUID_6_ENERGY_PERF_PREF	(1 << 10)
#define CPUID_6_HWP			(1 << 7)

/* Structured Extended Feature Flags */
#define CPUID_EXT_FEATURE_TME_SUPPORTED	(1 << 13)

void set_vmx_and_lock(void)
{
	set_feature_ctrl_vmx();
	set_feature_ctrl_lock();
}

void set_feature_ctrl_vmx_arg(bool enable)
{
	msr_t msr;
	uint32_t feature_flag;

	feature_flag = cpu_get_feature_flags_ecx();
	/* Check that VMX is supported before reading or writing the MSR. */
	if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) {
		printk(BIOS_DEBUG, "CPU doesn't support VMX; exiting\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked; ");
		printk(BIOS_DEBUG, "VMX status: %s\n", msr.lo & (1 << 2) ?
			"enabled" : "disabled");
		/* IA32_FEATURE_CONTROL locked. If we set it again we get an
		 * illegal instruction
		 */
		return;
	}

	/* The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of VMX config setting.
	 */
	msr.hi = msr.lo = 0;

	if (enable) {
		msr.lo |= (1 << 2);
		if (feature_flag & CPUID_SMX) {
			msr.lo |= (1 << 1);
			if (CONFIG(INTEL_TXT)) {
				/* Enable GetSec and all GetSec leaves */
				msr.lo |= (0xff << 8);
			}
		}
	}

	wrmsr(IA32_FEATURE_CONTROL, msr);

	printk(BIOS_DEBUG, "VMX status: %s\n",
		enable ? "enabled" : "disabled");
}

void set_feature_ctrl_vmx(void)
{
	set_feature_ctrl_vmx_arg(CONFIG(ENABLE_VMX));
}

void set_feature_ctrl_lock(void)
{
	msr_t msr;
	int lock = CONFIG(SET_IA32_FC_LOCK_BIT);
	uint32_t feature_flag = cpu_get_feature_flags_ecx();

	/* Check if VMX is supported before reading or writing the MSR */
	if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) {
		printk(BIOS_DEBUG, "Read IA32_FEATURE_CONTROL unsupported\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked\n");
		/* IA32_FEATURE_CONTROL locked. If we set it again we get an
		 * illegal instruction
		 */
		return;
	}

	if (lock) {
		/* Set lock bit */
		msr.lo |= (1 << 0);
		wrmsr(IA32_FEATURE_CONTROL, msr);
	}

	printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL status: %s\n",
		lock ? "locked" : "unlocked");
}
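
/*
 * Example (hypothetical, not part of this file): when enabling VMX and locking
 * IA32_FEATURE_CONTROL separately instead of via set_vmx_and_lock(), the order
 * below must be kept, since the MSR cannot be rewritten once the lock bit is
 * set:
 *
 *	set_feature_ctrl_vmx();		// or set_feature_ctrl_vmx_arg(true)
 *	set_feature_ctrl_lock();
 */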

/*
 * Init cppc_config in a way that's appropriate for Intel
 * processors with Enhanced Intel SpeedStep Technology.
 * NOTE: version 2 is expected to be the typical use case.
 * For now this function 'punts' on version 3 and just
 * populates the additional fields with 'unsupported'.
 */
void cpu_init_cppc_config(struct cppc_config *config, u32 version)
{
	config->version = version;

	config->entries[CPPC_HIGHEST_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 0, 8);
	config->entries[CPPC_NOMINAL_PERF] = CPPC_REG_MSR(MSR_PLATFORM_INFO, 8, 8);
	config->entries[CPPC_LOWEST_NONL_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 16, 8);
	config->entries[CPPC_LOWEST_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 24, 8);
	config->entries[CPPC_GUARANTEED_PERF] = CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 8, 8);
	config->entries[CPPC_DESIRED_PERF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 16, 8);
	config->entries[CPPC_MIN_PERF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 0, 8);
	config->entries[CPPC_MAX_PERF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 8, 8);
	config->entries[CPPC_PERF_REDUCE_TOLERANCE] = CPPC_UNSUPPORTED;
	config->entries[CPPC_TIME_WINDOW] = CPPC_UNSUPPORTED;
	config->entries[CPPC_COUNTER_WRAP] = CPPC_UNSUPPORTED;
	config->entries[CPPC_REF_PERF_COUNTER] = CPPC_REG_MSR(IA32_MPERF, 0, 64);
	config->entries[CPPC_DELIVERED_PERF_COUNTER] = CPPC_REG_MSR(IA32_APERF, 0, 64);
	config->entries[CPPC_PERF_LIMITED] = CPPC_REG_MSR(IA32_HWP_STATUS, 2, 1);
	config->entries[CPPC_ENABLE] = CPPC_REG_MSR(IA32_PM_ENABLE, 0, 1);

	if (version < 2)
		return;

	config->entries[CPPC_AUTO_SELECT] = CPPC_DWORD(1);
	config->entries[CPPC_AUTO_ACTIVITY_WINDOW] = CPPC_REG_MSR(IA32_HWP_REQUEST, 32, 10);
	config->entries[CPPC_PERF_PREF] = CPPC_REG_MSR(IA32_HWP_REQUEST, 24, 8);
	config->entries[CPPC_REF_PERF] = CPPC_UNSUPPORTED;

	if (version < 3)
		return;

	config->entries[CPPC_LOWEST_FREQ] = CPPC_UNSUPPORTED;
	config->entries[CPPC_NOMINAL_FREQ] = CPPC_UNSUPPORTED;
}
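
/*
 * Usage sketch (hypothetical, not part of this file): ACPI code that emits
 * _CPC packages could consume the table built above roughly as follows,
 * assuming acpigen_write_CPPC_package() from <acpi/acpigen.h>:
 *
 *	static struct cppc_config cppc_config;
 *
 *	cpu_init_cppc_config(&cppc_config, 2);	// version 2 is the typical case
 *	acpigen_write_CPPC_package(&cppc_config);
 */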
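
/*
 * Lock the AES-NI enable/disable state in MSR_FEATURE_CONFIG so it can no
 * longer be changed at runtime (only when SET_MSR_AESNI_LOCK_BIT is selected
 * and the CPU supports AES instructions).
 */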
void set_aesni_lock(void)
{
	msr_t msr;

	if (!CONFIG(SET_MSR_AESNI_LOCK_BIT))
		return;

	if (!(cpu_get_feature_flags_ecx() & CPUID_AES))
		return;

	/* Only run once per core as specified in the MSR datasheet */
	if (intel_ht_sibling())
		return;

	msr = rdmsr(MSR_FEATURE_CONFIG);
	if (msr.lo & AESNI_LOCK)
		return;

	msr_set(MSR_FEATURE_CONFIG, AESNI_LOCK);
}
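
/*
 * Allow Task Priority Register (TPR) updates by clearing the "TPR update
 * disable" bit in MSR_PIC_MSG_CONTROL.
 */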
void enable_lapic_tpr(void)
{
	msr_unset(MSR_PIC_MSG_CONTROL, TPR_UPDATES_DISABLE);
}
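
/* Enable Direct Cache Access (DCA type 0) if the CPU advertises DCA support. */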
void configure_dca_cap(void)
{
	if (cpu_get_feature_flags_ecx() & CPUID_DCA)
		msr_set(IA32_PLATFORM_DCA_CAP, DCA_TYPE0_EN);
}
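
/*
 * Program the IA32_ENERGY_PERF_BIAS energy policy hint; lower values favor
 * performance and higher values favor energy saving. Only the bits covered
 * by ENERGY_POLICY_MASK are used.
 */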
void set_energy_perf_bias(u8 policy)
{
	u8 epb = policy & ENERGY_POLICY_MASK;

	if (!(cpuid_ecx(6) & CPUID_6_ECX_EPB))
		return;

	msr_unset_and_set(IA32_ENERGY_PERF_BIAS, ENERGY_POLICY_MASK, epb);
	printk(BIOS_DEBUG, "cpu: energy policy set to %u\n", epb);
}

/*
 * Check energy performance preference and HWP capabilities from Thermal and
 * Power Management Leaf CPUID
 */
bool check_energy_perf_cap(void)
{
	const u32 cap = cpuid_eax(CPUID_LEAF_PM);
	if (!(cap & CPUID_6_ENERGY_PERF_PREF))
202 return false;
203 if (!(cap & CPUID_6_HWP))
204 return false;
205 return true;
206}
207
208/*
209 * Instructs the CPU to use EPP hints. This means that any energy policies set
210 * up in `set_energy_perf_bias` will be ignored afterwards.
211 */
212void enable_energy_perf_pref(void)
213{
214 msr_t msr = rdmsr(IA32_PM_ENABLE);
215 if (!(msr.lo & HWP_ENABLE)) {
216 /* Package-scoped MSR */
217 printk(BIOS_DEBUG, "HWP_ENABLE: energy-perf preference in favor of energy-perf bias\n");
218 msr_set(IA32_PM_ENABLE, HWP_ENABLE);
219 }
220}
221
222/*
223 * Set the IA32_HWP_REQUEST Energy-Performance Preference bits on the logical
224 * thread. 0 is a hint to the HWP to prefer performance, and 255 is a hint to
225 * prefer energy efficiency.
226 * This function needs to be called when HWP_ENABLE is set.
227*/
228void set_energy_perf_pref(u8 pref)
229{
230 msr_unset_and_set(IA32_HWP_REQUEST, IA32_HWP_REQUEST_EPP_MASK,
Sridhar Siricilla6552b992022-10-26 16:18:35 +0530231 (uint64_t)pref << IA32_HWP_REQUEST_EPP_SHIFT);
Cliff Huang62770772022-03-07 18:39:56 -0800232}
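
/*
 * Usage sketch (hypothetical, not part of this file): a typical caller gates
 * EPP programming on the CPUID capability check and enables HWP first, since
 * set_energy_perf_pref() only takes effect once HWP_ENABLE is set:
 *
 *	if (check_energy_perf_cap()) {
 *		enable_energy_perf_pref();
 *		set_energy_perf_pref(128);	// mid-range hint: 0 = performance, 255 = energy
 *	} else {
 *		set_energy_perf_bias(6);	// fall back to the legacy EPB hint
 *	}
 */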

bool is_tme_supported(void)
{
	struct cpuid_result cpuid_regs;

	cpuid_regs = cpuid_ext(CPUID_STRUCT_EXTENDED_FEATURE_FLAGS, 0x0);
	return (cpuid_regs.ecx & CPUID_EXT_FEATURE_TME_SUPPORTED);
}

/*
 * Get the number of address bits used by Total Memory Encryption (TME)
 *
 * Returns TME_ACTIVATE[MK_TME_KEYID_BITS] (MSR 0x982 Bits[32-35]).
 *
 * NOTE: This function should be called after the MK-TME features have been
 * configured in the MSRs according to the capabilities and platform
 * configuration. For instance, after FSP-M.
 */
static unsigned int get_tme_keyid_bits(void)
{
	msr_t msr;

	msr = rdmsr(MSR_TME_ACTIVATE);
	return msr.hi & TME_ACTIVATE_HI_KEYID_BITS_MASK;
}

unsigned int get_reserved_phys_addr_bits(void)
{
	if (!is_tme_supported())
		return 0;

	return get_tme_keyid_bits();
}
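
/*
 * Usage sketch (hypothetical, not part of this file): callers typically treat
 * the returned count as bits to carve out of the CPUID-reported physical
 * address width, e.g.
 *
 *	unsigned int phys_bits = cpuid_eax(0x80000008) & 0xff;
 *	unsigned int usable_bits = phys_bits - get_reserved_phys_addr_bits();
 *
 * because MK-TME KeyIDs occupy the uppermost physical address bits.
 */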