/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <device/device.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/speedstep.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/name.h>
#include <delay.h>
#include <northbridge/intel/haswell/haswell.h>
#include <southbridge/intel/lynxpoint/pch.h>
#include <cpu/intel/common/common.h>
#include <types.h>
#include "haswell.h"
#include "chip.h"

/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
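/*
 * The values below appear to follow the RAPL time-window encoding described in
 * the Intel SDM: bits [4:0] hold an exponent Y and bits [6:5] a fraction Z,
 * giving a window of 2^Y * (1 + Z/4) time units (1/1024 s with the default
 * time unit). Example: 0x2c -> Y = 12, Z = 1 -> 4096 * 1.25 / 1024 = 5 s.
 */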
static const u8 power_limit_time_sec_to_msr[] = {
	[0] = 0x00,
	[1] = 0x0a,
	[2] = 0x0b,
	[3] = 0x4b,
	[4] = 0x0c,
	[5] = 0x2c,
	[6] = 0x4c,
	[7] = 0x6c,
	[8] = 0x0d,
	[10] = 0x2d,
	[12] = 0x4d,
	[14] = 0x6d,
	[16] = 0x0e,
	[20] = 0x2e,
	[24] = 0x4e,
	[28] = 0x6e,
	[32] = 0x0f,
	[40] = 0x2f,
	[48] = 0x4f,
	[56] = 0x6f,
	[64] = 0x10,
	[80] = 0x30,
	[96] = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

/* The core 100MHz BCLK is disabled in deeper c-states. One needs to calibrate
 * the 100MHz BCLK against the 24MHz BCLK to restore the clocks properly
 * when a core is woken up. */
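/*
 * BIOS-to-PCODE mailbox handshake, as used below: write any payload to
 * BIOS_MAILBOX_DATA, then write the command with MAILBOX_RUN_BUSY set to
 * BIOS_MAILBOX_INTERFACE. PCODE clears RUN_BUSY once it has consumed the
 * command. pcode_ready() polls for that, giving up after roughly 1 ms
 * (100 iterations of 10 us).
 */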
static int pcode_ready(void)
{
	int wait_count;
	const int delay_step = 10;

	wait_count = 0;
	do {
		if (!(mchbar_read32(BIOS_MAILBOX_INTERFACE) & MAILBOX_RUN_BUSY))
			return 0;
		wait_count += delay_step;
		udelay(delay_step);
	} while (wait_count < 1000);

	return -1;
}

static void calibrate_24mhz_bclk(void)
{
	int err_code;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
		return;
	}

	/* A non-zero value initiates the PCODE calibration. */
	mchbar_write32(BIOS_MAILBOX_DATA, ~0);
	mchbar_write32(BIOS_MAILBOX_INTERFACE,
			MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL);

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
		return;
	}

	err_code = mchbar_read32(BIOS_MAILBOX_INTERFACE) & 0xff;

	printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration response: %d\n",
	       err_code);

	/* Read the calibrated value. */
	mchbar_write32(BIOS_MAILBOX_INTERFACE,
			MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION);

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on read.\n");
		return;
	}

	printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration value: 0x%08x\n",
	       mchbar_read32(BIOS_MAILBOX_DATA));
}

static u32 pcode_mailbox_read(u32 command)
{
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
		return 0;
	}

	/* Send command and start transaction */
	mchbar_write32(BIOS_MAILBOX_INTERFACE, command | MAILBOX_RUN_BUSY);

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
		return 0;
	}

	/* Read mailbox */
	return mchbar_read32(BIOS_MAILBOX_DATA);
}

static int pcode_mailbox_write(u32 command, u32 data)
{
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
		return -1;
	}

	mchbar_write32(BIOS_MAILBOX_DATA, data);

	/* Send command and start transaction */
	mchbar_write32(BIOS_MAILBOX_INTERFACE, command | MAILBOX_RUN_BUSY);

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
		return -1;
	}

	return 0;
}

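/*
 * Cached pointer to the CPU cluster device; set in mp_init_cpus() below so
 * that initialize_vr_config() can reach the chip_info configuration without
 * walking the devicetree.
 */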
static struct device *cpu_cluster;

static void initialize_vr_config(void)
{
	struct cpu_vr_config vr_config = { 0 };
	msr_t msr;

	/* The devicetree must place the cpu_cluster device below chip cpu/intel/haswell. */
	const struct cpu_intel_haswell_config *conf = cpu_cluster->chip_info;
	vr_config = conf->vr_config;

	printk(BIOS_DEBUG, "Initializing VR config.\n");

	/* Configure VR_CURRENT_CONFIG. */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	/* Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only valid
	 * on ULT systems. */
	msr.hi &= 0xc0000000;
	msr.hi |= (0x01 << (52 - 32)); /* PSI3 threshold -  1A. */
	msr.hi |= (0x05 << (42 - 32)); /* PSI2 threshold -  5A. */
	msr.hi |= (0x14 << (32 - 32)); /* PSI1 threshold - 20A. */

	if (haswell_is_ult())
		msr.hi |= (1 << (62 - 32)); /* Enable PSI4 */
	/* Leave the max instantaneous current limit (12:0) to default. */
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

	/* Configure VR_MISC_CONFIG MSR. */
	msr = rdmsr(MSR_VR_MISC_CONFIG);
	/* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format. */
	msr.hi &= ~(0x3ff << (40 - 32));
	msr.hi |= (0x200 << (40 - 32)); /* 1.0 */
	/* Set IOUT_OFFSET to 0. */
	msr.hi &= ~0xff;
	/* Set exit ramp rate to fast. */
	msr.hi |= (1 << (50 - 32));
	/* Set entry ramp rate to slow. */
	msr.hi &= ~(1 << (51 - 32));
	/* Enable decay mode on C-state entry. */
	msr.hi |= (1 << (52 - 32));
	/* Set the slow ramp rate */
	if (haswell_is_ult()) {
		msr.hi &= ~(0x3 << (53 - 32));
		/* Configure the C-state exit ramp rate. */
		if (vr_config.slow_ramp_rate_enable) {
			/* Configured slow ramp rate. */
			msr.hi |= ((vr_config.slow_ramp_rate_set & 0x3) << (53 - 32));
			/* Set exit ramp rate to slow. */
			msr.hi &= ~(1 << (50 - 32));
		} else {
			/* Fast ramp rate / 4. */
			msr.hi |= (1 << (53 - 32));
		}
	}
	/* Set MIN_VID (31:24) to allow CPU to have full control. */
	msr.lo &= ~0xff000000;
	msr.lo |= (vr_config.cpu_min_vid & 0xff) << 24;
	wrmsr(MSR_VR_MISC_CONFIG, msr);

	/* Configure VR_MISC_CONFIG2 MSR. */
	if (!haswell_is_ult())
		return;

	msr = rdmsr(MSR_VR_MISC_CONFIG2);
	msr.lo &= ~0xffff;
	/* Allow CPU to control minimum voltage completely (15:8) and
	 * set the fast ramp voltage in 10mV steps. */
	if (cpu_family_model() == BROADWELL_FAMILY_ULT)
		msr.lo |= 0x006a; /* 1.56V */
	else
		msr.lo |= 0x006f; /* 1.60V */
	wrmsr(MSR_VR_MISC_CONFIG2, msr);

	/* Set C9/C10 VCC Min */
	pcode_mailbox_write(MAILBOX_BIOS_CMD_WRITE_C9C10_VOLTAGE, 0x1f1f);
}

static void configure_pch_power_sharing(void)
{
	u32 pch_power, pch_power_ext, pmsync, pmsync2;
	int i;

	/* Read PCH Power levels from PCODE */
	pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
	pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);

	printk(BIOS_INFO, "PCH Power: PCODE Levels 0x%08x 0x%08x\n",
	       pch_power, pch_power_ext);

	pmsync = RCBA32(PMSYNC_CONFIG);
	pmsync2 = RCBA32(PMSYNC_CONFIG2);

	/* Program PMSYNC_TPR_CONFIG PCH power limit values
	 * pmsync[0:4] = mailbox[0:5]
	 * pmsync[8:12] = mailbox[6:11]
	 * pmsync[16:20] = mailbox[12:17]
	 */
	for (i = 0; i < 3; i++) {
		u32 level = pch_power & 0x3f;
		pch_power >>= 6;
		pmsync &= ~(0x1f << (i * 8));
		pmsync |= (level & 0x1f) << (i * 8);
	}
	RCBA32(PMSYNC_CONFIG) = pmsync;

	/* Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
	 * pmsync2[0:4] = mailbox[23:18]
	 * pmsync2[8:12] = mailbox_ext[6:11]
	 * pmsync2[16:20] = mailbox_ext[12:17]
	 * pmsync2[24:28] = mailbox_ext[18:22]
	 */
	pmsync2 &= ~0x1f;
	pmsync2 |= pch_power & 0x1f;

	for (i = 1; i < 4; i++) {
		u32 level = pch_power_ext & 0x3f;
		pch_power_ext >>= 6;
		pmsync2 &= ~(0x1f << (i * 8));
		pmsync2 |= (level & 0x1f) << (i * 8);
	}
	RCBA32(PMSYNC_CONFIG2) = pmsync2;
}

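/*
 * Returns the number of configurable TDP levels reported by the CPU in
 * MSR_PLATFORM_INFO; 0 presumably means only the nominal TDP is available.
 */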
int cpu_config_tdp_levels(void)
{
	msr_t platform_info;

	/* Bits 34:33 indicate how many levels are supported */
	platform_info = rdmsr(MSR_PLATFORM_INFO);
	return (platform_info.hi >> 1) & 3;
}

/*
 * Configure processor power limits if possible.
 * This must be done AFTER BIOS_RESET_CPL has been set.
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = rdmsr(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned int power_unit;
	unsigned int tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr) - 1;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/* Get units */
	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
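	/*
	 * The power unit exponent N lives in bits 3:0; power_unit becomes 2^N
	 * fractions per watt (2 << (N - 1) == 2^N). A typical N of 3 would give
	 * units of 1/8 W, so the raw MSR fields below are divided by power_unit
	 * to obtain watts.
	 */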
	power_unit = 2 << ((msr.lo & 0xf) - 1);

	/* Get power defaults for this SKU */
	msr = rdmsr(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	printk(BIOS_DEBUG, "CPU TDP: %u Watts\n", tdp / power_unit);

	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on server SKU */

	wrmsr(MSR_PKG_POWER_LIMIT, limit);

	/* Set power limit values in MCHBAR as well */
	mchbar_write32(MCH_PKG_POWER_LIMIT_LO, limit.lo);
	mchbar_write32(MCH_PKG_POWER_LIMIT_HI, limit.hi);

	/* Set DDR RAPL power limit by copying from MMIO to MSR */
	msr.lo = mchbar_read32(MCH_DDR_POWER_LIMIT_LO);
	msr.hi = mchbar_read32(MCH_DDR_POWER_LIMIT_HI);
	wrmsr(MSR_DDR_RAPL_LIMIT, msr);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		wrmsr(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

static void configure_c_states(void)
{
	msr_t msr = rdmsr(MSR_PLATFORM_INFO);

	const bool timed_mwait_capable = !!(msr.hi & TIMED_MWAIT_SUPPORTED);

	msr = rdmsr(MSR_PKG_CST_CONFIG_CONTROL);
	msr.lo |= (1 << 30);	// Package c-state Undemotion Enable
	msr.lo |= (1 << 29);	// Package c-state Demotion Enable
	msr.lo |= (1 << 28);	// C1 Auto Undemotion Enable
	msr.lo |= (1 << 27);	// C3 Auto Undemotion Enable
	msr.lo |= (1 << 26);	// C1 Auto Demotion Enable
	msr.lo |= (1 << 25);	// C3 Auto Demotion Enable
	msr.lo |= (1 << 15);	// Lock bits 15:0
	msr.lo &= ~(1 << 10);	// Disable IO MWAIT redirection

	if (timed_mwait_capable)
		msr.lo |= (1 << 31);	// Timed MWAIT Enable

	/* The deepest package c-state defaults to factory-configured value. */
	wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);

	msr = rdmsr(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	// Enable P-state HW_ALL coordination
	wrmsr(MSR_MISC_PWR_MGMT, msr);

	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	// Enable Energy Perf Bias MSR 0x1b0
	msr.lo |= (1 << 1);	// C1E Enable
	msr.lo |= (1 << 0);	// Bi-directional PROCHOT#
	wrmsr(MSR_POWER_CTL, msr);

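	/*
	 * The interrupt response time limit registers below all use the same
	 * encoding: a latency limit expressed in the selected time unit
	 * (1024 ns here), qualified by a valid bit. The per-state limits come
	 * from the C_STATE_LATENCY_CONTROL_*_LIMIT constants.
	 */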
	/* C-state Interrupt Response Latency Control 0 - package C3 latency */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr);

	/* C-state Interrupt Response Latency Control 1 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr);

	/* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr);

	/* Only Haswell ULT supports the 3-5 latency response registers */
	if (!haswell_is_ult())
		return;

	/* C-state Interrupt Response Latency Control 3 - package C8 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_3_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr);

	/* C-state Interrupt Response Latency Control 4 - package C9 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_4_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr);

	/* C-state Interrupt Response Latency Control 5 - package C10 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_5_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr);
}

static void configure_thermal_target(struct device *dev)
{
	/* The devicetree must place the cpu_cluster device below chip cpu/intel/haswell. */
	struct cpu_intel_haswell_config *conf = dev->bus->dev->chip_info;
	msr_t msr;

	/* Set TCC activation offset if supported */
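	/*
	 * The offset is expressed in degrees Celsius below Tj_max; only the
	 * lower 4 bits are honored here (bits 27:24 of MSR_TEMPERATURE_TARGET),
	 * so values above 15 are truncated.
	 */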
Aaron Durbin76c37002012-10-30 09:03:43 -0500456 msr = rdmsr(MSR_PLATFORM_INFO);
457 if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
458 msr = rdmsr(MSR_TEMPERATURE_TARGET);
459 msr.lo &= ~(0xf << 24); /* Bits 27:24 */
460 msr.lo |= (conf->tcc_offset & 0xf) << 24;
461 wrmsr(MSR_TEMPERATURE_TARGET, msr);
462 }
463}
464
465static void configure_misc(void)
466{
467 msr_t msr;
468
469 msr = rdmsr(IA32_MISC_ENABLE);
470 msr.lo |= (1 << 0); /* Fast String enable */
Lee Leahy7b5f12b92017-03-15 17:16:59 -0700471 msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
Aaron Durbin76c37002012-10-30 09:03:43 -0500472 msr.lo |= (1 << 16); /* Enhanced SpeedStep Enable */
473 wrmsr(IA32_MISC_ENABLE, msr);
474
475 /* Disable Thermal interrupts */
476 msr.lo = 0;
477 msr.hi = 0;
478 wrmsr(IA32_THERM_INTERRUPT, msr);
479
480 /* Enable package critical interrupt only */
481 msr.lo = 1 << 4;
482 msr.hi = 0;
483 wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
484}
485
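/*
 * Program IA32_PERF_CTL with the highest sustainable ratio: the maximum turbo
 * ratio from MSR_TURBO_RATIO_LIMIT bits 7:0 when turbo is enabled, otherwise
 * the nominal config-TDP ratio if the SKU has one, otherwise the maximum
 * non-turbo ratio from MSR_PLATFORM_INFO. The resulting frequency is
 * ratio * CPU_BCLK (nominally 100 MHz on these parts).
 */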
static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "CPU: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

static void configure_mca(void)
{
	msr_t msr;
	int i;
	const unsigned int num_banks = mca_get_bank_count();

	/* Enable all error reporting */
	msr.lo = msr.hi = ~0;
	for (i = 0; i < num_banks; i++)
		wrmsr(IA32_MC_CTL(i), msr);

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_clear_status();
}

/* All CPUs including BSP will run the following function. */
static void cpu_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	configure_mca();

	enable_lapic_tpr();

	/* Set virtualization based on Kconfig option */
	set_vmx_and_lock();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	configure_thermal_target(cpu);

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Enable Turbo */
	enable_turbo();
}

/* MP initialization support. */
static const void *microcode_patch;

static void pre_mp_init(void)
{
	/* Setup MTRRs based on physical address size. */
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();

	initialize_vr_config();

	if (!haswell_is_ult())
		return;

	calibrate_24mhz_bclk();
	configure_pch_power_sharing();
}

static int get_cpu_count(void)
{
	msr_t msr;
	unsigned int num_threads;
	unsigned int num_cores;

	msr = rdmsr(MSR_CORE_THREAD_COUNT);
	num_threads = (msr.lo >> 0) & 0xffff;
	num_cores = (msr.lo >> 16) & 0xffff;
	printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
	       num_cores, num_threads);

	return num_threads;
}

static void get_microcode_info(const void **microcode, int *parallel)
{
	microcode_patch = intel_microcode_find();
	*microcode = microcode_patch;
	*parallel = 1;
}

static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();

	/* After SMM relocation a 2nd microcode load is required. */
	intel_microcode_load_unlocked(microcode_patch);
}

static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_ratio();

	/* Now that all APs as well as the BSP have been relocated, let SMIs
	 * start flowing. */
	global_smi_enable();

	/* Lock down the SMRAM space. */
	smm_lock();
}

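/*
 * Callback order under coreboot's MP init framework, roughly: pre_mp_init()
 * runs on the BSP before the APs are brought up, get_cpu_count() and
 * get_microcode_info() feed the bring-up, the SMM hooks set up and relocate
 * SMM for each CPU, and post_mp_init() runs once every CPU is done (enabling
 * SMIs and locking SMRAM here).
 */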
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	cpu_cluster = cpu_bus->dev;
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}

static struct device_operations cpu_dev_ops = {
	.init = cpu_core_init,
};

static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, CPUID_HASWELL_A0 },
	{ X86_VENDOR_INTEL, CPUID_HASWELL_B0 },
	{ X86_VENDOR_INTEL, CPUID_HASWELL_C0 },
	{ X86_VENDOR_INTEL, CPUID_HASWELL_ULT_B0 },
	{ X86_VENDOR_INTEL, CPUID_HASWELL_ULT_C0 },
	{ X86_VENDOR_INTEL, CPUID_CRYSTALWELL_B0 },
	{ X86_VENDOR_INTEL, CPUID_CRYSTALWELL_C0 },
	{ X86_VENDOR_INTEL, CPUID_BROADWELL_C0 },
	{ X86_VENDOR_INTEL, CPUID_BROADWELL_ULT_C0 },
	{ X86_VENDOR_INTEL, CPUID_BROADWELL_ULT_D0 },
	{ X86_VENDOR_INTEL, CPUID_BROADWELL_ULT_E0 },
	{ 0, 0 },
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};