/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <device/device.h>
#include <acpi/acpi.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/lapic.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/speedstep.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/name.h>
#include <delay.h>
#include <northbridge/intel/haswell/haswell.h>
#include <southbridge/intel/lynxpoint/pch.h>
#include <cpu/intel/common/common.h>
#include "haswell.h"
#include "chip.h"

/* Intel suggested latency times in units of 1024ns. */
#define C_STATE_LATENCY_CONTROL_0_LIMIT 0x42
#define C_STATE_LATENCY_CONTROL_1_LIMIT 0x73
#define C_STATE_LATENCY_CONTROL_2_LIMIT 0x91
#define C_STATE_LATENCY_CONTROL_3_LIMIT 0xe4
#define C_STATE_LATENCY_CONTROL_4_LIMIT 0x145
#define C_STATE_LATENCY_CONTROL_5_LIMIT 0x1ef

#define C_STATE_LATENCY_MICRO_SECONDS(limit, base) \
	(((1 << ((base)*5)) * (limit)) / 1000)
#define C_STATE_LATENCY_FROM_LAT_REG(reg) \
	C_STATE_LATENCY_MICRO_SECONDS(C_STATE_LATENCY_CONTROL_ ##reg## _LIMIT, \
				      (IRTL_1024_NS >> 10))
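
/*
 * Worked example, assuming IRTL_1024_NS places the value 2 in the IRTL
 * time-unit field (so IRTL_1024_NS >> 10 == 2 and 1 << (2 * 5) == 1024 ns
 * per unit): C_STATE_LATENCY_FROM_LAT_REG(0) = 0x42 * 1024 / 1000 ~= 67 us.
 */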

/*
 * List of supported C-states in this processor. Only the ULT parts support C8,
 * C9, and C10.
 */
enum {
	C_STATE_C0,		/* 0 */
	C_STATE_C1,		/* 1 */
	C_STATE_C1E,		/* 2 */
	C_STATE_C3,		/* 3 */
	C_STATE_C6_SHORT_LAT,	/* 4 */
	C_STATE_C6_LONG_LAT,	/* 5 */
	C_STATE_C7_SHORT_LAT,	/* 6 */
	C_STATE_C7_LONG_LAT,	/* 7 */
	C_STATE_C7S_SHORT_LAT,	/* 8 */
	C_STATE_C7S_LONG_LAT,	/* 9 */
	C_STATE_C8,		/* 10 */
	C_STATE_C9,		/* 11 */
	C_STATE_C10,		/* 12 */
	NUM_C_STATES
};

#define MWAIT_RES(state, sub_state)				\
	{							\
		.addrl = (((state) << 4) | (sub_state)),	\
		.space_id = ACPI_ADDRESS_SPACE_FIXED,		\
		.bit_width = ACPI_FFIXEDHW_VENDOR_INTEL,	\
		.bit_offset = ACPI_FFIXEDHW_CLASS_MWAIT,	\
		.access_size = ACPI_FFIXEDHW_FLAG_HW_COORD,	\
	}
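
/*
 * The .addrl value is the MWAIT hint ((C-state << 4) | sub-state) that the OS
 * passes in EAX when entering the state via the FFixedHW _CST entries
 * generated from the table below.
 */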

static acpi_cstate_t cstate_map[NUM_C_STATES] = {
	[C_STATE_C0] = { },
	[C_STATE_C1] = {
		.latency = 0,
		.power = 1000,
		.resource = MWAIT_RES(0, 0),
	},
	[C_STATE_C1E] = {
		.latency = 0,
		.power = 1000,
		.resource = MWAIT_RES(0, 1),
	},
	[C_STATE_C3] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(0),
		.power = 900,
		.resource = MWAIT_RES(1, 0),
	},
	[C_STATE_C6_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = 800,
		.resource = MWAIT_RES(2, 0),
	},
	[C_STATE_C6_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = 800,
		.resource = MWAIT_RES(2, 1),
	},
	[C_STATE_C7_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = 700,
		.resource = MWAIT_RES(3, 0),
	},
	[C_STATE_C7_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = 700,
		.resource = MWAIT_RES(3, 1),
	},
	[C_STATE_C7S_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = 700,
		.resource = MWAIT_RES(3, 2),
	},
	[C_STATE_C7S_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = 700,
		.resource = MWAIT_RES(3, 3),
	},
	[C_STATE_C8] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(3),
		.power = 600,
		.resource = MWAIT_RES(4, 0),
	},
	[C_STATE_C9] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(4),
		.power = 500,
		.resource = MWAIT_RES(5, 0),
	},
	[C_STATE_C10] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(5),
		.power = 400,
		.resource = MWAIT_RES(6, 0),
	},
};

/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
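/*
 * The 7-bit values below encode the PL1 time window as 2^Y * (1 + Z/4) RAPL
 * time units, with Y in bits 4:0 and Z in bits 6:5. Assuming the usual
 * ~1/1024 s time unit, this reproduces the second values used as indices,
 * e.g. 0x4b -> 2^11 * 1.5 / 1024 = 3 s.
 */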
static const u8 power_limit_time_sec_to_msr[] = {
	[0] = 0x00,
	[1] = 0x0a,
	[2] = 0x0b,
	[3] = 0x4b,
	[4] = 0x0c,
	[5] = 0x2c,
	[6] = 0x4c,
	[7] = 0x6c,
	[8] = 0x0d,
	[10] = 0x2d,
	[12] = 0x4d,
	[14] = 0x6d,
	[16] = 0x0e,
	[20] = 0x2e,
	[24] = 0x4e,
	[28] = 0x6e,
	[32] = 0x0f,
	[40] = 0x2f,
	[48] = 0x4f,
	[56] = 0x6f,
	[64] = 0x10,
	[80] = 0x30,
	[96] = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

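/* CPUID leaf 1 EAX with the stepping (bits 3:0) masked off, leaving the
   extended family/model and family/model fields. */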
int haswell_family_model(void)
{
	return cpuid_eax(1) & 0x0fff0ff0;
}

int haswell_stepping(void)
{
	return cpuid_eax(1) & 0xf;
}

/* Dynamically determine if the part is ULT. */
int haswell_is_ult(void)
{
	static int ult = -1;

	if (ult < 0)
		ult = !!(haswell_family_model() == HASWELL_FAMILY_ULT);

	return ult;
}

/* The core 100MHz BCLK is disabled in deeper C-states. One needs to calibrate
 * the 100MHz BCLK against the 24MHz clock to restore the clocks properly
 * when a core is woken up. */
static int pcode_ready(void)
{
	int wait_count;
	const int delay_step = 10;

	wait_count = 0;
	do {
		if (!(MCHBAR32(BIOS_MAILBOX_INTERFACE) & MAILBOX_RUN_BUSY))
			return 0;
		wait_count += delay_step;
		udelay(delay_step);
	} while (wait_count < 1000);

	return -1;
}

static void calibrate_24mhz_bclk(void)
{
	int err_code;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
		return;
	}

	/* A non-zero value initiates the PCODE calibration. */
	MCHBAR32(BIOS_MAILBOX_DATA) = ~0;
	MCHBAR32(BIOS_MAILBOX_INTERFACE) =
		MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
		return;
	}

	err_code = MCHBAR32(BIOS_MAILBOX_INTERFACE) & 0xff;

	printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration response: %d\n",
	       err_code);

	/* Read the calibrated value. */
	MCHBAR32(BIOS_MAILBOX_INTERFACE) =
		MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on read.\n");
		return;
	}

	printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration value: 0x%08x\n",
	       MCHBAR32(BIOS_MAILBOX_DATA));
}

static u32 pcode_mailbox_read(u32 command)
{
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
		return 0;
	}

	/* Send command and start transaction */
	MCHBAR32(BIOS_MAILBOX_INTERFACE) = command | MAILBOX_RUN_BUSY;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
		return 0;
	}

	/* Read mailbox */
	return MCHBAR32(BIOS_MAILBOX_DATA);
}

static void initialize_vr_config(void)
{
	msr_t msr;

	printk(BIOS_DEBUG, "Initializing VR config.\n");

	/* Configure VR_CURRENT_CONFIG. */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	/* Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only valid
	 * on ULT systems. */
	msr.hi &= 0xc0000000;
	msr.hi |= (0x01 << (52 - 32)); /* PSI3 threshold - 1A. */
	msr.hi |= (0x05 << (42 - 32)); /* PSI2 threshold - 5A. */
	msr.hi |= (0x0f << (32 - 32)); /* PSI1 threshold - 15A. */

	if (haswell_is_ult())
		msr.hi |= (1 << (62 - 32)); /* Enable PSI4 */
	/* Leave the max instantaneous current limit (12:0) to default. */
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

	/* Configure VR_MISC_CONFIG MSR. */
	msr = rdmsr(MSR_VR_MISC_CONFIG);
	/* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format. */
	msr.hi &= ~(0x3ff << (40 - 32));
	msr.hi |= (0x200 << (40 - 32)); /* 1.0 */
	/* Set IOUT_OFFSET to 0. */
	msr.hi &= ~0xff;
	/* Set exit ramp rate to fast. */
	msr.hi |= (1 << (50 - 32));
	/* Set entry ramp rate to slow. */
	msr.hi &= ~(1 << (51 - 32));
	/* Enable decay mode on C-state entry. */
	msr.hi |= (1 << (52 - 32));
	if (haswell_is_ult()) {
		/* Set the slow ramp rate to be fast ramp rate / 4 */
		msr.hi &= ~(0x3 << (53 - 32));
		msr.hi |= (0x01 << (53 - 32));
	}
	/* Set MIN_VID (31:24) to allow CPU to have full control. */
	msr.lo &= ~0xff000000;
	wrmsr(MSR_VR_MISC_CONFIG, msr);

	/* Configure VR_MISC_CONFIG2 MSR. */
	if (haswell_is_ult()) {
		msr = rdmsr(MSR_VR_MISC_CONFIG2);
		msr.lo &= ~0xffff;
		/* Allow CPU to control minimum voltage completely (15:8) and
		 * set the fast ramp voltage to 1110mV (0x6f in 10mV steps). */
		msr.lo |= 0x006f;
		wrmsr(MSR_VR_MISC_CONFIG2, msr);
	}
}

static void configure_pch_power_sharing(void)
{
	u32 pch_power, pch_power_ext, pmsync, pmsync2;
	int i;

	/* Read PCH Power levels from PCODE */
	pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
	pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);

	printk(BIOS_INFO, "PCH Power: PCODE Levels 0x%08x 0x%08x\n",
	       pch_power, pch_power_ext);

	pmsync = RCBA32(PMSYNC_CONFIG);
	pmsync2 = RCBA32(PMSYNC_CONFIG2);

	/* Program PMSYNC_TPR_CONFIG PCH power limit values
	 * pmsync[0:4] = mailbox[0:5]
	 * pmsync[8:12] = mailbox[6:11]
	 * pmsync[16:20] = mailbox[12:17]
	 */
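	/* Each mailbox field is 6 bits wide, but only its low 5 bits fit the
	   5-bit PMSYNC fields, so the top bit is dropped below. */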
	for (i = 0; i < 3; i++) {
		u32 level = pch_power & 0x3f;
		pch_power >>= 6;
		pmsync &= ~(0x1f << (i * 8));
		pmsync |= (level & 0x1f) << (i * 8);
	}
	RCBA32(PMSYNC_CONFIG) = pmsync;

	/* Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
	 * pmsync2[0:4] = mailbox[23:18]
	 * pmsync2[8:12] = mailbox_ext[6:11]
	 * pmsync2[16:20] = mailbox_ext[12:17]
	 * pmsync2[24:28] = mailbox_ext[18:22]
	 */
	pmsync2 &= ~0x1f;
	pmsync2 |= pch_power & 0x1f;

	for (i = 1; i < 4; i++) {
		u32 level = pch_power_ext & 0x3f;
		pch_power_ext >>= 6;
		pmsync2 &= ~(0x1f << (i * 8));
		pmsync2 |= (level & 0x1f) << (i * 8);
	}
	RCBA32(PMSYNC_CONFIG2) = pmsync2;
}

int cpu_config_tdp_levels(void)
{
	msr_t platform_info;

	/* Bits 34:33 indicate how many levels are supported */
	platform_info = rdmsr(MSR_PLATFORM_INFO);
	return (platform_info.hi >> 1) & 3;
}

/*
 * Configure processor power limits if possible.
 * This must be done AFTER BIOS_RESET_CPL has been set.
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = rdmsr(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned int power_unit;
	unsigned int tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr) - 1;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/* Get units */
	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 2 << ((msr.lo & 0xf) - 1);
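	/* With the common unit-field value of 3 this yields power_unit = 8,
	   i.e. the MSR_PKG_POWER_SKU fields below are in 1/8 W steps. */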

	/* Get power defaults for this SKU */
	msr = rdmsr(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	printk(BIOS_DEBUG, "CPU TDP: %u Watts\n", tdp / power_unit);

	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on server SKU */

	wrmsr(MSR_PKG_POWER_LIMIT, limit);

	/* Set power limit values in MCHBAR as well */
	MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = limit.lo;
	MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = limit.hi;

	/* Set DDR RAPL power limit by copying from MMIO to MSR */
	msr.lo = MCHBAR32(MCH_DDR_POWER_LIMIT_LO);
	msr.hi = MCHBAR32(MCH_DDR_POWER_LIMIT_HI);
	wrmsr(MSR_DDR_RAPL_LIMIT, msr);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		wrmsr(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

static void configure_c_states(void)
{
	msr_t msr;

	msr = rdmsr(MSR_PKG_CST_CONFIG_CONTROL);
	msr.lo |= (1 << 30);	// Package c-state Undemotion Enable
	msr.lo |= (1 << 29);	// Package c-state Demotion Enable
	msr.lo |= (1 << 28);	// C1 Auto Undemotion Enable
	msr.lo |= (1 << 27);	// C3 Auto Undemotion Enable
	msr.lo |= (1 << 26);	// C1 Auto Demotion Enable
	msr.lo |= (1 << 25);	// C3 Auto Demotion Enable
	msr.lo &= ~(1 << 10);	// Disable IO MWAIT redirection
	/* The deepest package c-state defaults to factory-configured value. */
	wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);

	msr = rdmsr(MSR_PMG_IO_CAPTURE_BASE);
	msr.lo &= ~0xffff;
	msr.lo |= (get_pmbase() + 0x14);	// LVL_2 base address
	/* The deepest package c-state defaults to factory-configured value. */
	wrmsr(MSR_PMG_IO_CAPTURE_BASE, msr);

	msr = rdmsr(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	// Enable P-state HW_ALL coordination
	wrmsr(MSR_MISC_PWR_MGMT, msr);

	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	// Enable Energy Perf Bias MSR 0x1b0
	msr.lo |= (1 << 1);	// C1E Enable
	msr.lo |= (1 << 0);	// Bi-directional PROCHOT#
	wrmsr(MSR_POWER_CTL, msr);

	/* C-state Interrupt Response Latency Control 0 - package C3 latency */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr);

	/* C-state Interrupt Response Latency Control 1 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr);

	/* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr);

	/* Haswell ULT only supports latency response registers 3-5. */
	if (haswell_is_ult()) {
		/* C-state Interrupt Response Latency Control 3 - package C8 */
		msr.hi = 0;
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			 C_STATE_LATENCY_CONTROL_3_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr);

		/* C-state Interrupt Response Latency Control 4 - package C9 */
		msr.hi = 0;
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			 C_STATE_LATENCY_CONTROL_4_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr);

		/* C-state Interrupt Response Latency Control 5 - package C10 */
		msr.hi = 0;
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			 C_STATE_LATENCY_CONTROL_5_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr);
	}
}

static void configure_thermal_target(void)
{
	struct cpu_intel_haswell_config *conf;
	struct device *lapic;
	msr_t msr;

	/* Find pointer to CPU configuration */
	lapic = dev_find_lapic(SPEEDSTEP_APIC_MAGIC);
	if (!lapic || !lapic->chip_info)
		return;
	conf = lapic->chip_info;

	/* Set TCC activation offset if supported */
	msr = rdmsr(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
		msr = rdmsr(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (conf->tcc_offset & 0xf) << 24;
		wrmsr(MSR_TEMPERATURE_TARGET, msr);
	}
}

static void configure_misc(void)
{
	msr_t msr;

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	/* Fast String enable */
	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	/* Enhanced SpeedStep Enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
}

static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = rdmsr(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	wrmsr(MSR_PIC_MSG_CONTROL, msr);
}

static void configure_dca_cap(void)
{
	uint32_t feature_flag;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	feature_flag = cpu_get_feature_flags_ecx();
	if (feature_flag & CPUID_DCA) {
		msr = rdmsr(IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		wrmsr(IA32_PLATFORM_DCA_CAP, msr);
	}
}

static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

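	/* The target ratio sits in IA32_PERF_CTL bits 15:8; multiplied by
	   HASWELL_BCLK (the 100MHz base clock) it gives the frequency in MHz. */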
	printk(BIOS_DEBUG, "haswell: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * HASWELL_BCLK);
}

static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;
	int ecx;

	/* Determine if energy efficient policy is supported. */
	ecx = cpuid_ecx(0x6);
	if (!(ecx & (1 << 3)))
		return;

	/* Energy Policy is bits 3:0 */
	msr = rdmsr(IA32_ENERGY_PERF_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	wrmsr(IA32_ENERGY_PERF_BIAS, msr);

	printk(BIOS_DEBUG, "haswell: energy policy set to %u\n", policy);
}

static void configure_mca(void)
{
	msr_t msr;
	int i;
	int num_banks;

	msr = rdmsr(IA32_MCG_CAP);
	num_banks = msr.lo & 0xff;
	msr.lo = msr.hi = 0;
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
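	/* Each MCA bank owns four consecutive MSRs (CTL, STATUS, ADDR, MISC),
	   so bank i's STATUS register is IA32_MC0_STATUS + i * 4. */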
	for (i = 0; i < num_banks; i++)
		wrmsr(IA32_MC0_STATUS + (i * 4), msr);
}

/* All CPUs including the BSP will run the following function. */
static void haswell_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	configure_mca();

	/* Enable the local CPU APICs */
	enable_lapic_tpr();
	setup_lapic();

	/* Set virtualization based on Kconfig option */
	set_vmx_and_lock();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	configure_thermal_target();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Set Max Ratio */
	set_max_ratio();

	/* Enable Turbo */
	enable_turbo();
}

/* MP initialization support. */
static const void *microcode_patch;

static void pre_mp_init(void)
{
	/* Set up MTRRs based on physical address size. */
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();

	initialize_vr_config();

	if (haswell_is_ult()) {
		calibrate_24mhz_bclk();
		configure_pch_power_sharing();
	}
}

static int get_cpu_count(void)
{
	msr_t msr;
	int num_threads;
	int num_cores;

	msr = rdmsr(MSR_CORE_THREAD_COUNT);
	num_threads = (msr.lo >> 0) & 0xffff;
	num_cores = (msr.lo >> 16) & 0xffff;
	printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
	       num_cores, num_threads);

	return num_threads;
}

static void get_microcode_info(const void **microcode, int *parallel)
{
	microcode_patch = intel_microcode_find();
	*microcode = microcode_patch;
	*parallel = 1;
}

static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();

	/* After SMM relocation a 2nd microcode load is required. */
	intel_microcode_load_unlocked(microcode_patch);
}

static void post_mp_init(void)
{
	/* Now that all APs, as well as the BSP, have been relocated, let SMIs
	 * start flowing. */
	global_smi_enable();

	/* Lock down the SMRAM space. */
	smm_lock();
}

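/*
 * Callbacks invoked by the MP init framework, roughly in this order:
 * pre_mp_init on the BSP, the get_* hooks to gather parameters,
 * pre_mp_smm_init and per_cpu_smm_trigger plus relocation_handler for SMM
 * relocation, then post_mp_init once all APs are up. See mp_init_with_smm()
 * for the exact sequencing.
 */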
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops))
		printk(BIOS_ERR, "MP initialization failure.\n");
}

static struct device_operations cpu_dev_ops = {
	.init = haswell_init,
};

static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x306c1 }, /* Intel Haswell 4+2 A0 */
	{ X86_VENDOR_INTEL, 0x306c2 }, /* Intel Haswell 4+2 B0 */
	{ X86_VENDOR_INTEL, 0x306c3 }, /* Intel Haswell C0 */
	{ X86_VENDOR_INTEL, 0x40650 }, /* Intel Haswell ULT B0 */
	{ X86_VENDOR_INTEL, 0x40651 }, /* Intel Haswell ULT B1 */
	{ 0, 0 },
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
	.cstates = cstate_map,
};