/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007-2009 coresystems GmbH
 * Copyright (C) 2011 The ChromiumOS Authors. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <string.h>
#include <arch/acpi.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/lapic.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/speedstep.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/name.h>
#include <delay.h>
#include <pc80/mc146818rtc.h>
#include <northbridge/intel/haswell/haswell.h>
#include <southbridge/intel/lynxpoint/pch.h>
#include "haswell.h"
#include "chip.h"

/* Intel suggested latency times in units of 1024ns. */
#define C_STATE_LATENCY_CONTROL_0_LIMIT	0x42
#define C_STATE_LATENCY_CONTROL_1_LIMIT	0x73
#define C_STATE_LATENCY_CONTROL_2_LIMIT	0x91
#define C_STATE_LATENCY_CONTROL_3_LIMIT	0xe4
#define C_STATE_LATENCY_CONTROL_4_LIMIT	0x145
#define C_STATE_LATENCY_CONTROL_5_LIMIT	0x1ef

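/*
 * The macros below convert an IRTL limit value to microseconds for the
 * ACPI C-state table. The IRTL time unit is encoded as 2^(5 * field) ns,
 * so with IRTL_1024_NS (assumed here to encode field value 2, i.e. a
 * 1024 ns unit) C_STATE_LATENCY_FROM_LAT_REG(0) works out to
 * (0x42 * 1024) / 1000 = 67 us.
 */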
#define C_STATE_LATENCY_MICRO_SECONDS(limit, base) \
	(((1 << ((base)*5)) * (limit)) / 1000)
#define C_STATE_LATENCY_FROM_LAT_REG(reg) \
	C_STATE_LATENCY_MICRO_SECONDS(C_STATE_LATENCY_CONTROL_ ##reg## _LIMIT, \
				      (IRTL_1024_NS >> 10))

/*
 * List of supported C-states in this processor. Only the ULT parts support C8,
 * C9, and C10.
 */
enum {
	C_STATE_C0,		/* 0 */
	C_STATE_C1,		/* 1 */
	C_STATE_C1E,		/* 2 */
	C_STATE_C3,		/* 3 */
	C_STATE_C6_SHORT_LAT,	/* 4 */
	C_STATE_C6_LONG_LAT,	/* 5 */
	C_STATE_C7_SHORT_LAT,	/* 6 */
	C_STATE_C7_LONG_LAT,	/* 7 */
	C_STATE_C7S_SHORT_LAT,	/* 8 */
	C_STATE_C7S_LONG_LAT,	/* 9 */
	C_STATE_C8,		/* 10 */
	C_STATE_C9,		/* 11 */
	C_STATE_C10,		/* 12 */
	NUM_C_STATES
};

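/*
 * MWAIT_RES() builds the ACPI FFixedHW resource for a C-state. The addrl
 * field carries the MWAIT hint, (major C-state << 4) | sub-state, matching
 * the EAX hint a core would pass to the MWAIT instruction for that state.
 */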
#define MWAIT_RES(state, sub_state)                         \
	{                                                   \
		.addrl = (((state) << 4) | (sub_state)),    \
		.space_id = ACPI_ADDRESS_SPACE_FIXED,       \
		.bit_width = ACPI_FFIXEDHW_VENDOR_INTEL,    \
		.bit_offset = ACPI_FFIXEDHW_CLASS_MWAIT,    \
		.access_size = ACPI_FFIXEDHW_FLAG_HW_COORD, \
	}

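/*
 * ACPI C-state entries exported through the cpu driver's .cstates hook at
 * the bottom of this file. Latencies are derived from the IRTL limits
 * above; the .power values are rough per-state power estimates in mW.
 */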
static acpi_cstate_t cstate_map[NUM_C_STATES] = {
	[C_STATE_C0] = { },
	[C_STATE_C1] = {
		.latency = 0,
		.power = 1000,
		.resource = MWAIT_RES(0, 0),
	},
	[C_STATE_C1E] = {
		.latency = 0,
		.power = 1000,
		.resource = MWAIT_RES(0, 1),
	},
	[C_STATE_C3] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(0),
		.power = 900,
		.resource = MWAIT_RES(1, 0),
	},
	[C_STATE_C6_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = 800,
		.resource = MWAIT_RES(2, 0),
	},
	[C_STATE_C6_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = 800,
		.resource = MWAIT_RES(2, 1),
	},
	[C_STATE_C7_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = 700,
		.resource = MWAIT_RES(3, 0),
	},
	[C_STATE_C7_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = 700,
		.resource = MWAIT_RES(3, 1),
	},
	[C_STATE_C7S_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = 700,
		.resource = MWAIT_RES(3, 2),
	},
	[C_STATE_C7S_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = 700,
		.resource = MWAIT_RES(3, 3),
	},
	[C_STATE_C8] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(3),
		.power = 600,
		.resource = MWAIT_RES(4, 0),
	},
	[C_STATE_C9] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(4),
		.power = 500,
		.resource = MWAIT_RES(5, 0),
	},
	[C_STATE_C10] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(5),
		.power = 400,
		.resource = MWAIT_RES(6, 0),
	},
};

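/*
 * The tables below translate between seconds and the 7-bit POWER_LIMIT_1_TIME
 * encoding. Assuming the usual RAPL time-window format, bits [4:0] hold a
 * power-of-two exponent and bits [6:5] a 0.25-step multiplier, so a value of
 * 0x2c is 2^12 * 1.25 time units (roughly 5 seconds with the typical
 * ~0.976 ms time unit).
 */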
/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
	[0] = 0x00,
	[1] = 0x0a,
	[2] = 0x0b,
	[3] = 0x4b,
	[4] = 0x0c,
	[5] = 0x2c,
	[6] = 0x4c,
	[7] = 0x6c,
	[8] = 0x0d,
	[10] = 0x2d,
	[12] = 0x4d,
	[14] = 0x6d,
	[16] = 0x0e,
	[20] = 0x2e,
	[24] = 0x4e,
	[28] = 0x6e,
	[32] = 0x0f,
	[40] = 0x2f,
	[48] = 0x4f,
	[56] = 0x6f,
	[64] = 0x10,
	[80] = 0x30,
	[96] = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

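/*
 * CPUID signature 0x40650 is the Haswell ULT B0 entry in cpu_table at the
 * bottom of this file; ULT signatures (0x4065x) sort above the 4+2 desktop
 * signatures (0x306cx), which is what the comparison below relies on.
 */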
/* Dynamically determine if the part is ULT. */
static int is_ult(void)
{
	static int ult = -1;

	if (ult < 0)
		ult = (cpuid_eax(1) >= 0x40650);

	return ult;
}

/* The core 100MHz BCLK is disabled in deeper c-states. One needs to calibrate
 * the 100MHz BCLK against the 24MHz BCLK to restore the clocks properly
 * when a core is woken up. */
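/* Poll until the BIOS mailbox RUN_BUSY flag clears; give up after ~1 ms. */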
static int pcode_ready(void)
{
	int wait_count;
	const int delay_step = 10;

	wait_count = 0;
	do {
		if (!(MCHBAR32(BIOS_MAILBOX_INTERFACE) & MAILBOX_RUN_BUSY))
			return 0;
		wait_count += delay_step;
		udelay(delay_step);
	} while (wait_count < 1000);

	return -1;
}

static void calibrate_24mhz_bclk(void)
{
	int err_code;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
		return;
	}

	/* A non-zero value initiates the PCODE calibration. */
	MCHBAR32(BIOS_MAILBOX_DATA) = ~0;
	MCHBAR32(BIOS_MAILBOX_INTERFACE) =
		MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
		return;
	}

	err_code = MCHBAR32(BIOS_MAILBOX_INTERFACE) & 0xff;

	printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration response: %d\n",
	       err_code);

	/* Read the calibrated value. */
	MCHBAR32(BIOS_MAILBOX_INTERFACE) =
		MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on read.\n");
		return;
	}

	printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration value: 0x%08x\n",
	       MCHBAR32(BIOS_MAILBOX_DATA));
}

static u32 pcode_mailbox_read(u32 command)
{
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
		return 0;
	}

	/* Send command and start transaction */
	MCHBAR32(BIOS_MAILBOX_INTERFACE) = command | MAILBOX_RUN_BUSY;

	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
		return 0;
	}

	/* Read mailbox */
	return MCHBAR32(BIOS_MAILBOX_DATA);
}

static void configure_pch_power_sharing(void)
{
	u32 pch_power, pch_power_ext, pmsync, pmsync2;
	int i;

	/* Read PCH Power levels from PCODE */
	pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
	pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);

	printk(BIOS_INFO, "PCH Power: PCODE Levels 0x%08x 0x%08x\n",
	       pch_power, pch_power_ext);

	pmsync = RCBA32(PMSYNC_CONFIG);
	pmsync2 = RCBA32(PMSYNC_CONFIG2);

	/* Program PMSYNC_TPR_CONFIG PCH power limit values
	 * pmsync[0:4] = mailbox[0:5]
	 * pmsync[8:12] = mailbox[6:11]
	 * pmsync[16:20] = mailbox[12:17]
	 */
	for (i = 0; i < 3; i++) {
		u32 level = pch_power & 0x3f;
		pch_power >>= 6;
		pmsync &= ~(0x1f << (i * 8));
		pmsync |= (level & 0x1f) << (i * 8);
	}
	RCBA32(PMSYNC_CONFIG) = pmsync;

	/* Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
	 * pmsync2[0:4] = mailbox[23:18]
	 * pmsync2[8:12] = mailbox_ext[6:11]
	 * pmsync2[16:20] = mailbox_ext[12:17]
	 * pmsync2[24:28] = mailbox_ext[18:22]
	 */
	pmsync2 &= ~0x1f;
	pmsync2 |= pch_power & 0x1f;

	for (i = 1; i < 4; i++) {
		u32 level = pch_power_ext & 0x3f;
		pch_power_ext >>= 6;
		pmsync2 &= ~(0x1f << (i * 8));
		pmsync2 |= (level & 0x1f) << (i * 8);
	}
	RCBA32(PMSYNC_CONFIG2) = pmsync2;
}

int cpu_config_tdp_levels(void)
{
	msr_t platform_info;

	/* Bits 34:33 indicate how many levels supported */
	platform_info = rdmsr(MSR_PLATFORM_INFO);
	return (platform_info.hi >> 1) & 3;
}

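/*
 * Note on units for set_power_limits() below: the raw MSR_PKG_POWER_SKU
 * fields are in increments of 1/power_unit watts, where power_unit is
 * derived from MSR_PKG_POWER_SKU_UNIT. With the typical unit field of 3
 * (power_unit = 8), a raw TDP of 120 would correspond to a 15 W part.
 */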
/*
 * Configure processor power limits if possible
 * This must be done AFTER the BIOS_RESET_CPL bit is set
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = rdmsr(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned power_unit;
	unsigned tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		power_limit_1_time = 28;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/* Get units */
	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 2 << ((msr.lo & 0xf) - 1);

	/* Get power defaults for this SKU */
	msr = rdmsr(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	printk(BIOS_DEBUG, "CPU TDP: %u Watts\n", tdp / power_unit);

	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on server SKU */

	wrmsr(MSR_PKG_POWER_LIMIT, limit);

	/* Set power limit values in MCHBAR as well */
	MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = limit.lo;
	MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = limit.hi;

	/* Set DDR RAPL power limit by copying from MMIO to MSR */
	msr.lo = MCHBAR32(MCH_DDR_POWER_LIMIT_LO);
	msr.hi = MCHBAR32(MCH_DDR_POWER_LIMIT_HI);
	wrmsr(MSR_DDR_RAPL_LIMIT, msr);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		wrmsr(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

static void configure_c_states(void)
{
	msr_t msr;

	msr = rdmsr(MSR_PMG_CST_CONFIG_CONTROL);
	msr.lo |= (1 << 30);	// Package c-state Undemotion Enable
	msr.lo |= (1 << 29);	// Package c-state Demotion Enable
	msr.lo |= (1 << 28);	// C1 Auto Undemotion Enable
	msr.lo |= (1 << 27);	// C3 Auto Undemotion Enable
	msr.lo |= (1 << 26);	// C1 Auto Demotion Enable
	msr.lo |= (1 << 25);	// C3 Auto Demotion Enable
	msr.lo &= ~(1 << 10);	// Disable IO MWAIT redirection
	/* The deepest package c-state defaults to factory-configured value. */
	wrmsr(MSR_PMG_CST_CONFIG_CONTROL, msr);

	msr = rdmsr(MSR_PMG_IO_CAPTURE_BASE);
	msr.lo &= ~0xffff;
	msr.lo |= (get_pmbase() + 0x14);	// LVL_2 base address
	/* The deepest package c-state defaults to factory-configured value. */
	wrmsr(MSR_PMG_IO_CAPTURE_BASE, msr);

	msr = rdmsr(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	// Enable P-state HW_ALL coordination
	wrmsr(MSR_MISC_PWR_MGMT, msr);

	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	// Enable Energy Perf Bias MSR 0x1b0
	msr.lo |= (1 << 1);	// C1E Enable
	msr.lo |= (1 << 0);	// Bi-directional PROCHOT#
	wrmsr(MSR_POWER_CTL, msr);

	/* C-state Interrupt Response Latency Control 0 - package C3 latency */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr);

	/* C-state Interrupt Response Latency Control 1 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr);

	/* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr);

	/* Haswell ULT only supports the 3-5 latency response registers. */
	if (is_ult()) {
		/* C-state Interrupt Response Latency Control 3 - package C8 */
		msr.hi = 0;
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			C_STATE_LATENCY_CONTROL_3_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr);

		/* C-state Interrupt Response Latency Control 4 - package C9 */
		msr.hi = 0;
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			C_STATE_LATENCY_CONTROL_4_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr);

		/* C-state Interrupt Response Latency Control 5 - package C10 */
		msr.hi = 0;
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			C_STATE_LATENCY_CONTROL_5_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr);
	}
}

static void configure_thermal_target(void)
{
	struct cpu_intel_haswell_config *conf;
	device_t lapic;
	msr_t msr;

	/* Find pointer to CPU configuration */
	lapic = dev_find_lapic(SPEEDSTEP_APIC_MAGIC);
	if (!lapic || !lapic->chip_info)
		return;
	conf = lapic->chip_info;

	/* Set TCC activation offset if supported */
	msr = rdmsr(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
		msr = rdmsr(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24);	/* Bits 27:24 */
		msr.lo |= (conf->tcc_offset & 0xf) << 24;
		wrmsr(MSR_TEMPERATURE_TARGET, msr);
	}
}

static void configure_misc(void)
{
	msr_t msr;

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	/* Fast String enable */
	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	/* Enhanced SpeedStep Enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
}

static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = rdmsr(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	wrmsr(MSR_PIC_MSG_CONTROL, msr);
}

static void configure_dca_cap(void)
{
	struct cpuid_result cpuid_regs;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	cpuid_regs = cpuid(1);
	if (cpuid_regs.ecx & (1 << 18)) {
		msr = rdmsr(IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		wrmsr(IA32_PLATFORM_DCA_CAP, msr);
	}
}

static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "haswell: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * HASWELL_BCLK);
}

static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;
	int ecx;

	/* Determine if energy efficient policy is supported. */
	ecx = cpuid_ecx(0x6);
	if (!(ecx & (1 << 3)))
		return;

	/* Energy Policy is bits 3:0 */
	msr = rdmsr(IA32_ENERGY_PERFORMANCE_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	wrmsr(IA32_ENERGY_PERFORMANCE_BIAS, msr);

	printk(BIOS_DEBUG, "haswell: energy policy set to %u\n",
	       policy);
}

static void configure_mca(void)
{
	msr_t msr;
	const unsigned int mcg_cap_msr = 0x179;
	int i;
	int num_banks;

	msr = rdmsr(mcg_cap_msr);
	num_banks = msr.lo & 0xff;
	msr.lo = msr.hi = 0;
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	for (i = 0; i < num_banks; i++)
		wrmsr(IA32_MC0_STATUS + (i * 4), msr);
}

static void bsp_init_before_ap_bringup(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;
	char processor_name[49];

	/* Print processor name */
	fill_processor_name(processor_name);
	printk(BIOS_INFO, "CPU: %s.\n", processor_name);

	/* Ensure the local apic is enabled */
	enable_lapic();

	/* Set the device path of the boot cpu. */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Find the device structure for the boot cpu. */
	info = cpu_info();
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);

	if (info->index != 0)
		printk(BIOS_CRIT, "BSP index(%d) != 0!\n", info->index);

	/* Setup MTRRs based on physical address size. */
	x86_setup_fixed_mtrrs();
	x86_setup_var_mtrrs(cpuid_eax(0x80000008) & 0xff, 2);
	x86_mtrr_check();

	if (is_ult()) {
		calibrate_24mhz_bclk();
		configure_pch_power_sharing();
	}

	/* Call through the cpu driver's initialization. */
	cpu_initialize(0);
}

/* All CPUs including BSP will run the following function. */
static void haswell_init(device_t cpu)
{
	/* Clear out pending MCEs */
	configure_mca();

	/* Enable the local cpu apics */
	enable_lapic_tpr();
	setup_lapic();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	configure_thermal_target();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Set Max Ratio */
	set_max_ratio();

	/* Enable Turbo */
	enable_turbo();
}

void bsp_init_and_start_aps(struct bus *cpu_bus)
{
	int max_cpus;
	int num_aps;
	const void *microcode_patch;

	/* Perform any necessary BSP initialization before APs are brought up.
	 * This call also allows the BSP to prepare for any secondary effects
	 * from calling cpu_initialize() such as smm_init(). */
	bsp_init_before_ap_bringup(cpu_bus);

	microcode_patch = intel_microcode_find();

	/* This needs to be called after the mtrr setup so the BSP mtrrs
	 * can be mirrored by the APs. */
	if (setup_ap_init(cpu_bus, &max_cpus, microcode_patch)) {
		printk(BIOS_CRIT, "AP setup initialization failed. "
		       "No APs will be brought up.\n");
		return;
	}

	num_aps = max_cpus - 1;
	if (start_aps(cpu_bus, num_aps)) {
		printk(BIOS_CRIT, "AP startup failed. Trying to continue.\n");
	}

	if (smm_initialize()) {
		printk(BIOS_CRIT, "SMM Initialization failed...\n");
		return;
	}

	/* After SMM relocation a 2nd microcode load is required. */
	intel_microcode_load_unlocked(microcode_patch);

	/* Enable ROM caching if option was selected. */
	x86_mtrr_enable_rom_caching();
}

static struct device_operations cpu_dev_ops = {
	.init = haswell_init,
};

static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x306c1 }, /* Intel Haswell 4+2 A0 */
	{ X86_VENDOR_INTEL, 0x306c2 }, /* Intel Haswell 4+2 B0 */
	{ X86_VENDOR_INTEL, 0x40650 }, /* Intel Haswell ULT B0 */
	{ X86_VENDOR_INTEL, 0x40651 }, /* Intel Haswell ULT B1 */
	{ 0, 0 },
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
	.cstates = cstate_map,
};