/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 *
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Copyright 2000 Silicon Integrated System Corporation
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System Programming
 */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <bootstate.h>
#include <console/console.h>
#include <device/device.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
#include <memrange.h>
#if CONFIG_X86_AMD_FIXED_MTRRS
#include <cpu/amd/mtrr.h>
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif

/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)

static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;

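/* MTRRcap[7:0] (VCNT) reports the number of variable-range MTRRs the
 * processor implements, per the Intel SDM. */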
static void detect_var_mtrrs(void)
{
        msr_t msr;

        msr = rdmsr(MTRRcap_MSR);

        total_mtrrs = msr.lo & 0xff;
        bios_mtrrs = total_mtrrs - OS_MTRRS;
}

void enable_fixed_mtrr(void)
{
        msr_t msr;

        msr = rdmsr(MTRRdefType_MSR);
        msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
        wrmsr(MTRRdefType_MSR, msr);
}

static void enable_var_mtrr(unsigned char deftype)
{
        msr_t msr;

        msr = rdmsr(MTRRdefType_MSR);
        msr.lo &= ~0xff;
        msr.lo |= MTRRdefTypeEn | deftype;
        wrmsr(MTRRdefType_MSR, msr);
}

/* fms: find most significant bit set, stolen from Linux Kernel Source. */
static inline unsigned int fms(unsigned int x)
{
        int r;

        __asm__("bsrl %1,%0\n\t"
                "jnz 1f\n\t"
                "movl $0,%0\n"
                "1:" : "=r" (r) : "g" (x));
        return r;
}
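
/* For example, fms(0x5000) == 14; with the zero fixup above, fms(0) == 0. */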

/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
        int r;

        __asm__("bsfl %1,%0\n\t"
                "jnz 1f\n\t"
                "movl $32,%0\n"
                "1:" : "=r" (r) : "g" (x));
        return r;
}
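
/* For example, fls(0x5000) == 12, while fls(0) == 32 marks "no bit set". */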

#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
        (((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* The minimum alignment while handling variable MTRR ranges is 64MiB. */
#define MTRR_MIN_ALIGN PHYS_TO_RANGE_ADDR(64 << 20)
/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
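
/* Worked example of the units: with RANGE_SHIFT == 12,
 * PHYS_TO_RANGE_ADDR(0x100000000ULL) == 0x100000 == RANGE_4GB, so the full
 * 32-bit physical space fits comfortably in 32-bit range addresses. */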

/*
 * The default MTRR type selection uses 3 approaches for selecting the
 * optimal number of variable MTRRs. For each range do 3 calculations:
 *   1. UC as default type with no holes at top of range.
 *   2. UC as default using holes at top of range.
 *   3. WB as default.
 * If using holes is optimal for a range when UC is the default type the
 * tag is updated to direct the commit routine to use a hole at the top
 * of a range.
 */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
/* If the default type is UC use the hole carving algorithm for a range. */
#define MTRR_RANGE_UC_USE_HOLE (1 << MTRR_ALGO_SHIFT)
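
/* A range tag thus encodes the MTRR type in its low 8 bits and the chosen
 * algorithm above them, e.g. (MTRR_TYPE_WRBACK | MTRR_RANGE_UC_USE_HOLE)
 * still decodes to WB via MTRR_TAG_MASK while directing the commit path to
 * carve a hole at the top of the range. */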

static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
        return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
        return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
        return range_entry_tag(r) & MTRR_TAG_MASK;
}

static struct memranges *get_physical_address_space(void)
{
        static struct memranges *addr_space;
        static struct memranges addr_space_storage;

        /* In order to handle some chipsets not being able to pre-determine
         * uncacheable ranges, such as graphics memory, at resource insertion
         * time, remove uncacheable regions from the cacheable ones. */
        if (addr_space == NULL) {
                struct range_entry *r;
                unsigned long mask;
                unsigned long match;

                addr_space = &addr_space_storage;

                mask = IORESOURCE_CACHEABLE;
                /* Collect cacheable and uncacheable address ranges. The
                 * uncacheable regions take precedence over the cacheable
                 * regions. */
                memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
                memranges_add_resources(addr_space, mask, 0,
                                        MTRR_TYPE_UNCACHEABLE);

                /* Handle any write combining resources. Only prefetchable
                 * resources with the IORESOURCE_WRCOMB flag are appropriate
                 * for this MTRR type. */
                match = IORESOURCE_PREFETCH | IORESOURCE_WRCOMB;
                mask |= match;
                memranges_add_resources(addr_space, mask, match,
                                        MTRR_TYPE_WRCOMB);

#if CONFIG_CACHE_ROM
                /* Add a write-protect region covering the ROM size
                 * when CONFIG_CACHE_ROM is enabled. The ROM is assumed
                 * to be located at 4GiB - rom size. */
                resource_t rom_base = RANGE_TO_PHYS_ADDR(
                        RANGE_4GB - PHYS_TO_RANGE_ADDR(CONFIG_ROM_SIZE));
                memranges_insert(addr_space, rom_base, CONFIG_ROM_SIZE,
                                 MTRR_TYPE_WRPROT);
#endif

                /* The address space below 4GiB is special. It needs to be
                 * covered entirely by range entries so that MTRR calculations
                 * can be properly done for the full 32-bit address space.
                 * Therefore, ensure holes are filled up to 4GiB as
                 * uncacheable. */
                memranges_fill_holes_up_to(addr_space,
                                           RANGE_TO_PHYS_ADDR(RANGE_4GB),
                                           MTRR_TYPE_UNCACHEABLE);

                printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
                memranges_each_entry(r, addr_space)
                        printk(BIOS_DEBUG,
                               "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
                               range_entry_base(r), range_entry_end(r),
                               range_entry_size(r), range_entry_tag(r));
        }

        return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
        uint32_t begin;
        uint32_t end;
        uint32_t step;
        int range_index;
        int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
        { PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
          PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRRfix64K_00000_MSR },
        { PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
          PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRRfix16K_80000_MSR },
        { PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
          PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRRfix4K_C0000_MSR },
};
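
/* Taken together, the descriptors above cover the legacy 0 - 1MiB window
 * with 8 64KiB ranges, 16 16KiB ranges and 64 4KiB ranges: 88 range
 * entries packed into 11 MSRs. */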

static void calc_fixed_mtrrs(void)
{
        static int fixed_mtrr_types_initialized;
        struct memranges *phys_addr_space;
        struct range_entry *r;
        const struct fixed_mtrr_desc *desc;
        const struct fixed_mtrr_desc *last_desc;
        uint32_t begin;
        uint32_t end;
        int type_index;

        if (fixed_mtrr_types_initialized)
                return;

        phys_addr_space = get_physical_address_space();

        /* Set all fixed ranges to uncacheable first. */
        memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

        desc = &fixed_mtrr_desc[0];
        last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];
        type_index = desc->range_index;

        memranges_each_entry(r, phys_addr_space) {
                begin = range_entry_base_mtrr_addr(r);
                end = range_entry_end_mtrr_addr(r);

                if (begin >= last_desc->end)
                        break;

                if (end > last_desc->end)
                        end = last_desc->end;

                /* Get to the correct fixed mtrr descriptor. */
                while (begin >= desc->end)
                        desc++;

                type_index = desc->range_index;
                type_index += (begin - desc->begin) / desc->step;

                while (begin != end) {
                        unsigned char type;

                        type = range_entry_tag(r);
                        printk(MTRR_VERBOSE_LEVEL,
                               "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
                               begin, begin + desc->step, type, type_index);
                        if (type == MTRR_TYPE_WRBACK)
                                type |= MTRR_FIXED_WRBACK_BITS;
                        fixed_mtrr_types[type_index] = type;
                        type_index++;
                        begin += desc->step;
                        if (begin == desc->end)
                                desc++;
                }
        }
        fixed_mtrr_types_initialized = 1;
}

static void commit_fixed_mtrrs(void)
{
        int i;
        int j;
        int msr_num;
        int type_index;
        /* 8 ranges per msr. */
        msr_t fixed_msrs[NUM_FIXED_MTRRS];
        unsigned long msr_index[NUM_FIXED_MTRRS];

        memset(&fixed_msrs, 0, sizeof(fixed_msrs));

        disable_cache();

        msr_num = 0;
        type_index = 0;
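        /* Each fixed-range MSR packs eight one-byte types: the low four
         * ranges of a group go into .lo and the high four into .hi, least
         * significant byte first. */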
        for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
                const struct fixed_mtrr_desc *desc;
                int num_ranges;

                desc = &fixed_mtrr_desc[i];
                num_ranges = (desc->end - desc->begin) / desc->step;
                for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
                        msr_index[msr_num] = desc->msr_index_base +
                                (j / RANGES_PER_FIXED_MTRR);
                        fixed_msrs[msr_num].lo |=
                                fixed_mtrr_types[type_index++] << 0;
                        fixed_msrs[msr_num].lo |=
                                fixed_mtrr_types[type_index++] << 8;
                        fixed_msrs[msr_num].lo |=
                                fixed_mtrr_types[type_index++] << 16;
                        fixed_msrs[msr_num].lo |=
                                fixed_mtrr_types[type_index++] << 24;
                        fixed_msrs[msr_num].hi |=
                                fixed_mtrr_types[type_index++] << 0;
                        fixed_msrs[msr_num].hi |=
                                fixed_mtrr_types[type_index++] << 8;
                        fixed_msrs[msr_num].hi |=
                                fixed_mtrr_types[type_index++] << 16;
                        fixed_msrs[msr_num].hi |=
                                fixed_mtrr_types[type_index++] << 24;
                        msr_num++;
                }
        }

        for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++) {
                printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
                       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
                wrmsr(msr_index[i], fixed_msrs[i]);
        }

        enable_cache();
}

void x86_setup_fixed_mtrrs_no_enable(void)
{
        calc_fixed_mtrrs();
        commit_fixed_mtrrs();
}

void x86_setup_fixed_mtrrs(void)
{
        x86_setup_fixed_mtrrs_no_enable();

        printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
        enable_fixed_mtrr();
}

/* Keep track of the MTRR that covers the ROM for caching purposes. */
#if CONFIG_CACHE_ROM
static long rom_cache_mtrr = -1;

long x86_mtrr_rom_cache_var_index(void)
{
        return rom_cache_mtrr;
}

void x86_mtrr_enable_rom_caching(void)
{
        msr_t msr_val;
        unsigned long index;

        if (rom_cache_mtrr < 0)
                return;

        index = rom_cache_mtrr;
        disable_cache();
        msr_val = rdmsr(MTRRphysBase_MSR(index));
        msr_val.lo &= ~0xff;
        msr_val.lo |= MTRR_TYPE_WRPROT;
        wrmsr(MTRRphysBase_MSR(index), msr_val);
        enable_cache();
}

void x86_mtrr_disable_rom_caching(void)
{
        msr_t msr_val;
        unsigned long index;

        if (rom_cache_mtrr < 0)
                return;

        index = rom_cache_mtrr;
        disable_cache();
        msr_val = rdmsr(MTRRphysBase_MSR(index));
        msr_val.lo &= ~0xff;
        wrmsr(MTRRphysBase_MSR(index), msr_val);
        enable_cache();
}

static void disable_cache_rom(void *unused)
{
        x86_mtrr_disable_rom_caching();
}

BOOT_STATE_INIT_ENTRIES(disable_rom_cache_bscb) = {
        BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY,
                              disable_cache_rom, NULL),
        BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT,
                              disable_cache_rom, NULL),
};
#endif

struct var_mtrr_state {
        struct memranges *addr_space;
        int above4gb;
        int address_bits;
        int commit_mtrrs;
        int mtrr_index;
        int def_mtrr_type;
};
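
/* The var_mtrr_state walker runs in two modes: with commit_mtrrs == 0 it is
 * a dry run that only counts the MTRRs a layout would consume (mtrr_index
 * doubles as the running count); with commit_mtrrs == 1 it actually writes
 * the MSRs. */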

static void clear_var_mtrr(int index)
{
        msr_t msr_val;

        msr_val = rdmsr(MTRRphysMask_MSR(index));
        msr_val.lo &= ~MTRRphysMaskValid;
        wrmsr(MTRRphysMask_MSR(index), msr_val);
}

static void write_var_mtrr(struct var_mtrr_state *var_state,
                           uint32_t base, uint32_t size, int mtrr_type)
{
        msr_t msr_val;
        unsigned long msr_index;
        resource_t rbase;
        resource_t rsize;
        resource_t mask;

        /* Some variable MTRRs are saved for OS use. However, it's more
         * important to map the full address space properly. */
        if (var_state->mtrr_index >= bios_mtrrs)
                printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
        if (var_state->mtrr_index >= total_mtrrs) {
                printk(BIOS_ERR, "ERROR: Not enough MTRRs available!\n");
                return;
        }

        rbase = base;
        rsize = size;

        rbase = RANGE_TO_PHYS_ADDR(rbase);
        rsize = RANGE_TO_PHYS_ADDR(rsize);
        rsize = -rsize;

        mask = (1ULL << var_state->address_bits) - 1;
        rsize = rsize & mask;

#if CONFIG_CACHE_ROM
        /* CONFIG_CACHE_ROM allocates an MTRR specifically for allowing
         * one to turn on caching for faster ROM access. However, it is
         * left to the MTRR callers to enable it. */
        if (mtrr_type == MTRR_TYPE_WRPROT) {
                mtrr_type = MTRR_TYPE_UNCACHEABLE;
                if (rom_cache_mtrr < 0)
                        rom_cache_mtrr = var_state->mtrr_index;
        }
#endif

        printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
               var_state->mtrr_index, rbase, rsize, mtrr_type);

        msr_val.lo = rbase;
        msr_val.lo |= mtrr_type;

        msr_val.hi = rbase >> 32;
        msr_index = MTRRphysBase_MSR(var_state->mtrr_index);
        wrmsr(msr_index, msr_val);

        msr_val.lo = rsize;
        msr_val.lo |= MTRRphysMaskValid;
        msr_val.hi = rsize >> 32;
        msr_index = MTRRphysMask_MSR(var_state->mtrr_index);
        wrmsr(msr_index, msr_val);
}
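
/* As an illustration, on a CPU with 36 physical address bits a 256MiB WB
 * range at 0xc0000000 arrives here as base 0xc0000 and size 0x10000 in
 * range units and is programmed as PhysBase = 0x00000000c0000006
 * (base | type) and PhysMask = 0x0000000ff0000800 (-size truncated to 36
 * bits, plus the valid bit). */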

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
                                uint32_t base, uint32_t size, int mtrr_type)
{
        while (size != 0) {
                uint32_t addr_lsb;
                uint32_t size_msb;
                uint32_t mtrr_size;

                addr_lsb = fls(base);
                size_msb = fms(size);

                /* All MTRR entries need to have their base aligned to the
                 * mask size. The maximum entry size is therefore bounded by
                 * both the lowest bit set in the base and the highest bit
                 * set in the size. */
                if (addr_lsb > size_msb)
                        mtrr_size = 1 << size_msb;
                else
                        mtrr_size = 1 << addr_lsb;

                if (var_state->commit_mtrrs)
                        write_var_mtrr(var_state, base, mtrr_size, mtrr_type);

                size -= mtrr_size;
                base += mtrr_size;
                var_state->mtrr_index++;
        }
}
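
/* For example, a range at 0x1000 with size 0x3000 (range units) cannot be
 * covered by a single power-of-two MTRR: the loop above emits a
 * 0x1000-sized entry at 0x1000 followed by a 0x2000-sized entry at 0x2000,
 * consuming two variable MTRRs. */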

static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
                                     struct range_entry *r)
{
        uint32_t a1, a2, b1, b2;
        int mtrr_type;
        struct range_entry *next;

        /*
         * Determine MTRRs based on the following algorithm for the given entry:
         * +------------------+ b2 = ALIGN_UP(end)
         * |  0 or more bytes | <-- hole is carved out between b1 and b2
         * +------------------+ a2 = b1 = end
         * |                  |
         * +------------------+ a1 = begin
         *
         * Thus, there are 2 sub-ranges to configure variable MTRRs for.
         */
        mtrr_type = range_entry_mtrr_type(r);

        a1 = range_entry_base_mtrr_addr(r);
        a2 = range_entry_end_mtrr_addr(r);

        /* The end address is under 1MiB. The fixed MTRRs take
         * precedence over the variable ones. Therefore this range
         * can be ignored. */
        if (a2 < RANGE_1MB)
                return;

        /* Again, the fixed MTRRs take precedence so the beginning
         * of the range can be set to 0 if it starts below 1MiB. */
        if (a1 < RANGE_1MB)
                a1 = 0;

        /* If the range starts above 4GiB the processing is done. */
        if (!var_state->above4gb && a1 >= RANGE_4GB)
                return;

        /* Clip the upper address to 4GiB if addresses above 4GiB
         * are not being processed. */
        if (!var_state->above4gb && a2 > RANGE_4GB)
                a2 = RANGE_4GB;

        next = memranges_next_entry(var_state->addr_space, r);

        b1 = a2;

        /* First check if a1 is >= 4GiB and the current entry is the last
         * entry. If so perform an optimization of covering a larger range
         * defined by the base address' alignment. */
        if (a1 >= RANGE_4GB && next == NULL) {
                uint32_t addr_lsb;

                addr_lsb = fls(a1);
                b2 = (1 << addr_lsb) + a1;
                if (b2 >= a2) {
                        calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
                        return;
                }
        }

        /* Handle the min alignment roundup case. */
        b2 = ALIGN_UP(a2, MTRR_MIN_ALIGN);

        /* Check against the next range. If the current range_entry is the
         * last entry then carving a hole is no problem. If the current entry
         * isn't the last entry then check that the next entry covers the
         * entire hole range with the default mtrr type. */
        if (next != NULL &&
            (range_entry_mtrr_type(next) != var_state->def_mtrr_type ||
             range_entry_end_mtrr_addr(next) < b2)) {
                calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
                return;
        }

        calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
        calc_var_mtrr_range(var_state, b1, b2 - b1, var_state->def_mtrr_type);
}
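
/* Illustration: with UC as the default type, a WB entry ending at 0xc7a00
 * (range units) has b2 rounded up to 0xc8000. WB MTRRs then cover
 * [a1, 0xc8000) and a UC hole is punched over [0xc7a00, 0xc8000), which
 * typically needs fewer MTRRs than covering the unaligned tail exactly. */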

static void calc_var_mtrrs_without_hole(struct var_mtrr_state *var_state,
                                        struct range_entry *r)
{
        uint32_t a1, a2, b1, b2, c1, c2;
        int mtrr_type;

        /*
         * For each range that meets the non-default type process it in the
         * following manner:
         * +------------------+ c2 = end
         * |  0 or more bytes |
         * +------------------+ b2 = c1 = ALIGN_DOWN(end)
         * |                  |
         * +------------------+ b1 = a2 = ALIGN_UP(begin)
         * |  0 or more bytes |
         * +------------------+ a1 = begin
         *
         * Thus, there are 3 sub-ranges to configure variable MTRRs for.
         */
        mtrr_type = range_entry_mtrr_type(r);

        a1 = range_entry_base_mtrr_addr(r);
        c2 = range_entry_end_mtrr_addr(r);

        /* The end address is under 1MiB. The fixed MTRRs take
         * precedence over the variable ones. Therefore this range
         * can be ignored. */
        if (c2 < RANGE_1MB)
                return;

        /* Again, the fixed MTRRs take precedence so the beginning
         * of the range can be set to 0 if it starts below 1MiB. */
        if (a1 < RANGE_1MB)
                a1 = 0;

        /* If the range starts above 4GiB the processing is done. */
        if (!var_state->above4gb && a1 >= RANGE_4GB)
                return;

        /* Clip the upper address to 4GiB if addresses above 4GiB
         * are not being processed. */
        if (!var_state->above4gb && c2 > RANGE_4GB)
                c2 = RANGE_4GB;

        /* Don't align up or down on the range if it is smaller
         * than the minimum granularity. */
        if ((c2 - a1) < MTRR_MIN_ALIGN) {
                calc_var_mtrr_range(var_state, a1, c2 - a1, mtrr_type);
                return;
        }

        b1 = a2 = ALIGN_UP(a1, MTRR_MIN_ALIGN);
        b2 = c1 = ALIGN_DOWN(c2, MTRR_MIN_ALIGN);

        calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
        calc_var_mtrr_range(var_state, b1, b2 - b1, mtrr_type);
        calc_var_mtrr_range(var_state, c1, c2 - c1, mtrr_type);
}

static int calc_var_mtrrs(struct memranges *addr_space,
                          int above4gb, int address_bits)
{
        int wb_deftype_count;
        int uc_deftype_count;
        struct range_entry *r;
        struct var_mtrr_state var_state;

        /* The default MTRR cacheability type is determined by calculating
         * the number of MTRRs required for each MTRR type as if it were the
         * default. */
        var_state.addr_space = addr_space;
        var_state.above4gb = above4gb;
        var_state.address_bits = address_bits;
        var_state.commit_mtrrs = 0;

        wb_deftype_count = 0;
        uc_deftype_count = 0;

        /*
         * For each range do 3 calculations:
         *   1. UC as default type with no holes at top of range.
         *   2. UC as default using holes at top of range.
         *   3. WB as default.
         * The lowest count is then used as default after totaling all
         * MTRRs. Note that the optimal algorithm for UC default is marked in
         * the tag of each range regardless of final decision. UC takes
         * precedence in the MTRR architecture. Therefore, only holes can be
         * used when the type of the region is MTRR_TYPE_WRBACK with
         * MTRR_TYPE_UNCACHEABLE as the default type.
         */
        memranges_each_entry(r, var_state.addr_space) {
                int mtrr_type;

                mtrr_type = range_entry_mtrr_type(r);

                if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
                        int uc_hole_count;
                        int uc_no_hole_count;

                        var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
                        var_state.mtrr_index = 0;

                        /* No hole calculation. */
                        calc_var_mtrrs_without_hole(&var_state, r);
                        uc_no_hole_count = var_state.mtrr_index;

                        /* Hole calculation only if the type is WB. The value
                         * 64 is an unachievable count, so it acts as a large
                         * default when the hole calculation is skipped. */
                        uc_hole_count = 64;
                        if (mtrr_type == MTRR_TYPE_WRBACK) {
                                var_state.mtrr_index = 0;
                                calc_var_mtrrs_with_hole(&var_state, r);
                                uc_hole_count = var_state.mtrr_index;
                        }

                        /* Mark the entry with the optimal algorithm. */
                        if (uc_no_hole_count < uc_hole_count) {
                                uc_deftype_count += uc_no_hole_count;
                        } else {
                                unsigned long new_tag;

                                new_tag = mtrr_type | MTRR_RANGE_UC_USE_HOLE;
                                range_entry_update_tag(r, new_tag);
                                uc_deftype_count += uc_hole_count;
                        }
                }

                if (mtrr_type != MTRR_TYPE_WRBACK) {
                        var_state.mtrr_index = 0;
                        var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
                        calc_var_mtrrs_without_hole(&var_state, r);
                        wb_deftype_count += var_state.mtrr_index;
                }
        }

        printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
               wb_deftype_count, uc_deftype_count);

        if (wb_deftype_count < uc_deftype_count) {
                printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
                return MTRR_TYPE_WRBACK;
        }
        printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
        return MTRR_TYPE_UNCACHEABLE;
}

static void commit_var_mtrrs(struct memranges *addr_space, int def_type,
                             int above4gb, int address_bits)
{
        struct range_entry *r;
        struct var_mtrr_state var_state;
        int i;

        var_state.addr_space = addr_space;
        var_state.above4gb = above4gb;
        var_state.address_bits = address_bits;
        /* Write the MSRs. */
        var_state.commit_mtrrs = 1;
        var_state.mtrr_index = 0;
        var_state.def_mtrr_type = def_type;

        memranges_each_entry(r, var_state.addr_space) {
                if (range_entry_mtrr_type(r) == def_type)
                        continue;

                if (def_type == MTRR_TYPE_UNCACHEABLE &&
                    (range_entry_tag(r) & MTRR_RANGE_UC_USE_HOLE))
                        calc_var_mtrrs_with_hole(&var_state, r);
                else
                        calc_var_mtrrs_without_hole(&var_state, r);
        }

        /* Clear all remaining variable MTRRs. */
        for (i = var_state.mtrr_index; i < total_mtrrs; i++)
                clear_var_mtrr(i);
}

void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
        static int mtrr_default_type = -1;
        struct memranges *addr_space;

        addr_space = get_physical_address_space();

        if (mtrr_default_type == -1) {
                if (above4gb == 2)
                        detect_var_mtrrs();
                mtrr_default_type =
                        calc_var_mtrrs(addr_space, !!above4gb, address_bits);
        }

        disable_cache();
        commit_var_mtrrs(addr_space, mtrr_default_type, !!above4gb,
                         address_bits);
        enable_var_mtrr(mtrr_default_type);
        enable_cache();
}

void x86_setup_mtrrs(void)
{
        int address_size;

        x86_setup_fixed_mtrrs();
        address_size = cpu_phys_address_size();
        printk(BIOS_DEBUG, "CPU physical address size: %d bits\n", address_size);
        x86_setup_var_mtrrs(address_size, 1);
}

int x86_mtrr_check(void)
{
        /* Only Pentium Pro and later have MTRR */
        msr_t msr;
        printk(BIOS_DEBUG, "\nMTRR check\n");

        msr = rdmsr(0x2ff); /* MTRRdefType_MSR */
        msr.lo >>= 10;

        printk(BIOS_DEBUG, "Fixed MTRRs   : ");
        if (msr.lo & 0x01)
                printk(BIOS_DEBUG, "Enabled\n");
        else
                printk(BIOS_DEBUG, "Disabled\n");

        printk(BIOS_DEBUG, "Variable MTRRs: ");
        if (msr.lo & 0x02)
                printk(BIOS_DEBUG, "Enabled\n");
        else
                printk(BIOS_DEBUG, "Disabled\n");

        printk(BIOS_DEBUG, "\n");

        post_code(0x93);
        return ((int) msr.lo);
}