/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 *
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Copyright 2000 Silicon Integrated System Corporation
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3:
 * System Programming
 */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <console/console.h>
#include <device/device.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
#include <memrange.h>
#if CONFIG_X86_AMD_FIXED_MTRRS
#include <cpu/amd/mtrr.h>
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif

/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)

static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;

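/* MTRRcap bits 7:0 (VCNT) report the number of variable-range MTRRs the
 * processor implements (Intel SDM, Vol. 3). */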
static void detect_var_mtrrs(void)
{
	msr_t msr;

	msr = rdmsr(MTRRcap_MSR);

	total_mtrrs = msr.lo & 0xff;
	bios_mtrrs = total_mtrrs - OS_MTRRS;
}

void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
	wrmsr(MTRRdefType_MSR, msr);
}

static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRRdefTypeEn | deftype;
	wrmsr(MTRRdefType_MSR, msr);
}

/* fms: find most significant bit set, stolen from Linux Kernel Source. */
static inline unsigned int fms(unsigned int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}

/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $32,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}

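/* Example of the two helpers above (values chosen for illustration):
 * fms(0x5000) is 14, the index of the highest set bit, while fls(0x5000)
 * is 12, the index of the lowest set bit. For x == 0 the fallback paths
 * yield fms(0) == 0 and fls(0) == 32. */
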
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* The minimum alignment while handling variable MTRR ranges is 64MiB. */
#define MTRR_MIN_ALIGN PHYS_TO_RANGE_ADDR(64 << 20)
/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

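/* A quick sanity check of the macros above: with RANGE_SHIFT == 12,
 * PHYS_TO_RANGE_ADDR(0x100000000ULL) == 0x100000 and RANGE_4GB ==
 * 1 << (32 - 12) == 0x100000, so the full 32-bit physical space fits
 * comfortably in a 32-bit range address, with headroom up to 2^44 bytes. */
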
/*
 * The default MTRR type selection uses 3 approaches for selecting the
 * optimal number of variable MTRRs. For each range do 3 calculations:
 * 1. UC as default type with no holes at top of range.
 * 2. UC as default using holes at top of range.
 * 3. WB as default.
 * If using holes is optimal for a range when UC is the default type the
 * tag is updated to direct the commit routine to use a hole at the top
 * of a range.
 */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
/* If the default type is UC use the hole carving algorithm for a range. */
#define MTRR_RANGE_UC_USE_HOLE (1 << MTRR_ALGO_SHIFT)

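/* A range's tag thus packs the MTRR type into bits 7:0 and the algorithm
 * choice into bit 8; e.g. a write-back range committed with a hole carries
 * the tag (MTRR_TYPE_WRBACK | MTRR_RANGE_UC_USE_HOLE). */
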
static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}

static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time, remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		struct range_entry *r;
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources with the IORESOURCE_WRCOMB flag are appropriate
		 * for this MTRR type. */
		match = IORESOURCE_PREFETCH | IORESOURCE_WRCOMB;
		mask |= match;
		memranges_add_resources(addr_space, mask, match,
					MTRR_TYPE_WRCOMB);

#if CONFIG_CACHE_ROM
		/* Add a write-protect region covering the ROM size
		 * when CONFIG_CACHE_ROM is enabled. The ROM is assumed
		 * to be located at 4GiB - rom size. */
		resource_t rom_base = RANGE_TO_PHYS_ADDR(
			RANGE_4GB - PHYS_TO_RANGE_ADDR(CONFIG_ROM_SIZE));
		memranges_insert(addr_space, rom_base, CONFIG_ROM_SIZE,
				 MTRR_TYPE_WRPROT);
#endif

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
		memranges_each_entry(r, addr_space)
			printk(BIOS_DEBUG,
			       "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
			       range_entry_base(r), range_entry_end(r),
			       range_entry_size(r), range_entry_tag(r));
	}

	return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRRfix64K_00000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRRfix16K_80000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRRfix4K_C0000_MSR },
};

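/* Working through the table above: 64KiB steps cover 0x00000-0x7ffff in 8
 * ranges (1 MSR), 16KiB steps cover 0x80000-0xbffff in 16 ranges (2 MSRs),
 * and 4KiB steps cover 0xc0000-0xfffff in 64 ranges (8 MSRs), for a total
 * of 88 ranges packed into 11 fixed MTRR MSRs at 8 ranges per MSR. */
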
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];
	type_index = desc->range_index;

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
			       "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
			       begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}

static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	disable_cache();

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++) {
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
		wrmsr(msr_index[i], fixed_msrs[i]);
	}

	enable_cache();
}

void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}

/* Keep track of the MTRR that covers the ROM for caching purposes. */
#if CONFIG_CACHE_ROM
static long rom_cache_mtrr = -1;

long x86_mtrr_rom_cache_var_index(void)
{
	return rom_cache_mtrr;
}

void x86_mtrr_enable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	msr_val.lo |= MTRR_TYPE_WRPROT;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}

void x86_mtrr_disable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}

void disable_cache_rom(void)
{
	x86_mtrr_disable_rom_caching();
}
#endif

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int commit_mtrrs;
	int mtrr_index;
	int def_mtrr_type;
};

static void clear_var_mtrr(int index)
{
	msr_t msr_val;

	msr_val = rdmsr(MTRRphysMask_MSR(index));
	msr_val.lo &= ~MTRRphysMaskValid;
	wrmsr(MTRRphysMask_MSR(index), msr_val);
}

static void write_var_mtrr(struct var_mtrr_state *var_state,
			   uint32_t base, uint32_t size, int mtrr_type)
{
	msr_t msr_val;
	unsigned long msr_index;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are set aside for the OS to use. However,
	 * it's more important to try to map the full address space
	 * properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available!\n");
		return;
	}

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

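	/* The negate-and-mask above builds the MTRR mask. For example
	 * (values assumed for illustration), with address_bits == 36,
	 * base 0xd0000000 and size 0x10000000, rsize becomes
	 * -0x10000000 & 0xfffffffff == 0xff0000000. That is exactly the
	 * physmask for a 256MiB region, since the hardware matches
	 * (addr & mask) == (base & mask). */
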
#if CONFIG_CACHE_ROM
	/* CONFIG_CACHE_ROM allocates an MTRR specifically for allowing
	 * one to turn on caching for faster ROM access. However, it is
	 * left to the MTRR callers to enable it. */
	if (mtrr_type == MTRR_TYPE_WRPROT) {
		mtrr_type = MTRR_TYPE_UNCACHEABLE;
		if (rom_cache_mtrr < 0)
			rom_cache_mtrr = var_state->mtrr_index;
	}
#endif

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	msr_val.lo = rbase;
	msr_val.lo |= mtrr_type;

	msr_val.hi = rbase >> 32;
	msr_index = MTRRphysBase_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);

	msr_val.lo = rsize;
	msr_val.lo |= MTRRphysMaskValid;
	msr_val.hi = rsize >> 32;
	msr_index = MTRRphysMask_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);
}

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint32_t base, uint32_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint32_t mtrr_size;

		addr_lsb = fls(base);
		size_msb = fms(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1 << size_msb;
		else
			mtrr_size = 1 << addr_lsb;

		if (var_state->commit_mtrrs)
			write_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}

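/* Sketch of the loop above with assumed values: base 0x0 and size 0xd0000
 * (range units, i.e. 3.25GiB) decompose into chunks of 0x80000 (2GiB),
 * 0x40000 (1GiB) and 0x10000 (256MiB). Each chunk is the largest power of
 * two that fits the remaining size while keeping the base aligned to the
 * chunk size, so three variable MTRRs are consumed. */
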
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type;
	struct range_entry *next;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are 3 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is under 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 < RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts below 1MiB. */
	if (a1 < RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	next = memranges_next_entry(var_state->addr_space, r);

	b1 = a2;

	/* First check if a1 is >= 4GiB and the current entry is the last
	 * entry. If so perform an optimization of covering a larger range
	 * defined by the base address' alignment. */
	if (a1 >= RANGE_4GB && next == NULL) {
		uint32_t addr_lsb;

		addr_lsb = fls(a1);
		b2 = (1 << addr_lsb) + a1;
		if (b2 >= a2) {
			calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
			return;
		}
	}

	/* Handle the min alignment roundup case. */
	b2 = ALIGN_UP(a2, MTRR_MIN_ALIGN);

	/* Check against the next range. If the current range_entry is the
	 * last entry then carving a hole is no problem. If the current entry
	 * isn't the last entry then check that the next entry covers the
	 * entire hole range with the default mtrr type. */
	if (next != NULL &&
	    (range_entry_mtrr_type(next) != var_state->def_mtrr_type ||
	     range_entry_end_mtrr_addr(next) < b2)) {
		calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
		return;
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	calc_var_mtrr_range(var_state, b1, b2 - b1, var_state->def_mtrr_type);
}

static void calc_var_mtrrs_without_hole(struct var_mtrr_state *var_state,
					struct range_entry *r)
{
	uint32_t a1, a2, b1, b2, c1, c2;
	int mtrr_type;

	/*
	 * For each range that meets the non-default type process it in the
	 * following manner:
	 * +------------------+ c2 = end
	 * | 0 or more bytes  |
	 * +------------------+ b2 = c1 = ALIGN_DOWN(end)
	 * |                  |
	 * +------------------+ b1 = a2 = ALIGN_UP(begin)
	 * | 0 or more bytes  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are 3 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	c2 = range_entry_end_mtrr_addr(r);

	/* The end address is under 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (c2 < RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts below 1MiB. */
	if (a1 < RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && c2 > RANGE_4GB)
		c2 = RANGE_4GB;

	/* Don't align up or down on the range if it is smaller
	 * than the minimum granularity. */
	if ((c2 - a1) < MTRR_MIN_ALIGN) {
		calc_var_mtrr_range(var_state, a1, c2 - a1, mtrr_type);
		return;
	}

	b1 = a2 = ALIGN_UP(a1, MTRR_MIN_ALIGN);
	b2 = c1 = ALIGN_DOWN(c2, MTRR_MIN_ALIGN);

	calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
	calc_var_mtrr_range(var_state, b1, b2 - b1, mtrr_type);
	calc_var_mtrr_range(var_state, c1, c2 - c1, mtrr_type);
}

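/* Example of the head/middle/tail split above (addresses assumed, shown in
 * physical terms for clarity): a range from 16MiB to 0xc5000000 gives
 * a1 = 16MiB, b1 = a2 = 64MiB (ALIGN_UP), b2 = c1 = 0xc4000000
 * (ALIGN_DOWN) and c2 = 0xc5000000. The unaligned head and tail are
 * chunked separately so the large middle piece can be covered by a few
 * big MTRRs. */
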
static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.commit_mtrrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 3 calculations:
	 * 1. UC as default type with no holes at top of range.
	 * 2. UC as default using holes at top of range.
	 * 3. WB as default.
	 * The lowest count is then used as default after totalling all
	 * MTRRs. Note that the optimal algorithm for UC default is marked in
	 * the tag of each range regardless of final decision. UC takes
	 * precedence in the MTRR architecture. Therefore, only holes can be
	 * used when the type of the region is MTRR_TYPE_WRBACK with
	 * MTRR_TYPE_UNCACHEABLE as the default type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			int uc_hole_count;
			int uc_no_hole_count;

			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			var_state.mtrr_index = 0;

			/* No hole calculation. */
			calc_var_mtrrs_without_hole(&var_state, r);
			uc_no_hole_count = var_state.mtrr_index;

			/* Hole calculation only if type is WB. The number 64
			 * is a count that is unachievable, thus making it
			 * a default large number in the case of not doing
			 * the hole calculation. */
			uc_hole_count = 64;
			if (mtrr_type == MTRR_TYPE_WRBACK) {
				var_state.mtrr_index = 0;
				calc_var_mtrrs_with_hole(&var_state, r);
				uc_hole_count = var_state.mtrr_index;
			}

			/* Mark the entry with the optimal algorithm. */
			if (uc_no_hole_count < uc_hole_count) {
				uc_deftype_count += uc_no_hole_count;
			} else {
				unsigned long new_tag;

				new_tag = mtrr_type | MTRR_RANGE_UC_USE_HOLE;
				range_entry_update_tag(r, new_tag);
				uc_deftype_count += uc_hole_count;
			}
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_without_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}

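/* To see why hole carving can win (numbers assumed for illustration): a WB
 * range from 0 to 0xdff00000 needs 11 power-of-two chunks on its own
 * (2GiB + 1GiB + 256MiB + ... + 1MiB), but rounding it up to 0xe0000000
 * and punching a 1MiB UC hole at the top costs only 3 WB MTRRs plus 1 UC
 * MTRR, 4 in total. */
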
static void commit_var_mtrrs(struct memranges *addr_space, int def_type,
			     int above4gb, int address_bits)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;
	int i;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Write the MSRs. */
	var_state.commit_mtrrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;

	memranges_each_entry(r, var_state.addr_space) {
		if (range_entry_mtrr_type(r) == def_type)
			continue;

		if (def_type == MTRR_TYPE_UNCACHEABLE &&
		    (range_entry_tag(r) & MTRR_RANGE_UC_USE_HOLE))
			calc_var_mtrrs_with_hole(&var_state, r);
		else
			calc_var_mtrrs_without_hole(&var_state, r);
	}

	/* Clear all remaining variable MTRRs. */
	for (i = var_state.mtrr_index; i < total_mtrrs; i++)
		clear_var_mtrr(i);
}

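/* A note on the above4gb argument, inferred from the code below: 0 leaves
 * memory above 4GiB uncovered by variable MTRRs, any non-zero value covers
 * it, and the special value 2 additionally re-reads MTRRcap to detect how
 * many variable MTRRs the CPU actually provides. */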
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static int mtrr_default_type = -1;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (mtrr_default_type == -1) {
		if (above4gb == 2)
			detect_var_mtrrs();
		mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
	}

	disable_cache();
	commit_var_mtrrs(addr_space, mtrr_default_type, !!above4gb,
			 address_bits);
	enable_var_mtrr(mtrr_default_type);
	enable_cache();
}

void x86_setup_mtrrs(void)
{
	int address_size;

	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
	       address_size);
	x86_setup_var_mtrrs(address_size, 1);
}

int x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(0x2ff);
	msr.lo >>= 10;

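	/* 0x2ff is the MTRRdefType MSR (Intel SDM, Vol. 3). After the shift,
	 * bit 0 holds the fixed-range enable (FE) and bit 1 the global MTRR
	 * enable (E). */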
	printk(BIOS_DEBUG, "Fixed MTRRs   : ");
	if (msr.lo & 0x01)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & 0x02)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	post_code(0x93);
	return ((int) msr.lo);
}