/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 *
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Copyright 2000 Silicon Integrated System Corporation
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System Programming
 */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <console/console.h>
#include <device/device.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
#include <memrange.h>
#if CONFIG_X86_AMD_FIXED_MTRRS
#include <cpu/amd/mtrr.h>
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif
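
/*
 * Note (AMD-specific assumption): on AMD parts the fixed-range MTRR type
 * bytes carry extra RdMem/WrMem attribute bits that route accesses to DRAM
 * rather than MMIO. These are OR'd into write-back entries below so that WB
 * regions actually hit memory when the fixed-range extensions are enabled.
 */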

/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)

static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;

static void detect_var_mtrrs(void)
{
	msr_t msr;

	msr = rdmsr(MTRRcap_MSR);

	total_mtrrs = msr.lo & 0xff;
	bios_mtrrs = total_mtrrs - OS_MTRRS;
}
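
/*
 * Example: the low byte of MTRRcap_MSR is the VCNT field. A CPU reporting
 * VCNT == 10 would leave bios_mtrrs == 8 for firmware use after holding
 * back OS_MTRRS == 2 for the operating system.
 */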

void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
	wrmsr(MTRRdefType_MSR, msr);
}

static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRRdefTypeEn | deftype;
	wrmsr(MTRRdefType_MSR, msr);
}

/* fms: find most significant bit set, stolen from Linux Kernel Source. */
static inline unsigned int fms(unsigned int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}

/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $32,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}
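
/*
 * Example: for x = 0x5000 (bits 12 and 14 set), fms(x) returns 14 and
 * fls(x) returns 12. Together these bound the largest naturally aligned,
 * power-of-two sized chunk a single variable MTRR can cover at a base.
 */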

#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* The minimum alignment while handling variable MTRR ranges is 64MiB. */
#define MTRR_MIN_ALIGN PHYS_TO_RANGE_ADDR(64 << 20)
/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
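
/*
 * Worked example of the range-address arithmetic: the 4GiB boundary
 * 0x100000000 becomes PHYS_TO_RANGE_ADDR(1ULL << 32) == 1 << 20 ==
 * RANGE_4GB, and MTRR_MIN_ALIGN is (64 << 20) >> 12 == 0x4000 4KiB pages.
 * A 32-bit range address therefore spans 2^(32 + 12) == 2^44 bytes.
 */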

/*
 * The default MTRR type selection uses 3 approaches for selecting the
 * optimal number of variable MTRRs. For each range do 3 calculations:
 *   1. UC as default type with no holes at top of range.
 *   2. UC as default using holes at top of range.
 *   3. WB as default.
 * If using holes is optimal for a range when UC is the default type the
 * tag is updated to direct the commit routine to use a hole at the top
 * of a range.
 */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
/* If the default type is UC use the hole carving algorithm for a range. */
#define MTRR_RANGE_UC_USE_HOLE (1 << MTRR_ALGO_SHIFT)
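
/*
 * Tag layout sketch: bits 7:0 of a range entry's tag hold the MTRR type
 * (extracted with MTRR_TAG_MASK) and bit 8 (MTRR_RANGE_UC_USE_HOLE) flags
 * that the commit routine should carve a default-type hole at the top of
 * the range. A tag of MTRR_TYPE_WRBACK | MTRR_RANGE_UC_USE_HOLE thus reads
 * as "write-back, committed with the hole algorithm".
 */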

static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}

static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		struct range_entry *r;
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources with the IORESOURCE_WRCOMB flag are appropriate
		 * for this MTRR type. */
		match = IORESOURCE_PREFETCH | IORESOURCE_WRCOMB;
		mask |= match;
		memranges_add_resources(addr_space, mask, match,
					MTRR_TYPE_WRCOMB);

#if CONFIG_CACHE_ROM
		/* Add a write-protect region covering the ROM size
		 * when CONFIG_CACHE_ROM is enabled. The ROM is assumed
		 * to be located at 4GiB - rom size. */
		resource_t rom_base = RANGE_TO_PHYS_ADDR(
			RANGE_4GB - PHYS_TO_RANGE_ADDR(CONFIG_ROM_SIZE));
		memranges_insert(addr_space, rom_base, CONFIG_ROM_SIZE,
				 MTRR_TYPE_WRPROT);
#endif

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
		memranges_each_entry(r, addr_space)
			printk(BIOS_DEBUG,
			       "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
			       range_entry_base(r), range_entry_end(r),
			       range_entry_size(r), range_entry_tag(r));
	}

	return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRRfix64K_00000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRRfix16K_80000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRRfix4K_C0000_MSR },
};
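
/*
 * Descriptor math: 0-512KiB in 64KiB steps gives ranges 0-7, 512KiB-768KiB
 * in 16KiB steps gives ranges 8-23, and 768KiB-1MiB in 4KiB steps gives
 * ranges 24-87, for NUM_FIXED_RANGES == 88 entries packed 8 per MSR into
 * NUM_FIXED_MTRRS == 11 fixed MTRR MSRs.
 */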

static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];
	type_index = desc->range_index;

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
			       "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
			       begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
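
/*
 * Example walk: an uncacheable VGA hole at 0xa0000-0xc0000 lands in the
 * 16KiB descriptor, so type_index starts at 8 + (0xa0 - 0x80) / 4 == 16
 * (range addresses are in 4KiB units, step == 4) and the loop marks
 * ranges 16-23 as UC.
 */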

static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	disable_cache();

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++) {
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
		wrmsr(msr_index[i], fixed_msrs[i]);
	}

	enable_cache();
}
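
/*
 * Packing sketch: each 64-bit fixed MTRR holds 8 one-byte type fields,
 * lowest address in the least significant byte. E.g. all-WB 64KiB ranges
 * would make MTRRfix64K_00000 read 0x0606060606060606 (WB == 6), ignoring
 * any AMD RdMem/WrMem bits OR'd in above.
 */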

void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}

/* Keep track of the MTRR that covers the ROM for caching purposes. */
#if CONFIG_CACHE_ROM
static long rom_cache_mtrr = -1;

long x86_mtrr_rom_cache_var_index(void)
{
	return rom_cache_mtrr;
}

void x86_mtrr_enable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	msr_val.lo |= MTRR_TYPE_WRPROT;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}

void x86_mtrr_disable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}
#endif

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int commit_mtrrs;
	int mtrr_index;
	int def_mtrr_type;
};

static void clear_var_mtrr(int index)
{
	msr_t msr_val;

	msr_val = rdmsr(MTRRphysMask_MSR(index));
	msr_val.lo &= ~MTRRphysMaskValid;
	wrmsr(MTRRphysMask_MSR(index), msr_val);
}

static void write_var_mtrr(struct var_mtrr_state *var_state,
			   uint32_t base, uint32_t size, int mtrr_type)
{
	msr_t msr_val;
	unsigned long msr_index;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are reserved for OS use. However, it's more
	 * important to try to map the full address space properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available!\n");
		return;
	}

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

#if CONFIG_CACHE_ROM
	/* CONFIG_CACHE_ROM allocates an MTRR specifically for allowing
	 * one to turn on caching for faster ROM access. However, it is
	 * left to the MTRR callers to enable it. */
	if (mtrr_type == MTRR_TYPE_WRPROT) {
		mtrr_type = MTRR_TYPE_UNCACHEABLE;
		if (rom_cache_mtrr < 0)
			rom_cache_mtrr = var_state->mtrr_index;
	}
#endif

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	msr_val.lo = rbase;
	msr_val.lo |= mtrr_type;

	msr_val.hi = rbase >> 32;
	msr_index = MTRRphysBase_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);

	msr_val.lo = rsize;
	msr_val.lo |= MTRRphysMaskValid;
	msr_val.hi = rsize >> 32;
	msr_index = MTRRphysMask_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);
}
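
/*
 * Encoding example, assuming 36 physical address bits: a 256MiB WRCOMB
 * range at 0xd0000000 yields PhysBase = 0xd0000000 | 1 (type WC) and
 * PhysMask = (-0x10000000 & 0xfffffffff) | MTRRphysMaskValid ==
 * 0xff0000000 plus the valid bit, i.e. all address bits that must match.
 */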

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint32_t base, uint32_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint32_t mtrr_size;

		addr_lsb = fls(base);
		size_msb = fms(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1 << size_msb;
		else
			mtrr_size = 1 << addr_lsb;

		if (var_state->commit_mtrrs)
			write_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
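
/*
 * Splitting example (range-address units): a WB range at 8MiB with size
 * 12MiB enters as base 0x800, size 0xc00. Pass 1: fls(0x800) == 11 and
 * fms(0xc00) == 11, so an 8MiB MTRR is written at 8MiB. Pass 2: base
 * 0x1000, size 0x400 gives a 4MiB MTRR at 16MiB. Two MTRRs total.
 */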
513
Aaron Durbine3834422013-03-28 20:48:51 -0500514static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
515 struct range_entry *r)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500516{
Aaron Durbine3834422013-03-28 20:48:51 -0500517 uint32_t a1, a2, b1, b2;
518 int mtrr_type;
519 struct range_entry *next;
520
521 /*
522 * Determine MTRRs based on the following algoirthm for the given entry:
523 * +------------------+ b2 = ALIGN_UP(end)
524 * | 0 or more bytes | <-- hole is carved out between b1 and b2
525 * +------------------+ a2 = b1 = end
526 * | |
527 * +------------------+ a1 = begin
528 *
529 * Thus, there are 3 sub-ranges to configure variable MTRRs for.
530 */
531 mtrr_type = range_entry_mtrr_type(r);
532
533 a1 = range_entry_base_mtrr_addr(r);
534 a2 = range_entry_end_mtrr_addr(r);
535
536 /* The end address is under 1MiB. The fixed MTRRs take
537 * precedence over the variable ones. Therefore this range
538 * can be ignored. */
539 if (a2 < RANGE_1MB)
540 return;
541
542 /* Again, the fixed MTRRs take precedence so the beginning
543 * of the range can be set to 0 if it starts below 1MiB. */
544 if (a1 < RANGE_1MB)
545 a1 = 0;
546
547 /* If the range starts above 4GiB the processing is done. */
548 if (!var_state->above4gb && a1 >= RANGE_4GB)
549 return;
550
551 /* Clip the upper address to 4GiB if addresses above 4GiB
552 * are not being processed. */
553 if (!var_state->above4gb && a2 > RANGE_4GB)
554 a2 = RANGE_4GB;
555
Aaron Durbin53924242013-03-29 11:48:27 -0500556 next = memranges_next_entry(var_state->addr_space, r);
557
Aaron Durbine3834422013-03-28 20:48:51 -0500558 b1 = a2;
Aaron Durbin53924242013-03-29 11:48:27 -0500559
560 /* First check if a1 is >= 4GiB and the current etnry is the last
561 * entry. If so perform an optimization of covering a larger range
562 * defined by the base address' alignment. */
563 if (a1 >= RANGE_4GB && next == NULL) {
564 uint32_t addr_lsb;
565
566 addr_lsb = fls(a1);
567 b2 = (1 << addr_lsb) + a1;
568 if (b2 >= a2) {
569 calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
570 return;
571 }
572 }
573
574 /* Handle the min alignment roundup case. */
Aaron Durbine3834422013-03-28 20:48:51 -0500575 b2 = ALIGN_UP(a2, MTRR_MIN_ALIGN);
576
577 /* Check against the next range. If the current range_entry is the
578 * last entry then carving a hole is no problem. If the current entry
579 * isn't the last entry then check that the last entry covers the
580 * entire hole range with the default mtrr type. */
Aaron Durbine3834422013-03-28 20:48:51 -0500581 if (next != NULL &&
582 (range_entry_mtrr_type(next) != var_state->def_mtrr_type ||
583 range_entry_end_mtrr_addr(next) < b2)) {
584 calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
585 return;
586 }
587
588 calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
589 calc_var_mtrr_range(var_state, b1, b2 - b1, var_state->def_mtrr_type);
590}
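
/*
 * Hole example: WB memory ending at 0xcf800000 under a UC default is not
 * 64MiB aligned. Covering [0, 0xd0000000) WB costs 3 MTRRs (2GiB + 1GiB +
 * 256MiB) plus 1 UC MTRR for the 8MiB hole, while covering up to
 * 0xcf800000 exactly would take 7 MTRRs as the unaligned tail splinters.
 */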

static void calc_var_mtrrs_without_hole(struct var_mtrr_state *var_state,
					struct range_entry *r)
{
	uint32_t a1, a2, b1, b2, c1, c2;
	int mtrr_type;

	/*
	 * For each range that meets the non-default type process it in the
	 * following manner:
	 * +------------------+ c2 = end
	 * |  0 or more bytes |
	 * +------------------+ b2 = c1 = ALIGN_DOWN(end)
	 * |                  |
	 * +------------------+ b1 = a2 = ALIGN_UP(begin)
	 * |  0 or more bytes |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are 3 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	c2 = range_entry_end_mtrr_addr(r);

	/* The end address is under 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (c2 < RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts below 1MiB. */
	if (a1 < RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && c2 > RANGE_4GB)
		c2 = RANGE_4GB;

	/* Don't align up or down on the range if it is smaller
	 * than the minimum granularity. */
	if ((c2 - a1) < MTRR_MIN_ALIGN) {
		calc_var_mtrr_range(var_state, a1, c2 - a1, mtrr_type);
		return;
	}

	b1 = a2 = ALIGN_UP(a1, MTRR_MIN_ALIGN);
	b2 = c1 = ALIGN_DOWN(c2, MTRR_MIN_ALIGN);

	calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
	calc_var_mtrr_range(var_state, b1, b2 - b1, mtrr_type);
	calc_var_mtrr_range(var_state, c1, c2 - c1, mtrr_type);
}
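
/*
 * Alignment example: a WRCOMB range [0xd0000000, 0xe0100000) splits at
 * 64MiB boundaries into a1 == a2 == b1 == 0xd0000000 (already aligned, so
 * the low piece is empty), b2 == c1 == 0xe0000000, and a 1MiB tail c1-c2.
 * It ends up as one 256MiB MTRR plus one 1MiB MTRR.
 */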

static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.commit_mtrrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 3 calculations:
	 *   1. UC as default type with no holes at top of range.
	 *   2. UC as default using holes at top of range.
	 *   3. WB as default.
	 * The lowest count is then used as default after totalling all
	 * MTRRs. Note that the optimal algorithm for UC default is marked in
	 * the tag of each range regardless of final decision. UC takes
	 * precedence in the MTRR architecture. Therefore, only holes can be
	 * used when the type of the region is MTRR_TYPE_WRBACK with
	 * MTRR_TYPE_UNCACHEABLE as the default type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			int uc_hole_count;
			int uc_no_hole_count;

			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			var_state.mtrr_index = 0;

			/* No hole calculation. */
			calc_var_mtrrs_without_hole(&var_state, r);
			uc_no_hole_count = var_state.mtrr_index;

			/* Hole calculation only if type is WB. A count of
			 * 64 is unachievable, making it a large default
			 * in the case the hole calculation is not done. */
			uc_hole_count = 64;
			if (mtrr_type == MTRR_TYPE_WRBACK) {
				var_state.mtrr_index = 0;
				calc_var_mtrrs_with_hole(&var_state, r);
				uc_hole_count = var_state.mtrr_index;
			}

			/* Mark the entry with the optimal algorithm. */
			if (uc_no_hole_count < uc_hole_count) {
				uc_deftype_count += uc_no_hole_count;
			} else {
				unsigned long new_tag;

				new_tag = mtrr_type | MTRR_RANGE_UC_USE_HOLE;
				range_entry_update_tag(r, new_tag);
				uc_deftype_count += uc_hole_count;
			}
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_without_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}

static void commit_var_mtrrs(struct memranges *addr_space, int def_type,
			     int above4gb, int address_bits)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;
	int i;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Write the MSRs. */
	var_state.commit_mtrrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;

	memranges_each_entry(r, var_state.addr_space) {
		if (range_entry_mtrr_type(r) == def_type)
			continue;

		if (def_type == MTRR_TYPE_UNCACHEABLE &&
		    (range_entry_tag(r) & MTRR_RANGE_UC_USE_HOLE))
			calc_var_mtrrs_with_hole(&var_state, r);
		else
			calc_var_mtrrs_without_hole(&var_state, r);
	}

	/* Clear all remaining variable MTRRs. */
	for (i = var_state.mtrr_index; i < total_mtrrs; i++)
		clear_var_mtrr(i);
}

void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static int mtrr_default_type = -1;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (mtrr_default_type == -1) {
		if (above4gb == 2)
			detect_var_mtrrs();
		mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
	}

	disable_cache();
	commit_var_mtrrs(addr_space, mtrr_default_type, !!above4gb,
			 address_bits);
	enable_var_mtrr(mtrr_default_type);
	enable_cache();
}

void x86_setup_mtrrs(void)
{
	int address_size;
	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n", address_size);
	x86_setup_var_mtrrs(address_size, 1);
}

int x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(0x2ff);
	msr.lo >>= 10;

	printk(BIOS_DEBUG, "Fixed MTRRs   : ");
	if (msr.lo & 0x01)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & 0x02)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	post_code(0x93);
	return ((int) msr.lo);
}