blob: 60eee319ceecf9881313aa2752eef33a01f4c32a [file] [log] [blame]
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00001/*
Stefan Reinauercdc5cc62007-04-24 18:40:02 +00002 * mtrr.c: setting MTRR to decent values for cache initialization on P6
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00003 *
4 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
5 *
6 * Copyright 2000 Silicon Integrated System Corporation
Aaron Durbinbb4e79a2013-03-26 14:09:47 -05007 * Copyright 2013 Google Inc.
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000019 *
Lee Leahyc5917072017-03-15 16:38:51 -070020 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
21 * Programming
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000022 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000023
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +000024#include <stddef.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050025#include <stdlib.h>
26#include <string.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050027#include <bootstate.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000028#include <console/console.h>
29#include <device/device.h>
Aaron Durbinca4f4b82014-02-08 15:41:52 -060030#include <device/pci_ids.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050031#include <cpu/cpu.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000032#include <cpu/x86/msr.h>
33#include <cpu/x86/mtrr.h>
34#include <cpu/x86/cache.h>
Stefan Reinauer00093a82011-11-02 16:12:34 -070035#include <cpu/x86/lapic.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050036#include <memrange.h>
Aaron Durbin57686f82013-03-20 15:50:59 -050037#include <cpu/amd/mtrr.h>
Richard Spiegelb28025a2019-02-20 11:00:19 -070038#include <assert.h>
/* On AMD CPUs the fixed MTRRs carry extra RdMem/WrMem bits that must be set
 * for write-back DRAM ranges; on other CPUs no extra bits are needed. */
#if CONFIG(X86_AMD_FIXED_MTRRS)
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000044
/* 2 MTRRS are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)
/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
 * yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

/* Both counts start at the compile-time defaults and are refined at runtime
 * by detect_var_mtrrs() from MTRR_CAP_MSR. */
static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;
58
59static void detect_var_mtrrs(void)
60{
61 msr_t msr;
62
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070063 msr = rdmsr(MTRR_CAP_MSR);
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070064
65 total_mtrrs = msr.lo & 0xff;
Gabe Black7756fe72014-02-25 01:40:34 -080066
67 if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
68 printk(BIOS_WARNING,
69 "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
70 total_mtrrs, NUM_MTRR_STATIC_STORAGE);
71 total_mtrrs = NUM_MTRR_STATIC_STORAGE;
72 }
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070073 bios_mtrrs = total_mtrrs - OS_MTRRS;
74}
75
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000076void enable_fixed_mtrr(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000077{
78 msr_t msr;
79
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070080 msr = rdmsr(MTRR_DEF_TYPE_MSR);
81 msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
82 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000083}
84
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060085void fixed_mtrrs_expose_amd_rwdram(void)
86{
87 msr_t syscfg;
88
Julius Wernercd49cce2019-03-05 16:53:33 -080089 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060090 return;
91
92 syscfg = rdmsr(SYSCFG_MSR);
93 syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
94 wrmsr(SYSCFG_MSR, syscfg);
95}
96
97void fixed_mtrrs_hide_amd_rwdram(void)
98{
99 msr_t syscfg;
100
Julius Wernercd49cce2019-03-05 16:53:33 -0800101 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600102 return;
103
104 syscfg = rdmsr(SYSCFG_MSR);
105 syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
106 wrmsr(SYSCFG_MSR, syscfg);
107}
108
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500109static void enable_var_mtrr(unsigned char deftype)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000110{
111 msr_t msr;
112
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700113 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500114 msr.lo &= ~0xff;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700115 msr.lo |= MTRR_DEF_TYPE_EN | deftype;
116 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000117}
118
/* Per-range MTRR debug output is normally suppressed. */
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

/* The MTRR type lives in the low bits of a range entry's tag. */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
Aaron Durbine3834422013-03-28 20:48:51 -0500137
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500138static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000139{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500140 return PHYS_TO_RANGE_ADDR(range_entry_base(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000141}
142
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500143static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000144{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500145 return PHYS_TO_RANGE_ADDR(range_entry_end(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000146}
147
Aaron Durbine3834422013-03-28 20:48:51 -0500148static inline int range_entry_mtrr_type(struct range_entry *r)
149{
150 return range_entry_tag(r) & MTRR_TAG_MASK;
151}
152
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600153static int filter_vga_wrcomb(struct device *dev, struct resource *res)
154{
155 /* Only handle PCI devices. */
156 if (dev->path.type != DEVICE_PATH_PCI)
157 return 0;
158
159 /* Only handle VGA class devices. */
160 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
161 return 0;
162
163 /* Add resource as write-combining in the address space. */
164 return 1;
165}
166
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600167static void print_physical_address_space(const struct memranges *addr_space,
168 const char *identifier)
169{
170 const struct range_entry *r;
171
172 if (identifier)
173 printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
174 identifier);
175 else
176 printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
177
178 memranges_each_entry(r, addr_space)
179 printk(BIOS_DEBUG,
180 "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
181 range_entry_base(r), range_entry_end(r),
182 range_entry_size(r), range_entry_tag(r));
183}
184
/* Build (once, lazily) and return the canonical physical address space map
 * used by all MTRR calculations. Cacheable resources are inserted as WB,
 * uncacheable ones as UC (UC wins on overlap), prefetchable VGA resources
 * as WC, and all holes below 4GiB are filled as UC. Subsequent calls return
 * the cached map. */
static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
					MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
					RANGE_TO_PHYS_ADDR(RANGE_4GB),
					MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
228
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* first covered range address (inclusive) */
	uint32_t end;		/* last covered range address (exclusive) */
	uint32_t step;		/* granularity of one sub-range */
	int range_index;	/* index into fixed_mtrr_types[] for begin */
	int msr_index_base;	/* MSR number of the first MSR in this set */
};
240
/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: the classic layout of 8x64KiB below 512KiB,
 * 16x16KiB up to 768KiB, and 64x4KiB up to 1MiB. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
253
/* Fill fixed_mtrr_types[] (one type byte per fixed sub-range below 1MiB)
 * from the physical address space map. Runs once; the result is cached so
 * APs can reuse it. Walks the range entries and the fixed MTRR descriptors
 * in lockstep, both being sorted by address. */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Entries are sorted; past 1MiB nothing is covered by
		 * fixed MTRRs any more. */
		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		/* Stamp this entry's type into every sub-range it covers,
		 * advancing to the next descriptor at each boundary. */
		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
			       "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
			       begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
311
312static void commit_fixed_mtrrs(void)
313{
314 int i;
315 int j;
316 int msr_num;
317 int type_index;
318 /* 8 ranges per msr. */
319 msr_t fixed_msrs[NUM_FIXED_MTRRS];
320 unsigned long msr_index[NUM_FIXED_MTRRS];
321
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600322 fixed_mtrrs_expose_amd_rwdram();
323
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500324 memset(&fixed_msrs, 0, sizeof(fixed_msrs));
325
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500326 msr_num = 0;
327 type_index = 0;
328 for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
329 const struct fixed_mtrr_desc *desc;
330 int num_ranges;
331
332 desc = &fixed_mtrr_desc[i];
333 num_ranges = (desc->end - desc->begin) / desc->step;
334 for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
335 msr_index[msr_num] = desc->msr_index_base +
336 (j / RANGES_PER_FIXED_MTRR);
337 fixed_msrs[msr_num].lo |=
338 fixed_mtrr_types[type_index++] << 0;
339 fixed_msrs[msr_num].lo |=
340 fixed_mtrr_types[type_index++] << 8;
341 fixed_msrs[msr_num].lo |=
342 fixed_mtrr_types[type_index++] << 16;
343 fixed_msrs[msr_num].lo |=
344 fixed_mtrr_types[type_index++] << 24;
345 fixed_msrs[msr_num].hi |=
346 fixed_mtrr_types[type_index++] << 0;
347 fixed_msrs[msr_num].hi |=
348 fixed_mtrr_types[type_index++] << 8;
349 fixed_msrs[msr_num].hi |=
350 fixed_mtrr_types[type_index++] << 16;
351 fixed_msrs[msr_num].hi |=
352 fixed_mtrr_types[type_index++] << 24;
353 msr_num++;
354 }
355 }
356
Jacob Garber5b922722019-05-28 11:47:49 -0600357 /* Ensure that both arrays were fully initialized */
358 ASSERT(msr_num == NUM_FIXED_MTRRS)
359
Gabe Black7756fe72014-02-25 01:40:34 -0800360 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500361 printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
362 msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500363
Gabe Black7756fe72014-02-25 01:40:34 -0800364 disable_cache();
365 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
366 wrmsr(msr_index[i], fixed_msrs[i]);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500367 enable_cache();
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600368 fixed_mtrrs_hide_amd_rwdram();
369
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +0000370}
371
/* Compute and write the fixed MTRRs without setting the enable bits in
 * MTRR_DEF_TYPE_MSR; callers that want them active use
 * x86_setup_fixed_mtrrs() instead. */
void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000377
/* Compute, write, and enable the fixed MTRRs. */
void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}
385
/* Image of one variable MTRR pair (MTRR_PHYS_BASE / MTRR_PHYS_MASK). */
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

/* A fully computed variable-MTRR setup: chosen default type plus the MSR
 * images for each used variable MTRR. */
struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;
399
/* Working state threaded through the variable-MTRR calculation passes. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* address space being mapped */
	int above4gb;		/* nonzero: also process ranges above 4GiB */
	int address_bits;	/* CPU physical address width */
	int prepare_msrs;	/* nonzero: fill regs[]; zero: count only */
	int mtrr_index;		/* next variable MTRR slot / running count */
	int def_mtrr_type;	/* default type assumed for this pass */
	struct var_mtrr_regs *regs;	/* output MSR images (when preparing) */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500409
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500410static void clear_var_mtrr(int index)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000411{
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600412 msr_t msr = { .lo = 0, .hi = 0 };
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500413
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600414 wrmsr(MTRR_PHYS_BASE(index), msr);
415 wrmsr(MTRR_PHYS_MASK(index), msr);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500416}
417
/* Encode one variable MTRR (base/mask MSR pair) for the given range-address
 * base and power-of-two size into var_state->regs[mtrr_index]. Emits a
 * warning when dipping into the MTRRs reserved for the OS and bails out
 * (range left unmapped) when the hardware supply is exhausted. */
static void prep_var_mtrr(struct var_mtrr_state *var_state,
			uint32_t base, uint32_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are attempted to be saved for the OS use.
	 * However, it's more important to try to map the full address space
	 * properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
		       var_state->mtrr_index, total_mtrrs);
		return;
	}

	/* Convert from 4KiB range-address units back to physical bytes. */
	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	/* The MTRR mask is the two's complement of the (power-of-two) size,
	 * truncated to the CPU's physical address width. */
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	/* Memory type lives in the low bits of the base MSR. */
	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
460
461static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700462 uint32_t base, uint32_t size, int mtrr_type)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500463{
464 while (size != 0) {
465 uint32_t addr_lsb;
466 uint32_t size_msb;
467 uint32_t mtrr_size;
468
469 addr_lsb = fls(base);
470 size_msb = fms(size);
471
472 /* All MTRR entries need to have their base aligned to the mask
473 * size. The maximum size is calculated by a function of the
474 * min base bit set and maximum size bit set. */
475 if (addr_lsb > size_msb)
476 mtrr_size = 1 << size_msb;
477 else
478 mtrr_size = 1 << addr_lsb;
479
Gabe Black7756fe72014-02-25 01:40:34 -0800480 if (var_state->prepare_msrs)
481 prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500482
483 size -= mtrr_size;
484 base += mtrr_size;
485 var_state->mtrr_index++;
486 }
487}
488
Nico Huberbd5fb662017-10-07 13:40:19 +0200489static uint32_t optimize_var_mtrr_hole(const uint32_t base,
490 const uint32_t hole,
491 const uint64_t limit,
492 const int carve_hole)
493{
494 /*
495 * With default type UC, we can potentially optimize a WB
496 * range with unaligned upper end, by aligning it up and
497 * carving the added "hole" out again.
498 *
499 * To optimize the upper end of the hole, we will test
500 * how many MTRRs calc_var_mtrr_range() will spend for any
501 * alignment of the hole's upper end.
502 *
503 * We take four parameters, the lower end of the WB range
504 * `base`, upper end of the WB range as start of the `hole`,
505 * a `limit` how far we may align the upper end of the hole
506 * up and a flag `carve_hole` whether we should count MTRRs
507 * for carving the hole out. We return the optimal upper end
508 * for the hole (which may be the same as the end of the WB
509 * range in case we don't gain anything by aligning up).
510 */
511
512 const int dont_care = 0;
513 struct var_mtrr_state var_state = { 0, };
514
515 unsigned int align, best_count;
516 uint32_t best_end = hole;
517
518 /* calculate MTRR count for the WB range alone (w/o a hole) */
519 calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
520 best_count = var_state.mtrr_index;
521 var_state.mtrr_index = 0;
522
523 for (align = fls(hole) + 1; align <= fms(hole); ++align) {
524 const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
525 if (hole_end > limit)
526 break;
527
528 /* calculate MTRR count for this alignment */
529 calc_var_mtrr_range(
530 &var_state, base, hole_end - base, dont_care);
531 if (carve_hole)
532 calc_var_mtrr_range(
533 &var_state, hole, hole_end - hole, dont_care);
534
535 if (var_state.mtrr_index < best_count) {
536 best_count = var_state.mtrr_index;
537 best_end = hole_end;
538 }
539 var_state.mtrr_index = 0;
540 }
541
542 return best_end;
543}
544
/* Emit (or count, depending on var_state->prepare_msrs) the variable MTRRs
 * for a single range entry, optionally rounding a WB range's upper end up
 * and carving the surplus back out as a UC hole when that saves MTRRs. */
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			/* NOTE(review): `1 << fms(b1)` is a signed shift; if
			 * fms(b1) == 31 this is UB — consider 1ULL here. */
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}
638
/* Count (dry run, no MSRs written) how many variable MTRRs the address
 * space would cost with UC as the default type and with WB as the default
 * type, returning both totals through the out-parameters. */
static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* A range matching the default type costs nothing; only
		 * count it under the default it does NOT match. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}
	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
691
692static int calc_var_mtrrs(struct memranges *addr_space,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700693 int above4gb, int address_bits)
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600694{
695 int wb_deftype_count = 0;
696 int uc_deftype_count = 0;
697
698 __calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700699 &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600700
701 if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
702 printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
703 "WB/UC MTRR counts: %d/%d > %d.\n",
704 wb_deftype_count, uc_deftype_count, bios_mtrrs);
705 memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700706 MTRR_TYPE_UNCACHEABLE);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600707 __calc_var_mtrrs(addr_space, above4gb, address_bits,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700708 &wb_deftype_count, &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600709 }
Scott Duplichanf3cce2f2010-11-13 19:07:59 +0000710
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500711 printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
712 wb_deftype_count, uc_deftype_count);
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300713
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500714 if (wb_deftype_count < uc_deftype_count) {
715 printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
716 return MTRR_TYPE_WRBACK;
717 }
718 printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
719 return MTRR_TYPE_UNCACHEABLE;
720}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300721
Gabe Black7756fe72014-02-25 01:40:34 -0800722static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
723 int above4gb, int address_bits,
724 struct var_mtrr_solution *sol)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500725{
Aaron Durbine3834422013-03-28 20:48:51 -0500726 struct range_entry *r;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500727 struct var_mtrr_state var_state;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500728
729 var_state.addr_space = addr_space;
730 var_state.above4gb = above4gb;
731 var_state.address_bits = address_bits;
Gabe Black7756fe72014-02-25 01:40:34 -0800732 /* Prepare the MSRs. */
733 var_state.prepare_msrs = 1;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500734 var_state.mtrr_index = 0;
735 var_state.def_mtrr_type = def_type;
Gabe Black7756fe72014-02-25 01:40:34 -0800736 var_state.regs = &sol->regs[0];
Aaron Durbine3834422013-03-28 20:48:51 -0500737
738 memranges_each_entry(r, var_state.addr_space) {
739 if (range_entry_mtrr_type(r) == def_type)
740 continue;
Nico Huber64f0bcb2017-10-07 16:37:04 +0200741 calc_var_mtrrs_with_hole(&var_state, r);
Aaron Durbine3834422013-03-28 20:48:51 -0500742 }
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500743
Gabe Black7756fe72014-02-25 01:40:34 -0800744 /* Update the solution. */
745 sol->num_used = var_state.mtrr_index;
746}
747
Aaron Durbind9762f72017-06-12 12:48:38 -0500748static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800749{
750 int i;
751
Aaron Durbind9762f72017-06-12 12:48:38 -0500752 if (sol->num_used > total_mtrrs) {
753 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
754 sol->num_used, total_mtrrs);
755 return -1;
756 }
757
Isaac Christensen81f90c52014-09-24 14:59:32 -0600758 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800759 disable_cache();
760 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700761 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
762 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800763 }
764 /* Clear the ones that are unused. */
765 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500766 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600767 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800768 enable_cache();
769
Aaron Durbind9762f72017-06-12 12:48:38 -0500770 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500771}
772
773void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
774{
Gabe Black7756fe72014-02-25 01:40:34 -0800775 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500776 struct memranges *addr_space;
777
778 addr_space = get_physical_address_space();
779
Gabe Black7756fe72014-02-25 01:40:34 -0800780 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800781 sol = &mtrr_global_solution;
782 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500783 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800784 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
785 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000786 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700787
Gabe Black7756fe72014-02-25 01:40:34 -0800788 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000789}
790
/*
 * Program both the fixed and the variable MTRRs, sizing the variable
 * MTRR masks by the CPU-reported physical address width.
 */
void x86_setup_mtrrs(void)
{
	int address_size;

	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
	       address_size);
	/* Always handle addresses above 4GiB. */
	x86_setup_var_mtrrs(address_size, 1);
}
802
/*
 * Same as x86_setup_mtrrs(), but first (re-)detect how many variable
 * MTRRs the CPU supports.
 */
void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	x86_setup_mtrrs();
}
808
/*
 * Debug helper: report whether the fixed and variable MTRRs are enabled,
 * as read from the MTRR default-type MSR.
 */
void x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(MTRR_DEF_TYPE_MSR);

	printk(BIOS_DEBUG, "Fixed MTRRs : ");
	if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & MTRR_DEF_TYPE_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	post_code(0x93);
}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600833
834static bool put_back_original_solution;
835
/*
 * Temporarily map [begin, begin + size) with the given MTRR @type by
 * recomputing and committing a variable-MTRR solution over a tweaked
 * copy of the physical address space.  The original solution is put
 * back by remove_temp_solution() at the boot states registered below.
 */
void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
{
	const struct range_entry *r;
	const struct memranges *orig;
	struct var_mtrr_solution sol;
	struct memranges addr_space;
	const int above4gb = 1; /* Cover above 4GiB by default. */
	int address_bits;

	/* Make a copy of the original address space and tweak it with the
	 * provided range. */
	memranges_init_empty(&addr_space, NULL, 0);
	orig = get_physical_address_space();
	memranges_each_entry(r, orig) {
		unsigned long tag = range_entry_tag(r);

		/* Remove any write combining MTRRs from the temporary
		 * solution as it just fragments the address space. */
		if (tag == MTRR_TYPE_WRCOMB)
			tag = MTRR_TYPE_UNCACHEABLE;

		memranges_insert(&addr_space, range_entry_base(r),
				 range_entry_size(r), tag);
	}

	/* Place new range into the address space. */
	memranges_insert(&addr_space, begin, size, type);

	print_physical_address_space(&addr_space, "TEMPORARY");

	/* Calculate a new solution with the updated address space. */
	address_bits = cpu_phys_address_size();
	memset(&sol, 0, sizeof(sol));
	sol.mtrr_default_type =
		calc_var_mtrrs(&addr_space, above4gb, address_bits);
	prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
			  above4gb, address_bits, &sol);

	if (commit_var_mtrrs(&sol) < 0)
		printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
		       (long long)begin, (long long)begin + size,
		       (long long)size, type);
	else
		/* Only arm the restore hook if the MSRs actually changed. */
		put_back_original_solution = true;

	/* The temporary memranges copy is no longer needed. */
	memranges_teardown(&addr_space);
}
883
884static void remove_temp_solution(void *unused)
885{
886 if (put_back_original_solution)
887 commit_var_mtrrs(&mtrr_global_solution);
888}
889
/* Undo any temporary MTRR range before the OS resumes and before the
 * payload is handed control at the end of boot. */
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);