Eric Biederman | c84c190 | 2004-10-14 20:13:01 +0000 | [diff] [blame] | 1 | #ifndef CPU_X86_MTRR_H |
| 2 | #define CPU_X86_MTRR_H |
| 3 | |
/* These are the region types (memory-type encodings programmed into the
 * MTRRs). Values 2 and 3 are architecturally reserved, hence the gap
 * between WRCOMB and WRTHROUGH — see the Intel SDM MTRR chapter. */
#define MTRR_TYPE_UNCACHEABLE	0	/* UC: no caching */
#define MTRR_TYPE_WRCOMB	1	/* WC: write-combining */
#define MTRR_TYPE_WRTHROUGH	4	/* WT: write-through */
#define MTRR_TYPE_WRPROT	5	/* WP: write-protected */
#define MTRR_TYPE_WRBACK	6	/* WB: write-back */
#define MTRR_NUM_TYPES		7	/* one past the highest valid type */
Eric Biederman | c84c190 | 2004-10-14 20:13:01 +0000 | [diff] [blame] | 11 | |
/* IA32_MTRRCAP: read-only MSR describing the MTRR capabilities. */
#define MTRR_CAP_MSR 0x0fe

#define MTRR_CAP_SMRR (1 << 11)	/* SMRR register pair supported */
#define MTRR_CAP_WC (1 << 10)	/* write-combining type supported */
#define MTRR_CAP_FIX (1 << 8)	/* fixed-range MTRRs supported */
#define MTRR_CAP_VCNT 0xff	/* field mask: number of variable MTRRs */

/* IA32_MTRR_DEF_TYPE: default memory type and global enable bits. */
#define MTRR_DEF_TYPE_MSR 0x2ff
#define MTRR_DEF_TYPE_MASK 0xff		/* field mask: default memory type */
#define MTRR_DEF_TYPE_EN (1 << 11)	/* global enable for ALL MTRRs */
#define MTRR_DEF_TYPE_FIX_EN (1 << 10)	/* enable for fixed-range MTRRs */
Eric Biederman | c84c190 | 2004-10-14 20:13:01 +0000 | [diff] [blame] | 23 | |
/* SMRR pair restricting SMM memory; layout mirrors the variable MTRRs. */
#define SMRR_PHYS_BASE 0x1f2
#define SMRR_PHYS_MASK 0x1f3

/* Variable-range MTRRs: MSR pairs starting at 0x200; base register for
 * variable MTRR 'reg' is 0x200 + 2*reg, its mask register follows it. */
#define MTRR_PHYS_BASE(reg) (0x200 + 2 * (reg))
#define MTRR_PHYS_MASK(reg) (MTRR_PHYS_BASE(reg) + 1)
#define MTRR_PHYS_MASK_VALID (1 << 11)	/* valid bit in the PHYS_MASK MSR */
Eric Biederman | c84c190 | 2004-10-14 20:13:01 +0000 | [diff] [blame] | 31 | |
/* Fixed-range MTRRs cover the first 1MiB of the address space.
 * Each MSR holds 8 ranges (one type byte per range):
 * 11 MSRs * 8 ranges = 88 fixed ranges total. */
#define NUM_FIXED_RANGES 88
#define RANGES_PER_FIXED_MTRR 8
#define MTRR_FIX_64K_00000 0x250	/* 0x00000-0x7ffff: 8 x 64KiB */
#define MTRR_FIX_16K_80000 0x258	/* 0x80000-0x9ffff: 8 x 16KiB */
#define MTRR_FIX_16K_A0000 0x259	/* 0xa0000-0xbffff: 8 x 16KiB */
#define MTRR_FIX_4K_C0000 0x268		/* 0xc0000-0xc7fff: 8 x 4KiB */
#define MTRR_FIX_4K_C8000 0x269		/* 0xc8000-0xcffff: 8 x 4KiB */
#define MTRR_FIX_4K_D0000 0x26a		/* 0xd0000-0xd7fff: 8 x 4KiB */
#define MTRR_FIX_4K_D8000 0x26b		/* 0xd8000-0xdffff: 8 x 4KiB */
#define MTRR_FIX_4K_E0000 0x26c		/* 0xe0000-0xe7fff: 8 x 4KiB */
#define MTRR_FIX_4K_E8000 0x26d		/* 0xe8000-0xeffff: 8 x 4KiB */
#define MTRR_FIX_4K_F0000 0x26e		/* 0xf0000-0xf7fff: 8 x 4KiB */
#define MTRR_FIX_4K_F8000 0x26f		/* 0xf8000-0xfffff: 8 x 4KiB */
Eric Biederman | c84c190 | 2004-10-14 20:13:01 +0000 | [diff] [blame] | 45 | |
#if !defined (__ASSEMBLER__) && !defined(__PRE_RAM__)

/*
 * The MTRR code has some side effects that the callers should be aware of.
 * 1. The call sequence matters. x86_setup_mtrrs() calls
 *    x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrrs() (equivalent
 *    of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If the callers
 *    want to call the components of x86_setup_mtrrs() because of other
 *    requirements the ordering should still be preserved.
 * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
 *    of the nature of the global MTRR enable flag. Therefore, all direct
 *    or indirect callers of enable_fixed_mtrr() should ensure that the
 *    variable MTRR MSRs do not contain bad ranges.
 *
 * Note that this function sets up MTRRs for addresses above 4GiB.
 */
void x86_setup_mtrrs(void);
/*
 * x86_setup_mtrrs_with_detect() does the same thing as x86_setup_mtrrs(), but
 * it always dynamically detects the number of variable MTRRs available.
 */
void x86_setup_mtrrs_with_detect(void);
/*
 * x86_setup_var_mtrrs() parameters:
 * address_bits - number of physical address bits supported by cpu
 * above4gb - if set setup MTRRs for addresses above 4GiB else ignore
 *   memory ranges above 4GiB
 */
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
/* Set the global MTRR enable flag; enables variable AND fixed MTRRs,
 * see side-effect note 2 above. */
void enable_fixed_mtrr(void);
/* Program the fixed MTRRs and enable them (no-enable variant + enable). */
void x86_setup_fixed_mtrrs(void);
/* Set up fixed MTRRs but do not enable them. */
void x86_setup_fixed_mtrrs_no_enable(void);
/* Check the current MTRR configuration — presumably a consistency/debug
 * dump; confirm against the implementation in mtrr.c. */
void x86_mtrr_check(void);
#endif
Eric Biederman | c84c190 | 2004-10-14 20:13:01 +0000 | [diff] [blame] | 81 | |
/* Pre-RAM (romstage) helpers; not available to ROMCC-compiled code. */
#if !defined(__ASSEMBLER__) && defined(__PRE_RAM__) && !defined(__ROMCC__)
/* Program variable MTRR 'reg' to cover base/size with the given region
 * type — assumes size is a power of two; units not visible here, TODO
 * confirm against the implementation. */
void set_var_mtrr(unsigned reg, unsigned base, unsigned size, unsigned type);
/* Return the index of an unused variable MTRR — NOTE(review): error
 * convention (negative on none free?) not visible here, confirm. */
int get_free_var_mtrr(void);
#endif
| 86 | |
Rizwan Qureshi | 8453c4f | 2016-09-07 20:11:11 +0530 | [diff] [blame] | 87 | #if !defined(__ASSEMBLER__) && !defined(__ROMCC__) |
| 88 | |
/* fms: find most significant bit set (floor(log2(x))), stolen from Linux
 * Kernel Source. Returns 0 for x == 0, which is indistinguishable from
 * fms(1) — callers must not rely on the result for x == 0. */
static inline unsigned int fms(unsigned int x)
{
	int r;

	/* BSR sets ZF and leaves the destination undefined when the source
	 * is 0, so the "movl $0" fallback handles that case. The input uses
	 * "rm" rather than "g": "g" also permits an immediate operand, and
	 * BSR has no immediate-source form, so constant propagation could
	 * otherwise produce unassemblable code. */
	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r;
}
| 100 | |
/* fls: find least significant bit set (count of trailing zeros).
 * NOTE: despite the traditional meaning of "fls" (find LAST set) elsewhere,
 * this implementation uses BSF and returns the LEAST significant set bit.
 * Returns 32 for x == 0. */
static inline unsigned int fls(unsigned int x)
{
	int r;

	/* BSF sets ZF and leaves the destination undefined when the source
	 * is 0; fall through to "movl $32" in that case. "rm" instead of
	 * "g" because BSF cannot take an immediate source operand. */
	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $32,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r;
}
| 112 | #endif |
| 113 | |
/* Align up to next power of 2, suitable for ROMCC and assembler too
 * (hence a macro, not an inline function — it must expand to a plain
 * constant expression).
 * Range of result 256kB to 128MB is good enough here: the OR of the
 * first 8 right-shifts plus the constant (1<<18)-1 smears all bits
 * below the top set bit only for values in that range.
 * Every use of the argument is parenthesized so lower-precedence
 * expressions (e.g. a | b) expand correctly; note the argument is
 * still evaluated multiple times. */
#define _POW2_MASK(x)	(((x) >> 1) | ((x) >> 2) | ((x) >> 3) | \
			 ((x) >> 4) | ((x) >> 5) | ((x) >> 6) | \
			 ((x) >> 7) | ((x) >> 8) | ((1 << 18) - 1))
#define _ALIGN_UP_POW2(x)	(((x) + _POW2_MASK(x)) & ~_POW2_MASK(x))
| 120 | |
/* At the end of romstage, low RAM 0..CACHE_TMP_RAMTOP may be set
 * as write-back cacheable to speed up ramstage decompression.
 * Note MTRR boundaries, must be power of two.
 */
#define CACHE_TMP_RAMTOP (16<<20)	/* 16 MiB */
Stefan Reinauer | 8f2c616 | 2010-04-06 21:50:21 +0000 | [diff] [blame] | 126 | |
/* MTRR ranges must be powers of two (see note above), so the
 * execute-in-place ROM window size must be one as well. */
#if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE -1)) != 0)
# error "CONFIG_XIP_ROM_SIZE is not a power of 2"
#endif
Stefan Reinauer | 8f2c616 | 2010-04-06 21:50:21 +0000 | [diff] [blame] | 130 | |
/* Select CACHE_ROM_SIZE to use with MTRR setup. For most cases this
 * resolves to a suitable CONFIG_ROM_SIZE but some odd cases need to
 * use CONFIG_CACHE_ROM_SIZE_OVERRIDE in the mainboard Kconfig.
 */
#if (CONFIG_CACHE_ROM_SIZE_OVERRIDE != 0)
# define CACHE_ROM_SIZE CONFIG_CACHE_ROM_SIZE_OVERRIDE
#else
  /* Use CONFIG_ROM_SIZE directly when it is already a power of two;
   * otherwise round up with _ALIGN_UP_POW2 (only valid in its
   * 256kB..128MB result range, hence the sanity #error below). */
# if ((CONFIG_ROM_SIZE & (CONFIG_ROM_SIZE-1)) == 0)
#  define CACHE_ROM_SIZE CONFIG_ROM_SIZE
# else
#  define CACHE_ROM_SIZE _ALIGN_UP_POW2(CONFIG_ROM_SIZE)
#  if (CACHE_ROM_SIZE < CONFIG_ROM_SIZE) || (CACHE_ROM_SIZE >= (2 * CONFIG_ROM_SIZE))
#   error "CACHE_ROM_SIZE is not optimal."
#  endif
# endif
#endif

#if ((CACHE_ROM_SIZE & (CACHE_ROM_SIZE-1)) != 0)
# error "CACHE_ROM_SIZE is not a power of 2."
#endif

/* Place the cached ROM window so it ends at 4GiB: base is
 * 4GiB - CACHE_ROM_SIZE, computed in 4KiB (1<<12) units so the
 * arithmetic stays within 32 bits. */
#define CACHE_ROM_BASE (((1<<20) - (CACHE_ROM_SIZE>>12))<<12)
| 153 | |
/* Hooks for SoCs that program the MTRR MSRs themselves. */
#if (IS_ENABLED(CONFIG_SOC_SETS_MSRS) && !defined(__ASSEMBLER__) \
	&& !defined(__ROMCC__))
#include <cpu/x86/msr.h>
#include <arch/cpu.h>

/*
 * Set the MTRRs using the data on the stack from setup_stack_and_mtrrs.
 * Return a new top_of_stack value which removes the setup_stack_and_mtrrs data.
 */
asmlinkage void *soc_set_mtrrs(void *top_of_stack);
/* Enable the MTRRs — presumably those programmed by soc_set_mtrrs();
 * confirm against the SoC implementation. */
asmlinkage void soc_enable_mtrrs(void);
#endif /* CONFIG_SOC_SETS_MSRS ... */
| 166 | |
Eric Biederman | c84c190 | 2004-10-14 20:13:01 +0000 | [diff] [blame] | 167 | #endif /* CPU_X86_MTRR_H */ |