#ifndef CPU_X86_MTRR_H
#define CPU_X86_MTRR_H

/* These are the region types */
#define MTRR_TYPE_UNCACHEABLE 0
#define MTRR_TYPE_WRCOMB      1
/*#define MTRR_TYPE_          2*/
/*#define MTRR_TYPE_          3*/
#define MTRR_TYPE_WRTHROUGH   4
#define MTRR_TYPE_WRPROT      5
#define MTRR_TYPE_WRBACK      6
#define MTRR_NUM_TYPES        7

#define MTRRcap_MSR     0x0fe
#define MTRRdefType_MSR 0x2ff

#define MTRRdefTypeEn    (1 << 11)
#define MTRRdefTypeFixEn (1 << 10)
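/*
 * Illustrative sketch only (not part of this header's API): both enable
 * bits above live in MTRRdefType_MSR and are typically set with a
 * read-modify-write of that MSR, e.g. using the msr_t type and the
 * rdmsr()/wrmsr() helpers assumed to come from <cpu/x86/msr.h>:
 *
 *	msr_t msr = rdmsr(MTRRdefType_MSR);
 *	msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
 *	wrmsr(MTRRdefType_MSR, msr);
 */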

#define SMRRphysBase_MSR 0x1f2
#define SMRRphysMask_MSR 0x1f3

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

#define MTRRphysMaskValid (1 << 11)
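/*
 * Worked example (illustrative only, not an interface defined here):
 * programming variable MTRR pair 0 to map the low 256MiB as write-back on
 * a CPU with 36 physical address bits.  MTRRphysBase_MSR(0) expands to
 * 0x200 and MTRRphysMask_MSR(0) to 0x201.  The mask is ~(size - 1)
 * truncated to the supported address width (bits 35:12 here), with
 * MTRRphysMaskValid set; msr_t and wrmsr() are assumed to come from
 * <cpu/x86/msr.h>:
 *
 *	msr_t base, mask;
 *	base.lo = 0x00000000 | MTRR_TYPE_WRBACK;
 *	base.hi = 0;
 *	mask.lo = ~(0x10000000 - 1) | MTRRphysMaskValid;
 *	mask.hi = 0x0000000f;
 *	wrmsr(MTRRphysBase_MSR(0), base);
 *	wrmsr(MTRRphysMask_MSR(0), mask);
 */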

#define NUM_FIXED_RANGES 88
#define RANGES_PER_FIXED_MTRR 8
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f
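/*
 * The fixed-range MTRRs above cover the first 1MiB of the address space:
 * MTRRfix64K_00000 holds eight 64KiB ranges (0x00000-0x7ffff), the two
 * fix16K MSRs hold eight 16KiB ranges each (0x80000-0xbffff), and the
 * eight fix4K MSRs hold eight 4KiB ranges each (0xc0000-0xfffff), giving
 * 11 * RANGES_PER_FIXED_MTRR = NUM_FIXED_RANGES (88) ranges in total.
 */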

#if !defined(__ASSEMBLER__) && !defined(__PRE_RAM__)

/*
 * The MTRR code has some side effects that the callers should be aware of.
 * 1. The call sequence matters. x86_setup_mtrrs() calls
 *    x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrr() (together
 *    the equivalent of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs().
 *    If callers want to invoke the components of x86_setup_mtrrs()
 *    separately because of other requirements, this ordering should still
 *    be preserved.
 * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
 *    of the nature of the global MTRR enable flag. Therefore, all direct
 *    or indirect callers of enable_fixed_mtrr() should ensure that the
 *    variable MTRR MSRs do not contain bad ranges.
 * 3. If CONFIG_CACHE_ROM is selected, an MTRR is allocated for caching
 *    the ROM but is initially set to uncacheable (UC). It is the caller's
 *    responsibility to enable it by calling x86_mtrr_enable_rom_caching().
 */
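/*
 * A minimal sketch of point 1 above (illustrative only): a caller invoking
 * the components itself is expected to preserve the same ordering that
 * x86_setup_mtrrs() uses, roughly
 *
 *	x86_setup_fixed_mtrrs_no_enable();
 *	enable_fixed_mtrr();
 *	x86_setup_var_mtrrs(address_bits, 2);
 *
 * keeping in mind that enable_fixed_mtrr() also flips the global MTRR
 * enable, so the variable MTRRs must not hold stale ranges at that point.
 */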
void x86_setup_mtrrs(void);
/*
 * x86_setup_var_mtrrs() parameters:
 * address_bits - number of physical address bits supported by the CPU
 * above4gb - 2 means dynamically detect the number of variable MTRRs
 *            available. Any non-zero value means handle memory ranges
 *            above 4GiB; 0 means ignore memory ranges above 4GiB.
 */
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
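/*
 * For illustration only: address_bits is commonly derived from CPUID leaf
 * 0x80000008 (EAX[7:0] reports the physical address width), e.g. with a
 * cpuid_eax() helper as assumed from <arch/cpu.h>:
 *
 *	x86_setup_var_mtrrs(cpuid_eax(0x80000008) & 0xff, 2);
 *
 * passing 2 for above4gb so the number of variable MTRRs is detected at
 * run time and ranges above 4GiB are handled.
 */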
void enable_fixed_mtrr(void);
void x86_setup_fixed_mtrrs(void);
/* Set up fixed MTRRs but do not enable them. */
void x86_setup_fixed_mtrrs_no_enable(void);
int x86_mtrr_check(void);
/* ROM caching can be used after variable MTRRs are set up. Beware that
 * enabling CONFIG_CACHE_ROM will eat through quite a few MTRRs depending on
 * one's IO hole size and WRCOMB resources. Be sure to check the console
 * log when enabling CONFIG_CACHE_ROM or adding WRCOMB resources. Also note
 * that on CPUs with core-scoped MTRR registers, such as hyperthreaded CPUs,
 * ROM caching will be disabled if all threads run the MTRR code. Therefore,
 * one needs to call x86_mtrr_enable_rom_caching() after all threads of the
 * same core have run the MTRR code. */
#if CONFIG_CACHE_ROM
void x86_mtrr_enable_rom_caching(void);
void x86_mtrr_disable_rom_caching(void);
/* Return the variable range MTRR index of the ROM cache. */
long x86_mtrr_rom_cache_var_index(void);
#else
static inline void x86_mtrr_enable_rom_caching(void) {}
static inline void x86_mtrr_disable_rom_caching(void) {}
static inline long x86_mtrr_rom_cache_var_index(void) { return -1; }
#endif /* CONFIG_CACHE_ROM */
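/*
 * Illustrative sketch of the CONFIG_CACHE_ROM flow described above
 * (assumptions only, not an interface defined here): after every thread of
 * a core has run the MTRR code, some core-local rendezvous point re-enables
 * the ROM cache, e.g.
 *
 *	x86_setup_var_mtrrs(address_bits, 2);
 *	wait_for_sibling_threads();
 *	x86_mtrr_enable_rom_caching();
 *
 * where wait_for_sibling_threads() is a hypothetical placeholder for
 * whatever synchronization the platform actually uses.
 */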

#endif /* !__ASSEMBLER__ && !__PRE_RAM__ */

#if !defined(CONFIG_RAMTOP)
# error "CONFIG_RAMTOP not defined"
#endif

#if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE - 1)) != 0)
# error "CONFIG_XIP_ROM_SIZE is not a power of 2"
#endif

#if ((CONFIG_CACHE_ROM_SIZE & (CONFIG_CACHE_ROM_SIZE - 1)) != 0)
# error "CONFIG_CACHE_ROM_SIZE is not a power of 2"
#endif

#define CACHE_ROM_BASE	(((1 << 20) - (CONFIG_CACHE_ROM_SIZE >> 12)) << 12)
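/*
 * CACHE_ROM_BASE places the cached ROM window so it ends at 4GiB.  The
 * arithmetic is done in 4KiB pages ((1 << 20) pages make up 4GiB) so the
 * expression stays within 32 bits.  Worked example, assuming
 * CONFIG_CACHE_ROM_SIZE = 0x800000 (8MiB):
 *
 *	(((1 << 20) - (0x800000 >> 12)) << 12)
 *	  = ((0x100000 - 0x800) << 12)
 *	  = 0xff800000
 */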

#if (CONFIG_RAMTOP & (CONFIG_RAMTOP - 1)) != 0
# error "CONFIG_RAMTOP must be a power of 2"
#endif

#endif /* CPU_X86_MTRR_H */