/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007-2008 coresystems GmbH
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2013 Vladimir Serbinenko
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef __NORTHBRIDGE_INTEL_NEHALEM_NEHALEM_H__
#define __NORTHBRIDGE_INTEL_NEHALEM_NEHALEM_H__ 1

#ifndef __ASSEMBLER__

#include <stdint.h>

extern unsigned char fake_vbt[4096];

typedef enum {
	FSB_CLOCK_1067MHz = 0,
	FSB_CLOCK_800MHz = 1,
	FSB_CLOCK_667MHz = 2,
} fsb_clock_t;

typedef enum {	/* Steppings below B1 were pre-production; where conversion
		   stepping A1 fits in is unclear. We support B1, B2, B3 and
		   conversion stepping A1. */
	STEPPING_A0 = 0,
	STEPPING_A1 = 1,
	STEPPING_A2 = 2,
	STEPPING_A3 = 3,
	STEPPING_B0 = 4,
	STEPPING_B1 = 5,
	STEPPING_B2 = 6,
	STEPPING_B3 = 7,
	STEPPING_CONVERSION_A1 = 9,
} stepping_t;

typedef enum {
	GMCH_GM45 = 0,
	GMCH_GM47,
	GMCH_GM49,
	GMCH_GE45,
	GMCH_GL40,
	GMCH_GL43,
	GMCH_GS40,
	GMCH_GS45,
	GMCH_PM45,
	GMCH_UNKNOWN
} gmch_gfx_t;

typedef enum {
	MEM_CLOCK_533MHz = 0,
	MEM_CLOCK_400MHz = 1,
	MEM_CLOCK_333MHz = 2,
	MEM_CLOCK_1067MT = 0,
	MEM_CLOCK_800MT = 1,
	MEM_CLOCK_667MT = 2,
} mem_clock_t;

typedef enum {
	DDR1 = 1,
	DDR2 = 2,
	DDR3 = 3,
} ddr_t;

typedef enum {
	CHANNEL_MODE_SINGLE,
	CHANNEL_MODE_DUAL_ASYNC,
	CHANNEL_MODE_DUAL_INTERLEAVED,
} channel_mode_t;

typedef enum { /* as in DDR3 spd */
	CHIP_WIDTH_x4 = 0,
	CHIP_WIDTH_x8 = 1,
	CHIP_WIDTH_x16 = 2,
	CHIP_WIDTH_x32 = 3,
} chip_width_t;

typedef enum { /* as in DDR3 spd */
	CHIP_CAP_256M = 0,
	CHIP_CAP_512M = 1,
	CHIP_CAP_1G = 2,
	CHIP_CAP_2G = 3,
	CHIP_CAP_4G = 4,
	CHIP_CAP_8G = 5,
	CHIP_CAP_16G = 6,
} chip_capacity_t;

typedef struct {
	unsigned int CAS;
	fsb_clock_t fsb_clock;
	mem_clock_t mem_clock;
	channel_mode_t channel_mode;
	unsigned int tRAS;
	unsigned int tRP;
	unsigned int tRCD;
	unsigned int tRFC;
	unsigned int tWR;
	unsigned int tRD;
	unsigned int tRRD;
	unsigned int tFAW;
	unsigned int tWL;
} timings_t;

typedef struct {
	unsigned int card_type;	/* 0x0: unpopulated,
				   0xa - 0xf: raw card type A - F */
	chip_width_t chip_width;
	chip_capacity_t chip_capacity;
	unsigned int page_size;	/* of whole DIMM in bytes (4096 or 8192) */
	unsigned int banks;
	unsigned int ranks;
	unsigned int rank_capacity_mb;	/* per rank in megabytes */
} dimminfo_t;

/* The setup is one DIMM per channel, so there's no need to find a
   common timing setup between multiple chips (but chip and controller
   still need to be coordinated). */
typedef struct {
	stepping_t stepping;
	int txt_enabled;
	int cores;
	gmch_gfx_t gfx_type;
	int gs45_low_power_mode;	/* low power mode of GMCH_GS45 */
	int max_ddr2_mhz;
	int max_ddr3_mt;
	fsb_clock_t max_fsb;
	int max_fsb_mhz;
	int max_render_mhz;

	int spd_type;
	timings_t selected_timings;
	dimminfo_t dimms[2];
} sysinfo_t;

#define TOTAL_CHANNELS 2
#define CHANNEL_IS_POPULATED(dimms, idx) (dimms[idx].card_type != 0)
#define CHANNEL_IS_CARDF(dimms, idx) (dimms[idx].card_type == 0xf)
#define IF_CHANNEL_POPULATED(dimms, idx) if (dimms[idx].card_type != 0)
#define FOR_EACH_CHANNEL(idx) \
	for (idx = 0; idx < TOTAL_CHANNELS; ++idx)
#define FOR_EACH_POPULATED_CHANNEL(dimms, idx) \
	FOR_EACH_CHANNEL(idx) IF_CHANNEL_POPULATED(dimms, idx)

#define RANKS_PER_CHANNEL 4 /* Only two may be populated */
#define IF_RANK_POPULATED(dimms, ch, r) \
	if (dimms[ch].card_type && ((r) < dimms[ch].ranks))
#define FOR_EACH_RANK_IN_CHANNEL(r) \
	for (r = 0; r < RANKS_PER_CHANNEL; ++r)
#define FOR_EACH_POPULATED_RANK_IN_CHANNEL(dimms, ch, r) \
	FOR_EACH_RANK_IN_CHANNEL(r) IF_RANK_POPULATED(dimms, ch, r)
#define FOR_EACH_RANK(ch, r) \
	FOR_EACH_CHANNEL(ch) FOR_EACH_RANK_IN_CHANNEL(r)
#define FOR_EACH_POPULATED_RANK(dimms, ch, r) \
	FOR_EACH_RANK(ch, r) IF_RANK_POPULATED(dimms, ch, r)
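
/*
 * Illustrative sketch (not part of the interface): the iteration macros above
 * expand to plain for/if statements, so looping over every populated rank in
 * raminit code looks like this ("program_rank" is a hypothetical helper):
 *
 *	int ch, r;
 *	FOR_EACH_POPULATED_RANK(sysinfo->dimms, ch, r)
 *		program_rank(sysinfo, ch, r);
 */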

#define DDR3_MAX_CAS 18


enum {
	VCO_2666 = 4,
	VCO_3200 = 0,
	VCO_4000 = 1,
	VCO_5333 = 2,
};

#endif

/* Offsets of read/write training results in CMOS.
   They will be restored upon S3 resume. */
#define CMOS_READ_TRAINING	0x80	/* 16 bytes */
#define CMOS_WRITE_TRAINING	0x90	/* 16 bytes
					   (could be reduced to 10 bytes) */
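
/*
 * Illustrative sketch (assumes the standard coreboot cmos_read()/cmos_write()
 * helpers from the mc146818rtc code): one byte of training data is saved so
 * it can be replayed on the S3 resume path:
 *
 *	cmos_write(result, CMOS_READ_TRAINING + i);
 *	...
 *	result = cmos_read(CMOS_READ_TRAINING + i);
 */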


#define DEFAULT_HECIBAR		0xfed17000

/* 4 KB per PCIe device */
#define DEFAULT_PCIEXBAR	CONFIG_MMCONF_BASE_ADDRESS

#define IOMMU_BASE1 0xfed90000
#define IOMMU_BASE2 0xfed91000
#define IOMMU_BASE3 0xfed92000
#define IOMMU_BASE4 0xfed93000

/*
 * D0:F0
 */
#define D0F0_EPBAR_LO		0x40
#define D0F0_EPBAR_HI		0x44
#define D0F0_MCHBAR_LO		0x48
#define D0F0_MCHBAR_HI		0x4c
#define D0F0_GGC		0x52
#define D0F0_DEVEN		0x54
#define DEVEN_PEG60		(1 << 13)
#define DEVEN_IGD		(1 << 4)
#define DEVEN_PEG10		(1 << 3)
#define DEVEN_PEG11		(1 << 2)
#define DEVEN_PEG12		(1 << 1)
#define DEVEN_HOST		(1 << 0)
#define D0F0_PCIEXBAR_LO	0x60
#define D0F0_PCIEXBAR_HI	0x64
#define D0F0_DMIBAR_LO		0x68
#define D0F0_DMIBAR_HI		0x6c
#define D0F0_PMBASE		0x78
#define QPD0F1_PAM(x)		(0x40 + (x)) /* 0-6 */
#define D0F0_REMAPBASE		0x98
#define D0F0_REMAPLIMIT		0x9a
#define D0F0_TOM		0xa0
#define D0F0_TOUUD		0xa2
#define D0F0_IGD_BASE		0xa4
#define D0F0_GTT_BASE		0xa8
#define D0F0_TOLUD		0xb0
#define D0F0_SKPD		0xdc /* Scratchpad Data */

#define SKPAD_ACPI_S3_MAGIC	0xcafed00d
#define SKPAD_NORMAL_BOOT_MAGIC	0xcafebabe
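
/*
 * Illustrative sketch (assumes coreboot's PCI config accessors and the 0:0.0
 * host bridge device): the scratchpad register distinguishes an S3 resume
 * from a normal boot:
 *
 *	u32 magic = pci_read_config32(PCI_DEV(0, 0, 0), D0F0_SKPD);
 *	int s3resume = (magic == SKPAD_ACPI_S3_MAGIC);
 */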


#define D0F0_CAPID0		0xe0

#define TSEG			0xac /* TSEG base */

/*
 * D1:F0 PEG
 */
#define PEG_CAP			0xa2
#define SLOTCAP			0xb4
#define PEGLC			0xec
#define D1F0_VCCAP		0x104
#define D1F0_VC0RCTL		0x114

/*
 * Graphics frequencies
 */
#define GCFGC_PCIDEV		PCI_DEV(0, 2, 0)
#define GCFGC_OFFSET		0xf0
#define GCFGC_CR_SHIFT		0
#define GCFGC_CR_MASK		(0xf << GCFGC_CR_SHIFT)
#define GCFGC_CS_SHIFT		8
#define GCFGC_CS_MASK		(0xf << GCFGC_CS_SHIFT)
#define GCFGC_CD_SHIFT		12
#define GCFGC_CD_MASK		(0x1 << GCFGC_CD_SHIFT)
#define GCFGC_UPDATE_SHIFT	5
#define GCFGC_UPDATE		(0x1 << GCFGC_UPDATE_SHIFT)
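
/*
 * Illustrative sketch (assumes coreboot's pci_read_config16()/
 * pci_write_config16(); the exact latch sequence is raminit's business): a
 * new render clock select is written first, then latched via GCFGC_UPDATE:
 *
 *	u16 gcfgc = pci_read_config16(GCFGC_PCIDEV, GCFGC_OFFSET);
 *	gcfgc &= ~GCFGC_CR_MASK;
 *	gcfgc |= (new_cr << GCFGC_CR_SHIFT) & GCFGC_CR_MASK;
 *	pci_write_config16(GCFGC_PCIDEV, GCFGC_OFFSET, gcfgc | GCFGC_UPDATE);
 *	pci_write_config16(GCFGC_PCIDEV, GCFGC_OFFSET, gcfgc);
 */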

/*
 * MCHBAR
 */

#define MCHBAR8(x) *((volatile u8 *)(DEFAULT_MCHBAR + x))
#define MCHBAR16(x) *((volatile u16 *)(DEFAULT_MCHBAR + x))
#define MCHBAR32(x) *((volatile u32 *)(DEFAULT_MCHBAR + x))
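
/*
 * Illustrative sketch (MCHBAR must already have been programmed and enabled
 * through D0F0_MCHBAR_LO/HI): the macros expand to volatile MMIO accesses, so
 * a read-modify-write is plain C:
 *
 *	u32 clkcfg = MCHBAR32(CLKCFG_MCHBAR);
 *	clkcfg &= ~CLKCFG_MEMCLK_MASK;
 *	clkcfg |= (new_memclk << CLKCFG_MEMCLK_SHIFT) & CLKCFG_MEMCLK_MASK;
 *	MCHBAR32(CLKCFG_MCHBAR) = clkcfg | CLKCFG_UPDATE;
 */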

#define PMSTS_MCHBAR		0x0f14 /* Self refresh channel status */
#define PMSTS_WARM_RESET	(1 << 1)
#define PMSTS_BOTH_SELFREFRESH	(1 << 0)

#define CLKCFG_MCHBAR		0x0c00
#define CLKCFG_FSBCLK_SHIFT	0
#define CLKCFG_FSBCLK_MASK	(7 << CLKCFG_FSBCLK_SHIFT)
#define CLKCFG_MEMCLK_SHIFT	4
#define CLKCFG_MEMCLK_MASK	(7 << CLKCFG_MEMCLK_SHIFT)
#define CLKCFG_UPDATE		(1 << 12)

#define SSKPD_MCHBAR		0x0c1c
#define SSKPD_CLK_SHIFT		0
#define SSKPD_CLK_MASK		(7 << SSKPD_CLK_SHIFT)

#define DCC_MCHBAR		0x200
#define DCC_NO_CHANXOR		(1 << 10)
#define DCC_INTERLEAVED		(1 << 1)
#define DCC_CMD_SHIFT		16
#define DCC_CMD_MASK		(7 << DCC_CMD_SHIFT)
#define DCC_CMD_NOP		(1 << DCC_CMD_SHIFT)
		/* For mode register mr0: */
#define DCC_SET_MREG		(3 << DCC_CMD_SHIFT)
		/* For extended mode registers mr1 to mr3: */
#define DCC_SET_EREG		(4 << DCC_CMD_SHIFT)
#define DCC_SET_EREG_SHIFT	21
#define DCC_SET_EREG_MASK	(DCC_CMD_MASK | (3 << DCC_SET_EREG_SHIFT))
#define DCC_SET_EREGx(x)	((DCC_SET_EREG | \
				  ((x - 1) << DCC_SET_EREG_SHIFT)) & \
				 DCC_SET_EREG_MASK)
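
/*
 * Illustrative sketch: DCC_SET_EREGx(x) encodes the "set extended mode
 * register" command for EMRS1..EMRS3; e.g. selecting EMRS2 (the surrounding
 * JEDEC init sequence is omitted here):
 *
 *	u32 dcc = MCHBAR32(DCC_MCHBAR);
 *	dcc &= ~DCC_SET_EREG_MASK;
 *	MCHBAR32(DCC_MCHBAR) = dcc | DCC_SET_EREGx(2);
 */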

/* Per channel DRAM Row Attribute registers (32-bit) */
#define CxDRA_MCHBAR(x)		(0x1208 + (x * 0x0100))
#define CxDRA_PAGESIZE_SHIFT(r)	(r * 4) /* Per rank r */
#define CxDRA_PAGESIZE_MASKr(r)	(0x7 << CxDRA_PAGESIZE_SHIFT(r))
#define CxDRA_PAGESIZE_MASK	0x0000ffff
#define CxDRA_PAGESIZE(r, p) /* for log2(dimm page size in bytes) p */ \
	(((p - 10) << CxDRA_PAGESIZE_SHIFT(r)) & CxDRA_PAGESIZE_MASKr(r))
#define CxDRA_BANKS_SHIFT(r)	((r * 3) + 16)
#define CxDRA_BANKS_MASKr(r)	(0x3 << CxDRA_BANKS_SHIFT(r))
#define CxDRA_BANKS_MASK	0x07ff0000
#define CxDRA_BANKS(r, b) /* for number of banks b */ \
	((b << (CxDRA_BANKS_SHIFT(r) - 3)) & CxDRA_BANKS_MASKr(r))
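
/*
 * Illustrative sketch: building the row-attribute fields for one rank of a
 * DIMM with 8 banks and an 8 KiB page (log2(8192) == 13):
 *
 *	u32 cxdra = MCHBAR32(CxDRA_MCHBAR(ch));
 *	cxdra &= ~(CxDRA_PAGESIZE_MASKr(r) | CxDRA_BANKS_MASKr(r));
 *	cxdra |= CxDRA_PAGESIZE(r, 13) | CxDRA_BANKS(r, 8);
 *	MCHBAR32(CxDRA_MCHBAR(ch)) = cxdra;
 */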

/*
 * Per channel DRAM Row Boundary registers (32-bit)
 * Every two ranks share one register and must be programmed at the same time.
 * All registers (4 ranks per channel) have to be set.
 */
#define CxDRBy_MCHBAR(x, r)	(0x1200 + (x * 0x0100) + ((r / 2) * 4))
#define CxDRBy_BOUND_SHIFT(r)	((r % 2) * 16)
#define CxDRBy_BOUND_MASK(r)	(0x1fc << CxDRBy_BOUND_SHIFT(r))
#define CxDRBy_BOUND_MB(r, b) /* for boundary in MB b */ \
	(((b >> 5) << CxDRBy_BOUND_SHIFT(r)) & CxDRBy_BOUND_MASK(r))
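
/*
 * Illustrative sketch: both ranks sharing a boundary register are written in
 * one go, e.g. rank 0 ending at 1024 MB and rank 1 at 2048 MB on channel 0:
 *
 *	MCHBAR32(CxDRBy_MCHBAR(0, 0)) =
 *		CxDRBy_BOUND_MB(0, 1024) | CxDRBy_BOUND_MB(1, 2048);
 */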

#define CxDRC0_MCHBAR(x)	(0x1230 + (x * 0x0100))
#define CxDRC0_RANKEN0		(1 << 24) /* Rank Enable */
#define CxDRC0_RANKEN1		(1 << 25)
#define CxDRC0_RANKEN2		(1 << 26)
#define CxDRC0_RANKEN3		(1 << 27)
#define CxDRC0_RANKEN(r)	(1 << (24 + r))
#define CxDRC0_RANKEN_MASK	(0xf << 24)
#define CxDRC0_RMS_SHIFT	8 /* Refresh Mode Select */
#define CxDRC0_RMS_MASK		(7 << CxDRC0_RMS_SHIFT)
#define CxDRC0_RMS_78US		(2 << CxDRC0_RMS_SHIFT)
#define CxDRC0_RMS_39US		(3 << CxDRC0_RMS_SHIFT)
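
/*
 * Illustrative sketch: enabling exactly the populated ranks of a channel
 * while leaving the refresh mode select untouched:
 *
 *	u32 drc0 = MCHBAR32(CxDRC0_MCHBAR(ch)) & ~CxDRC0_RANKEN_MASK;
 *	int r;
 *	FOR_EACH_POPULATED_RANK_IN_CHANNEL(sysinfo->dimms, ch, r)
 *		drc0 |= CxDRC0_RANKEN(r);
 *	MCHBAR32(CxDRC0_MCHBAR(ch)) = drc0;
 */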

#define CxDRC1_MCHBAR(x)	(0x1234 + (x * 0x0100))
#define CxDRC1_SSDS_SHIFT	24
#define CxDRC1_SSDS_MASK	(0xff << CxDRC1_SSDS_SHIFT)
#define CxDRC1_DS		(0x91 << CxDRC1_SSDS_SHIFT)
#define CxDRC1_SS		(0xb1 << CxDRC1_SSDS_SHIFT)
#define CxDRC1_NOTPOP(r)	(1 << (16 + r)) /* Write 1 for Not Populated */
#define CxDRC1_NOTPOP_MASK	(0xf << 16)
#define CxDRC1_MUSTWR		(3 << 11)

#define CxDRC2_MCHBAR(x)	(0x1238 + (x * 0x0100))
#define CxDRC2_NOTPOP(r)	(1 << (24 + r)) /* Write 1 for Not Populated */
#define CxDRC2_NOTPOP_MASK	(0xf << 24)
#define CxDRC2_MUSTWR		(1 << 12)
#define CxDRC2_CLK1067MT	(1 << 0)

/* DRAM Timing registers (32-bit each) */
#define CxDRT0_MCHBAR(x)	(0x1210 + (x * 0x0100))
#define CxDRT0_BtB_WtP_SHIFT	26
#define CxDRT0_BtB_WtP_MASK	(0x1f << CxDRT0_BtB_WtP_SHIFT)
#define CxDRT0_BtB_WtR_SHIFT	20
#define CxDRT0_BtB_WtR_MASK	(0x1f << CxDRT0_BtB_WtR_SHIFT)
#define CxDRT1_MCHBAR(x)	(0x1214 + (x * 0x0100))
#define CxDRT2_MCHBAR(x)	(0x1218 + (x * 0x0100))
#define CxDRT3_MCHBAR(x)	(0x121c + (x * 0x0100))
#define CxDRT4_MCHBAR(x)	(0x1220 + (x * 0x0100))
#define CxDRT5_MCHBAR(x)	(0x1224 + (x * 0x0100))
#define CxDRT6_MCHBAR(x)	(0x1228 + (x * 0x0100))

/* Clock disable registers (32-bit each) */
#define CxDCLKDIS_MCHBAR(x)	(0x120c + (x * 0x0100))
#define CxDCLKDIS_MASK		3
#define CxDCLKDIS_ENABLE	3 /* Always enable both clock pairs. */

/* On-Die-Termination registers (2x 32-bit per channel) */
#define CxODT_HIGH(x)		(0x124c + (x * 0x0100))
#define CxODT_LOW(x)		(0x1248 + (x * 0x0100))

/* Write Training registers. */
#define CxWRTy_MCHBAR(ch, s)	(0x1470 + (ch * 0x0100) + ((3 - s) * 4))

#define CxGTEW(x)		(0x1270 + (x * 0x100))
#define CxGTC(x)		(0x1274 + (x * 0x100))
#define CxDTPEW(x)		(0x1278 + (x * 0x100))
#define CxDTAEW(x)		(0x1280 + (x * 0x100))
#define CxDTC(x)		(0x1288 + (x * 0x100))


/*
 * DMIBAR
 */

#define DMIBAR8(x) *((volatile u8 *)(DEFAULT_DMIBAR + x))
#define DMIBAR16(x) *((volatile u16 *)(DEFAULT_DMIBAR + x))
#define DMIBAR32(x) *((volatile u32 *)(DEFAULT_DMIBAR + x))

#define DMIVC0RCTL	0x14
#define DMIESD		0x44


/*
 * EPBAR
 */

#define EPBAR8(x) *((volatile u8 *)(DEFAULT_EPBAR + x))
#define EPBAR16(x) *((volatile u16 *)(DEFAULT_EPBAR + x))
#define EPBAR32(x) *((volatile u32 *)(DEFAULT_EPBAR + x))


#ifndef __ASSEMBLER__
void gm45_early_init(void);
void gm45_early_reset(void);

void enter_raminit_or_reset(void);
void get_gmch_info(sysinfo_t *);
void raminit_thermal(const sysinfo_t *);
void init_igd(const sysinfo_t *, int no_igd, int no_peg);
void init_pm(const sysinfo_t *);

int raminit_read_vco_index(void);
u32 raminit_get_rank_addr(unsigned int channel, unsigned int rank);

void raminit_rcomp_calibration(stepping_t stepping);
void raminit_reset_readwrite_pointers(void);
void raminit_receive_enable_calibration(const timings_t *, const dimminfo_t *);
void raminit_write_training(const mem_clock_t, const dimminfo_t *, int s3resume);
void raminit_read_training(const dimminfo_t *, int s3resume);

void gm45_late_init(stepping_t);

u32 decode_igd_memory_size(u32 gms);
u32 decode_igd_gtt_size(u32 gsm);

void init_iommu(void);
#endif

/* Chipset types */
#define NEHALEM_MOBILE	0
#define NEHALEM_DESKTOP	1
#define NEHALEM_SERVER	2

/* Device ID for SandyBridge and IvyBridge */
#define BASE_REV_SNB	0x00
#define BASE_REV_IVB	0x50
#define BASE_REV_MASK	0x50

/* SandyBridge CPU stepping */
#define SNB_STEP_D0	(BASE_REV_SNB + 5) /* Also J0 */
#define SNB_STEP_D1	(BASE_REV_SNB + 6)
#define SNB_STEP_D2	(BASE_REV_SNB + 7) /* Also J1/Q0 */

/* IvyBridge CPU stepping */
#define IVB_STEP_A0	(BASE_REV_IVB + 0)
#define IVB_STEP_B0	(BASE_REV_IVB + 2)
#define IVB_STEP_C0	(BASE_REV_IVB + 4)
#define IVB_STEP_K0	(BASE_REV_IVB + 5)
#define IVB_STEP_D0	(BASE_REV_IVB + 6)
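
/*
 * Illustrative sketch (sandybridge-style use of these encodings; whether the
 * caller takes this path is its own business): bridge_silicon_revision(),
 * declared further down, returns a value in this BASE_REV encoding, so the
 * generations are told apart with a mask compare:
 *
 *	int is_ivb = (bridge_silicon_revision() & BASE_REV_MASK) == BASE_REV_IVB;
 */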

/* Intel Enhanced Debug region must be 4MB */
#define IED_SIZE	0x400000

/* Northbridge BARs */
#define DEFAULT_PCIEXBAR	CONFIG_MMCONF_BASE_ADDRESS /* 4 KB per PCIe device */
#define DEFAULT_MCHBAR		0xfed10000 /* 16 KB */
#define DEFAULT_DMIBAR		0xfed18000 /* 4 KB */
#define DEFAULT_EPBAR		0xfed19000 /* 4 KB */
#define DEFAULT_RCBABASE	0xfed1c000

#define QUICKPATH_BUS 0xff

#include <southbridge/intel/ibexpeak/pch.h>

/* Everything below this line is ignored in the DSDT */
#ifndef __ACPI__

/* Device 0:0.0 PCI configuration space (Host Bridge) */

#define EPBAR		0x40
#define MCHBAR		0x48
#define PCIEXBAR	0x60
#define DMIBAR		0x68
#define X60BAR		0x60

#define LAC		0x87 /* Legacy Access Control */
#define QPD0F1_SMRAM	0x4d /* System Management RAM Control */
#define D_OPEN		(1 << 6)
#define D_CLS		(1 << 5)
#define D_LCK		(1 << 4)
#define G_SMRAME	(1 << 3)
#define C_BASE_SEG	((0 << 2) | (1 << 1) | (0 << 0))
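
/*
 * Illustrative sketch (assumes coreboot's pci_write_config8() and that "dev"
 * is the PCI device carrying this register, as the QPD0F1_ prefix suggests):
 * opening and later locking the legacy SMRAM segment:
 *
 *	pci_write_config8(dev, QPD0F1_SMRAM, D_OPEN | G_SMRAME | C_BASE_SEG);
 *	...
 *	pci_write_config8(dev, QPD0F1_SMRAM, D_LCK | G_SMRAME | C_BASE_SEG);
 */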

#define SKPAD		0xdc /* Scratchpad Data */

/* Device 0:1.0 PCI configuration space (PCI Express) */

#define BCTRL1		0x3e /* 16bit */


/* Device 0:2.0 PCI configuration space (Graphics Device) */

#define MSAC		0x62 /* Multi Size Aperture Control */
#define SWSCI		0xe8 /* SWSCI enable */
#define ASLS		0xfc /* OpRegion Base */

/*
 * MCHBAR
 */

#define MCHBAR8(x) *((volatile u8 *)(DEFAULT_MCHBAR + x))
#define MCHBAR16(x) *((volatile u16 *)(DEFAULT_MCHBAR + x))
#define MCHBAR32(x) *((volatile u32 *)(DEFAULT_MCHBAR + x))
#define MCHBAR32_OR(x, or) MCHBAR32(x) = (MCHBAR32(x) | (or))

#define SSKPD		0x5d14 /* 16bit (scratchpad) */
#define BIOS_RESET_CPL	0x5da8 /* 8bit */
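
/*
 * Illustrative sketch: BIOS_RESET_CPL is the "BIOS reset complete" flag set
 * once memory init is done; with the accessors above this is a single
 * statement (bit 0 is an assumption of this sketch):
 *
 *	MCHBAR8(BIOS_RESET_CPL) |= 1;
 */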

/*
 * EPBAR - Egress Port Root Complex Register Block
 */

#define EPBAR8(x) *((volatile u8 *)(DEFAULT_EPBAR + x))
#define EPBAR16(x) *((volatile u16 *)(DEFAULT_EPBAR + x))
#define EPBAR32(x) *((volatile u32 *)(DEFAULT_EPBAR + x))

#define EPPVCCAP1	0x004 /* 32bit */
#define EPPVCCAP2	0x008 /* 32bit */

#define EPVC0RCAP	0x010 /* 32bit */
#define EPVC0RCTL	0x014 /* 32bit */
#define EPVC0RSTS	0x01a /* 16bit */

#define EPVC1RCAP	0x01c /* 32bit */
#define EPVC1RCTL	0x020 /* 32bit */
#define EPVC1RSTS	0x026 /* 16bit */

#define EPVC1MTS	0x028 /* 32bit */
#define EPVC1IST	0x038 /* 64bit */

#define EPESD		0x044 /* 32bit */

#define EPLE1D		0x050 /* 32bit */
#define EPLE1A		0x058 /* 64bit */
#define EPLE2D		0x060 /* 32bit */
#define EPLE2A		0x068 /* 64bit */

#define PORTARB		0x100 /* 256bit */

/*
 * DMIBAR
 */

#define DMIBAR8(x) *((volatile u8 *)(DEFAULT_DMIBAR + x))
#define DMIBAR16(x) *((volatile u16 *)(DEFAULT_DMIBAR + x))
#define DMIBAR32(x) *((volatile u32 *)(DEFAULT_DMIBAR + x))

#define DMIVCECH	0x000 /* 32bit */
#define DMIPVCCAP1	0x004 /* 32bit */
#define DMIPVCCAP2	0x008 /* 32bit */

#define DMIPVCCCTL	0x00c /* 16bit */

#define DMIVC0RCAP	0x010 /* 32bit */
#define DMIVC0RCTL0	0x014 /* 32bit */
#define DMIVC0RSTS	0x01a /* 16bit */

#define DMIVC1RCAP	0x01c /* 32bit */
#define DMIVC1RCTL	0x020 /* 32bit */
#define DMIVC1RSTS	0x026 /* 16bit */

#define DMILE1D		0x050 /* 32bit */
#define DMILE1A		0x058 /* 64bit */
#define DMILE2D		0x060 /* 32bit */
#define DMILE2A		0x068 /* 64bit */

#define DMILCAP		0x084 /* 32bit */
#define DMILCTL		0x088 /* 16bit */
#define DMILSTS		0x08a /* 16bit */

#define DMICTL1		0x0f0 /* 32bit */
#define DMICTL2		0x0fc /* 32bit */

#define DMICC		0x208 /* 32bit */

#define DMIDRCCFG	0xeb4 /* 32bit */

#ifndef __ASSEMBLER__
static inline void barrier(void) { asm("" ::: "memory"); }

struct ied_header {
	char signature[10];
	u32 size;
	u8 reserved[34];
} __attribute__ ((packed));

#define PCI_DEVICE_ID_SB	0x0104
#define PCI_DEVICE_ID_IB	0x0154

#ifdef __SMM__
void intel_sandybridge_finalize_smm(void);
#else /* !__SMM__ */
int bridge_silicon_revision(void);
void nehalem_early_initialization(int chipset_type);
void nehalem_late_initialization(void);

/* debugging functions */
void print_pci_devices(void);
void dump_pci_device(unsigned dev);
void dump_pci_devices(void);
void dump_spd_registers(void);
void dump_mem(unsigned start, unsigned end);
void report_platform_info(void);
#endif /* !__SMM__ */


#define MRC_DATA_ALIGN		0x1000
#define MRC_DATA_SIGNATURE	(('M' << 0) | ('R' << 8) | ('C' << 16) | ('D' << 24))

struct mrc_data_container {
	u32 mrc_signature;	// "MRCD"
	u32 mrc_data_size;	// Actual total size of this structure
	u32 mrc_checksum;	// IP style checksum
	u32 reserved;		// For header alignment
	u8 mrc_data[0];		// Variable size, platform/run time dependent.
} __attribute__ ((packed));
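
/*
 * Illustrative sketch (coreboot's compute_ip_checksum() from <ip_checksum.h>
 * is assumed, checksumming the payload the same way the cache writer did):
 * validating a cached MRC blob before reusing it:
 *
 *	static int mrc_cache_valid(const struct mrc_data_container *c)
 *	{
 *		return c->mrc_signature == MRC_DATA_SIGNATURE &&
 *		       c->mrc_checksum == compute_ip_checksum((void *)c->mrc_data,
 *							       c->mrc_data_size);
 *	}
 */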

struct mrc_data_container *find_current_mrc_cache(void);
#if !defined(__PRE_RAM__)
#include "gma.h"
int init_igd_opregion(igd_opregion_t *igd_opregion);
#endif

#endif /* !__ASSEMBLER__ */
#endif /* !__ACPI__ */
#endif /* __NORTHBRIDGE_INTEL_NEHALEM_NEHALEM_H__ */