/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007-2008 coresystems GmbH
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2013 Vladimir Serbinenko
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __NORTHBRIDGE_INTEL_NEHALEM_NEHALEM_H__
#define __NORTHBRIDGE_INTEL_NEHALEM_NEHALEM_H__

#ifndef __ASSEMBLER__

#include <stdint.h>

extern unsigned char fake_vbt[4096];

typedef enum {
	FSB_CLOCK_1067MHz = 0,
	FSB_CLOCK_800MHz = 1,
	FSB_CLOCK_667MHz = 2,
} fsb_clock_t;

typedef enum {	/* Steppings below B1 were pre-production;
		   the status of conversion stepping A1 is unclear.
		   We support B1, B2, B3 and conversion stepping A1. */
	STEPPING_A0 = 0,
	STEPPING_A1 = 1,
	STEPPING_A2 = 2,
	STEPPING_A3 = 3,
	STEPPING_B0 = 4,
	STEPPING_B1 = 5,
	STEPPING_B2 = 6,
	STEPPING_B3 = 7,
	STEPPING_CONVERSION_A1 = 9,
} stepping_t;

typedef enum {
	GMCH_GM45 = 0,
	GMCH_GM47,
	GMCH_GM49,
	GMCH_GE45,
	GMCH_GL40,
	GMCH_GL43,
	GMCH_GS40,
	GMCH_GS45,
	GMCH_PM45,
	GMCH_UNKNOWN
} gmch_gfx_t;

typedef enum {
	MEM_CLOCK_533MHz = 0,
	MEM_CLOCK_400MHz = 1,
	MEM_CLOCK_333MHz = 2,
	MEM_CLOCK_1067MT = 0,
	MEM_CLOCK_800MT = 1,
	MEM_CLOCK_667MT = 2,
} mem_clock_t;

typedef enum {
	DDR1 = 1,
	DDR2 = 2,
	DDR3 = 3,
} ddr_t;

typedef enum {
	CHANNEL_MODE_SINGLE,
	CHANNEL_MODE_DUAL_ASYNC,
	CHANNEL_MODE_DUAL_INTERLEAVED,
} channel_mode_t;

typedef enum { /* as in DDR3 SPD */
	CHIP_WIDTH_x4 = 0,
	CHIP_WIDTH_x8 = 1,
	CHIP_WIDTH_x16 = 2,
	CHIP_WIDTH_x32 = 3,
} chip_width_t;

typedef enum { /* as in DDR3 SPD */
	CHIP_CAP_256M = 0,
	CHIP_CAP_512M = 1,
	CHIP_CAP_1G = 2,
	CHIP_CAP_2G = 3,
	CHIP_CAP_4G = 4,
	CHIP_CAP_8G = 5,
	CHIP_CAP_16G = 6,
} chip_capacity_t;

typedef struct {
	unsigned int CAS;
	fsb_clock_t fsb_clock;
	mem_clock_t mem_clock;
	channel_mode_t channel_mode;
	unsigned int tRAS;
	unsigned int tRP;
	unsigned int tRCD;
	unsigned int tRFC;
	unsigned int tWR;
	unsigned int tRD;
	unsigned int tRRD;
	unsigned int tFAW;
	unsigned int tWL;
} timings_t;

typedef struct {
	unsigned int card_type; /* 0x0: unpopulated,
				   0xa - 0xf: raw card type A - F */
	chip_width_t chip_width;
	chip_capacity_t chip_capacity;
	unsigned int page_size; /* of the whole DIMM in bytes (4096 or 8192) */
	unsigned int banks;
	unsigned int ranks;
	unsigned int rank_capacity_mb; /* per rank, in megabytes */
} dimminfo_t;

/* The setup is one DIMM per channel, so there is no need to find a
   common timing setup between multiple chips (but chip and controller
   still need to be coordinated). */
typedef struct {
	stepping_t stepping;
	int txt_enabled;
	int cores;
	gmch_gfx_t gfx_type;
	int gs45_low_power_mode; /* low power mode of GMCH_GS45 */
	int max_ddr2_mhz;
	int max_ddr3_mt;
	fsb_clock_t max_fsb;
	int max_fsb_mhz;
	int max_render_mhz;

	int spd_type;
	timings_t selected_timings;
	dimminfo_t dimms[2];
} sysinfo_t;

#define TOTAL_CHANNELS 2
#define CHANNEL_IS_POPULATED(dimms, idx) (dimms[idx].card_type != 0)
#define CHANNEL_IS_CARDF(dimms, idx) (dimms[idx].card_type == 0xf)
#define IF_CHANNEL_POPULATED(dimms, idx) if (dimms[idx].card_type != 0)
#define FOR_EACH_CHANNEL(idx) \
	for (idx = 0; idx < TOTAL_CHANNELS; ++idx)
#define FOR_EACH_POPULATED_CHANNEL(dimms, idx) \
	FOR_EACH_CHANNEL(idx) IF_CHANNEL_POPULATED(dimms, idx)

#define RANKS_PER_CHANNEL 4 /* Only two may be populated */
#define IF_RANK_POPULATED(dimms, ch, r) \
	if (dimms[ch].card_type && ((r) < dimms[ch].ranks))
#define FOR_EACH_RANK_IN_CHANNEL(r) \
	for (r = 0; r < RANKS_PER_CHANNEL; ++r)
#define FOR_EACH_POPULATED_RANK_IN_CHANNEL(dimms, ch, r) \
	FOR_EACH_RANK_IN_CHANNEL(r) IF_RANK_POPULATED(dimms, ch, r)
#define FOR_EACH_RANK(ch, r) \
	FOR_EACH_CHANNEL(ch) FOR_EACH_RANK_IN_CHANNEL(r)
#define FOR_EACH_POPULATED_RANK(dimms, ch, r) \
	FOR_EACH_RANK(ch, r) IF_RANK_POPULATED(dimms, ch, r)

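/*
 * Usage sketch (illustration only, not part of the interface): assuming a
 * sysinfo_t pointer that has been filled in by get_gmch_info(), iterating
 * over the populated channels and ranks composes like this:
 *
 *	int ch, r;
 *	FOR_EACH_POPULATED_CHANNEL(sysinfo->dimms, ch) {
 *		FOR_EACH_POPULATED_RANK_IN_CHANNEL(sysinfo->dimms, ch, r)
 *			printk(BIOS_DEBUG, "channel %d, rank %d\n", ch, r);
 *	}
 *
 * Each FOR_EACH_* macro expands to a plain for loop, optionally guarded by
 * the matching IF_*_POPULATED condition, so it may be followed by a single
 * statement or a block.
 */
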
#define DDR3_MAX_CAS 18


enum {
	VCO_2666 = 4,
	VCO_3200 = 0,
	VCO_4000 = 1,
	VCO_5333 = 2,
};

#endif /* !__ASSEMBLER__ */

/* Offsets of read/write training results in CMOS.
   They are restored on S3 resume. */
#define CMOS_READ_TRAINING 0x80 /* 16 bytes */
#define CMOS_WRITE_TRAINING 0x90 /* 16 bytes
				    (could be reduced to 10 bytes) */


#define DEFAULT_HECIBAR ((u8 *)0xfed17000)

 /* 4 KB per PCIe device */
#define DEFAULT_PCIEXBAR CONFIG_MMCONF_BASE_ADDRESS

#define IOMMU_BASE1 0xfed90000
#define IOMMU_BASE2 0xfed91000
#define IOMMU_BASE3 0xfed92000
#define IOMMU_BASE4 0xfed93000

/*
 * D0:F0
 */
#define D0F0_EPBAR_LO 0x40
#define D0F0_EPBAR_HI 0x44
#define D0F0_MCHBAR_LO 0x48
#define D0F0_MCHBAR_HI 0x4c
#define D0F0_GGC 0x52
#define D0F0_DEVEN 0x54
#define DEVEN_PEG60 (1 << 13)
#define DEVEN_IGD (1 << 4)
#define DEVEN_PEG10 (1 << 3)
#define DEVEN_PEG11 (1 << 2)
#define DEVEN_PEG12 (1 << 1)
#define DEVEN_HOST (1 << 0)
#define D0F0_PCIEXBAR_LO 0x60
#define D0F0_PCIEXBAR_HI 0x64
#define D0F0_DMIBAR_LO 0x68
#define D0F0_DMIBAR_HI 0x6c
#define D0F0_PMBASE 0x78
#define QPD0F1_PAM(x) (0x40+(x)) /* 0-6 */
#define D0F0_REMAPBASE 0x98
#define D0F0_REMAPLIMIT 0x9a
#define D0F0_TOM 0xa0
#define D0F0_TOUUD 0xa2
#define D0F0_IGD_BASE 0xa4
#define D0F0_GTT_BASE 0xa8
#define D0F0_TOLUD 0xb0
#define D0F0_SKPD 0xdc /* Scratchpad Data */

#define SKPAD_ACPI_S3_MAGIC 0xcafed00d
#define SKPAD_NORMAL_BOOT_MAGIC 0xcafebabe
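
/*
 * Illustration only (a sketch, not the boot path itself): the SKPD
 * scratchpad register is commonly used to tell an S3 resume apart from a
 * normal boot. Assuming the config-space accessors from
 * <device/pci_ops.h>, such a check could look roughly like:
 *
 *	u32 magic = pci_read_config32(PCI_DEV(0, 0, 0), D0F0_SKPD);
 *	int s3resume = (magic == SKPAD_ACPI_S3_MAGIC);
 *
 * with the write side storing SKPAD_ACPI_S3_MAGIC or
 * SKPAD_NORMAL_BOOT_MAGIC into the same register earlier in the flow.
 */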


#define D0F0_CAPID0 0xe0

#define TSEG 0xac /* TSEG base */

/*
 * D1:F0 PEG
 */
#define PEG_CAP 0xa2
#define SLOTCAP 0xb4
#define PEGLC 0xec
#define D1F0_VCCAP 0x104
#define D1F0_VC0RCTL 0x114

/*
 * Graphics frequencies
 */
#define GCFGC_PCIDEV PCI_DEV(0, 2, 0)
#define GCFGC_OFFSET 0xf0
#define GCFGC_CR_SHIFT 0
#define GCFGC_CR_MASK (0xf << GCFGC_CR_SHIFT)
#define GCFGC_CS_SHIFT 8
#define GCFGC_CS_MASK (0xf << GCFGC_CS_SHIFT)
#define GCFGC_CD_SHIFT 12
#define GCFGC_CD_MASK (0x1 << GCFGC_CD_SHIFT)
#define GCFGC_UPDATE_SHIFT 5
#define GCFGC_UPDATE (0x1 << GCFGC_UPDATE_SHIFT)
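
/*
 * Extraction sketch (illustration only): GCFGC lives in the config space
 * of the device at GCFGC_PCIDEV, so reading the render clock select field
 * would look roughly like this, assuming <device/pci_ops.h>:
 *
 *	u16 gcfgc = pci_read_config16(GCFGC_PCIDEV, GCFGC_OFFSET);
 *	int cr = (gcfgc & GCFGC_CR_MASK) >> GCFGC_CR_SHIFT;
 *
 * Changing a field and setting GCFGC_UPDATE is left to the actual
 * northbridge code; this only shows how the shift and mask macros fit
 * together.
 */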

/*
 * MCHBAR
 */

#define MCHBAR8(x) *((volatile u8 *)(DEFAULT_MCHBAR + x))
#define MCHBAR16(x) *((volatile u16 *)(DEFAULT_MCHBAR + x))
#define MCHBAR32(x) *((volatile u32 *)(DEFAULT_MCHBAR + x))
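
/*
 * The accessors above expand to plain volatile MMIO dereferences, so the
 * usual pattern is a read-modify-write (sketch only; the offsets used here
 * are the ones defined below):
 *
 *	u32 reg32 = MCHBAR32(CLKCFG_MCHBAR);
 *	reg32 &= ~CLKCFG_MEMCLK_MASK;
 *	MCHBAR32(CLKCFG_MCHBAR) = reg32;
 *
 * The real programming sequences (including when CLKCFG_UPDATE has to be
 * set) live in the raminit code, not in this header.
 */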

#define PMSTS_MCHBAR 0x0f14 /* Self refresh channel status */
#define PMSTS_WARM_RESET (1 << 1)
#define PMSTS_BOTH_SELFREFRESH (1 << 0)

#define CLKCFG_MCHBAR 0x0c00
#define CLKCFG_FSBCLK_SHIFT 0
#define CLKCFG_FSBCLK_MASK (7 << CLKCFG_FSBCLK_SHIFT)
#define CLKCFG_MEMCLK_SHIFT 4
#define CLKCFG_MEMCLK_MASK (7 << CLKCFG_MEMCLK_SHIFT)
#define CLKCFG_UPDATE (1 << 12)

#define SSKPD_MCHBAR 0x0c1c
#define SSKPD_CLK_SHIFT 0
#define SSKPD_CLK_MASK (7 << SSKPD_CLK_SHIFT)

#define DCC_MCHBAR 0x200
#define DCC_NO_CHANXOR (1 << 10)
#define DCC_INTERLEAVED (1 << 1)
#define DCC_CMD_SHIFT 16
#define DCC_CMD_MASK (7 << DCC_CMD_SHIFT)
#define DCC_CMD_NOP (1 << DCC_CMD_SHIFT)
 /* For mode register mr0: */
#define DCC_SET_MREG (3 << DCC_CMD_SHIFT)
 /* For extended mode registers mr1 to mr3: */
#define DCC_SET_EREG (4 << DCC_CMD_SHIFT)
#define DCC_SET_EREG_SHIFT 21
#define DCC_SET_EREG_MASK (DCC_CMD_MASK | (3 << DCC_SET_EREG_SHIFT))
#define DCC_SET_EREGx(x) ((DCC_SET_EREG | \
			   ((x - 1) << DCC_SET_EREG_SHIFT)) & \
			  DCC_SET_EREG_MASK)
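
/*
 * Illustration only: DCC_SET_EREGx() merely folds the extended mode
 * register number into the command field, so by the definitions above
 *
 *	DCC_SET_EREGx(1) == DCC_SET_EREG
 *	DCC_SET_EREGx(2) == DCC_SET_EREG | (1 << DCC_SET_EREG_SHIFT)
 *	DCC_SET_EREGx(3) == DCC_SET_EREG | (2 << DCC_SET_EREG_SHIFT)
 *
 * The resulting value is meant to be merged into the command bits of the
 * DCC register; how and when that happens is up to the raminit code.
 */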

/* Per channel DRAM Row Attribute registers (32-bit) */
#define CxDRA_MCHBAR(x) (0x1208 + (x * 0x0100))
#define CxDRA_PAGESIZE_SHIFT(r) (r * 4) /* Per rank r */
#define CxDRA_PAGESIZE_MASKr(r) (0x7 << CxDRA_PAGESIZE_SHIFT(r))
#define CxDRA_PAGESIZE_MASK 0x0000ffff
#define CxDRA_PAGESIZE(r, p) /* for log2(dimm page size in bytes) p */ \
	(((p - 10) << CxDRA_PAGESIZE_SHIFT(r)) & CxDRA_PAGESIZE_MASKr(r))
#define CxDRA_BANKS_SHIFT(r) ((r * 3) + 16)
#define CxDRA_BANKS_MASKr(r) (0x3 << CxDRA_BANKS_SHIFT(r))
#define CxDRA_BANKS_MASK 0x07ff0000
#define CxDRA_BANKS(r, b) /* for number of banks b */ \
	((b << (CxDRA_BANKS_SHIFT(r) - 3)) & CxDRA_BANKS_MASKr(r))
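
/*
 * Worked example (illustration only): for rank 0 of a DIMM with an 8 KiB
 * page (log2 = 13) and 8 banks, the macros above evaluate to
 *
 *	CxDRA_PAGESIZE(0, 13) == (13 - 10) << 0  == 0x3
 *	CxDRA_BANKS(0, 8)     == 8 << (16 - 3)   == 1 << 16
 *
 * i.e. the page size is stored as log2(bytes) - 10 in a 3-bit field per
 * rank, and the bank count is stored shifted right by 3 (divided by 8) in
 * the per-rank bank field.
 */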

/*
 * Per channel DRAM Row Boundary registers (32-bit)
 * Every two ranks share one register and must be programmed at the same time.
 * All registers (4 ranks per channel) have to be set.
 */
#define CxDRBy_MCHBAR(x, r) (0x1200 + (x * 0x0100) + ((r/2) * 4))
#define CxDRBy_BOUND_SHIFT(r) ((r % 2) * 16)
#define CxDRBy_BOUND_MASK(r) (0x1fc << CxDRBy_BOUND_SHIFT(r))
#define CxDRBy_BOUND_MB(r, b) /* for boundary in MB b */ \
	(((b >> 5) << CxDRBy_BOUND_SHIFT(r)) & CxDRBy_BOUND_MASK(r))
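
/*
 * Programming sketch (illustration only): since ranks 2n and 2n+1 share a
 * register, both boundary fields are composed first and written with a
 * single 32-bit store, e.g. for ranks 0 and 1 of channel ch (the *_mb
 * variable names below are placeholders, not part of this interface):
 *
 *	MCHBAR32(CxDRBy_MCHBAR(ch, 0)) =
 *		CxDRBy_BOUND_MB(0, bound_rank0_mb) |
 *		CxDRBy_BOUND_MB(1, bound_rank1_mb);
 *
 * The boundary is given in MB and stored in 32 MB units (b >> 5).
 */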

#define CxDRC0_MCHBAR(x) (0x1230 + (x * 0x0100))
#define CxDRC0_RANKEN0 (1 << 24) /* Rank Enable */
#define CxDRC0_RANKEN1 (1 << 25)
#define CxDRC0_RANKEN2 (1 << 26)
#define CxDRC0_RANKEN3 (1 << 27)
#define CxDRC0_RANKEN(r) (1 << (24 + r))
#define CxDRC0_RANKEN_MASK (0xf << 24)
#define CxDRC0_RMS_SHIFT 8 /* Refresh Mode Select */
#define CxDRC0_RMS_MASK (7 << CxDRC0_RMS_SHIFT)
#define CxDRC0_RMS_78US (2 << CxDRC0_RMS_SHIFT)
#define CxDRC0_RMS_39US (3 << CxDRC0_RMS_SHIFT)

#define CxDRC1_MCHBAR(x) (0x1234 + (x * 0x0100))
#define CxDRC1_SSDS_SHIFT 24
#define CxDRC1_SSDS_MASK (0xff << CxDRC1_SSDS_SHIFT)
#define CxDRC1_DS (0x91 << CxDRC1_SSDS_SHIFT)
#define CxDRC1_SS (0xb1 << CxDRC1_SSDS_SHIFT)
#define CxDRC1_NOTPOP(r) (1 << (16 + r)) /* Write 1 for Not Populated */
#define CxDRC1_NOTPOP_MASK (0xf << 16)
#define CxDRC1_MUSTWR (3 << 11)

#define CxDRC2_MCHBAR(x) (0x1238 + (x * 0x0100))
#define CxDRC2_NOTPOP(r) (1 << (24 + r)) /* Write 1 for Not Populated */
#define CxDRC2_NOTPOP_MASK (0xf << 24)
#define CxDRC2_MUSTWR (1 << 12)
#define CxDRC2_CLK1067MT (1 << 0)

/* DRAM Timing registers (32-bit each) */
#define CxDRT0_MCHBAR(x) (0x1210 + (x * 0x0100))
#define CxDRT0_BtB_WtP_SHIFT 26
#define CxDRT0_BtB_WtP_MASK (0x1f << CxDRT0_BtB_WtP_SHIFT)
#define CxDRT0_BtB_WtR_SHIFT 20
#define CxDRT0_BtB_WtR_MASK (0x1f << CxDRT0_BtB_WtR_SHIFT)
#define CxDRT1_MCHBAR(x) (0x1214 + (x * 0x0100))
#define CxDRT2_MCHBAR(x) (0x1218 + (x * 0x0100))
#define CxDRT3_MCHBAR(x) (0x121c + (x * 0x0100))
#define CxDRT4_MCHBAR(x) (0x1220 + (x * 0x0100))
#define CxDRT5_MCHBAR(x) (0x1224 + (x * 0x0100))
#define CxDRT6_MCHBAR(x) (0x1228 + (x * 0x0100))

/* Clock disable registers (32-bit each) */
#define CxDCLKDIS_MCHBAR(x) (0x120c + (x * 0x0100))
#define CxDCLKDIS_MASK 3
#define CxDCLKDIS_ENABLE 3 /* Always enable both clock pairs. */

/* On-Die-Termination registers (2x 32-bit per channel) */
#define CxODT_HIGH(x) (0x124c + (x * 0x0100))
#define CxODT_LOW(x) (0x1248 + (x * 0x0100))

/* Write Training registers. */
#define CxWRTy_MCHBAR(ch, s) (0x1470 + (ch * 0x0100) + ((3 - s) * 4))

#define CxGTEW(x) (0x1270+(x*0x100))
#define CxGTC(x) (0x1274+(x*0x100))
#define CxDTPEW(x) (0x1278+(x*0x100))
#define CxDTAEW(x) (0x1280+(x*0x100))
#define CxDTC(x) (0x1288+(x*0x100))


/*
 * DMIBAR
 */

#define DMIBAR8(x) *((volatile u8 *)(DEFAULT_DMIBAR + x))
#define DMIBAR16(x) *((volatile u16 *)(DEFAULT_DMIBAR + x))
#define DMIBAR32(x) *((volatile u32 *)(DEFAULT_DMIBAR + x))

#define DMIVC0RCTL 0x14
#define DMIESD 0x44


/*
 * EPBAR
 */

#define EPBAR8(x) *((volatile u8 *)(DEFAULT_EPBAR + x))
#define EPBAR16(x) *((volatile u16 *)(DEFAULT_EPBAR + x))
#define EPBAR32(x) *((volatile u32 *)(DEFAULT_EPBAR + x))


#ifndef __ASSEMBLER__
void gm45_early_init(void);
void gm45_early_reset(void);

void enter_raminit_or_reset(void);
void get_gmch_info(sysinfo_t *);
void raminit_thermal(const sysinfo_t *);
void init_igd(const sysinfo_t *, int no_igd, int no_peg);
void init_pm(const sysinfo_t *);

int raminit_read_vco_index(void);
u32 raminit_get_rank_addr(unsigned int channel, unsigned int rank);

void raminit_rcomp_calibration(stepping_t stepping);
void raminit_reset_readwrite_pointers(void);
void raminit_receive_enable_calibration(const timings_t *, const dimminfo_t *);
void raminit_write_training(const mem_clock_t, const dimminfo_t *, int s3resume);
void raminit_read_training(const dimminfo_t *, int s3resume);

void gm45_late_init(stepping_t);

u32 decode_igd_memory_size(u32 gms);
u32 decode_igd_gtt_size(u32 gsm);

void init_iommu(void);
#endif /* !__ASSEMBLER__ */

/* Chipset types */
#define NEHALEM_MOBILE 0
#define NEHALEM_DESKTOP 1
#define NEHALEM_SERVER 2

/* Device ID for SandyBridge and IvyBridge */
#define BASE_REV_SNB 0x00
#define BASE_REV_IVB 0x50
#define BASE_REV_MASK 0x50

/* SandyBridge CPU stepping */
#define SNB_STEP_D0 (BASE_REV_SNB + 5) /* Also J0 */
#define SNB_STEP_D1 (BASE_REV_SNB + 6)
#define SNB_STEP_D2 (BASE_REV_SNB + 7) /* Also J1/Q0 */

/* IvyBridge CPU stepping */
#define IVB_STEP_A0 (BASE_REV_IVB + 0)
#define IVB_STEP_B0 (BASE_REV_IVB + 2)
#define IVB_STEP_C0 (BASE_REV_IVB + 4)
#define IVB_STEP_K0 (BASE_REV_IVB + 5)
#define IVB_STEP_D0 (BASE_REV_IVB + 6)
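
/*
 * Illustration only: these stepping values are meant to be compared
 * against the host bridge revision. A hedged sketch of telling the two
 * generations apart might look like
 *
 *	if ((bridge_silicon_revision() & BASE_REV_MASK) == BASE_REV_IVB)
 *		... IvyBridge path ...
 *	else
 *		... SandyBridge path ...
 *
 * assuming bridge_silicon_revision() (declared further down) returns a
 * BASE_REV_* value with the stepping added in.
 */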

/* Intel Enhanced Debug region must be 4MB */
#define IED_SIZE 0x400000

/* Northbridge BARs */
#define DEFAULT_PCIEXBAR CONFIG_MMCONF_BASE_ADDRESS /* 4 KB per PCIe device */
#ifndef __ACPI__
#define DEFAULT_MCHBAR ((u8 *)0xfed10000) /* 16 KB */
#define DEFAULT_DMIBAR ((u8 *)0xfed18000) /* 4 KB */
#else
#define DEFAULT_MCHBAR 0xfed10000 /* 16 KB */
#define DEFAULT_DMIBAR 0xfed18000 /* 4 KB */
#endif
#define DEFAULT_EPBAR 0xfed19000 /* 4 KB */
#define DEFAULT_RCBABASE ((u8 *)0xfed1c000)

#define QUICKPATH_BUS 0xff

#include <southbridge/intel/ibexpeak/pch.h>

/* Everything below this line is ignored in the DSDT */
#ifndef __ACPI__

/* Device 0:0.0 PCI configuration space (Host Bridge) */

#define EPBAR 0x40
#define MCHBAR 0x48
#define PCIEXBAR 0x60
#define DMIBAR 0x68
#define X60BAR 0x60

#define LAC 0x87 /* Legacy Access Control */
#define QPD0F1_SMRAM 0x4d /* System Management RAM Control */

#define SKPAD 0xdc /* Scratchpad Data */

/* Device 0:1.0 PCI configuration space (PCI Express) */

#define BCTRL1 0x3e /* 16bit */


/* Device 0:2.0 PCI configuration space (Graphics Device) */

#define MSAC 0x62 /* Multi Size Aperture Control */
#define SWSCI 0xe8 /* SWSCI enable */
#define ASLS 0xfc /* OpRegion Base */

/*
 * MCHBAR
 */

#define MCHBAR8(x) *((volatile u8 *)(DEFAULT_MCHBAR + x))
#define MCHBAR16(x) *((volatile u16 *)(DEFAULT_MCHBAR + x))
#define MCHBAR32(x) *((volatile u32 *)(DEFAULT_MCHBAR + x))
#define MCHBAR32_OR(x, or) MCHBAR32(x) = (MCHBAR32(x) | (or))

#define BIOS_RESET_CPL 0x5da8 /* 8bit */

/*
 * EPBAR - Egress Port Root Complex Register Block
 */

#define EPBAR8(x) *((volatile u8 *)(DEFAULT_EPBAR + x))
#define EPBAR16(x) *((volatile u16 *)(DEFAULT_EPBAR + x))
#define EPBAR32(x) *((volatile u32 *)(DEFAULT_EPBAR + x))

#define EPPVCCAP1 0x004 /* 32bit */
#define EPPVCCAP2 0x008 /* 32bit */

#define EPVC0RCAP 0x010 /* 32bit */
#define EPVC0RCTL 0x014 /* 32bit */
#define EPVC0RSTS 0x01a /* 16bit */

#define EPVC1RCAP 0x01c /* 32bit */
#define EPVC1RCTL 0x020 /* 32bit */
#define EPVC1RSTS 0x026 /* 16bit */

#define EPVC1MTS 0x028 /* 32bit */
#define EPVC1IST 0x038 /* 64bit */

#define EPESD 0x044 /* 32bit */

#define EPLE1D 0x050 /* 32bit */
#define EPLE1A 0x058 /* 64bit */
#define EPLE2D 0x060 /* 32bit */
#define EPLE2A 0x068 /* 64bit */

#define PORTARB 0x100 /* 256bit */

/*
 * DMIBAR
 */

#define DMIBAR8(x) *((volatile u8 *)(DEFAULT_DMIBAR + x))
#define DMIBAR16(x) *((volatile u16 *)(DEFAULT_DMIBAR + x))
#define DMIBAR32(x) *((volatile u32 *)(DEFAULT_DMIBAR + x))

#define DMIVCECH 0x000 /* 32bit */
#define DMIPVCCAP1 0x004 /* 32bit */
#define DMIPVCCAP2 0x008 /* 32bit */

#define DMIPVCCCTL 0x00c /* 16bit */

#define DMIVC0RCAP 0x010 /* 32bit */
#define DMIVC0RCTL0 0x014 /* 32bit */
#define DMIVC0RSTS 0x01a /* 16bit */

#define DMIVC1RCAP 0x01c /* 32bit */
#define DMIVC1RCTL 0x020 /* 32bit */
#define DMIVC1RSTS 0x026 /* 16bit */

#define DMILE1D 0x050 /* 32bit */
#define DMILE1A 0x058 /* 64bit */
#define DMILE2D 0x060 /* 32bit */
#define DMILE2A 0x068 /* 64bit */

#define DMILCAP 0x084 /* 32bit */
#define DMILCTL 0x088 /* 16bit */
#define DMILSTS 0x08a /* 16bit */

#define DMICTL1 0x0f0 /* 32bit */
#define DMICTL2 0x0fc /* 32bit */

#define DMICC 0x208 /* 32bit */

#define DMIDRCCFG 0xeb4 /* 32bit */

#ifndef __ASSEMBLER__
static inline void barrier(void) { asm("" ::: "memory"); }

#define PCI_DEVICE_ID_SB 0x0104
#define PCI_DEVICE_ID_IB 0x0154

#ifdef __SMM__
void intel_nehalem_finalize_smm(void);
#else /* !__SMM__ */
int bridge_silicon_revision(void);
void nehalem_early_initialization(int chipset_type);
void nehalem_late_initialization(void);

/* debugging functions */
void print_pci_devices(void);
void dump_pci_device(unsigned dev);
void dump_pci_devices(void);
void dump_spd_registers(void);
void dump_mem(unsigned start, unsigned end);
void report_platform_info(void);
#endif /* !__SMM__ */


#define MRC_DATA_ALIGN 0x1000
#define MRC_DATA_SIGNATURE (('M'<<0)|('R'<<8)|('C'<<16)|('D'<<24))

struct mrc_data_container {
	u32 mrc_signature;	// "MRCD"
	u32 mrc_data_size;	// Actual total size of this structure
	u32 mrc_checksum;	// IP style checksum
	u32 reserved;		// For header alignment
	u8 mrc_data[0];		// Variable size, platform/run time dependent.
} __attribute__ ((packed));

struct mrc_data_container *find_current_mrc_cache(void);
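
/*
 * Validation sketch (illustration only): a cache entry returned by
 * find_current_mrc_cache() would typically be checked before use, roughly
 * along these lines, assuming compute_ip_checksum() from <ip_checksum.h>;
 * the exact coverage of mrc_checksum is defined by the implementation, so
 * treat this as a sketch rather than the reference check:
 *
 *	struct mrc_data_container *mrc = find_current_mrc_cache();
 *	if (mrc && mrc->mrc_signature == MRC_DATA_SIGNATURE &&
 *	    mrc->mrc_checksum == compute_ip_checksum(mrc->mrc_data,
 *						     mrc->mrc_data_size))
 *		... reuse the cached training data ...
 */
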
#if !defined(__PRE_RAM__)
#include "gma.h"
int init_igd_opregion(igd_opregion_t *igd_opregion);
#endif /* !__PRE_RAM__ */

#endif /* !__ASSEMBLER__ */
#endif /* !__ACPI__ */
#endif /* __NORTHBRIDGE_INTEL_NEHALEM_NEHALEM_H__ */