blob: e1c26d162c3a819676ed5a6d48bb9eea5caae183 [file] [log] [blame]
Angel Pons4b429832020-04-02 23:48:50 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Aaron Durbin76c37002012-10-30 09:03:43 -05002
Tristan Corrickbc896cd2018-12-17 22:09:50 +13003#include <commonlib/helpers.h>
Aaron Durbin76c37002012-10-30 09:03:43 -05004#include <console/console.h>
Furquan Shaikh76cedd22020-05-02 10:24:23 -07005#include <acpi/acpi.h>
Aaron Durbin76c37002012-10-30 09:03:43 -05006#include <stdint.h>
7#include <delay.h>
8#include <cpu/intel/haswell/haswell.h>
Aaron Durbin76c37002012-10-30 09:03:43 -05009#include <device/device.h>
10#include <device/pci.h>
Tristan Corrickbc896cd2018-12-17 22:09:50 +130011#include <device/pci_def.h>
Aaron Durbin76c37002012-10-30 09:03:43 -050012#include <device/pci_ids.h>
Tristan Corrickbc896cd2018-12-17 22:09:50 +130013#include <device/pci_ops.h>
Aaron Durbin76c37002012-10-30 09:03:43 -050014#include <boot/tables.h>
Elyes HAOUASa1e22b82019-03-18 22:49:36 +010015
Aaron Durbin76c37002012-10-30 09:03:43 -050016#include "chip.h"
17#include "haswell.h"
18
Angel Pons1db5bc72020-01-15 00:49:03 +010019static int get_pcie_bar(struct device *dev, unsigned int index, u32 *base, u32 *len)
Aaron Durbin76c37002012-10-30 09:03:43 -050020{
Angel Pons1db5bc72020-01-15 00:49:03 +010021 u32 pciexbar_reg, mask;
Aaron Durbin76c37002012-10-30 09:03:43 -050022
23 *base = 0;
24 *len = 0;
25
Aaron Durbinc12ef972012-12-18 14:22:49 -060026 pciexbar_reg = pci_read_config32(dev, index);
Aaron Durbin76c37002012-10-30 09:03:43 -050027
28 if (!(pciexbar_reg & (1 << 0)))
29 return 0;
30
31 switch ((pciexbar_reg >> 1) & 3) {
Angel Pons1db5bc72020-01-15 00:49:03 +010032 case 0: /* 256MB */
Ryan Salsamendifa0725d2017-06-30 17:29:37 -070033 mask = (1UL << 31) | (1 << 30) | (1 << 29) | (1 << 28);
34 *base = pciexbar_reg & mask;
Aaron Durbin76c37002012-10-30 09:03:43 -050035 *len = 256 * 1024 * 1024;
36 return 1;
Angel Pons1db5bc72020-01-15 00:49:03 +010037 case 1: /* 128M */
Ryan Salsamendifa0725d2017-06-30 17:29:37 -070038 mask = (1UL << 31) | (1 << 30) | (1 << 29) | (1 << 28);
39 mask |= (1 << 27);
40 *base = pciexbar_reg & mask;
Aaron Durbin76c37002012-10-30 09:03:43 -050041 *len = 128 * 1024 * 1024;
42 return 1;
Angel Pons1db5bc72020-01-15 00:49:03 +010043 case 2: /* 64M */
Ryan Salsamendifa0725d2017-06-30 17:29:37 -070044 mask = (1UL << 31) | (1 << 30) | (1 << 29) | (1 << 28);
45 mask |= (1 << 27) | (1 << 26);
46 *base = pciexbar_reg & mask;
Aaron Durbin76c37002012-10-30 09:03:43 -050047 *len = 64 * 1024 * 1024;
48 return 1;
49 }
50
51 return 0;
52}
53
/*
 * Decode the host bridge's PCIEXBAR into *base/*len.
 * Returns 1 on success, 0 when the MMCONF window is disabled.
 */
int decode_pcie_bar(u32 *const base, u32 *const len)
{
	return get_pcie_bar(pcidev_on_root(0, 0), PCIEXBAR, base, len);
}
58
Tristan Corrickf3127d42018-10-31 02:25:54 +130059static const char *northbridge_acpi_name(const struct device *dev)
60{
61 if (dev->path.type == DEVICE_PATH_DOMAIN)
62 return "PCI0";
63
64 if (dev->path.type != DEVICE_PATH_PCI || dev->bus->secondary != 0)
65 return NULL;
66
67 switch (dev->path.pci.devfn) {
68 case PCI_DEVFN(0, 0):
69 return "MCHC";
70 }
71
72 return NULL;
73}
74
/*
 * TODO: We could determine how many PCIe busses we need in the bar.
 * For now, that number is hardcoded to a max of 64.
 */
/* Device operations for the PCI domain (root of the PCI tree). */
static struct device_operations pci_domain_ops = {
	.read_resources    = pci_domain_read_resources,
	.set_resources     = pci_domain_set_resources,
	.scan_bus          = pci_domain_scan_bus,
	.acpi_name         = northbridge_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
};
86
/*
 * Read a BAR-style register from PCI config space.
 * Returns 1 and sets *base when the enable bit is set, 0 otherwise.
 * NOTE: *len is deliberately left untouched; the caller supplies the
 * fixed size from the mc_fixed_resources table.
 */
static int get_bar(struct device *dev, unsigned int index, u32 *base, u32 *len)
{
	u32 bar = pci_read_config32(dev, index);

	/* If not enabled don't report it */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit */
	*base = bar & ~1;

	return 1;
}
100
/*
 * There are special BARs that actually are programmed in the MCHBAR. These are Intel
 * special features, but they do consume resources that need to be accounted for.
 * Returns 1 and sets *base when enabled; *len is left to the caller's table value.
 */
static int get_bar_in_mchbar(struct device *dev, unsigned int index, u32 *base, u32 *len)
{
	u32 bar = MCHBAR32(index);

	/* If not enabled don't report it */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit */
	*base = bar & ~1;

	return 1;
}
118
/* Describes one fixed MMIO range and how to obtain its base (and possibly size). */
struct fixed_mmio_descriptor {
	unsigned int index;	/* Register offset; also used as the resource index */
	u32 size;		/* Default size; get_resource() may override it */
	/* Returns non-zero and fills *base (and optionally *size) if the range is enabled */
	int (*get_resource)(struct device *dev, unsigned int index, u32 *base, u32 *size);
	const char *description;	/* Human-readable name for debug output */
};
125
/* Fixed MMIO ranges owned by the host bridge / memory controller device. */
#define SIZE_KB(x) ((x) * 1024)
struct fixed_mmio_descriptor mc_fixed_resources[] = {
	/* PCIEXBAR size is 0 here because get_pcie_bar() decodes the real length */
	{ PCIEXBAR, SIZE_KB(0),  get_pcie_bar,      "PCIEXBAR" },
	{ MCHBAR,   SIZE_KB(32), get_bar,           "MCHBAR"   },
	{ DMIBAR,   SIZE_KB(4),  get_bar,           "DMIBAR"   },
	{ EPBAR,    SIZE_KB(4),  get_bar,           "EPBAR"    },
	{ GDXCBAR,  SIZE_KB(4),  get_bar_in_mchbar, "GDXCBAR"  },
	{ EDRAMBAR, SIZE_KB(16), get_bar_in_mchbar, "EDRAMBAR" },
};
#undef SIZE_KB
136
Angel Pons1db5bc72020-01-15 00:49:03 +0100137/* Add all known fixed MMIO ranges that hang off the host bridge/memory controller device. */
Elyes HAOUAS77f7a6e2018-05-09 17:47:59 +0200138static void mc_add_fixed_mmio_resources(struct device *dev)
Aaron Durbinc12ef972012-12-18 14:22:49 -0600139{
140 int i;
141
142 for (i = 0; i < ARRAY_SIZE(mc_fixed_resources); i++) {
143 u32 base;
144 u32 size;
145 struct resource *resource;
146 unsigned int index;
147
148 size = mc_fixed_resources[i].size;
149 index = mc_fixed_resources[i].index;
Angel Pons1db5bc72020-01-15 00:49:03 +0100150 if (!mc_fixed_resources[i].get_resource(dev, index, &base, &size))
Aaron Durbinc12ef972012-12-18 14:22:49 -0600151 continue;
152
153 resource = new_resource(dev, mc_fixed_resources[i].index);
Angel Pons1db5bc72020-01-15 00:49:03 +0100154 resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED | IORESOURCE_STORED |
155 IORESOURCE_RESERVE | IORESOURCE_ASSIGNED;
156
Aaron Durbinc12ef972012-12-18 14:22:49 -0600157 resource->base = base;
158 resource->size = size;
159 printk(BIOS_DEBUG, "%s: Adding %s @ %x 0x%08lx-0x%08lx.\n",
160 __func__, mc_fixed_resources[i].description, index,
161 (unsigned long)base, (unsigned long)(base + size - 1));
162 }
163}
164
/* Host Memory Map:
 *
 * +--------------------------+ TOUUD
 * |                          |
 * +--------------------------+ 4GiB
 * | PCI Address Space        |
 * +--------------------------+ TOLUD (also maps into MC address space)
 * | iGD                      |
 * +--------------------------+ BDSM
 * | GTT                      |
 * +--------------------------+ BGSM
 * | TSEG                     |
 * +--------------------------+ TSEGMB
 * | Usage DRAM               |
 * +--------------------------+ 0
 *
 * Some of the base registers above can be equal, making the size of the regions within 0.
 * This is because the memory controller internally subtracts the base registers from each
 * other to determine sizes of the regions. In other words, the memory map regions are always
 * in a fixed order, no matter what sizes they have.
 */
186
/* Describes one memory map register in the host bridge's config space. */
struct map_entry {
	int reg;		/* Config space offset of the register */
	int is_64_bit;		/* Non-zero if the register spans two dwords */
	int is_limit;		/* Non-zero if this is a limit register (low bits read back as ones) */
	const char *description;	/* Name used in debug output */
};
193
Angel Pons1db5bc72020-01-15 00:49:03 +0100194static void read_map_entry(struct device *dev, struct map_entry *entry, uint64_t *result)
Aaron Durbinc12ef972012-12-18 14:22:49 -0600195{
196 uint64_t value;
197 uint64_t mask;
198
Angel Pons1db5bc72020-01-15 00:49:03 +0100199 /* All registers have a 1MiB granularity */
200 mask = ((1ULL << 20) - 1);
Aaron Durbinc12ef972012-12-18 14:22:49 -0600201 mask = ~mask;
202
203 value = 0;
204
205 if (entry->is_64_bit) {
206 value = pci_read_config32(dev, entry->reg + 4);
207 value <<= 32;
Aaron Durbin76c37002012-10-30 09:03:43 -0500208 }
209
Aaron Durbinc12ef972012-12-18 14:22:49 -0600210 value |= pci_read_config32(dev, entry->reg);
211 value &= mask;
212
213 if (entry->is_limit)
214 value |= ~mask;
215
216 *result = value;
217}
218
/* Build a struct map_entry initializer for the given register and flags. */
#define MAP_ENTRY(reg_, is_64_, is_limit_, desc_) \
	{ \
		.reg = reg_, \
		.is_64_bit = is_64_, \
		.is_limit = is_limit_, \
		.description = desc_, \
	}

/* Convenience wrappers: 32-bit base, 64-bit base, and 64-bit limit registers */
#define MAP_ENTRY_BASE_32(reg_, desc_)  MAP_ENTRY(reg_, 0, 0, desc_)
#define MAP_ENTRY_BASE_64(reg_, desc_)  MAP_ENTRY(reg_, 1, 0, desc_)
#define MAP_ENTRY_LIMIT_64(reg_, desc_) MAP_ENTRY(reg_, 1, 1, desc_)
Aaron Durbinc12ef972012-12-18 14:22:49 -0600230
/* Indices into the memory_map[] table and the mc_values[] array. */
enum {
	TOM_REG,
	TOUUD_REG,
	MESEG_BASE_REG,
	MESEG_LIMIT_REG,
	REMAP_BASE_REG,
	REMAP_LIMIT_REG,
	TOLUD_REG,
	BGSM_REG,
	BDSM_REG,
	TSEG_REG,
	/* Must be last */
	NUM_MAP_ENTRIES,
};
245
/* Host bridge memory map registers, in the order of the enum above. */
static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOM_REG]         = MAP_ENTRY_BASE_64(TOM, "TOM"),
	[TOUUD_REG]       = MAP_ENTRY_BASE_64(TOUUD, "TOUUD"),
	[MESEG_BASE_REG]  = MAP_ENTRY_BASE_64(MESEG_BASE, "MESEG_BASE"),
	[MESEG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(MESEG_LIMIT, "MESEG_LIMIT"),
	[REMAP_BASE_REG]  = MAP_ENTRY_BASE_64(REMAPBASE, "REMAP_BASE"),
	[REMAP_LIMIT_REG] = MAP_ENTRY_LIMIT_64(REMAPLIMIT, "REMAP_LIMIT"),
	[TOLUD_REG]       = MAP_ENTRY_BASE_32(TOLUD, "TOLUD"),
	[BDSM_REG]        = MAP_ENTRY_BASE_32(BDSM, "BDSM"),
	[BGSM_REG]        = MAP_ENTRY_BASE_32(BGSM, "BGSM"),
	[TSEG_REG]        = MAP_ENTRY_BASE_32(TSEG, "TSEGMB"),
};
258
Elyes HAOUAS77f7a6e2018-05-09 17:47:59 +0200259static void mc_read_map_entries(struct device *dev, uint64_t *values)
Aaron Durbinc12ef972012-12-18 14:22:49 -0600260{
261 int i;
262 for (i = 0; i < NUM_MAP_ENTRIES; i++) {
263 read_map_entry(dev, &memory_map[i], &values[i]);
264 }
265}
266
Elyes HAOUAS77f7a6e2018-05-09 17:47:59 +0200267static void mc_report_map_entries(struct device *dev, uint64_t *values)
Aaron Durbinc12ef972012-12-18 14:22:49 -0600268{
269 int i;
270 for (i = 0; i < NUM_MAP_ENTRIES; i++) {
271 printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
272 memory_map[i].description, values[i]);
273 }
Angel Pons1db5bc72020-01-15 00:49:03 +0100274 /* One can validate the BDSM and BGSM against the GGC */
Aaron Durbinc12ef972012-12-18 14:22:49 -0600275 printk(BIOS_DEBUG, "MC MAP: GGC: 0x%x\n", pci_read_config16(dev, GGC));
276}
277
/*
 * Add all DRAM-backed resources below and above 4GiB, plus the reserved
 * SMRAM/graphics stolen ranges. Resources are added starting at index
 * *resource_cnt; the updated index is written back for the caller.
 */
static void mc_add_dram_resources(struct device *dev, int *resource_cnt)
{
	unsigned long base_k, size_k, touud_k, index;
	struct resource *resource;
	uint64_t mc_values[NUM_MAP_ENTRIES];

	/* Read in the MAP registers and report their values */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	/*
	 * These are the host memory ranges that should be added:
	 * - 0 -> 0xa0000: cacheable
	 * - 0xc0000 -> TSEG: cacheable
	 * - TSEG -> BGSM: cacheable with standard MTRRs and reserved
	 * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved
	 * - 4GiB -> TOUUD: cacheable
	 *
	 * The default SMRAM space is reserved so that the range doesn't have to be saved
	 * during S3 Resume. Once marked reserved the OS cannot use the memory. This is a
	 * bit of an odd place to reserve the region, but the CPU devices don't have
	 * dev_ops->read_resources() called on them.
	 *
	 * The range 0xa0000 -> 0xc0000 does not have any resources associated with it to
	 * handle legacy VGA memory. If this range is not omitted the mtrr code will setup
	 * the area as cacheable, causing VGA access to not work.
	 *
	 * The TSEG region is mapped as cacheable so that one can perform SMRAM relocation
	 * faster. Once the SMRR is enabled, the SMRR takes precedence over the existing
	 * MTRRs covering this region.
	 *
	 * It should be noted that cacheable entry types need to be added in order. The reason
	 * is that the current MTRR code assumes this and falls over itself if it isn't.
	 *
	 * The resource index starts low and should not meet or exceed PCI_BASE_ADDRESS_0.
	 */
	index = *resource_cnt;

	/* 0 - > 0xa0000 */
	base_k = 0;
	size_k = (0xa0000 >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> TSEG */
	base_k = 0xc0000 >> 10;
	size_k = (unsigned long)(mc_values[TSEG_REG] >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* TSEG -> BGSM */
	resource = new_resource(dev, index++);
	resource->base = mc_values[TSEG_REG];
	resource->size = mc_values[BGSM_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED | IORESOURCE_STORED |
			  IORESOURCE_RESERVE | IORESOURCE_ASSIGNED | IORESOURCE_CACHEABLE;

	/* BGSM -> TOLUD. If the IGD is disabled, BGSM can equal TOLUD. */
	if (mc_values[BGSM_REG] != mc_values[TOLUD_REG]) {
		resource = new_resource(dev, index++);
		resource->base = mc_values[BGSM_REG];
		resource->size = mc_values[TOLUD_REG] - resource->base;
		resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED | IORESOURCE_STORED |
				  IORESOURCE_RESERVE | IORESOURCE_ASSIGNED;
	}

	/* 4GiB -> TOUUD */
	base_k = 4096 * 1024; /* 4GiB */
	touud_k = mc_values[TOUUD_REG] >> 10;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/* Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: Legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, (0xa0000 >> 10), (0xc0000 - 0xa0000) >> 10);
	reserved_ram_resource(dev, index++, (0xc0000 >> 10), (0x100000 - 0xc0000) >> 10);

#if CONFIG(CHROMEOS_RAMOOPS)
	reserved_ram_resource(dev, index++,
			CONFIG_CHROMEOS_RAMOOPS_RAM_START >> 10,
			CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE >> 10);
#endif
	*resource_cnt = index;
}
364
Elyes HAOUAS77f7a6e2018-05-09 17:47:59 +0200365static void mc_read_resources(struct device *dev)
Aaron Durbinc12ef972012-12-18 14:22:49 -0600366{
Matt DeVilliera51e3792018-03-04 01:44:15 -0600367 int index = 0;
Angel Pons1db5bc72020-01-15 00:49:03 +0100368 const bool vtd_capable = !(pci_read_config32(dev, CAPID0_A) & VTD_DISABLE);
Matt DeVilliera51e3792018-03-04 01:44:15 -0600369
Angel Pons1db5bc72020-01-15 00:49:03 +0100370 /* Read standard PCI resources */
Aaron Durbinc12ef972012-12-18 14:22:49 -0600371 pci_dev_read_resources(dev);
372
Angel Pons1db5bc72020-01-15 00:49:03 +0100373 /* Add all fixed MMIO resources */
Aaron Durbinc12ef972012-12-18 14:22:49 -0600374 mc_add_fixed_mmio_resources(dev);
375
Angel Pons1db5bc72020-01-15 00:49:03 +0100376 /* Add VT-d MMIO resources, if capable */
Matt DeVilliera51e3792018-03-04 01:44:15 -0600377 if (vtd_capable) {
Angel Pons1db5bc72020-01-15 00:49:03 +0100378 mmio_resource(dev, index++, GFXVT_BASE_ADDRESS / KiB, GFXVT_BASE_SIZE / KiB);
379 mmio_resource(dev, index++, VTVC0_BASE_ADDRESS / KiB, VTVC0_BASE_SIZE / KiB);
Matt DeVilliera51e3792018-03-04 01:44:15 -0600380 }
381
Angel Pons1db5bc72020-01-15 00:49:03 +0100382 /* Calculate and add DRAM resources */
Matt DeVilliera51e3792018-03-04 01:44:15 -0600383 mc_add_dram_resources(dev, &index);
Aaron Durbin76c37002012-10-30 09:03:43 -0500384}
385
Tristan Corrickbc896cd2018-12-17 22:09:50 +1300386/*
Angel Pons1db5bc72020-01-15 00:49:03 +0100387 * The Mini-HD audio device is disabled whenever the IGD is. This is because it provides
388 * audio over the integrated graphics port(s), which requires the IGD to be functional.
Tristan Corrickbc896cd2018-12-17 22:09:50 +1300389 */
390static void disable_devices(void)
391{
392 static const struct {
393 const unsigned int devfn;
394 const u32 mask;
395 const char *const name;
396 } nb_devs[] = {
397 { PCI_DEVFN(1, 2), DEVEN_D1F2EN, "PEG12" },
398 { PCI_DEVFN(1, 1), DEVEN_D1F1EN, "PEG11" },
399 { PCI_DEVFN(1, 0), DEVEN_D1F0EN, "PEG10" },
400 { PCI_DEVFN(2, 0), DEVEN_D2EN | DEVEN_D3EN, "IGD" },
401 { PCI_DEVFN(3, 0), DEVEN_D3EN, "Mini-HD audio" },
402 { PCI_DEVFN(4, 0), DEVEN_D4EN, "\"device 4\"" },
403 { PCI_DEVFN(7, 0), DEVEN_D7EN, "\"device 7\"" },
404 };
405
Angel Pons1db5bc72020-01-15 00:49:03 +0100406 struct device *host_dev = pcidev_on_root(0, 0);
Tristan Corrickbc896cd2018-12-17 22:09:50 +1300407 u32 deven;
408 size_t i;
409
410 if (!host_dev)
411 return;
412
413 deven = pci_read_config32(host_dev, DEVEN);
414
415 for (i = 0; i < ARRAY_SIZE(nb_devs); i++) {
Kyösti Mälkkie7377552018-06-21 16:20:55 +0300416 struct device *dev = pcidev_path_on_root(nb_devs[i].devfn);
Tristan Corrickbc896cd2018-12-17 22:09:50 +1300417 if (!dev || !dev->enabled) {
418 printk(BIOS_DEBUG, "Disabling %s.\n", nb_devs[i].name);
419 deven &= ~nb_devs[i].mask;
420 }
421 }
422
423 pci_write_config32(host_dev, DEVEN, deven);
424}
425
/* Set up the egress port (EPBAR) virtual channels VC0 and VC1. */
static void init_egress(void)
{
	/* VC0: Enable, ID0, TC0 */
	EPBAR32(EPVC0RCTL) = (1 << 31) | (0 << 24) | (1 << 0);

	/* No Low Priority Extended VCs, one Extended VC */
	EPBAR32(EPPVCCAP1) = (0 << 4) | (1 << 0);

	/* VC1: Enable, ID1, TC1 */
	EPBAR32(EPVC1RCTL) = (1 << 31) | (1 << 24) | (1 << 1);

	/* Poll the VC1 Negotiation Pending bit until it clears */
	while ((EPBAR16(EPVC1RSTS) & (1 << 1)) != 0)
		;
}
441
/*
 * Initialize the DMI link: de-emphasis and latency tuning (Haswell-H only),
 * clear sticky error status, then enable ASPM on the SA side of the link.
 */
static void northbridge_dmi_init(void)
{
	/* Haswell-H (non-LP) parts pair with a full-size Lynx Point PCH over DMI */
	const bool is_haswell_h = !CONFIG(INTEL_LYNXPOINT_LP);

	u16 reg16;
	u32 reg32;

	/* Steps prior to DMI ASPM */
	if (is_haswell_h) {
		/* Configure DMI De-Emphasis */
		reg16 = DMIBAR16(DMILCTL2);
		reg16 |= (1 << 6);	/* 0b: -6.0 dB, 1b: -3.5 dB */
		DMIBAR16(DMILCTL2) = reg16;

		reg32 = DMIBAR32(DMIL0SLAT);
		reg32 |= (1 << 31);
		DMIBAR32(DMIL0SLAT) = reg32;

		reg32 = DMIBAR32(DMILLTC);
		reg32 |= (1 << 29);
		DMIBAR32(DMILLTC) = reg32;

		reg32 = DMIBAR32(DMI_AFE_PM_TMR);
		reg32 &= ~0x1f;
		reg32 |= 0x13;
		DMIBAR32(DMI_AFE_PM_TMR) = reg32;
	}

	/* Clear error status bits */
	DMIBAR32(DMIUESTS) = 0xffffffff;
	DMIBAR32(DMICESTS) = 0xffffffff;

	if (is_haswell_h) {
		/* Enable ASPM L0s and L1 on SA link, should happen before PCH link */
		reg16 = DMIBAR16(DMILCTL);
		reg16 |= (1 << 1) | (1 << 0);
		DMIBAR16(DMILCTL) = reg16;
	}
}
481
/*
 * Main northbridge init: egress/DMI bring-up, interrupt routing, device
 * disabling, BIOS_RESET_CPL handshake and turbo power limit setup.
 * Sequence matters: power limits must be set ~1ms after BIOS_RESET_CPL.
 */
static void northbridge_init(struct device *dev)
{
	u8 bios_reset_cpl, pair;

	init_egress();
	northbridge_dmi_init();

	/* Enable Power Aware Interrupt Routing. */
	pair = MCHBAR8(INTRDIRCTL);
	pair &= ~0x7;	/* Clear 2:0 */
	pair |= 0x4;	/* Fixed Priority */
	MCHBAR8(INTRDIRCTL) = pair;

	disable_devices();

	/*
	 * Set bits 0 + 1 of BIOS_RESET_CPL to indicate to the CPU
	 * that BIOS has initialized memory and power management.
	 */
	bios_reset_cpl = MCHBAR8(BIOS_RESET_CPL);
	bios_reset_cpl |= 3;
	MCHBAR8(BIOS_RESET_CPL) = bios_reset_cpl;
	printk(BIOS_DEBUG, "Set BIOS_RESET_CPL\n");

	/* Configure turbo power limits 1ms after reset complete bit. */
	mdelay(1);
	set_power_limits(28);

	/* Set here before graphics PM init. */
	MCHBAR32(MMIO_PAVP_MSG) = 0x00100001;
}
513
/* Device operations for the host bridge / memory controller PCI device. */
static struct device_operations mc_ops = {
	.read_resources   = mc_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = northbridge_init,
	.acpi_fill_ssdt   = generate_cpu_entries,
	.ops_pci          = &pci_dev_ops_pci,
};
522
/* PCI device IDs of supported Haswell host bridges; zero-terminated. */
static const unsigned short mc_pci_device_ids[] = {
	0x0c00, /* Desktop */
	0x0c04, /* Mobile */
	0x0a04, /* ULT */
	0x0c08, /* Server */
	0x0d00, /* Crystal Well Desktop */
	0x0d04, /* Crystal Well Mobile */
	0x0d08, /* Crystal Well Server (by extrapolation) */
	0
};
533
/* Bind mc_ops to all Intel host bridge device IDs listed above. */
static const struct pci_driver mc_driver_hsw __pci_driver = {
	.ops     = &mc_ops,
	.vendor  = PCI_VENDOR_ID_INTEL,
	.devices = mc_pci_device_ids,
};
539
/* Device operations for the CPU cluster; resources are a no-op, init brings up APs. */
static struct device_operations cpu_bus_ops = {
	.read_resources = noop_read_resources,
	.set_resources  = noop_set_resources,
	.init           = mp_cpu_bus_init,
};
545
Elyes HAOUAS77f7a6e2018-05-09 17:47:59 +0200546static void enable_dev(struct device *dev)
Aaron Durbin76c37002012-10-30 09:03:43 -0500547{
Angel Pons1db5bc72020-01-15 00:49:03 +0100548 /* Set the operations if it is a special bus type. */
Aaron Durbin76c37002012-10-30 09:03:43 -0500549 if (dev->path.type == DEVICE_PATH_DOMAIN) {
550 dev->ops = &pci_domain_ops;
551 } else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
552 dev->ops = &cpu_bus_ops;
553 }
554}
555
/* Chip driver entry point referenced by the devicetree. */
struct chip_operations northbridge_intel_haswell_ops = {
	CHIP_NAME("Intel i7 (Haswell) integrated Northbridge")
	.enable_dev = enable_dev,
};
559};