/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <cpu/intel/haswell/haswell.h>
#include <acpi/acpi.h>
#include <device/pci_ops.h>
#include <stdint.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <soc/acpi.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/refcode.h>
#include <soc/systemagent.h>

u8 systemagent_revision(void)
{
	struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);
	return pci_read_config8(sa_dev, PCI_REVISION_ID);
}

static int get_pcie_bar(struct device *dev, unsigned int index, u32 *base,
			u32 *len)
{
	u32 pciexbar_reg;

	*base = 0;
	*len = 0;

	pciexbar_reg = pci_read_config32(dev, index);

	if (!(pciexbar_reg & (1 << 0)))
		return 0;

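	/*
	 * Bits 2:1 of PCIEXBAR select the decoded window length:
	 * 0 = 256 MiB, 1 = 128 MiB, 2 = 64 MiB. The base mask in each case
	 * keeps only the address bits significant for that window size.
	 */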
	switch ((pciexbar_reg >> 1) & 3) {
	case 0: // 256MB
		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
					(1 << 28));
		*len = 256 * 1024 * 1024;
		return 1;
	case 1: // 128MB
		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
					(1 << 28)|(1 << 27));
		*len = 128 * 1024 * 1024;
		return 1;
	case 2: // 64MB
		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
					(1 << 28)|(1 << 27)|(1 << 26));
		*len = 64 * 1024 * 1024;
		return 1;
	}

	return 0;
}

static int get_bar(struct device *dev, unsigned int index, u32 *base, u32 *len)
{
	u32 bar;

	bar = pci_read_config32(dev, index);

	/* If not enabled don't report it. */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit. */
	*base = bar & ~1;

	return 1;
}

/* There are special BARs that actually are programmed in the MCHBAR. These
 * are Intel-specific features, but they do consume resources that need to be
 * accounted for. */
static int get_bar_in_mchbar(struct device *dev, unsigned int index, u32 *base,
			     u32 *len)
{
	u32 bar;

	bar = mchbar_read32(index);

	/* If not enabled don't report it. */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit. */
	*base = bar & ~1;

	return 1;
}

struct fixed_mmio_descriptor {
	unsigned int index;
	u32 size;
	int (*get_resource)(struct device *dev, unsigned int index,
			    u32 *base, u32 *size);
	const char *description;
};

struct fixed_mmio_descriptor mc_fixed_resources[] = {
	{ PCIEXBAR, 0, get_pcie_bar, "PCIEXBAR" },
	{ MCHBAR, MCH_BASE_SIZE, get_bar, "MCHBAR" },
	{ DMIBAR, DMI_BASE_SIZE, get_bar, "DMIBAR" },
	{ EPBAR, EP_BASE_SIZE, get_bar, "EPBAR" },
	{ GDXCBAR, GDXC_BASE_SIZE, get_bar_in_mchbar, "GDXCBAR" },
	{ EDRAMBAR, EDRAM_BASE_SIZE, get_bar_in_mchbar, "EDRAMBAR" },
};

/*
 * Add all known fixed MMIO ranges that hang off the host bridge/memory
 * controller device.
 */
static void mc_add_fixed_mmio_resources(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mc_fixed_resources); i++) {
		u32 base;
		u32 size;
		struct resource *resource;
		unsigned int index;

		size = mc_fixed_resources[i].size;
		index = mc_fixed_resources[i].index;
		if (!mc_fixed_resources[i].get_resource(dev, index,
							&base, &size))
			continue;

		resource = new_resource(dev, mc_fixed_resources[i].index);
		resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
				  IORESOURCE_STORED | IORESOURCE_RESERVE |
				  IORESOURCE_ASSIGNED;
		resource->base = base;
		resource->size = size;
		printk(BIOS_DEBUG, "%s: Adding %s @ %x 0x%08lx-0x%08lx.\n",
		       __func__, mc_fixed_resources[i].description, index,
		       (unsigned long)base, (unsigned long)(base + size - 1));
	}
}

/* Host Memory Map:
 *
 * +--------------------------+ TOUUD
 * |                          |
 * +--------------------------+ 4GiB
 * |    PCI Address Space     |
 * +--------------------------+ TOLUD (also maps into MC address space)
 * |    iGD                   |
 * +--------------------------+ BDSM
 * |    GTT                   |
 * +--------------------------+ BGSM
 * |    TSEG                  |
 * +--------------------------+ TSEGMB
 * |    Usable DRAM           |
 * +--------------------------+ 0
 *
 * Some of the base registers above can be equal, making the size of those
 * regions 0. This is because the memory controller internally subtracts
 * the base registers from each other to determine sizes of the regions. In
 * other words, the memory map is in a fixed order no matter what.
 */

struct map_entry {
	int reg;
	int is_64_bit;
	int is_limit;
	const char *description;
};

static void read_map_entry(struct device *dev, struct map_entry *entry,
			   uint64_t *result)
{
	uint64_t value;
	uint64_t mask;

	/* All registers are on a 1MiB granularity. */
	mask = ((1ULL << 20) - 1);
	mask = ~mask;
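	/* The mask now keeps address bits 63:20 (0xfffffffffff00000). */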

	value = 0;

	if (entry->is_64_bit) {
		value = pci_read_config32(dev, entry->reg + 4);
		value <<= 32;
	}

	value |= pci_read_config32(dev, entry->reg);
	value &= mask;

	if (entry->is_limit)
		value |= ~mask;

	*result = value;
}

#define MAP_ENTRY(reg_, is_64_, is_limit_, desc_) \
	{ \
		.reg = reg_, \
		.is_64_bit = is_64_, \
		.is_limit = is_limit_, \
		.description = desc_, \
	}

#define MAP_ENTRY_BASE_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 0, desc_)
#define MAP_ENTRY_LIMIT_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 1, desc_)
#define MAP_ENTRY_BASE_32(reg_, desc_) \
	MAP_ENTRY(reg_, 0, 0, desc_)

enum {
	TOM_REG,
	TOUUD_REG,
	MESEG_BASE_REG,
	MESEG_LIMIT_REG,
	REMAP_BASE_REG,
	REMAP_LIMIT_REG,
	TOLUD_REG,
	BGSM_REG,
	BDSM_REG,
	TSEG_REG,
	// Must be last.
	NUM_MAP_ENTRIES
};

static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOM_REG] = MAP_ENTRY_BASE_64(TOM, "TOM"),
	[TOUUD_REG] = MAP_ENTRY_BASE_64(TOUUD, "TOUUD"),
	[MESEG_BASE_REG] = MAP_ENTRY_BASE_64(MESEG_BASE, "MESEG_BASE"),
	[MESEG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(MESEG_LIMIT, "MESEG_LIMIT"),
	[REMAP_BASE_REG] = MAP_ENTRY_BASE_64(REMAPBASE, "REMAP_BASE"),
	[REMAP_LIMIT_REG] = MAP_ENTRY_LIMIT_64(REMAPLIMIT, "REMAP_LIMIT"),
	[TOLUD_REG] = MAP_ENTRY_BASE_32(TOLUD, "TOLUD"),
	[BDSM_REG] = MAP_ENTRY_BASE_32(BDSM, "BDSM"),
	[BGSM_REG] = MAP_ENTRY_BASE_32(BGSM, "BGSM"),
	[TSEG_REG] = MAP_ENTRY_BASE_32(TSEG, "TSEGMB"),
};

static void mc_read_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++)
		read_map_entry(dev, &memory_map[i], &values[i]);
}

static void mc_report_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++) {
		printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
		       memory_map[i].description, values[i]);
	}
	/* One can validate the BDSM and BGSM against the GGC. */
	printk(BIOS_DEBUG, "MC MAP: GGC: 0x%x\n", pci_read_config16(dev, GGC));
}

static void mc_add_dram_resources(struct device *dev, int *resource_cnt)
{
	unsigned long base_k, size_k;
	unsigned long touud_k;
	unsigned long index;
	struct resource *resource;
	uint64_t mc_values[NUM_MAP_ENTRIES];
	unsigned long dpr_size = 0;
	u32 dpr_reg;

	/* Read in the MAP registers and report their values. */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	/*
	 * DMA Protected Range can be reserved below TSEG for PCODE patch
	 * or TXT/Boot Guard related data. Rather than report a base address,
	 * the DPR register reports the TOP of the region, which is the same
	 * as the TSEG base. The region size is reported in MiB in bits 11:4.
	 */
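	/*
	 * Worked example with a hypothetical register value, assuming
	 * DPR_EPM is bit 2 and DPR_SIZE_MASK covers bits 11:4 as defined in
	 * <soc/systemagent.h>: dpr_reg = 0x00000044 -> EPM set, size field
	 * 0x04 -> dpr_size = (0x044 & 0xff0) << 16 = 0x400000 (4 MiB).
	 */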
	dpr_reg = pci_read_config32(dev, DPR);
	if (dpr_reg & DPR_EPM) {
		dpr_size = (dpr_reg & DPR_SIZE_MASK) << 16;
		printk(BIOS_INFO, "DPR SIZE: 0x%lx\n", dpr_size);
	}

	/*
	 * These are the host memory ranges that should be added:
	 * - 0 -> 0xa0000: cacheable
	 * - 0xc0000 -> TSEG: cacheable
	 * - TSEG -> BGSM: cacheable with standard MTRRs and reserved
	 * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved
	 * - 4GiB -> TOUUD: cacheable
	 *
	 * The default SMRAM space is reserved so that the range doesn't
	 * have to be saved during S3 Resume. Once marked reserved the OS
	 * cannot use the memory. This is a bit of an odd place to reserve
	 * the region, but the CPU devices don't have dev_ops->read_resources()
	 * called on them.
	 *
	 * The range 0xa0000 -> 0xc0000 does not have any resources
	 * associated with it to handle legacy VGA memory. If this range
	 * is not omitted, the MTRR code will set up the area as cacheable,
	 * causing VGA access to not work.
	 *
	 * The TSEG region is mapped as cacheable so that one can perform
	 * SMRAM relocation faster. Once the SMRR is enabled, the SMRR takes
	 * precedence over the existing MTRRs covering this region.
	 *
	 * It should be noted that cacheable entry types need to be added in
	 * order. The reason is that the current MTRR code assumes this and
	 * falls over itself if it isn't.
	 *
	 * The resource index starts low and should not meet or exceed
	 * PCI_BASE_ADDRESS_0.
	 */
	index = *resource_cnt;

	/* 0 -> 0xa0000 */
	base_k = 0;
	size_k = (0xa0000 >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> TSEG - DPR */
	base_k = 0xc0000 >> 10;
	size_k = (unsigned long)(mc_values[TSEG_REG] >> 10) - base_k;
	size_k -= dpr_size >> 10;
	ram_resource(dev, index++, base_k, size_k);

	/* TSEG - DPR -> BGSM */
	resource = new_resource(dev, index++);
	resource->base = mc_values[TSEG_REG] - dpr_size;
	resource->size = mc_values[BGSM_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED | IORESOURCE_CACHEABLE;

	/* BGSM -> TOLUD */
	resource = new_resource(dev, index++);
	resource->base = mc_values[BGSM_REG];
	resource->size = mc_values[TOLUD_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED;

	/* 4GiB -> TOUUD */
	base_k = 4096 * 1024; /* 4GiB */
	touud_k = mc_values[TOUUD_REG] >> 10;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/* Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, (0xa0000 >> 10), (0xc0000 - 0xa0000) >> 10);
	reserved_ram_resource(dev, index++, (0xc0000 >> 10),
			      (0x100000 - 0xc0000) >> 10);

	*resource_cnt = index;
}

static void systemagent_read_resources(struct device *dev)
{
	int index = 0;
	const bool vtd_capable =
		!(pci_read_config32(dev, CAPID0_A) & VTD_DISABLE);

	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* Add all fixed MMIO resources. */
	mc_add_fixed_mmio_resources(dev);

	/* Add VT-d MMIO resources if capable */
	if (vtd_capable) {
		mmio_resource(dev, index++, GFXVT_BASE_ADDRESS / KiB,
			      GFXVT_BASE_SIZE / KiB);
		mmio_resource(dev, index++, VTVC0_BASE_ADDRESS / KiB,
			      VTVC0_BASE_SIZE / KiB);
	}

	/* Calculate and add DRAM resources. */
	mc_add_dram_resources(dev, &index);
}

static void systemagent_init(struct device *dev)
{
	/* Enable Power Aware Interrupt Routing. */
	mchbar_clrsetbits8(MCH_PAIR, 0x7, 0x4); /* Clear 2:0, set Fixed Priority */

	/*
	 * Set bits 0+1 of BIOS_RESET_CPL to indicate to the CPU
	 * that BIOS has initialized memory and power management
	 */
	mchbar_setbits8(BIOS_RESET_CPL, 3);
	printk(BIOS_DEBUG, "Set BIOS_RESET_CPL\n");

	/* Configure turbo power limits 1ms after reset complete bit */
	mdelay(1);
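	/*
	 * The argument is assumed to be the PL1 time window in seconds
	 * (power_limit_1_time in the Haswell CPU code).
	 */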
	set_power_limits(28);
}

static struct device_operations systemagent_ops = {
	.read_resources   = systemagent_read_resources,
	.acpi_fill_ssdt   = generate_cpu_entries,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = systemagent_init,
	.ops_pci          = &pci_dev_ops_pci,
};

static const unsigned short systemagent_ids[] = {
	0x0a04, /* Haswell ULT */
	0x1604, /* Broadwell-U/Y */
	0x1610, /* Broadwell-H Desktop */
	0x1614, /* Broadwell-H Mobile */
	0
};

static const struct pci_driver systemagent_driver __pci_driver = {
	.ops     = &systemagent_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = systemagent_ids
};

static struct device_operations pci_domain_ops = {
	.read_resources = &pci_domain_read_resources,
	.set_resources  = &pci_domain_set_resources,
	.scan_bus       = &pci_domain_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = &northbridge_write_acpi_tables,
#endif
};

static struct device_operations cpu_bus_ops = {
	.read_resources = noop_read_resources,
	.set_resources  = noop_set_resources,
	.init           = mp_cpu_bus_init,
};

static void broadwell_enable(struct device *dev)
{
	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

static void broadwell_init_pre_device(void *chip_info)
{
	broadwell_run_reference_code();
}

struct chip_operations soc_intel_broadwell_ops = {
	CHIP_NAME("Intel Broadwell")
	.enable_dev = &broadwell_enable,
	.init       = &broadwell_init_pre_device,
};