blob: b6b5608a24074ef2e0e7fc32fd7ebf6809f1f83a [file] [log] [blame]
Duncan Lauriec88c54c2014-04-30 16:36:13 -07001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2007-2009 coresystems GmbH
5 * Copyright (C) 2014 Google Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
Duncan Lauriec88c54c2014-04-30 16:36:13 -070015 */
16
17#include <console/console.h>
18#include <arch/acpi.h>
Kyösti Mälkkif1b58b72019-03-01 13:43:02 +020019#include <device/pci_ops.h>
Duncan Lauriec88c54c2014-04-30 16:36:13 -070020#include <stdint.h>
21#include <delay.h>
22#include <device/device.h>
23#include <device/pci.h>
24#include <device/pci_ids.h>
25#include <stdlib.h>
Duncan Lauriec88c54c2014-04-30 16:36:13 -070026#include <vendorcode/google/chromeos/chromeos.h>
Julius Werner4ee4bd52014-10-20 13:46:39 -070027#include <soc/cpu.h>
28#include <soc/iomap.h>
29#include <soc/pci_devs.h>
30#include <soc/ramstage.h>
31#include <soc/systemagent.h>
Duncan Lauriec88c54c2014-04-30 16:36:13 -070032
Duncan Laurie84b9cf42014-07-31 10:46:57 -070033u8 systemagent_revision(void)
34{
Kyösti Mälkki71756c212019-07-12 13:10:19 +030035 struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);
36 return pci_read_config8(sa_dev, PCI_REVISION_ID);
Duncan Laurie84b9cf42014-07-31 10:46:57 -070037}
38
Matt DeVillier42d16602018-07-04 16:32:21 -050039uintptr_t sa_get_tolud_base(void)
40{
Kyösti Mälkki71756c212019-07-12 13:10:19 +030041 struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);
Matt DeVillier42d16602018-07-04 16:32:21 -050042 /* Bit 0 is lock bit, not part of address */
Kyösti Mälkki71756c212019-07-12 13:10:19 +030043 return pci_read_config32(sa_dev, TOLUD) & ~1;
Matt DeVillier42d16602018-07-04 16:32:21 -050044}
45
46uintptr_t sa_get_gsm_base(void)
47{
Kyösti Mälkki71756c212019-07-12 13:10:19 +030048 struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);
Matt DeVillier42d16602018-07-04 16:32:21 -050049 /* Bit 0 is lock bit, not part of address */
Kyösti Mälkki71756c212019-07-12 13:10:19 +030050 return pci_read_config32(sa_dev, BGSM) & ~1;
Matt DeVillier42d16602018-07-04 16:32:21 -050051}
52
Elyes HAOUAS040aff22018-05-27 16:30:36 +020053static int get_pcie_bar(struct device *dev, unsigned int index, u32 *base,
54 u32 *len)
Duncan Lauriec88c54c2014-04-30 16:36:13 -070055{
56 u32 pciexbar_reg;
57
58 *base = 0;
59 *len = 0;
60
61 pciexbar_reg = pci_read_config32(dev, index);
62
63 if (!(pciexbar_reg & (1 << 0)))
64 return 0;
65
66 switch ((pciexbar_reg >> 1) & 3) {
67 case 0: // 256MB
68 *base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
69 (1 << 28));
70 *len = 256 * 1024 * 1024;
71 return 1;
72 case 1: // 128M
73 *base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
74 (1 << 28)|(1 << 27));
75 *len = 128 * 1024 * 1024;
76 return 1;
77 case 2: // 64M
78 *base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
79 (1 << 28)|(1 << 27)|(1 << 26));
80 *len = 64 * 1024 * 1024;
81 return 1;
82 }
83
84 return 0;
85}
86
Elyes HAOUAS040aff22018-05-27 16:30:36 +020087static int get_bar(struct device *dev, unsigned int index, u32 *base, u32 *len)
Duncan Lauriec88c54c2014-04-30 16:36:13 -070088{
89 u32 bar;
90
91 bar = pci_read_config32(dev, index);
92
93 /* If not enabled don't report it. */
94 if (!(bar & 0x1))
95 return 0;
96
97 /* Knock down the enable bit. */
98 *base = bar & ~1;
99
100 return 1;
101}
102
/* There are special BARs that actually are programmed in the MCHBAR. These
 * are Intel special features, but they do consume resources that need to be
 * accounted for. */
Elyes HAOUAS040aff22018-05-27 16:30:36 +0200106static int get_bar_in_mchbar(struct device *dev, unsigned int index, u32 *base,
Lee Leahy26b7cd02017-03-16 18:47:55 -0700107 u32 *len)
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700108{
109 u32 bar;
110
111 bar = MCHBAR32(index);
112
113 /* If not enabled don't report it. */
114 if (!(bar & 0x1))
115 return 0;
116
117 /* Knock down the enable bit. */
118 *base = bar & ~1;
119
120 return 1;
121}
122
/* Describes one fixed MMIO window hanging off the host bridge. */
struct fixed_mmio_descriptor {
	unsigned int index; /* Register offset (config space or MCHBAR). */
	u32 size; /* Default size; get_resource() may overwrite it. */
	/* Reads base/size; returns non-zero when the window is enabled. */
	int (*get_resource)(struct device *dev, unsigned int index,
			u32 *base, u32 *size);
	const char *description; /* Name used in debug output. */
};
130
/*
 * Fixed MMIO windows of the memory controller/host bridge.  PCIEXBAR's
 * size is decoded from the register itself (table size 0); GDXCBAR and
 * EDRAMBAR are read out of MCHBAR rather than PCI config space.
 */
struct fixed_mmio_descriptor mc_fixed_resources[] = {
	{ PCIEXBAR, 0, get_pcie_bar, "PCIEXBAR" },
	{ MCHBAR, MCH_BASE_SIZE, get_bar, "MCHBAR" },
	{ DMIBAR, DMI_BASE_SIZE, get_bar, "DMIBAR" },
	{ EPBAR, EP_BASE_SIZE, get_bar, "EPBAR" },
	{ GDXCBAR, GDXC_BASE_SIZE, get_bar_in_mchbar, "GDXCBAR" },
	{ EDRAMBAR, EDRAM_BASE_SIZE, get_bar_in_mchbar, "EDRAMBAR" },
};
139
140/*
141 * Add all known fixed MMIO ranges that hang off the host bridge/memory
142 * controller device.
143 */
Elyes HAOUAS040aff22018-05-27 16:30:36 +0200144static void mc_add_fixed_mmio_resources(struct device *dev)
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700145{
146 int i;
147
148 for (i = 0; i < ARRAY_SIZE(mc_fixed_resources); i++) {
149 u32 base;
150 u32 size;
151 struct resource *resource;
152 unsigned int index;
153
154 size = mc_fixed_resources[i].size;
155 index = mc_fixed_resources[i].index;
156 if (!mc_fixed_resources[i].get_resource(dev, index,
Lee Leahy26b7cd02017-03-16 18:47:55 -0700157 &base, &size))
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700158 continue;
159
160 resource = new_resource(dev, mc_fixed_resources[i].index);
161 resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
Lee Leahy26b7cd02017-03-16 18:47:55 -0700162 IORESOURCE_STORED | IORESOURCE_RESERVE |
163 IORESOURCE_ASSIGNED;
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700164 resource->base = base;
165 resource->size = size;
166 printk(BIOS_DEBUG, "%s: Adding %s @ %x 0x%08lx-0x%08lx.\n",
167 __func__, mc_fixed_resources[i].description, index,
168 (unsigned long)base, (unsigned long)(base + size - 1));
169 }
170}
171
172/* Host Memory Map:
173 *
174 * +--------------------------+ TOUUD
175 * | |
176 * +--------------------------+ 4GiB
177 * | PCI Address Space |
178 * +--------------------------+ TOLUD (also maps into MC address space)
179 * | iGD |
180 * +--------------------------+ BDSM
181 * | GTT |
182 * +--------------------------+ BGSM
183 * | TSEG |
184 * +--------------------------+ TSEGMB
185 * | Usage DRAM |
186 * +--------------------------+ 0
187 *
188 * Some of the base registers above can be equal making the size of those
189 * regions 0. The reason is because the memory controller internally subtracts
190 * the base registers from each other to determine sizes of the regions. In
191 * other words, the memory map is in a fixed order no matter what.
192 */
193
/* Describes how to decode one host memory-map register. */
struct map_entry {
	int reg; /* PCI config-space offset of the register. */
	int is_64_bit; /* Non-zero: also read the upper 32 bits at reg + 4. */
	int is_limit; /* Non-zero: limit register; low bits are filled in. */
	const char *description; /* Name used in debug output. */
};
200
Elyes HAOUAS040aff22018-05-27 16:30:36 +0200201static void read_map_entry(struct device *dev, struct map_entry *entry,
Lee Leahy26b7cd02017-03-16 18:47:55 -0700202 uint64_t *result)
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700203{
204 uint64_t value;
205 uint64_t mask;
206
207 /* All registers are on a 1MiB granularity. */
208 mask = ((1ULL<<20)-1);
209 mask = ~mask;
210
211 value = 0;
212
213 if (entry->is_64_bit) {
214 value = pci_read_config32(dev, entry->reg + 4);
215 value <<= 32;
216 }
217
218 value |= pci_read_config32(dev, entry->reg);
219 value &= mask;
220
221 if (entry->is_limit)
222 value |= ~mask;
223
224 *result = value;
225}
226
/* Build a struct map_entry initializer for register reg_. */
#define MAP_ENTRY(reg_, is_64_, is_limit_, desc_) \
	{ \
		.reg = reg_,		\
		.is_64_bit = is_64_,	\
		.is_limit = is_limit_,	\
		.description = desc_,	\
	}

/* 64-bit base register (low dword at reg_, high dword at reg_ + 4). */
#define MAP_ENTRY_BASE_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 0, desc_)
/* 64-bit limit register; decoded value addresses the region's last byte. */
#define MAP_ENTRY_LIMIT_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 1, desc_)
/* 32-bit base register. */
#define MAP_ENTRY_BASE_32(reg_, desc_) \
	MAP_ENTRY(reg_, 0, 0, desc_)
241
/* Indices into memory_map[] and the mc_values[] array read from it. */
enum {
	TOM_REG,
	TOUUD_REG,
	MESEG_BASE_REG,
	MESEG_LIMIT_REG,
	REMAP_BASE_REG,
	REMAP_LIMIT_REG,
	TOLUD_REG,
	BGSM_REG,
	BDSM_REG,
	TSEG_REG,
	// Must be last.
	NUM_MAP_ENTRIES
};
256
257static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
258 [TOM_REG] = MAP_ENTRY_BASE_64(TOM, "TOM"),
259 [TOUUD_REG] = MAP_ENTRY_BASE_64(TOUUD, "TOUUD"),
260 [MESEG_BASE_REG] = MAP_ENTRY_BASE_64(MESEG_BASE, "MESEG_BASE"),
261 [MESEG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(MESEG_LIMIT, "MESEG_LIMIT"),
262 [REMAP_BASE_REG] = MAP_ENTRY_BASE_64(REMAPBASE, "REMAP_BASE"),
263 [REMAP_LIMIT_REG] = MAP_ENTRY_LIMIT_64(REMAPLIMIT, "REMAP_LIMIT"),
264 [TOLUD_REG] = MAP_ENTRY_BASE_32(TOLUD, "TOLUD"),
265 [BDSM_REG] = MAP_ENTRY_BASE_32(BDSM, "BDSM"),
266 [BGSM_REG] = MAP_ENTRY_BASE_32(BGSM, "BGSM"),
267 [TSEG_REG] = MAP_ENTRY_BASE_32(TSEG, "TESGMB"),
268};
269
Elyes HAOUAS040aff22018-05-27 16:30:36 +0200270static void mc_read_map_entries(struct device *dev, uint64_t *values)
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700271{
272 int i;
Lee Leahy8a9c7dc2017-03-17 10:43:25 -0700273 for (i = 0; i < NUM_MAP_ENTRIES; i++)
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700274 read_map_entry(dev, &memory_map[i], &values[i]);
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700275}
276
Elyes HAOUAS040aff22018-05-27 16:30:36 +0200277static void mc_report_map_entries(struct device *dev, uint64_t *values)
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700278{
279 int i;
280 for (i = 0; i < NUM_MAP_ENTRIES; i++) {
281 printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
282 memory_map[i].description, values[i]);
283 }
284 /* One can validate the BDSM and BGSM against the GGC. */
285 printk(BIOS_DEBUG, "MC MAP: GGC: 0x%x\n", pci_read_config16(dev, GGC));
286}
287
/*
 * Carve the host DRAM map into coreboot resources on @dev, starting at
 * resource index *resource_cnt and writing the next free index back.
 * Cacheable RAM entries must be added in ascending order (see note below).
 */
static void mc_add_dram_resources(struct device *dev, int *resource_cnt)
{
	unsigned long base_k, size_k;
	unsigned long touud_k;
	unsigned long index;
	struct resource *resource;
	uint64_t mc_values[NUM_MAP_ENTRIES];
	unsigned long dpr_size = 0;
	u32 dpr_reg;
	struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);

	/* Read in the MAP registers and report their values. */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	/*
	 * DMA Protected Range can be reserved below TSEG for PCODE patch
	 * or TXT/BootGuard related data. Rather than report a base address
	 * the DPR register reports the TOP of the region, which is the same
	 * as TSEG base. The region size is reported in MiB in bits 11:4.
	 */
	dpr_reg = pci_read_config32(sa_dev, DPR);
	if (dpr_reg & DPR_EPM) {
		/* Size is in MiB units; << 16 converts the masked field
		   (bits 11:4) to bytes. */
		dpr_size = (dpr_reg & DPR_SIZE_MASK) << 16;
		printk(BIOS_INFO, "DPR SIZE: 0x%lx\n", dpr_size);
	}

	/*
	 * These are the host memory ranges that should be added:
	 * - 0 -> 0xa0000: cacheable
	 * - 0xc0000 -> TSEG : cacheable
	 * - TSEG -> BGSM: cacheable with standard MTRRs and reserved
	 * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved
	 * - 4GiB -> TOUUD: cacheable
	 *
	 * The default SMRAM space is reserved so that the range doesn't
	 * have to be saved during S3 Resume. Once marked reserved the OS
	 * cannot use the memory. This is a bit of an odd place to reserve
	 * the region, but the CPU devices don't have dev_ops->read_resources()
	 * called on them.
	 *
	 * The range 0xa0000 -> 0xc0000 does not have any resources
	 * associated with it to handle legacy VGA memory. If this range
	 * is not omitted the mtrr code will setup the area as cacheable
	 * causing VGA access to not work.
	 *
	 * The TSEG region is mapped as cacheable so that one can perform
	 * SMRAM relocation faster. Once the SMRR is enabled the SMRR takes
	 * precedence over the existing MTRRs covering this region.
	 *
	 * It should be noted that cacheable entry types need to be added in
	 * order. The reason is that the current MTRR code assumes this and
	 * falls over itself if it isn't.
	 *
	 * The resource index starts low and should not meet or exceed
	 * PCI_BASE_ADDRESS_0.
	 */
	index = *resource_cnt;

	/* 0 -> 0xa0000 */
	base_k = 0;
	size_k = (0xa0000 >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> TSEG - DPR */
	base_k = 0xc0000 >> 10;
	size_k = (unsigned long)(mc_values[TSEG_REG] >> 10) - base_k;
	size_k -= dpr_size >> 10;
	ram_resource(dev, index++, base_k, size_k);

	/* TSEG - DPR -> BGSM: reserved, but cacheable for faster SMRAM
	   relocation (see the note above). */
	resource = new_resource(dev, index++);
	resource->base = mc_values[TSEG_REG] - dpr_size;
	resource->size = mc_values[BGSM_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED | IORESOURCE_CACHEABLE;

	/* BGSM -> TOLUD: GTT + iGD stolen memory, reserved, not cacheable. */
	resource = new_resource(dev, index++);
	resource->base = mc_values[BGSM_REG];
	resource->size = mc_values[TOLUD_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED;

	/* 4GiB -> TOUUD */
	base_k = 4096 * 1024; /* 4GiB */
	touud_k = mc_values[TOUUD_REG] >> 10;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/* Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, (0xa0000 >> 10), (0xc0000 - 0xa0000) >> 10);
	reserved_ram_resource(dev, index++, (0xc0000 >> 10),
			      (0x100000 - 0xc0000) >> 10);

	if (CONFIG(CHROMEOS))
		chromeos_reserve_ram_oops(dev, index++);

	/* Hand the next free resource index back to the caller. */
	*resource_cnt = index;
}
395
Elyes HAOUAS040aff22018-05-27 16:30:36 +0200396static void systemagent_read_resources(struct device *dev)
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700397{
Matt DeVillier81a6f102018-02-19 17:33:48 -0600398 int index = 0;
399 const bool vtd_capable =
400 !(pci_read_config32(dev, CAPID0_A) & VTD_DISABLE);
401
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700402 /* Read standard PCI resources. */
403 pci_dev_read_resources(dev);
404
405 /* Add all fixed MMIO resources. */
406 mc_add_fixed_mmio_resources(dev);
407
Matt DeVillier81a6f102018-02-19 17:33:48 -0600408 /* Add VT-d MMIO resources if capable */
409 if (vtd_capable) {
410 mmio_resource(dev, index++, GFXVT_BASE_ADDRESS / KiB,
411 GFXVT_BASE_SIZE / KiB);
412 mmio_resource(dev, index++, VTVC0_BASE_ADDRESS / KiB,
413 VTVC0_BASE_SIZE / KiB);
414 }
415
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700416 /* Calculate and add DRAM resources. */
Matt DeVillier81a6f102018-02-19 17:33:48 -0600417 mc_add_dram_resources(dev, &index);
Duncan Lauriec88c54c2014-04-30 16:36:13 -0700418}
419
420static void systemagent_init(struct device *dev)
421{
422 u8 bios_reset_cpl, pair;
423
424 /* Enable Power Aware Interrupt Routing */
425 pair = MCHBAR8(MCH_PAIR);
426 pair &= ~0x7; /* Clear 2:0 */
427 pair |= 0x4; /* Fixed Priority */
428 MCHBAR8(MCH_PAIR) = pair;
429
430 /*
431 * Set bits 0+1 of BIOS_RESET_CPL to indicate to the CPU
432 * that BIOS has initialized memory and power management
433 */
434 bios_reset_cpl = MCHBAR8(BIOS_RESET_CPL);
435 bios_reset_cpl |= 3;
436 MCHBAR8(BIOS_RESET_CPL) = bios_reset_cpl;
437 printk(BIOS_DEBUG, "Set BIOS_RESET_CPL\n");
438
439 /* Configure turbo power limits 1ms after reset complete bit */
440 mdelay(1);
441 set_power_limits(28);
442}
443
/* Device operations for the system agent (host bridge) PCI device. */
static struct device_operations systemagent_ops = {
	.read_resources = systemagent_read_resources,
	.acpi_fill_ssdt_generator = generate_cpu_entries,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = systemagent_init,
	.ops_pci = &broadwell_pci_ops,
};
452
/* PCI device IDs of supported system agents; zero-terminated. */
static const unsigned short systemagent_ids[] = {
	0x0a04, /* Haswell ULT */
	0x1604, /* Broadwell-U/Y */
	0x1610, /* Broadwell-H Desktop */
	0x1614, /* Broadwell-H Mobile */
	0 /* terminator */
};
460
/* Bind systemagent_ops to any Intel device in systemagent_ids[]. */
static const struct pci_driver systemagent_driver __pci_driver = {
	.ops = &systemagent_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = systemagent_ids
};