blob: 56a44d99e5d1dd34eec67cbf6fb92e889b2c6348 [file] [log] [blame]
Duncan Lauriec88c54c2014-04-30 16:36:13 -07001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2007-2009 coresystems GmbH
5 * Copyright (C) 2014 Google Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include <console/console.h>
22#include <arch/acpi.h>
23#include <arch/io.h>
24#include <stdint.h>
25#include <delay.h>
26#include <device/device.h>
27#include <device/pci.h>
28#include <device/pci_ids.h>
29#include <stdlib.h>
30#include <string.h>
31#include <cbmem.h>
32#include <romstage_handoff.h>
33#include <vendorcode/google/chromeos/chromeos.h>
34#include <broadwell/cpu.h>
35#include <broadwell/iomap.h>
36#include <broadwell/ramstage.h>
37#include <broadwell/systemagent.h>
38
39static int get_pcie_bar(device_t dev, unsigned int index, u32 *base, u32 *len)
40{
41 u32 pciexbar_reg;
42
43 *base = 0;
44 *len = 0;
45
46 pciexbar_reg = pci_read_config32(dev, index);
47
48 if (!(pciexbar_reg & (1 << 0)))
49 return 0;
50
51 switch ((pciexbar_reg >> 1) & 3) {
52 case 0: // 256MB
53 *base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
54 (1 << 28));
55 *len = 256 * 1024 * 1024;
56 return 1;
57 case 1: // 128M
58 *base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
59 (1 << 28)|(1 << 27));
60 *len = 128 * 1024 * 1024;
61 return 1;
62 case 2: // 64M
63 *base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
64 (1 << 28)|(1 << 27)|(1 << 26));
65 *len = 64 * 1024 * 1024;
66 return 1;
67 }
68
69 return 0;
70}
71
72static int get_bar(device_t dev, unsigned int index, u32 *base, u32 *len)
73{
74 u32 bar;
75
76 bar = pci_read_config32(dev, index);
77
78 /* If not enabled don't report it. */
79 if (!(bar & 0x1))
80 return 0;
81
82 /* Knock down the enable bit. */
83 *base = bar & ~1;
84
85 return 1;
86}
87
/* There are special BARs that actually are programmed in the MCHBAR. These
 * are Intel special features, but they do consume resources that need to be
 * accounted for. */
91static int get_bar_in_mchbar(device_t dev, unsigned int index, u32 *base,
92 u32 *len)
93{
94 u32 bar;
95
96 bar = MCHBAR32(index);
97
98 /* If not enabled don't report it. */
99 if (!(bar & 0x1))
100 return 0;
101
102 /* Knock down the enable bit. */
103 *base = bar & ~1;
104
105 return 1;
106}
107
/* Descriptor for one fixed MMIO range hanging off the host bridge. */
struct fixed_mmio_descriptor {
	unsigned int index;	/* Config/MCHBAR register offset; also used
				   as the resource index. */
	u32 size;		/* Fixed size, pre-loaded into the getter's
				   *size argument; getters may override it
				   (get_pcie_bar does, the others don't). */
	int (*get_resource)(device_t dev, unsigned int index,
			    u32 *base, u32 *size); /* Returns 1 if enabled. */
	const char *description; /* Name printed in the debug log. */
};

/* All fixed MMIO BARs owned by the memory controller / host bridge. */
struct fixed_mmio_descriptor mc_fixed_resources[] = {
	{ PCIEXBAR, 0, get_pcie_bar, "PCIEXBAR" },
	{ MCHBAR, MCH_BASE_SIZE, get_bar, "MCHBAR" },
	{ DMIBAR, DMI_BASE_SIZE, get_bar, "DMIBAR" },
	{ EPBAR, EP_BASE_SIZE, get_bar, "EPBAR" },
	{ GDXCBAR, GDXC_BASE_SIZE, get_bar_in_mchbar, "GDXCBAR" },
	{ EDRAMBAR, EDRAM_BASE_SIZE, get_bar_in_mchbar, "EDRAMBAR" },
};
124
125/*
126 * Add all known fixed MMIO ranges that hang off the host bridge/memory
127 * controller device.
128 */
129static void mc_add_fixed_mmio_resources(device_t dev)
130{
131 int i;
132
133 for (i = 0; i < ARRAY_SIZE(mc_fixed_resources); i++) {
134 u32 base;
135 u32 size;
136 struct resource *resource;
137 unsigned int index;
138
139 size = mc_fixed_resources[i].size;
140 index = mc_fixed_resources[i].index;
141 if (!mc_fixed_resources[i].get_resource(dev, index,
142 &base, &size))
143 continue;
144
145 resource = new_resource(dev, mc_fixed_resources[i].index);
146 resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
147 IORESOURCE_STORED | IORESOURCE_RESERVE |
148 IORESOURCE_ASSIGNED;
149 resource->base = base;
150 resource->size = size;
151 printk(BIOS_DEBUG, "%s: Adding %s @ %x 0x%08lx-0x%08lx.\n",
152 __func__, mc_fixed_resources[i].description, index,
153 (unsigned long)base, (unsigned long)(base + size - 1));
154 }
155}
156
157/* Host Memory Map:
158 *
159 * +--------------------------+ TOUUD
160 * | |
161 * +--------------------------+ 4GiB
162 * | PCI Address Space |
163 * +--------------------------+ TOLUD (also maps into MC address space)
164 * | iGD |
165 * +--------------------------+ BDSM
166 * | GTT |
167 * +--------------------------+ BGSM
168 * | TSEG |
169 * +--------------------------+ TSEGMB
170 * | Usage DRAM |
171 * +--------------------------+ 0
172 *
173 * Some of the base registers above can be equal making the size of those
174 * regions 0. The reason is because the memory controller internally subtracts
175 * the base registers from each other to determine sizes of the regions. In
176 * other words, the memory map is in a fixed order no matter what.
177 */
178
/* Description of one host bridge memory map register. */
struct map_entry {
	int reg;		/* PCI config offset of the register. */
	int is_64_bit;		/* Non-zero: read reg and reg+4 as a 64-bit value. */
	int is_limit;		/* Non-zero: register is an inclusive limit;
				   the low (sub-1MiB) bits are filled with 1s. */
	const char *description; /* Name used in debug output. */
};
185
186static void read_map_entry(device_t dev, struct map_entry *entry,
187 uint64_t *result)
188{
189 uint64_t value;
190 uint64_t mask;
191
192 /* All registers are on a 1MiB granularity. */
193 mask = ((1ULL<<20)-1);
194 mask = ~mask;
195
196 value = 0;
197
198 if (entry->is_64_bit) {
199 value = pci_read_config32(dev, entry->reg + 4);
200 value <<= 32;
201 }
202
203 value |= pci_read_config32(dev, entry->reg);
204 value &= mask;
205
206 if (entry->is_limit)
207 value |= ~mask;
208
209 *result = value;
210}
211
/* Initializer helper for a struct map_entry. */
#define MAP_ENTRY(reg_, is_64_, is_limit_, desc_) \
	{ \
		.reg = reg_, \
		.is_64_bit = is_64_, \
		.is_limit = is_limit_, \
		.description = desc_, \
	}

/* 64-bit base register (reg and reg+4). */
#define MAP_ENTRY_BASE_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 0, desc_)
/* 64-bit inclusive limit register. */
#define MAP_ENTRY_LIMIT_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 1, desc_)
/* 32-bit base register. */
#define MAP_ENTRY_BASE_32(reg_, desc_) \
	MAP_ENTRY(reg_, 0, 0, desc_)
226
/* Indices into memory_map[] / the mc_values[] array read from it. */
enum {
	TOM_REG,		/* Top of Memory */
	TOUUD_REG,		/* Top of Upper Usable DRAM */
	MESEG_BASE_REG,		/* Management Engine segment base */
	MESEG_LIMIT_REG,	/* Management Engine segment limit */
	REMAP_BASE_REG,		/* Memory remap base */
	REMAP_LIMIT_REG,	/* Memory remap limit */
	TOLUD_REG,		/* Top of Lower Usable DRAM */
	BGSM_REG,		/* Base of GTT stolen memory */
	BDSM_REG,		/* Base of data stolen memory (iGD) */
	TSEG_REG,		/* TSEG base (SMM) */
	// Must be last.
	NUM_MAP_ENTRIES
};
241
242static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
243 [TOM_REG] = MAP_ENTRY_BASE_64(TOM, "TOM"),
244 [TOUUD_REG] = MAP_ENTRY_BASE_64(TOUUD, "TOUUD"),
245 [MESEG_BASE_REG] = MAP_ENTRY_BASE_64(MESEG_BASE, "MESEG_BASE"),
246 [MESEG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(MESEG_LIMIT, "MESEG_LIMIT"),
247 [REMAP_BASE_REG] = MAP_ENTRY_BASE_64(REMAPBASE, "REMAP_BASE"),
248 [REMAP_LIMIT_REG] = MAP_ENTRY_LIMIT_64(REMAPLIMIT, "REMAP_LIMIT"),
249 [TOLUD_REG] = MAP_ENTRY_BASE_32(TOLUD, "TOLUD"),
250 [BDSM_REG] = MAP_ENTRY_BASE_32(BDSM, "BDSM"),
251 [BGSM_REG] = MAP_ENTRY_BASE_32(BGSM, "BGSM"),
252 [TSEG_REG] = MAP_ENTRY_BASE_32(TSEG, "TESGMB"),
253};
254
255static void mc_read_map_entries(device_t dev, uint64_t *values)
256{
257 int i;
258 for (i = 0; i < NUM_MAP_ENTRIES; i++) {
259 read_map_entry(dev, &memory_map[i], &values[i]);
260 }
261}
262
263static void mc_report_map_entries(device_t dev, uint64_t *values)
264{
265 int i;
266 for (i = 0; i < NUM_MAP_ENTRIES; i++) {
267 printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
268 memory_map[i].description, values[i]);
269 }
270 /* One can validate the BDSM and BGSM against the GGC. */
271 printk(BIOS_DEBUG, "MC MAP: GGC: 0x%x\n", pci_read_config16(dev, GGC));
272}
273
/*
 * Read the host bridge memory map registers and register every DRAM and
 * reserved range as a resource on the host bridge device. The resource
 * index ordering below is deliberate — see the block comment inside.
 */
static void mc_add_dram_resources(device_t dev)
{
	unsigned long base_k, size_k;
	unsigned long touud_k;
	unsigned long index;
	struct resource *resource;
	uint64_t mc_values[NUM_MAP_ENTRIES];

	/* Read in the MAP registers and report their values. */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	/*
	 * These are the host memory ranges that should be added:
	 * - 0 -> 0xa0000: cacheable
	 * - 0xc0000 -> TSEG : cacheable
	 * - TSEG -> BGSM: cacheable with standard MTRRs and reserved
	 * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved
	 * - 4GiB -> TOUUD: cacheable
	 *
	 * The default SMRAM space is reserved so that the range doesn't
	 * have to be saved during S3 Resume. Once marked reserved the OS
	 * cannot use the memory. This is a bit of an odd place to reserve
	 * the region, but the CPU devices don't have dev_ops->read_resources()
	 * called on them.
	 *
	 * The range 0xa0000 -> 0xc0000 does not have any resources
	 * associated with it to handle legacy VGA memory. If this range
	 * is not omitted the mtrr code will setup the area as cacheable
	 * causing VGA access to not work.
	 *
	 * The TSEG region is mapped as cacheable so that one can perform
	 * SMRAM relocation faster. Once the SMRR is enabled the SMRR takes
	 * precedence over the existing MTRRs covering this region.
	 *
	 * It should be noted that cacheable entry types need to be added in
	 * order. The reason is that the current MTRR code assumes this and
	 * falls over itself if it isn't.
	 *
	 * The resource index starts low and should not meet or exceed
	 * PCI_BASE_ADDRESS_0.
	 */
	index = 0;

	/* 0 -> 0xa0000 (conventional memory, in KiB units) */
	base_k = 0;
	size_k = (0xa0000 >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> TSEG */
	base_k = 0xc0000 >> 10;
	size_k = (unsigned long)(mc_values[TSEG_REG] >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* TSEG -> BGSM: cacheable + reserved (default SMRAM; see above). */
	resource = new_resource(dev, index++);
	resource->base = mc_values[TSEG_REG];
	resource->size = mc_values[BGSM_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED | IORESOURCE_CACHEABLE;

	/* BGSM -> TOLUD: GTT + iGD stolen memory, reserved, not cacheable. */
	resource = new_resource(dev, index++);
	resource->base = mc_values[BGSM_REG];
	resource->size = mc_values[TOLUD_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED;

	/* 4GiB -> TOUUD (remapped memory above 4GiB, if any) */
	base_k = 4096 * 1024; /* 4GiB in KiB */
	touud_k = mc_values[TOUUD_REG] >> 10;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/* Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, (0xa0000 >> 10), (0xc0000 - 0xa0000) >> 10);
	reserved_ram_resource(dev, index++, (0xc0000 >> 10),
			      (0x100000 - 0xc0000) >> 10);

	chromeos_reserve_ram_oops(dev, index++);
}
362
363static void systemagent_read_resources(device_t dev)
364{
365 /* Read standard PCI resources. */
366 pci_dev_read_resources(dev);
367
368 /* Add all fixed MMIO resources. */
369 mc_add_fixed_mmio_resources(dev);
370
371 /* Calculate and add DRAM resources. */
372 mc_add_dram_resources(dev);
373}
374
375static void systemagent_init(struct device *dev)
376{
377 u8 bios_reset_cpl, pair;
378
379 /* Enable Power Aware Interrupt Routing */
380 pair = MCHBAR8(MCH_PAIR);
381 pair &= ~0x7; /* Clear 2:0 */
382 pair |= 0x4; /* Fixed Priority */
383 MCHBAR8(MCH_PAIR) = pair;
384
385 /*
386 * Set bits 0+1 of BIOS_RESET_CPL to indicate to the CPU
387 * that BIOS has initialized memory and power management
388 */
389 bios_reset_cpl = MCHBAR8(BIOS_RESET_CPL);
390 bios_reset_cpl |= 3;
391 MCHBAR8(BIOS_RESET_CPL) = bios_reset_cpl;
392 printk(BIOS_DEBUG, "Set BIOS_RESET_CPL\n");
393
394 /* Configure turbo power limits 1ms after reset complete bit */
395 mdelay(1);
396 set_power_limits(28);
397}
398
399static void systemagent_enable(device_t dev)
400{
401#if CONFIG_HAVE_ACPI_RESUME
402 struct romstage_handoff *handoff;
403
404 handoff = cbmem_find(CBMEM_ID_ROMSTAGE_INFO);
405
406 if (handoff == NULL) {
407 printk(BIOS_DEBUG, "Unknown boot method, assuming normal.\n");
408 acpi_slp_type = 0;
409 } else if (handoff->s3_resume) {
410 printk(BIOS_DEBUG, "S3 Resume.\n");
411 acpi_slp_type = 3;
412 } else {
413 printk(BIOS_DEBUG, "Normal boot.\n");
414 acpi_slp_type = 0;
415 }
416#endif
417}
418
419static struct device_operations systemagent_ops = {
420 .read_resources = &systemagent_read_resources,
421 .set_resources = &pci_dev_set_resources,
422 .enable_resources = &pci_dev_enable_resources,
423 .init = &systemagent_init,
424 .enable = &systemagent_enable,
425 .ops_pci = &broadwell_pci_ops,
426};
427
/* PCI device IDs this driver binds to; list is zero-terminated. */
static const unsigned short systemagent_ids[] = {
	0x0a04, /* Haswell ULT */
	0x1604, /* Broadwell-U/Y */
	0x1610, /* Broadwell-H Desktop */
	0x1614, /* Broadwell-H Mobile */
	0       /* terminator */
};
435
/* Register the driver for all Intel system agent IDs listed above. */
static const struct pci_driver systemagent_driver __pci_driver = {
	.ops     = &systemagent_ops,
	.vendor  = PCI_VENDOR_ID_INTEL,
	.devices = systemagent_ids
};