/* SPDX-License-Identifier: GPL-2.0-only */

#include <cbmem.h>
#include <console/console.h>
#include <device/mmio.h>
#include <device/pci_ops.h>
#include <stdint.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <timer.h>

#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/ramstage.h>
#include <soc/systemagent.h>
#include <soc/acpi.h>

#define _1ms 1
#define WAITING_STEP 100

static int get_pcie_bar(struct device *dev, unsigned int index, u32 *base,
			u32 *len)
{
	u32 pciexbar_reg;

	*base = 0;
	*len = 0;

	pciexbar_reg = pci_read_config32(dev, index);

	if (!(pciexbar_reg & (1 << 0)))
		return 0;

	switch ((pciexbar_reg >> 1) & 3) {
	case 0: /* 256MB */
		*base = pciexbar_reg &
			((1 << 31) | (1 << 30) | (1 << 29) | (1 << 28));
		*len = 256 * 1024 * 1024;
		return 1;
	case 1: /* 128MB */
		*base = pciexbar_reg & ((1 << 31) | (1 << 30) | (1 << 29) |
					(1 << 28) | (1 << 27));
		*len = 128 * 1024 * 1024;
		return 1;
	case 2: /* 64MB */
		*base = pciexbar_reg & ((1 << 31) | (1 << 30) | (1 << 29) |
					(1 << 28) | (1 << 27) | (1 << 26));
		*len = 64 * 1024 * 1024;
		return 1;
	}

	return 0;
}
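
/*
 * Illustrative decode (example value, not read from hardware): a PCIEXBAR
 * reading of 0xe0000001 has bit 0 (enable) set and a length field of 0 in
 * bits 2:1, so the function above reports a 256 MiB window at 0xe0000000.
 */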

static int get_bar(struct device *dev, unsigned int index, u32 *base, u32 *len)
{
	u32 bar;

	bar = pci_read_config32(dev, index);

	/* If not enabled don't report it. */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit. */
	*base = bar & ~1;

	return 1;
}

struct fixed_mmio_descriptor {
	unsigned int index;
	u32 size;
	int (*get_resource)(struct device *dev, unsigned int index, u32 *base,
			    u32 *size);
	const char *description;
};
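
/*
 * PCIEXBAR's entry passes size 0 because get_pcie_bar() derives the window
 * size from the register itself; MCHBAR keeps the fixed MCH_BASE_SIZE since
 * get_bar() only fills in the base.
 */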

struct fixed_mmio_descriptor mc_fixed_resources[] = {
	{PCIEXBAR, 0, get_pcie_bar, "PCIEXBAR"},
	{MCHBAR, MCH_BASE_SIZE, get_bar, "MCHBAR"},
};

/*
 * Add all known fixed MMIO ranges that hang off the host bridge/memory
 * controller device.
 */
static void mc_add_fixed_mmio_resources(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mc_fixed_resources); i++) {
		u32 base;
		u32 size;
		struct resource *resource;
		unsigned int index;

		size = mc_fixed_resources[i].size;
		index = mc_fixed_resources[i].index;
		if (!mc_fixed_resources[i].get_resource(dev, index, &base,
							&size))
			continue;

		resource = new_resource(dev, mc_fixed_resources[i].index);
		resource->base = base;
		resource->size = size;
		resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
				  IORESOURCE_STORED | IORESOURCE_RESERVE |
				  IORESOURCE_ASSIGNED;
		printk(BIOS_DEBUG, "%s: Adding %s @ %x 0x%08lx-0x%08lx.\n",
		       __func__, mc_fixed_resources[i].description, index,
		       (unsigned long)base, (unsigned long)(base + size - 1));
	}
}

struct map_entry {
	int reg;
	int is_64_bit;
	int is_limit;
	const char *description;
};

static void read_map_entry(struct device *dev, struct map_entry *entry,
			   uint64_t *result)
{
	uint64_t value;
	uint64_t mask;

	/* All registers are on a 1MiB granularity. */
	mask = ((1ULL << 20) - 1);
	mask = ~mask;

	value = 0;

	if (entry->is_64_bit) {
		value = pci_read_config32(dev, entry->reg + 4);
		value <<= 32;
	}

	value |= (uint64_t)pci_read_config32(dev, entry->reg);
	value &= mask;

	if (entry->is_limit)
		value |= ~mask;

	*result = value;
}
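
/*
 * Worked example (illustrative raw value): a 32-bit base register reading
 * 0x7a000001 is masked down to 0x7a000000; read as a limit register, the
 * same raw value would instead yield 0x7a0fffff, i.e. the last byte of its
 * 1 MiB aligned range.
 */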

#define MAP_ENTRY(reg_, is_64_, is_limit_, desc_) \
	{ \
		.reg = reg_, .is_64_bit = is_64_, .is_limit = is_limit_, \
		.description = desc_, \
	}

#define MAP_ENTRY_BASE_64(reg_, desc_) MAP_ENTRY(reg_, 1, 0, desc_)
#define MAP_ENTRY_LIMIT_64(reg_, desc_) MAP_ENTRY(reg_, 1, 1, desc_)
#define MAP_ENTRY_BASE_32(reg_, desc_) MAP_ENTRY(reg_, 0, 0, desc_)

enum {
	TOUUD_REG,
	TOLUD_REG,
	TSEG_REG,
	/* Must be last. */
	NUM_MAP_ENTRIES
};

static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOUUD_REG] = MAP_ENTRY_BASE_64(TOUUD, "TOUUD"),
	[TOLUD_REG] = MAP_ENTRY_BASE_32(TOLUD, "TOLUD"),
	[TSEG_REG] = MAP_ENTRY_BASE_32(TSEGMB, "TSEGMB"),
};

static void mc_read_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++)
		read_map_entry(dev, &memory_map[i], &values[i]);
}

static void mc_report_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++) {
		printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
		       memory_map[i].description, values[i]);
	}
}
static void mc_add_dram_resources(struct device *dev)
{
	unsigned long index;
	uint64_t mc_values[NUM_MAP_ENTRIES];

	/* Read in the MAP registers and report their values. */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	/*
	 * These are the host memory ranges that should be added:
	 * - 0 -> 0xa0000: cacheable
	 * - 0xc0000 -> 0x100000: reserved
	 * - 0x100000 -> cbmem_top(): cacheable
	 * - cbmem_top() -> TSEG: uncacheable
	 * - TSEG -> TOLUD: cacheable with standard MTRRs and reserved
	 * - 4GiB -> TOUUD: cacheable
	 *
	 * The default SMRAM space is reserved so that the range doesn't
	 * have to be saved during S3 Resume. Once marked reserved the OS
	 * cannot use the memory. This is a bit of an odd place to reserve
	 * the region, but the CPU devices don't have dev_ops->read_resources()
	 * called on them.
	 *
	 * The range 0xa0000 -> 0xc0000 does not have any resources
	 * associated with it so that legacy VGA memory is handled correctly.
	 * If this range is not omitted, the MTRR code will set up the area
	 * as cacheable, which breaks VGA access.
	 *
	 * The TSEG region is mapped as cacheable so that SMRAM relocation
	 * can be performed faster. Once the SMRR is enabled, it takes
	 * precedence over the existing MTRRs covering this region.
	 *
	 * Note that cacheable entry types need to be added in order: the
	 * current MTRR code assumes this and falls over itself if they
	 * are not.
	 *
	 * The resource index starts low and should not meet or exceed
	 * PCI_BASE_ADDRESS_0.
	 */
	index = 0;

	/*
	 * 0 -> 0xa0000: RAM
	 * 0xa0000 -> 0xbffff: Legacy VGA
	 * 0xc0000 -> 0xfffff: RAM
	 */
	ram_range(dev, index++, 0, 0xa0000);
	mmio_from_to(dev, index++, 0xa0000, 0xc0000);
	reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB);

	/* 0x100000 -> cbmem_top() */
	ram_from_to(dev, index++, 1 * MiB, (uintptr_t)cbmem_top());

	/* cbmem_top() -> TSEG */
	mmio_from_to(dev, index++, (uintptr_t)cbmem_top(), mc_values[TSEG_REG]);

	/* TSEG -> TOLUD */
	reserved_ram_from_to(dev, index++, mc_values[TSEG_REG], mc_values[TOLUD_REG]);

	/* 4GiB -> TOUUD */
	upper_ram_end(dev, index++, mc_values[TOUUD_REG]);
}
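
/*
 * Illustrative example only (the actual values depend on the board and
 * memory configuration): with TSEG = 0x78000000, TOLUD = 0x80000000 and
 * TOUUD = 0x180000000, the function above reports RAM from 1 MiB to
 * cbmem_top(), uncacheable space from cbmem_top() to 0x78000000, reserved
 * RAM from 0x78000000 to 0x80000000, and a further 2 GiB of RAM from
 * 4 GiB to 6 GiB.
 */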

static void systemagent_read_resources(struct device *dev)
{
	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* Add all fixed MMIO resources. */
	mc_add_fixed_mmio_resources(dev);

	/* Calculate and add DRAM resources. */
	mc_add_dram_resources(dev);
}

static void systemagent_init(struct device *dev)
{
	struct stopwatch sw;
	void *bios_reset_cpl =
		(void *)(DEFAULT_MCHBAR + MCH_BAR_BIOS_RESET_CPL);
	uint32_t reg = read32(bios_reset_cpl);

	/* Stage0 BIOS Reset Complete (RST_CPL) */
	reg |= RST_CPL_BIT;
	write32(bios_reset_cpl, reg);

	/*
	 * Poll for bit 8 (PCODE_INIT_DONE) in the same register, waiting
	 * up to 1 ms in WAITING_STEP microsecond intervals for it to be set.
	 */
	stopwatch_init_msecs_expire(&sw, _1ms);
	while (!(read32(bios_reset_cpl) & PCODE_INIT_DONE)) {
		if (stopwatch_expired(&sw)) {
			printk(BIOS_DEBUG, "Failed to set RST_CPL bit\n");
			return;
		}
		udelay(WAITING_STEP);
	}
	printk(BIOS_DEBUG, "Set BIOS_RESET_CPL\n");
}

static struct device_operations systemagent_ops = {
	.read_resources = systemagent_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = systemagent_init,
	.ops_pci = &soc_pci_ops,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = systemagent_write_acpi_tables,
#endif
};

/* IDs for the System Agent device of the Intel Denverton SoC */
static const unsigned short systemagent_ids[] = {
	PCI_DID_INTEL_DNV_SA,
	PCI_DID_INTEL_DNVAD_SA,
	0
};

static const struct pci_driver systemagent_driver __pci_driver = {
	.ops = &systemagent_ops,
	.vendor = PCI_VID_INTEL,
	.devices = systemagent_ids
};
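
/*
 * The __pci_driver attribute places this entry in the driver table that
 * coreboot walks during PCI enumeration, so a device matching PCI_VID_INTEL
 * and one of the Denverton/Denverton-AD system agent IDs above is bound to
 * systemagent_ops.
 */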