/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <console/console.h>
#include <device/pci_ops.h>
#include <acpi/acpi.h>
#include <acpi/acpi_ivrs.h>
#include <arch/ioapic.h>
#include <types.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <string.h>
#include <stdlib.h>
#include <lib.h>
#include <cpu/cpu.h>
#include <Porting.h>
#include <AGESA.h>
#include <Topology.h>
#include <cpu/x86/lapic.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/mtrr.h>
#include <acpi/acpigen.h>
#include <northbridge/amd/nb_common.h>
#include <northbridge/amd/agesa/agesa_helper.h>
#include <southbridge/amd/pi/hudson/pci_devs.h>

#define MAX_NODE_NUMS MAX_NODES
#define PCIE_CAP_AER BIT(5)
#define PCIE_CAP_ACS BIT(6)

typedef struct dram_base_mask {
	u32 base; //[47:27] at [28:8]
	u32 mask; //[47:27] at [28:8] and enable at bit 0
} dram_base_mask_t;

static unsigned int node_nums;
static unsigned int sblink;
static struct device *__f0_dev[MAX_NODE_NUMS];
static struct device *__f1_dev[MAX_NODE_NUMS];
static struct device *__f2_dev[MAX_NODE_NUMS];
static struct device *__f4_dev[MAX_NODE_NUMS];
static unsigned int fx_devs = 0;

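/*
 * Descriptive note (derived from the register comments below): read the
 * per-node DRAM base/mask (limit) pair from the northbridge function 1
 * registers (0x40/0x44 hold the low parts, 0x140/0x144 the high bits
 * [47:40]) and pack address bits [47:27] into bits [28:8] of the returned
 * fields. Bit 0 of .mask carries the enable bit.
 */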
static dram_base_mask_t get_dram_base_mask(u32 nodeid)
{
	struct device *dev;
	dram_base_mask_t d;
	dev = __f1_dev[0];
	u32 temp;
	temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask = ((temp & 0xfff80000) >> (8 + 3)); // mask out DramMask [26:24] too
	temp = pci_read_config32(dev, 0x144 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.mask |= temp << 21;
	temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask |= (temp & 1); // enable bit
	d.base = ((temp & 0xfff80000) >> (8 + 3)); // mask out DramBase [26:24] too
	temp = pci_read_config32(dev, 0x140 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.base |= temp << 21;
	return d;
}

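/*
 * Program an F1 I/O base/limit register pair (reg, reg+4) on every node so
 * that the I/O window [io_min..io_max] is routed to the given node and link.
 * The value 3 written to the base register presumably sets the read/write
 * enable bits; the commented-out (3 << 4) would additionally set the VGA/ISA
 * enable bits.
 */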
static void set_io_addr_reg(struct device *dev, u32 nodeid, u32 linkn, u32 reg,
			u32 io_min, u32 io_max)
{
	u32 i;
	u32 tempreg;
	/* io range allocation */
	tempreg = (nodeid & 0xf) | ((nodeid & 0x30) << (8 - 4)) | (linkn << 4) | ((io_max & 0xf0) << (12 - 4)); //limit
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg + 4, tempreg);
	tempreg = 3 /*| (3 << 4)*/ | ((io_min & 0xf0) << (12 - 4)); //base: ISA and VGA?
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}

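/*
 * Program an F1 MMIO base/limit register pair (reg, reg+4) on every node so
 * that the MMIO window [mmio_min..mmio_max] is routed to the given node/link.
 */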
static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
{
	u32 i;
	u32 tempreg;
	/* mmio range allocation */
	tempreg = (nodeid & 0xf) | (linkn << 4) | (mmio_max & 0xffffff00); //limit
	for (i = 0; i < nodes; i++)
		pci_write_config32(__f1_dev[i], reg + 4, tempreg);
	tempreg = 3 | (nodeid & 0x30) | (mmio_min & 0xffffff00);
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}

static struct device *get_node_pci(u32 nodeid, u32 fn)
{
	return pcidev_on_root(DEV_CDB + nodeid, fn);
}

static void get_fx_devs(void)
{
	int i;
	for (i = 0; i < MAX_NODE_NUMS; i++) {
		__f0_dev[i] = get_node_pci(i, 0);
		__f1_dev[i] = get_node_pci(i, 1);
		__f2_dev[i] = get_node_pci(i, 2);
		__f4_dev[i] = get_node_pci(i, 4);
		if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
			fx_devs = i + 1;
	}
	if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
		die("Cannot find 0:0x18.[0|1]\n");
	}
	printk(BIOS_DEBUG, "fx_devs = 0x%x\n", fx_devs);
}

static u32 f1_read_config32(unsigned int reg)
{
	if (fx_devs == 0)
		get_fx_devs();
	return pci_read_config32(__f1_dev[0], reg);
}

static void f1_write_config32(unsigned int reg, u32 value)
{
	int i;
	if (fx_devs == 0)
		get_fx_devs();
	for (i = 0; i < fx_devs; i++) {
		struct device *dev;
		dev = __f1_dev[i];
		if (dev && dev->enabled) {
			pci_write_config32(dev, reg, value);
		}
	}
}

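/* Node N's northbridge is PCI device DEV_CDB + N, so the node id follows directly from the devfn. */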
static u32 amdfam16_nodeid(struct device *dev)
{
	return (dev->path.pci.devfn >> 3) - DEV_CDB;
}

static void set_vga_enable_reg(u32 nodeid, u32 linkn)
{
	u32 val;

	val = 1 | (nodeid << 4) | (linkn << 12);
	/* This routes
	 * (1) MMIO 0xa0000:0xbffff
	 * (2) I/O 0x3b0:0x3bb and 0x3c0:0x3df
	 * to the given node and link.
	 */
	f1_write_config32(0xf4, val);
}

/**
 * @return
 * @retval 2 resource does not exist, usable
 * @retval 0 resource exists, not usable
 * @retval 1 resource exists and has been allocated before
 */
static int reg_useable(unsigned int reg, struct device *goal_dev,
		       unsigned int goal_nodeid, unsigned int goal_link)
{
	struct resource *res;
	unsigned int nodeid, link = 0;
	int result;
	res = 0;
	for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
		struct device *dev;
		dev = __f0_dev[nodeid];
		if (!dev)
			continue;
		for (link = 0; !res && (link < 8); link++) {
			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
		}
	}
	result = 2;
	if (res) {
		result = 0;
		if ((goal_link == (link - 1)) &&
		    (goal_nodeid == (nodeid - 1)) &&
		    (res->flags <= 1)) {
			result = 1;
		}
	}
	return result;
}

184
Subrata Banikb1434fc2019-03-15 22:20:41 +0530185static struct resource *amdfam16_find_iopair(struct device *dev,
186 unsigned int nodeid, unsigned int link)
Bruce Griffith27ed80b2014-08-15 11:46:25 -0600187{
188 struct resource *resource;
189 u32 free_reg, reg;
190 resource = 0;
191 free_reg = 0;
192 for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
193 int result;
194 result = reg_useable(reg, dev, nodeid, link);
195 if (result == 1) {
196 /* I have been allocated this one */
197 break;
198 }
199 else if (result > 1) {
200 /* I have a free register pair */
201 free_reg = reg;
202 }
203 }
204 if (reg > 0xd8) {
205 reg = free_reg; // if no free, the free_reg still be 0
206 }
207
208 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
209
210 return resource;
211}
212
Kyösti Mälkki90ac7362018-05-20 20:59:52 +0300213static struct resource *amdfam16_find_mempair(struct device *dev, u32 nodeid, u32 link)
Bruce Griffith27ed80b2014-08-15 11:46:25 -0600214{
215 struct resource *resource;
216 u32 free_reg, reg;
217 resource = 0;
218 free_reg = 0;
219 for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
220 int result;
221 result = reg_useable(reg, dev, nodeid, link);
222 if (result == 1) {
223 /* I have been allocated this one */
224 break;
225 }
226 else if (result > 1) {
227 /* I have a free register pair */
228 free_reg = reg;
229 }
230 }
231 if (reg > 0xb8) {
232 reg = free_reg;
233 }
234
235 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
236 return resource;
237}
238
static void amdfam16_link_read_bases(struct device *dev, u32 nodeid, u32 link)
{
	struct resource *resource;

	/* Initialize the io space constraints on the current bus */
	resource = amdfam16_find_iopair(dev, nodeid, link);
	if (resource) {
		u32 align;
		align = log2(HT_IO_HOST_ALIGN);
		resource->base = 0;
		resource->size = 0;
		resource->align = align;
		resource->gran = align;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
	}

	/* Initialize the prefetchable memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		resource->flags |= IORESOURCE_BRIDGE;
	}

	/* Initialize the memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
	}
}

static void read_resources(struct device *dev)
{
	u32 nodeid;
	struct bus *link;
	struct resource *res;

	nodeid = amdfam16_nodeid(dev);
	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			amdfam16_link_read_bases(dev, nodeid, link->link_num);
		}
	}

	/*
	 * This MMCONF resource must be reserved in the PCI domain.
	 * It is not honored by the coreboot resource allocator if it is in
	 * the CPU_CLUSTER.
	 */
	mmconf_resource(dev, MMIO_CONF_BASE);

	/* NB IOAPIC2 resource */
	res = new_resource(dev, IO_APIC2_ADDR); /* IOAPIC2 */
	res->base = IO_APIC2_ADDR;
	res->size = 0x00001000;
	res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED | IORESOURCE_FIXED;
}

static void set_resource(struct device *dev, struct resource *resource, u32 nodeid)
{
	resource_t rbase, rend;
	unsigned int reg, link_num;
	char buf[50];

	/* Make certain the resource has actually been set */
	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
		return;
	}

	/* If I have already stored this resource don't worry about it */
	if (resource->flags & IORESOURCE_STORED) {
		return;
	}

	/* Only handle PCI memory and IO resources */
	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
		return;

	/* Ensure I am actually looking at a resource of function 1 */
	if ((resource->index & 0xffff) < 0x1000) {
		return;
	}
	/* Get the base address */
	rbase = resource->base;

	/* Get the limit (rounded up) */
	rend = resource_end(resource);

	/* Get the register and link */
	reg = resource->index & 0xfff; // 4k
	link_num = IOINDEX_LINK(resource->index);

	if (resource->flags & IORESOURCE_IO) {
		set_io_addr_reg(dev, nodeid, link_num, reg, rbase >> 8, rend >> 8);
	}
	else if (resource->flags & IORESOURCE_MEM) {
		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >> 24), rbase >> 8, rend >> 8, node_nums); // [39:8]
	}
	resource->flags |= IORESOURCE_STORED;
	snprintf(buf, sizeof(buf), " <node %x link %x>",
		 nodeid, link_num);
	report_resource_stored(dev, resource, buf);
}

/**
 * I tried to reuse the resource allocation code in set_resource()
 * but it is too difficult to deal with the resource allocation magic.
 */

static void create_vga_resource(struct device *dev, unsigned int nodeid)
{
	struct bus *link;

	/* Find the link to which the VGA card is connected;
	 * we only deal with the 'first' VGA card */
	for (link = dev->link_list; link; link = link->next) {
		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
#if CONFIG(MULTIPLE_VGA_ADAPTERS)
			extern struct device *vga_pri; // the primary VGA device, defined in device.c
			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
				link->secondary, link->subordinate);
			/* We need to make sure the vga_pri is under the link */
			if ((vga_pri->bus->secondary >= link->secondary) &&
			    (vga_pri->bus->secondary <= link->subordinate))
#endif
			break;
		}
	}

	/* no VGA card installed */
	if (link == NULL)
		return;

	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, sblink);
	set_vga_enable_reg(nodeid, sblink);
}

static void set_resources(struct device *dev)
{
	unsigned int nodeid;
	struct bus *bus;
	struct resource *res;

	/* Find the nodeid */
	nodeid = amdfam16_nodeid(dev);

	create_vga_resource(dev, nodeid); //TODO: do we need this?

	/* Set each resource we have found */
	for (res = dev->resource_list; res; res = res->next) {
		set_resource(dev, res, nodeid);
	}

	for (bus = dev->link_list; bus; bus = bus->next) {
		if (bus->children) {
			assign_resources(bus);
		}
	}
}

static void northbridge_init(struct device *dev)
{
	setup_ioapic((u8 *)IO_APIC2_ADDR, CONFIG_MAX_CPUS + 1);
}

static unsigned long acpi_fill_hest(acpi_hest_t *hest)
{
	void *addr, *current;

	/* Skip the HEST header. */
	current = (void *)(hest + 1);

	addr = agesawrapper_getlateinitptr(PICK_WHEA_MCE);
	if (addr != NULL)
		current += acpi_create_hest_error_source(hest, current, 0, (void *)((u32)addr + 2), *(UINT16 *)addr - 2);

	addr = agesawrapper_getlateinitptr(PICK_WHEA_CMC);
	if (addr != NULL)
		current += acpi_create_hest_error_source(hest, current, 1, (void *)((u32)addr + 2), *(UINT16 *)addr - 2);

	return (unsigned long)current;
}

unsigned long acpi_fill_ivrs_ioapic(acpi_ivrs_t *ivrs, unsigned long current)
{
	/* 8-byte IVHD structures must be aligned to the 8-byte boundary. */
	current = ALIGN_UP(current, 8);
	ivrs_ivhd_special_t *ivhd_ioapic = (ivrs_ivhd_special_t *)current;

	ivhd_ioapic->type = IVHD_DEV_8_BYTE_EXT_SPECIAL_DEV;
	ivhd_ioapic->reserved = 0x0000;
	ivhd_ioapic->dte_setting = IVHD_DTE_LINT_1_PASS | IVHD_DTE_LINT_0_PASS |
				   IVHD_DTE_SYS_MGT_NO_TRANS | IVHD_DTE_NMI_PASS |
				   IVHD_DTE_EXT_INT_PASS | IVHD_DTE_INIT_PASS;
	ivhd_ioapic->handle = CONFIG_MAX_CPUS; /* FCH IOAPIC ID */
	ivhd_ioapic->source_dev_id = PCI_DEVFN(SMBUS_DEV, SMBUS_FUNC);
	ivhd_ioapic->variety = IVHD_SPECIAL_DEV_IOAPIC;
	current += sizeof(ivrs_ivhd_special_t);

	ivhd_ioapic = (ivrs_ivhd_special_t *)current;

	ivhd_ioapic->type = IVHD_DEV_8_BYTE_EXT_SPECIAL_DEV;
	ivhd_ioapic->reserved = 0x0000;
	ivhd_ioapic->dte_setting = 0x00;
	ivhd_ioapic->handle = CONFIG_MAX_CPUS + 1; /* GNB IOAPIC ID */
	ivhd_ioapic->source_dev_id = PCI_DEVFN(0, 1);
	ivhd_ioapic->variety = IVHD_SPECIAL_DEV_IOAPIC;
	current += sizeof(ivrs_ivhd_special_t);

	return current;
}

static unsigned long ivhd_describe_hpet(unsigned long current)
{
	/* 8-byte IVHD structures must be aligned to the 8-byte boundary. */
	current = ALIGN_UP(current, 8);
	ivrs_ivhd_special_t *ivhd_hpet = (ivrs_ivhd_special_t *)current;

	ivhd_hpet->type = IVHD_DEV_8_BYTE_EXT_SPECIAL_DEV;
	ivhd_hpet->reserved = 0x0000;
	ivhd_hpet->dte_setting = 0x00;
	ivhd_hpet->handle = 0x00;
	ivhd_hpet->source_dev_id = PCI_DEVFN(SMBUS_DEV, SMBUS_FUNC);
	ivhd_hpet->variety = IVHD_SPECIAL_DEV_HPET;
	current += sizeof(ivrs_ivhd_special_t);

	return current;
}

static unsigned long ivhd_dev_range(unsigned long current, uint16_t start_devid,
				    uint16_t end_devid, uint8_t setting)
{
	/* 4-byte IVHD structures must be aligned to the 4-byte boundary. */
	current = ALIGN_UP(current, 4);
	ivrs_ivhd_generic_t *ivhd_range = (ivrs_ivhd_generic_t *)current;

	/* Create the start range IVHD entry */
	ivhd_range->type = IVHD_DEV_4_BYTE_START_RANGE;
	ivhd_range->dev_id = start_devid;
	ivhd_range->dte_setting = setting;
	current += sizeof(ivrs_ivhd_generic_t);

	/* Create the end range IVHD entry */
	ivhd_range = (ivrs_ivhd_generic_t *)current;
	ivhd_range->type = IVHD_DEV_4_BYTE_END_RANGE;
	ivhd_range->dev_id = end_devid;
	ivhd_range->dte_setting = setting;
	current += sizeof(ivrs_ivhd_generic_t);

	return current;
}

static unsigned long add_ivhd_dev_entry(struct device *parent, struct device *dev,
					 unsigned long *current, uint8_t type, uint8_t data)
{
	if (type == IVHD_DEV_4_BYTE_SELECT) {
		/* 4-byte IVHD structures must be aligned to the 4-byte boundary. */
		*current = ALIGN_UP(*current, 4);
		ivrs_ivhd_generic_t *ivhd_entry = (ivrs_ivhd_generic_t *)*current;

		ivhd_entry->type = type;
		ivhd_entry->dev_id = dev->path.pci.devfn | (dev->bus->secondary << 8);
		ivhd_entry->dte_setting = data;
		*current += sizeof(ivrs_ivhd_generic_t);
	} else if (type == IVHD_DEV_8_BYTE_ALIAS_SELECT) {
		/* 8-byte IVHD structures must be aligned to the 8-byte boundary. */
		*current = ALIGN_UP(*current, 8);
		ivrs_ivhd_alias_t *ivhd_entry = (ivrs_ivhd_alias_t *)*current;

		ivhd_entry->type = type;
		ivhd_entry->dev_id = dev->path.pci.devfn | (dev->bus->secondary << 8);
		ivhd_entry->dte_setting = data;
		ivhd_entry->reserved1 = 0;
		ivhd_entry->reserved2 = 0;
		ivhd_entry->source_dev_id = parent->path.pci.devfn |
					    (parent->bus->secondary << 8);
		*current += sizeof(ivrs_ivhd_alias_t);
	}

	return *current;
}

static void ivrs_add_device_or_bridge(struct device *parent, struct device *dev,
				      unsigned long *current, uint16_t *ivhd_length)
{
	unsigned int header_type, is_pcie;
	unsigned long current_backup;

	header_type = dev->hdr_type & 0x7f;
	is_pcie = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (((header_type == PCI_HEADER_TYPE_NORMAL) ||
	     (header_type == PCI_HEADER_TYPE_BRIDGE)) && is_pcie) {
		/* Device or Bridge is PCIe */
		current_backup = *current;
		add_ivhd_dev_entry(parent, dev, current, IVHD_DEV_4_BYTE_SELECT, 0x0);
		*ivhd_length += (*current - current_backup);
	} else if ((header_type == PCI_HEADER_TYPE_NORMAL) && !is_pcie) {
		/* Device is legacy PCI or PCI-X */
		current_backup = *current;
		add_ivhd_dev_entry(parent, dev, current, IVHD_DEV_8_BYTE_ALIAS_SELECT, 0x0);
		*ivhd_length += (*current - current_backup);
	}
}

static void add_ivhd_device_entries(struct device *parent, struct device *dev,
				    unsigned int depth, int linknum, int8_t *root_level,
				    unsigned long *current, uint16_t *ivhd_length)
{
	struct device *sibling;
	struct bus *link;
	bool root_level_allocated = false;

	if (!root_level) {
		root_level = malloc(sizeof(int8_t));
		*root_level = -1;
		root_level_allocated = true;
	}

	if (dev->path.type == DEVICE_PATH_PCI) {

		if ((dev->bus->secondary == 0x0) &&
		    (dev->path.pci.devfn == 0x0))
			*root_level = depth;

		if ((*root_level != -1) && (dev->enabled)) {
			if (depth != *root_level)
				ivrs_add_device_or_bridge(parent, dev, current, ivhd_length);
		}
	}

	for (link = dev->link_list; link; link = link->next)
		for (sibling = link->children; sibling; sibling =
		     sibling->sibling)
			add_ivhd_device_entries(dev, sibling, depth + 1, depth, root_level,
						current, ivhd_length);

	/* Only the top-level call allocates root_level; free it exactly once there,
	 * not in every recursive invocation. */
	if (root_level_allocated)
		free(root_level);
}

#define IOMMU_MMIO32(x) (*((volatile uint32_t *)(x)))
#define EFR_SUPPORT BIT(27)

static unsigned long acpi_fill_ivrs11(unsigned long current, acpi_ivrs_t *ivrs_agesa)
{
	acpi_ivrs_ivhd11_t *ivhd_11;
	unsigned long current_backup;

	/*
	 * These devices should be already found by previous function.
	 * Do not perform NULL checks.
	 */
	struct device *nb_dev = pcidev_on_root(0, 0);
	struct device *iommu_dev = pcidev_on_root(0, 2);

	/*
	 * In order to utilize all features, firmware should expose type 11h
	 * IVHD which supersedes the type 10h.
	 */
	memset((void *)current, 0, sizeof(acpi_ivrs_ivhd11_t));
	ivhd_11 = (acpi_ivrs_ivhd11_t *)current;

	/* Enable EFR */
	ivhd_11->type = IVHD_BLOCK_TYPE_FULL__FIXED;
	/* For type 11h bits 6 and 7 are reserved */
	ivhd_11->flags = ivrs_agesa->ivhd.flags & 0x3f;
	ivhd_11->length = sizeof(struct acpi_ivrs_ivhd_11);
	/* BDF <bus>:00.2 */
	ivhd_11->device_id = 0x02 | (nb_dev->bus->secondary << 8);
	/* PCI Capability block 0x40 (type 0xf, "Secure device") */
	ivhd_11->capability_offset = 0x40;
	ivhd_11->iommu_base_low = ivrs_agesa->ivhd.iommu_base_low;
	ivhd_11->iommu_base_high = ivrs_agesa->ivhd.iommu_base_high;
	ivhd_11->pci_segment_group = 0x0000;
	ivhd_11->iommu_info = ivrs_agesa->ivhd.iommu_info;
	ivhd_11->iommu_attributes.perf_counters =
		(IOMMU_MMIO32(ivhd_11->iommu_base_low + 0x4000) >> 7) & 0xf;
	ivhd_11->iommu_attributes.perf_counter_banks =
		(IOMMU_MMIO32(ivhd_11->iommu_base_low + 0x4000) >> 12) & 0x3f;
	ivhd_11->iommu_attributes.msi_num_ppr =
		(pci_read_config32(iommu_dev, ivhd_11->capability_offset + 0x10) >> 27) & 0x1f;

	if (pci_read_config32(iommu_dev, ivhd_11->capability_offset) & EFR_SUPPORT) {
		ivhd_11->efr_reg_image_low = IOMMU_MMIO32(ivhd_11->iommu_base_low + 0x30);
		ivhd_11->efr_reg_image_high = IOMMU_MMIO32(ivhd_11->iommu_base_low + 0x34);
	}

	current += sizeof(acpi_ivrs_ivhd11_t);

	/* Now repeat all the device entries from type 10h */
	current_backup = current;
	current = ivhd_dev_range(current, PCI_DEVFN(1, 0), PCI_DEVFN(0x1f, 6), 0);
	ivhd_11->length += (current - current_backup);
	add_ivhd_device_entries(NULL, all_devices, 0, -1, NULL, &current, &ivhd_11->length);

	/* Describe HPET */
	current_backup = current;
	current = ivhd_describe_hpet(current);
	ivhd_11->length += (current - current_backup);

	/* Describe IOAPICs */
	current_backup = current;
	current = acpi_fill_ivrs_ioapic(ivrs_agesa, current);
	ivhd_11->length += (current - current_backup);

	return current;
}

static unsigned long acpi_fill_ivrs(acpi_ivrs_t *ivrs, unsigned long current)
{
	acpi_ivrs_t *ivrs_agesa;
	unsigned long current_backup;

	struct device *nb_dev = pcidev_on_root(0, 0);
	if (!nb_dev) {
		printk(BIOS_WARNING, "%s: G-series northbridge device not present!\n", __func__);
		printk(BIOS_WARNING, "%s: IVRS table not generated...\n", __func__);

		return (unsigned long)ivrs;
	}

	struct device *iommu_dev = pcidev_on_root(0, 2);

	if (!iommu_dev) {
		printk(BIOS_WARNING, "%s: IOMMU device not found\n", __func__);

		return (unsigned long)ivrs;
	}

	ivrs_agesa = agesawrapper_getlateinitptr(PICK_IVRS);
	if (ivrs_agesa != NULL) {
		ivrs->iv_info = ivrs_agesa->iv_info;
		ivrs->ivhd.type = IVHD_BLOCK_TYPE_LEGACY__FIXED;
		ivrs->ivhd.flags = ivrs_agesa->ivhd.flags;
		ivrs->ivhd.length = sizeof(struct acpi_ivrs_ivhd);
		/* BDF <bus>:00.2 */
		ivrs->ivhd.device_id = 0x02 | (nb_dev->bus->secondary << 8);
		/* PCI Capability block 0x40 (type 0xf, "Secure device") */
		ivrs->ivhd.capability_offset = 0x40;
		ivrs->ivhd.iommu_base_low = ivrs_agesa->ivhd.iommu_base_low;
		ivrs->ivhd.iommu_base_high = ivrs_agesa->ivhd.iommu_base_high;
		ivrs->ivhd.pci_segment_group = 0x0000;
		ivrs->ivhd.iommu_info = ivrs_agesa->ivhd.iommu_info;
		ivrs->ivhd.iommu_feature_info = ivrs_agesa->ivhd.iommu_feature_info;
		/* Enable EFR if supported */
		if (pci_read_config32(iommu_dev, ivrs->ivhd.capability_offset) & EFR_SUPPORT)
			ivrs->iv_info |= IVINFO_EFR_SUPPORTED;
	} else {
		printk(BIOS_WARNING, "%s: AGESA returned NULL IVRS\n", __func__);

		return (unsigned long)ivrs;
	}

	/*
	 * Add all possible PCI devices on bus 0 that can generate transactions
	 * processed by IOMMU. Start with device 00:01.0 since IOMMU does not
	 * translate transactions generated by itself.
	 */
	current_backup = current;
	current = ivhd_dev_range(current, PCI_DEVFN(1, 0), PCI_DEVFN(0x1f, 6), 0);
	ivrs->ivhd.length += (current - current_backup);
	add_ivhd_device_entries(NULL, all_devices, 0, -1, NULL, &current, &ivrs->ivhd.length);

	/* Describe HPET */
	current_backup = current;
	current = ivhd_describe_hpet(current);
	ivrs->ivhd.length += (current - current_backup);

	/* Describe IOAPICs */
	current_backup = current;
	current = acpi_fill_ivrs_ioapic(ivrs_agesa, current);
	ivrs->ivhd.length += (current - current_backup);

	/* If EFR is not supported, IVHD type 11h is reserved */
	if (!(ivrs->iv_info & IVINFO_EFR_SUPPORTED))
		return current;

	return acpi_fill_ivrs11(current, ivrs_agesa);
}

static void northbridge_fill_ssdt_generator(const struct device *device)
{
	msr_t msr;
	char pscope[] = "\\_SB.PCI0";

	acpigen_write_scope(pscope);
	msr = rdmsr(TOP_MEM);
	acpigen_write_name_dword("TOM1", msr.lo);
	msr = rdmsr(TOP_MEM2);
	/*
	 * Since XP only implements parts of ACPI 2.0, we can't use a qword
	 * here.
	 * See http://www.acpi.info/presentations/S01USMOBS169_OS%2520new.ppt
	 * slide 22ff.
	 * Shift the value right by 20 bits to make it fit into 32 bits,
	 * giving us 1 MiB granularity and a limit of almost 4 PiB of memory.
	 */
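	/* Illustrative example (hypothetical value): with TOM2 = 0x2_4000_0000
	 * (9 GiB), msr.hi = 0x2 and msr.lo = 0x4000_0000, so
	 * (msr.hi << 12) | (msr.lo >> 20) = 0x2000 | 0x400 = 0x2400 = 9216 MiB. */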
	acpigen_write_name_dword("TOM2", (msr.hi << 12) | msr.lo >> 20);
	acpigen_pop_len();
}

static void patch_ssdt_processor_scope(acpi_header_t *ssdt)
{
	unsigned int len = ssdt->length - sizeof(acpi_header_t);
	unsigned int i;

	for (i = sizeof(acpi_header_t); i < len; i++) {
		/* Search for _PR_ scope and replace it with _SB_ */
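		/* 0x5f52505f is "_PR_" and 0x5f42535f is "_SB_" when read as a
		 * little-endian 32-bit word. */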
		if (*(uint32_t *)((unsigned long)ssdt + i) == 0x5f52505f)
			*(uint32_t *)((unsigned long)ssdt + i) = 0x5f42535f;
	}
	/* Recalculate checksum */
	ssdt->checksum = 0;
	ssdt->checksum = acpi_checksum((void *)ssdt, ssdt->length);
}

static unsigned long agesa_write_acpi_tables(const struct device *device,
					     unsigned long current,
					     acpi_rsdp_t *rsdp)
{
	acpi_srat_t *srat;
	acpi_slit_t *slit;
	acpi_header_t *ssdt;
	acpi_header_t *alib;
	acpi_ivrs_t *ivrs;

	/* HEST */
	current = ALIGN(current, 8);
	acpi_write_hest((void *)current, acpi_fill_hest);
	acpi_add_table(rsdp, (void *)current);
	current += ((acpi_header_t *)current)->length;

	/* IVRS */
	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * IVRS at %lx\n", current);
	ivrs = (acpi_ivrs_t *) current;
	acpi_create_ivrs(ivrs, acpi_fill_ivrs);
	current += ivrs->header.length;
	acpi_add_table(rsdp, ivrs);

	/* SRAT */
	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SRAT at %lx\n", current);
	srat = (acpi_srat_t *) agesawrapper_getlateinitptr(PICK_SRAT);
	if (srat != NULL) {
		memcpy((void *)current, srat, srat->header.length);
		srat = (acpi_srat_t *) current;
		current += srat->header.length;
		acpi_add_table(rsdp, srat);
	} else {
		printk(BIOS_DEBUG, " AGESA SRAT table NULL. Skipping.\n");
	}

	/* SLIT */
	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SLIT at %lx\n", current);
	slit = (acpi_slit_t *) agesawrapper_getlateinitptr(PICK_SLIT);
	if (slit != NULL) {
		memcpy((void *)current, slit, slit->header.length);
		slit = (acpi_slit_t *) current;
		current += slit->header.length;
		acpi_add_table(rsdp, slit);
	} else {
		printk(BIOS_DEBUG, " AGESA SLIT table NULL. Skipping.\n");
	}

	/* ALIB */
	current = ALIGN(current, 16);
	printk(BIOS_DEBUG, "ACPI: * AGESA ALIB SSDT at %lx\n", current);
	alib = (acpi_header_t *)agesawrapper_getlateinitptr(PICK_ALIB);
	if (alib != NULL) {
		memcpy((void *)current, alib, alib->length);
		alib = (acpi_header_t *) current;
		current += alib->length;
		acpi_add_table(rsdp, (void *)alib);
	}
	else {
		printk(BIOS_DEBUG, " AGESA ALIB SSDT table NULL. Skipping.\n");
	}

	/* this pstate ssdt may cause Blue Screen: Fixed: Keep this comment for a while. */
	/* SSDT */
	current = ALIGN(current, 16);
	printk(BIOS_DEBUG, "ACPI: * SSDT at %lx\n", current);
	ssdt = (acpi_header_t *)agesawrapper_getlateinitptr(PICK_PSTATE);
	if (ssdt != NULL) {
		patch_ssdt_processor_scope(ssdt);
		memcpy((void *)current, ssdt, ssdt->length);
		ssdt = (acpi_header_t *) current;
		current += ssdt->length;
	}
	else {
		printk(BIOS_DEBUG, " AGESA PState table NULL. Skipping.\n");
	}
	acpi_add_table(rsdp, ssdt);

	printk(BIOS_DEBUG, "ACPI: * SSDT for PState at %lx\n", current);
	return current;
}

static struct device_operations northbridge_operations = {
	.read_resources = read_resources,
	.set_resources = set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = northbridge_init,
	.acpi_fill_ssdt = northbridge_fill_ssdt_generator,
	.write_acpi_tables = agesa_write_acpi_tables,
};

static const struct pci_driver family16_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_16H_MODEL_303F_NB_HT,
};

static const struct pci_driver family10_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_10H_NB_HT,
};

static void fam16_finalize(void *chip_info)
{
	struct device *dev;
	u32 value;
	dev = pcidev_on_root(0, 0); /* clear IoapicSbFeatureEn */
	pci_write_config32(dev, 0xF8, 0);
	pci_write_config32(dev, 0xFC, 5); /* TODO: move it to dsdt.asl */

	/*
	 * Currently it is impossible to enable ACS with AGESA by setting the
	 * correct bit for AmdInitMid phase. AGESA code path does not call the
	 * right function that enables these functionalities. Disabled ACS
	 * results in multiple PCIe devices being assigned to the same IOMMU
	 * group. Without IOMMU group separation the devices cannot be passed
	 * through independently.
	 */

	/* Select GPP link core IO Link Strap Control register 0xB0 */
	pci_write_config32(dev, 0xE0, 0x014000B0);
	value = pci_read_config32(dev, 0xE4);

	/* Enable AER (bit 5) and ACS (bit 6 undocumented) */
	value |= PCIE_CAP_AER | PCIE_CAP_ACS;
	pci_write_config32(dev, 0xE4, value);

	/* Select GPP link core Wrapper register 0x00 (undocumented) */
	pci_write_config32(dev, 0xE0, 0x01300000);
	value = pci_read_config32(dev, 0xE4);

	/*
	 * Enable ACS capabilities straps including sub-items. From lspci it
	 * looks like these bits enable: Source Validation and Translation
	 * Blocking
	 */
	value |= (BIT(24) | BIT(25) | BIT(26));
	pci_write_config32(dev, 0xE4, value);

	/* disable No Snoop */
	dev = pcidev_on_root(1, 1);
	if (dev != NULL) {
		value = pci_read_config32(dev, 0x60);
		value &= ~(1 << 11);
		pci_write_config32(dev, 0x60, value);
	}
}

struct chip_operations northbridge_amd_pi_00730F01_ops = {
	CHIP_NAME("AMD FAM16 Northbridge")
	.enable_dev = 0,
	.final = fam16_finalize,
};

static void domain_read_resources(struct device *dev)
{
	unsigned int reg;

	/* Find the already assigned resource pairs */
	get_fx_devs();
	for (reg = 0x80; reg <= 0xd8; reg += 0x08) {
		u32 base, limit;
		base = f1_read_config32(reg);
		limit = f1_read_config32(reg + 0x04);
		/* Is this register allocated? */
		if ((base & 3) != 0) {
			unsigned int nodeid, reg_link;
			struct device *reg_dev;
			if (reg < 0xc0) { // mmio
				nodeid = (limit & 0xf) + (base & 0x30);
			} else { // io
				nodeid = (limit & 0xf) + ((base >> 4) & 0x30);
			}
			reg_link = (limit >> 4) & 7;
			reg_dev = __f0_dev[nodeid];
			if (reg_dev) {
				/* Reserve the resource */
				struct resource *res;
				res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
				if (res) {
					res->flags = 1;
				}
			}
		}
	}
	/* FIXME: do we need to check the extended config space as well?
	 * It is unlikely that many values are preset there. */
	pci_domain_read_resources(dev);
}

static void domain_enable_resources(struct device *dev)
{
}

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
struct hw_mem_hole_info {
	unsigned int hole_startk;
	int node_id;
};
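/*
 * Descriptive note (derived from the code below): find the node, if any,
 * whose DRAM hole register (function 1, offset 0xf0) reports an enabled
 * memory hole below 4 GiB, and return the hole start in KiB together with
 * that node id.
 */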
static struct hw_mem_hole_info get_hw_mem_hole_info(void)
{
	struct hw_mem_hole_info mem_hole;
	int i;
	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
	mem_hole.node_id = -1;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		u32 hole;
		d = get_dram_base_mask(i);
		if (!(d.mask & 1)) continue; // no memory on this node
		hole = pci_read_config32(__f1_dev[i], 0xf0);
		if (hole & 2) { // we found the hole
			mem_hole.hole_startk = (hole & (0xff << 24)) >> 10;
			mem_hole.node_id = i; // record the node number with the hole
			break; // only one hole
		}
	}

	/* Double check whether the base and limit registers are not
	 * contiguous; such a gap also constitutes a hole, and its start
	 * becomes hole_startk.
	 */
	if (mem_hole.node_id == -1) {
		resource_t limitk_pri = 0;
		for (i = 0; i < node_nums; i++) {
			dram_base_mask_t d;
			resource_t base_k, limit_k;
			d = get_dram_base_mask(i);
			if (!(d.base & 1)) continue;
			base_k = ((resource_t)(d.base & 0x1fffff00)) << 9;
			if (base_k > 4 * 1024 * 1024) break; // no need to check further
			if (limitk_pri != base_k) { // we found the hole
				mem_hole.hole_startk = (unsigned int)limitk_pri; // must be below 4G
				mem_hole.node_id = i;
				break; //only one hole
			}
			limit_k = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;
			limitk_pri = limit_k;
		}
	}
	return mem_hole;
}
#endif

static void domain_set_resources(struct device *dev)
{
	unsigned long mmio_basek;
	u32 pci_tolm;
	int i, idx;
	struct bus *link;
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	struct hw_mem_hole_info mem_hole;
#endif

	pci_tolm = 0xffffffffUL;
	for (link = dev->link_list; link; link = link->next) {
		pci_tolm = find_pci_tolm(link);
	}

	// FIXME handle interleaved nodes. If you fix this here, please fix
	// amdk8, too.
	mmio_basek = pci_tolm >> 10;
	/* Round mmio_basek to something the processor can support */
	mmio_basek &= ~((1 << 6) - 1);

	// FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
	// MMIO hole. If you fix this here, please fix amdk8, too.
	/* Round the mmio hole to 64M */
	mmio_basek &= ~((64 * 1024) - 1);

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	/* If the hw mem hole was already set in the raminit stage, compare
	 * mmio_basek and hole_startk. If mmio_basek is above hole_startk,
	 * use hole_startk as mmio_basek and do not reset the hole;
	 * otherwise reset the hole to mmio_basek.
	 */

	mem_hole = get_hw_mem_hole_info();

	// Use hole_startk as mmio_basek, and we don't need to reset the hole anymore
	if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
		mmio_basek = mem_hole.hole_startk;
	}
#endif

	idx = 0x10;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		resource_t basek, limitk, sizek; // 4 1T

		d = get_dram_base_mask(i);

		if (!(d.mask & 1)) continue;
		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow; we may lose 6 bits here
		limitk = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;

		sizek = limitk - basek;

		/* see if we need a hole from 0xa0000 to 0xbffff */
		if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
			ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
			idx += 0x10;
			basek = (8*64)+(16*16);
			sizek = limitk - ((8*64)+(16*16));
		}

		//printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);

		/* split the region to accommodate pci memory space */
		if ((basek < 4*1024*1024) && (limitk > mmio_basek)) {
			if (basek <= mmio_basek) {
				unsigned int pre_sizek;
				pre_sizek = mmio_basek - basek;
				if (pre_sizek > 0) {
					ram_resource(dev, (idx | i), basek, pre_sizek);
					idx += 0x10;
					sizek -= pre_sizek;
				}
				basek = mmio_basek;
			}
			if ((basek + sizek) <= 4*1024*1024) {
				sizek = 0;
			}
			else {
				uint64_t topmem2 = bsp_topmem2();
				basek = 4*1024*1024;
				sizek = topmem2/1024 - basek;
			}
		}

		ram_resource(dev, (idx | i), basek, sizek);
		idx += 0x10;
		printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
		       i, mmio_basek, basek, limitk);
	}

	add_uma_resource_below_tolm(dev, 7);

	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			assign_resources(link);
		}
	}
}

static const char *domain_acpi_name(const struct device *dev)
{
	if (dev->path.type == DEVICE_PATH_DOMAIN)
		return "PCI0";

	return NULL;
}

static struct device_operations pci_domain_ops = {
	.read_resources = domain_read_resources,
	.set_resources = domain_set_resources,
	.enable_resources = domain_enable_resources,
	.scan_bus = pci_domain_scan_bus,
	.acpi_name = domain_acpi_name,
};

static void sysconf_init(struct device *dev) // first node
{
	sblink = (pci_read_config32(dev, 0x64) >> 8) & 7; // don't forget sublink1
	node_nums = ((pci_read_config32(dev, 0x60) >> 4) & 7) + 1; //NodeCnt[2:0]
}

static void cpu_bus_scan(struct device *dev)
{
	struct bus *cpu_bus;
	struct device *dev_mc;
	int i, j;
	int coreid_bits;
	int core_max = 0;
	unsigned int ApicIdCoreIdSize;
	unsigned int core_nums;
	int siblings = 0;
	unsigned int family;
	u32 modules = 0;
	int ioapic_count = 0;

	/* For binaryPI there is no multiprocessor configuration, the number of
	 * modules will always be 1. */
	modules = 1;
	ioapic_count = CONFIG_NUM_OF_IOAPICS;

	dev_mc = pcidev_on_root(DEV_CDB, 0);
	if (!dev_mc) {
		printk(BIOS_ERR, "0:%02x.0 not found", DEV_CDB);
		die("");
	}
	sysconf_init(dev_mc);

	/* Get Max Number of cores (MNC) */
	coreid_bits = (cpuid_ecx(0x80000008) & 0x0000F000) >> 12;
	core_max = 1 << (coreid_bits & 0x000F); //mnc

	ApicIdCoreIdSize = ((cpuid_ecx(0x80000008) >> 12) & 0xF);
	if (ApicIdCoreIdSize) {
		core_nums = (1 << ApicIdCoreIdSize) - 1;
	} else {
		core_nums = 3; //quad core
	}

	/* Find which cpus are present */
	cpu_bus = dev->link_list;
	for (i = 0; i < node_nums; i++) {
		struct device *cdb_dev;
		unsigned int devn;
		struct bus *pbus;

		devn = DEV_CDB + i;
		pbus = dev_mc->bus;

		/* Find the cpu's pci device */
		cdb_dev = pcidev_on_root(devn, 0);
		if (!cdb_dev) {
			/* If I am probing things in a weird order
			 * ensure all of the cpu's pci devices are found.
			 */
			int fn;
			for (fn = 0; fn <= 5; fn++) { //FBDIMM?
				cdb_dev = pci_probe_dev(NULL, pbus,
							PCI_DEVFN(devn, fn));
			}
			cdb_dev = pcidev_on_root(devn, 0);
		} else {
			/* Ok, we need to set the links for that device,
			 * otherwise the devices under it will not be scanned.
			 */

			add_more_links(cdb_dev, 4);
		}

		family = cpuid_eax(1);
		family = (family >> 20) & 0xFF;
		if (family == 1) { //f10
			u32 dword;
			cdb_dev = pcidev_on_root(devn, 3);
			dword = pci_read_config32(cdb_dev, 0xe8);
			siblings = ((dword & BIT15) >> 13) | ((dword & (BIT13 | BIT12)) >> 12);
		} else if (family == 7) { //f16
			cdb_dev = pcidev_on_root(devn, 5);
			if (cdb_dev && cdb_dev->enabled) {
				siblings = pci_read_config32(cdb_dev, 0x84);
				siblings &= 0xFF;
			}
		} else {
			siblings = 0; //default one core
		}
		int enable_node = cdb_dev && cdb_dev->enabled;
		printk(BIOS_SPEW, "%s family%xh, core_max = 0x%x, core_nums = 0x%x, siblings = 0x%x\n",
		       dev_path(cdb_dev), 0x0f + family, core_max, core_nums, siblings);

		for (j = 0; j <= siblings; j++) {
			u32 lapicid_start = 0;

			/*
			 * APIC ID calculation is tightly coupled with AGESA v5 code.
			 * This calculation MUST match the assignment calculation done
			 * in LocalApicInitializationAtEarly() function.
			 * And reference GetLocalApicIdForCore()
			 *
			 * Apply APIC enumeration rules
			 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
			 * put the local-APICs at m..z
			 *
			 * This is needed because many IO-APIC devices only have 4 bits
			 * for their APIC id and therefore must reside at 0..15
			 */
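			/* Illustrative example (hypothetical values): with core_max = 8
			 * and ioapic_count = 3, lapicid_start = ((3 - 1) / 8 + 1) * 8 = 8,
			 * so IO-APICs keep IDs 0..7 and local APICs start at 8. */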
			if ((node_nums * core_max) + ioapic_count >= 0x10) {
				lapicid_start = (ioapic_count - 1) / core_max;
				lapicid_start = (lapicid_start + 1) * core_max;
				printk(BIOS_SPEW, "lapicid_start = 0x%x ", lapicid_start);
			}
			u32 apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (siblings + 1)) : j);
			printk(BIOS_SPEW, "node 0x%x core 0x%x apicid = 0x%x\n",
			       i, j, apic_id);

			struct device *cpu = add_cpu_device(cpu_bus, apic_id, enable_node);
			if (cpu)
				amd_cpu_topology(cpu, i, j);
		} //j
	}
}

static void cpu_bus_init(struct device *dev)
{
	initialize_cpus(dev->link_list);
}

static struct device_operations cpu_bus_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.init = cpu_bus_init,
	.scan_bus = cpu_bus_scan,
};

static void root_complex_enable_dev(struct device *dev)
{
	static int done = 0;

	if (!done) {
		setup_bsp_ramtop();
		done = 1;
	}

	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

struct chip_operations northbridge_amd_pi_00730F01_root_complex_ops = {
	CHIP_NAME("AMD FAM16 Root Complex")
	.enable_dev = root_complex_enable_dev,
};

/*********************************************************************
 * Change the vendor / device IDs to match the generic VBIOS header. *
 *********************************************************************/
u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev;
	new_vendev =
		((0x10029850 <= vendev) && (vendev <= 0x1002986F)) ? 0x10029850 : vendev;

	if (vendev != new_vendev)
		printk(BIOS_NOTICE, "Mapping PCI device %8x to %8x\n", vendev, new_vendev);

	return new_vendev;
}