/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2012 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.
 */

#include <console/console.h>
#include <arch/io.h>
#include <arch/acpi.h>
#include <arch/acpigen.h>
#include <stdint.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/hypertransport.h>
#include <stdlib.h>
#include <string.h>
#include <lib.h>
#include <cpu/cpu.h>
#include <cbmem.h>

#include <cpu/x86/lapic.h>
#include <cpu/amd/mtrr.h>

#include <Porting.h>
#include <AGESA.h>
#include <Options.h>
#include <Topology.h>
#include <cpu/amd/amdfam16.h>
#include <cpuRegisters.h>
#include <northbridge/amd/agesa/agesawrapper.h>

#define MAX_NODE_NUMS (MAX_NODES * MAX_DIES)

typedef struct dram_base_mask {
	u32 base; //[47:27] at [28:8]
	u32 mask; //[47:27] at [28:8] and enable at bit 0
} dram_base_mask_t;

static unsigned node_nums;
static unsigned sblink;
static device_t __f0_dev[MAX_NODE_NUMS];
static device_t __f1_dev[MAX_NODE_NUMS];
static device_t __f2_dev[MAX_NODE_NUMS];
static device_t __f4_dev[MAX_NODE_NUMS];
static unsigned fx_devs = 0;

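/*
 * Read the DRAM base/limit pair of a node from D18F1 and pack address
 * bits [47:27] into bits [28:8] of the returned fields; the range
 * enable bit is folded into bit 0 of .mask.
 */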
static dram_base_mask_t get_dram_base_mask(u32 nodeid)
{
	device_t dev;
	dram_base_mask_t d;
	dev = __f1_dev[0];
	u32 temp;
	temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask = ((temp & 0xfff80000) >> (8 + 3)); // mask out DramMask [26:24] too
	temp = pci_read_config32(dev, 0x144 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.mask |= temp << 21;
	temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask |= (temp & 1); // enable bit
	d.base = ((temp & 0xfff80000) >> (8 + 3)); // mask out DramBase [26:24] too
	temp = pci_read_config32(dev, 0x140 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.base |= temp << 21;
	return d;
}

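/*
 * Program one F1 I/O base/limit register pair: the limit word goes to
 * reg + 4 and the base word to reg, written to every node so that all
 * of them route the range to the given node/link.
 */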
static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
			u32 io_min, u32 io_max)
{
	u32 i;
	u32 tempreg;
	/* io range allocation */
	tempreg = (nodeid & 0xf) | ((nodeid & 0x30) << (8 - 4)) | (linkn << 4) | ((io_max & 0xf0) << (12 - 4)); //limit
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg + 4, tempreg);
	tempreg = 3 /*| (3 << 4)*/ | ((io_min & 0xf0) << (12 - 4)); // base: ISA and VGA?
#if 0
	// FIXME: can we use VGA reg instead?
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
		printk(BIOS_SPEW, "%s, enabling legacy VGA IO forwarding for %s link %s\n",
			__func__, dev_path(dev), link);
		tempreg |= PCI_IO_BASE_VGA_EN;
	}
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
		tempreg |= PCI_IO_BASE_NO_ISA;
	}
#endif
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}

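/*
 * Program one F1 MMIO base/limit register pair (limit at reg + 4,
 * base at reg) on every node, routing the range to nodeid/linkn.
 */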
static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
{
	u32 i;
	u32 tempreg;
	/* mmio range allocation */
	tempreg = (nodeid & 0xf) | (linkn << 4) | (mmio_max & 0xffffff00); //limit
	for (i = 0; i < nodes; i++)
		pci_write_config32(__f1_dev[i], reg + 4, tempreg);
	tempreg = 3 | (nodeid & 0x30) | (mmio_min & 0xffffff00);
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}

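/*
 * Return the PCI device of a node's northbridge function; nodes past
 * device 31 spill over to the bus below CONFIG_CBB.
 */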
static device_t get_node_pci(u32 nodeid, u32 fn)
{
#if MAX_NODE_NUMS + CONFIG_CDB >= 32
	if ((CONFIG_CDB + nodeid) < 32) {
		return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
	} else {
		return dev_find_slot(CONFIG_CBB - 1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
	}
#else
	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
#endif
}

static void get_fx_devs(void)
{
	int i;
	for (i = 0; i < MAX_NODE_NUMS; i++) {
		__f0_dev[i] = get_node_pci(i, 0);
		__f1_dev[i] = get_node_pci(i, 1);
		__f2_dev[i] = get_node_pci(i, 2);
		__f4_dev[i] = get_node_pci(i, 4);
		if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
			fx_devs = i + 1;
	}
	if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
		die("Cannot find 0:0x18.[0|1]\n");
	}
	printk(BIOS_DEBUG, "fx_devs=0x%x\n", fx_devs);
}

static u32 f1_read_config32(unsigned reg)
{
	if (fx_devs == 0)
		get_fx_devs();
	return pci_read_config32(__f1_dev[0], reg);
}

static void f1_write_config32(unsigned reg, u32 value)
{
	int i;
	if (fx_devs == 0)
		get_fx_devs();
	for (i = 0; i < fx_devs; i++) {
		device_t dev;
		dev = __f1_dev[i];
		if (dev && dev->enabled) {
			pci_write_config32(dev, reg, value);
		}
	}
}

static u32 amdfam16_nodeid(device_t dev)
{
#if MAX_NODE_NUMS == 64
	unsigned busn;
	busn = dev->bus->secondary;
	if (busn != CONFIG_CBB) {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
	} else {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
	}

#else
	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
#endif
}

static void set_vga_enable_reg(u32 nodeid, u32 linkn)
{
	u32 val;

	val = 1 | (nodeid << 4) | (linkn << 12);
	/* This routes:
	 * (1) MMIO 0xa0000 - 0xbffff
	 * (2) I/O  0x3b0 - 0x3bb, 0x3c0 - 0x3df
	 */
	f1_write_config32(0xf4, val);

}

/**
 * @return
 * @retval 2  resource does not exist, usable
 * @retval 0  resource exists, not usable
 * @retval 1  resource exists, resource has been allocated before
 */
static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
			unsigned goal_link)
{
	struct resource *res;
	unsigned nodeid, link = 0;
	int result;
	res = 0;
	for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
		device_t dev;
		dev = __f0_dev[nodeid];
		if (!dev)
			continue;
		for (link = 0; !res && (link < 8); link++) {
			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
		}
	}
	result = 2;
	if (res) {
		result = 0;
		if ((goal_link == (link - 1)) &&
		    (goal_nodeid == (nodeid - 1)) &&
		    (res->flags <= 1)) {
			result = 1;
		}
	}
	return result;
}

static struct resource *amdfam16_find_iopair(device_t dev, unsigned nodeid, unsigned link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xd8) {
		reg = free_reg; // if no free pair was found, free_reg is still 0
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));

	return resource;
}

static struct resource *amdfam16_find_mempair(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xb8) {
		reg = free_reg;
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
	return resource;
}

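/*
 * Create the I/O, prefetchable memory and memory window resources for
 * one HT link so the allocator can size the ranges routed behind it.
 */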
static void amdfam16_link_read_bases(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;

	/* Initialize the io space constraints on the current bus */
	resource = amdfam16_find_iopair(dev, nodeid, link);
	if (resource) {
		u32 align;
		align = log2(HT_IO_HOST_ALIGN);
		resource->base = 0;
		resource->size = 0;
		resource->align = align;
		resource->gran = align;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
	}

	/* Initialize the prefetchable memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		resource->flags |= IORESOURCE_BRIDGE;
	}

	/* Initialize the memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
	}

}

static void read_resources(device_t dev)
{
	u32 nodeid;
	struct bus *link;

	nodeid = amdfam16_nodeid(dev);
	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			amdfam16_link_read_bases(dev, nodeid, link->link_num);
		}
	}

	/*
	 * This MMCONF resource must be reserved in the PCI_DOMAIN.
	 * It is not honored by the coreboot resource allocator if it is in
	 * the APIC_CLUSTER.
	 */
#if CONFIG_MMCONF_SUPPORT
	struct resource *resource = new_resource(dev, 0xc0010058);
	resource->base = CONFIG_MMCONF_BASE_ADDRESS;
	resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096 * 256;
	resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
#endif
}

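/*
 * Store an allocated function 1 resource back into the routing
 * registers: I/O windows via set_io_addr_reg(), memory windows via
 * set_mmio_addr_reg().
 */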
static void set_resource(device_t dev, struct resource *resource, u32 nodeid)
{
	resource_t rbase, rend;
	unsigned reg, link_num;
	char buf[50];

	/* Make certain the resource has actually been set */
	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
		return;
	}

	/* If I have already stored this resource don't worry about it */
	if (resource->flags & IORESOURCE_STORED) {
		return;
	}

	/* Only handle PCI memory and IO resources */
	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
		return;

	/* Ensure I am actually looking at a resource of function 1 */
	if ((resource->index & 0xffff) < 0x1000) {
		return;
	}
	/* Get the base address */
	rbase = resource->base;

	/* Get the limit (rounded up) */
	rend = resource_end(resource);

	/* Get the register and link */
	reg = resource->index & 0xfff; // 4k
	link_num = IOINDEX_LINK(resource->index);

	if (resource->flags & IORESOURCE_IO) {
		set_io_addr_reg(dev, nodeid, link_num, reg, rbase >> 8, rend >> 8);
	}
	else if (resource->flags & IORESOURCE_MEM) {
		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >> 24), rbase >> 8, rend >> 8, node_nums); // [39:8]
	}
	resource->flags |= IORESOURCE_STORED;
	snprintf(buf, sizeof(buf), " <node %x link %x>",
		 nodeid, link_num);
	report_resource_stored(dev, resource, buf);
}

/**
 * I tried to reuse the resource allocation code in set_resource()
 * but it is too difficult to deal with the resource allocation magic.
 */

static void create_vga_resource(device_t dev, unsigned nodeid)
{
	struct bus *link;

	/* Find out which link the VGA card is connected to;
	 * we only deal with the 'first' VGA card. */
	for (link = dev->link_list; link; link = link->next) {
		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
#if CONFIG_MULTIPLE_VGA_ADAPTERS
			extern device_t vga_pri; // the primary vga device, defined in device.c
			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
				link->secondary, link->subordinate);
			/* We need to make sure the vga_pri is under the link */
			if ((vga_pri->bus->secondary >= link->secondary) &&
			    (vga_pri->bus->secondary <= link->subordinate)
			   )
#endif
			break;
		}
	}

	/* no VGA card installed */
	if (link == NULL)
		return;

	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, sblink);
	set_vga_enable_reg(nodeid, sblink);
}

static void set_resources(device_t dev)
{
	unsigned nodeid;
	struct bus *bus;
	struct resource *res;

	/* Find the nodeid */
	nodeid = amdfam16_nodeid(dev);

	create_vga_resource(dev, nodeid); //TODO: do we need this?

	/* Set each resource we have found */
	for (res = dev->resource_list; res; res = res->next) {
		set_resource(dev, res, nodeid);
	}

	for (bus = dev->link_list; bus; bus = bus->next) {
		if (bus->children) {
			assign_resources(bus);
		}
	}

	/* Print the MMCONF region if it has been reserved. */
	res = find_resource(dev, 0xc0010058);
	if (res) {
		report_resource_stored(dev, res, " <mmconfig>");
	}
}

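/* Fill the HEST with the WHEA MCE and CMC error sources prepared by AGESA. */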
static unsigned long acpi_fill_hest(acpi_hest_t *hest)
{
	void *addr, *current;

	/* Skip the HEST header. */
	current = (void *)(hest + 1);

	addr = agesawrapper_getlateinitptr(PICK_WHEA_MCE);
	if (addr != NULL)
		current += acpi_create_hest_error_source(hest, current, 0, (void *)((u32)addr + 2), *(UINT16 *)addr - 2);

	addr = agesawrapper_getlateinitptr(PICK_WHEA_CMC);
	if (addr != NULL)
		current += acpi_create_hest_error_source(hest, current, 1, (void *)((u32)addr + 2), *(UINT16 *)addr - 2);

	return (unsigned long)current;
}

static void northbridge_fill_ssdt_generator(void)
{
	msr_t msr;
	char pscope[] = "\\_SB.PCI0";

	acpigen_write_scope(pscope);
	msr = rdmsr(TOP_MEM);
	acpigen_write_name_dword("TOM1", msr.lo);
	msr = rdmsr(TOP_MEM2);
	/*
	 * Since XP only implements parts of ACPI 2.0, we can't use a qword
	 * here.
	 * See http://www.acpi.info/presentations/S01USMOBS169_OS%2520new.ppt
	 * slide 22ff.
	 * Shift the value right by 20 bits to make it fit into 32 bits,
	 * giving us 1MB granularity and a limit of almost 4 exabytes of memory.
	 */
	acpigen_write_name_dword("TOM2", (msr.hi << 12) | msr.lo >> 20);
	acpigen_pop_len();
}

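/*
 * Append the AGESA-generated ACPI tables (HEST, IVRS, SRAT, SLIT, ALIB
 * and the PState SSDT) to the RSDP; tables AGESA did not produce are
 * skipped.
 */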
static unsigned long agesa_write_acpi_tables(unsigned long current,
		acpi_rsdp_t *rsdp)
{
	acpi_srat_t *srat;
	acpi_slit_t *slit;
	acpi_header_t *ssdt;
	acpi_header_t *alib;
	acpi_header_t *ivrs;
	acpi_hest_t *hest;

	/* HEST */
	current = ALIGN(current, 8);
	hest = (acpi_hest_t *)current;
	acpi_write_hest((void *)current, acpi_fill_hest);
	acpi_add_table(rsdp, (void *)current);
	current += ((acpi_header_t *)current)->length;

	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * IVRS at %lx\n", current);
	ivrs = agesawrapper_getlateinitptr(PICK_IVRS);
	if (ivrs != NULL) {
		memcpy((void *)current, ivrs, ivrs->length);
		ivrs = (acpi_header_t *) current;
		current += ivrs->length;
		acpi_add_table(rsdp, ivrs);
	} else {
		printk(BIOS_DEBUG, " AGESA IVRS table NULL. Skipping.\n");
	}

	/* SRAT */
	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SRAT at %lx\n", current);
	srat = (acpi_srat_t *) agesawrapper_getlateinitptr(PICK_SRAT);
	if (srat != NULL) {
		memcpy((void *)current, srat, srat->header.length);
		srat = (acpi_srat_t *) current;
		current += srat->header.length;
		acpi_add_table(rsdp, srat);
	} else {
		printk(BIOS_DEBUG, " AGESA SRAT table NULL. Skipping.\n");
	}

	/* SLIT */
	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SLIT at %lx\n", current);
	slit = (acpi_slit_t *) agesawrapper_getlateinitptr(PICK_SLIT);
	if (slit != NULL) {
		memcpy((void *)current, slit, slit->header.length);
		slit = (acpi_slit_t *) current;
		current += slit->header.length;
		acpi_add_table(rsdp, slit);
	} else {
		printk(BIOS_DEBUG, " AGESA SLIT table NULL. Skipping.\n");
	}

	/* ALIB */
	current = ALIGN(current, 16);
	printk(BIOS_DEBUG, "ACPI: * AGESA ALIB SSDT at %lx\n", current);
	alib = (acpi_header_t *)agesawrapper_getlateinitptr(PICK_ALIB);
	if (alib != NULL) {
		memcpy((void *)current, alib, alib->length);
		alib = (acpi_header_t *) current;
		current += alib->length;
		acpi_add_table(rsdp, (void *)alib);
	}
	else {
		printk(BIOS_DEBUG, " AGESA ALIB SSDT table NULL. Skipping.\n");
	}

	/* this pstate ssdt may cause Blue Screen: Fixed: Keep this comment for a while. */
	/* SSDT */
	current = ALIGN(current, 16);
	printk(BIOS_DEBUG, "ACPI: * SSDT at %lx\n", current);
	ssdt = (acpi_header_t *)agesawrapper_getlateinitptr(PICK_PSTATE);
	if (ssdt != NULL) {
		memcpy((void *)current, ssdt, ssdt->length);
		ssdt = (acpi_header_t *) current;
		current += ssdt->length;
	}
	else {
		printk(BIOS_DEBUG, " AGESA PState table NULL. Skipping.\n");
	}
	acpi_add_table(rsdp, ssdt);

	printk(BIOS_DEBUG, "ACPI: * SSDT for PState at %lx\n", current);

	return current;
}

static struct device_operations northbridge_operations = {
	.read_resources = read_resources,
	.set_resources = set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = DEVICE_NOOP,
	.acpi_fill_ssdt_generator = northbridge_fill_ssdt_generator,
	.write_acpi_tables = agesa_write_acpi_tables,
	.enable = 0,
	.ops_pci = 0,
};

static const struct pci_driver family16_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_16H_MODEL_000F_NB_HT,
};

static const struct pci_driver family10_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_10H_NB_HT,
};

static void fam16_finalize(void *chip_info)
{
	device_t dev;
	u32 value;
	dev = dev_find_slot(0, PCI_DEVFN(0, 0)); /* clear IoapicSbFeatureEn */
	pci_write_config32(dev, 0xF8, 0);
	pci_write_config32(dev, 0xFC, 5); /* TODO: move it to dsdt.asl */

	/* disable No Snoop */
	dev = dev_find_slot(0, PCI_DEVFN(1, 1));
	value = pci_read_config32(dev, 0x60);
	value &= ~(1 << 11);
	pci_write_config32(dev, 0x60, value);
}

struct chip_operations northbridge_amd_agesa_family16kb_ops = {
	CHIP_NAME("AMD FAM16 Northbridge")
	.enable_dev = 0,
	.final = fam16_finalize,
};

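/*
 * Claim the F1 base/limit pairs that are already programmed so the
 * allocator does not hand those registers out again.
 */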
static void domain_read_resources(device_t dev)
{
	unsigned reg;

	/* Find the already assigned resource pairs */
	get_fx_devs();
	for (reg = 0x80; reg <= 0xd8; reg += 0x08) {
		u32 base, limit;
		base = f1_read_config32(reg);
		limit = f1_read_config32(reg + 0x04);
		/* Is this register allocated? */
		if ((base & 3) != 0) {
			unsigned nodeid, reg_link;
			device_t reg_dev;
			if (reg < 0xc0) { // mmio
				nodeid = (limit & 0xf) + (base & 0x30);
			} else { // io
				nodeid = (limit & 0xf) + ((base >> 4) & 0x30);
			}
			reg_link = (limit >> 4) & 7;
			reg_dev = __f0_dev[nodeid];
			if (reg_dev) {
				/* Reserve the resource */
				struct resource *res;
				res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
				if (res) {
					res->flags = 1;
				}
			}
		}
	}
	/* FIXME: do we need to check the extended config space?
	   I don't trust the preset values that much. */

#if !CONFIG_PCI_64BIT_PREF_MEM
	pci_domain_read_resources(dev);

#else
	struct bus *link;
	struct resource *resource;
	for (link = dev->link_list; link; link = link->next) {
		/* Initialize the system wide io space constraints */
		resource = new_resource(dev, 0 | (link->link_num << 2));
		resource->base = 0x400;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO;

		/* Initialize the system wide prefetchable memory resources constraints */
		resource = new_resource(dev, 1 | (link->link_num << 2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

		/* Initialize the system wide memory resources constraints */
		resource = new_resource(dev, 2 | (link->link_num << 2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM;
	}
#endif
}

static void domain_enable_resources(device_t dev)
{
	if (acpi_is_wakeup_s3())
		agesawrapper_fchs3laterestore();

	/* Must be called after PCI enumeration and resource allocation */
	if (!acpi_is_wakeup_s3()) {
		/* Enable MMIO on AMD CPU Address Map Controller */
		amd_initcpuio();

		agesawrapper_amdinitmid();
	}
	printk(BIOS_DEBUG, " ader - leaving domain_enable_resources.\n");
}

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
struct hw_mem_hole_info {
	unsigned hole_startk;
	int node_id;
};
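/*
 * Locate the memory hole below 4GB: check the hole register (D18F1xF0)
 * on each node first, then fall back to looking for a discontinuity
 * between the nodes' DRAM ranges.
 */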
static struct hw_mem_hole_info get_hw_mem_hole_info(void)
{
	struct hw_mem_hole_info mem_hole;
	int i;
	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
	mem_hole.node_id = -1;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		u32 hole;
		d = get_dram_base_mask(i);
		if (!(d.mask & 1)) continue; // no memory on this node
		hole = pci_read_config32(__f1_dev[i], 0xf0);
		if (hole & 2) { // we found the hole
			mem_hole.hole_startk = (hole & (0xff << 24)) >> 10;
			mem_hole.node_id = i; // record the node number with the hole
			break; // only one hole
		}
	}

	/* Double check: if the base and limit registers describe ranges that
	 * are not contiguous (instead of an explicit hole), derive
	 * hole_startk from the gap.
	 */
	if (mem_hole.node_id == -1) {
		resource_t limitk_pri = 0;
		for (i = 0; i < node_nums; i++) {
			dram_base_mask_t d;
			resource_t base_k, limit_k;
			d = get_dram_base_mask(i);
			if (!(d.base & 1)) continue;
			base_k = ((resource_t)(d.base & 0x1fffff00)) << 9;
			if (base_k > 4 * 1024 * 1024) break; // don't need to check further
			if (limitk_pri != base_k) { // we found the hole
				mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
				mem_hole.node_id = i;
				break; // only one hole
			}
			limit_k = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;
			limitk_pri = limit_k;
		}
	}
	return mem_hole;
}
#endif

#define ONE_MB_SHIFT 20

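/*
 * Size the UMA frame buffer from the amount of system memory, following
 * the BKDG recommendation quoted below, and place it directly below
 * TOP_MEM.
 */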
static void setup_uma_memory(void)
{
#if CONFIG_GFXUMA
	uint32_t topmem = (uint32_t) bsp_topmem();
	uint32_t sys_mem;

	/* refer to UMA Size Consideration in Family16h BKDG. */
	/* Please reference MemNGetUmaSizeOR () */
	/*
	 *  Total system memory   UMASize
	 *  >= 2G                 512M
	 *  >= 1G                 256M
	 *  <  1G                 64M
	 */
	sys_mem = topmem + (16 << ONE_MB_SHIFT); // Ignore 16MB allocated for C6 when finding UMA size
	if ((bsp_topmem2() >> 32) || (sys_mem >= 2048 << ONE_MB_SHIFT)) {
		uma_memory_size = 512 << ONE_MB_SHIFT;
	} else if (sys_mem >= 1024 << ONE_MB_SHIFT) {
		uma_memory_size = 256 << ONE_MB_SHIFT;
	} else {
		uma_memory_size = 64 << ONE_MB_SHIFT;
	}
	uma_memory_base = topmem - uma_memory_size; /* TOP_MEM1 */

	printk(BIOS_INFO, "%s: uma size 0x%08llx, memory start 0x%08llx\n",
		__func__, uma_memory_size, uma_memory_base);

	/* TODO: TOP_MEM2 */
#endif
}

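/*
 * Report each node's DRAM ranges to the resource allocator, carving out
 * the legacy VGA hole and the MMIO hole below 4GB, and record the
 * resulting top of usable RAM.
 */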
static void domain_set_resources(device_t dev)
{
#if CONFIG_PCI_64BIT_PREF_MEM
	struct resource *io, *mem1, *mem2;
	struct resource *res;
#endif
	unsigned long mmio_basek;
	u32 pci_tolm;
	u64 ramtop = 0;
	int i, idx;
	struct bus *link;
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	struct hw_mem_hole_info mem_hole;
	u32 reset_memhole = 1;
#endif

#if CONFIG_PCI_64BIT_PREF_MEM

	for (link = dev->link_list; link; link = link->next) {
		/* Now reallocate the pci resources memory with the
		 * highest addresses I can manage.
		 */
		mem1 = find_resource(dev, 1 | (link->link_num << 2));
		mem2 = find_resource(dev, 2 | (link->link_num << 2));

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);

		/* See if both resources have roughly the same limits */
		if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
			((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
		{
			/* If so place the one with the most stringent alignment first */
			if (mem2->align > mem1->align) {
				struct resource *tmp;
				tmp = mem1;
				mem1 = mem2;
				mem2 = tmp;
			}
			/* Now place the memory as high up as it will go */
			mem2->base = resource_max(mem2);
			mem1->limit = mem2->base - 1;
			mem1->base = resource_max(mem1);
		}
		else {
			/* Place the resources as high up as they will go */
			mem2->base = resource_max(mem2);
			mem1->base = resource_max(mem1);
		}

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);
	}

	for (res = dev->resource_list; res; res = res->next)
	{
		res->flags |= IORESOURCE_ASSIGNED;
		res->flags |= IORESOURCE_STORED;
		report_resource_stored(dev, res, "");
	}
#endif

	pci_tolm = 0xffffffffUL;
	for (link = dev->link_list; link; link = link->next) {
		pci_tolm = find_pci_tolm(link);
	}

	// FIXME handle interleaved nodes. If you fix this here, please fix
	// amdk8, too.
	mmio_basek = pci_tolm >> 10;
	/* Round mmio_basek to something the processor can support */
	mmio_basek &= ~((1 << 6) - 1);

	// FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
	// MMIO hole. If you fix this here, please fix amdk8, too.
	/* Round the mmio hole to 64M */
	mmio_basek &= ~((64 * 1024) - 1);

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	/* If the hw mem hole was already set up in the raminit stage, compare
	 * mmio_basek and hole_startk here.  If mmio_basek is above hole_startk,
	 * use hole_startk as mmio_basek and the hole does not need to be reset.
	 * Otherwise the hole is reset to mmio_basek.
	 */

	mem_hole = get_hw_mem_hole_info();

	// Use hole_startk as mmio_basek, and we don't need to reset the hole anymore
	if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
		mmio_basek = mem_hole.hole_startk;
		reset_memhole = 0;
	}
#endif

	idx = 0x10;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		resource_t basek, limitk, sizek; // 4 1T

		d = get_dram_base_mask(i);

		if (!(d.mask & 1)) continue;
		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lose 6 bits here
		limitk = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;

		sizek = limitk - basek;

		/* see if we need a hole from 0xa0000 to 0xbffff */
		if ((basek < ((8 * 64) + (8 * 16))) && (sizek > ((8 * 64) + (16 * 16)))) {
			ram_resource(dev, (idx | i), basek, ((8 * 64) + (8 * 16)) - basek);
			idx += 0x10;
			basek = (8 * 64) + (16 * 16);
			sizek = limitk - ((8 * 64) + (16 * 16));

		}

		//printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);

		/* split the region to accommodate pci memory space */
		if ((basek < 4 * 1024 * 1024) && (limitk > mmio_basek)) {
			if (basek <= mmio_basek) {
				unsigned pre_sizek;
				pre_sizek = mmio_basek - basek;
				if (pre_sizek > 0) {
					ram_resource(dev, (idx | i), basek, pre_sizek);
					idx += 0x10;
					sizek -= pre_sizek;
					if (!ramtop)
						ramtop = mmio_basek * 1024;
				}
				basek = mmio_basek;
			}
			if ((basek + sizek) <= 4 * 1024 * 1024) {
				sizek = 0;
			}
			else {
				uint64_t topmem2 = bsp_topmem2();
				basek = 4 * 1024 * 1024;
				sizek = topmem2 / 1024 - basek;
			}
		}

		ram_resource(dev, (idx | i), basek, sizek);
		idx += 0x10;
		printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
			i, mmio_basek, basek, limitk);
		if (!ramtop)
			ramtop = limitk * 1024;
	}

#if CONFIG_GFXUMA
	set_top_of_ram(uma_memory_base);
	uma_resource(dev, 7, uma_memory_base >> 10, uma_memory_size >> 10);
#else
	set_top_of_ram(ramtop);
#endif

	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			assign_resources(link);
		}
	}
}

static struct device_operations pci_domain_ops = {
	.read_resources = domain_read_resources,
	.set_resources = domain_set_resources,
	.enable_resources = domain_enable_resources,
	.init = DEVICE_NOOP,
	.scan_bus = pci_domain_scan_bus,
	.ops_pci_bus = pci_bus_default_ops,
};

static void sysconf_init(device_t dev) // first node
{
	sblink = (pci_read_config32(dev, 0x64) >> 8) & 7; // don't forget sublink1
	node_nums = ((pci_read_config32(dev, 0x60) >> 4) & 7) + 1; //NodeCnt[2:0]
}

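/*
 * Extend dev->link_list with zero-initialized links until the device
 * has total_links of them.
 */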
static void add_more_links(device_t dev, unsigned total_links)
{
	struct bus *link, *last = NULL;
	int link_num;

	for (link = dev->link_list; link; link = link->next)
		last = link;

	if (last) {
		int links = total_links - last->link_num;
		link_num = last->link_num;
		if (links > 0) {
			link = malloc(links * sizeof(*link));
			if (!link)
				die("Couldn't allocate more links!\n");
			memset(link, 0, links * sizeof(*link));
			last->next = link;
		}
	}
	else {
		link_num = -1;
		link = malloc(total_links * sizeof(*link));
		if (!link)
			die("Couldn't allocate more links!\n");
		memset(link, 0, total_links * sizeof(*link));
		dev->link_list = link;
	}

	for (link_num = link_num + 1; link_num < total_links; link_num++) {
		link->link_num = link_num;
		link->dev = dev;
		link->next = link + 1;
		last = link;
		link = link->next;
	}
	last->next = NULL;
}

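/*
 * Enumerate the nodes and cores that are present, derive each core's
 * APIC ID to match the AGESA v5 assignment, and add the CPU devices to
 * the cpu cluster.
 */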
static u32 cpu_bus_scan(device_t dev, u32 max)
{
	struct bus *cpu_bus;
	device_t dev_mc;
#if CONFIG_CBB
	device_t pci_domain;
#endif
	int i, j;
	int coreid_bits;
	int core_max = 0;
	unsigned ApicIdCoreIdSize;
	unsigned core_nums;
	int siblings = 0;
	unsigned int family;

#if CONFIG_CBB
	dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
	if (dev_mc && dev_mc->bus) {
		printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
		pci_domain = dev_mc->bus->dev;
		if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
			printk(BIOS_DEBUG, "\n%s move to ", dev_path(dev_mc));
			dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
			printk(BIOS_DEBUG, "%s", dev_path(dev_mc));
		} else {
			printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
		}
		printk(BIOS_DEBUG, "\n");
	}
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
		if (dev_mc && dev_mc->bus) {
			printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
			pci_domain = dev_mc->bus->dev;
			if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
				if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
					printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
					dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
					printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
					while (dev_mc) {
						printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
						dev_mc->path.pci.devfn -= PCI_DEVFN(0x18, 0);
						printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
						dev_mc = dev_mc->sibling;
					}
				}
			}
		}
	}
#endif
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
		die("");
	}
	sysconf_init(dev_mc);
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
	if (node_nums > 32) { // nodes 32 to 63 have to go on bus 0xfe
		if (pci_domain->link_list && !pci_domain->link_list->next) {
			struct bus *new_link = new_link(pci_domain);
			pci_domain->link_list->next = new_link;
			new_link->link_num = 1;
			new_link->dev = pci_domain;
			new_link->children = 0;
			printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
		}
		pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
	}
#endif

	/* Get Max Number of cores (MNC) */
	coreid_bits = (cpuid_ecx(AMD_CPUID_ASIZE_PCCOUNT) & 0x0000F000) >> 12;
	core_max = 1 << (coreid_bits & 0x000F); //mnc

	ApicIdCoreIdSize = ((cpuid_ecx(0x80000008) >> 12) & 0xF);
	if (ApicIdCoreIdSize) {
		core_nums = (1 << ApicIdCoreIdSize) - 1;
	} else {
		core_nums = 3; //quad core
	}

	/* Find which cpus are present */
	cpu_bus = dev->link_list;
	for (i = 0; i < node_nums; i++) {
		device_t cdb_dev;
		unsigned busn, devn;
		struct bus *pbus;

		busn = CONFIG_CBB;
		devn = CONFIG_CDB + i;
		pbus = dev_mc->bus;
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
		if (i >= 32) {
			busn--;
			devn -= 32;
			pbus = pci_domain->link_list->next;
		}
#endif

		/* Find the cpu's pci device */
		cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		if (!cdb_dev) {
			/* If I am probing things in a weird order,
			 * ensure all of the cpu's pci devices are found.
			 */
			int fn;
			for (fn = 0; fn <= 5; fn++) { //FBDIMM?
				cdb_dev = pci_probe_dev(NULL, pbus,
							PCI_DEVFN(devn, fn));
			}
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		} else {
			/* Ok, we need to set the links for that device;
			 * otherwise the devices under it will not be scanned.
			 */
			add_more_links(cdb_dev, 4);
		}

		family = cpuid_eax(1);
		family = (family >> 20) & 0xFF;
		if (family == 1) { //f10
			u32 dword;
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
			dword = pci_read_config32(cdb_dev, 0xe8);
			siblings = ((dword & BIT15) >> 13) | ((dword & (BIT13 | BIT12)) >> 12);
		} else if (family == 7) { //f16
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 5));
			if (cdb_dev && cdb_dev->enabled) {
				siblings = pci_read_config32(cdb_dev, 0x84);
				siblings &= 0xFF;
			}
		} else {
			siblings = 0; //default one core
		}
		int enable_node = cdb_dev && cdb_dev->enabled;
		printk(BIOS_SPEW, "%s family%xh, core_max=0x%x, core_nums=0x%x, siblings=0x%x\n",
			dev_path(cdb_dev), 0x0f + family, core_max, core_nums, siblings);

		for (j = 0; j <= siblings; j++) {
			extern CONST OPTIONS_CONFIG_TOPOLOGY ROMDATA TopologyConfiguration;
			u32 modules = TopologyConfiguration.PlatformNumberOfModules;
			u32 lapicid_start = 0;

			/*
			 * APIC ID calculation is tightly coupled with AGESA v5 code.
			 * This calculation MUST match the assignment calculation done
			 * in LocalApicInitializationAtEarly() function.
			 * And reference GetLocalApicIdForCore()
			 *
			 * Apply apic enumeration rules
			 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
			 * put the local-APICs at m..z
			 *
			 * This is needed because many IO-APIC devices only have 4 bits
			 * for their APIC id and therefore must reside at 0..15
			 */
#ifndef CFG_PLAT_NUM_IO_APICS /* defined in mainboard buildOpts.c */
#define CFG_PLAT_NUM_IO_APICS 3
#endif
			if ((node_nums * core_max) + CFG_PLAT_NUM_IO_APICS >= 0x10) {
				lapicid_start = (CFG_PLAT_NUM_IO_APICS - 1) / core_max;
				lapicid_start = (lapicid_start + 1) * core_max;
				printk(BIOS_SPEW, "lapicid_start=0x%x ", lapicid_start);
			}
			u32 apic_id = (lapicid_start * (i / modules + 1)) + ((i % modules) ? (j + (siblings + 1)) : j);
			printk(BIOS_SPEW, "node 0x%x core 0x%x apicid=0x%x\n",
				i, j, apic_id);

			device_t cpu = add_cpu_device(cpu_bus, apic_id, enable_node);
			if (cpu)
				amd_cpu_topology(cpu, i, j);
		} //j
	}
	return max;
}

static void cpu_bus_init(device_t dev)
{
	initialize_cpus(dev->link_list);
}

static struct device_operations cpu_bus_ops = {
	.read_resources = DEVICE_NOOP,
	.set_resources = DEVICE_NOOP,
	.enable_resources = DEVICE_NOOP,
	.init = cpu_bus_init,
	.scan_bus = cpu_bus_scan,
};

static void root_complex_enable_dev(struct device *dev)
{
	static int done = 0;

	/* Do not delay UMA setup, as a device on the PCI bus may evaluate
	   the global uma_memory variables already in its enable function. */
	if (!done) {
		setup_bsp_ramtop();
		setup_uma_memory();
		done = 1;
	}

	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

struct chip_operations northbridge_amd_agesa_family16kb_root_complex_ops = {
	CHIP_NAME("AMD FAM16 Root Complex")
	.enable_dev = root_complex_enable_dev,
};

/*********************************************************************
 * Change the vendor / device IDs to match the generic VBIOS header. *
 *********************************************************************/
u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev = vendev;

	switch (vendev) {
	case 0x10029830:
	case 0x10029831:
	case 0x10029832:
	case 0x10029833:
	case 0x10029834:
	case 0x10029835:
	case 0x10029836:
	case 0x10029837:
	case 0x10029838:
	case 0x10029839:
	case 0x1002983A:
	case 0x1002983D:
		new_vendev = 0x10029830; // This is the default value in AMD-generated VBIOS
		break;
	default:
		break;
	}

	if (vendev != new_vendev)
		printk(BIOS_NOTICE, "Mapping PCI device %8x to %8x\n", vendev, new_vendev);

	return new_vendev;
}