/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2012 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <console/console.h>
#include <arch/io.h>
#include <arch/acpi.h>
#include <stdint.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/hypertransport.h>
#include <stdlib.h>
#include <string.h>
#include <lib.h>
#include <cpu/cpu.h>
#include <cbmem.h>

#include <Porting.h>
#include <AGESA.h>
#include <FieldAccessors.h>
#include <Options.h>
#include <Topology.h>
#include <cpu/amd/amdfam16.h>
#include <cpuRegisters.h>
#include <northbridge/amd/pi/agesawrapper.h>
#include <northbridge/amd/pi/agesawrapper_call.h>
#include "northbridge.h"

#include <cpu/x86/lapic.h>
#include <cpu/amd/mtrr.h>

#define MAX_NODE_NUMS (MAX_NODES * MAX_DIES)

typedef struct dram_base_mask {
	u32 base; //[47:27] at [28:8]
	u32 mask; //[47:27] at [28:8] and enable at bit 0
} dram_base_mask_t;

static unsigned node_nums;
static unsigned sblink;
static device_t __f0_dev[MAX_NODE_NUMS];
static device_t __f1_dev[MAX_NODE_NUMS];
static device_t __f2_dev[MAX_NODE_NUMS];
static device_t __f4_dev[MAX_NODE_NUMS];
static unsigned fx_devs = 0;

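/*
 * Note: the helper below reassembles a node's DRAM base/limit from the
 * D18F1 registers referenced in its comments (0x40/0x44 for bits [39:24],
 * 0x140/0x144 for bits [47:40]) into the packed dram_base_mask_t layout
 * described above: address bits [47:27] stored at [28:8], enable at bit 0.
 */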
static dram_base_mask_t get_dram_base_mask(u32 nodeid)
{
	device_t dev;
	dram_base_mask_t d;
	dev = __f1_dev[0];
	u32 temp;
	temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask = ((temp & 0xfff80000)>>(8+3)); // mask out DramMask [26:24] too
	temp = pci_read_config32(dev, 0x144 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.mask |= temp<<21;
	temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask |= (temp & 1); // enable bit
	d.base = ((temp & 0xfff80000)>>(8+3)); // mask out DramBase [26:24] too
	temp = pci_read_config32(dev, 0x140 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.base |= temp<<21;
	return d;
}

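/*
 * The two helpers below program a D18F1 base/limit routing pair on every
 * node: the limit half is written first at reg+4, then the base half at
 * reg. The low bits of the base value (the literal 3) are likely the
 * read/write enable bits, so the range only becomes active once both
 * halves have been written.
 */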
static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
			u32 io_min, u32 io_max)
{
	u32 i;
	u32 tempreg;
	/* io range allocation */
	tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg+4, tempreg);
	tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
#if 0
	// FIXME: can we use VGA reg instead?
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
		printk(BIOS_SPEW, "%s, enabling legacy VGA IO forwarding for %s link %s\n",
			__func__, dev_path(dev), link);
		tempreg |= PCI_IO_BASE_VGA_EN;
	}
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
		tempreg |= PCI_IO_BASE_NO_ISA;
	}
#endif
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}

static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
{
	u32 i;
	u32 tempreg;
	/* mmio range allocation */
	tempreg = (nodeid&0xf) | (linkn<<4) | (mmio_max&0xffffff00); //limit
	for (i = 0; i < nodes; i++)
		pci_write_config32(__f1_dev[i], reg+4, tempreg);
	tempreg = 3 | (nodeid & 0x30) | (mmio_min&0xffffff00);
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}


static device_t get_node_pci(u32 nodeid, u32 fn)
{
#if MAX_NODE_NUMS + CONFIG_CDB >= 32
	if ((CONFIG_CDB + nodeid) < 32) {
		return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
	} else {
		return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
	}
#else
	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
#endif
}

static void get_fx_devs(void)
{
	int i;
	for (i = 0; i < MAX_NODE_NUMS; i++) {
		__f0_dev[i] = get_node_pci(i, 0);
		__f1_dev[i] = get_node_pci(i, 1);
		__f2_dev[i] = get_node_pci(i, 2);
		__f4_dev[i] = get_node_pci(i, 4);
		if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
			fx_devs = i+1;
	}
	if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
		die("Cannot find 0:0x18.[0|1]\n");
	}
	printk(BIOS_DEBUG, "fx_devs=0x%x\n", fx_devs);
}

static u32 f1_read_config32(unsigned reg)
{
	if (fx_devs == 0)
		get_fx_devs();
	return pci_read_config32(__f1_dev[0], reg);
}

static void f1_write_config32(unsigned reg, u32 value)
{
	int i;
	if (fx_devs == 0)
		get_fx_devs();
	for (i = 0; i < fx_devs; i++) {
		device_t dev;
		dev = __f1_dev[i];
		if (dev && dev->enabled) {
			pci_write_config32(dev, reg, value);
		}
	}
}

static u32 amdfam16_nodeid(device_t dev)
{
#if MAX_NODE_NUMS == 64
	unsigned busn;
	busn = dev->bus->secondary;
	if (busn != CONFIG_CBB) {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
	} else {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
	}

#else
	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
#endif
}

static void set_vga_enable_reg(u32 nodeid, u32 linkn)
{
	u32 val;

	val = 1 | (nodeid<<4) | (linkn<<12);
	/* This routes
	 * (1) mmio 0xa0000:0xbffff
	 * (2) io 0x3b0:0x3bb, 0x3c0:0x3df
	 * to the given node and link.
	 */
	f1_write_config32(0xf4, val);
}

/**
 * @return
 * @retval 2  resource does not exist, usable
 * @retval 0  resource exists, not usable
 * @retval 1  resource exists and has been allocated before
 */
static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
			unsigned goal_link)
{
	struct resource *res;
	unsigned nodeid, link = 0;
	int result;
	res = 0;
	for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
		device_t dev;
		dev = __f0_dev[nodeid];
		if (!dev)
			continue;
		for (link = 0; !res && (link < 8); link++) {
			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
		}
	}
	result = 2;
	if (res) {
		result = 0;
		if ((goal_link == (link - 1)) &&
			(goal_nodeid == (nodeid - 1)) &&
			(res->flags <= 1)) {
			result = 1;
		}
	}
	return result;
}

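/*
 * The F1 routing registers come in 8-byte base/limit pairs: MMIO pairs at
 * 0x80-0xb8 and I/O pairs at 0xc0-0xd8. The two helpers below walk their
 * range with reg_useable() and return either the pair already allocated
 * to this device/link or the first free one (register 0 if none is free).
 */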
static struct resource *amdfam16_find_iopair(device_t dev, unsigned nodeid, unsigned link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xd8) {
		reg = free_reg; // if there was no free pair, free_reg is still 0
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));

	return resource;
}

static struct resource *amdfam16_find_mempair(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xb8) {
		reg = free_reg;
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
	return resource;
}

static void amdfam16_link_read_bases(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;

	/* Initialize the io space constraints on the current bus */
	resource = amdfam16_find_iopair(dev, nodeid, link);
	if (resource) {
		u32 align;
		align = log2(HT_IO_HOST_ALIGN);
		resource->base = 0;
		resource->size = 0;
		resource->align = align;
		resource->gran = align;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
	}

	/* Initialize the prefetchable memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		resource->flags |= IORESOURCE_BRIDGE;
	}

	/* Initialize the memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
	}

}

static void read_resources(device_t dev)
{
	u32 nodeid;
	struct bus *link;

	nodeid = amdfam16_nodeid(dev);
	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			amdfam16_link_read_bases(dev, nodeid, link->link_num);
		}
	}
}

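/*
 * Write one allocated bridge resource back to the F1 routing registers.
 * The resource index encodes the register offset (low 12 bits, biased by
 * 0x1000) and the link number, so only resources created by
 * amdfam16_link_read_bases()/domain_read_resources() are handled here.
 */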
static void set_resource(device_t dev, struct resource *resource, u32 nodeid)
{
	resource_t rbase, rend;
	unsigned reg, link_num;
	char buf[50];

	/* Make certain the resource has actually been set */
	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
		return;
	}

	/* If I have already stored this resource don't worry about it */
	if (resource->flags & IORESOURCE_STORED) {
		return;
	}

	/* Only handle PCI memory and IO resources */
	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
		return;

	/* Ensure I am actually looking at a resource of function 1 */
	if ((resource->index & 0xffff) < 0x1000) {
		return;
	}
	/* Get the base address */
	rbase = resource->base;

	/* Get the limit (rounded up) */
	rend = resource_end(resource);

	/* Get the register and link */
	reg = resource->index & 0xfff; // 4k
	link_num = IOINDEX_LINK(resource->index);

	if (resource->flags & IORESOURCE_IO) {
		set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
	}
	else if (resource->flags & IORESOURCE_MEM) {
		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, node_nums); // [39:8]
	}
	resource->flags |= IORESOURCE_STORED;
	snprintf(buf, sizeof (buf), " <node %x link %x>",
		nodeid, link_num);
	report_resource_stored(dev, resource, buf);
}

/**
 * I tried to reuse the resource allocation code in set_resource()
 * but it is too difficult to deal with the resource allocation magic.
 */

static void create_vga_resource(device_t dev, unsigned nodeid)
{
	struct bus *link;

	/* find out which link the VGA card is connected to;
	 * we only deal with the 'first' VGA card */
	for (link = dev->link_list; link; link = link->next) {
		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
#if CONFIG_MULTIPLE_VGA_ADAPTERS
			extern device_t vga_pri; // the primary vga device, defined in device.c
			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
				link->secondary, link->subordinate);
			/* We need to make sure the vga_pri is under the link */
			if ((vga_pri->bus->secondary >= link->secondary) &&
			    (vga_pri->bus->secondary <= link->subordinate))
#endif
			break;
		}
	}

	/* no VGA card installed */
	if (link == NULL)
		return;

	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, sblink);
	set_vga_enable_reg(nodeid, sblink);
}

static void set_resources(device_t dev)
{
	unsigned nodeid;
	struct bus *bus;
	struct resource *res;

	/* Find the nodeid */
	nodeid = amdfam16_nodeid(dev);

	create_vga_resource(dev, nodeid); //TODO: do we need this?

	/* Set each resource we have found */
	for (res = dev->resource_list; res; res = res->next) {
		set_resource(dev, res, nodeid);
	}

	for (bus = dev->link_list; bus; bus = bus->next) {
		if (bus->children) {
			assign_resources(bus);
		}
	}
}

static void northbridge_init(struct device *dev)
{
}
#if 0 /* TODO: Check if needed. */
static unsigned scan_chains(device_t dev, unsigned max)
{
	unsigned nodeid;
	struct bus *link;
	device_t io_hub = NULL;
	u32 next_unitid = 0x18;
	nodeid = amdfam16_nodeid(dev);
	if (nodeid == 0) {
		for (link = dev->link_list; link; link = link->next) {
			//if (link->link_num == sblink) { /* devicetree put IO Hub on link_list[sblink] */
			if (link->link_num == 0) { /* devicetree put IO Hub on link_list[0] */
				io_hub = link->children;
				if (!io_hub || !io_hub->enabled) {
					die("I can't find the IO Hub, or IO Hub not enabled, please check the device tree.\n");
				}
				/* Now that nothing is overlapping it is safe to scan the children. */
				max = pci_scan_bus(link, 0x00, ((next_unitid - 1) << 3) | 7, 0);
			}
		}
	}
	return max;
}
#endif
static struct device_operations northbridge_operations = {
	.read_resources = read_resources,
	.set_resources = set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = northbridge_init,
	//.scan_bus = scan_chains, /* TODO: */
	.enable = 0,
	.ops_pci = 0,
};

static const struct pci_driver family16_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_16H_MODEL_003F_NB_HT,
};

static const struct pci_driver family10_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_10H_NB_HT,
};

struct chip_operations northbridge_amd_pi_00730F01_ops = {
	CHIP_NAME("AMD FAM16 Northbridge")
	.enable_dev = 0,
};

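/*
 * Walk the F1 base/limit pairs (0x80-0xd8) for ranges that are already
 * enabled, presumably by earlier boot stages, and mirror each one as a
 * reserved resource on the owning node so the allocator will not hand the
 * same register pair out again.
 */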
static void domain_read_resources(device_t dev)
{
	unsigned reg;

	/* Find the already assigned resource pairs */
	get_fx_devs();
	for (reg = 0x80; reg <= 0xd8; reg += 0x08) {
		u32 base, limit;
		base = f1_read_config32(reg);
		limit = f1_read_config32(reg + 0x04);
		/* Is this register allocated? */
		if ((base & 3) != 0) {
			unsigned nodeid, reg_link;
			device_t reg_dev;
			if (reg < 0xc0) { // mmio
				nodeid = (limit & 0xf) + (base&0x30);
			} else { // io
				nodeid = (limit & 0xf) + ((base>>4)&0x30);
			}
			reg_link = (limit >> 4) & 7;
			reg_dev = __f0_dev[nodeid];
			if (reg_dev) {
				/* Reserve the resource */
				struct resource *res;
				res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
				if (res) {
					res->flags = 1;
				}
			}
		}
	}
	/* FIXME: do we need to check the extended config space?
	 * The preset value is not to be trusted much. */

#if !CONFIG_PCI_64BIT_PREF_MEM
	pci_domain_read_resources(dev);

#else
	struct bus *link;
	struct resource *resource;
	for (link = dev->link_list; link; link = link->next) {
		/* Initialize the system wide io space constraints */
		resource = new_resource(dev, 0|(link->link_num<<2));
		resource->base = 0x400;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO;

		/* Initialize the system wide prefetchable memory resources constraints */
		resource = new_resource(dev, 1|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

		/* Initialize the system wide memory resources constraints */
		resource = new_resource(dev, 2|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM;
	}
#endif
}

static void domain_enable_resources(device_t dev)
{
	if (acpi_is_wakeup_s3())
		AGESAWRAPPER(fchs3laterestore);

	/* Must be called after PCI enumeration and resource allocation */
	if (!acpi_is_wakeup_s3())
		AGESAWRAPPER(amdinitmid);

	printk(BIOS_DEBUG, " ader - leaving domain_enable_resources.\n");
}

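/*
 * Hardware memory hole handling (CONFIG_HW_MEM_HOLE_SIZEK != 0 only):
 * D18F1 register 0xf0 advertises an enabled DRAM hole together with its
 * start address, read back here in KB; if no node advertises one, the
 * DRAM base/limit pairs are scanned for a discontinuity below 4GB instead.
 */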
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
struct hw_mem_hole_info {
	unsigned hole_startk;
	int node_id;
};
static struct hw_mem_hole_info get_hw_mem_hole_info(void)
{
	struct hw_mem_hole_info mem_hole;
	int i;
	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
	mem_hole.node_id = -1;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		u32 hole;
		d = get_dram_base_mask(i);
		if (!(d.mask & 1)) continue; // no memory on this node
		hole = pci_read_config32(__f1_dev[i], 0xf0);
		if (hole & 2) { // we found the hole
			mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
			mem_hole.node_id = i; // record the node number with the hole
			break; // only one hole
		}
	}

	/* Double check the special case where the base and limit registers
	 * are not contiguous: treat that gap as the hole and derive
	 * hole_startk from it.
	 */
	if (mem_hole.node_id == -1) {
		resource_t limitk_pri = 0;
		for (i = 0; i < node_nums; i++) {
			dram_base_mask_t d;
			resource_t base_k, limit_k;
			d = get_dram_base_mask(i);
			if (!(d.base & 1)) continue;
			base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
			if (base_k > 4 *1024 * 1024) break; // no need to check above 4G
			if (limitk_pri != base_k) { // we found the hole
				mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
				mem_hole.node_id = i;
				break; // only one hole
			}
			limit_k = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;
			limitk_pri = limit_k;
		}
	}
	return mem_hole;
}
#endif

#define ONE_MB_SHIFT 20
#define ONE_GB_SHIFT 30

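/*
 * Carve the UMA frame buffer out of the top of below-4GB DRAM. The size
 * follows the UMA_AUTO table below (e.g. a system reporting >= 6GB of RAM
 * gets a 1024MB UMA region) and the base is TOP_MEM1 minus that size.
 */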
static void setup_uma_memory(void)
{
#if CONFIG_GFXUMA
	uint64_t topmem = bsp_topmem();
	uint64_t topmem2 = bsp_topmem2();
	uint32_t sysmem_mb, sysmem_gb;

	/* refer to UMA_AUTO size computation in Family16h BKDG. */
	/* Please reference MemNGetUmaSizeML() */
	/*
	 *  Total system memory   UMASize
	 *  >= 6G                 1024M
	 *  >= 4G                 512M
	 *  >= 2G                 256M
	 *  < 2G                  128M
	 */

	sysmem_mb = (topmem + (16ull << ONE_MB_SHIFT)) >> ONE_MB_SHIFT; // Ignore 16MB allocated for C6 when finding UMA size
	sysmem_mb += topmem2 ? ((topmem2 >> ONE_MB_SHIFT) - 4096) : 0;
	sysmem_gb = sysmem_mb >> (ONE_GB_SHIFT - ONE_MB_SHIFT);
	printk(BIOS_SPEW, "%s: system memory size %luGB, topmem2 size %lluMB, topmem size %lluMB\n",
		__func__, (unsigned long)sysmem_gb, (topmem2 >> ONE_MB_SHIFT), (topmem >> ONE_MB_SHIFT));
	if (sysmem_gb >= 6) {
		uma_memory_size = 1024 << ONE_MB_SHIFT;
	} else if (sysmem_gb >= 4) {
		uma_memory_size = 512 << ONE_MB_SHIFT;
	} else if (sysmem_gb >= 2) {
		uma_memory_size = 256 << ONE_MB_SHIFT;
	} else {
		uma_memory_size = 128 << ONE_MB_SHIFT;
	}
	uma_memory_base = topmem - uma_memory_size; /* TOP_MEM1 */

	printk(BIOS_INFO, "%s: uma size 0x%08llx, memory start 0x%08llx\n",
		__func__, uma_memory_size, uma_memory_base);

	/* TODO: TOP_MEM2 */
#endif
}

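/*
 * Report DRAM to the resource allocator: derive the MMIO base from the
 * PCI top-of-low-memory (rounded down to 64MB), optionally reuse the
 * hardware memory hole as that boundary, then emit ram_resource() entries
 * per node, skipping the legacy VGA hole at 0xa0000-0xbffff and splitting
 * any node that straddles the MMIO hole.
 */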
static void domain_set_resources(device_t dev)
{
#if CONFIG_PCI_64BIT_PREF_MEM
	struct resource *io, *mem1, *mem2;
	struct resource *res;
#endif
	unsigned long mmio_basek;
	u32 pci_tolm;
	u64 ramtop = 0;
	int i, idx;
	struct bus *link;
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	struct hw_mem_hole_info mem_hole;
	u32 reset_memhole = 1;
#endif

#if CONFIG_PCI_64BIT_PREF_MEM

	for (link = dev->link_list; link; link = link->next) {
		/* Now reallocate the pci resources memory with the
		 * highest addresses I can manage.
		 */
		mem1 = find_resource(dev, 1|(link->link_num<<2));
		mem2 = find_resource(dev, 2|(link->link_num<<2));

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);

		/* See if both resources have roughly the same limits */
		if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
			((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
		{
			/* If so place the one with the most stringent alignment first */
			if (mem2->align > mem1->align) {
				struct resource *tmp;
				tmp = mem1;
				mem1 = mem2;
				mem2 = tmp;
			}
			/* Now place the memory as high up as it will go */
			mem2->base = resource_max(mem2);
			mem1->limit = mem2->base - 1;
			mem1->base = resource_max(mem1);
		}
		else {
			/* Place the resources as high up as they will go */
			mem2->base = resource_max(mem2);
			mem1->base = resource_max(mem1);
		}

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);
	}

	for (res = &dev->resource_list; res; res = res->next)
	{
		res->flags |= IORESOURCE_ASSIGNED;
		res->flags |= IORESOURCE_STORED;
		report_resource_stored(dev, res, "");
	}
#endif

	pci_tolm = 0xffffffffUL;
	for (link = dev->link_list; link; link = link->next) {
		pci_tolm = find_pci_tolm(link);
	}

	// FIXME handle interleaved nodes. If you fix this here, please fix
	// amdk8, too.
	mmio_basek = pci_tolm >> 10;
	/* Round mmio_basek to something the processor can support */
	mmio_basek &= ~((1 << 6) -1);

	// FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
	// MMIO hole. If you fix this here, please fix amdk8, too.
	/* Round the mmio hole to 64M */
	mmio_basek &= ~((64*1024) - 1);

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	/* If the hw mem hole was already set up in the raminit stage, compare
	 * mmio_basek and hole_startk here. If mmio_basek is above hole_startk,
	 * use hole_startk as mmio_basek and leave the hole alone; otherwise
	 * the hole is reset to mmio_basek.
	 */

	mem_hole = get_hw_mem_hole_info();

	// Use hole_startk as mmio_basek, and we don't need to reset the hole anymore
	if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
		mmio_basek = mem_hole.hole_startk;
		reset_memhole = 0;
	}
#endif

	idx = 0x10;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		resource_t basek, limitk, sizek; // 4 1T

		d = get_dram_base_mask(i);

		if (!(d.mask & 1)) continue;
		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow; we may lose the upper 6 bits here
		limitk = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;

		sizek = limitk - basek;

		/* see if we need a hole from 0xa0000 to 0xbffff */
		if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
			ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
			idx += 0x10;
			basek = (8*64)+(16*16);
			sizek = limitk - ((8*64)+(16*16));

		}

		//printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);

		/* split the region to accommodate pci memory space */
		if ((basek < 4*1024*1024) && (limitk > mmio_basek)) {
			if (basek <= mmio_basek) {
				unsigned pre_sizek;
				pre_sizek = mmio_basek - basek;
				if (pre_sizek > 0) {
					ram_resource(dev, (idx | i), basek, pre_sizek);
					idx += 0x10;
					sizek -= pre_sizek;
					if (!ramtop)
						ramtop = mmio_basek * 1024;
				}
				basek = mmio_basek;
			}
			if ((basek + sizek) <= 4*1024*1024) {
				sizek = 0;
			}
			else {
				uint64_t topmem2 = bsp_topmem2();
				basek = 4*1024*1024;
				sizek = topmem2/1024 - basek;
			}
		}

		ram_resource(dev, (idx | i), basek, sizek);
		idx += 0x10;
		printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
			i, mmio_basek, basek, limitk);
		if (!ramtop)
			ramtop = limitk * 1024;
	}

#if CONFIG_GFXUMA
	set_top_of_ram(uma_memory_base);
	uma_resource(dev, 7, uma_memory_base >> 10, uma_memory_size >> 10);
#else
	set_top_of_ram(ramtop);
#endif

	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			assign_resources(link);
		}
	}
}

static struct device_operations pci_domain_ops = {
	.read_resources = domain_read_resources,
	.set_resources = domain_set_resources,
	.enable_resources = domain_enable_resources,
	.init = NULL,
	.scan_bus = pci_domain_scan_bus,
	.ops_pci_bus = pci_bus_default_ops,
};

static void sysconf_init(device_t dev) // first node
{
	sblink = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
	node_nums = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1; //NodeCnt[2:0]
}

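/*
 * Make sure dev->link_list holds at least total_links entries; any missing
 * links are allocated in one block and chained onto the end of the list.
 */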
static void add_more_links(device_t dev, unsigned total_links)
{
	struct bus *link, *last = NULL;
	int link_num;

	for (link = dev->link_list; link; link = link->next)
		last = link;

	if (last) {
		int links = total_links - last->link_num;
		link_num = last->link_num;
		if (links > 0) {
			link = malloc(links*sizeof(*link));
			if (!link)
				die("Couldn't allocate more links!\n");
			memset(link, 0, links*sizeof(*link));
			last->next = link;
		}
	}
	else {
		link_num = -1;
		link = malloc(total_links*sizeof(*link));
		if (!link)
			die("Couldn't allocate more links!\n");
		memset(link, 0, total_links*sizeof(*link));
		dev->link_list = link;
	}

	for (link_num = link_num + 1; link_num < total_links; link_num++) {
		link->link_num = link_num;
		link->dev = dev;
		link->next = link + 1;
		last = link;
		link = link->next;
	}
	last->next = NULL;
}

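/*
 * Enumerate the CPU cores under the CPU cluster: query AGESA for the
 * module and IO-APIC counts, derive the per-node core limits from CPUID
 * leaf 0x80000008, then add one lapic device per present core. Once the
 * total APIC count reaches 16, local APIC IDs are placed above the
 * IO-APIC IDs to match the AGESA LocalApicInitializationAtEarly()
 * numbering.
 */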
static u32 cpu_bus_scan(device_t dev, u32 max)
{
	struct bus *cpu_bus;
	device_t dev_mc;
#if CONFIG_CBB
	device_t pci_domain;
#endif
	int i, j;
	int coreid_bits;
	int core_max = 0;
	unsigned ApicIdCoreIdSize;
	unsigned core_nums;
	int siblings = 0;
	unsigned int family;
	u32 modules = 0;
	VOID* modules_ptr = &modules;
	BUILD_OPT_CFG* options = NULL;
	int ioapic_count = 0;

	// TODO Remove the printk's.
	printk(BIOS_SPEW, "MullinsPI Debug: Grabbing the AMD Topology Information.\n");
	AmdGetValue(AMD_GLOBAL_USER_OPTIONS, (VOID**)&options, sizeof(options));
	AmdGetValue(AMD_GLOBAL_NUM_MODULES, &modules_ptr, sizeof(modules));
	modules = *(u32*)modules_ptr;
	ASSERT(modules > 0);
	ASSERT(options);
	ioapic_count = (int)options->CfgPlatNumIoApics;
	ASSERT(ioapic_count > 0);
	printk(BIOS_SPEW, "MullinsPI Debug: AMD Topology Number of Modules (@0x%p) is %d\n", modules_ptr, modules);
	printk(BIOS_SPEW, "MullinsPI Debug: AMD Topology Number of IOAPICs (@0x%p) is %d\n", options, (int)options->CfgPlatNumIoApics);

#if CONFIG_CBB
	dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
	if (dev_mc && dev_mc->bus) {
		printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
		pci_domain = dev_mc->bus->dev;
		if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
			printk(BIOS_DEBUG, "\n%s move to ", dev_path(dev_mc));
			dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
			printk(BIOS_DEBUG, "%s", dev_path(dev_mc));
		} else {
			printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
		}
		printk(BIOS_DEBUG, "\n");
	}
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
		if (dev_mc && dev_mc->bus) {
			printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
			pci_domain = dev_mc->bus->dev;
			if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
				if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
					printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
					dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
					printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
					while (dev_mc) {
						printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
						dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
						printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
						dev_mc = dev_mc->sibling;
					}
				}
			}
		}
	}
#endif
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
		die("");
	}
	sysconf_init(dev_mc);
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
	if (node_nums > 32) { // need to put nodes 32..63 on bus 0xfe
		if (pci_domain->link_list && !pci_domain->link_list->next) {
			struct bus *next_link = new_link(pci_domain);
			pci_domain->link_list->next = next_link;
			next_link->link_num = 1;
			next_link->dev = pci_domain;
			next_link->children = 0;
			printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
		}
		pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
	}
#endif

	/* Get Max Number of cores (MNC) */
	coreid_bits = (cpuid_ecx(AMD_CPUID_ASIZE_PCCOUNT) & 0x0000F000) >> 12;
	core_max = 1 << (coreid_bits & 0x000F); //mnc

	ApicIdCoreIdSize = ((cpuid_ecx(0x80000008)>>12) & 0xF);
	if (ApicIdCoreIdSize) {
		core_nums = (1 << ApicIdCoreIdSize) - 1;
	} else {
		core_nums = 3; //quad core
	}

	/* Find which cpus are present */
	cpu_bus = dev->link_list;
	for (i = 0; i < node_nums; i++) {
		device_t cdb_dev;
		unsigned busn, devn;
		struct bus *pbus;

		busn = CONFIG_CBB;
		devn = CONFIG_CDB + i;
		pbus = dev_mc->bus;
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
		if (i >= 32) {
			busn--;
			devn -= 32;
			pbus = pci_domain->link_list->next;
		}
#endif

		/* Find the cpu's pci device */
		cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		if (!cdb_dev) {
			/* If I am probing things in a weird order
			 * ensure all of the cpu's pci devices are found.
			 */
			int fn;
			for (fn = 0; fn <= 5; fn++) { //FBDIMM?
				cdb_dev = pci_probe_dev(NULL, pbus,
					PCI_DEVFN(devn, fn));
			}
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		} else {
			/* Ok, We need to set the links for that device.
			 * otherwise the device under it will not be scanned
			 */
			int linknum;
#if CONFIG_HT3_SUPPORT
			linknum = 8;
#else
			linknum = 4;
#endif
			add_more_links(cdb_dev, linknum);
		}

		family = cpuid_eax(1);
		family = (family >> 20) & 0xFF;
		if (family == 1) { //f10
			u32 dword;
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
			dword = pci_read_config32(cdb_dev, 0xe8);
			siblings = ((dword & BIT15) >> 13) | ((dword & (BIT13 | BIT12)) >> 12);
		} else if (family == 7) { //f16
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 5));
			if (cdb_dev && cdb_dev->enabled) {
				siblings = pci_read_config32(cdb_dev, 0x84);
				siblings &= 0xFF;
			}
		} else {
			siblings = 0; //default one core
		}
		int enable_node = cdb_dev && cdb_dev->enabled;
		printk(BIOS_SPEW, "%s family%xh, core_max=0x%x, core_nums=0x%x, siblings=0x%x\n",
			dev_path(cdb_dev), 0x0f + family, core_max, core_nums, siblings);

		for (j = 0; j <= siblings; j++) {
			u32 lapicid_start = 0;

			/*
			 * APIC ID calculation is tightly coupled with AGESA v5 code.
			 * This calculation MUST match the assignment calculation done
			 * in LocalApicInitializationAtEarly() function.
			 * And reference GetLocalApicIdForCore()
			 *
			 * Apply apic enumeration rules
			 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
			 * put the local-APICs at m..z
			 *
			 * This is needed because many IO-APIC devices only have 4 bits
			 * for their APIC id and therefore must reside at 0..15
			 */
			if ((node_nums * core_max) + ioapic_count >= 0x10) {
				lapicid_start = (ioapic_count - 1) / core_max;
				lapicid_start = (lapicid_start + 1) * core_max;
				printk(BIOS_SPEW, "lapicid_start=0x%x ", lapicid_start);
			}
			u32 apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (siblings + 1)) : j);
			printk(BIOS_SPEW, "node 0x%x core 0x%x apicid=0x%x\n",
				i, j, apic_id);

			device_t cpu = add_cpu_device(cpu_bus, apic_id, enable_node);
			if (cpu)
				amd_cpu_topology(cpu, i, j);
		} //j
	}
	return max;
}

static void cpu_bus_init(device_t dev)
{
	initialize_cpus(dev->link_list);
}

static void cpu_bus_read_resources(device_t dev)
{
#if CONFIG_MMCONF_SUPPORT
	struct resource *resource = new_resource(dev, 0xc0010058);
	resource->base = CONFIG_MMCONF_BASE_ADDRESS;
	resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
	resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
#endif
}

static void cpu_bus_set_resources(device_t dev)
{
	struct resource *resource = find_resource(dev, 0xc0010058);
	if (resource) {
		report_resource_stored(dev, resource, " <mmconfig>");
	}
	pci_dev_set_resources(dev);
}

static struct device_operations cpu_bus_ops = {
	.read_resources = cpu_bus_read_resources,
	.set_resources = cpu_bus_set_resources,
	.enable_resources = DEVICE_NOOP,
	.init = cpu_bus_init,
	.scan_bus = cpu_bus_scan,
};

static void root_complex_enable_dev(struct device *dev)
{
	static int done = 0;

	/* Do not delay UMA setup, as a device on the PCI bus may evaluate
	   the global uma_memory variables already in its enable function. */
	if (!done) {
		setup_bsp_ramtop();
		setup_uma_memory();
		done = 1;
	}

	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

struct chip_operations northbridge_amd_pi_00730F01_root_complex_ops = {
	CHIP_NAME("AMD FAM16 Root Complex")
	.enable_dev = root_complex_enable_dev,
};

/*********************************************************************
 * Change the vendor / device IDs to match the generic VBIOS header. *
 *********************************************************************/
u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev;
	new_vendev =
		((0x10029850 <= vendev) && (vendev <= 0x1002986F)) ? 0x10029850 : vendev;

	if (vendev != new_vendev)
		printk(BIOS_NOTICE, "Mapping PCI device %8x to %8x\n", vendev, new_vendev);

	return new_vendev;
}