 1/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2012 Advanced Micro Devices, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20#include <console/console.h>
21#include <arch/io.h>
22#include <stdint.h>
23#include <device/device.h>
24#include <device/pci.h>
25#include <device/pci_ids.h>
26#include <device/hypertransport.h>
27#include <stdlib.h>
28#include <string.h>
29#include <lib.h>
30#include <cpu/cpu.h>
31#include <cbmem.h>
32
33#include <cpu/x86/lapic.h>
34#include <cpu/amd/mtrr.h>
35
36#include <Porting.h>
37#include <AGESA.h>
38#include <Options.h>
39#include <Topology.h>
40#include <cpu/amd/amdfam16.h>
41#include <cpuRegisters.h>
42#include "agesawrapper.h"
43#include "northbridge.h"
44
45#define MAX_NODE_NUMS (MAX_NODES * MAX_DIES)
46
47#if (defined CONFIG_EXT_CONF_SUPPORT) && CONFIG_EXT_CONF_SUPPORT == 1
 48#error CONFIG_EXT_CONF_SUPPORT == 1 is not supported anymore!
49#endif
50
51typedef struct dram_base_mask {
52 u32 base; //[47:27] at [28:8]
53 u32 mask; //[47:27] at [28:8] and enable at bit 0
54} dram_base_mask_t;
55
56static unsigned node_nums;
57static unsigned sblink;
58static device_t __f0_dev[MAX_NODE_NUMS];
59static device_t __f1_dev[MAX_NODE_NUMS];
60static device_t __f2_dev[MAX_NODE_NUMS];
61static device_t __f4_dev[MAX_NODE_NUMS];
62static unsigned fx_devs = 0;
63
64static dram_base_mask_t get_dram_base_mask(u32 nodeid)
65{
66 device_t dev;
67 dram_base_mask_t d;
68 dev = __f1_dev[0];
69 u32 temp;
70 temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
71 d.mask = ((temp & 0xfff80000)>>(8+3)); // mask out DramMask [26:24] too
72 temp = pci_read_config32(dev, 0x144 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
73 d.mask |= temp<<21;
74 temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
75 d.mask |= (temp & 1); // enable bit
 76	d.base = ((temp & 0xfff80000)>>(8+3)); // mask out DramBase [26:24] too
77 temp = pci_read_config32(dev, 0x140 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
78 d.base |= temp<<21;
79 return d;
80}
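/*
 * Worked example (illustrative values, not taken from real hardware): a node
 * whose DRAM starts at 4 GiB has DramBase[47:27] = 0x20, so d.base reads back
 * as 0x20 << 8 = 0x2000, and bit 0 of d.mask is the enable bit.
 * domain_set_resources() later recovers the base in KiB as
 * (0x2000 & 0x1fffff00) << 9 = 0x400000 KiB = 4 GiB.
 */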
81
82static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
83 u32 io_min, u32 io_max)
84{
85 u32 i;
86 u32 tempreg;
87 /* io range allocation */
88 tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
89 for (i=0; i<node_nums; i++)
90 pci_write_config32(__f1_dev[i], reg+4, tempreg);
91 tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
92#if 0
93 // FIXME: can we use VGA reg instead?
94 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
95 printk(BIOS_SPEW, "%s, enabling legacy VGA IO forwarding for %s link %s\n",
96 __func__, dev_path(dev), link);
97 tempreg |= PCI_IO_BASE_VGA_EN;
98 }
99 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
100 tempreg |= PCI_IO_BASE_NO_ISA;
101 }
102#endif
103 for (i=0; i<node_nums; i++)
104 pci_write_config32(__f1_dev[i], reg, tempreg);
105}
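/*
 * Note on the values programmed above (derived from this code, not quoted from
 * the BKDG): the constant 3 in the base word sets the read/write enable bits of
 * the D18F1 I/O base register, and io_min/io_max arrive from set_resource()
 * already shifted right by 8, so the 0xf0 nibble here corresponds to I/O
 * address bits [15:12].
 */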
106
107static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
108{
109 u32 i;
110 u32 tempreg;
 111	/* mmio range allocation */
112 tempreg = (nodeid&0xf) | (linkn<<4) | (mmio_max&0xffffff00); //limit
113 for (i=0; i<nodes; i++)
114 pci_write_config32(__f1_dev[i], reg+4, tempreg);
115 tempreg = 3 | (nodeid & 0x30) | (mmio_min&0xffffff00);
116 for (i=0; i<node_nums; i++)
117 pci_write_config32(__f1_dev[i], reg, tempreg);
118}
119
120static device_t get_node_pci(u32 nodeid, u32 fn)
121{
122#if MAX_NODE_NUMS + CONFIG_CDB >= 32
123 if ((CONFIG_CDB + nodeid) < 32) {
124 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
125 } else {
126 return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
127 }
128#else
129 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
130#endif
131}
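/*
 * Example (typical Kconfig values, assumed here for illustration): with
 * CONFIG_CBB = 0 and CONFIG_CDB = 0x18, node 0 function 1 is PCI 00:18.1 and
 * node 1 function 1 is 00:19.1. The CONFIG_CBB - 1 fallback only applies to
 * configurations with more than 32 nodes.
 */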
132
133static void get_fx_devs(void)
134{
135 int i;
136 for (i = 0; i < MAX_NODE_NUMS; i++) {
137 __f0_dev[i] = get_node_pci(i, 0);
138 __f1_dev[i] = get_node_pci(i, 1);
139 __f2_dev[i] = get_node_pci(i, 2);
140 __f4_dev[i] = get_node_pci(i, 4);
141 if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
142 fx_devs = i+1;
143 }
144 if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
145 die("Cannot find 0:0x18.[0|1]\n");
146 }
147 printk(BIOS_DEBUG, "fx_devs=0x%x\n", fx_devs);
148}
149
150static u32 f1_read_config32(unsigned reg)
151{
152 if (fx_devs == 0)
153 get_fx_devs();
154 return pci_read_config32(__f1_dev[0], reg);
155}
156
157static void f1_write_config32(unsigned reg, u32 value)
158{
159 int i;
160 if (fx_devs == 0)
161 get_fx_devs();
162 for(i = 0; i < fx_devs; i++) {
163 device_t dev;
164 dev = __f1_dev[i];
165 if (dev && dev->enabled) {
166 pci_write_config32(dev, reg, value);
167 }
168 }
169}
170
171static u32 amdfam16_nodeid(device_t dev)
172{
173#if MAX_NODE_NUMS == 64
174 unsigned busn;
175 busn = dev->bus->secondary;
176 if (busn != CONFIG_CBB) {
177 return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
178 } else {
179 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
180 }
181
182#else
183 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
184#endif
185}
186
187static void set_vga_enable_reg(u32 nodeid, u32 linkn)
188{
189 u32 val;
190
191 val = 1 | (nodeid<<4) | (linkn<<12);
 192	/* this will route
193 * (1)mmio 0xa0000:0xbffff
194 * (2)io 0x3b0:0x3bb, 0x3c0:0x3df
195 */
196 f1_write_config32(0xf4, val);
197
198}
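/*
 * D18F1xF4 is the VGA enable register: bit 0 turns the routing on, the node id
 * sits at bit 4 and the link number at bit 12, matching the value built above.
 */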
199
200/**
201 * @return
 202 * @retval 2 resource does not exist, usable
 203 * @retval 0 resource exists, not usable
 204 * @retval 1 resource exists, has been allocated before
205 */
206static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
207 unsigned goal_link)
208{
209 struct resource *res;
210 unsigned nodeid, link = 0;
211 int result;
212 res = 0;
213 for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
214 device_t dev;
215 dev = __f0_dev[nodeid];
216 if (!dev)
217 continue;
218 for (link = 0; !res && (link < 8); link++) {
219 res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
220 }
221 }
222 result = 2;
223 if (res) {
224 result = 0;
225 if ((goal_link == (link - 1)) &&
226 (goal_nodeid == (nodeid - 1)) &&
227 (res->flags <= 1)) {
228 result = 1;
229 }
230 }
231 return result;
232}
233
234static struct resource *amdfam16_find_iopair(device_t dev, unsigned nodeid, unsigned link)
235{
236 struct resource *resource;
237 u32 free_reg, reg;
238 resource = 0;
239 free_reg = 0;
240 for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
241 int result;
242 result = reg_useable(reg, dev, nodeid, link);
243 if (result == 1) {
244 /* I have been allocated this one */
245 break;
246 }
247 else if (result > 1) {
248 /* I have a free register pair */
249 free_reg = reg;
250 }
251 }
252 if (reg > 0xd8) {
 253		reg = free_reg; // if none was free, free_reg is still 0
254 }
255
256 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
257
258 return resource;
259}
260
261static struct resource *amdfam16_find_mempair(device_t dev, u32 nodeid, u32 link)
262{
263 struct resource *resource;
264 u32 free_reg, reg;
265 resource = 0;
266 free_reg = 0;
267 for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
268 int result;
269 result = reg_useable(reg, dev, nodeid, link);
270 if (result == 1) {
271 /* I have been allocated this one */
272 break;
273 }
274 else if (result > 1) {
275 /* I have a free register pair */
276 free_reg = reg;
277 }
278 }
279 if (reg > 0xb8) {
280 reg = free_reg;
281 }
282
283 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
284 return resource;
285}
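/*
 * Each node provides eight MMIO base/limit pairs (D18F1 0x80..0xB8) and four
 * I/O pairs (0xC0..0xD8); reg_useable() checks a pair's usage on every node so
 * that the same pair index can be programmed consistently across nodes.
 */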
286
287static void amdfam16_link_read_bases(device_t dev, u32 nodeid, u32 link)
288{
289 struct resource *resource;
290
291 /* Initialize the io space constraints on the current bus */
292 resource = amdfam16_find_iopair(dev, nodeid, link);
293 if (resource) {
294 u32 align;
295 align = log2(HT_IO_HOST_ALIGN);
296 resource->base = 0;
297 resource->size = 0;
298 resource->align = align;
299 resource->gran = align;
300 resource->limit = 0xffffUL;
301 resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
302 }
303
304 /* Initialize the prefetchable memory constraints on the current bus */
305 resource = amdfam16_find_mempair(dev, nodeid, link);
306 if (resource) {
307 resource->base = 0;
308 resource->size = 0;
309 resource->align = log2(HT_MEM_HOST_ALIGN);
310 resource->gran = log2(HT_MEM_HOST_ALIGN);
311 resource->limit = 0xffffffffffULL;
312 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
313 resource->flags |= IORESOURCE_BRIDGE;
314 }
315
316 /* Initialize the memory constraints on the current bus */
317 resource = amdfam16_find_mempair(dev, nodeid, link);
318 if (resource) {
319 resource->base = 0;
320 resource->size = 0;
321 resource->align = log2(HT_MEM_HOST_ALIGN);
322 resource->gran = log2(HT_MEM_HOST_ALIGN);
323 resource->limit = 0xffffffffffULL;
324 resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
325 }
326
327}
328
329static void read_resources(device_t dev)
330{
331 u32 nodeid;
332 struct bus *link;
333
334 nodeid = amdfam16_nodeid(dev);
335 for (link = dev->link_list; link; link = link->next) {
336 if (link->children) {
337 amdfam16_link_read_bases(dev, nodeid, link->link_num);
338 }
339 }
340}
341
342static void set_resource(device_t dev, struct resource *resource, u32 nodeid)
343{
344 resource_t rbase, rend;
345 unsigned reg, link_num;
346 char buf[50];
347
348 /* Make certain the resource has actually been set */
349 if (!(resource->flags & IORESOURCE_ASSIGNED)) {
350 return;
351 }
352
353 /* If I have already stored this resource don't worry about it */
354 if (resource->flags & IORESOURCE_STORED) {
355 return;
356 }
357
358 /* Only handle PCI memory and IO resources */
359 if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
360 return;
361
362 /* Ensure I am actually looking at a resource of function 1 */
363 if ((resource->index & 0xffff) < 0x1000) {
364 return;
365 }
366 /* Get the base address */
367 rbase = resource->base;
368
369 /* Get the limit (rounded up) */
370 rend = resource_end(resource);
371
372 /* Get the register and link */
373 reg = resource->index & 0xfff; // 4k
374 link_num = IOINDEX_LINK(resource->index);
375
376 if (resource->flags & IORESOURCE_IO) {
377 set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
378 }
379 else if (resource->flags & IORESOURCE_MEM) {
380 set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, node_nums) ;// [39:8]
381 }
382 resource->flags |= IORESOURCE_STORED;
383 sprintf(buf, " <node %x link %x>",
384 nodeid, link_num);
385 report_resource_stored(dev, resource, buf);
386}
387
388/**
389 * I tried to reuse the resource allocation code in set_resource()
390 * but it is too difficult to deal with the resource allocation magic.
391 */
392
393static void create_vga_resource(device_t dev, unsigned nodeid)
394{
395 struct bus *link;
396
 397	/* find out which link the VGA card is connected to;
 398	 * we only deal with the 'first' VGA card */
399 for (link = dev->link_list; link; link = link->next) {
400 if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
401#if CONFIG_MULTIPLE_VGA_ADAPTERS
402 extern device_t vga_pri; // the primary vga device, defined in device.c
403 printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
404 link->secondary,link->subordinate);
405 /* We need to make sure the vga_pri is under the link */
406 if((vga_pri->bus->secondary >= link->secondary ) &&
407 (vga_pri->bus->secondary <= link->subordinate )
408 )
409#endif
410 break;
411 }
412 }
413
414 /* no VGA card installed */
415 if (link == NULL)
416 return;
417
418 printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, sblink);
419 set_vga_enable_reg(nodeid, sblink);
420}
421
422static void set_resources(device_t dev)
423{
424 unsigned nodeid;
425 struct bus *bus;
426 struct resource *res;
427
428 /* Find the nodeid */
429 nodeid = amdfam16_nodeid(dev);
430
431 create_vga_resource(dev, nodeid); //TODO: do we need this?
432
433 /* Set each resource we have found */
434 for (res = dev->resource_list; res; res = res->next) {
435 set_resource(dev, res, nodeid);
436 }
437
438 for (bus = dev->link_list; bus; bus = bus->next) {
439 if (bus->children) {
440 assign_resources(bus);
441 }
442 }
443}
444
445static void northbridge_init(struct device *dev)
446{
447}
448#if 0 /* TODO: Check if needed. */
449static unsigned scan_chains(device_t dev, unsigned max)
450{
451 unsigned nodeid;
452 struct bus *link;
453 device_t io_hub = NULL;
454 u32 next_unitid = 0x18;
455 nodeid = amdfam16_nodeid(dev);
456 if (nodeid == 0) {
457 for (link = dev->link_list; link; link = link->next) {
 458		//if (link->link_num == sblink) { /* devicetree puts the IO Hub on link_list[sblink] */
 459		if (link->link_num == 0) { /* devicetree puts the IO Hub on link_list[0] */
460 io_hub = link->children;
461 if (!io_hub || !io_hub->enabled) {
462 die("I can't find the IO Hub, or IO Hub not enabled, please check the device tree.\n");
463 }
464 /* Now that nothing is overlapping it is safe to scan the children. */
465 max = pci_scan_bus(link, 0x00, ((next_unitid - 1) << 3) | 7, 0);
466 }
467 }
468 }
469 return max;
470}
471#endif
472static struct device_operations northbridge_operations = {
473 .read_resources = read_resources,
474 .set_resources = set_resources,
475 .enable_resources = pci_dev_enable_resources,
476 .init = northbridge_init,
477 //.scan_bus = scan_chains, /* TODO: */
478 .enable = 0,
479 .ops_pci = 0,
480};
481
482static const struct pci_driver family16_northbridge __pci_driver = {
483 .ops = &northbridge_operations,
484 .vendor = PCI_VENDOR_ID_AMD,
485 .device = PCI_DEVICE_ID_AMD_16H_MODEL_000F_NB_HT,
486};
487
488static const struct pci_driver family10_northbridge __pci_driver = {
489 .ops = &northbridge_operations,
490 .vendor = PCI_VENDOR_ID_AMD,
491 .device = PCI_DEVICE_ID_AMD_10H_NB_HT,
492};
493
494struct chip_operations northbridge_amd_agesa_family16kb_ops = {
495 CHIP_NAME("AMD FAM16 Northbridge")
496 .enable_dev = 0,
497};
498
499static void domain_read_resources(device_t dev)
500{
501 unsigned reg;
502
503 /* Find the already assigned resource pairs */
504 get_fx_devs();
505 for (reg = 0x80; reg <= 0xd8; reg+= 0x08) {
506 u32 base, limit;
507 base = f1_read_config32(reg);
508 limit = f1_read_config32(reg + 0x04);
509 /* Is this register allocated? */
510 if ((base & 3) != 0) {
511 unsigned nodeid, reg_link;
512 device_t reg_dev;
513 if (reg<0xc0) { // mmio
514 nodeid = (limit & 0xf) + (base&0x30);
515 } else { // io
516 nodeid = (limit & 0xf) + ((base>>4)&0x30);
517 }
518 reg_link = (limit >> 4) & 7;
519 reg_dev = __f0_dev[nodeid];
520 if (reg_dev) {
521 /* Reserve the resource */
522 struct resource *res;
523 res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
524 if (res) {
525 res->flags = 1;
526 }
527 }
528 }
529 }
 530	/* FIXME: do we need to check the extended config space?
 531	   I don't trust the preset values that much. */
532
533#if !CONFIG_PCI_64BIT_PREF_MEM
534 pci_domain_read_resources(dev);
535
536#else
537 struct bus *link;
538 struct resource *resource;
539 for (link=dev->link_list; link; link = link->next) {
540 /* Initialize the system wide io space constraints */
541 resource = new_resource(dev, 0|(link->link_num<<2));
542 resource->base = 0x400;
543 resource->limit = 0xffffUL;
544 resource->flags = IORESOURCE_IO;
545
546 /* Initialize the system wide prefetchable memory resources constraints */
547 resource = new_resource(dev, 1|(link->link_num<<2));
548 resource->limit = 0xfcffffffffULL;
549 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
550
551 /* Initialize the system wide memory resources constraints */
552 resource = new_resource(dev, 2|(link->link_num<<2));
553 resource->limit = 0xfcffffffffULL;
554 resource->flags = IORESOURCE_MEM;
555 }
556#endif
557}
558
559extern u8 acpi_slp_type;
560
561static void domain_enable_resources(device_t dev)
562{
563 u32 val;
564#if CONFIG_HAVE_ACPI_RESUME
565 if (acpi_slp_type == 3)
566 agesawrapper_fchs3laterestore();
567#endif
568
569 /* Must be called after PCI enumeration and resource allocation */
570 printk(BIOS_DEBUG, "\nFam16 - domain_enable_resources: AmdInitMid.\n");
571#if CONFIG_HAVE_ACPI_RESUME
572 if (acpi_slp_type != 3) {
573 printk(BIOS_DEBUG, "agesawrapper_amdinitmid ");
574 val = agesawrapper_amdinitmid ();
575 if (val)
576 printk(BIOS_DEBUG, "error level: %x \n", val);
577 else
578 printk(BIOS_DEBUG, "passed.\n");
579 }
580#else
581 printk(BIOS_DEBUG, "agesawrapper_amdinitmid ");
582 val = agesawrapper_amdinitmid ();
583 if (val)
584 printk(BIOS_DEBUG, "error level: %x \n", val);
585 else
586 printk(BIOS_DEBUG, "passed.\n");
587#endif
588
589 printk(BIOS_DEBUG, " ader - leaving domain_enable_resources.\n");
590}
591
592#if CONFIG_HW_MEM_HOLE_SIZEK != 0
593struct hw_mem_hole_info {
594 unsigned hole_startk;
595 int node_id;
596};
597static struct hw_mem_hole_info get_hw_mem_hole_info(void)
598{
599 struct hw_mem_hole_info mem_hole;
600 int i;
601 mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
602 mem_hole.node_id = -1;
603 for (i = 0; i < node_nums; i++) {
604 dram_base_mask_t d;
605 u32 hole;
606 d = get_dram_base_mask(i);
607 if (!(d.mask & 1)) continue; // no memory on this node
608 hole = pci_read_config32(__f1_dev[i], 0xf0);
 609		if (hole & 2) { // we found the hole
 610			mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
 611			mem_hole.node_id = i; // record which node has the hole
612 break; // only one hole
613 }
614 }
 615	// Double check: if the base and limit registers were set up non-contiguously (rather than via the hole register), derive hole_startk from the gap.
616 if (mem_hole.node_id == -1) {
617 resource_t limitk_pri = 0;
618 for (i=0; i<node_nums; i++) {
619 dram_base_mask_t d;
620 resource_t base_k, limit_k;
621 d = get_dram_base_mask(i);
622 if (!(d.base & 1)) continue;
623 base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
 624			if (base_k > 4 *1024 * 1024) break; // no need to check further
 625			if (limitk_pri != base_k) { // we found the hole
 626				mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
627 mem_hole.node_id = i;
628 break; //only one hole
629 }
630 limit_k = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;
631 limitk_pri = limit_k;
632 }
633 }
634 return mem_hole;
635}
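/*
 * The hole start above is taken from bits [31:24] of D18F1xF0: masking with
 * 0xff << 24 keeps a byte address aligned to 16 MiB, and the >> 10 converts
 * it to KiB for hole_startk.
 */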
636#endif
637
638#define ONE_MB_SHIFT 20
639
640static void setup_uma_memory(void)
641{
642#if CONFIG_GFXUMA
643 uint32_t topmem = (uint32_t) bsp_topmem();
644 uint32_t sys_mem;
645
646 /* refer to UMA Size Consideration in Family16h BKDG. */
647 /* Please reference MemNGetUmaSizeOR () */
648 /*
649 * Total system memory UMASize
650 * >= 2G 512M
651 * >=1G 256M
652 * <1G 64M
653 */
654 sys_mem = topmem + (16 << ONE_MB_SHIFT); // Ignore 16MB allocated for C6 when finding UMA size
655 if ((bsp_topmem2()>>32) || (sys_mem >= 2048 << ONE_MB_SHIFT)) {
656 uma_memory_size = 512 << ONE_MB_SHIFT;
657 } else if (sys_mem >= 1024 << ONE_MB_SHIFT) {
658 uma_memory_size = 256 << ONE_MB_SHIFT;
659 } else {
660 uma_memory_size = 64 << ONE_MB_SHIFT;
661 }
662 uma_memory_base = topmem - uma_memory_size; /* TOP_MEM1 */
663
664 printk(BIOS_INFO, "%s: uma size 0x%08llx, memory start 0x%08llx\n",
665 __func__, uma_memory_size, uma_memory_base);
666
667 /* TODO: TOP_MEM2 */
668#endif
669}
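/*
 * Worked example (hypothetical TOM value): with 1920 MiB of DRAM, all below
 * 4 GiB (topmem = 0x78000000, bsp_topmem2() assumed to return 0), sys_mem is
 * 1920 + 16 = 1936 MiB. That is below 2048 MiB but at least 1024 MiB, so
 * uma_memory_size = 256 MiB and uma_memory_base = 1920 MiB - 256 MiB = 0x68000000.
 */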
670
671
672static void domain_set_resources(device_t dev)
673{
674#if CONFIG_PCI_64BIT_PREF_MEM
675 struct resource *io, *mem1, *mem2;
676 struct resource *res;
677#endif
678 unsigned long mmio_basek;
679 u32 pci_tolm;
 680	u64 ramtop = 0;
 681	int i, idx;
682 struct bus *link;
683#if CONFIG_HW_MEM_HOLE_SIZEK != 0
684 struct hw_mem_hole_info mem_hole;
685 u32 reset_memhole = 1;
686#endif
687
688#if CONFIG_PCI_64BIT_PREF_MEM
689
690 for (link = dev->link_list; link; link = link->next) {
691 /* Now reallocate the pci resources memory with the
692 * highest addresses I can manage.
693 */
694 mem1 = find_resource(dev, 1|(link->link_num<<2));
695 mem2 = find_resource(dev, 2|(link->link_num<<2));
696
697 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
698 mem1->base, mem1->limit, mem1->size, mem1->align);
699 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
700 mem2->base, mem2->limit, mem2->size, mem2->align);
701
702 /* See if both resources have roughly the same limits */
703 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
704 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
705 {
706 /* If so place the one with the most stringent alignment first */
707 if (mem2->align > mem1->align) {
708 struct resource *tmp;
709 tmp = mem1;
710 mem1 = mem2;
711 mem2 = tmp;
712 }
713 /* Now place the memory as high up as it will go */
714 mem2->base = resource_max(mem2);
715 mem1->limit = mem2->base - 1;
716 mem1->base = resource_max(mem1);
717 }
718 else {
719 /* Place the resources as high up as they will go */
720 mem2->base = resource_max(mem2);
721 mem1->base = resource_max(mem1);
722 }
723
724 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
725 mem1->base, mem1->limit, mem1->size, mem1->align);
726 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
727 mem2->base, mem2->limit, mem2->size, mem2->align);
728 }
729
730 for (res = &dev->resource_list; res; res = res->next)
731 {
732 res->flags |= IORESOURCE_ASSIGNED;
733 res->flags |= IORESOURCE_STORED;
734 report_resource_stored(dev, res, "");
735 }
736#endif
737
738 pci_tolm = 0xffffffffUL;
739 for (link = dev->link_list; link; link = link->next) {
740 pci_tolm = find_pci_tolm(link);
741 }
742
743 // FIXME handle interleaved nodes. If you fix this here, please fix
744 // amdk8, too.
745 mmio_basek = pci_tolm >> 10;
746 /* Round mmio_basek to something the processor can support */
747 mmio_basek &= ~((1 << 6) -1);
748
749 // FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
750 // MMIO hole. If you fix this here, please fix amdk8, too.
751 /* Round the mmio hole to 64M */
752 mmio_basek &= ~((64*1024) - 1);
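	/* Example (illustrative): pci_tolm = 0xCFFFFFFF gives mmio_basek = 0x33FFFF KiB,
	 * which the two rounding steps above reduce to 0x330000 KiB (3264 MiB). */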
753
754#if CONFIG_HW_MEM_HOLE_SIZEK != 0
 755	/* If the hw mem hole was already set in the raminit stage, compare
 756	 * mmio_basek and hole_startk here. If mmio_basek is bigger than hole_startk,
 757	 * use hole_startk as mmio_basek and there is no need to reset the hole;
 758	 * otherwise reset the hole to mmio_basek.
759 */
760
761 mem_hole = get_hw_mem_hole_info();
762
 763	// Use hole_startk as mmio_basek; no need to reset the hole anymore
764 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
765 mmio_basek = mem_hole.hole_startk;
766 reset_memhole = 0;
767 }
768#endif
769
770 idx = 0x10;
771 for (i = 0; i < node_nums; i++) {
772 dram_base_mask_t d;
773 resource_t basek, limitk, sizek; // 4 1T
774
775 d = get_dram_base_mask(i);
776
777 if (!(d.mask & 1)) continue;
 778		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow; we may lose 6 bits here
779 limitk = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9 ;
780
781 sizek = limitk - basek;
782
783 /* see if we need a hole from 0xa0000 to 0xbffff */
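		/* For reference: (8*64)+(8*16) = 640 KiB (0xA0000) and
		 * (8*64)+(16*16) = 768 KiB (0xC0000), i.e. the legacy VGA window. */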
784 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
785 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
786 idx += 0x10;
787 basek = (8*64)+(16*16);
788 sizek = limitk - ((8*64)+(16*16));
789
790 }
791
792 //printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);
793
 794		/* split the region to accommodate pci memory space */
795 if ((basek < 4*1024*1024 ) && (limitk > mmio_basek)) {
796 if (basek <= mmio_basek) {
797 unsigned pre_sizek;
798 pre_sizek = mmio_basek - basek;
799 if (pre_sizek>0) {
800 ram_resource(dev, (idx | i), basek, pre_sizek);
801 idx += 0x10;
802 sizek -= pre_sizek;
 803					if (!ramtop)
804 ramtop = mmio_basek * 1024;
 805				}
806 basek = mmio_basek;
807 }
808 if ((basek + sizek) <= 4*1024*1024) {
809 sizek = 0;
810 }
811 else {
812 uint64_t topmem2 = bsp_topmem2();
813 basek = 4*1024*1024;
814 sizek = topmem2/1024 - basek;
815 }
816 }
817
818 ram_resource(dev, (idx | i), basek, sizek);
819 idx += 0x10;
820 printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
821 i, mmio_basek, basek, limitk);
 822		if (!ramtop)
823 ramtop = limitk * 1024;
 824	}
825
826#if CONFIG_GFXUMA
 827	set_top_of_ram(uma_memory_base);
 828	uma_resource(dev, 7, uma_memory_base >> 10, uma_memory_size >> 10);
 829#else
830 set_top_of_ram(ramtop);
 831#endif
832
833 for(link = dev->link_list; link; link = link->next) {
834 if (link->children) {
835 assign_resources(link);
836 }
837 }
838}
839
840static struct device_operations pci_domain_ops = {
841 .read_resources = domain_read_resources,
842 .set_resources = domain_set_resources,
843 .enable_resources = domain_enable_resources,
844 .init = NULL,
845 .scan_bus = pci_domain_scan_bus,
846 .ops_pci_bus = pci_bus_default_ops,
847};
848
849static void sysconf_init(device_t dev) // first node
850{
851 sblink = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
852 node_nums = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1; //NodeCnt[2:0]
853}
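/*
 * Both values are read from node 0's northbridge function 0: the boot
 * southbridge link number from register 0x64 and the node count from the
 * NodeCnt field of register 0x60.
 */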
854
855static void add_more_links(device_t dev, unsigned total_links)
856{
857 struct bus *link, *last = NULL;
858 int link_num;
859
860 for (link = dev->link_list; link; link = link->next)
861 last = link;
862
863 if (last) {
864 int links = total_links - last->link_num;
865 link_num = last->link_num;
866 if (links > 0) {
867 link = malloc(links*sizeof(*link));
868 if (!link)
869 die("Couldn't allocate more links!\n");
870 memset(link, 0, links*sizeof(*link));
871 last->next = link;
872 }
873 }
874 else {
875 link_num = -1;
876 link = malloc(total_links*sizeof(*link));
877 memset(link, 0, total_links*sizeof(*link));
878 dev->link_list = link;
879 }
880
881 for (link_num = link_num + 1; link_num < total_links; link_num++) {
882 link->link_num = link_num;
883 link->dev = dev;
884 link->next = link + 1;
885 last = link;
886 link = link->next;
887 }
888 last->next = NULL;
889}
890
891static u32 cpu_bus_scan(device_t dev, u32 max)
892{
893 struct bus *cpu_bus;
894 device_t dev_mc;
895#if CONFIG_CBB
896 device_t pci_domain;
897#endif
898 int i,j;
899 int coreid_bits;
900 int core_max = 0;
901 unsigned ApicIdCoreIdSize;
902 unsigned core_nums;
903 int siblings = 0;
904 unsigned int family;
905
906#if CONFIG_CBB
907 dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
908 if (dev_mc && dev_mc->bus) {
909 printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
910 pci_domain = dev_mc->bus->dev;
911 if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
912 printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc));
913 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
914 printk(BIOS_DEBUG, "%s",dev_path(dev_mc));
915 } else {
916 printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
917 }
918 printk(BIOS_DEBUG, "\n");
919 }
920 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
921 if (!dev_mc) {
922 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
923 if (dev_mc && dev_mc->bus) {
924 printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
925 pci_domain = dev_mc->bus->dev;
926 if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
927 if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
928 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
929 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
930 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
931 while (dev_mc) {
932 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
933 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
934 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
935 dev_mc = dev_mc->sibling;
936 }
937 }
938 }
939 }
940 }
941#endif
942 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
943 if (!dev_mc) {
944 printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
945 die("");
946 }
947 sysconf_init(dev_mc);
948#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
 949	if (node_nums>32) { // need to put nodes 32 to 63 on bus 0xfe
 950		if (pci_domain->link_list && !pci_domain->link_list->next) {
 951			struct bus *new_bus = new_link(pci_domain); /* local must not shadow the new_link() helper */
 952			pci_domain->link_list->next = new_bus;
 953			new_bus->link_num = 1;
 954			new_bus->dev = pci_domain;
 955			new_bus->children = 0;
956 printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
957 }
958 pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
959 }
960#endif
961
962 /* Get Max Number of cores(MNC) */
963 coreid_bits = (cpuid_ecx(AMD_CPUID_ASIZE_PCCOUNT) & 0x0000F000) >> 12;
964 core_max = 1 << (coreid_bits & 0x000F); //mnc
965
966 ApicIdCoreIdSize = ((cpuid_ecx(0x80000008)>>12) & 0xF);
967 if (ApicIdCoreIdSize) {
968 core_nums = (1 << ApicIdCoreIdSize) - 1;
969 } else {
970 core_nums = 3; //quad core
971 }
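	/* Illustrative arithmetic: coreid_bits = 2 gives core_max = 1 << 2 = 4;
	 * ApicIdCoreIdSize = 4 gives core_nums = (1 << 4) - 1 = 15. */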
972
973 /* Find which cpus are present */
974 cpu_bus = dev->link_list;
975 for (i = 0; i < node_nums; i++) {
976 device_t cdb_dev;
977 unsigned busn, devn;
978 struct bus *pbus;
979
980 busn = CONFIG_CBB;
981 devn = CONFIG_CDB + i;
982 pbus = dev_mc->bus;
983#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
984 if (i >= 32) {
985 busn--;
986 devn -= 32;
987 pbus = pci_domain->link_list->next;
988 }
989#endif
990
991 /* Find the cpu's pci device */
992 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
993 if (!cdb_dev) {
994 /* If I am probing things in a weird order
995 * ensure all of the cpu's pci devices are found.
996 */
997 int fn;
998 for(fn = 0; fn <= 5; fn++) { //FBDIMM?
999 cdb_dev = pci_probe_dev(NULL, pbus,
1000 PCI_DEVFN(devn, fn));
1001 }
1002 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1003 } else {
 1004			/* OK, we need to set the links for that device;
 1005			 * otherwise the devices under it will not be scanned.
 1006			 */
1007 int linknum;
1008#if CONFIG_HT3_SUPPORT
1009 linknum = 8;
1010#else
1011 linknum = 4;
1012#endif
1013 add_more_links(cdb_dev, linknum);
1014 }
1015
1016 family = cpuid_eax(1);
1017 family = (family >> 20) & 0xFF;
1018 if (family == 1) { //f10
1019 u32 dword;
1020 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1021 dword = pci_read_config32(cdb_dev, 0xe8);
1022 siblings = ((dword & BIT15) >> 13) | ((dword & (BIT13 | BIT12)) >> 12);
1023 } else if (family == 7) {//f16
1024 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 5));
1025 if (cdb_dev && cdb_dev->enabled) {
1026 siblings = pci_read_config32(cdb_dev, 0x84);
1027 siblings &= 0xFF;
1028 }
1029 } else {
1030 siblings = 0; //default one core
1031 }
1032 int enable_node = cdb_dev && cdb_dev->enabled;
1033 printk(BIOS_SPEW, "%s family%xh, core_max=0x%x, core_nums=0x%x, siblings=0x%x\n",
1034 dev_path(cdb_dev), 0x0f + family, core_max, core_nums, siblings);
1035
1036 for (j = 0; j <= siblings; j++ ) {
1037 extern CONST OPTIONS_CONFIG_TOPOLOGY ROMDATA TopologyConfiguration;
1038 u32 modules = TopologyConfiguration.PlatformNumberOfModules;
1039 u32 lapicid_start = 0;
1040
1041 /*
 1042			 * APIC ID calculation is tightly coupled with AGESA v5 code.
1043 * This calculation MUST match the assignment calculation done
1044 * in LocalApicInitializationAtEarly() function.
1045 * And reference GetLocalApicIdForCore()
1046 *
1047 * Apply apic enumeration rules
1048 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
1049 * put the local-APICs at m..z
1050 *
1051 * This is needed because many IO-APIC devices only have 4 bits
1052 * for their APIC id and therefore must reside at 0..15
1053 */
1054#ifndef CFG_PLAT_NUM_IO_APICS /* defined in mainboard buildOpts.c */
1055#define CFG_PLAT_NUM_IO_APICS 3
1056#endif
1057 if ((node_nums * core_max) + CFG_PLAT_NUM_IO_APICS >= 0x10) {
1058 lapicid_start = (CFG_PLAT_NUM_IO_APICS - 1) / core_max;
1059 lapicid_start = (lapicid_start + 1) * core_max;
 1060				printk(BIOS_SPEW, "lapicid_start=0x%x ", lapicid_start);
1061 }
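			/* Illustrative example (single node, modules = 1, assumed values):
			 * with core_max = 4 and CFG_PLAT_NUM_IO_APICS = 3, 4 + 3 < 0x10, so
			 * lapicid_start stays 0 and core j gets APIC ID j. With core_max = 16,
			 * 16 + 3 >= 0x10, so lapicid_start = ((3 - 1) / 16 + 1) * 16 = 16 and
			 * core j gets APIC ID 16 + j, leaving 0..15 for the IO-APICs. */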
1062 u32 apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (siblings + 1)) : j);
1063 printk(BIOS_SPEW, "node 0x%x core 0x%x apicid=0x%x\n",
1064 i, j, apic_id);
1065
1066 device_t cpu = add_cpu_device(cpu_bus, apic_id, enable_node);
1067 if (cpu)
1068 amd_cpu_topology(cpu, i, j);
1069 } //j
1070 }
1071 return max;
1072}
1073
1074static void cpu_bus_init(device_t dev)
1075{
1076 initialize_cpus(dev->link_list);
1077}
1078
1079static void cpu_bus_noop(device_t dev)
1080{
1081}
1082
1083static void cpu_bus_read_resources(device_t dev)
1084{
1085#if CONFIG_MMCONF_SUPPORT
1086 struct resource *resource = new_resource(dev, 0xc0010058);
1087 resource->base = CONFIG_MMCONF_BASE_ADDRESS;
1088 resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
1089 resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
1090 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
1091#endif
1092}
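/*
 * For scale (example Kconfig values, not mandated here): CONFIG_MMCONF_BUS_NUMBER = 64
 * reserves 64 * 4096 * 256 bytes = 64 MiB, i.e. 1 MiB of config space per bus,
 * starting at CONFIG_MMCONF_BASE_ADDRESS. The resource index 0xc0010058 mirrors
 * the number of AMD's MMIO Configuration Base Address MSR.
 */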
1093
1094static void cpu_bus_set_resources(device_t dev)
1095{
1096 struct resource *resource = find_resource(dev, 0xc0010058);
1097 if (resource) {
1098 report_resource_stored(dev, resource, " <mmconfig>");
1099 }
1100 pci_dev_set_resources(dev);
1101}
1102
1103static struct device_operations cpu_bus_ops = {
1104 .read_resources = cpu_bus_read_resources,
1105 .set_resources = cpu_bus_set_resources,
1106 .enable_resources = cpu_bus_noop,
1107 .init = cpu_bus_init,
1108 .scan_bus = cpu_bus_scan,
1109};
1110
1111static void root_complex_enable_dev(struct device *dev)
1112{
1113 static int done = 0;
1114
1115 /* Do not delay UMA setup, as a device on the PCI bus may evaluate
1116 the global uma_memory variables already in its enable function. */
1117 if (!done) {
1118 setup_bsp_ramtop();
1119 setup_uma_memory();
1120 done = 1;
1121 }
1122
1123 /* Set the operations if it is a special bus type */
1124 if (dev->path.type == DEVICE_PATH_DOMAIN) {
1125 dev->ops = &pci_domain_ops;
1126 } else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
1127 dev->ops = &cpu_bus_ops;
1128 }
1129}
1130
1131struct chip_operations northbridge_amd_agesa_family16kb_root_complex_ops = {
1132 CHIP_NAME("AMD FAM16 Root Complex")
1133 .enable_dev = root_complex_enable_dev,
1134};
 1135
1136/*********************************************************************
1137 * Change the vendor / device IDs to match the generic VBIOS header. *
1138 *********************************************************************/
1139u32 map_oprom_vendev(u32 vendev)
1140{
1141 u32 new_vendev = vendev;
1142
1143 switch(vendev) {
1144 case 0x10029830:
1145 case 0x10029831:
1146 case 0x10029832:
1147 case 0x10029833:
1148 case 0x10029834:
1149 case 0x10029835:
1150 case 0x10029836:
1151 case 0x10029837:
1152 case 0x10029838:
1153 case 0x10029839:
1154 case 0x1002983A:
1155 case 0x1002983D:
1156 new_vendev = 0x10029830; // This is the default value in AMD-generated VBIOS
1157 break;
1158 default:
1159 break;
1160 }
1161
1162 if (vendev != new_vendev)
1163 printk(BIOS_NOTICE, "Mapping PCI device %8x to %8x\n", vendev, new_vendev);
1164
1165 return new_vendev;
1166}