/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2012 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <console/console.h>
#include <arch/io.h>
#include <stdint.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/hypertransport.h>
#include <stdlib.h>
#include <string.h>
#include <lib.h>
#include <cpu/cpu.h>
#include <cbmem.h>

#include <cpu/x86/lapic.h>
#include <cpu/amd/mtrr.h>

#include <Porting.h>
#include <AGESA.h>
#include <Options.h>
#include <Topology.h>
#include <cpu/amd/amdfam16.h>
#include <cpuRegisters.h>
#include "agesawrapper.h"
#include "northbridge.h"

#define MAX_NODE_NUMS (MAX_NODES * MAX_DIES)

#if (defined CONFIG_EXT_CONF_SUPPORT) && CONFIG_EXT_CONF_SUPPORT == 1
#error CONFIG_EXT_CONF_SUPPORT == 1 is not supported anymore!
#endif

typedef struct dram_base_mask {
	u32 base; //[47:27] at [28:8]
	u32 mask; //[47:27] at [28:8] and enable at bit 0
} dram_base_mask_t;

static unsigned node_nums;
static unsigned sblink;
static device_t __f0_dev[MAX_NODE_NUMS];
static device_t __f1_dev[MAX_NODE_NUMS];
static device_t __f2_dev[MAX_NODE_NUMS];
static device_t __f4_dev[MAX_NODE_NUMS];
static unsigned fx_devs = 0;

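/*
 * Re-assemble a node's DRAM base and mask from the D18F1 address map
 * registers (0x40/0x44 plus the 0x140/0x144 high-address extensions) into
 * the packed dram_base_mask_t layout above: address bits [47:27] stored at
 * [28:8], with the DRAM enable bit kept at bit 0. The map registers are
 * expected to be identical on every node, so node 0's F1 device is read.
 */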
static dram_base_mask_t get_dram_base_mask(u32 nodeid)
{
	device_t dev;
	dram_base_mask_t d;
	dev = __f1_dev[0];
	u32 temp;
	temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask = ((temp & 0xfff80000)>>(8+3)); // mask out DramMask [26:24] too
	temp = pci_read_config32(dev, 0x144 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
	d.mask |= temp<<21;
	temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask |= (temp & 1); // enable bit
	d.base = ((temp & 0xfff80000)>>(8+3)); // mask out DramBase [26:24] too
	temp = pci_read_config32(dev, 0x140 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
	d.base |= temp<<21;
	return d;
}

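/*
 * Program one F1 I/O base/limit register pair (reg / reg+4) identically on
 * every node so that io_min..io_max is routed to the given node and link.
 * The limit register is written first; the window only becomes active once
 * the base register, which carries the read/write enable bits, is written.
 */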
static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
			u32 io_min, u32 io_max)
{
	u32 i;
	u32 tempreg;
	/* io range allocation */
	tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
	for (i=0; i<node_nums; i++)
		pci_write_config32(__f1_dev[i], reg+4, tempreg);
	tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
#if 0
	// FIXME: can we use VGA reg instead?
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
		printk(BIOS_SPEW, "%s, enabling legacy VGA IO forwarding for %s link %s\n",
			__func__, dev_path(dev), link);
		tempreg |= PCI_IO_BASE_VGA_EN;
	}
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
		tempreg |= PCI_IO_BASE_NO_ISA;
	}
#endif
	for (i=0; i<node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}

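/*
 * Program one F1 MMIO base/limit register pair (reg / reg+4), routing the
 * range mmio_min..mmio_max (already shifted right by 8) to the given node
 * and link. As with the I/O pair, the read/write enable bits sit in the
 * base register, which is written last.
 */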
static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
{
	u32 i;
	u32 tempreg;
	/* mmio range allocation */
	tempreg = (nodeid&0xf) | (linkn<<4) | (mmio_max&0xffffff00); //limit
	for (i=0; i<nodes; i++)
		pci_write_config32(__f1_dev[i], reg+4, tempreg);
	tempreg = 3 | (nodeid & 0x30) | (mmio_min&0xffffff00);
	for (i=0; i<node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}

static device_t get_node_pci(u32 nodeid, u32 fn)
{
#if MAX_NODE_NUMS + CONFIG_CDB >= 32
	if ((CONFIG_CDB + nodeid) < 32) {
		return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
	} else {
		return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
	}
#else
	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
#endif
}

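/*
 * Cache the device handles for PCI functions 0, 1, 2 and 4 of every
 * northbridge node (bus CONFIG_CBB, device CONFIG_CDB + nodeid) so that
 * later code can index them by node id without repeated lookups.
 */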
static void get_fx_devs(void)
{
	int i;
	for (i = 0; i < MAX_NODE_NUMS; i++) {
		__f0_dev[i] = get_node_pci(i, 0);
		__f1_dev[i] = get_node_pci(i, 1);
		__f2_dev[i] = get_node_pci(i, 2);
		__f4_dev[i] = get_node_pci(i, 4);
		if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
			fx_devs = i+1;
	}
	if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
		die("Cannot find 0:0x18.[0|1]\n");
	}
	printk(BIOS_DEBUG, "fx_devs=0x%x\n", fx_devs);
}

static u32 f1_read_config32(unsigned reg)
{
	if (fx_devs == 0)
		get_fx_devs();
	return pci_read_config32(__f1_dev[0], reg);
}

static void f1_write_config32(unsigned reg, u32 value)
{
	int i;
	if (fx_devs == 0)
		get_fx_devs();
	for (i = 0; i < fx_devs; i++) {
		device_t dev;
		dev = __f1_dev[i];
		if (dev && dev->enabled) {
			pci_write_config32(dev, reg, value);
		}
	}
}

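/* Derive the node id of a northbridge device from its PCI device number. */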
static u32 amdfam16_nodeid(device_t dev)
{
#if MAX_NODE_NUMS == 64
	unsigned busn;
	busn = dev->bus->secondary;
	if (busn != CONFIG_CBB) {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
	} else {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
	}

#else
	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
#endif
}

static void set_vga_enable_reg(u32 nodeid, u32 linkn)
{
	u32 val;

	val = 1 | (nodeid<<4) | (linkn<<12);
	/* This routes
	 * (1) mmio 0xa0000:0xbffff
	 * (2) io 0x3b0:0x3bb, 0x3c0:0x3df
	 */
	f1_write_config32(0xf4, val);

}

/**
 * @return
 * @retval 2  resource does not exist, usable
 * @retval 0  resource exists, not usable
 * @retval 1  resource exists and has been allocated before
 */
static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
			unsigned goal_link)
{
	struct resource *res;
	unsigned nodeid, link = 0;
	int result;
	res = 0;
	for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
		device_t dev;
		dev = __f0_dev[nodeid];
		if (!dev)
			continue;
		for (link = 0; !res && (link < 8); link++) {
			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
		}
	}
	result = 2;
	if (res) {
		result = 0;
		if ((goal_link == (link - 1)) &&
			(goal_nodeid == (nodeid - 1)) &&
			(res->flags <= 1)) {
			result = 1;
		}
	}
	return result;
}

static struct resource *amdfam16_find_iopair(device_t dev, unsigned nodeid, unsigned link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xd8) {
		reg = free_reg; // if there is no free pair, free_reg is still 0
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));

	return resource;
}

static struct resource *amdfam16_find_mempair(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xb8) {
		reg = free_reg;
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
	return resource;
}

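/*
 * Pre-allocate one I/O window and two memory windows (prefetchable and
 * non-prefetchable) per downstream link, so the resource allocator has F1
 * register pairs to place the bridge apertures into later.
 */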
static void amdfam16_link_read_bases(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;

	/* Initialize the io space constraints on the current bus */
	resource = amdfam16_find_iopair(dev, nodeid, link);
	if (resource) {
		u32 align;
		align = log2(HT_IO_HOST_ALIGN);
		resource->base = 0;
		resource->size = 0;
		resource->align = align;
		resource->gran = align;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
	}

	/* Initialize the prefetchable memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		resource->flags |= IORESOURCE_BRIDGE;
	}

	/* Initialize the memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
	}

}

static void read_resources(device_t dev)
{
	u32 nodeid;
	struct bus *link;

	nodeid = amdfam16_nodeid(dev);
	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			amdfam16_link_read_bases(dev, nodeid, link->link_num);
		}
	}
}

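/*
 * Write an allocated bridge resource back into the F1 routing registers.
 * The register offset and link number are recovered from the resource
 * index that amdfam16_find_iopair()/amdfam16_find_mempair() encoded as
 * IOINDEX(0x1000 + reg, link).
 */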
static void set_resource(device_t dev, struct resource *resource, u32 nodeid)
{
	resource_t rbase, rend;
	unsigned reg, link_num;
	char buf[50];

	/* Make certain the resource has actually been set */
	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
		return;
	}

	/* If I have already stored this resource don't worry about it */
	if (resource->flags & IORESOURCE_STORED) {
		return;
	}

	/* Only handle PCI memory and IO resources */
	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
		return;

	/* Ensure I am actually looking at a resource of function 1 */
	if ((resource->index & 0xffff) < 0x1000) {
		return;
	}
	/* Get the base address */
	rbase = resource->base;

	/* Get the limit (rounded up) */
	rend = resource_end(resource);

	/* Get the register and link */
	reg = resource->index & 0xfff; // 4k
	link_num = IOINDEX_LINK(resource->index);

	if (resource->flags & IORESOURCE_IO) {
		set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
	}
	else if (resource->flags & IORESOURCE_MEM) {
		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, node_nums); // [39:8]
	}
	resource->flags |= IORESOURCE_STORED;
	sprintf(buf, " <node %x link %x>",
		nodeid, link_num);
	report_resource_stored(dev, resource, buf);
}

/**
 * I tried to reuse the resource allocation code in set_resource()
 * but it is too difficult to deal with the resource allocation magic.
 */

static void create_vga_resource(device_t dev, unsigned nodeid)
{
	struct bus *link;

	/* Find out on which link the VGA card is connected;
	 * we only deal with the 'first' VGA card */
	for (link = dev->link_list; link; link = link->next) {
		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
#if CONFIG_MULTIPLE_VGA_ADAPTERS
			extern device_t vga_pri; // the primary vga device, defined in device.c
			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
				link->secondary, link->subordinate);
			/* We need to make sure the vga_pri is under the link */
			if ((vga_pri->bus->secondary >= link->secondary) &&
			    (vga_pri->bus->secondary <= link->subordinate))
#endif
			break;
		}
	}

	/* no VGA card installed */
	if (link == NULL)
		return;

	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, sblink);
	set_vga_enable_reg(nodeid, sblink);
}

static void set_resources(device_t dev)
{
	unsigned nodeid;
	struct bus *bus;
	struct resource *res;

	/* Find the nodeid */
	nodeid = amdfam16_nodeid(dev);

	create_vga_resource(dev, nodeid); //TODO: do we need this?

	/* Set each resource we have found */
	for (res = dev->resource_list; res; res = res->next) {
		set_resource(dev, res, nodeid);
	}

	for (bus = dev->link_list; bus; bus = bus->next) {
		if (bus->children) {
			assign_resources(bus);
		}
	}
}

static void northbridge_init(struct device *dev)
{
}
#if 0 /* TODO: Check if needed. */
static unsigned scan_chains(device_t dev, unsigned max)
{
	unsigned nodeid;
	struct bus *link;
	device_t io_hub = NULL;
	u32 next_unitid = 0x18;
	nodeid = amdfam16_nodeid(dev);
	if (nodeid == 0) {
		for (link = dev->link_list; link; link = link->next) {
			//if (link->link_num == sblink) { /* devicetree puts the IO Hub on link_list[sblink] */
			if (link->link_num == 0) { /* devicetree puts the IO Hub on link_list[0] */
				io_hub = link->children;
				if (!io_hub || !io_hub->enabled) {
					die("I can't find the IO Hub, or IO Hub not enabled, please check the device tree.\n");
				}
				/* Now that nothing is overlapping it is safe to scan the children. */
				max = pci_scan_bus(link, 0x00, ((next_unitid - 1) << 3) | 7, 0);
			}
		}
	}
	return max;
}
#endif
static struct device_operations northbridge_operations = {
	.read_resources = read_resources,
	.set_resources = set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = northbridge_init,
	//.scan_bus = scan_chains, /* TODO: */
	.enable = 0,
	.ops_pci = 0,
};

static const struct pci_driver family16_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_16H_MODEL_000F_NB_HT,
};

static const struct pci_driver family10_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_10H_NB_HT,
};

struct chip_operations northbridge_amd_agesa_family16kb_ops = {
	CHIP_NAME("AMD FAM16 Northbridge")
	.enable_dev = 0,
};

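/*
 * Scan the F1 MMIO (0x80-0xb8) and I/O (0xc0-0xd8) base/limit pairs and
 * claim any ranges that earlier init code already enabled, so the
 * allocator will not hand the same register pairs out again.
 */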
static void domain_read_resources(device_t dev)
{
	unsigned reg;

	/* Find the already assigned resource pairs */
	get_fx_devs();
	for (reg = 0x80; reg <= 0xd8; reg += 0x08) {
		u32 base, limit;
		base = f1_read_config32(reg);
		limit = f1_read_config32(reg + 0x04);
		/* Is this register allocated? */
		if ((base & 3) != 0) {
			unsigned nodeid, reg_link;
			device_t reg_dev;
			if (reg < 0xc0) { // mmio
				nodeid = (limit & 0xf) + (base&0x30);
			} else { // io
				nodeid = (limit & 0xf) + ((base>>4)&0x30);
			}
			reg_link = (limit >> 4) & 7;
			reg_dev = __f0_dev[nodeid];
			if (reg_dev) {
				/* Reserve the resource */
				struct resource *res;
				res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
				if (res) {
					res->flags = 1;
				}
			}
		}
	}
	/* FIXME: do we need to check the extended config space?
	   We don't trust the preset value very much. */

#if !CONFIG_PCI_64BIT_PREF_MEM
	pci_domain_read_resources(dev);

#else
	struct bus *link;
	struct resource *resource;
	for (link = dev->link_list; link; link = link->next) {
		/* Initialize the system wide io space constraints */
		resource = new_resource(dev, 0|(link->link_num<<2));
		resource->base = 0x400;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO;

		/* Initialize the system wide prefetchable memory resources constraints */
		resource = new_resource(dev, 1|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

		/* Initialize the system wide memory resources constraints */
		resource = new_resource(dev, 2|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM;
	}
#endif
}

extern u8 acpi_slp_type;

static void domain_enable_resources(device_t dev)
{
	u32 val;
#if CONFIG_HAVE_ACPI_RESUME
	if (acpi_slp_type == 3)
		agesawrapper_fchs3laterestore();
#endif

	/* Must be called after PCI enumeration and resource allocation */
	printk(BIOS_DEBUG, "\nFam16 - domain_enable_resources: AmdInitMid.\n");
#if CONFIG_HAVE_ACPI_RESUME
	if (acpi_slp_type != 3) {
		printk(BIOS_DEBUG, "agesawrapper_amdinitmid ");
		val = agesawrapper_amdinitmid ();
		if (val)
			printk(BIOS_DEBUG, "error level: %x \n", val);
		else
			printk(BIOS_DEBUG, "passed.\n");
	}
#else
	printk(BIOS_DEBUG, "agesawrapper_amdinitmid ");
	val = agesawrapper_amdinitmid ();
	if (val)
		printk(BIOS_DEBUG, "error level: %x \n", val);
	else
		printk(BIOS_DEBUG, "passed.\n");
#endif

	printk(BIOS_DEBUG, " ader - leaving domain_enable_resources.\n");
}

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
struct hw_mem_hole_info {
	unsigned hole_startk;
	int node_id;
};
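/*
 * Look for a hardware memory hole below 4GB: first check each node's DRAM
 * hole register (F1x0F0), then fall back to detecting a gap between
 * consecutive nodes' base/limit ranges.
 */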
static struct hw_mem_hole_info get_hw_mem_hole_info(void)
{
	struct hw_mem_hole_info mem_hole;
	int i;
	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
	mem_hole.node_id = -1;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		u32 hole;
		d = get_dram_base_mask(i);
		if (!(d.mask & 1)) continue; // no memory on this node
		hole = pci_read_config32(__f1_dev[i], 0xf0);
		if (hole & 2) { // we found the hole
			mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
			mem_hole.node_id = i; // record the node number with the hole
			break; // only one hole
		}
	}
	/* Double-check: if the base/limit registers were set up with a gap
	 * instead of the hole register, derive hole_startk from that gap. */
	if (mem_hole.node_id == -1) {
		resource_t limitk_pri = 0;
		for (i = 0; i < node_nums; i++) {
			dram_base_mask_t d;
			resource_t base_k, limit_k;
			d = get_dram_base_mask(i);
			if (!(d.base & 1)) continue;
			base_k = ((resource_t)(d.base & 0x1fffff00)) << 9;
			if (base_k > 4 * 1024 * 1024) break; // no need to check above 4G
			if (limitk_pri != base_k) { // we found the hole
				mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
				mem_hole.node_id = i;
				break; // only one hole
			}
			limit_k = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;
			limitk_pri = limit_k;
		}
	}
	return mem_hole;
}
#endif

#define ONE_MB_SHIFT 20

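/*
 * Size the UMA frame buffer from total system memory, following the UMA
 * sizing table quoted in the comment below, and carve it out of the top
 * of below-4GB DRAM (TOP_MEM).
 */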
static void setup_uma_memory(void)
{
#if CONFIG_GFXUMA
	uint32_t topmem = (uint32_t) bsp_topmem();
	uint32_t sys_mem;

	/* refer to UMA Size Consideration in Family16h BKDG. */
	/* Please reference MemNGetUmaSizeOR () */
	/*
	 *     Total system memory   UMASize
	 *     >= 2G                 512M
	 *     >= 1G                 256M
	 *     < 1G                  64M
	 */
	sys_mem = topmem + (16 << ONE_MB_SHIFT); // Ignore 16MB allocated for C6 when finding UMA size
	if ((bsp_topmem2()>>32) || (sys_mem >= 2048 << ONE_MB_SHIFT)) {
		uma_memory_size = 512 << ONE_MB_SHIFT;
	} else if (sys_mem >= 1024 << ONE_MB_SHIFT) {
		uma_memory_size = 256 << ONE_MB_SHIFT;
	} else {
		uma_memory_size = 64 << ONE_MB_SHIFT;
	}
	uma_memory_base = topmem - uma_memory_size; /* TOP_MEM1 */

	printk(BIOS_INFO, "%s: uma size 0x%08llx, memory start 0x%08llx\n",
			__func__, uma_memory_size, uma_memory_base);

	/* TODO: TOP_MEM2 */
#endif
}

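/*
 * Report every node's DRAM as ram_resource()s, punching out the legacy
 * VGA hole (0xa0000-0xbffff) and the MMIO hole below 4GB, then assign
 * resources to the children of the domain.
 */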
static void domain_set_resources(device_t dev)
{
#if CONFIG_PCI_64BIT_PREF_MEM
	struct resource *io, *mem1, *mem2;
	struct resource *res;
#endif
	unsigned long mmio_basek;
	u32 pci_tolm;
	int i, idx;
	struct bus *link;
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	struct hw_mem_hole_info mem_hole;
	u32 reset_memhole = 1;
#endif

#if CONFIG_PCI_64BIT_PREF_MEM

	for (link = dev->link_list; link; link = link->next) {
		/* Now reallocate the pci resources memory with the
		 * highest addresses I can manage.
		 */
		mem1 = find_resource(dev, 1|(link->link_num<<2));
		mem2 = find_resource(dev, 2|(link->link_num<<2));

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);

		/* See if both resources have roughly the same limits */
		if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
			((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
		{
			/* If so place the one with the most stringent alignment first */
			if (mem2->align > mem1->align) {
				struct resource *tmp;
				tmp = mem1;
				mem1 = mem2;
				mem2 = tmp;
			}
			/* Now place the memory as high up as it will go */
			mem2->base = resource_max(mem2);
			mem1->limit = mem2->base - 1;
			mem1->base = resource_max(mem1);
		}
		else {
			/* Place the resources as high up as they will go */
			mem2->base = resource_max(mem2);
			mem1->base = resource_max(mem1);
		}

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);
	}

	for (res = dev->resource_list; res; res = res->next)
	{
		res->flags |= IORESOURCE_ASSIGNED;
		res->flags |= IORESOURCE_STORED;
		report_resource_stored(dev, res, "");
	}
#endif

	pci_tolm = 0xffffffffUL;
	for (link = dev->link_list; link; link = link->next) {
		pci_tolm = find_pci_tolm(link);
	}

	// FIXME handle interleaved nodes. If you fix this here, please fix
	// amdk8, too.
	mmio_basek = pci_tolm >> 10;
	/* Round mmio_basek to something the processor can support */
	mmio_basek &= ~((1 << 6) - 1);

	// FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
	// MMIO hole. If you fix this here, please fix amdk8, too.
	/* Round the mmio hole to 64M */
	mmio_basek &= ~((64*1024) - 1);

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	/* If the hardware memory hole was already set up in the raminit stage,
	 * compare mmio_basek and hole_startk: if mmio_basek is above hole_startk,
	 * use hole_startk as mmio_basek and leave the hole alone; otherwise
	 * reset the hole to mmio_basek.
	 */

	mem_hole = get_hw_mem_hole_info();

	// Use hole_startk as mmio_basek, and we don't need to reset the hole anymore
	if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
		mmio_basek = mem_hole.hole_startk;
		reset_memhole = 0;
	}
#endif

	idx = 0x10;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		resource_t basek, limitk, sizek; // 4 1T

		d = get_dram_base_mask(i);

		if (!(d.mask & 1)) continue;
		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow; we may lose 6 bits here
		limitk = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;

		sizek = limitk - basek;

		/* see if we need a hole from 0xa0000 to 0xbffff */
		if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
			ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
			idx += 0x10;
			basek = (8*64)+(16*16);
			sizek = limitk - ((8*64)+(16*16));

		}

		//printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);

		/* split the region to accommodate pci memory space */
		if ((basek < 4*1024*1024) && (limitk > mmio_basek)) {
			if (basek <= mmio_basek) {
				unsigned pre_sizek;
				pre_sizek = mmio_basek - basek;
				if (pre_sizek > 0) {
					ram_resource(dev, (idx | i), basek, pre_sizek);
					idx += 0x10;
					sizek -= pre_sizek;
					if (high_tables_base == 0) {
						/* Leave some space for ACPI, PIRQ and MP tables */
#if CONFIG_GFXUMA
						high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
#else
						high_tables_base = (mmio_basek * 1024) - HIGH_MEMORY_SIZE;
#endif
						high_tables_size = HIGH_MEMORY_SIZE;
						printk(BIOS_DEBUG, " split: %dK table at =%08llx\n",
							(u32)(high_tables_size / 1024), high_tables_base);
					}
				}
				basek = mmio_basek;
			}
			if ((basek + sizek) <= 4*1024*1024) {
				sizek = 0;
			}
			else {
				uint64_t topmem2 = bsp_topmem2();
				basek = 4*1024*1024;
				sizek = topmem2/1024 - basek;
			}
		}

		ram_resource(dev, (idx | i), basek, sizek);
		idx += 0x10;
		printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
				i, mmio_basek, basek, limitk);
		if (high_tables_base == 0) {
			/* Leave some space for ACPI, PIRQ and MP tables */
#if CONFIG_GFXUMA
			high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
#else
			high_tables_base = (limitk * 1024) - HIGH_MEMORY_SIZE;
#endif
			high_tables_size = HIGH_MEMORY_SIZE;
		}
	}

#if CONFIG_GFXUMA
	uma_resource(dev, 7, uma_memory_base >> 10, uma_memory_size >> 10);
#endif

	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			assign_resources(link);
		}
	}
}

static struct device_operations pci_domain_ops = {
	.read_resources = domain_read_resources,
	.set_resources = domain_set_resources,
	.enable_resources = domain_enable_resources,
	.init = NULL,
	.scan_bus = pci_domain_scan_bus,
	.ops_pci_bus = pci_bus_default_ops,
};

static void sysconf_init(device_t dev) // first node
{
	sblink = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
	node_nums = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1; //NodeCnt[2:0]
}

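/*
 * Make sure the northbridge device exposes bus links numbered
 * 0..total_links-1, allocating and chaining any links that the devicetree
 * did not declare.
 */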
static void add_more_links(device_t dev, unsigned total_links)
{
	struct bus *link, *last = NULL;
	int link_num;

	for (link = dev->link_list; link; link = link->next)
		last = link;

	if (last) {
		int links = total_links - last->link_num;
		link_num = last->link_num;
		if (links > 0) {
			link = malloc(links*sizeof(*link));
			if (!link)
				die("Couldn't allocate more links!\n");
			memset(link, 0, links*sizeof(*link));
			last->next = link;
		}
	}
	else {
		link_num = -1;
		link = malloc(total_links*sizeof(*link));
		if (!link)
			die("Couldn't allocate more links!\n");
		memset(link, 0, total_links*sizeof(*link));
		dev->link_list = link;
	}

	for (link_num = link_num + 1; link_num < total_links; link_num++) {
		link->link_num = link_num;
		link->dev = dev;
		link->next = link + 1;
		last = link;
		link = link->next;
	}
	last->next = NULL;
}

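/*
 * Enumerate the CPU cores: make sure the node PCI devices sit on bus
 * CONFIG_CBB, read the core and sibling counts from CPUID and the
 * northbridge, and create one CPU device per core with an APIC ID that
 * matches the AGESA numbering (see the comment in the inner loop).
 */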
static u32 cpu_bus_scan(device_t dev, u32 max)
{
	struct bus *cpu_bus;
	device_t dev_mc;
#if CONFIG_CBB
	device_t pci_domain;
#endif
	int i, j;
	int coreid_bits;
	int core_max = 0;
	unsigned ApicIdCoreIdSize;
	unsigned core_nums;
	int siblings = 0;
	unsigned int family;

#if CONFIG_CBB
	dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
	if (dev_mc && dev_mc->bus) {
		printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
		pci_domain = dev_mc->bus->dev;
		if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
			printk(BIOS_DEBUG, "\n%s move to ", dev_path(dev_mc));
			dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
			printk(BIOS_DEBUG, "%s", dev_path(dev_mc));
		} else {
			printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
		}
		printk(BIOS_DEBUG, "\n");
	}
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
		if (dev_mc && dev_mc->bus) {
			printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
			pci_domain = dev_mc->bus->dev;
			if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
				if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
					printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
					dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
					printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
					while (dev_mc) {
						printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
						dev_mc->path.pci.devfn -= PCI_DEVFN(0x18, 0);
						printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
						dev_mc = dev_mc->sibling;
					}
				}
			}
		}
	}
#endif
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
		die("");
	}
	sysconf_init(dev_mc);
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
	if (node_nums > 32) { // need to put nodes 32 to 63 on bus 0xfe
		if (pci_domain->link_list && !pci_domain->link_list->next) {
			struct bus *extra_link = new_link(pci_domain);
			pci_domain->link_list->next = extra_link;
			extra_link->link_num = 1;
			extra_link->dev = pci_domain;
			extra_link->children = 0;
			printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
		}
		pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
	}
#endif

	/* Get Max Number of cores (MNC) */
	coreid_bits = (cpuid_ecx(AMD_CPUID_ASIZE_PCCOUNT) & 0x0000F000) >> 12;
	core_max = 1 << (coreid_bits & 0x000F); //mnc

	ApicIdCoreIdSize = ((cpuid_ecx(0x80000008)>>12) & 0xF);
	if (ApicIdCoreIdSize) {
		core_nums = (1 << ApicIdCoreIdSize) - 1;
	} else {
		core_nums = 3; //quad core
	}

	/* Find which cpus are present */
	cpu_bus = dev->link_list;
	for (i = 0; i < node_nums; i++) {
		device_t cdb_dev;
		unsigned busn, devn;
		struct bus *pbus;

		busn = CONFIG_CBB;
		devn = CONFIG_CDB + i;
		pbus = dev_mc->bus;
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
		if (i >= 32) {
			busn--;
			devn -= 32;
			pbus = pci_domain->link_list->next;
		}
#endif

		/* Find the cpu's pci device */
		cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		if (!cdb_dev) {
			/* If we are probing things in a weird order,
			 * ensure all of the cpu's pci devices are found.
			 */
			int fn;
			for (fn = 0; fn <= 5; fn++) { //FBDIMM?
				cdb_dev = pci_probe_dev(NULL, pbus,
					PCI_DEVFN(devn, fn));
			}
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		} else {
			/* OK, we need to set the links for that device;
			 * otherwise the devices under it will not be scanned.
			 */
			int linknum;
#if CONFIG_HT3_SUPPORT
			linknum = 8;
#else
			linknum = 4;
#endif
			add_more_links(cdb_dev, linknum);
		}

		family = cpuid_eax(1);
		family = (family >> 20) & 0xFF;
		if (family == 1) { //f10
			u32 dword;
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
			dword = pci_read_config32(cdb_dev, 0xe8);
			siblings = ((dword & BIT15) >> 13) | ((dword & (BIT13 | BIT12)) >> 12);
		} else if (family == 7) { //f16
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 5));
			if (cdb_dev && cdb_dev->enabled) {
				siblings = pci_read_config32(cdb_dev, 0x84);
				siblings &= 0xFF;
			}
		} else {
			siblings = 0; //default one core
		}
		int enable_node = cdb_dev && cdb_dev->enabled;
		printk(BIOS_SPEW, "%s family%xh, core_max=0x%x, core_nums=0x%x, siblings=0x%x\n",
			dev_path(cdb_dev), 0x0f + family, core_max, core_nums, siblings);

		for (j = 0; j <= siblings; j++) {
			extern CONST OPTIONS_CONFIG_TOPOLOGY ROMDATA TopologyConfiguration;
			u32 modules = TopologyConfiguration.PlatformNumberOfModules;
			u32 lapicid_start = 0;

			/*
			 * APIC ID calculation is tightly coupled with AGESA v5 code.
			 * This calculation MUST match the assignment calculation done
			 * in the LocalApicInitializationAtEarly() function.
			 * Also reference GetLocalApicIdForCore().
			 *
			 * Apply apic enumeration rules
			 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
			 * put the local-APICs at m..z
			 *
			 * This is needed because many IO-APIC devices only have 4 bits
			 * for their APIC id and therefore must reside at 0..15
			 */
#ifndef CFG_PLAT_NUM_IO_APICS /* defined in mainboard buildOpts.c */
#define CFG_PLAT_NUM_IO_APICS 3
#endif
			if ((node_nums * core_max) + CFG_PLAT_NUM_IO_APICS >= 0x10) {
				lapicid_start = (CFG_PLAT_NUM_IO_APICS - 1) / core_max;
				lapicid_start = (lapicid_start + 1) * core_max;
				printk(BIOS_SPEW, "lapicid_start=0x%x ", lapicid_start);
			}
			u32 apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (siblings + 1)) : j);
			printk(BIOS_SPEW, "node 0x%x core 0x%x apicid=0x%x\n",
				i, j, apic_id);

			device_t cpu = add_cpu_device(cpu_bus, apic_id, enable_node);
			if (cpu)
				amd_cpu_topology(cpu, i, j);
		} //j
	}
	return max;
}

static void cpu_bus_init(device_t dev)
{
	initialize_cpus(dev->link_list);
}

static void cpu_bus_noop(device_t dev)
{
}

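/* Reserve the PCI MMCONF window so it shows up in the memory map. */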
static void cpu_bus_read_resources(device_t dev)
{
#if CONFIG_MMCONF_SUPPORT
	struct resource *resource = new_resource(dev, 0xc0010058);
	resource->base = CONFIG_MMCONF_BASE_ADDRESS;
	resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096 * 256;
	resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
#endif
}

static void cpu_bus_set_resources(device_t dev)
{
	struct resource *resource = find_resource(dev, 0xc0010058);
	if (resource) {
		report_resource_stored(dev, resource, " <mmconfig>");
	}
	pci_dev_set_resources(dev);
}

static struct device_operations cpu_bus_ops = {
	.read_resources = cpu_bus_read_resources,
	.set_resources = cpu_bus_set_resources,
	.enable_resources = cpu_bus_noop,
	.init = cpu_bus_init,
	.scan_bus = cpu_bus_scan,
};

static void root_complex_enable_dev(struct device *dev)
{
	static int done = 0;

	/* Do not delay UMA setup, as a device on the PCI bus may evaluate
	   the global uma_memory variables already in its enable function. */
	if (!done) {
		setup_bsp_ramtop();
		setup_uma_memory();
		done = 1;
	}

	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

struct chip_operations northbridge_amd_agesa_family16kb_root_complex_ops = {
	CHIP_NAME("AMD FAM16 Root Complex")
	.enable_dev = root_complex_enable_dev,
};

/*********************************************************************
 * Change the vendor / device IDs to match the generic VBIOS header. *
 *********************************************************************/
u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev = vendev;

	switch (vendev) {
	case 0x10029830:
	case 0x10029831:
	case 0x10029832:
	case 0x10029833:
	case 0x10029834:
	case 0x10029835:
	case 0x10029836:
	case 0x10029837:
	case 0x10029838:
	case 0x10029839:
	case 0x1002983A:
	case 0x1002983D:
		new_vendev = 0x10029830; // This is the default value in AMD-generated VBIOS
		break;
	default:
		break;
	}

	if (vendev != new_vendev)
		printk(BIOS_NOTICE, "Mapping PCI device %8x to %8x\n", vendev, new_vendev);

	return new_vendev;
}