/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2012 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
19
20#include <console/console.h>
21#include <arch/io.h>
22#include <stdint.h>
23#include <device/device.h>
24#include <device/pci.h>
25#include <device/pci_ids.h>
26#include <device/hypertransport.h>
27#include <stdlib.h>
28#include <string.h>
29#include <bitops.h>
30#include <cpu/cpu.h>
31#include <cbmem.h>
32
33#include <cpu/x86/lapic.h>
34
35#include <Porting.h>
36#include <AGESA.h>
37#include <Options.h>
38#include <Topology.h>
39#include <cpu/amd/amdfam15.h>
40#include <cpuRegisters.h>
41#include "agesawrapper.h"
42#include "root_complex/chip.h"
43#include "northbridge.h"
44#include "chip.h"
45
46#define MAX_NODE_NUMS (MAX_NODES * MAX_DIES)
47
48#if (defined CONFIG_EXT_CONF_SUPPORT) && CONFIG_EXT_CONF_SUPPORT == 1
49#error CONFIG_EXT_CONF_SUPPORT == 1 not support anymore!
50#endif
51
/* Packed copy of one node's F1 DRAM base/limit register pair.
 * Address bits [47:27] are stored at bits [28:8] of each field;
 * bit 0 of .mask carries the DRAM-enable bit (see get_dram_base_mask). */
typedef struct dram_base_mask {
	u32 base; //[47:27] at [28:8]
	u32 mask; //[47:27] at [28:8] and enable at bit 0
} dram_base_mask_t;
56
57static unsigned node_nums;
58static unsigned sblink;
59static device_t __f0_dev[MAX_NODE_NUMS];
60static device_t __f1_dev[MAX_NODE_NUMS];
61static device_t __f2_dev[MAX_NODE_NUMS];
62static device_t __f4_dev[MAX_NODE_NUMS];
63static unsigned fx_devs = 0;
64
/*
 * Read the DRAM base/limit registers for @nodeid from node 0's F1 config
 * space (0x40/0x44 low parts, 0x140/0x144 high parts) and repack them into
 * a dram_base_mask_t: address bits [47:27] at bits [28:8], with the
 * DRAM-enable bit kept in bit 0 of .mask.
 */
static dram_base_mask_t get_dram_base_mask(u32 nodeid)
{
	device_t dev;
	dram_base_mask_t d;
	dev = __f1_dev[0];
	u32 temp;
	/* DRAM limit low register: address [39:24] at [31:16]. */
	temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask = ((temp & 0xfff80000)>>(8+3)); // mask out DramMask [26:24] too
	/* DRAM limit high register: address [47:40] at [7:0]. */
	temp = pci_read_config32(dev, 0x144 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
	d.mask |= temp<<21;
	temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask |= (temp & 1); // enable bit
	d.base = ((temp & 0xfff80000)>>(8+3)); // mask out DramBase [26:24) too
	temp = pci_read_config32(dev, 0x140 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
	d.base |= temp<<21;
	return d;
}
82
/*
 * Program one F1 I/O base/limit register pair on every node so that the
 * I/O range [io_min, io_max] is routed to (nodeid, linkn).
 * @reg is the offset of the base register; the limit lives at reg+4.
 * The limit is written first so the window only becomes active once the
 * base register's enable bits (the low `3`) are written.
 * NOTE(review): the `dev` parameter is currently unused except by the
 * disabled VGA/ISA code below.
 */
static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
			u32 io_min, u32 io_max)
{
	u32 i;
	u32 tempreg;
	/* io range allocation */
	tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
	for (i=0; i<node_nums; i++)
		pci_write_config32(__f1_dev[i], reg+4, tempreg);
	tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
#if 0
	// FIXME: can we use VGA reg instead?
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
		printk(BIOS_SPEW, "%s, enabling legacy VGA IO forwarding for %s link %s\n",
				__func__, dev_path(dev), link);
		tempreg |= PCI_IO_BASE_VGA_EN;
	}
	if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
		tempreg |= PCI_IO_BASE_NO_ISA;
	}
#endif
	for (i=0; i<node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}
107
108static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
109{
110 u32 i;
111 u32 tempreg;
112 /* io range allocation */
113 tempreg = (nodeid&0xf) | (linkn<<4) | (mmio_max&0xffffff00); //limit
114 for (i=0; i<nodes; i++)
115 pci_write_config32(__f1_dev[i], reg+4, tempreg);
116 tempreg = 3 | (nodeid & 0x30) | (mmio_min&0xffffff00);
117 for (i=0; i<node_nums; i++)
118 pci_write_config32(__f1_dev[i], reg, tempreg);
119}
120
/*
 * Return the PCI device for config function @fn of node @nodeid.
 * When 64 nodes are possible, nodes 32..63 live on bus CONFIG_CBB-1
 * at device numbers shifted down by 32.
 */
static device_t get_node_pci(u32 nodeid, u32 fn)
{
#if MAX_NODE_NUMS == 64
	if (nodeid < 32) {
		return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
	} else {
		return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
	}
#else
	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
#endif
}
133
134static void get_fx_devs(void)
135{
136 int i;
137 for (i = 0; i < MAX_NODE_NUMS; i++) {
138 __f0_dev[i] = get_node_pci(i, 0);
139 __f1_dev[i] = get_node_pci(i, 1);
140 __f2_dev[i] = get_node_pci(i, 2);
141 __f4_dev[i] = get_node_pci(i, 4);
142 if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
143 fx_devs = i+1;
144 }
145 if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
146 die("Cannot find 0:0x18.[0|1]\n");
147 }
148 printk(BIOS_DEBUG, "fx_devs=0x%x\n", fx_devs);
149}
150
151static u32 f1_read_config32(unsigned reg)
152{
153 if (fx_devs == 0)
154 get_fx_devs();
155 return pci_read_config32(__f1_dev[0], reg);
156}
157
158static void f1_write_config32(unsigned reg, u32 value)
159{
160 int i;
161 if (fx_devs == 0)
162 get_fx_devs();
163 for(i = 0; i < fx_devs; i++) {
164 device_t dev;
165 dev = __f1_dev[i];
166 if (dev && dev->enabled) {
167 pci_write_config32(dev, reg, value);
168 }
169 }
170}
171
/*
 * Derive the node id of a northbridge PCI device from its device number.
 * With 64 possible nodes, devices on the secondary node bus (CONFIG_CBB-1)
 * map to node ids 32..63.
 */
static u32 amdfam15_nodeid(device_t dev)
{
#if MAX_NODE_NUMS == 64
	unsigned busn;
	busn = dev->bus->secondary;
	if (busn != CONFIG_CBB) {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
	} else {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
	}

#else
	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
#endif
}
187
188static void set_vga_enable_reg(u32 nodeid, u32 linkn)
189{
190 u32 val;
191
192 val = 1 | (nodeid<<4) | (linkn<<12);
193 /* it will routing
194 * (1)mmio 0xa0000:0xbffff
195 * (2)io 0x3b0:0x3bb, 0x3c0:0x3df
196 */
197 f1_write_config32(0xf4, val);
198
199}
200
/**
 * Check whether F1 routing register @reg can be used for the target
 * (goal_dev, goal_nodeid, goal_link).
 *
 * Scans every node's F0 device for a resource already keyed on @reg;
 * the post-loop values of `nodeid` and `link` (one past the match) are
 * used to identify which target currently owns the register.
 *
 * @return
 * @retval 2  resource does not exist, usable
 * @retval 0  resource exists, not usable
 * @retval 1  resource exists and was allocated to this target before
 */
static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
			unsigned goal_link)
{
	struct resource *res;
	unsigned nodeid, link = 0;
	int result;
	res = 0;
	for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
		device_t dev;
		dev = __f0_dev[nodeid];
		if (!dev)
			continue;
		for (link = 0; !res && (link < 8); link++) {
			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
		}
	}
	result = 2;
	if (res) {
		result = 0;
		/* Both loop counters were incremented once past the hit. */
		if ((goal_link == (link - 1)) &&
			(goal_nodeid == (nodeid - 1)) &&
			(res->flags <= 1)) {
			result = 1;
		}
	}
	return result;
}
234
235static struct resource *amdfam15_find_iopair(device_t dev, unsigned nodeid, unsigned link)
236{
237 struct resource *resource;
238 u32 free_reg, reg;
239 resource = 0;
240 free_reg = 0;
241 for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
242 int result;
243 result = reg_useable(reg, dev, nodeid, link);
244 if (result == 1) {
245 /* I have been allocated this one */
246 break;
247 }
248 else if (result > 1) {
249 /* I have a free register pair */
250 free_reg = reg;
251 }
252 }
253 if (reg > 0xd8) {
254 reg = free_reg; // if no free, the free_reg still be 0
255 }
256
257 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
258
259 return resource;
260}
261
262static struct resource *amdfam15_find_mempair(device_t dev, u32 nodeid, u32 link)
263{
264 struct resource *resource;
265 u32 free_reg, reg;
266 resource = 0;
267 free_reg = 0;
268 for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
269 int result;
270 result = reg_useable(reg, dev, nodeid, link);
271 if (result == 1) {
272 /* I have been allocated this one */
273 break;
274 }
275 else if (result > 1) {
276 /* I have a free register pair */
277 free_reg = reg;
278 }
279 }
280 if (reg > 0xb8) {
281 reg = free_reg;
282 }
283
284 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
285 return resource;
286}
287
288static void amdfam15_link_read_bases(device_t dev, u32 nodeid, u32 link)
289{
290 struct resource *resource;
291
292 /* Initialize the io space constraints on the current bus */
293 resource = amdfam15_find_iopair(dev, nodeid, link);
294 if (resource) {
295 u32 align;
296 align = log2(HT_IO_HOST_ALIGN);
297 resource->base = 0;
298 resource->size = 0;
299 resource->align = align;
300 resource->gran = align;
301 resource->limit = 0xffffUL;
302 resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
303 }
304
305 /* Initialize the prefetchable memory constraints on the current bus */
306 resource = amdfam15_find_mempair(dev, nodeid, link);
307 if (resource) {
308 resource->base = 0;
309 resource->size = 0;
310 resource->align = log2(HT_MEM_HOST_ALIGN);
311 resource->gran = log2(HT_MEM_HOST_ALIGN);
312 resource->limit = 0xffffffffffULL;
313 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
314 resource->flags |= IORESOURCE_BRIDGE;
315 }
316
317 /* Initialize the memory constraints on the current bus */
318 resource = amdfam15_find_mempair(dev, nodeid, link);
319 if (resource) {
320 resource->base = 0;
321 resource->size = 0;
322 resource->align = log2(HT_MEM_HOST_ALIGN);
323 resource->gran = log2(HT_MEM_HOST_ALIGN);
324 resource->limit = 0xffffffffffULL;
325 resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
326 }
327
328}
329
330static void read_resources(device_t dev)
331{
332 u32 nodeid;
333 struct bus *link;
334
335 nodeid = amdfam15_nodeid(dev);
336 for (link = dev->link_list; link; link = link->next) {
337 if (link->children) {
338 amdfam15_link_read_bases(dev, nodeid, link->link_num);
339 }
340 }
341}
342
/*
 * Store one previously-allocated F1 routing resource into hardware.
 * Only resources created by the amdfam15_find_* helpers (index >= 0x1000)
 * are handled; the register offset and link are recovered from the
 * resource index and programmed via set_io_addr_reg/set_mmio_addr_reg.
 */
static void set_resource(device_t dev, struct resource *resource, u32 nodeid)
{
	resource_t rbase, rend;
	unsigned reg, link_num;
	char buf[50];

	/* Make certain the resource has actually been set */
	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
		return;
	}

	/* If I have already stored this resource don't worry about it */
	if (resource->flags & IORESOURCE_STORED) {
		return;
	}

	/* Only handle PCI memory and IO resources */
	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
		return;

	/* Ensure I am actually looking at a resource of function 1 */
	if ((resource->index & 0xffff) < 0x1000) {
		return;
	}
	/* Get the base address */
	rbase = resource->base;

	/* Get the limit (rounded up) */
	rend  = resource_end(resource);

	/* Get the register and link */
	reg  = resource->index & 0xfff; // 4k
	link_num = IOINDEX_LINK(resource->index);

	if (resource->flags & IORESOURCE_IO) {
		set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
	}
	else if (resource->flags & IORESOURCE_MEM) {
		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, node_nums) ;// [39:8]
	}
	resource->flags |= IORESOURCE_STORED;
	/* buf[50] is ample here: the format expands to well under 50 bytes
	 * for any node/link value (hex, small numbers). */
	sprintf(buf, " <node %x link %x>",
			nodeid, link_num);
	report_resource_stored(dev, resource, buf);
}
388
389/**
390 * I tried to reuse the resource allocation code in set_resource()
391 * but it is too difficult to deal with the resource allocation magic.
392 */
393
/*
 * Find the first downstream link that forwards legacy VGA cycles and
 * enable VGA routing to it. Only the 'first' VGA card is handled.
 * Note: with CONFIG_MULTIPLE_VGA_ADAPTERS the braceless `if` below
 * guards the `break`, so the loop only stops when vga_pri sits within
 * the link's bus range.
 */
static void create_vga_resource(device_t dev, unsigned nodeid)
{
	struct bus *link;

	/* find out which link the VGA card is connected,
	 * we only deal with the 'first' vga card */
	for (link = dev->link_list; link; link = link->next) {
		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
#if CONFIG_MULTIPLE_VGA_ADAPTERS == 1
			extern device_t vga_pri; // the primary vga device, defined in device.c
			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
				link->secondary,link->subordinate);
			/* We need to make sure the vga_pri is under the link */
			if((vga_pri->bus->secondary >= link->secondary ) &&
				(vga_pri->bus->secondary <= link->subordinate )
			)
#endif
			break;
		}
	}

	/* no VGA card installed */
	if (link == NULL)
		return;

	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, sblink);
	set_vga_enable_reg(nodeid, sblink);
}
422
423static void set_resources(device_t dev)
424{
425 unsigned nodeid;
426 struct bus *bus;
427 struct resource *res;
428
429 /* Find the nodeid */
430 nodeid = amdfam15_nodeid(dev);
431
432 create_vga_resource(dev, nodeid); //TODO: do we need this?
433
434 /* Set each resource we have found */
435 for (res = dev->resource_list; res; res = res->next) {
436 set_resource(dev, res, nodeid);
437 }
438
439 for (bus = dev->link_list; bus; bus = bus->next) {
440 if (bus->children) {
441 assign_resources(bus);
442 }
443 }
444}
445
/* No device-specific init is required for the northbridge HT function. */
static void northbridge_init(struct device *dev)
{
}
449
/* Device operations for the northbridge HT function (bus scan and
 * per-device enable hooks are intentionally unused). */
static struct device_operations northbridge_operations = {
	.read_resources	  = read_resources,
	.set_resources	  = set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init		  = northbridge_init,
	.scan_bus	  = 0,		/*scan_chains, */
	.enable		  = 0,
	.ops_pci	  = 0,
};
459
/* Bind the same northbridge operations to both the Fam15h and Fam10h
 * HT-function PCI IDs. */
static const struct pci_driver family15_northbridge __pci_driver = {
	.ops	= &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_15H_MODEL_001F_NB_HT,
};

static const struct pci_driver family10_northbridge __pci_driver = {
	.ops	= &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_10H_NB_HT,
};

/* Chip operations entry for this northbridge (no enable_dev hook). */
struct chip_operations northbridge_amd_agesa_family15tn_ops = {
	CHIP_NAME("AMD FAM15 Northbridge")
	.enable_dev = 0,
};
476
/*
 * Read the PCI domain's resources. First mark any F1 base/limit register
 * pairs that firmware already programmed as reserved (flags = 1, so
 * reg_useable() reports them as reusable by their owner), then fall
 * through to the generic domain resource handling.
 */
static void domain_read_resources(device_t dev)
{
	unsigned reg;

	/* Find the already assigned resource pairs */
	get_fx_devs();
	for (reg = 0x80; reg <= 0xd8; reg+= 0x08) {
		u32 base, limit;
		base  = f1_read_config32(reg);
		limit = f1_read_config32(reg + 0x04);
		/* Is this register allocated? */
		if ((base & 3) != 0) {
			unsigned nodeid, reg_link;
			device_t reg_dev;
			/* Recover the destination node from the register
			 * encoding; MMIO and I/O pairs pack the high node
			 * bits differently. */
			if (reg<0xc0) { // mmio
				nodeid = (limit & 0xf) + (base&0x30);
			} else { // io
				nodeid = (limit & 0xf) + ((base>>4)&0x30);
			}
			reg_link = (limit >> 4) & 7;
			reg_dev = __f0_dev[nodeid];
			if (reg_dev) {
				/* Reserve the resource  */
				struct resource *res;
				res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
				if (res) {
					res->flags = 1;
				}
			}
		}
	}
	/* FIXME: do we need to check extend conf space?
	   I don't believe that much preset value */

#if CONFIG_PCI_64BIT_PREF_MEM == 0
	pci_domain_read_resources(dev);

#else
	struct bus *link;
	struct resource *resource;
	for (link=dev->link_list; link; link = link->next) {
		/* Initialize the system wide io space constraints */
		resource = new_resource(dev, 0|(link->link_num<<2));
		resource->base  = 0x400;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO;

		/* Initialize the system wide prefetchable memory resources constraints */
		resource = new_resource(dev, 1|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

		/* Initialize the system wide memory resources constraints */
		resource = new_resource(dev, 2|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM;
	}
#endif
}
536
537extern u8 acpi_slp_type;
538
/*
 * Domain enable hook: restore FCH state on S3 resume, otherwise run
 * AGESA's AmdInitMid (must happen after PCI enumeration and resource
 * allocation). The two preprocessor branches duplicate the AmdInitMid
 * call; the resume build simply skips it when waking from S3.
 */
static void domain_enable_resources(device_t dev)
{
	u32 val;
#if CONFIG_HAVE_ACPI_RESUME
	if (acpi_slp_type == 3)
		agesawrapper_fchs3laterestore();
#endif

	/* Must be called after PCI enumeration and resource allocation */
	printk(BIOS_DEBUG, "\nFam15 - domain_enable_resources: AmdInitMid.\n");
#if CONFIG_HAVE_ACPI_RESUME
	if (acpi_slp_type != 3) {
		printk(BIOS_DEBUG, "agesawrapper_amdinitmid ");
		val = agesawrapper_amdinitmid ();
		if (val)
			printk(BIOS_DEBUG, "error level: %x \n", val);
		else
			printk(BIOS_DEBUG, "passed.\n");
	}
#else
	printk(BIOS_DEBUG, "agesawrapper_amdinitmid ");
	val = agesawrapper_amdinitmid ();
	if (val)
		printk(BIOS_DEBUG, "error level: %x \n", val);
	else
		printk(BIOS_DEBUG, "passed.\n");
#endif

	printk(BIOS_DEBUG, "  ader - leaving domain_enable_resources.\n");
}
569
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Location of the hardware memory hole: start address in KiB and the
 * node it belongs to (-1 when no hole was found). */
struct hw_mem_hole_info {
	unsigned hole_startk;
	int node_id;
};
/*
 * Determine where raminit placed the hardware memory hole. First look
 * for an explicitly enabled hole in each node's F1 0xf0 register; if
 * none is set, scan for a discontinuity between consecutive nodes'
 * DRAM base/limit ranges below 4GiB and treat that as the hole.
 */
static struct hw_mem_hole_info get_hw_mem_hole_info(void)
{
	struct hw_mem_hole_info mem_hole;
	int i;
	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
	mem_hole.node_id = -1;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		u32 hole;
		d = get_dram_base_mask(i);
		if (!(d.mask & 1)) continue; // no memory on this node
		hole = pci_read_config32(__f1_dev[i], 0xf0);
		if (hole & 1) { // we find the hole
			mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
			mem_hole.node_id = i; // record the node No with hole
			break; // only one hole
		}
	}
	//We need to double check if there is speical set on base reg and limit reg are not continous instead of hole, it will find out it's hole_startk
	if (mem_hole.node_id == -1) {
		resource_t limitk_pri = 0;
		for (i=0; i<node_nums; i++) {
			dram_base_mask_t d;
			resource_t base_k, limit_k;
			d = get_dram_base_mask(i);
			if (!(d.base & 1)) continue;
			base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
			if (base_k > 4 *1024 * 1024) break; // don't need to go to check
			if (limitk_pri != base_k) { // we find the hole
				mem_hole.hole_startk = (unsigned)limitk_pri; // must beblow 4G
				mem_hole.node_id = i;
				break; //only one hole
			}
			limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
			limitk_pri = limit_k;
		}
	}
	return mem_hole;
}
#endif
615
616#if CONFIG_GFXUMA == 1
617extern uint64_t uma_memory_base, uma_memory_size;
zbao2c08f6a2012-07-02 15:32:58 +0800618#endif
619
/*
 * Assign the PCI domain's resources: derive the MMIO window base from
 * the top-of-low-memory, reconcile it with any hardware memory hole,
 * then report each node's DRAM ranges as RAM resources, punching out
 * the legacy VGA hole and the below-4G MMIO region, and carving space
 * for the high tables and (optionally) UMA graphics memory.
 */
static void domain_set_resources(device_t dev)
{
#if CONFIG_PCI_64BIT_PREF_MEM == 1
	struct resource *io, *mem1, *mem2;
	struct resource *res;
#endif
	unsigned long mmio_basek;
	u32 pci_tolm;
	int i, idx;
	struct bus *link;
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	struct hw_mem_hole_info mem_hole;
	u32 reset_memhole = 1;
#endif

#if CONFIG_PCI_64BIT_PREF_MEM == 1

	for (link = dev->link_list; link; link = link->next) {
		/* Now reallocate the pci resources memory with the
		 * highest addresses I can manage.
		 */
		mem1 = find_resource(dev, 1|(link->link_num<<2));
		mem2 = find_resource(dev, 2|(link->link_num<<2));

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);

		/* See if both resources have roughly the same limits */
		if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
			((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
		{
			/* If so place the one with the most stringent alignment first */
			if (mem2->align > mem1->align) {
				struct resource *tmp;
				tmp = mem1;
				mem1 = mem2;
				mem2 = tmp;
			}
			/* Now place the memory as high up as it will go */
			mem2->base = resource_max(mem2);
			mem1->limit = mem2->base - 1;
			mem1->base = resource_max(mem1);
		}
		else {
			/* Place the resources as high up as they will go */
			mem2->base = resource_max(mem2);
			mem1->base = resource_max(mem1);
		}

		printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem1->base, mem1->limit, mem1->size, mem1->align);
		printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
			mem2->base, mem2->limit, mem2->size, mem2->align);
	}

	/* NOTE(review): `&dev->resource_list` assigns the address of the
	 * head pointer to a struct resource *; this CONFIG_PCI_64BIT_PREF_MEM
	 * branch looks broken and is presumably never compiled — verify. */
	for (res = &dev->resource_list; res; res = res->next)
	{
		res->flags |= IORESOURCE_ASSIGNED;
		res->flags |= IORESOURCE_STORED;
		report_resource_stored(dev, res, "");
	}
#endif

	pci_tolm = 0xffffffffUL;
	for (link = dev->link_list; link; link = link->next) {
		pci_tolm = find_pci_tolm(link);
	}

	// FIXME handle interleaved nodes. If you fix this here, please fix
	// amdk8, too.
	mmio_basek = pci_tolm >> 10;
	/* Round mmio_basek to something the processor can support */
	mmio_basek &= ~((1 << 6) -1);

	// FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
	// MMIO hole. If you fix this here, please fix amdk8, too.
	/* Round the mmio hole to 64M */
	mmio_basek &= ~((64*1024) - 1);

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	/* if the hw mem hole is already set in raminit stage, here we will compare
	 * mmio_basek and hole_basek. if mmio_basek is bigger that hole_basek and will
	 * use hole_basek as mmio_basek and we don't need to reset hole.
	 * otherwise We reset the hole to the mmio_basek
	 */

	mem_hole = get_hw_mem_hole_info();

	// Use hole_basek as mmio_basek, and we don't need to reset hole anymore
	if ((mem_hole.node_id !=  -1) && (mmio_basek > mem_hole.hole_startk)) {
		mmio_basek = mem_hole.hole_startk;
		reset_memhole = 0;
	}
#endif

	idx = 0x10;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		resource_t basek, limitk, sizek; // 4 1T

		d = get_dram_base_mask(i);

		if (!(d.mask & 1)) continue;
		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lost 6 bit here
		limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;

		sizek = limitk - basek;

		/* see if we need a hole from 0xa0000 to 0xbffff */
		if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
			ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
			idx += 0x10;
			basek = (8*64)+(16*16);
			sizek = limitk - ((8*64)+(16*16));

		}

		//printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);

		/* split the region to accomodate pci memory space */
		if ((basek < 4*1024*1024 ) && (limitk > mmio_basek)) {
			if (basek <= mmio_basek) {
				unsigned pre_sizek;
				pre_sizek = mmio_basek - basek;
				if (pre_sizek>0) {
					ram_resource(dev, (idx | i), basek, pre_sizek);
					idx += 0x10;
					sizek -= pre_sizek;
#if CONFIG_WRITE_HIGH_TABLES==1
					if (high_tables_base==0) {
						/* Leave some space for ACPI, PIRQ and MP tables */
#if CONFIG_GFXUMA == 1
						high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
#else
						high_tables_base = (mmio_basek * 1024) - HIGH_MEMORY_SIZE;
#endif
						high_tables_size = HIGH_MEMORY_SIZE;
						printk(BIOS_DEBUG, " split: %dK table at =%08llx\n",
							(u32)(high_tables_size / 1024), high_tables_base);
					}
#endif
				}
				basek = mmio_basek;
			}
			if ((basek + sizek) <= 4*1024*1024) {
				sizek = 0;
			}
			else {
				basek = 4*1024*1024;
				sizek -= (4*1024*1024 - mmio_basek);
			}
		}

#if CONFIG_GFXUMA == 1
		/* Deduct uma memory before reporting because
		 * this is what the mtrr code expects */
		sizek -= uma_memory_size / 1024;
#endif
		ram_resource(dev, (idx | i), basek, sizek);
		idx += 0x10;
#if CONFIG_WRITE_HIGH_TABLES==1
		printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
				i, mmio_basek, basek, limitk);
		if (high_tables_base==0) {
			/* Leave some space for ACPI, PIRQ and MP tables */
#if CONFIG_GFXUMA == 1
			high_tables_base = uma_memory_base - HIGH_MEMORY_SIZE;
#else
			high_tables_base = (limitk * 1024) - HIGH_MEMORY_SIZE;
#endif
			high_tables_size = HIGH_MEMORY_SIZE;
		}
#endif
	}

#if CONFIG_GFXUMA
	uma_resource(dev, 7, uma_memory_base >> 10, uma_memory_size >> 10);
#endif

	for(link = dev->link_list; link; link = link->next) {
		if (link->children) {
			assign_resources(link);
		}
	}
}
807
/* Operations for the PCI domain device; config access method depends on
 * whether MMCONF is the default mechanism. */
static struct device_operations pci_domain_ops = {
	.read_resources	  = domain_read_resources,
	.set_resources	  = domain_set_resources,
	.enable_resources = domain_enable_resources,
	.init		  = NULL,
	.scan_bus	  = pci_domain_scan_bus,

#if CONFIG_MMCONF_SUPPORT_DEFAULT
	.ops_pci_bus	  = &pci_ops_mmconf,
#else
	.ops_pci_bus	  = &pci_cf8_conf1,
#endif
};
821
822static void sysconf_init(device_t dev) // first node
823{
824 sblink = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
825 node_nums = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1; //NodeCnt[2:0]
826}
827
828static void add_more_links(device_t dev, unsigned total_links)
829{
830 struct bus *link, *last = NULL;
831 int link_num;
832
833 for (link = dev->link_list; link; link = link->next)
834 last = link;
835
836 if (last) {
837 int links = total_links - last->link_num;
838 link_num = last->link_num;
839 if (links > 0) {
840 link = malloc(links*sizeof(*link));
841 if (!link)
842 die("Couldn't allocate more links!\n");
843 memset(link, 0, links*sizeof(*link));
844 last->next = link;
845 }
846 }
847 else {
848 link_num = -1;
849 link = malloc(total_links*sizeof(*link));
850 memset(link, 0, total_links*sizeof(*link));
851 dev->link_list = link;
852 }
853
854 for (link_num = link_num + 1; link_num < total_links; link_num++) {
855 link->link_num = link_num;
856 link->dev = dev;
857 link->next = link + 1;
858 last = link;
859 link = link->next;
860 }
861 last->next = NULL;
862}
863
/* dummy read_resources: local APICs own no allocatable resources. */
static void lapic_read_resources(device_t dev)
{
}

/* Operations attached to each local-APIC device created by cpu_bus_scan. */
static struct device_operations lapic_ops = {
	.read_resources	  = lapic_read_resources,
	.set_resources	  = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init		  = 0,
	.scan_bus	  = 0,
	.enable		  = 0,
	.ops_pci	  = 0,
};
878
/*
 * Scan the APIC cluster: relocate the node PCI devices onto bus
 * CONFIG_CBB when needed, read core/sibling counts from CPUID and
 * northbridge config space, and create/enable one APIC device per core.
 * The APIC id computed here MUST stay in sync with AGESA's
 * LocalApicInitializationAtEarly()/GetLocalApicIdForCore().
 */
static u32 cpu_bus_scan(device_t dev, u32 max)
{
	struct bus *cpu_bus;
	device_t dev_mc;
#if CONFIG_CBB
	device_t pci_domain;
#endif
	int i,j;
	int coreid_bits;
	int core_max = 0;
	unsigned ApicIdCoreIdSize;
	unsigned core_nums;
	int siblings = 0;
	unsigned int family;

#if CONFIG_CBB
	/* Move the node devices from bus 0 to bus CONFIG_CBB (0xff). */
	dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
	if (dev_mc && dev_mc->bus) {
		printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
		pci_domain = dev_mc->bus->dev;
		if (pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
			printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc));
			dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
			printk(BIOS_DEBUG, "%s",dev_path(dev_mc));
		} else {
			printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
		}
		printk(BIOS_DEBUG, "\n");
	}
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		/* Fall back to the conventional 0:18.0 location and shift
		 * the whole sibling chain down to devfn 0. */
		dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
		if (dev_mc && dev_mc->bus) {
			printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
			pci_domain = dev_mc->bus->dev;
			if (pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
				if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
					printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
					dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
					printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
					while (dev_mc) {
						printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
						dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
						printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
						dev_mc = dev_mc->sibling;
					}
				}
			}
		}
	}
#endif
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
		die("");
	}
	sysconf_init(dev_mc);
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
	if (node_nums>32) { // need to put node 32 to node 63 to bus 0xfe
		if (pci_domain->link_list && !pci_domain->link_list->next) {
			struct bus *new_link = new_link(pci_domain);
			pci_domain->link_list->next = new_link;
			new_link->link_num = 1;
			new_link->dev = pci_domain;
			new_link->children = 0;
			printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
		}
		pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
	}
#endif

	/* Get Max Number of cores(MNC) */
	coreid_bits = (cpuid_ecx(AMD_CPUID_ASIZE_PCCOUNT) & 0x0000F000) >> 12;
	core_max = 1 << (coreid_bits & 0x000F); //mnc

	ApicIdCoreIdSize = ((cpuid_ecx(0x80000008)>>12) & 0xF);
	if (ApicIdCoreIdSize) {
		core_nums = (1 << ApicIdCoreIdSize) - 1;
	} else {
		core_nums = 3; //quad core
	}

	/* Find which cpus are present */
	cpu_bus = dev->link_list;
	for (i = 0; i < node_nums; i++) {
		device_t cdb_dev, cpu;
		struct device_path cpu_path;
		unsigned busn, devn;
		struct bus *pbus;

		busn = CONFIG_CBB;
		devn = CONFIG_CDB + i;
		pbus = dev_mc->bus;
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
		if (i >= 32) {
			busn--;
			devn -= 32;
			pbus = pci_domain->link_list->next;
		}
#endif

		/* Find the cpu's pci device */
		cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		if (!cdb_dev) {
			/* If I am probing things in a weird order
			 * ensure all of the cpu's pci devices are found.
			 */
			int fn;
			for(fn = 0; fn <= 5; fn++) { //FBDIMM?
				cdb_dev = pci_probe_dev(NULL, pbus,
							PCI_DEVFN(devn, fn));
			}
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		} else {
			/* Ok, We need to set the links for that device.
			 * otherwise the device under it will not be scanned
			 */
			int linknum;
#if CONFIG_HT3_SUPPORT==1
			linknum = 8;
#else
			linknum = 4;
#endif
			add_more_links(cdb_dev, linknum);
		}

		/* Sibling count: F10h keeps it in F3 0xe8, F15h in F5 0x84.
		 * NOTE(review): family 1/6 are the extended-family nibbles
		 * from CPUID eax[27:20] for Fam10h/Fam15h — confirm. */
		family = cpuid_eax(1);
		family = (family >> 20) & 0xFF;
		if (family == 1) { //f10
			u32 dword;
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
			dword = pci_read_config32(cdb_dev, 0xe8);
			siblings = ((dword & BIT15) >> 13) | ((dword & (BIT13 | BIT12)) >> 12);
		} else if (family == 6) {//f15
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 5));
			if (cdb_dev && cdb_dev->enabled) {
				siblings = pci_read_config32(cdb_dev, 0x84);
				siblings &= 0xFF;
			}
		} else {
			siblings = 0; //default one core
		}
		printk(BIOS_SPEW, "%s family%xh, core_max=0x%x, core_nums=0x%x, siblings=0x%x\n",
				dev_path(cdb_dev), 0x0f + family, core_max, core_nums, siblings);

		for (j = 0; j <= siblings; j++ ) {
			extern CONST OPTIONS_CONFIG_TOPOLOGY ROMDATA TopologyConfiguration;
			u32 modules = TopologyConfiguration.PlatformNumberOfModules;
			u32 lapicid_start = 0;

			/* Build the cpu device path */
			cpu_path.type = DEVICE_PATH_APIC;
			/*
			 * APIC ID calculation is tightly coupled with AGESA v5 code.
			 * This calculation MUST match the assignment calculation done
			 * in LocalApicInitializationAtEarly() function.
			 * And reference GetLocalApicIdForCore()
			 *
			 * Apply apic enumeration rules
			 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
			 * put the local-APICs at m..z
			 *
			 * This is needed because many IO-APIC devices only have 4 bits
			 * for their APIC id and therefore must reside at 0..15
			 */
#ifndef CFG_PLAT_NUM_IO_APICS /* defined in mainboard buildOpts.c */
#define CFG_PLAT_NUM_IO_APICS 3
#endif
			if ((node_nums * core_max) + CFG_PLAT_NUM_IO_APICS >= 0x10) {
				lapicid_start = (CFG_PLAT_NUM_IO_APICS - 1) / core_max;
				lapicid_start = (lapicid_start + 1) * core_max;
				printk(BIOS_SPEW, "lpaicid_start=0x%x ", lapicid_start);
			}
			cpu_path.apic.apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (siblings + 1)) : j);
			printk(BIOS_SPEW, "node 0x%x core 0x%x apicid=0x%x\n",
					i, j, cpu_path.apic.apic_id);

			/* See if I can find the cpu */
			cpu = find_dev_path(cpu_bus, &cpu_path);
			/* Enable the cpu if I have the processor */
			if (cdb_dev && cdb_dev->enabled) {
				if (!cpu) {
					cpu = alloc_dev(cpu_bus, &cpu_path);
				}
				if (cpu) {
					cpu->enabled = 1;
				}
			}
			/* Disable the cpu if I don't have the processor */
			if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
				cpu->enabled = 0;
			}
			/* Report what I have done */
			if (cpu) {
				cpu->path.apic.node_id = i;
				cpu->path.apic.core_id = j;
				if (cpu->path.type == DEVICE_PATH_APIC) {
					cpu->ops = &lapic_ops;
				}
				printk(BIOS_DEBUG, "CPU: %s %s\n",
					dev_path(cpu), cpu->enabled?"enabled":"disabled");
			}
		} //j
	}
	return max;
}
1085
/* Bring up all CPUs found under the APIC cluster. */
static void cpu_bus_init(device_t dev)
{
	initialize_cpus(dev->link_list);
}
1090
/* No-op enable_resources hook for the CPU cluster. */
static void cpu_bus_noop(device_t dev)
{
}
1094
/* Reserve the MMCONF window (keyed on MSR 0xc0010058, the MMIO config
 * base register) so the allocator never hands it out. */
static void cpu_bus_read_resources(device_t dev)
{
#if CONFIG_MMCONF_SUPPORT
	struct resource *resource = new_resource(dev, 0xc0010058);
	resource->base = CONFIG_MMCONF_BASE_ADDRESS;
	resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
	resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
#endif
}
1105
1106static void cpu_bus_set_resources(device_t dev)
1107{
1108 struct resource *resource = find_resource(dev, 0xc0010058);
1109 if (resource) {
1110 report_resource_stored(dev, resource, " <mmconfig>");
1111 }
1112 pci_dev_set_resources(dev);
1113}
1114
/* Operations for the APIC cluster device. */
static struct device_operations cpu_bus_ops = {
	.read_resources	  = cpu_bus_read_resources,
	.set_resources	  = cpu_bus_set_resources,
	.enable_resources = cpu_bus_noop,
	.init		  = cpu_bus_init,
	.scan_bus	  = cpu_bus_scan,
};
1122
1123static void root_complex_enable_dev(struct device *dev)
1124{
1125 /* Set the operations if it is a special bus type */
1126 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1127 dev->ops = &pci_domain_ops;
1128 } else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1129 dev->ops = &cpu_bus_ops;
1130 }
1131}
1132
/* Chip operations for the root complex; binds domain/cluster ops above. */
struct chip_operations northbridge_amd_agesa_family15tn_root_complex_ops = {
	CHIP_NAME("AMD FAM15 Root Complex")
	.enable_dev = root_complex_enable_dev,
};