/* SPDX-License-Identifier: GPL-2.0-only */

#include <arch/ioapic.h>
#include <assert.h>
#include <console/console.h>
#include <cpu/x86/lapic.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/p2sb.h>
#include <post.h>
#include <soc/acpi.h>
#include <soc/cpu.h>
#include <soc/pm.h>
#include <soc/ramstage.h>
#include <soc/soc_util.h>
#include <stdlib.h>
#include <string.h>

/* C620 IOAPIC has 120 redirection entries */
#define C620_IOAPIC_REDIR_ENTRIES 120

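/*
 * Bookkeeping used while assigning the FSP IIO stack windows: resources are
 * grouped per stack into lists ordered by their alignment requirement.
 */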
struct pci_resource {
	struct device *dev;
	struct resource *res;
	struct pci_resource *next;
};

struct stack_dev_resource {
	uint8_t align;
	struct pci_resource *children;
	struct stack_dev_resource *next;
};

typedef enum {
	RES_TYPE_IO = 0,
	RES_TYPE_NONPREF_MEM,
	RES_TYPE_PREF_MEM,
	MAX_RES_TYPES
} ResType;

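/* Classify a resource as an I/O, non-prefetchable (MMIO32) or prefetchable window. */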
static ResType get_res_type(uint64_t flags)
{
	if (flags & IORESOURCE_IO)
		return RES_TYPE_IO;
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_PREFETCH) {
			printk(BIOS_DEBUG, "%s:%d flags: 0x%llx\n", __func__, __LINE__, flags);
			return RES_TYPE_PREF_MEM;
		}
		/* both 64-bit and 32-bit non-prefetchable resources use the below-4GB space */
		return RES_TYPE_NONPREF_MEM;
	}
	printk(BIOS_ERR, "Invalid resource type 0x%llx\n", flags);
	die("Invalid resource type");
}

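/* A resource still needs assignment unless it is fixed, reserved or already assigned. */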
static bool need_assignment(uint64_t flags)
{
	return !(flags & (IORESOURCE_STORED | IORESOURCE_RESERVE | IORESOURCE_FIXED |
			  IORESOURCE_ASSIGNED));
}

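/*
 * The FSP HOB reports per-stack PCI windows (IO, Mem32, Mem64). The two
 * helpers below read and advance the allocation cursor (the window base)
 * for one window type.
 */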
static uint64_t get_resource_base(STACK_RES *stack, ResType res_type)
{
	if (res_type == RES_TYPE_IO) {
		assert(stack->PciResourceIoBase <= stack->PciResourceIoLimit);
		return stack->PciResourceIoBase;
	}
	if (res_type == RES_TYPE_NONPREF_MEM) {
		assert(stack->PciResourceMem32Base <= stack->PciResourceMem32Limit);
		return stack->PciResourceMem32Base;
	}
	assert(stack->PciResourceMem64Base <= stack->PciResourceMem64Limit);
	return stack->PciResourceMem64Base;
}

static void set_resource_base(STACK_RES *stack, ResType res_type, uint64_t base)
{
	if (res_type == RES_TYPE_IO) {
		assert(base <= (stack->PciResourceIoLimit + 1));
		stack->PciResourceIoBase = base;
	} else if (res_type == RES_TYPE_NONPREF_MEM) {
		assert(base <= (stack->PciResourceMem32Limit + 1));
		stack->PciResourceMem32Base = base;
	} else {
		assert(base <= (stack->PciResourceMem64Limit + 1));
		stack->PciResourceMem64Base = base;
	}
}

static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge);

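/*
 * Scan the buses below the domain. Bus 0 already has its devices from the
 * devicetree, so they are only probed and descended into; the other (IIO
 * stack) root buses get a full PCI scan.
 */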
static void xeonsp_cpx_pci_domain_scan_bus(struct device *dev)
{
	DEV_FUNC_ENTER(dev);
	struct bus *link = dev->link_list;

	printk(BIOS_SPEW, "%s:%s scanning buses under device %s\n",
		__FILE__, __func__, dev_path(dev));
	while (link != NULL) {
		if (link->secondary == 0) { /* scan only PSTACK buses */
			struct device *d;
			for (d = link->children; d; d = d->sibling)
				pci_probe_dev(d, link, d->path.pci.devfn);
			scan_bridges(link);
		} else {
			pci_scan_bus(link, PCI_DEVFN(0, 0), 0xff);
		}
		link = link->next;
	}
	DEV_FUNC_EXIT(dev);
}

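/*
 * Depth-first walk over all devices below 'bus', invoking the optional
 * device and resource callbacks on each enabled device.
 */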
static void xeonsp_pci_dev_iterator(struct bus *bus,
		void (*dev_iterator)(struct device *, void *),
		void (*res_iterator)(struct device *, struct resource *, void *),
		void *data)
{
	struct device *curdev;
	struct resource *res;

	/* Walk through all devices and find which resources they need. */
	for (curdev = bus->children; curdev; curdev = curdev->sibling) {
		struct bus *link;

		if (!curdev->enabled)
			continue;

		if (!curdev->ops || !curdev->ops->read_resources) {
			if (curdev->path.type != DEVICE_PATH_APIC)
				printk(BIOS_ERR, "%s missing read_resources\n",
					dev_path(curdev));
			continue;
		}

		if (dev_iterator)
			dev_iterator(curdev, data);

		if (res_iterator) {
			for (res = curdev->resource_list; res; res = res->next)
				res_iterator(curdev, res, data);
		}

		/* Read in the resources behind the current device's links. */
		for (link = curdev->link_list; link; link = link->next)
			xeonsp_pci_dev_iterator(link, dev_iterator, res_iterator, data);
	}
}

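/*
 * Helpers for the two-pass read_resources flow in
 * xeonsp_pci_domain_read_resources(): the first pass calls each device's
 * read_resources, the second swaps the op for a no-op so it is not run again.
 */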
static void xeonsp_pci_dev_read_resources(struct device *dev, void *data)
{
	post_log_path(dev);
	dev->ops->read_resources(dev);
}

static void xeonsp_pci_dev_dummy_func(struct device *dev)
{
}

static void xeonsp_reset_pci_op(struct device *dev, void *data)
{
	if (dev->ops)
		dev->ops->read_resources = xeonsp_pci_dev_dummy_func;
}

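/* Find the IIO stack whose bus range contains the given bus number. */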
static STACK_RES *find_stack_for_bus(struct iiostack_resource *info, uint8_t bus)
{
	for (int i = 0; i < info->no_of_stacks; ++i) {
		if (bus >= info->res[i].BusBase && bus <= info->res[i].BusLimit)
			return &info->res[i];
	}
	return NULL;
}

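/*
 * Insert a resource into the per-stack list, keeping the groups ordered by
 * decreasing alignment ('align' is log2, e.g. align == 12 means 4 KiB).
 */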
static void add_res_to_stack(struct stack_dev_resource **root,
	struct device *dev, struct resource *res)
{
	struct stack_dev_resource *cur = *root;
	while (cur) {
		if (cur->align == res->align || cur->next == NULL) /* equal or last record */
			break;
		else if (cur->align > res->align) {
			if (cur->next->align < res->align) /* need to insert new record here */
				break;
			cur = cur->next;
		} else {
			break;
		}
	}

	struct stack_dev_resource *nr;
	if (!cur || cur->align != res->align) { /* need to add new record */
		nr = malloc(sizeof(struct stack_dev_resource));
		if (nr == NULL)
			die("%s: out of memory.\n", __func__);
		memset(nr, 0, sizeof(struct stack_dev_resource));
		nr->align = res->align;
		if (!cur) {
			*root = nr; /* head node */
		} else if (cur->align > nr->align) {
			if (cur->next == NULL) {
				cur->next = nr;
			} else {
				nr->next = cur->next;
				cur->next = nr;
			}
		} else { /* insert at the beginning */
			nr->next = cur;
			*root = nr;
		}
	} else {
		nr = cur;
	}

	assert(nr != NULL && nr->align == res->align);

	struct pci_resource *npr = malloc(sizeof(struct pci_resource));
	if (npr == NULL)
		die("%s: out of memory.\n", __func__);
	npr->res = res;
	npr->dev = dev;
	npr->next = NULL;

	if (nr->children == NULL) {
		nr->children = npr;
	} else {
		struct pci_resource *pr = nr->children;
		while (pr->next != NULL)
			pr = pr->next;
		pr->next = npr;
	}
}

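/*
 * Carve the collected resources out of the stack window for one resource
 * type, highest alignment first, and advance the window base past the
 * allocated range. 'bridge' (if any) receives the enclosing window.
 */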
static void reserve_dev_resources(STACK_RES *stack, ResType res_type,
	struct stack_dev_resource *res_root, struct resource *bridge)
{
	uint64_t orig_base, base;

	orig_base = get_resource_base(stack, res_type);

	base = orig_base;
	int first = 1;
	while (res_root) { /* loop through all devices grouped by alignment requirements */
		struct pci_resource *pr = res_root->children;
		while (pr) {
			if (first) {
				if (bridge) { /* takes highest alignment */
					if (bridge->align < pr->res->align)
						bridge->align = pr->res->align;
					orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
				} else {
					orig_base = ALIGN_UP(orig_base, 1 << pr->res->align);
				}
				base = orig_base;

				if (bridge)
					bridge->base = base;
				pr->res->base = base;
				first = 0;
			} else {
				pr->res->base = ALIGN_UP(base, 1 << pr->res->align);
			}
			pr->res->limit = pr->res->base + pr->res->size - 1;
			base = pr->res->limit + 1;
			pr->res->flags |= (IORESOURCE_ASSIGNED);
			pr = pr->next;
		}
		res_root = res_root->next;
	}

	if (bridge) {
		/* If this bridge has no resources, give it a default (granularity-sized) window */
		if (first) {
			orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
			bridge->base = orig_base;
			base = orig_base + (1ULL << bridge->gran);
		}

		bridge->size = ALIGN_UP(base, 1 << bridge->align) - bridge->base;

		bridge->limit = bridge->base + bridge->size - 1;
		bridge->flags |= (IORESOURCE_ASSIGNED);
		base = bridge->limit + 1;
	}

	set_resource_base(stack, res_type, base);
}

static void reclaim_resource_mem(struct stack_dev_resource *res_root)
{
	while (res_root) { /* loop through all devices grouped by alignment requirements */
		/* free pci_resource */
		struct pci_resource *pr = res_root->children;
		while (pr) {
			struct pci_resource *dpr = pr;
			pr = pr->next;
			free(dpr);
		}

		/* free stack_dev_resource */
		struct stack_dev_resource *ddr = res_root;
		res_root = res_root->next;
		free(ddr);
	}
}

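/*
 * Walk a device's bridge resources, assign the bus behind each one and
 * grow the parent bridge window to cover the children.
 */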
static void assign_bridge_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct resource *res;
	if (!dev->enabled)
		return;

	for (res = dev->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE) ||
		    (bridge && (get_res_type(bridge->flags) != get_res_type(res->flags))))
			continue;

		assign_stack_resources(stack_list, dev, res);

		if (!bridge)
			continue;

		/* On the first update, overload IORESOURCE_ASSIGNED as an "initialized" marker */
		if (!(bridge->flags & IORESOURCE_ASSIGNED)) {
			bridge->base = res->base;
			bridge->limit = res->limit;
			bridge->flags |= (IORESOURCE_ASSIGNED);
		} else {
			/* Expand the bridge range to cover the child bridge range */
			if (res->base < bridge->base)
				bridge->base = res->base;
			if (res->limit > bridge->limit)
				bridge->limit = res->limit;
		}
		bridge->size = (bridge->limit - bridge->base + 1);
	}
}

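/*
 * Core allocator: for each bus below 'dev', first recurse into bridges,
 * then group the remaining resources by type and alignment and reserve
 * them from the owning IIO stack's windows.
 */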
static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct bus *bus;

	/* Read in the resources behind the current device's links. */
	for (bus = dev->link_list; bus; bus = bus->next) {
		struct device *curdev;
		STACK_RES *stack;

		/* get IIO stack for this bus */
		stack = find_stack_for_bus(stack_list, bus->secondary);
		assert(stack != NULL);

		/* Assign resources to bridge */
		for (curdev = bus->children; curdev; curdev = curdev->sibling)
			assign_bridge_resources(stack_list, curdev, bridge);

		/* Pick non-bridged resources for resource allocation for each resource type */
		ResType res_types[MAX_RES_TYPES] = {
			RES_TYPE_IO,
			RES_TYPE_NONPREF_MEM,
			RES_TYPE_PREF_MEM
		};

		uint8_t no_res_types = MAX_RES_TYPES;

		/* if it is a bridge, only process the matching bridge resource type */
		if (bridge) {
			res_types[0] = get_res_type(bridge->flags);
			no_res_types = 1;
		}

		printk(BIOS_DEBUG, "%s:%d no_res_types: %d\n", __func__, __LINE__,
			no_res_types);

		/* Process each resource type */
		for (int rt = 0; rt < no_res_types; ++rt) {
			struct stack_dev_resource *res_root = NULL;
			printk(BIOS_DEBUG, "%s:%d rt: %d\n", __func__, __LINE__, rt);
			for (curdev = bus->children; curdev; curdev = curdev->sibling) {
				struct resource *res;
				printk(BIOS_DEBUG, "%s:%d dev: %s\n",
					__func__, __LINE__, dev_path(curdev));
				if (!curdev->enabled)
					continue;

				for (res = curdev->resource_list; res; res = res->next) {
					printk(BIOS_DEBUG, "%s:%d dev: %s, flags: 0x%lx\n",
						__func__, __LINE__,
						dev_path(curdev), res->flags);
					if (res->size == 0 ||
					    get_res_type(res->flags) != res_types[rt] ||
					    (res->flags & IORESOURCE_BRIDGE) ||
					    !need_assignment(res->flags))
						continue;
					add_res_to_stack(&res_root, curdev, res);
				}
			}

			/* Allocate resources and update bridge range */
			if (res_root || (bridge && !(bridge->flags & IORESOURCE_ASSIGNED))) {
				reserve_dev_resources(stack, res_types[rt], res_root, bridge);
				reclaim_resource_mem(res_root);
			}
		}
	}
}

static void xeonsp_pci_domain_read_resources(struct device *dev)
{
	struct bus *link;

	DEV_FUNC_ENTER(dev);

	pci_domain_read_resources(dev);

	/*
	 * Walk through all devices in this domain and read their resources.
	 * Since there is no callback to signal that read_resources is
	 * complete for all devices, this function initiates read_resources
	 * on all devices and then swaps the read_resources op with a no-op
	 * to avoid warnings on subsequent passes.
	 */
	for (link = dev->link_list; link; link = link->next)
		xeonsp_pci_dev_iterator(link, xeonsp_pci_dev_read_resources, NULL, NULL);

	for (link = dev->link_list; link; link = link->next)
		xeonsp_pci_dev_iterator(link, xeonsp_reset_pci_op, NULL, NULL);

	struct iiostack_resource stack_info = {0};
	uint8_t pci64bit_alloc_flag = get_iiostack_info(&stack_info);
	if (!pci64bit_alloc_flag) {
		/*
		 * Split the 32-bit address space evenly between the
		 * prefetchable and non-prefetchable windows.
		 */
		for (int s = 0; s < stack_info.no_of_stacks; ++s) {
			STACK_RES *res = &stack_info.res[s];
			uint64_t length = (res->PciResourceMem32Limit -
				res->PciResourceMem32Base + 1) / 2;
			res->PciResourceMem64Limit = res->PciResourceMem32Limit;
			res->PciResourceMem32Limit = (res->PciResourceMem32Base + length - 1);
			res->PciResourceMem64Base = res->PciResourceMem32Limit + 1;
		}
	}

	/* assign resources */
	assign_stack_resources(&stack_info, dev, NULL);

	DEV_FUNC_EXIT(dev);
}

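/* Drop ASSIGNED from movable I/O and memory resources so they can be re-placed. */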
static void reset_resource_to_unassigned(struct device *dev, struct resource *res, void *data)
{
	if ((res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) &&
	    !(res->flags & (IORESOURCE_FIXED | IORESOURCE_RESERVE))) {
		res->flags &= ~IORESOURCE_ASSIGNED;
	}
}

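/*
 * Domain set_resources: re-run resource reads against the FSP IIO stack
 * windows, then let the generic allocator store the final assignments.
 */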
static void xeonsp_cpx_pci_domain_set_resources(struct device *dev)
{
	DEV_FUNC_ENTER(dev);

	print_resource_tree(dev, BIOS_SPEW, "Before xeonsp pci domain set resource");

	/* Reset bus 0 dev resource assignments - they must move into the FSP IIOStack windows */
	xeonsp_pci_dev_iterator(dev->link_list, NULL, reset_resource_to_unassigned, NULL);

	/* Update dev resources based on the IIOStack IO/Mem32/Mem64 windows */
	xeonsp_pci_domain_read_resources(dev);

	struct bus *link = dev->link_list;
	while (link != NULL) {
		assign_resources(link);
		link = link->next;
	}

	print_resource_tree(dev, BIOS_SPEW, "After xeonsp pci domain set resource");

	DEV_FUNC_EXIT(dev);
}

void platform_fsp_silicon_init_params_cb(FSPS_UPD *silupd)
{
	/* not implemented yet */
}

#if CONFIG(HAVE_ACPI_TABLES)
static const char *soc_acpi_name(const struct device *dev)
{
	if (dev->path.type == DEVICE_PATH_DOMAIN)
		return "PC00";
	return NULL;
}
#endif

static struct device_operations pci_domain_ops = {
	.read_resources = &pci_domain_read_resources,
	.set_resources = &xeonsp_cpx_pci_domain_set_resources,
	.scan_bus = &xeonsp_cpx_pci_domain_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = &northbridge_write_acpi_tables,
	.acpi_name = soc_acpi_name,
#endif
};

static struct device_operations cpu_bus_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.init = cpx_init_cpus,
	.acpi_fill_ssdt = generate_cpu_entries,
};

/* Attach dummy buses for the IIO stack bus numbers to the PCI DOMAIN 0000 device */
static void attach_iio_stacks(struct device *dev)
{
	struct bus *iiostack_bus;
	struct iiostack_resource stack_info = {0};

	DEV_FUNC_ENTER(dev);

	get_iiostack_info(&stack_info);
	for (int s = 0; s < stack_info.no_of_stacks; ++s) {
		/* only non-zero bus numbers need to be enumerated */
		if (stack_info.res[s].BusBase == 0)
			continue;

		iiostack_bus = malloc(sizeof(struct bus));
		if (iiostack_bus == NULL)
			die("%s: out of memory.\n", __func__);
		memset(iiostack_bus, 0, sizeof(*iiostack_bus));
		memcpy(iiostack_bus, dev->bus, sizeof(*iiostack_bus));
		iiostack_bus->secondary = stack_info.res[s].BusBase;
		iiostack_bus->subordinate = stack_info.res[s].BusBase;
		iiostack_bus->dev = NULL;
		iiostack_bus->children = NULL;
		iiostack_bus->next = NULL;
		iiostack_bus->link_num = 1;

		if (dev->link_list == NULL) {
			dev->link_list = iiostack_bus;
		} else {
			struct bus *nlink = dev->link_list;
			while (nlink->next != NULL)
				nlink = nlink->next;
			nlink->next = iiostack_bus;
		}
	}

	DEV_FUNC_EXIT(dev);
}

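/*
 * Program the C620 PCH IOAPIC: set its APIC ID, advertise the full set of
 * redirection entries and deliver interrupts over the processor system bus.
 */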
static void pch_enable_ioapic(const struct device *dev)
{
	uint32_t reg32;

	set_ioapic_id((void *)IO_APIC_ADDR, 2);

	/* affirm full set of redirection table entries ("write once") */
	reg32 = io_apic_read((void *)IO_APIC_ADDR, 1);

	reg32 &= ~0x00ff0000;
	reg32 |= (C620_IOAPIC_REDIR_ENTRIES - 1) << 16;

	io_apic_write((void *)IO_APIC_ADDR, 1, reg32);

	/*
	 * Select Boot Configuration register (0x03) and
	 * use Processor System Bus (0x01) to deliver interrupts.
	 */
	io_apic_write((void *)IO_APIC_ADDR, 3, 1);
}

struct pci_operations soc_pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};

static void chip_enable_dev(struct device *dev)
{
	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
		attach_iio_stacks(dev);
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

static void chip_final(void *data)
{
	p2sb_hide();

	set_bios_init_completion();
}

static void chip_init(void *data)
{
	printk(BIOS_DEBUG, "coreboot: calling fsp_silicon_init\n");
	fsp_silicon_init(false);
	pch_enable_ioapic(NULL);
	setup_lapic();
	p2sb_unhide();
}

struct chip_operations soc_intel_xeon_sp_cpx_ops = {
	CHIP_NAME("Intel Cooperlake-SP")
	.enable_dev = chip_enable_dev,
	.init = chip_init,
	.final = chip_final,
};