blob: 6d2dfba3c79b710d1a525cfc76484c08a3681334 [file] [log] [blame]
Andrey Petrov2e410752020-03-20 12:08:32 -07001/* SPDX-License-Identifier: GPL-2.0-only */
Andrey Petrov2e410752020-03-20 12:08:32 -07002
3#include <arch/ioapic.h>
Jonathan Zhang7919d612020-04-02 17:27:54 -07004#include <assert.h>
Andrey Petrov2e410752020-03-20 12:08:32 -07005#include <console/console.h>
Marc Jones8b522db2020-10-12 11:58:46 -06006#include <console/debug.h>
Andrey Petrov2e410752020-03-20 12:08:32 -07007#include <cpu/x86/lapic.h>
8#include <device/pci.h>
9#include <fsp/api.h>
Subrata Banik1366e442020-09-29 13:55:50 +053010#include <intelblocks/lpc_lib.h>
Andrey Petrov4e48ac02020-04-30 14:08:19 -070011#include <intelblocks/p2sb.h>
Jonathan Zhang7919d612020-04-02 17:27:54 -070012#include <post.h>
Jonathan Zhang3172f982020-05-28 17:53:48 -070013#include <soc/acpi.h>
Andrey Petrov8670e822020-03-30 12:25:06 -070014#include <soc/cpu.h>
Andrey Petrov2e410752020-03-20 12:08:32 -070015#include <soc/ramstage.h>
16#include <soc/pm.h>
Jonathan Zhang7919d612020-04-02 17:27:54 -070017#include <soc/soc_util.h>
18#include <stdlib.h>
Andrey Petrov2e410752020-03-20 12:08:32 -070019
20/* C620 IOAPIC has 120 redirection entries */
21#define C620_IOAPIC_REDIR_ENTRIES 120
22
/* Singly-linked list node tying a device to one of its resources awaiting placement. */
struct pci_resource {
	struct device *dev;		/* device that owns the resource */
	struct resource *res;		/* resource to be placed in a stack window */
	struct pci_resource *next;	/* next resource in the same alignment group */
};
28
/* Group of resources that share one alignment, within a single IIO stack window. */
struct stack_dev_resource {
	uint8_t align;			/* common log2 alignment of all children */
	struct pci_resource *children;	/* resources sharing this alignment */
	struct stack_dev_resource *next;	/* next group (sorted by descending align) */
};
34
/* Classes of PCI resource windows handled per IIO stack. */
typedef enum {
	RES_TYPE_IO = 0,	/* I/O port space */
	RES_TYPE_NONPREF_MEM,	/* non-prefetchable MMIO */
	RES_TYPE_PREF_MEM,	/* prefetchable MMIO */
	MAX_RES_TYPES
} ResType;
41
42static ResType get_res_type(uint64_t flags)
43{
44 if (flags & IORESOURCE_IO)
45 return RES_TYPE_IO;
46 if (flags & IORESOURCE_MEM) {
47 if (flags & IORESOURCE_PREFETCH) {
48 printk(BIOS_DEBUG, "%s:%d flags: 0x%llx\n", __func__, __LINE__, flags);
49 return RES_TYPE_PREF_MEM;
50 }
51 /* both 64-bit and 32-bit use below 4GB address space */
52 return RES_TYPE_NONPREF_MEM;
53 }
54 printk(BIOS_ERR, "Invalid resource type 0x%llx\n", flags);
55 die("Invalida resource type");
56}
57
58static bool need_assignment(uint64_t flags)
59{
60 if (flags & (IORESOURCE_STORED | IORESOURCE_RESERVE | IORESOURCE_FIXED |
61 IORESOURCE_ASSIGNED))
62 return false;
63 else
64 return true;
65}
66
67static uint64_t get_resource_base(STACK_RES *stack, ResType res_type)
68{
69 if (res_type == RES_TYPE_IO) {
70 assert(stack->PciResourceIoBase <= stack->PciResourceIoLimit);
71 return stack->PciResourceIoBase;
72 }
73 if (res_type == RES_TYPE_NONPREF_MEM) {
74 assert(stack->PciResourceMem32Base <= stack->PciResourceMem32Limit);
75 return stack->PciResourceMem32Base;
76 }
77 assert(stack->PciResourceMem64Base <= stack->PciResourceMem64Limit);
78 return stack->PciResourceMem64Base;
79}
80
81static void set_resource_base(STACK_RES *stack, ResType res_type, uint64_t base)
82{
83 if (res_type == RES_TYPE_IO) {
84 assert(base <= (stack->PciResourceIoLimit + 1));
85 stack->PciResourceIoBase = base;
86 } else if (res_type == RES_TYPE_NONPREF_MEM) {
87 assert(base <= (stack->PciResourceMem32Limit + 1));
88 stack->PciResourceMem32Base = base;
89 } else {
90 assert(base <= (stack->PciResourceMem64Limit + 1));
91 stack->PciResourceMem64Base = base;
92 }
93}
94
/* Forward declaration: recursive allocator defined below, needed by
   assign_bridge_resources(). */
static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge);
97
98static void xeonsp_cpx_pci_domain_scan_bus(struct device *dev)
99{
100 DEV_FUNC_ENTER(dev);
101 struct bus *link = dev->link_list;
102
103 printk(BIOS_SPEW, "%s:%s scanning buses under device %s\n",
104 __FILE__, __func__, dev_path(dev));
105 while (link != NULL) {
106 if (link->secondary == 0) { // scan only PSTACK buses
107 struct device *d;
108 for (d = link->children; d; d = d->sibling)
109 pci_probe_dev(d, link, d->path.pci.devfn);
110 scan_bridges(link);
111 } else {
112 pci_scan_bus(link, PCI_DEVFN(0, 0), 0xff);
113 }
114 link = link->next;
115 }
116 DEV_FUNC_EXIT(dev);
117}
118
119static void xeonsp_pci_dev_iterator(struct bus *bus,
120 void (*dev_iterator)(struct device *, void *),
121 void (*res_iterator)(struct device *, struct resource *, void *),
122 void *data)
123{
124 struct device *curdev;
125 struct resource *res;
126
127 /* Walk through all devices and find which resources they need. */
128 for (curdev = bus->children; curdev; curdev = curdev->sibling) {
129 struct bus *link;
130
131 if (!curdev->enabled)
132 continue;
133
134 if (!curdev->ops || !curdev->ops->read_resources) {
135 if (curdev->path.type != DEVICE_PATH_APIC)
136 printk(BIOS_ERR, "%s missing read_resources\n",
137 dev_path(curdev));
138 continue;
139 }
140
141 if (dev_iterator)
142 dev_iterator(curdev, data);
143
144 if (res_iterator) {
145 for (res = curdev->resource_list; res; res = res->next)
146 res_iterator(curdev, res, data);
147 }
148
149 /* Read in the resources behind the current device's links. */
150 for (link = curdev->link_list; link; link = link->next)
151 xeonsp_pci_dev_iterator(link, dev_iterator, res_iterator, data);
152 }
153}
154
/*
 * Iterator callback: record the device in the POST log, then run its
 * read_resources handler. Caller guarantees ops->read_resources is non-NULL
 * (filtered in xeonsp_pci_dev_iterator). @data is unused.
 */
static void xeonsp_pci_dev_read_resources(struct device *dev, void *data)
{
	post_log_path(dev);
	dev->ops->read_resources(dev);
}
160
/* Intentionally empty read_resources stub; swapped in by xeonsp_reset_pci_op()
   so the generic allocator does not re-read (and warn about) resources. */
static void xeonsp_pci_dev_dummy_func(struct device *dev)
{
}
164
165static void xeonsp_reset_pci_op(struct device *dev, void *data)
166{
167 if (dev->ops)
168 dev->ops->read_resources = xeonsp_pci_dev_dummy_func;
169}
170
171static STACK_RES *find_stack_for_bus(struct iiostack_resource *info, uint8_t bus)
172{
173 for (int i = 0; i < info->no_of_stacks; ++i) {
174 if (bus >= info->res[i].BusBase && bus <= info->res[i].BusLimit)
175 return &info->res[i];
176 }
177 return NULL;
178}
179
180static void add_res_to_stack(struct stack_dev_resource **root,
181 struct device *dev, struct resource *res)
182{
183 struct stack_dev_resource *cur = *root;
184 while (cur) {
185 if (cur->align == res->align || cur->next == NULL) /* equal or last record */
186 break;
187 else if (cur->align > res->align) {
188 if (cur->next->align < res->align) /* need to insert new record here */
189 break;
190 cur = cur->next;
191 } else {
192 break;
193 }
194 }
195
196 struct stack_dev_resource *nr;
197 if (!cur || cur->align != res->align) { /* need to add new record */
198 nr = malloc(sizeof(struct stack_dev_resource));
199 if (nr == 0)
200 die("assign_resource_to_stack(): out of memory.\n");
201 memset(nr, 0, sizeof(struct stack_dev_resource));
202 nr->align = res->align;
203 if (!cur) {
204 *root = nr; /* head node */
205 } else if (cur->align > nr->align) {
206 if (cur->next == NULL) {
207 cur->next = nr;
208 } else {
209 nr->next = cur->next;
210 cur->next = nr;
211 }
212 } else { /* insert in the beginning */
213 nr->next = cur;
214 *root = nr;
215 }
216 } else {
217 nr = cur;
218 }
219
220 assert(nr != NULL && nr->align == res->align);
221
222 struct pci_resource *npr = malloc(sizeof(struct pci_resource));
223 if (npr == NULL)
224 die("%s: out of memory.\n", __func__);
225 npr->res = res;
226 npr->dev = dev;
227 npr->next = NULL;
228
229 if (nr->children == NULL) {
230 nr->children = npr;
231 } else {
232 struct pci_resource *pr = nr->children;
233 while (pr->next != NULL)
234 pr = pr->next;
235 pr->next = npr;
236 }
237}
238
239static void reserve_dev_resources(STACK_RES *stack, ResType res_type,
240 struct stack_dev_resource *res_root, struct resource *bridge)
241{
242 uint8_t align;
243 uint64_t orig_base, base;
244
245 orig_base = get_resource_base(stack, res_type);
246
247 align = 0;
248 base = orig_base;
249 int first = 1;
250 while (res_root) { /* loop through all devices grouped by alignment requirements */
251 struct pci_resource *pr = res_root->children;
252 while (pr) {
253 if (first) {
254 if (bridge) { /* takes highest alignment */
255 if (bridge->align < pr->res->align)
256 bridge->align = pr->res->align;
257 orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
258 } else {
259 orig_base = ALIGN_UP(orig_base, 1 << pr->res->align);
260 }
261 base = orig_base;
262
263 if (bridge)
264 bridge->base = base;
265 pr->res->base = base;
266 first = 0;
267 } else {
268 pr->res->base = ALIGN_UP(base, 1 << pr->res->align);
269 }
270 pr->res->limit = pr->res->base + pr->res->size - 1;
271 base = pr->res->limit + 1;
272 pr->res->flags |= (IORESOURCE_ASSIGNED);
273 pr = pr->next;
274 }
275 res_root = res_root->next;
276 }
277
278 if (bridge) {
279 /* this bridge doesn't have any resources, will set it to default window */
280 if (first) {
281 orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
282 bridge->base = orig_base;
283 base = orig_base + (1ULL << bridge->gran);
284 }
285
286 bridge->size = ALIGN_UP(base, 1 << bridge->align) - bridge->base;
287
288 bridge->limit = bridge->base + bridge->size - 1;
289 bridge->flags |= (IORESOURCE_ASSIGNED);
290 base = bridge->limit + 1;
291 }
292
293 set_resource_base(stack, res_type, base);
294}
295
296static void reclaim_resource_mem(struct stack_dev_resource *res_root)
297{
298 while (res_root) { /* loop through all devices grouped by alignment requirements */
299 /* free pci_resource */
300 struct pci_resource *pr = res_root->children;
301 while (pr) {
302 struct pci_resource *dpr = pr;
303 pr = pr->next;
304 free(dpr);
305 }
306
307 /* free stack_dev_resource */
308 struct stack_dev_resource *ddr = res_root;
309 res_root = res_root->next;
310 free(ddr);
311 }
312}
313
/*
 * For each bridge-type resource of @dev, recursively assign the subtree
 * behind it, then fold the child window into @bridge's range (when
 * non-NULL and of the same resource type).
 */
static void assign_bridge_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct resource *res;
	if (!dev->enabled)
		return;

	for (res = dev->resource_list; res; res = res->next) {
		/* Only bridge windows; when a parent bridge is given, only matching types. */
		if (!(res->flags & IORESOURCE_BRIDGE) ||
			(bridge && (get_res_type(bridge->flags) != get_res_type(res->flags))))
			continue;

		assign_stack_resources(stack_list, dev, res);

		if (!bridge)
			continue;

		/* for 1st time update, overloading IORESOURCE_ASSIGNED */
		if (!(bridge->flags & IORESOURCE_ASSIGNED)) {
			bridge->base = res->base;
			bridge->limit = res->limit;
			bridge->flags |= (IORESOURCE_ASSIGNED);
		} else {
			/* update bridge range from child bridge range */
			if (res->base < bridge->base)
				bridge->base = res->base;
			if (res->limit > bridge->limit)
				bridge->limit = res->limit;
		}
		bridge->size = (bridge->limit - bridge->base + 1);
	}
}
346
/*
 * Core allocator: for every bus below @dev, find its IIO stack window,
 * recursively size child bridges first, then place this level's
 * non-bridge resources grouped by type and alignment. When @bridge is
 * NULL (domain level) all three window types are processed; for a bridge
 * only its own type is.
 */
static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct bus *bus;

	/* Read in the resources behind the current device's links. */
	for (bus = dev->link_list; bus; bus = bus->next) {
		struct device *curdev;
		STACK_RES *stack;

		/* get IIO stack for this bus */
		stack = find_stack_for_bus(stack_list, bus->secondary);
		assert(stack != NULL);

		/* Assign resources to bridge */
		for (curdev = bus->children; curdev; curdev = curdev->sibling)
			assign_bridge_resources(stack_list, curdev, bridge);

		/* Pick non-bridged resources for resource allocation for each resource type */
		ResType res_types[MAX_RES_TYPES] = {
			RES_TYPE_IO,
			RES_TYPE_NONPREF_MEM,
			RES_TYPE_PREF_MEM
		};

		uint8_t no_res_types = MAX_RES_TYPES;

		/* if it is a bridge, only process matching bridge resource type */
		if (bridge) {
			res_types[0] = get_res_type(bridge->flags);
			no_res_types = 1;
		}

		printk(BIOS_DEBUG, "%s:%d no_res_types: %d\n", __func__, __LINE__,
			no_res_types);

		/* Process each resource type */
		for (int rt = 0; rt < no_res_types; ++rt) {
			struct stack_dev_resource *res_root = NULL;
			printk(BIOS_DEBUG, "%s:%d rt: %d\n", __func__, __LINE__, rt);
			for (curdev = bus->children; curdev; curdev = curdev->sibling) {
				struct resource *res;
				printk(BIOS_DEBUG, "%s:%d dev: %s\n",
					__func__, __LINE__, dev_path(curdev));
				if (!curdev->enabled)
					continue;

				/* Collect unassigned, non-bridge resources of this type. */
				for (res = curdev->resource_list; res; res = res->next) {
					printk(BIOS_DEBUG, "%s:%d dev: %s, flags: 0x%lx\n",
						__func__, __LINE__,
						dev_path(curdev), res->flags);
					if (res->size == 0 ||
						get_res_type(res->flags) != res_types[rt] ||
						(res->flags & IORESOURCE_BRIDGE) ||
						!need_assignment(res->flags))
						continue;
					else
						add_res_to_stack(&res_root, curdev, res);
				}
			}

			/* Allocate resources and update bridge range */
			if (res_root || (bridge && !(bridge->flags & IORESOURCE_ASSIGNED))) {
				reserve_dev_resources(stack, res_types[rt], res_root, bridge);
				reclaim_resource_mem(res_root);
			}
		}
	}
}
416
/*
 * Read all device resources in the domain, then place them inside the
 * FSP-reported IIO stack windows instead of the generic allocator's view.
 */
static void xeonsp_pci_domain_read_resources(struct device *dev)
{
	struct bus *link;

	DEV_FUNC_ENTER(dev);

	pci_domain_read_resources(dev);

	/*
	 * Walk through all devices in this domain and read resources.
	 * Since there is no callback when read resource operation is
	 * complete for all devices, domain read resource function initiates
	 * read resources for all devices and swaps read resource operation
	 * with dummy function to avoid warning.
	 */
	for (link = dev->link_list; link; link = link->next)
		xeonsp_pci_dev_iterator(link, xeonsp_pci_dev_read_resources, NULL, NULL);

	for (link = dev->link_list; link; link = link->next)
		xeonsp_pci_dev_iterator(link, xeonsp_reset_pci_op, NULL, NULL);

	struct iiostack_resource stack_info = {0};
	/* NOTE(review): flag appears to mean "FSP allocated a 64-bit window" — confirm
	   against get_iiostack_info(); when clear, the 32-bit window is split in two. */
	uint8_t pci64bit_alloc_flag = get_iiostack_info(&stack_info);
	if (!pci64bit_alloc_flag) {
		/*
		 * Split 32 bit address space between prefetchable and
		 * non-prefetchable windows
		 */
		for (int s = 0; s < stack_info.no_of_stacks; ++s) {
			STACK_RES *res = &stack_info.res[s];
			uint64_t length = (res->PciResourceMem32Limit -
				res->PciResourceMem32Base + 1)/2;
			res->PciResourceMem64Limit = res->PciResourceMem32Limit;
			res->PciResourceMem32Limit = (res->PciResourceMem32Base + length - 1);
			res->PciResourceMem64Base = res->PciResourceMem32Limit + 1;
		}
	}

	/* assign resources */
	assign_stack_resources(&stack_info, dev, NULL);

	DEV_FUNC_EXIT(dev);
}
460
461static void reset_resource_to_unassigned(struct device *dev, struct resource *res, void *data)
462{
463 if ((res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) &&
464 !(res->flags & (IORESOURCE_FIXED | IORESOURCE_RESERVE))) {
465 res->flags &= ~IORESOURCE_ASSIGNED;
466 }
467}
468
469static void xeonsp_cpx_pci_domain_set_resources(struct device *dev)
470{
471 DEV_FUNC_ENTER(dev);
472
473 print_resource_tree(dev, BIOS_SPEW, "Before xeonsp pci domain set resource");
474
475 /* reset bus 0 dev resource assignment - need to change them to FSP IIOStack window */
476 xeonsp_pci_dev_iterator(dev->link_list, NULL, reset_resource_to_unassigned, NULL);
477
478 /* update dev resources based on IIOStack IO/Mem32/Mem64 windows */
479 xeonsp_pci_domain_read_resources(dev);
480
481 struct bus *link = dev->link_list;
482 while (link != NULL) {
483 assign_resources(link);
484 link = link->next;
485 }
486
487 print_resource_tree(dev, BIOS_SPEW, "After xeonsp pci domain set resource");
488
489 DEV_FUNC_EXIT(dev);
490}
491
Marc Jonesb9365ef2020-10-11 15:00:36 -0600492/* UPD parameters to be initialized before SiliconInit */
Andrey Petrov2e410752020-03-20 12:08:32 -0700493void platform_fsp_silicon_init_params_cb(FSPS_UPD *silupd)
494{
Marc Jonesb9365ef2020-10-11 15:00:36 -0600495
496 mainboard_silicon_init_params(silupd);
Andrey Petrov2e410752020-03-20 12:08:32 -0700497}
498
#if CONFIG(HAVE_ACPI_TABLES)
/* ACPI name for the PCI domain object; all other device paths have none. */
static const char *soc_acpi_name(const struct device *dev)
{
	if (dev->path.type == DEVICE_PATH_DOMAIN)
		return "PC00";
	return NULL;
}
#endif
507
/* PCI domain operations.
   NOTE(review): read_resources points at the generic pci_domain_read_resources;
   the local xeonsp_pci_domain_read_resources is invoked later from
   set_resources — looks intentional (assignment is deferred), verify. */
static struct device_operations pci_domain_ops = {
	.read_resources = &pci_domain_read_resources,
	.set_resources = &xeonsp_cpx_pci_domain_set_resources,
	.scan_bus = &xeonsp_cpx_pci_domain_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = &northbridge_write_acpi_tables,
	.acpi_name = soc_acpi_name
#endif
};
517
/* CPU cluster operations: no resources of its own; initializes the CPUs
   and emits per-CPU ACPI SSDT entries. */
static struct device_operations cpu_bus_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.init = cpx_init_cpus,
	.acpi_fill_ssdt = generate_cpu_entries,
};
524
Jonathan Zhang7919d612020-04-02 17:27:54 -0700525/* Attach IIO stack bus numbers with dummy device to PCI DOMAIN 0000 device */
526static void attach_iio_stacks(struct device *dev)
Andrey Petrov2e410752020-03-20 12:08:32 -0700527{
Jonathan Zhang7919d612020-04-02 17:27:54 -0700528 struct bus *iiostack_bus;
529 struct iiostack_resource stack_info = {0};
530
531 DEV_FUNC_ENTER(dev);
532
533 get_iiostack_info(&stack_info);
534 for (int s = 0; s < stack_info.no_of_stacks; ++s) {
535 /* only non zero bus no. needs to be enumerated */
536 if (stack_info.res[s].BusBase == 0)
537 continue;
538
539 iiostack_bus = malloc(sizeof(struct bus));
540 if (iiostack_bus == NULL)
541 die("%s: out of memory.\n", __func__);
542 memset(iiostack_bus, 0, sizeof(*iiostack_bus));
543 memcpy(iiostack_bus, dev->bus, sizeof(*iiostack_bus));
544 iiostack_bus->secondary = stack_info.res[s].BusBase;
545 iiostack_bus->subordinate = stack_info.res[s].BusBase;
546 iiostack_bus->dev = NULL;
547 iiostack_bus->children = NULL;
548 iiostack_bus->next = NULL;
549 iiostack_bus->link_num = 1;
550
551 if (dev->link_list == NULL) {
552 dev->link_list = iiostack_bus;
553 } else {
554 struct bus *nlink = dev->link_list;
555 while (nlink->next != NULL)
556 nlink = nlink->next;
557 nlink->next = iiostack_bus;
558 }
Andrey Petrov2e410752020-03-20 12:08:32 -0700559 }
Jonathan Zhang7919d612020-04-02 17:27:54 -0700560
561 DEV_FUNC_EXIT(dev);
Andrey Petrov2e410752020-03-20 12:08:32 -0700562}
563
/* Default PCI operations for SoC devices: only subsystem ID programming. */
struct pci_operations soc_pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};
567
Jonathan Zhang7919d612020-04-02 17:27:54 -0700568static void chip_enable_dev(struct device *dev)
569{
570 /* Set the operations if it is a special bus type */
571 if (dev->path.type == DEVICE_PATH_DOMAIN) {
572 dev->ops = &pci_domain_ops;
573 attach_iio_stacks(dev);
574 } else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
575 dev->ops = &cpu_bus_ops;
576 }
577}
578
/* Chip finalize: hide P2SB again and signal BIOS init completion to the ME. */
static void chip_final(void *data)
{
	p2sb_hide();

	set_bios_init_completion();
}
585
/* Chip init: run FSP-S, then set up IOAPIC/LAPIC and unhide P2SB so
   later stages (and chip_final) can reach it. */
static void chip_init(void *data)
{
	printk(BIOS_DEBUG, "coreboot: calling fsp_silicon_init\n");
	fsp_silicon_init(false);
	pch_enable_ioapic();
	setup_lapic();
	p2sb_unhide();
}
594
/* Chip driver entry points for the Cooper Lake-SP SoC. */
struct chip_operations soc_intel_xeon_sp_cpx_ops = {
	CHIP_NAME("Intel Cooperlake-SP")
	.enable_dev = chip_enable_dev,
	.init = chip_init,
	.final = chip_final,
};