/* SPDX-License-Identifier: GPL-2.0-only */

#include <arch/ioapic.h>
#include <assert.h>
#include <console/console.h>
#include <cpu/x86/lapic.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/p2sb.h>
#include <post.h>
#include <soc/cpu.h>
#include <soc/ramstage.h>
#include <soc/pm.h>
#include <soc/soc_util.h>
#include <stdlib.h>
#include <string.h>

/* C620 IOAPIC has 120 redirection entries */
#define C620_IOAPIC_REDIR_ENTRIES 120

struct pci_resource {
	struct device *dev;
	struct resource *res;
	struct pci_resource *next;
};

struct stack_dev_resource {
	uint8_t align;
	struct pci_resource *children;
	struct stack_dev_resource *next;
};

typedef enum {
	RES_TYPE_IO = 0,
	RES_TYPE_NONPREF_MEM,
	RES_TYPE_PREF_MEM,
	MAX_RES_TYPES
} ResType;

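/* Map coreboot resource flags to the matching IIO stack window type */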
static ResType get_res_type(uint64_t flags)
{
	if (flags & IORESOURCE_IO)
		return RES_TYPE_IO;
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_PREFETCH) {
			printk(BIOS_DEBUG, "%s:%d flags: 0x%llx\n", __func__, __LINE__, flags);
			return RES_TYPE_PREF_MEM;
		}
		/* Both 64-bit and 32-bit non-prefetchable BARs use address space below 4GiB */
		return RES_TYPE_NONPREF_MEM;
	}
	printk(BIOS_ERR, "Invalid resource type 0x%llx\n", flags);
	die("Invalid resource type");
}

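/* A resource still needs assignment unless it is already stored, reserved, fixed or assigned */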
static bool need_assignment(uint64_t flags)
{
	if (flags & (IORESOURCE_STORED | IORESOURCE_RESERVE | IORESOURCE_FIXED |
		     IORESOURCE_ASSIGNED))
		return false;
	else
		return true;
}

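/* Return the current allocation base of the requested window type within an IIO stack */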
static uint64_t get_resource_base(STACK_RES *stack, ResType res_type)
{
	if (res_type == RES_TYPE_IO) {
		assert(stack->PciResourceIoBase <= stack->PciResourceIoLimit);
		return stack->PciResourceIoBase;
	}
	if (res_type == RES_TYPE_NONPREF_MEM) {
		assert(stack->PciResourceMem32Base <= stack->PciResourceMem32Limit);
		return stack->PciResourceMem32Base;
	}
	assert(stack->PciResourceMem64Base <= stack->PciResourceMem64Limit);
	return stack->PciResourceMem64Base;
}

static void set_resource_base(STACK_RES *stack, ResType res_type, uint64_t base)
{
	if (res_type == RES_TYPE_IO) {
		assert(base <= (stack->PciResourceIoLimit + 1));
		stack->PciResourceIoBase = base;
	} else if (res_type == RES_TYPE_NONPREF_MEM) {
		assert(base <= (stack->PciResourceMem32Limit + 1));
		stack->PciResourceMem32Base = base;
	} else {
		assert(base <= (stack->PciResourceMem64Limit + 1));
		stack->PciResourceMem64Base = base;
	}
}

static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge);

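/*
 * Scan the buses below the PCI domain: probe the statically defined devicetree
 * devices on bus 0 and do a full PCI scan on each additional IIO stack bus.
 */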
static void xeonsp_cpx_pci_domain_scan_bus(struct device *dev)
{
	DEV_FUNC_ENTER(dev);
	struct bus *link = dev->link_list;

	printk(BIOS_SPEW, "%s:%s scanning buses under device %s\n",
		__FILE__, __func__, dev_path(dev));
	while (link != NULL) {
		if (link->secondary == 0) { // scan only PSTACK buses
			struct device *d;
			for (d = link->children; d; d = d->sibling)
				pci_probe_dev(d, link, d->path.pci.devfn);
			scan_bridges(link);
		} else {
			pci_scan_bus(link, PCI_DEVFN(0, 0), 0xff);
		}
		link = link->next;
	}
	DEV_FUNC_EXIT(dev);
}

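/*
 * Walk all devices on a bus (and recursively on their downstream links), invoking
 * the given device and resource callbacks for each enabled device.
 */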
static void xeonsp_pci_dev_iterator(struct bus *bus,
		void (*dev_iterator)(struct device *, void *),
		void (*res_iterator)(struct device *, struct resource *, void *),
		void *data)
{
	struct device *curdev;
	struct resource *res;

	/* Walk through all devices and find which resources they need. */
	for (curdev = bus->children; curdev; curdev = curdev->sibling) {
		struct bus *link;

		if (!curdev->enabled)
			continue;

		if (!curdev->ops || !curdev->ops->read_resources) {
			if (curdev->path.type != DEVICE_PATH_APIC)
				printk(BIOS_ERR, "%s missing read_resources\n",
					dev_path(curdev));
			continue;
		}

		if (dev_iterator)
			dev_iterator(curdev, data);

		if (res_iterator) {
			for (res = curdev->resource_list; res; res = res->next)
				res_iterator(curdev, res, data);
		}

		/* Read in the resources behind the current device's links. */
		for (link = curdev->link_list; link; link = link->next)
			xeonsp_pci_dev_iterator(link, dev_iterator, res_iterator, data);
	}
}

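/*
 * Helpers for domain resource reading: read_resources is invoked once per device and
 * then swapped for a no-op so it is not run again and does not trigger warnings.
 */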
static void xeonsp_pci_dev_read_resources(struct device *dev, void *data)
{
	post_log_path(dev);
	dev->ops->read_resources(dev);
}

static void xeonsp_pci_dev_dummy_func(struct device *dev)
{
}

static void xeonsp_reset_pci_op(struct device *dev, void *data)
{
	if (dev->ops)
		dev->ops->read_resources = xeonsp_pci_dev_dummy_func;
}

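/* Find the IIO stack whose bus range covers the given bus number */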
static STACK_RES *find_stack_for_bus(struct iiostack_resource *info, uint8_t bus)
{
	for (int i = 0; i < info->no_of_stacks; ++i) {
		if (bus >= info->res[i].BusBase && bus <= info->res[i].BusLimit)
			return &info->res[i];
	}
	return NULL;
}

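/*
 * Insert a device resource into the per-stack list, keeping the records grouped and
 * ordered by descending alignment requirement.
 */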
static void add_res_to_stack(struct stack_dev_resource **root,
	struct device *dev, struct resource *res)
{
	struct stack_dev_resource *cur = *root;
	while (cur) {
		if (cur->align == res->align || cur->next == NULL) /* equal or last record */
			break;
		else if (cur->align > res->align) {
			if (cur->next->align < res->align) /* need to insert new record here */
				break;
			cur = cur->next;
		} else {
			break;
		}
	}

	struct stack_dev_resource *nr;
	if (!cur || cur->align != res->align) { /* need to add new record */
		nr = malloc(sizeof(struct stack_dev_resource));
		if (nr == NULL)
			die("%s: out of memory.\n", __func__);
		memset(nr, 0, sizeof(struct stack_dev_resource));
		nr->align = res->align;
		if (!cur) {
			*root = nr; /* head node */
		} else if (cur->align > nr->align) {
			if (cur->next == NULL) {
				cur->next = nr;
			} else {
				nr->next = cur->next;
				cur->next = nr;
			}
		} else { /* insert at the beginning */
			nr->next = cur;
			*root = nr;
		}
	} else {
		nr = cur;
	}

	assert(nr != NULL && nr->align == res->align);

	struct pci_resource *npr = malloc(sizeof(struct pci_resource));
	if (npr == NULL)
		die("%s: out of memory.\n", __func__);
	npr->res = res;
	npr->dev = dev;
	npr->next = NULL;

	if (nr->children == NULL) {
		nr->children = npr;
	} else {
		struct pci_resource *pr = nr->children;
		while (pr->next != NULL)
			pr = pr->next;
		pr->next = npr;
	}
}

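/*
 * Allocate window space within the stack for all resources of one type, walking the
 * alignment-ordered device list, and update the bridge window (if any) to cover them.
 */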
static void reserve_dev_resources(STACK_RES *stack, ResType res_type,
	struct stack_dev_resource *res_root, struct resource *bridge)
{
	uint8_t align;
	uint64_t orig_base, base;

	orig_base = get_resource_base(stack, res_type);

	align = 0;
	base = orig_base;
	int first = 1;
	while (res_root) { /* loop through all devices grouped by alignment requirements */
		struct pci_resource *pr = res_root->children;
		while (pr) {
			if (first) {
				if (bridge) { /* bridge takes the highest alignment of its children */
					if (bridge->align < pr->res->align)
						bridge->align = pr->res->align;
					orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
				} else {
					orig_base = ALIGN_UP(orig_base, 1 << pr->res->align);
				}
				base = orig_base;

				if (bridge)
					bridge->base = base;
				pr->res->base = base;
				first = 0;
			} else {
				pr->res->base = ALIGN_UP(base, 1 << pr->res->align);
			}
			pr->res->limit = pr->res->base + pr->res->size - 1;
			base = pr->res->limit + 1;
			pr->res->flags |= (IORESOURCE_ASSIGNED);
			pr = pr->next;
		}
		res_root = res_root->next;
	}

	if (bridge) {
		/* If this bridge has no child resources, give it a default-sized window */
		if (first) {
			orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
			bridge->base = orig_base;
			base = orig_base + (1ULL << bridge->gran);
		}

		bridge->size = ALIGN_UP(base, 1 << bridge->align) - bridge->base;

		bridge->limit = bridge->base + bridge->size - 1;
		bridge->flags |= (IORESOURCE_ASSIGNED);
		base = bridge->limit + 1;
	}

	set_resource_base(stack, res_type, base);
}

static void reclaim_resource_mem(struct stack_dev_resource *res_root)
{
	while (res_root) { /* loop through all devices grouped by alignment requirements */
		/* free pci_resource */
		struct pci_resource *pr = res_root->children;
		while (pr) {
			struct pci_resource *dpr = pr;
			pr = pr->next;
			free(dpr);
		}

		/* free stack_dev_resource */
		struct stack_dev_resource *ddr = res_root;
		res_root = res_root->next;
		free(ddr);
	}
}

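/*
 * Assign resources behind each bridge window of this device and grow the parent
 * bridge window (if any) so that it covers its child bridge windows.
 */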
static void assign_bridge_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct resource *res;
	if (!dev->enabled)
		return;

	for (res = dev->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE) ||
		    (bridge && (get_res_type(bridge->flags) != get_res_type(res->flags))))
			continue;

		assign_stack_resources(stack_list, dev, res);

		if (!bridge)
			continue;

		/* For the first update, overload IORESOURCE_ASSIGNED to seed the bridge range */
		if (!(bridge->flags & IORESOURCE_ASSIGNED)) {
			bridge->base = res->base;
			bridge->limit = res->limit;
			bridge->flags |= (IORESOURCE_ASSIGNED);
		} else {
			/* update bridge range from child bridge range */
			if (res->base < bridge->base)
				bridge->base = res->base;
			if (res->limit > bridge->limit)
				bridge->limit = res->limit;
		}
		bridge->size = (bridge->limit - bridge->base + 1);
	}
}

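/*
 * Core allocator: for every bus below the device, find its IIO stack, let child
 * bridges claim their windows first, then place the remaining non-bridge resources
 * of each type into the stack's IO/Mem32/Mem64 windows.
 */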
static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct bus *bus;

	/* Assign the resources behind the current device's links. */
	for (bus = dev->link_list; bus; bus = bus->next) {
		struct device *curdev;
		STACK_RES *stack;

		/* get IIO stack for this bus */
		stack = find_stack_for_bus(stack_list, bus->secondary);
		assert(stack != NULL);

		/* Assign resources to bridges */
		for (curdev = bus->children; curdev; curdev = curdev->sibling)
			assign_bridge_resources(stack_list, curdev, bridge);

		/* Pick non-bridge resources for allocation, per resource type */
		ResType res_types[MAX_RES_TYPES] = {
			RES_TYPE_IO,
			RES_TYPE_NONPREF_MEM,
			RES_TYPE_PREF_MEM
		};

		uint8_t no_res_types = MAX_RES_TYPES;

		/* if it is a bridge, only process the matching bridge resource type */
		if (bridge) {
			res_types[0] = get_res_type(bridge->flags);
			no_res_types = 1;
		}

		printk(BIOS_DEBUG, "%s:%d no_res_types: %d\n", __func__, __LINE__,
			no_res_types);

		/* Process each resource type */
		for (int rt = 0; rt < no_res_types; ++rt) {
			struct stack_dev_resource *res_root = NULL;
			printk(BIOS_DEBUG, "%s:%d rt: %d\n", __func__, __LINE__, rt);
			for (curdev = bus->children; curdev; curdev = curdev->sibling) {
				struct resource *res;
				printk(BIOS_DEBUG, "%s:%d dev: %s\n",
					__func__, __LINE__, dev_path(curdev));
				if (!curdev->enabled)
					continue;

				for (res = curdev->resource_list; res; res = res->next) {
					printk(BIOS_DEBUG, "%s:%d dev: %s, flags: 0x%lx\n",
						__func__, __LINE__,
						dev_path(curdev), res->flags);
					if (res->size == 0 ||
					    get_res_type(res->flags) != res_types[rt] ||
					    (res->flags & IORESOURCE_BRIDGE) ||
					    !need_assignment(res->flags))
						continue;
					else
						add_res_to_stack(&res_root, curdev, res);
				}
			}

			/* Allocate resources and update the bridge range */
			if (res_root || (bridge && !(bridge->flags & IORESOURCE_ASSIGNED))) {
				reserve_dev_resources(stack, res_types[rt], res_root, bridge);
				reclaim_resource_mem(res_root);
			}
		}
	}
}

static void xeonsp_pci_domain_read_resources(struct device *dev)
{
	struct bus *link;

	DEV_FUNC_ENTER(dev);

	pci_domain_read_resources(dev);

	/*
	 * Walk through all devices in this domain and read their resources.
	 * Since there is no callback that fires once resource reading has completed
	 * for all devices, this domain read_resources function initiates read_resources
	 * on every device and then swaps each device's read_resources operation with a
	 * dummy function to avoid warnings.
	 */
	for (link = dev->link_list; link; link = link->next)
		xeonsp_pci_dev_iterator(link, xeonsp_pci_dev_read_resources, NULL, NULL);

	for (link = dev->link_list; link; link = link->next)
		xeonsp_pci_dev_iterator(link, xeonsp_reset_pci_op, NULL, NULL);

	struct iiostack_resource stack_info = {0};
	uint8_t pci64bit_alloc_flag = get_iiostack_info(&stack_info);
	if (!pci64bit_alloc_flag) {
		/*
		 * Split the 32-bit address space between the prefetchable and
		 * non-prefetchable windows
		 */
		for (int s = 0; s < stack_info.no_of_stacks; ++s) {
			STACK_RES *res = &stack_info.res[s];
			uint64_t length = (res->PciResourceMem32Limit -
				res->PciResourceMem32Base + 1) / 2;
			res->PciResourceMem64Limit = res->PciResourceMem32Limit;
			res->PciResourceMem32Limit = (res->PciResourceMem32Base + length - 1);
			res->PciResourceMem64Base = res->PciResourceMem32Limit + 1;
		}
	}

	/* assign resources */
	assign_stack_resources(&stack_info, dev, NULL);

	DEV_FUNC_EXIT(dev);
}

static void reset_resource_to_unassigned(struct device *dev, struct resource *res, void *data)
{
	if ((res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) &&
	    !(res->flags & (IORESOURCE_FIXED | IORESOURCE_RESERVE))) {
		res->flags &= ~IORESOURCE_ASSIGNED;
	}
}

static void xeonsp_cpx_pci_domain_set_resources(struct device *dev)
{
	DEV_FUNC_ENTER(dev);

	print_resource_tree(dev, BIOS_SPEW, "Before xeonsp pci domain set resource");

	/* Reset bus 0 device resources - they must move into the FSP IIO stack windows */
	xeonsp_pci_dev_iterator(dev->link_list, NULL, reset_resource_to_unassigned, NULL);

	/* Update device resources based on the IIO stack IO/Mem32/Mem64 windows */
	xeonsp_pci_domain_read_resources(dev);

	struct bus *link = dev->link_list;
	while (link != NULL) {
		assign_resources(link);
		link = link->next;
	}

	print_resource_tree(dev, BIOS_SPEW, "After xeonsp pci domain set resource");

	DEV_FUNC_EXIT(dev);
}

void platform_fsp_silicon_init_params_cb(FSPS_UPD *silupd)
{
	/* not implemented yet */
}

static struct device_operations pci_domain_ops = {
	.read_resources = &pci_domain_read_resources,
	.set_resources = &xeonsp_cpx_pci_domain_set_resources,
	.scan_bus = &xeonsp_cpx_pci_domain_scan_bus,
};

static struct device_operations cpu_bus_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.init = cpx_init_cpus,
};

/* Attach buses for the IIO stack bus numbers (with no devices) to the PCI DOMAIN 0000 device */
static void attach_iio_stacks(struct device *dev)
{
	struct bus *iiostack_bus;
	struct iiostack_resource stack_info = {0};

	DEV_FUNC_ENTER(dev);

	get_iiostack_info(&stack_info);
	for (int s = 0; s < stack_info.no_of_stacks; ++s) {
		/* only stacks with a non-zero base bus number need to be enumerated here */
		if (stack_info.res[s].BusBase == 0)
			continue;

		iiostack_bus = malloc(sizeof(struct bus));
		if (iiostack_bus == NULL)
			die("%s: out of memory.\n", __func__);
		memset(iiostack_bus, 0, sizeof(*iiostack_bus));
		memcpy(iiostack_bus, dev->bus, sizeof(*iiostack_bus));
		iiostack_bus->secondary = stack_info.res[s].BusBase;
		iiostack_bus->subordinate = stack_info.res[s].BusBase;
		iiostack_bus->dev = NULL;
		iiostack_bus->children = NULL;
		iiostack_bus->next = NULL;
		iiostack_bus->link_num = 1;

		if (dev->link_list == NULL) {
			dev->link_list = iiostack_bus;
		} else {
			struct bus *nlink = dev->link_list;
			while (nlink->next != NULL)
				nlink = nlink->next;
			nlink->next = iiostack_bus;
		}
	}

	DEV_FUNC_EXIT(dev);
}

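/* Set up the PCH IOAPIC: program its ID, redirection entry count and interrupt delivery mode */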
static void pch_enable_ioapic(const struct device *dev)
{
	uint32_t reg32;

	set_ioapic_id((void *)IO_APIC_ADDR, 2);

	/* affirm full set of redirection table entries ("write once") */
	reg32 = io_apic_read((void *)IO_APIC_ADDR, 1);

	reg32 &= ~0x00ff0000;
	reg32 |= (C620_IOAPIC_REDIR_ENTRIES - 1) << 16;

	io_apic_write((void *)IO_APIC_ADDR, 1, reg32);

	/*
	 * Select Boot Configuration register (0x03) and
	 * use Processor System Bus (0x01) to deliver interrupts.
	 */
	io_apic_write((void *)IO_APIC_ADDR, 3, 1);
}

struct pci_operations soc_pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};

static void chip_enable_dev(struct device *dev)
{
	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
		attach_iio_stacks(dev);
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

static void chip_final(void *data)
{
	p2sb_hide();
}

static void chip_init(void *data)
{
	printk(BIOS_DEBUG, "coreboot: calling fsp_silicon_init\n");
	fsp_silicon_init(false);
	pch_enable_ioapic(NULL);
	setup_lapic();
	p2sb_unhide();
}

struct chip_operations soc_intel_xeon_sp_cpx_ops = {
	CHIP_NAME("Intel Cooperlake-SP")
	.enable_dev = chip_enable_dev,
	.init = chip_init,
	.final = chip_final,
};