device: Add support for multiple PCI segment groups

Add initial support for multiple PCI segment groups. Instead of
changing the meaning of 'secondary' in the bus struct, introduce a new
'segment_group' struct element (sketched below) so the existing common
code keeps working unchanged.
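The bus struct change itself is not visible in this diff. As a minimal
sketch, assuming the shape of struct bus in include/device/device.h,
the members relevant here look roughly like this (field widths are
illustrative):

  #include <stdint.h>

  struct bus {
          uint16_t secondary;       /* first bus number of the PCI root */
          uint16_t subordinate;     /* last scanned bus number */
          uint16_t max_subordinate; /* maximum bus number in the domain */
          uint8_t  segment_group;   /* new: PCI segment group number */
  };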

Since all platforms currently use only one segment group, this is not
a functional change. On platforms that support more than one segment
group, the segment group has to be set when creating the PCI domain;
the sketch after this paragraph illustrates how segment groups
partition the ECAM bus space.
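As a minimal sketch of why the bus limit is clamped per segment group
rather than across the whole ECAM window, assume the groups are laid
out back to back in one ECAM region. ecam_offset and
ECAM_BYTES_PER_BUS are hypothetical names for illustration, not
coreboot API (PCI_BUSES_PER_SEGMENT_GROUP does come from
<device/pci.h> in the patched tree):

  #include <stdint.h>

  /* Illustrative stand-in for the macro from <device/pci.h>. */
  #define PCI_BUSES_PER_SEGMENT_GROUP 256u
  /* 32 devices * 8 functions * 4 KiB of config space per function */
  #define ECAM_BYTES_PER_BUS (32u * 8u * 4096u)

  static uint64_t ecam_offset(uint8_t segment_group, uint8_t bus,
                              uint8_t dev, uint8_t fn)
  {
          /* Each segment group gets its own contiguous slice of the
             ECAM window, so bus numbers can repeat across groups. */
          uint64_t group_base = (uint64_t)segment_group *
                  PCI_BUSES_PER_SEGMENT_GROUP * ECAM_BYTES_PER_BUS;
          return group_base + ((uint64_t)bus << 20) +
                 ((uint32_t)dev << 15) + ((uint32_t)fn << 12);
  }

For example, segment group 1, bus 0, device 0, function 0 yields
offset 0x10000000 (256 MiB), i.e. the start of the second group's
slice of the window.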

Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Signed-off-by: Felix Held <felix-coreboot@felixheld.de>
Change-Id: Ied3313c41896362dd989ee2ab1b1bcdced840aa8
Reviewed-on: https://review.coreboot.org/c/coreboot/+/79927
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Nico Huber <nico.h@gmx.de>
Reviewed-by: Martin Roth <martin.roth@amd.corp-partner.google.com>
diff --git a/src/soc/amd/common/block/acpi/ivrs.c b/src/soc/amd/common/block/acpi/ivrs.c
index 140968c..605c4f5 100644
--- a/src/soc/amd/common/block/acpi/ivrs.c
+++ b/src/soc/amd/common/block/acpi/ivrs.c
@@ -218,7 +218,7 @@
 	ivhd_40->capability_offset = pci_find_capability(iommu_dev, IOMMU_CAP_ID);
 	ivhd_40->iommu_base_low = ivhd->iommu_base_low;
 	ivhd_40->iommu_base_high = ivhd->iommu_base_high;
-	ivhd_40->pci_segment_group = 0x0000;
+	ivhd_40->pci_segment_group = nb_dev->bus->segment_group;
 	ivhd_40->iommu_info = ivhd->iommu_info;
 	/* For type 40h bits 31:28 and 12:0 are reserved */
 	ivhd_40->iommu_attributes = ivhd->iommu_feature_info & 0xfffe000;
@@ -275,7 +275,7 @@
 	ivhd_11->capability_offset = pci_find_capability(iommu_dev, IOMMU_CAP_ID);
 	ivhd_11->iommu_base_low = ivhd->iommu_base_low;
 	ivhd_11->iommu_base_high = ivhd->iommu_base_high;
-	ivhd_11->pci_segment_group = 0x0000;
+	ivhd_11->pci_segment_group = nb_dev->bus->segment_group;
 	ivhd_11->iommu_info = ivhd->iommu_info;
 	ivhd11_attr_ptr = (ivhd11_iommu_attr_t *)&ivhd->iommu_feature_info;
 	ivhd_11->iommu_attributes.perf_counters = ivhd11_attr_ptr->perf_counters;
@@ -365,7 +365,7 @@
 		ivhd->flags |= ((mmio_x18_value & MMIO_CTRL_HT_TUN_EN) ?
 							IVHD_FLAG_HT_TUN_EN : 0);
 
-		ivhd->pci_segment_group = 0x0000;
+		ivhd->pci_segment_group = nb_dev->bus->segment_group;
 
 		ivhd->iommu_info = pci_read_config16(iommu_dev,
 			ivhd->capability_offset + 0x10) & 0x1F;
diff --git a/src/soc/amd/common/block/data_fabric/domain.c b/src/soc/amd/common/block/data_fabric/domain.c
index c2f1406..b827dd3 100644
--- a/src/soc/amd/common/block/data_fabric/domain.c
+++ b/src/soc/amd/common/block/data_fabric/domain.c
@@ -9,6 +9,7 @@
 #include <cpu/amd/mtrr.h>
 #include <cpu/cpu.h>
 #include <device/device.h>
+#include <device/pci.h>
 #include <device/pci_ops.h>
 #include <types.h>
 
@@ -21,16 +22,16 @@
 		return;
 	}
 
-	/* TODO: Implement support for more than one PCI segment group in coreboot */
-	if (segment_group) {
-		printk(BIOS_ERR, "coreboot currently only supports one PCI segment group.\n");
+	if (segment_group >= PCI_SEGMENT_GROUP_COUNT) {
+		printk(BIOS_ERR, "Skipping domain %u: segment group %u is out of range.\n",
+		       domain->path.domain.domain, segment_group);
 		return;
 	}
 
-	/* TODO: Check if bus >= CONFIG_ECAM_MMCONF_BUS_NUMBER and return in that case */
+	/* TODO: Check if bus >= PCI_BUSES_PER_SEGMENT_GROUP and return in that case */
 
-	/* Make sure to not report more than CONFIG_ECAM_MMCONF_BUS_NUMBER PCI buses */
-	limit = MIN(limit, CONFIG_ECAM_MMCONF_BUS_NUMBER - 1);
+	/* Make sure to not report more than PCI_BUSES_PER_SEGMENT_GROUP PCI buses */
+	limit = MIN(limit, PCI_BUSES_PER_SEGMENT_GROUP - 1);
 
 	/* Set bus first number of PCI root */
 	domain->link_list->secondary = bus;
@@ -38,6 +39,7 @@
 	domain->link_list->subordinate = bus;
 	/* Tell allocator about maximum PCI bus number in domain */
 	domain->link_list->max_subordinate = limit;
+	domain->link_list->segment_group = segment_group;
 
 	pci_host_bridge_scan_bus(domain);
 }
@@ -246,12 +248,13 @@
 	acpigen_write_resourcetemplate_header();
 
 	/* PCI bus number range in domain */
-	printk(BIOS_DEBUG, "%s _CRS: adding busses [%x-%x]\n", acpi_device_name(domain),
-	       domain->link_list->secondary, domain->link_list->max_subordinate);
+	printk(BIOS_DEBUG, "%s _CRS: adding busses [%x-%x] in segment group %x\n",
+	       acpi_device_name(domain), domain->link_list->secondary,
+	       domain->link_list->max_subordinate, domain->link_list->segment_group);
 	acpigen_resource_producer_bus_number(domain->link_list->secondary,
 					     domain->link_list->max_subordinate);
 
-	if (domain->link_list->secondary == 0) {
+	if (domain->link_list->secondary == 0 && domain->link_list->segment_group == 0) {
 		/* ACPI 6.4.2.5 I/O Port Descriptor */
 		acpigen_write_io16(PCI_IO_CONFIG_INDEX, PCI_IO_CONFIG_LAST_PORT, 1,
 				   PCI_IO_CONFIG_PORT_COUNT, 1);
@@ -287,7 +290,7 @@
 
 	acpigen_write_resourcetemplate_footer();
 
-	acpigen_write_SEG(0);
+	acpigen_write_SEG(domain->link_list->segment_group);
 	acpigen_write_BBN(domain->link_list->secondary);
 
 	/* Scope */
diff --git a/src/soc/amd/genoa_poc/domain.c b/src/soc/amd/genoa_poc/domain.c
index f21cad5..dc37450 100644
--- a/src/soc/amd/genoa_poc/domain.c
+++ b/src/soc/amd/genoa_poc/domain.c
@@ -18,7 +18,7 @@
 	amd_pci_domain_read_resources(domain);
 
 	// We only want to add the DRAM memory map once
-	if (domain->link_list->secondary == 0) {
+	if (domain->link_list->secondary == 0 && domain->link_list->segment_group == 0) {
 		/* 0x1000 is a large enough first index to be sure to not overlap with the
 		   resources added by amd_pci_domain_read_resources */
 		add_opensil_memmap(domain, 0x1000);