allocator_v4: Introduce RESOURCE_ALLOCATION_TOP_DOWN

Add an option to resource allocator v4 that restores the top-down
allocation approach at the domain level.

This makes it easier to handle 64-bit resources natively: with the
top-down approach, resources that can be placed either above or below
the 4G boundary are placed above it, saving precious space below 4G.
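
For illustration (hypothetical numbers, not taken from any real board):
given free windows at [0x8000_0000, 0xB000_0000) and
[0x1_0000_0000, 0x8_0000_0000) and a 64MiB, naturally aligned resource
that may live anywhere in 64-bit space, the bottom-up allocator picks
the first window and places the resource at

    ALIGN_UP(0x8000_0000, 64MiB)              == 0x8000_0000

whereas with RESOURCE_ALLOCATION_TOP_DOWN it is stolen from the top of
the highest window:

    ALIGN_DOWN(0x8_0000_0000 - 64MiB, 64MiB)  == 0x7_FC00_0000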

Change-Id: Iaf463d3e6b37d52e46761d8e210034fded58a8a4
Signed-off-by: Nico Huber <nico.h@gmx.de>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/41957
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
Reviewed-by: Subrata Banik <subratabanik@google.com>
diff --git a/src/device/Kconfig b/src/device/Kconfig
index 6f4a24e..af9beb3 100644
--- a/src/device/Kconfig
+++ b/src/device/Kconfig
@@ -933,6 +933,11 @@
 	  ranges for allocating resources. This allows allocation of resources
 	  above 4G boundary as well.
 
+config RESOURCE_ALLOCATION_TOP_DOWN
+	bool "Allocate resources from top down"
+	default y
+	depends on RESOURCE_ALLOCATOR_V4
+
 config XHCI_UTILS
 	def_bool n
 	help
diff --git a/src/device/resource_allocator_v4.c b/src/device/resource_allocator_v4.c
index 2b62fc8..222f1e9 100644
--- a/src/device/resource_allocator_v4.c
+++ b/src/device/resource_allocator_v4.c
@@ -372,6 +372,9 @@
 static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
 				     unsigned long type_mask, unsigned long type_match)
 {
+	const bool allocate_top_down =
+		bus->dev->path.type == DEVICE_PATH_DOMAIN &&
+		CONFIG(RESOURCE_ALLOCATION_TOP_DOWN);
 	struct resource *resource = NULL;
 	const struct device *dev;
 
@@ -381,7 +384,7 @@
 			continue;
 
 		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
-				    type_match, &resource->base) == false) {
+				    type_match, &resource->base, allocate_top_down) == false) {
 			printk(BIOS_ERR, "  ERROR: Resource didn't fit!!! ");
 			printk(BIOS_DEBUG, "  %s %02lx *  size: 0x%llx limit: %llx %s\n",
 			       dev_path(dev), resource->index,
diff --git a/src/include/memrange.h b/src/include/memrange.h
index a8a8de9..f7c5bfc 100644
--- a/src/include/memrange.h
+++ b/src/include/memrange.h
@@ -161,15 +161,17 @@
 					 const struct range_entry *r);
 
 /* Steals memory from the available list in given ranges as per the constraints:
- * limit = Upper bound for the memory range to steal (Inclusive).
- * size  = Requested size for the stolen memory.
- * align = Required alignment(log 2) for the starting address of the stolen memory.
- * tag   = Use a range that matches the given tag.
+ * limit    = Upper bound for the memory range to steal (Inclusive).
+ * size     = Requested size for the stolen memory.
+ * align    = Required alignment(log 2) for the starting address of the stolen memory.
+ * tag      = Use a range that matches the given tag.
+ * from_top = Steal the highest possible range.
  *
  * If the constraints can be satisfied, this function creates a hole in the memrange,
  * writes the base address of that hole to stolen_base and returns true. Otherwise it returns
  * false. */
 bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
-			unsigned char align, unsigned long tag, resource_t *stolen_base);
+			unsigned char align, unsigned long tag, resource_t *stolen_base,
+			bool from_top);
 
 #endif /* MEMRANGE_H_ */
diff --git a/src/lib/memrange.c b/src/lib/memrange.c
index 39f502c..b68b86e 100644
--- a/src/lib/memrange.c
+++ b/src/lib/memrange.c
@@ -378,11 +378,11 @@
 
 /* Find a range entry that satisfies the given constraints to fit a hole that matches the
  * required alignment, is big enough, does not exceed the limit and has a matching tag. */
-static const struct range_entry *memranges_find_entry(struct memranges *ranges,
-						      resource_t limit, resource_t size,
-						      unsigned char align, unsigned long tag)
+static const struct range_entry *
+memranges_find_entry(struct memranges *ranges, resource_t limit, resource_t size,
+		     unsigned char align, unsigned long tag, bool last)
 {
-	const struct range_entry *r;
+	const struct range_entry *r, *last_entry = NULL;
 	resource_t base, end;
 
 	if (size == 0)
@@ -407,25 +407,35 @@
 		if (end > limit)
 			break;
 
-		return r;
+		if (!last)
+			return r;
+
+		last_entry = r;
 	}
 
-	return NULL;
+	return last_entry;
 }
 
 bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
-			unsigned char align, unsigned long tag, resource_t *stolen_base)
+			unsigned char align, unsigned long tag, resource_t *stolen_base,
+			bool from_top)
 {
-	resource_t base;
-	const struct range_entry *r = memranges_find_entry(ranges, limit, size, align, tag);
+	const struct range_entry *r;
 
+	r = memranges_find_entry(ranges, limit, size, align, tag, from_top);
 	if (r == NULL)
 		return false;
 
-	base = ALIGN_UP(r->begin, POWER_OF_2(align));
-
-	memranges_create_hole(ranges, base, size);
-	*stolen_base = base;
+	if (from_top) {
+		/* Ensure we're within the range, even aligned down.
+		   Proof is simple: if ALIGN_UP(r->begin) were
+		   higher, the stolen range wouldn't fit. */
+		assert(r->begin <= ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align)));
+		*stolen_base = ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align));
+	} else {
+		*stolen_base = ALIGN_UP(r->begin, POWER_OF_2(align));
+	}
+	memranges_create_hole(ranges, *stolen_base, size);
 
 	return true;
 }
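
A hedged usage sketch of the new memranges_steal() signature follows. It assumes
the existing memranges_init_empty()/memranges_insert()/memranges_teardown()
helpers and the IORESOURCE_MEM tag; the window addresses are made up, and the
snippet is only meant to build inside the coreboot tree, purely to illustrate
the from_top flag (it is not part of the patch):

#include <console/console.h>
#include <device/resource.h>
#include <memrange.h>

static struct range_entry free_entries[4];

static void steal_above_4g_example(void)
{
	struct memranges ranges;
	resource_t base;

	/* One hypothetical 2GiB window just above the 4G boundary. */
	memranges_init_empty(&ranges, free_entries, 4);
	memranges_insert(&ranges, 1ULL << 32, 2ULL << 30, IORESOURCE_MEM);

	/* from_top=true carves the 64MiB hole from the top of the window,
	   so base becomes 0x17c000000 instead of 0x100000000. */
	if (memranges_steal(&ranges, (resource_t)-1, 64ULL << 20, 26,
			    IORESOURCE_MEM, &base, true))
		printk(BIOS_DEBUG, "stole 64MiB @ 0x%llx\n", base);

	memranges_teardown(&ranges);
}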