Angel Pons | 32859fc | 2020-04-02 23:48:27 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Aaron Durbin | a05a852 | 2013-03-22 20:44:46 -0500 | [diff] [blame] | 2 | #ifndef MEMRANGE_H_ |
| 3 | #define MEMRANGE_H_ |
| 4 | |
| 5 | #include <device/resource.h> |
Furquan Shaikh | 9c6274c | 2020-03-11 19:06:24 -0700 | [diff] [blame] | 6 | #include <stdbool.h> |
Elyes HAOUAS | 5817c56 | 2020-07-12 09:03:22 +0200 | [diff] [blame] | 7 | #include <stddef.h> |
Aaron Durbin | a05a852 | 2013-03-22 20:44:46 -0500 | [diff] [blame] | 8 | |
/* A memranges structure consists of a list of range_entry(s). The structure
 * is exposed so that a memranges can be used on the stack if needed. */
struct memranges {
	/* Head of the singly linked list of live entries. */
	struct range_entry *entries;
	/* coreboot doesn't have a free() function. Therefore, keep a cache of
	 * free'd entries for reuse instead of returning memory. */
	struct range_entry *free_list;
	/* Required alignment (log 2) for base and end addresses of each range. */
	unsigned char align;
};
| 19 | |
/* Each region within a memranges structure is represented by a
 * range_entry structure. Use the associated range_entry_(base|end|size|tag)
 * functions to interrogate its properties. i.e. don't rely on one's own
 * interpretation of the fields. */
struct range_entry {
	/* Inclusive beginning address. */
	resource_t begin;
	/* Inclusive end address: range_entry_init() stores excl_end - 1, and
	 * range_entry_end() returns end + 1 to present an exclusive end. */
	resource_t end;
	/* Caller-defined classification of this range. */
	unsigned long tag;
	/* Next entry in the list, or NULL for the last one. */
	struct range_entry *next;
};
| 30 | |
Aaron Durbin | e884592 | 2016-03-08 10:47:18 -0600 | [diff] [blame] | 31 | /* Initialize a range_entry with inclusive beginning address and exclusive |
| 32 | * end address along with the appropriate tag. */ |
| 33 | static inline void range_entry_init(struct range_entry *re, |
| 34 | resource_t incl_begin, resource_t excl_end, |
| 35 | unsigned long tag) |
| 36 | { |
| 37 | re->begin = incl_begin; |
| 38 | re->end = excl_end - 1; |
| 39 | re->tag = tag; |
| 40 | re->next = NULL; |
| 41 | } |
| 42 | |
Aaron Durbin | a05a852 | 2013-03-22 20:44:46 -0500 | [diff] [blame] | 43 | /* Return inclusive base address of memory range. */ |
| 44 | static inline resource_t range_entry_base(const struct range_entry *r) |
| 45 | { |
| 46 | return r->begin; |
| 47 | } |
| 48 | |
| 49 | /* Return exclusive end address of memory range. */ |
| 50 | static inline resource_t range_entry_end(const struct range_entry *r) |
| 51 | { |
| 52 | return r->end + 1; |
| 53 | } |
| 54 | |
Elyes HAOUAS | 5323bf4 | 2021-01-16 14:58:46 +0100 | [diff] [blame] | 55 | /* Return size of memory range. */ |
Aaron Durbin | a05a852 | 2013-03-22 20:44:46 -0500 | [diff] [blame] | 56 | static inline resource_t range_entry_size(const struct range_entry *r) |
| 57 | { |
| 58 | return r->end - r->begin + 1; |
| 59 | } |
| 60 | |
| 61 | static inline unsigned long range_entry_tag(const struct range_entry *r) |
| 62 | { |
| 63 | return r->tag; |
| 64 | } |
| 65 | |
Aaron Durbin | f6f6e13 | 2013-03-26 21:22:42 -0500 | [diff] [blame] | 66 | static inline void range_entry_update_tag(struct range_entry *r, |
Lee Leahy | 708fc27 | 2017-03-07 12:18:53 -0800 | [diff] [blame] | 67 | unsigned long new_tag) |
Aaron Durbin | f6f6e13 | 2013-03-26 21:22:42 -0500 | [diff] [blame] | 68 | { |
| 69 | r->tag = new_tag; |
| 70 | } |
| 71 | |
Aaron Durbin | d8bd3ff | 2020-05-06 12:50:51 -0600 | [diff] [blame] | 72 | static inline bool memranges_is_empty(const struct memranges *ranges) |
Furquan Shaikh | 2190a63 | 2020-03-12 16:43:49 -0700 | [diff] [blame] | 73 | { |
| 74 | return ranges->entries == NULL; |
| 75 | } |
| 76 | |
/* Iterate over each entry in a memranges structure, from first to last.
 * Ranges cannot be deleted while processing each entry as the list cannot
 * be safely traversed after such an operation.
 * r - range_entry pointer (the loop variable, supplied by the caller).
 * ranges - memranges pointer */
#define memranges_each_entry(r, ranges) \
	for (r = (ranges)->entries; r != NULL; r = r->next)
| 84 | |
/* Initialize memranges structure providing an optional array of range_entry
 * to use as the free list. Additionally, it accepts an align parameter that
 * represents the required alignment (log 2) of addresses. */
void memranges_init_empty_with_alignment(struct memranges *ranges,
					 struct range_entry *free,
					 size_t num_free, unsigned char align);

/* Initialize and fill a memranges structure according to the
 * mask and match type for all memory resources. Tag each entry with the
 * specified type. Additionally, it accepts an align parameter that
 * represents the required alignment (log 2) of addresses. */
void memranges_init_with_alignment(struct memranges *ranges,
				   unsigned long mask, unsigned long match,
				   unsigned long tag, unsigned char align);

/* Initialize memranges structure providing an optional array of range_entry
 * to use as the free list. Addresses are default aligned to 4KiB (2^12). */
#define memranges_init_empty(__ranges, __free, __num_free) \
	memranges_init_empty_with_alignment(__ranges, __free, __num_free, 12)

/* Initialize and fill a memranges structure according to the
 * mask and match type for all memory resources. Tag each entry with the
 * specified type. Addresses are default aligned to 4KiB (2^12). */
#define memranges_init(__ranges, __mask, __match, __tag) \
	memranges_init_with_alignment(__ranges, __mask, __match, __tag, 12)
Aaron Durbin | a05a852 | 2013-03-22 20:44:46 -0500 | [diff] [blame] | 110 | |
/* Clone a memranges structure: the new memranges gets the same entries as
 * the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges);

/* Remove and free all entries within the memranges structure. */
void memranges_teardown(struct memranges *ranges);
| 116 | |
/* Add memory resources that match with the corresponding mask and match.
 * Each entry will be tagged with the provided tag. e.g. To populate
 * all cacheable memory resources in the range:
 * memranges_add_resources(range, IORESOURCE_CACHEABLE,
 *                         IORESOURCE_CACHEABLE, my_cacheable_tag); */
void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag);

/* Add memory resources that match with the corresponding mask and match but
 * also provide filter as additional check. The filter will return non-zero
 * to add the resource or zero to not add the resource. Each entry will be
 * tagged with the provided tag. e.g. To populate all cacheable memory
 * resources in the range with a filter:
 * memranges_add_resources_filter(range, IORESOURCE_CACHEABLE,
 *                                IORESOURCE_CACHEABLE, my_cacheable_tag, filter); */
typedef int (*memrange_filter_t)(struct device *dev, struct resource *res);
void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter);
Aaron Durbin | ca4f4b8 | 2014-02-08 15:41:52 -0600 | [diff] [blame] | 138 | |
/* Fill all address ranges up to limit (exclusive) not covered by an entry by
 * inserting new entries with the provided tag. */
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag);

/* Create a hole in the range by deleting/modifying entries that overlap with
 * the region specified by base and size. */
void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size);

/* Insert a resource to the given memranges. All existing ranges
 * covered by range specified by base and size will be removed before a
 * new one is added. */
void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag);

/* Update all entries with old_tag to new_tag. */
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag);
Aaron Durbin | ed9307d | 2014-02-05 15:44:30 -0600 | [diff] [blame] | 158 | |
/* Returns next entry after the provided entry. NULL if r is last. */
struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r);

/* Steals memory from the available list in given ranges as per the constraints:
 * limit = Upper bound for the memory range to steal (inclusive).
 * size = Requested size for the stolen memory.
 * align = Required alignment (log 2) for the starting address of the stolen memory.
 * tag = Use a range that matches the given tag.
 * from_top = Steal the highest possible range.
 *
 * If the constraints can be satisfied, this function creates a hole in the memrange,
 * writes the base address of that hole to stolen_base and returns true. Otherwise it returns
 * false. */
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
		     unsigned char align, unsigned long tag, resource_t *stolen_base,
		     bool from_top);
Furquan Shaikh | 9c6274c | 2020-03-11 19:06:24 -0700 | [diff] [blame] | 176 | |
Aaron Durbin | a05a852 | 2013-03-22 20:44:46 -0500 | [diff] [blame] | 177 | #endif /* MEMRANGE_H_ */ |