blob: 35e97608e6cb1654ef13ce331317989062b1a3ca [file] [log] [blame]
Angel Pons32859fc2020-04-02 23:48:27 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Aaron Durbina05a8522013-03-22 20:44:46 -05002#ifndef MEMRANGE_H_
3#define MEMRANGE_H_
4
5#include <device/resource.h>
Furquan Shaikh9c6274c2020-03-11 19:06:24 -07006#include <stdbool.h>
Elyes HAOUAS5817c562020-07-12 09:03:22 +02007#include <stddef.h>
Aaron Durbina05a8522013-03-22 20:44:46 -05008
/* A memranges structure consists of a list of range_entry(s). The structure
 * is exposed so that a memranges can be used on the stack if needed. */
struct memranges {
	/* Head of the singly-linked list of active range entries. */
	struct range_entry *entries;
	/* coreboot doesn't have a free() function. Therefore, keep a cache of
	 * free'd entries. */
	struct range_entry *free_list;
	/* Alignment(log 2) for base and end addresses of the range. */
	unsigned char align;
};
19
/* Each region within a memranges structure is represented by a
 * range_entry structure. Use the associated range_entry_(base|end|size|tag)
 * functions to interrogate its properties. i.e. don't rely on one's own
 * interpretation of the fields. */
struct range_entry {
	/* Inclusive beginning address of the range. */
	resource_t begin;
	/* Inclusive end address; range_entry_end() adds 1 to report the
	 * conventional exclusive end. */
	resource_t end;
	/* Caller-chosen classification for this range. */
	unsigned long tag;
	/* Next entry in the list; NULL terminates the list. */
	struct range_entry *next;
};
30
Aaron Durbine8845922016-03-08 10:47:18 -060031/* Initialize a range_entry with inclusive beginning address and exclusive
32 * end address along with the appropriate tag. */
33static inline void range_entry_init(struct range_entry *re,
34 resource_t incl_begin, resource_t excl_end,
35 unsigned long tag)
36{
37 re->begin = incl_begin;
38 re->end = excl_end - 1;
39 re->tag = tag;
40 re->next = NULL;
41}
42
/* Return inclusive base address of memory range. */
static inline resource_t range_entry_base(const struct range_entry *r)
{
	return r->begin;
}
48
/* Return exclusive end address of memory range. */
static inline resource_t range_entry_end(const struct range_entry *r)
{
	/* r->end is stored inclusive; +1 converts it to an exclusive bound. */
	return r->end + 1;
}
54
/* Return size of memory range. */
static inline resource_t range_entry_size(const struct range_entry *r)
{
	/* Both bounds are inclusive, hence the +1. */
	return r->end - r->begin + 1;
}
60
/* Return the tag associated with the memory range. */
static inline unsigned long range_entry_tag(const struct range_entry *r)
{
	return r->tag;
}
65
/* Replace the tag of an existing range entry with new_tag. */
static inline void range_entry_update_tag(struct range_entry *r,
					  unsigned long new_tag)
{
	r->tag = new_tag;
}
71
/* Return true if the memranges structure currently holds no entries. */
static inline bool memranges_is_empty(const struct memranges *ranges)
{
	return ranges->entries == NULL;
}
76
/* Iterate over each entry in a memranges structure. Ranges cannot
 * be deleted while processing each entry as the list cannot be safely
 * traversed after such an operation.
 * r - range_entry pointer.
 * ranges - memranges pointer */
#define memranges_each_entry(r, ranges) \
	for ((r) = (ranges)->entries; (r) != NULL; (r) = (r)->next)
84
/* Initialize memranges structure providing an optional array of range_entry
 * to use as the free list. Additionally, it accepts an align parameter that
 * represents the required alignment(log 2) of addresses. */
void memranges_init_empty_with_alignment(struct memranges *ranges,
					 struct range_entry *free,
					 size_t num_free, unsigned char align);
Furquan Shaikh196ee2b2014-07-18 10:25:54 -070091
/* Initialize and fill a memranges structure according to the
 * mask and match type for all memory resources. Tag each entry with the
 * specified type. Additionally, it accepts an align parameter that
 * represents the required alignment(log 2) of addresses. */
void memranges_init_with_alignment(struct memranges *ranges,
				   unsigned long mask, unsigned long match,
				   unsigned long tag, unsigned char align);
Furquan Shaikh14290922020-03-11 14:35:35 -070099
/* Default alignment(log 2) used by the convenience macros below:
 * addresses are aligned to 4KiB(2^12). */
#define MEMRANGES_DEFAULT_ALIGN 12

/* Initialize memranges structure providing an optional array of range_entry
 * to use as the free list. Addresses are default aligned to 4KiB(2^12). */
#define memranges_init_empty(__ranges, __free, __num_free) \
	memranges_init_empty_with_alignment(__ranges, __free, __num_free, \
					    MEMRANGES_DEFAULT_ALIGN)

/* Initialize and fill a memranges structure according to the
 * mask and match type for all memory resources. Tag each entry with the
 * specified type. Addresses are default aligned to 4KiB(2^12). */
#define memranges_init(__ranges, __mask, __match, __tag) \
	memranges_init_with_alignment(__ranges, __mask, __match, __tag, \
				      MEMRANGES_DEFAULT_ALIGN)
Aaron Durbina05a8522013-03-22 20:44:46 -0500110
/* Clone a memrange. The new memrange has the same entries as the old one.
 * NOTE(review): presumably newranges need not be initialized beforehand —
 * verify against the implementation in memrange.c. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges);
113
/* Remove and free all entries within the memranges structure.
 * NOTE(review): "free" here likely means returning entries to the internal
 * free_list cache (there is no heap free()) — confirm in memrange.c. */
void memranges_teardown(struct memranges *ranges);
116
/* Add memory resources that match with the corresponding mask and match.
 * Each entry will be tagged with the provided tag. e.g. To populate
 * all cacheable memory resources in the range:
 * memranges_add_resources(range, IORESOURCE_CACHEABLE,
 *	IORESOURCE_CACHEABLE, my_cacheable_tag); */
void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag);
Aaron Durbina05a8522013-03-22 20:44:46 -0500125
/* Add memory resources that match with the corresponding mask and match but
 * also provide filter as additional check. The filter will return non-zero
 * to add the resource or zero to not add the resource. Each entry will be
 * tagged with the provided tag. e.g. To populate all cacheable memory
 * resources in the range with a filter:
 * memranges_add_resources_filter(range, IORESOURCE_CACHEABLE,
 *	IORESOURCE_CACHEABLE, my_cacheable_tag, filter); */
/* Filter callback: non-zero return includes the resource, zero skips it. */
typedef int (*memrange_filter_t)(struct device *dev, struct resource *res);
void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter);
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600138
/* Fill all address ranges up to limit (exclusive) not covered by an entry by
 * inserting new entries with the provided tag. */
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag);
Aaron Durbina05a8522013-03-22 20:44:46 -0500143
/* Create a hole in the range by deleting/modifying entries that overlap with
 * the region specified by base and size. */
void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size);
Aaron Durbina05a8522013-03-22 20:44:46 -0500148
/* Insert a resource to the given memranges. All existing ranges
 * covered by range specified by base and size will be removed before a
 * new one is added. */
void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag);
Aaron Durbina05a8522013-03-22 20:44:46 -0500154
/* Update all entries with old_tag to new_tag. */
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag);
Aaron Durbined9307d2014-02-05 15:44:30 -0600158
/* Returns next entry after the provided entry. NULL if r is last. */
struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r);
Furquan Shaikh9c6274c2020-03-11 19:06:24 -0700162
/* Steals memory from the available list in given ranges as per the constraints:
 * limit = Upper bound for the memory range to steal (Inclusive).
 * size = Requested size for the stolen memory.
 * align = Required alignment(log 2) for the starting address of the stolen memory.
 * tag = Use a range that matches the given tag.
 * from_top = Steal the highest possible range.
 *
 * If the constraints can be satisfied, this function creates a hole in the memrange,
 * writes the base address of that hole to stolen_base and returns true. Otherwise it
 * returns false. */
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
		     unsigned char align, unsigned long tag, resource_t *stolen_base,
		     bool from_top);
Furquan Shaikh9c6274c2020-03-11 19:06:24 -0700176
Aaron Durbina05a8522013-03-22 20:44:46 -0500177#endif /* MEMRANGE_H_ */