/* SPDX-License-Identifier: GPL-2.0-only */
/* This file is part of the coreboot project. */

#ifndef MEMRANGE_H_
#define MEMRANGE_H_

#include <device/resource.h>
#include <stdbool.h>
/* A memranges structure consists of a list of range_entry(s). The structure
 * is exposed so that a memranges can be used on the stack if needed. */
struct memranges {
	/* Head of the singly-linked list of active range entries. */
	struct range_entry *entries;
	/* coreboot doesn't have a free() function. Therefore, keep a cache of
	 * free'd entries. */
	struct range_entry *free_list;
	/* Alignment (log 2) for base and end addresses of the range. */
	unsigned char align;
};
19
/* Each region within a memranges structure is represented by a
 * range_entry structure. Use the associated range_entry_(base|end|size|tag)
 * functions to interrogate its properties. i.e. don't rely on one's own
 * interpretation of the fields. */
struct range_entry {
	/* Inclusive beginning address of the range. */
	resource_t begin;
	/* Inclusive end address: stored as the caller's exclusive end - 1
	 * (see range_entry_init()). */
	resource_t end;
	/* Caller-assigned tag classifying this range. */
	unsigned long tag;
	/* Next entry in the list; NULL terminates. */
	struct range_entry *next;
};
30
Aaron Durbine8845922016-03-08 10:47:18 -060031/* Initialize a range_entry with inclusive beginning address and exclusive
32 * end address along with the appropriate tag. */
33static inline void range_entry_init(struct range_entry *re,
34 resource_t incl_begin, resource_t excl_end,
35 unsigned long tag)
36{
37 re->begin = incl_begin;
38 re->end = excl_end - 1;
39 re->tag = tag;
40 re->next = NULL;
41}
42
Aaron Durbina05a8522013-03-22 20:44:46 -050043/* Return inclusive base address of memory range. */
44static inline resource_t range_entry_base(const struct range_entry *r)
45{
46 return r->begin;
47}
48
49/* Return exclusive end address of memory range. */
50static inline resource_t range_entry_end(const struct range_entry *r)
51{
52 return r->end + 1;
53}
54
55/* Return size of of memory range. */
56static inline resource_t range_entry_size(const struct range_entry *r)
57{
58 return r->end - r->begin + 1;
59}
60
61static inline unsigned long range_entry_tag(const struct range_entry *r)
62{
63 return r->tag;
64}
65
Aaron Durbinf6f6e132013-03-26 21:22:42 -050066static inline void range_entry_update_tag(struct range_entry *r,
Lee Leahy708fc272017-03-07 12:18:53 -080067 unsigned long new_tag)
Aaron Durbinf6f6e132013-03-26 21:22:42 -050068{
69 r->tag = new_tag;
70}
71
Furquan Shaikh2190a632020-03-12 16:43:49 -070072static inline bool memranges_is_empty(struct memranges *ranges)
73{
74 return ranges->entries == NULL;
75}
76
/* Iterate over each entry in a memranges structure. Ranges cannot
 * be deleted while processing each entry as the list cannot be safely
 * traversed after such an operation.
 * r - range_entry pointer.
 * ranges - memranges pointer
 * Macro arguments are fully parenthesized so complex expressions
 * expand safely (CERT PRE01-C). */
#define memranges_each_entry(r, ranges) \
	for ((r) = (ranges)->entries; (r) != NULL; (r) = (r)->next)
84
Furquan Shaikh14290922020-03-11 14:35:35 -070085
/* Initialize memranges structure providing an optional array of range_entry
 * to use as the free list. Additionally, it accepts an align parameter that
 * represents the required alignment (log 2) of addresses.
 * free/num_free may describe a caller-owned cache of entries; pass NULL/0
 * for none. */
void memranges_init_empty_with_alignment(struct memranges *ranges,
					 struct range_entry *free,
					 size_t num_free, unsigned char align);
Furquan Shaikh196ee2b2014-07-18 10:25:54 -070092
/* Initialize and fill a memranges structure according to the
 * mask and match type for all memory resources. Tag each entry with the
 * specified type. Additionally, it accepts an align parameter that
 * represents the required alignment (log 2) of addresses. */
void memranges_init_with_alignment(struct memranges *ranges,
				   unsigned long mask, unsigned long match,
				   unsigned long tag, unsigned char align);
Furquan Shaikh14290922020-03-11 14:35:35 -0700100
/* Initialize memranges structure providing an optional array of range_entry
 * to use as the free list. Addresses are default aligned to 4KiB (2^12).
 * Note: the expansion deliberately carries no trailing semicolon -- the
 * caller's own ';' terminates the statement, so unbraced if/else bodies
 * stay well-formed (CERT PRE11-C). */
#define memranges_init_empty(__ranges, __free, __num_free)		\
	memranges_init_empty_with_alignment((__ranges), (__free),	\
					    (__num_free), 12)
Furquan Shaikh14290922020-03-11 14:35:35 -0700105
/* Initialize and fill a memranges structure according to the
 * mask and match type for all memory resources. Tag each entry with the
 * specified type. Addresses are default aligned to 4KiB (2^12).
 * Note: no trailing semicolon in the expansion -- the caller's own ';'
 * terminates the statement, so unbraced if/else bodies stay well-formed
 * (CERT PRE11-C). */
#define memranges_init(__ranges, __mask, __match, __tag)		\
	memranges_init_with_alignment((__ranges), (__mask),		\
				      (__match), (__tag), 12)
Aaron Durbina05a8522013-03-22 20:44:46 -0500111
/* Clone a memrange. The new memrange has the same entries as the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges);
114
/* Remove and free all entries within the memranges structure. */
void memranges_teardown(struct memranges *ranges);
117
/* Add memory resources that match with the corresponding mask and match.
 * Each entry will be tagged with the provided tag. e.g. To populate
 * all cacheable memory resources in the range:
 * memranges_add_resources(range, IORESOURCE_CACHEABLE,
 *			IORESOURCE_CACHEABLE, my_cacheable_tag); */
void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag);
Aaron Durbina05a8522013-03-22 20:44:46 -0500126
/* Add memory resources that match with the corresponding mask and match but
 * also provide filter as additional check. The filter will return non-zero
 * to add the resource or zero to not add the resource. Each entry will be
 * tagged with the provided tag. e.g. To populate all cacheable memory
 * resources in the range with a filter:
 * memranges_add_resources_filter(range, IORESOURCE_CACHEABLE,
 *			IORESOURCE_CACHEABLE, my_cacheable_tag, filter); */
typedef int (*memrange_filter_t)(struct device *dev, struct resource *res);
void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter);
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600139
/* Fill all address ranges up to limit (exclusive) not covered by an entry by
 * inserting new entries with the provided tag. */
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag);
Aaron Durbina05a8522013-03-22 20:44:46 -0500144
/* Create a hole in the range by deleting/modifying entries that overlap with
 * the region specified by base and size. */
void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size);
Aaron Durbina05a8522013-03-22 20:44:46 -0500149
/* Insert a resource to the given memranges. All existing ranges
 * covered by range specified by base and size will be removed before a
 * new one is added. */
void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag);
Aaron Durbina05a8522013-03-22 20:44:46 -0500155
/* Update all entries with old_tag to new_tag. */
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag);
Aaron Durbined9307d2014-02-05 15:44:30 -0600159
/* Returns next entry after the provided entry. NULL if r is last. */
struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r);
Furquan Shaikh9c6274c2020-03-11 19:06:24 -0700163
/* Steals memory from the available list in given ranges as per the constraints:
 * limit = Upper bound for the memory range to steal.
 * size = Requested size for the stolen memory.
 * align = Required alignment (log 2) for the starting address of the stolen memory.
 * tag = Use a range that matches the given tag.
 *
 * If the constraints can be satisfied, this function creates a hole in the memrange,
 * writes the base address of that hole to stolen_base and returns true. Otherwise it returns
 * false. */
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
		     unsigned char align, unsigned long tag, resource_t *stolen_base);
Furquan Shaikh9c6274c2020-03-11 19:06:24 -0700175
Aaron Durbina05a8522013-03-22 20:44:46 -0500176#endif /* MEMRANGE_H_ */