/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <string.h>

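/* Return 1 if region c lies entirely within region p, 0 otherwise. The last
 * check also rejects regions whose end wraps around the address space. */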
int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) < region_offset(p))
		return 0;

	if (region_end(c) > region_end(p))
		return 0;

	if (region_end(c) < region_offset(c))
		return 0;

	return 1;
}

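/* Translate inner, given relative to outer, into outer's offset space and
 * check that the result still falls within outer's bounds. */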
static int normalize_and_ok(const struct region *outer, struct region *inner)
{
	inner->offset += region_offset(outer);
	return region_is_subregion(outer, inner);
}

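/* Walk to the root region device that actually implements the ops. Chained
 * devices store a pointer to their root and carry no ops of their own. */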
static const struct region_device *rdev_root(const struct region_device *rdev)
{
	if (rdev->root == NULL)
		return rdev;
	return rdev->root;
}

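/* Return the offset of child c within parent p, or -1 if the two devices do
 * not share a root or c does not lie entirely within p. */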
ssize_t rdev_relative_offset(const struct region_device *p,
				const struct region_device *c)
{
	if (rdev_root(p) != rdev_root(c))
		return -1;

	if (!region_is_subregion(&p->region, &c->region))
		return -1;

	return region_device_offset(c) - region_device_offset(p);
}

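/* Map size bytes at offset within rd. The request is bounds-checked against
 * and translated into the root device's offset space before being handed to
 * the root's mmap op. Returns NULL on a bounds violation or if the root
 * provides no mmap op. */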
void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return NULL;

	rdev = rdev_root(rd);

	if (rdev->ops->mmap == NULL)
		return NULL;

	return rdev->ops->mmap(rdev, req.offset, req.size);
}

int rdev_munmap(const struct region_device *rd, void *mapping)
{
	const struct region_device *rdev;

	rdev = rdev_root(rd);

	if (rdev->ops->munmap == NULL)
		return -1;

	return rdev->ops->munmap(rdev, mapping);
}

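/* Read, write and erase follow the same pattern as mmap: normalize the
 * request into the root device's offset space, then dispatch to the root's
 * op. readat is the only op dispatched without a NULL check, so every
 * backend is expected to provide it. */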
ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	return rdev->ops->readat(rdev, b, req.offset, req.size);
}

ssize_t rdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	if (rdev->ops->writeat == NULL)
		return -1;

	return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	/* If the backend provides no eraseat op, treat the erase as having
	 * completed successfully. */
	if (rdev->ops->eraseat == NULL)
		return size;

	return rdev->ops->eraseat(rdev, req.offset, req.size);
}

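/* Carve out a child region device covering size bytes at offset within
 * parent. The child stores root-relative coordinates and no ops of its own,
 * so later accesses dispatch straight to the root without per-level
 * translation.
 *
 * Illustrative sketch (hypothetical names, assuming flash_rdev is an
 * initialized region device with a 4 KiB sub-region at 0x1000):
 *
 *	struct region_device sub;
 *	if (rdev_chain(&sub, &flash_rdev, 0x1000, 0x1000) == 0)
 *		rdev_readat(&sub, buf, 0, sizeof(buf));
 */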
int rdev_chain(struct region_device *child, const struct region_device *parent,
		size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&parent->region, &req))
		return -1;

	/* Keep track of root region device. Note the offsets are relative
	 * to the root device. */
	child->root = rdev_root(parent);
	child->ops = NULL;
	child->region.offset = req.offset;
	child->region.size = req.size;

	return 0;
}

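/* Memory-backed region devices: the backing store is a plain buffer, so
 * mmap is pointer arithmetic and munmap is a no-op. */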
static void mem_region_device_init(struct mem_region_device *mdev,
		const struct region_device_ops *ops, void *base, size_t size)
{
	memset(mdev, 0, sizeof(*mdev));
	mdev->base = base;
	mdev->rdev.ops = ops;
	mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}

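/* Initialize a standalone region device with explicit ops and bounds. */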
void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size)
{
	memset(rdev, 0, sizeof(*rdev));
	rdev->root = NULL;
	rdev->ops = ops;
	rdev->region.offset = offset;
	rdev->region.size = size;
}

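/* Translating (xlate) region devices present a virtual region of parent_size
 * bytes in which only the configured windows are backed by an access device.
 * A request is matched against a window and forwarded with the window's
 * offset subtracted; requests that do not fall entirely within a single
 * window fail. */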
static void xlate_region_device_init(struct xlate_region_device *xdev,
			const struct region_device_ops *ops,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	memset(xdev, 0, sizeof(*xdev));
	xdev->window_count = window_count;
	xdev->window_arr = window_arr;
	region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_ro_ops, window_count, window_arr,
			parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_rw_ops, window_count, window_arr,
			parent_size);
}

void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
			size_t sub_region_offset, size_t sub_region_size)
{
	window->access_dev = access_dev;
	window->sub_region.offset = sub_region_offset;
	window->sub_region.size = sub_region_size;
}

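/* Op implementations for memory-backed devices: offsets index directly into
 * the base buffer. */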
static void *mdev_mmap(const struct region_device *rd, size_t offset,
			size_t size __unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __unused,
			void *mapping __unused)
{
	return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(&mdev->base[offset], b, size);

	return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memset(&mdev->base[offset], 0, size);

	return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};

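/* Static memory devices spanning the entire address space. They let an
 * arbitrary pointer be chained as a region device: the pointer value becomes
 * the offset within the [0, SIZE_MAX] region. */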
static const struct mem_region_device mem_rdev = MEM_REGION_DEV_RO_INIT(0, ~(size_t)0);
static const struct mem_region_device mem_rdev_rw = MEM_REGION_DEV_RW_INIT(0, ~(size_t)0);

int rdev_chain_mem(struct region_device *child, const void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev.rdev, (uintptr_t)base, size);
}

int rdev_chain_mem_rw(struct region_device *child, void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev_rw.rdev, (uintptr_t)base, size);
}

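/* mmap helpers for backends with no direct mapping: mmap is serviced by
 * allocating a buffer from the device's memory pool and reading the data
 * into it. Callers must munmap to return the buffer to the pool. */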
void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
				size_t size)
{
	struct mmap_helper_region_device *mdev;
	void *mapping;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mapping = mem_pool_alloc(mdev->pool, size);

	if (mapping == NULL)
		return NULL;

	if (rd->ops->readat(rd, mapping, offset, size) != size) {
		mem_pool_free(mdev->pool, mapping);
		return NULL;
	}

	return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
	struct mmap_helper_region_device *mdev;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mem_pool_free(mdev->pool, mapping);

	return 0;
}

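/* Find the window that fully contains req, or NULL if none does. */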
static const struct xlate_window *xlate_find_window(const struct xlate_region_device *xldev,
		const struct region *req)
{
	size_t i;
	const struct xlate_window *xlwindow;

	for (i = 0; i < xldev->window_count; i++) {
		xlwindow = &xldev->window_arr[i];
		if (region_is_subregion(&xlwindow->sub_region, req))
			return xlwindow;
	}

	return NULL;
}

static void *xlate_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct xlate_region_device *xldev;
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return NULL;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_mmap(xlwindow->access_dev, offset, size);
}

static int xlate_munmap(const struct region_device *rd __unused, void *mapping __unused)
{
	/*
	 * xlate_region_device does not keep track of the access device that was used to service
	 * a mmap request. So, munmap does not do anything. If munmap functionality is required,
	 * then xlate_region_device will have to be updated to accept some pre-allocated space
	 * from caller to keep track of the mapping requests. Since xlate_region_device is only
	 * used for memory mapped boot media on the backend right now, skipping munmap is fine.
	 */
	return 0;
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_readat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_writeat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_eraseat(xlwindow->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};

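/* Incoherent region devices pair two devices over the same region: one
 * services reads and mappings, the other writes and erases (e.g. when the
 * write path uses a different interface than the read path). */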
static void *incoherent_mmap(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};

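/* Initialize irdev over region r, backed by read for reads/mappings and
 * write for writes/erases. Both backing devices must be exactly the size of
 * r; returns NULL otherwise. */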
const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
				const struct region *r,
				const struct region_device *read,
				const struct region_device *write)
{
	const size_t size = region_sz(r);

	if (size != region_device_sz(read) || size != region_device_sz(write))
		return NULL;

	/* The region is represented as offset 0 to size. That way, the generic
	 * rdev operations can be called on the read or write implementation
	 * without any unnecessary translation because the offsets all start
	 * at 0. */
	region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
	irdev->read = read;
	irdev->write = write;

	return &irdev->rdev;
}