/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <stdint.h>
#include <string.h>

int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) < region_offset(p))
		return 0;

	if (region_end(c) > region_end(p))
		return 0;

	/* Reject child regions whose end address wraps around. */
	if (region_end(c) < region_offset(c))
		return 0;

	return 1;
}

/* Translate inner's offset into outer's coordinate space and check that
 * the result still lies entirely within outer. */
static int normalize_and_ok(const struct region *outer, struct region *inner)
{
	inner->offset += region_offset(outer);
	return region_is_subregion(outer, inner);
}

static const struct region_device *rdev_root(const struct region_device *rdev)
{
	if (rdev->root == NULL)
		return rdev;
	return rdev->root;
}

ssize_t rdev_relative_offset(const struct region_device *p,
				const struct region_device *c)
{
	if (rdev_root(p) != rdev_root(c))
		return -1;

	if (!region_is_subregion(&p->region, &c->region))
		return -1;

	return region_device_offset(c) - region_device_offset(p);
}

void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return NULL;

	rdev = rdev_root(rd);

	if (rdev->ops->mmap == NULL)
		return NULL;

	return rdev->ops->mmap(rdev, req.offset, req.size);
}

int rdev_munmap(const struct region_device *rd, void *mapping)
{
	const struct region_device *rdev;

	rdev = rdev_root(rd);

	if (rdev->ops->munmap == NULL)
		return -1;

	return rdev->ops->munmap(rdev, mapping);
}

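/*
 * Illustrative sketch (not part of this file): a typical mmap/munmap
 * pairing against any initialized region device. The device name and
 * sizes below are hypothetical.
 *
 *	void *p = rdev_mmap(rdev, 0, 64);
 *	if (p != NULL) {
 *		// ... consume the 64 mapped bytes ...
 *		rdev_munmap(rdev, p);
 *	}
 */
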
ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	return rdev->ops->readat(rdev, b, req.offset, req.size);
}

ssize_t rdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	if (rdev->ops->writeat == NULL)
		return -1;

	return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	/* If the eraseat ptr is NULL, assume the erase completed
	 * successfully. */
	if (rdev->ops->eraseat == NULL)
		return size;

	return rdev->ops->eraseat(rdev, req.offset, req.size);
}

int rdev_chain(struct region_device *child, const struct region_device *parent,
		size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&parent->region, &req))
		return -1;

	/* Keep track of the root region device. Note the offsets are
	 * relative to the root device. */
	child->root = rdev_root(parent);
	child->ops = NULL;
	child->region.offset = req.offset;
	child->region.size = req.size;

	return 0;
}

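/*
 * Illustrative sketch: carving a child region device out of a parent.
 * The offset/size values are hypothetical; rdev_chain() returns -1 when
 * the requested window does not fit inside the parent.
 *
 *	struct region_device child;
 *	if (rdev_chain(&child, &parent, 0x100, 0x200) < 0)
 *		return;		// request exceeds the parent's bounds
 *	// child now spans parent offsets [0x100, 0x300).
 */
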
static void mem_region_device_init(struct mem_region_device *mdev,
		const struct region_device_ops *ops, void *base, size_t size)
{
	memset(mdev, 0, sizeof(*mdev));
	mdev->base = base;
	mdev->rdev.ops = ops;
	mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
		size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
		size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}

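/*
 * Illustrative sketch: backing a region device with a plain memory
 * buffer. The buffer and its size are hypothetical.
 *
 *	static uint8_t buf[512];
 *	struct mem_region_device mdev;
 *	mem_region_device_rw_init(&mdev, buf, sizeof(buf));
 *	rdev_writeat(&mdev.rdev, "abc", 0, 3);
 */
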
void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size)
{
	memset(rdev, 0, sizeof(*rdev));
	rdev->root = NULL;
	rdev->ops = ops;
	rdev->region.offset = offset;
	rdev->region.size = size;
}

static void xlate_region_device_init(struct xlate_region_device *xdev,
			const struct region_device_ops *ops,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	memset(xdev, 0, sizeof(*xdev));
	xdev->window_count = window_count;
	xdev->window_arr = window_arr;
	region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_ro_ops, window_count, window_arr,
			parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_rw_ops, window_count, window_arr,
			parent_size);
}

void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
			size_t sub_region_offset, size_t sub_region_size)
{
	window->access_dev = access_dev;
	window->sub_region.offset = sub_region_offset;
	window->sub_region.size = sub_region_size;
}

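/*
 * Illustrative sketch: exposing a 4KiB window of a backing device at
 * offset 0x1000 of a larger translated address space. All names and
 * sizes here are hypothetical.
 *
 *	static struct xlate_window window;
 *	static struct xlate_region_device xdev;
 *	xlate_window_init(&window, &backing_rdev, 0x1000, 0x1000);
 *	xlate_region_device_ro_init(&xdev, 1, &window, 0x10000);
 *	// A read at xdev offset 0x1000 lands at backing_rdev offset 0;
 *	// accesses outside any window fail.
 */
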
static void *mdev_mmap(const struct region_device *rd, size_t offset,
			size_t size __always_unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __always_unused,
			void *mapping __always_unused)
{
	return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(&mdev->base[offset], b, size);

	return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memset(&mdev->base[offset], 0, size);

	return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};

static const struct mem_region_device mem_rdev = MEM_REGION_DEV_RO_INIT(0, ~(size_t)0);
static const struct mem_region_device mem_rdev_rw = MEM_REGION_DEV_RW_INIT(0, ~(size_t)0);

int rdev_chain_mem(struct region_device *child, const void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev.rdev, (uintptr_t)base, size);
}

int rdev_chain_mem_rw(struct region_device *child, void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev_rw.rdev, (uintptr_t)base, size);
}

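/*
 * Illustrative sketch: the shorthand above wraps an arbitrary memory
 * range without declaring a dedicated mem_region_device. "buf" is a
 * hypothetical buffer of at least 16 bytes.
 *
 *	struct region_device rdev;
 *	uint8_t out[16];
 *	if (rdev_chain_mem(&rdev, buf, sizeof(buf)) == 0)
 *		rdev_readat(&rdev, out, 0, sizeof(out));
 */
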
void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	struct mmap_helper_region_device *mdev;
	void *mapping;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mapping = mem_pool_alloc(mdev->pool, size);

	if (mapping == NULL)
		return NULL;

	if (rd->ops->readat(rd, mapping, offset, size) != size) {
		mem_pool_free(mdev->pool, mapping);
		return NULL;
	}

	return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
	struct mmap_helper_region_device *mdev;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mem_pool_free(mdev->pool, mapping);

	return 0;
}

static const struct xlate_window *xlate_find_window(const struct xlate_region_device *xldev,
						    const struct region *req)
{
	size_t i;
	const struct xlate_window *xlwindow;

	for (i = 0; i < xldev->window_count; i++) {
		xlwindow = &xldev->window_arr[i];
		if (region_is_subregion(&xlwindow->sub_region, req))
			return xlwindow;
	}

	return NULL;
}

static void *xlate_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct xlate_region_device *xldev;
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return NULL;

	/* Translate the request into the access device's coordinate space. */
	offset -= region_offset(&xlwindow->sub_region);

	return rdev_mmap(xlwindow->access_dev, offset, size);
}

static int xlate_munmap(const struct region_device *rd __always_unused,
			void *mapping __always_unused)
{
	/*
	 * xlate_region_device does not keep track of the access device that was used to service
	 * a mmap request, so munmap does not do anything. If munmap functionality is required,
	 * then xlate_region_device will have to be updated to accept some pre-allocated space
	 * from the caller to keep track of the mapping requests. Since xlate_region_device is
	 * only used for memory-mapped boot media on the backend right now, skipping munmap is
	 * fine.
	 */
	return 0;
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_readat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_writeat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_eraseat(xlwindow->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};

static void *incoherent_mmap(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};

const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
				const struct region *r,
				const struct region_device *read,
				const struct region_device *write)
{
	const size_t size = region_sz(r);

	if (size != region_device_sz(read) || size != region_device_sz(write))
		return NULL;

	/* The region is represented as offset 0 to size. That way, the generic
	 * rdev operations can be called on the read or write implementation
	 * without any unnecessary translation because the offsets all start
	 * at 0. */
	region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
	irdev->read = read;
	irdev->write = write;

	return &irdev->rdev;
}
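
/*
 * Illustrative sketch: pairing a memory-mapped read path with a
 * write-capable backend of the same size. All device names here are
 * hypothetical.
 *
 *	struct incoherent_rdev irdev;
 *	const struct region_device *rdev;
 *	const struct region r = { .offset = 0, .size = region_device_sz(&spi_rdev) };
 *	rdev = incoherent_rdev_init(&irdev, &r, &mmap_rdev, &spi_rdev);
 *	if (rdev != NULL)
 *		rdev_writeat(rdev, data, 0, data_size);
 */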