/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <string.h>

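/*
 * Return 1 if the child region 'c' lies entirely within the parent
 * region 'p', 0 otherwise. The final check rejects a child whose
 * offset + size arithmetic wrapped around (its end precedes its start).
 */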
int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) < region_offset(p))
		return 0;

	if (region_end(c) > region_end(p))
		return 0;

	if (region_end(c) < region_offset(c))
		return 0;

	return 1;
}

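/*
 * Rebase 'inner', whose offset is given relative to 'outer', into
 * 'outer's coordinate space and verify that the result is still
 * contained within 'outer'.
 */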
static int normalize_and_ok(const struct region *outer, struct region *inner)
{
	inner->offset += region_offset(outer);
	return region_is_subregion(outer, inner);
}

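/* Return the root of an rdev chain; a device without a root is its own root. */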
static const struct region_device *rdev_root(const struct region_device *rdev)
{
	if (rdev->root == NULL)
		return rdev;
	return rdev->root;
}

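/*
 * Return the offset of 'c' relative to 'p', or -1 if the two devices do
 * not share a root or 'c' is not contained within 'p'.
 */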
ssize_t rdev_relative_offset(const struct region_device *p,
				const struct region_device *c)
{
	if (rdev_root(p) != rdev_root(c))
		return -1;

	if (!region_is_subregion(&p->region, &c->region))
		return -1;

	return region_device_offset(c) - region_device_offset(p);
}

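/*
 * The accessors below share one pattern: translate the request, whose
 * offset is relative to 'rd', into the root device's coordinate space,
 * then dispatch to the root's ops.
 */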
void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return NULL;

	rdev = rdev_root(rd);

	if (rdev->ops->mmap == NULL)
		return NULL;

	return rdev->ops->mmap(rdev, req.offset, req.size);
}

int rdev_munmap(const struct region_device *rd, void *mapping)
{
	const struct region_device *rdev;

	rdev = rdev_root(rd);

	if (rdev->ops->munmap == NULL)
		return -1;

	return rdev->ops->munmap(rdev, mapping);
}

ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	return rdev->ops->readat(rdev, b, req.offset, req.size);
}

ssize_t rdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	if (rdev->ops->writeat == NULL)
		return -1;

	return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	/* If the eraseat ptr is NULL, assume the erase completed
	 * successfully. */
	if (rdev->ops->eraseat == NULL)
		return size;

	return rdev->ops->eraseat(rdev, req.offset, req.size);
}

int rdev_chain(struct region_device *child, const struct region_device *parent,
		size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&parent->region, &req))
		return -1;

	/* Keep track of the root region device. Note the offsets are
	 * relative to the root device. */
	child->root = rdev_root(parent);
	child->ops = NULL;
	child->region.offset = req.offset;
	child->region.size = req.size;

	return 0;
}

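/*
 * Example of chaining a window onto an existing device (a minimal
 * sketch; 'boot_flash', FMAP_OFFSET and FMAP_SIZE are hypothetical and
 * not defined in this file):
 *
 *	struct region_device fmap;
 *
 *	if (rdev_chain(&fmap, &boot_flash, FMAP_OFFSET, FMAP_SIZE) < 0)
 *		return;
 *	rdev_readat(&fmap, buf, 0, sizeof(buf));
 *
 * Reads through 'fmap' are serviced by the root of 'boot_flash',
 * starting FMAP_OFFSET bytes into it.
 */
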
static void mem_region_device_init(struct mem_region_device *mdev,
		const struct region_device_ops *ops, void *base, size_t size)
{
	memset(mdev, 0, sizeof(*mdev));
	mdev->base = base;
	mdev->rdev.ops = ops;
	mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
		size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
		size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}
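
/*
 * Example use of a memory-backed rdev (a minimal sketch; 'scratch' is a
 * hypothetical buffer):
 *
 *	static uint8_t scratch[256];
 *	struct mem_region_device mdev;
 *
 *	mem_region_device_rw_init(&mdev, scratch, sizeof(scratch));
 *	rdev_writeat(&mdev.rdev, "abc", 0, 3);
 */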

void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size)
{
	memset(rdev, 0, sizeof(*rdev));
	rdev->root = NULL;
	rdev->ops = ops;
	rdev->region.offset = offset;
	rdev->region.size = size;
}

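/*
 * An xlate_region_device presents a device of 'parent_size' bytes in
 * which only the window [sub_offset, sub_offset + sub_size) is backed
 * by 'access_dev'. Accesses inside the window are forwarded with
 * 'sub_offset' subtracted; accesses outside it fail.
 */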
static void xlate_region_device_init(struct xlate_region_device *xdev,
			const struct region_device_ops *ops,
			const struct region_device *access_dev,
			size_t sub_offset, size_t sub_size,
			size_t parent_size)
{
	memset(xdev, 0, sizeof(*xdev));
	xdev->access_dev = access_dev;
	xdev->sub_region.offset = sub_offset;
	xdev->sub_region.size = sub_size;
	region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
			const struct region_device *access_dev,
			size_t sub_offset, size_t sub_size,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_ro_ops, access_dev,
			sub_offset, sub_size, parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
			const struct region_device *access_dev,
			size_t sub_offset, size_t sub_size,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_rw_ops, access_dev,
			sub_offset, sub_size, parent_size);
}

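/*
 * Operations for mem_region_device: the backing store is plain memory,
 * so mmap is pointer arithmetic into the buffer and munmap is a no-op.
 */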
static void *mdev_mmap(const struct region_device *rd, size_t offset,
			size_t size __unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __unused,
			void *mapping __unused)
{
	return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(&mdev->base[offset], b, size);

	return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memset(&mdev->base[offset], 0, size);

	return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};

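/*
 * The mmap helper emulates mmap for devices that cannot hand out a
 * direct pointer: a mapping is allocated from a caller-provided memory
 * pool and filled via readat; munmap returns the allocation to the pool.
 */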
void mmap_helper_device_init(struct mmap_helper_region_device *mdev,
			void *cache, size_t cache_size)
{
	mem_pool_init(&mdev->pool, cache, cache_size);
}

void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	struct mmap_helper_region_device *mdev;
	void *mapping;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mapping = mem_pool_alloc(&mdev->pool, size);

	if (mapping == NULL)
		return NULL;

	if (rd->ops->readat(rd, mapping, offset, size) != size) {
		mem_pool_free(&mdev->pool, mapping);
		return NULL;
	}

	return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
	struct mmap_helper_region_device *mdev;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mem_pool_free(&mdev->pool, mapping);

	return 0;
}

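/*
 * xlate accessors: reject any request that falls outside 'sub_region',
 * then rebase the offset into 'access_dev's coordinate space.
 */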
static void *xlate_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct xlate_region_device *xldev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return NULL;

	offset -= region_offset(&xldev->sub_region);

	return rdev_mmap(xldev->access_dev, offset, size);
}

static int xlate_munmap(const struct region_device *rd, void *mapping)
{
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	return rdev_munmap(xldev->access_dev, mapping);
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return -1;

	offset -= region_offset(&xldev->sub_region);

	return rdev_readat(xldev->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return -1;

	offset -= region_offset(&xldev->sub_region);

	return rdev_writeat(xldev->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return -1;

	offset -= region_offset(&xldev->sub_region);

	return rdev_eraseat(xldev->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};

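/*
 * An incoherent_rdev splits accesses across two same-sized backing
 * devices: mmap/readat are serviced by 'read', writeat/eraseat by
 * 'write'.
 */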
static void *incoherent_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};

const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
				const struct region *r,
				const struct region_device *read,
				const struct region_device *write)
{
	const size_t size = region_sz(r);

	if (size != region_device_sz(read) || size != region_device_sz(write))
		return NULL;

	/* The region is represented as offset 0 to size. That way, the
	 * generic rdev operations can be called on the read or write
	 * implementation without any unnecessary translation, because the
	 * offsets all start at 0. */
	region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
	irdev->read = read;
	irdev->write = write;

	return &irdev->rdev;
}