/* SPDX-License-Identifier: GPL-2.0-only */
/* This file is part of the coreboot project. */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <string.h>

int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) < region_offset(p))
		return 0;

	if (region_end(c) > region_end(p))
		return 0;

	if (region_end(c) < region_offset(c))
		return 0;

	return 1;
}
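
/*
 * Example (editorial, illustrative only): with p = { .offset = 0, .size = 16 }
 * and c = { .offset = 4, .size = 8 }, region_end(c) == 12 and
 * region_end(p) == 16, so region_is_subregion(&p, &c) returns 1. The final
 * check rejects a child whose offset + size wrapped around past SIZE_MAX.
 */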

static int normalize_and_ok(const struct region *outer, struct region *inner)
{
	inner->offset += region_offset(outer);
	return region_is_subregion(outer, inner);
}

static const struct region_device *rdev_root(const struct region_device *rdev)
{
	if (rdev->root == NULL)
		return rdev;
	return rdev->root;
}

ssize_t rdev_relative_offset(const struct region_device *p,
				const struct region_device *c)
{
	if (rdev_root(p) != rdev_root(c))
		return -1;

	if (!region_is_subregion(&p->region, &c->region))
		return -1;

	return region_device_offset(c) - region_device_offset(p);
}

void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return NULL;

	rdev = rdev_root(rd);

	if (rdev->ops->mmap == NULL)
		return NULL;

	return rdev->ops->mmap(rdev, req.offset, req.size);
}

int rdev_munmap(const struct region_device *rd, void *mapping)
{
	const struct region_device *rdev;

	rdev = rdev_root(rd);

	if (rdev->ops->munmap == NULL)
		return -1;

	return rdev->ops->munmap(rdev, mapping);
}
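
/*
 * Usage sketch (editorial; the rdev variable is hypothetical): mappings
 * obtained through rdev_mmap() should be released with rdev_munmap() on the
 * same region device, so the root device's munmap hook can reclaim whatever
 * backs the mapping.
 *
 *	void *p = rdev_mmap(&rdev, 0, 64);
 *	if (p != NULL) {
 *		// ... use the 64 mapped bytes ...
 *		rdev_munmap(&rdev, p);
 *	}
 */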

ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	return rdev->ops->readat(rdev, b, req.offset, req.size);
}

ssize_t rdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	if (rdev->ops->writeat == NULL)
		return -1;

	return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	/* If the eraseat ptr is NULL we assume that the erase
	 * function was completed successfully. */
	if (rdev->ops->eraseat == NULL)
		return size;

	return rdev->ops->eraseat(rdev, req.offset, req.size);
}
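
/*
 * Note (editorial): rdev_eraseat() deliberately reports success for devices
 * without an eraseat hook (e.g. plain memory-backed devices), so callers can
 * erase unconditionally and only treat a short return as failure.
 * A hypothetical call:
 *
 *	if (rdev_eraseat(&rdev, 0, 4096) != 4096)
 *		// handle erase failure
 */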

int rdev_chain(struct region_device *child, const struct region_device *parent,
		size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&parent->region, &req))
		return -1;

	/* Keep track of root region device. Note the offsets are relative
	 * to the root device. */
	child->root = rdev_root(parent);
	child->ops = NULL;
	child->region.offset = req.offset;
	child->region.size = req.size;

	return 0;
}
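
/*
 * Usage sketch (editorial; names are hypothetical): carve a child window out
 * of a parent device. After chaining, offsets passed to the child are
 * window-relative; the region stored in the child is root-relative because
 * normalize_and_ok() rebased it.
 *
 *	struct region_device child;
 *	if (rdev_chain(&child, &parent, 0x100, 0x40) == 0)
 *		rdev_readat(&child, buf, 0, 0x40);  // parent bytes 0x100-0x13f
 */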

static void mem_region_device_init(struct mem_region_device *mdev,
		const struct region_device_ops *ops, void *base, size_t size)
{
	memset(mdev, 0, sizeof(*mdev));
	mdev->base = base;
	mdev->rdev.ops = ops;
	mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}
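
/*
 * Usage sketch (editorial; the buffer is hypothetical): wrap an in-memory
 * buffer so the generic rdev accessors work on it. mmap on such a device is
 * just pointer arithmetic into the buffer.
 *
 *	static uint8_t buf[4096];
 *	struct mem_region_device mdev;
 *	mem_region_device_rw_init(&mdev, buf, sizeof(buf));
 *	rdev_writeat(&mdev.rdev, "hi", 0, 2);
 */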

void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size)
{
	memset(rdev, 0, sizeof(*rdev));
	rdev->root = NULL;
	rdev->ops = ops;
	rdev->region.offset = offset;
	rdev->region.size = size;
}

static void xlate_region_device_init(struct xlate_region_device *xdev,
			const struct region_device_ops *ops,
			const struct region_device *access_dev,
			size_t sub_offset, size_t sub_size,
			size_t parent_size)
{
	memset(xdev, 0, sizeof(*xdev));
	xdev->access_dev = access_dev;
	xdev->sub_region.offset = sub_offset;
	xdev->sub_region.size = sub_size;
	region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
			const struct region_device *access_dev,
			size_t sub_offset, size_t sub_size,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_ro_ops, access_dev,
			sub_offset, sub_size, parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
			const struct region_device *access_dev,
			size_t sub_offset, size_t sub_size,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_rw_ops, access_dev,
			sub_offset, sub_size, parent_size);
}
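
/*
 * Semantics note (editorial): an xlate device presents a parent_size address
 * space in which only [sub_offset, sub_offset + sub_size) is backed by
 * access_dev. Accesses are checked against sub_region, then rebased by
 * subtracting sub_offset before being forwarded. A hypothetical layout:
 *
 *	xlate_region_device_ro_init(&xdev, &flash_rdev,
 *				0x1000, 0x2000, 0x10000);
 *	rdev_readat(&xdev.rdev, buf, 0x1000, 16); // flash_rdev offset 0
 *	rdev_readat(&xdev.rdev, buf, 0, 16);      // hole: returns -1
 */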

static void *mdev_mmap(const struct region_device *rd, size_t offset,
			size_t size __unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __unused,
			void *mapping __unused)
{
	return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(&mdev->base[offset], b, size);

	return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memset(&mdev->base[offset], 0, size);

	return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};

void mmap_helper_device_init(struct mmap_helper_region_device *mdev,
				void *cache, size_t cache_size)
{
	mem_pool_init(&mdev->pool, cache, cache_size);
}

void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
				size_t size)
{
	struct mmap_helper_region_device *mdev;
	void *mapping;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mapping = mem_pool_alloc(&mdev->pool, size);

	if (mapping == NULL)
		return NULL;

	if (rd->ops->readat(rd, mapping, offset, size) != size) {
		mem_pool_free(&mdev->pool, mapping);
		return NULL;
	}

	return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
	struct mmap_helper_region_device *mdev;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mem_pool_free(&mdev->pool, mapping);

	return 0;
}
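
/*
 * Note (editorial): mmap_helper_rdev_mmap()/munmap() are building blocks for
 * drivers whose backing store is not directly memory-mapped: mmap is emulated
 * by allocating from the device's mem_pool and reading the data in. A driver
 * would plug them into its ops table, roughly (spi_flash_readat is a
 * hypothetical driver hook):
 *
 *	const struct region_device_ops spi_flash_rdev_ops = {
 *		.mmap = mmap_helper_rdev_mmap,
 *		.munmap = mmap_helper_rdev_munmap,
 *		.readat = spi_flash_readat,
 *	};
 */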

static void *xlate_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct xlate_region_device *xldev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return NULL;

	offset -= region_offset(&xldev->sub_region);

	return rdev_mmap(xldev->access_dev, offset, size);
}

static int xlate_munmap(const struct region_device *rd, void *mapping)
{
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	return rdev_munmap(xldev->access_dev, mapping);
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return -1;

	offset -= region_offset(&xldev->sub_region);

	return rdev_readat(xldev->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return -1;

	offset -= region_offset(&xldev->sub_region);

	return rdev_writeat(xldev->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	if (!region_is_subregion(&xldev->sub_region, &req))
		return -1;

	offset -= region_offset(&xldev->sub_region);

	return rdev_eraseat(xldev->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};

static void *incoherent_mmap(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};

const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
				const struct region *r,
				const struct region_device *read,
				const struct region_device *write)
{
	const size_t size = region_sz(r);

	if (size != region_device_sz(read) || size != region_device_sz(write))
		return NULL;

	/* The region is represented as offset 0 to size. That way, the generic
	 * rdev operations can be called on the read or write implementation
	 * without any unnecessary translation because the offsets all start
	 * at 0. */
	region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
	irdev->read = read;
	irdev->write = write;

	return &irdev->rdev;
}
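
/*
 * Usage sketch (editorial; names are hypothetical): pair a cached or
 * memory-mapped read device with a write-capable device covering the same
 * window, so reads stay fast while writes and erases reach the real hardware.
 * Both devices must be exactly region_sz(&r) bytes or init returns NULL.
 *
 *	struct incoherent_rdev irdev_store;
 *	const struct region_device *rdev =
 *		incoherent_rdev_init(&irdev_store, &r, &mmap_rdev, &spi_rdev);
 *	if (rdev != NULL)
 *		rdev_writeat(rdev, buf, 0, sizeof(buf));
 */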