blob: 541a125ad409d3bb282c42559724c2097073aeb4 [file] [log] [blame]
Aaron Durbin5d5f4b32015-03-26 14:39:07 -05001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright 2015 Google Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
Aaron Durbin5d5f4b32015-03-26 14:39:07 -050014 */
15
#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <stdint.h>
#include <string.h>
19
Aaron Durbin5d5f4b32015-03-26 14:39:07 -050020static inline size_t region_end(const struct region *r)
21{
22 return region_sz(r) + region_offset(r);
23}
24
/*
 * Check whether region @c lies entirely within region @p.
 * Returns 1 when @c is contained in @p, 0 otherwise.
 */
int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) >= region_offset(p) &&
	    region_sz(c) <= region_sz(p) &&
	    region_end(c) <= region_end(p))
		return 1;

	return 0;
}
38
39static int normalize_and_ok(const struct region *outer, struct region *inner)
40{
41 inner->offset += region_offset(outer);
Aaron Durbin02103e32017-12-14 15:22:04 -070042 return region_is_subregion(outer, inner);
Aaron Durbin5d5f4b32015-03-26 14:39:07 -050043}
44
45static const struct region_device *rdev_root(const struct region_device *rdev)
46{
47 if (rdev->root == NULL)
48 return rdev;
49 return rdev->root;
50}
51
Aaron Durbin990ab7e2015-12-15 13:29:41 -060052ssize_t rdev_relative_offset(const struct region_device *p,
53 const struct region_device *c)
54{
55 if (rdev_root(p) != rdev_root(c))
56 return -1;
57
Aaron Durbin02103e32017-12-14 15:22:04 -070058 if (!region_is_subregion(&p->region, &c->region))
Aaron Durbin990ab7e2015-12-15 13:29:41 -060059 return -1;
60
61 return region_device_offset(c) - region_device_offset(p);
62}
63
Aaron Durbin5d5f4b32015-03-26 14:39:07 -050064void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
65{
66 const struct region_device *rdev;
67 struct region req = {
68 .offset = offset,
69 .size = size,
70 };
71
72 if (!normalize_and_ok(&rd->region, &req))
73 return NULL;
74
75 rdev = rdev_root(rd);
76
Aaron Durbinedd79592016-08-10 11:39:00 -050077 if (rdev->ops->mmap == NULL)
78 return NULL;
79
Aaron Durbin5d5f4b32015-03-26 14:39:07 -050080 return rdev->ops->mmap(rdev, req.offset, req.size);
81}
82
83int rdev_munmap(const struct region_device *rd, void *mapping)
84{
85 const struct region_device *rdev;
86
87 rdev = rdev_root(rd);
88
Aaron Durbinedd79592016-08-10 11:39:00 -050089 if (rdev->ops->munmap == NULL)
90 return -1;
91
Aaron Durbin5d5f4b32015-03-26 14:39:07 -050092 return rdev->ops->munmap(rdev, mapping);
93}
94
95ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
96 size_t size)
97{
98 const struct region_device *rdev;
99 struct region req = {
100 .offset = offset,
101 .size = size,
102 };
103
104 if (!normalize_and_ok(&rd->region, &req))
105 return -1;
106
107 rdev = rdev_root(rd);
108
109 return rdev->ops->readat(rdev, b, req.offset, req.size);
110}
111
Aaron Durbin258a3502016-08-04 14:33:58 -0500112ssize_t rdev_writeat(const struct region_device *rd, const void *b,
113 size_t offset, size_t size)
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200114{
115 const struct region_device *rdev;
116 struct region req = {
117 .offset = offset,
118 .size = size,
119 };
120
121 if (!normalize_and_ok(&rd->region, &req))
122 return -1;
123
124 rdev = rdev_root(rd);
125
126 if (rdev->ops->writeat == NULL)
127 return -1;
128
129 return rdev->ops->writeat(rdev, b, req.offset, req.size);
130}
131
132ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
133 size_t size)
134{
135 const struct region_device *rdev;
136 struct region req = {
137 .offset = offset,
138 .size = size,
139 };
140
141 if (!normalize_and_ok(&rd->region, &req))
142 return -1;
143
144 rdev = rdev_root(rd);
145
146 /* If the eraseat ptr is NULL we assume that the erase
147 * function was completed successfully. */
148 if (rdev->ops->eraseat == NULL)
149 return size;
150
151 return rdev->ops->eraseat(rdev, req.offset, req.size);
152}
153
Aaron Durbin5d5f4b32015-03-26 14:39:07 -0500154int rdev_chain(struct region_device *child, const struct region_device *parent,
155 size_t offset, size_t size)
156{
157 struct region req = {
158 .offset = offset,
159 .size = size,
160 };
161
162 if (!normalize_and_ok(&parent->region, &req))
163 return -1;
164
165 /* Keep track of root region device. Note the offsets are relative
166 * to the root device. */
167 child->root = rdev_root(parent);
168 child->ops = NULL;
169 child->region.offset = req.offset;
170 child->region.size = req.size;
171
172 return 0;
173}
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500174
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200175static void mem_region_device_init(struct mem_region_device *mdev,
176 const struct region_device_ops *ops, void *base, size_t size)
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500177{
178 memset(mdev, 0, sizeof(*mdev));
179 mdev->base = base;
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200180 mdev->rdev.ops = ops;
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500181 mdev->rdev.region.size = size;
182}
183
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200184void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
185 size_t size)
186{
187 return mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
188}
189
190void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
191 size_t size)
192{
193 return mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
194}
195
Furquan Shaikh2b576912016-06-19 23:16:45 -0700196void region_device_init(struct region_device *rdev,
197 const struct region_device_ops *ops, size_t offset,
198 size_t size)
199{
200 memset(rdev, 0, sizeof(*rdev));
201 rdev->root = NULL;
202 rdev->ops = ops;
203 rdev->region.offset = offset;
204 rdev->region.size = size;
205}
206
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200207static void xlate_region_device_init(struct xlate_region_device *xdev,
208 const struct region_device_ops *ops,
209 const struct region_device *access_dev,
210 size_t sub_offset, size_t sub_size,
211 size_t parent_size)
Furquan Shaikh2b576912016-06-19 23:16:45 -0700212{
213 memset(xdev, 0, sizeof(*xdev));
214 xdev->access_dev = access_dev;
215 xdev->sub_region.offset = sub_offset;
216 xdev->sub_region.size = sub_size;
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200217 region_device_init(&xdev->rdev, ops, 0, parent_size);
218}
219
220void xlate_region_device_ro_init(struct xlate_region_device *xdev,
221 const struct region_device *access_dev,
222 size_t sub_offset, size_t sub_size,
223 size_t parent_size)
224{
225 xlate_region_device_init(xdev, &xlate_rdev_ro_ops, access_dev,
226 sub_offset, sub_size, parent_size);
227}
228
229void xlate_region_device_rw_init(struct xlate_region_device *xdev,
230 const struct region_device *access_dev,
231 size_t sub_offset, size_t sub_size,
232 size_t parent_size)
233{
234 xlate_region_device_init(xdev, &xlate_rdev_rw_ops, access_dev,
235 sub_offset, sub_size, parent_size);
Furquan Shaikh2b576912016-06-19 23:16:45 -0700236}
237
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500238static void *mdev_mmap(const struct region_device *rd, size_t offset,
Aaron Durbinca0a6762015-12-15 17:49:12 -0600239 size_t size __unused)
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500240{
241 const struct mem_region_device *mdev;
242
Aaron Durbinca0a6762015-12-15 17:49:12 -0600243 mdev = container_of(rd, __typeof__(*mdev), rdev);
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500244
245 return &mdev->base[offset];
246}
247
Lee Leahy36d5b412017-03-10 11:02:11 -0800248static int mdev_munmap(const struct region_device *rd __unused,
Aaron Durbinca0a6762015-12-15 17:49:12 -0600249 void *mapping __unused)
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500250{
251 return 0;
252}
253
254static ssize_t mdev_readat(const struct region_device *rd, void *b,
255 size_t offset, size_t size)
256{
257 const struct mem_region_device *mdev;
258
Aaron Durbinca0a6762015-12-15 17:49:12 -0600259 mdev = container_of(rd, __typeof__(*mdev), rdev);
Aaron Durbinb419c1a82015-03-27 01:03:45 -0500260
261 memcpy(b, &mdev->base[offset], size);
262
263 return size;
264}
265
Aaron Durbin258a3502016-08-04 14:33:58 -0500266static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200267 size_t offset, size_t size)
268{
269 const struct mem_region_device *mdev;
270
271 mdev = container_of(rd, __typeof__(*mdev), rdev);
272
273 memcpy(&mdev->base[offset], b, size);
274
275 return size;
276}
277
278static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
279 size_t size)
280{
281 const struct mem_region_device *mdev;
282
283 mdev = container_of(rd, __typeof__(*mdev), rdev);
284
285 memset(&mdev->base[offset], 0, size);
286
287 return size;
288}
289
/* Read-only ops table for memory-backed region devices. */
const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};
Aaron Durbine62cf522015-03-27 01:58:06 -0500295
/* Read-write ops table for memory-backed region devices. */
const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};
303
/*
 * Initialize the mmap helper's backing allocator.  @cache/@cache_size
 * describe the scratch buffer that mmap_helper_rdev_mmap() allocates
 * mappings from.
 */
void mmap_helper_device_init(struct mmap_helper_region_device *mdev,
				void *cache, size_t cache_size)
{
	mem_pool_init(&mdev->pool, cache, cache_size);
}
309
310void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
311 size_t size)
312{
313 struct mmap_helper_region_device *mdev;
314 void *mapping;
315
Aaron Durbinca0a6762015-12-15 17:49:12 -0600316 mdev = container_of((void *)rd, __typeof__(*mdev), rdev);
Aaron Durbine62cf522015-03-27 01:58:06 -0500317
318 mapping = mem_pool_alloc(&mdev->pool, size);
319
320 if (mapping == NULL)
321 return NULL;
322
323 if (rd->ops->readat(rd, mapping, offset, size) != size) {
324 mem_pool_free(&mdev->pool, mapping);
325 return NULL;
326 }
327
328 return mapping;
329}
330
331int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
332{
333 struct mmap_helper_region_device *mdev;
334
Aaron Durbinca0a6762015-12-15 17:49:12 -0600335 mdev = container_of((void *)rd, __typeof__(*mdev), rdev);
Aaron Durbine62cf522015-03-27 01:58:06 -0500336
337 mem_pool_free(&mdev->pool, mapping);
338
339 return 0;
340}
Aaron Durbin5907eb82015-10-28 16:09:42 -0500341
342static void *xlate_mmap(const struct region_device *rd, size_t offset,
343 size_t size)
344{
345 const struct xlate_region_device *xldev;
346 struct region req = {
347 .offset = offset,
348 .size = size,
349 };
350
Aaron Durbinca0a6762015-12-15 17:49:12 -0600351 xldev = container_of(rd, __typeof__(*xldev), rdev);
Aaron Durbin5907eb82015-10-28 16:09:42 -0500352
Aaron Durbin02103e32017-12-14 15:22:04 -0700353 if (!region_is_subregion(&xldev->sub_region, &req))
Aaron Durbin5907eb82015-10-28 16:09:42 -0500354 return NULL;
355
356 offset -= region_offset(&xldev->sub_region);
357
358 return rdev_mmap(xldev->access_dev, offset, size);
359}
360
361static int xlate_munmap(const struct region_device *rd, void *mapping)
362{
363 const struct xlate_region_device *xldev;
364
Aaron Durbinca0a6762015-12-15 17:49:12 -0600365 xldev = container_of(rd, __typeof__(*xldev), rdev);
Aaron Durbin5907eb82015-10-28 16:09:42 -0500366
367 return rdev_munmap(xldev->access_dev, mapping);
368}
369
370static ssize_t xlate_readat(const struct region_device *rd, void *b,
371 size_t offset, size_t size)
372{
373 struct region req = {
374 .offset = offset,
375 .size = size,
376 };
377 const struct xlate_region_device *xldev;
378
Aaron Durbinca0a6762015-12-15 17:49:12 -0600379 xldev = container_of(rd, __typeof__(*xldev), rdev);
Aaron Durbin5907eb82015-10-28 16:09:42 -0500380
Aaron Durbin02103e32017-12-14 15:22:04 -0700381 if (!region_is_subregion(&xldev->sub_region, &req))
Aaron Durbin5907eb82015-10-28 16:09:42 -0500382 return -1;
383
384 offset -= region_offset(&xldev->sub_region);
385
386 return rdev_readat(xldev->access_dev, b, offset, size);
387}
388
Aaron Durbin258a3502016-08-04 14:33:58 -0500389static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200390 size_t offset, size_t size)
391{
392 struct region req = {
393 .offset = offset,
394 .size = size,
395 };
396 const struct xlate_region_device *xldev;
397
398 xldev = container_of(rd, __typeof__(*xldev), rdev);
399
Aaron Durbin02103e32017-12-14 15:22:04 -0700400 if (!region_is_subregion(&xldev->sub_region, &req))
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200401 return -1;
402
403 offset -= region_offset(&xldev->sub_region);
404
405 return rdev_writeat(xldev->access_dev, b, offset, size);
406}
407
408static ssize_t xlate_eraseat(const struct region_device *rd,
409 size_t offset, size_t size)
410{
411 struct region req = {
412 .offset = offset,
413 .size = size,
414 };
415 const struct xlate_region_device *xldev;
416
417 xldev = container_of(rd, __typeof__(*xldev), rdev);
418
Aaron Durbin02103e32017-12-14 15:22:04 -0700419 if (!region_is_subregion(&xldev->sub_region, &req))
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200420 return -1;
421
422 offset -= region_offset(&xldev->sub_region);
423
424 return rdev_eraseat(xldev->access_dev, offset, size);
425}
426
/* Read-only ops table for translated (windowed) region devices. */
const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};
Antonello Dettorie5f48d22016-06-22 21:09:08 +0200432
/* Read-write ops table for translated (windowed) region devices. */
const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};
Aaron Durbin256db402016-12-03 17:08:08 -0600440
441
442static void *incoherent_mmap(const struct region_device *rd, size_t offset,
443 size_t size)
444{
445 const struct incoherent_rdev *irdev;
446
447 irdev = container_of(rd, const struct incoherent_rdev, rdev);
448
449 return rdev_mmap(irdev->read, offset, size);
450}
451
452static int incoherent_munmap(const struct region_device *rd, void *mapping)
453{
454 const struct incoherent_rdev *irdev;
455
456 irdev = container_of(rd, const struct incoherent_rdev, rdev);
457
458 return rdev_munmap(irdev->read, mapping);
459}
460
461static ssize_t incoherent_readat(const struct region_device *rd, void *b,
462 size_t offset, size_t size)
463{
464 const struct incoherent_rdev *irdev;
465
466 irdev = container_of(rd, const struct incoherent_rdev, rdev);
467
468 return rdev_readat(irdev->read, b, offset, size);
469}
470
471static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
472 size_t offset, size_t size)
473{
474 const struct incoherent_rdev *irdev;
475
476 irdev = container_of(rd, const struct incoherent_rdev, rdev);
477
478 return rdev_writeat(irdev->write, b, offset, size);
479}
480
481static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
482 size_t size)
483{
484 const struct incoherent_rdev *irdev;
485
486 irdev = container_of(rd, const struct incoherent_rdev, rdev);
487
488 return rdev_eraseat(irdev->write, offset, size);
489}
490
/* Ops table splitting reads and writes across two backing devices. */
static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};
498
499const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
500 const struct region *r,
501 const struct region_device *read,
502 const struct region_device *write)
503{
504 const size_t size = region_sz(r);
505
506 if (size != region_device_sz(read) || size != region_device_sz(write))
507 return NULL;
508
509 /* The region is represented as offset 0 to size. That way, the generic
510 * rdev operations can be called on the read or write implementation
511 * without any unnecessary translation because the offsets all start
512 * at 0. */
513 region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
514 irdev->read = read;
515 irdev->write = write;
516
517 return &irdev->rdev;
518}