/*
 * This file is part of the coreboot project.
 *
 * Copyright 2015 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <string.h>

static inline size_t region_end(const struct region *r)
{
        return region_sz(r) + region_offset(r);
}

static int is_subregion(const struct region *p, const struct region *c)
{
        if (region_offset(c) < region_offset(p))
                return 0;

        if (region_sz(c) > region_sz(p))
                return 0;

        if (region_end(c) > region_end(p))
                return 0;

        return 1;
}

static int normalize_and_ok(const struct region *outer, struct region *inner)
{
        inner->offset += region_offset(outer);
        return is_subregion(outer, inner);
}

static const struct region_device *rdev_root(const struct region_device *rdev)
{
        if (rdev->root == NULL)
                return rdev;
        return rdev->root;
}

ssize_t rdev_relative_offset(const struct region_device *p,
                                const struct region_device *c)
{
        if (rdev_root(p) != rdev_root(c))
                return -1;

        if (!is_subregion(&p->region, &c->region))
                return -1;

        return region_device_offset(c) - region_device_offset(p);
}
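
/*
 * Illustrative sketch (not part of the original file): given a child device
 * chained somewhere inside a parent, rdev_relative_offset() reports where the
 * child's window starts within the parent. The 'parent' and 'child' names are
 * hypothetical.
 *
 *      struct region_device child;
 *      rdev_chain(&child, &parent, 0x100, 0x40);
 *      ssize_t off = rdev_relative_offset(&parent, &child); // 0x100
 *      // Returns -1 if the devices share no common root or do not nest.
 */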

void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
        const struct region_device *rdev;
        struct region req = {
                .offset = offset,
                .size = size,
        };

        if (!normalize_and_ok(&rd->region, &req))
                return NULL;

        rdev = rdev_root(rd);

        if (rdev->ops->mmap == NULL)
                return NULL;

        return rdev->ops->mmap(rdev, req.offset, req.size);
}

int rdev_munmap(const struct region_device *rd, void *mapping)
{
        const struct region_device *rdev;

        rdev = rdev_root(rd);

        if (rdev->ops->munmap == NULL)
                return -1;

        return rdev->ops->munmap(rdev, mapping);
}
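
/*
 * Usage sketch (illustrative, not from the original file): map a window of a
 * region device, use it, then release the mapping. The 'rdev' and 'buf' names
 * are hypothetical; rdev_mmap() legitimately returns NULL when the backing
 * device provides no mmap operation or the request falls outside the region.
 *
 *      void *p = rdev_mmap(rdev, 0, 4096);
 *      if (p != NULL) {
 *              memcpy(buf, p, 4096);
 *              rdev_munmap(rdev, p);
 *      }
 */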

ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
                        size_t size)
{
        const struct region_device *rdev;
        struct region req = {
                .offset = offset,
                .size = size,
        };

        if (!normalize_and_ok(&rd->region, &req))
                return -1;

        rdev = rdev_root(rd);

        return rdev->ops->readat(rdev, b, req.offset, req.size);
}

ssize_t rdev_writeat(const struct region_device *rd, const void *b,
                        size_t offset, size_t size)
{
        const struct region_device *rdev;
        struct region req = {
                .offset = offset,
                .size = size,
        };

        if (!normalize_and_ok(&rd->region, &req))
                return -1;

        rdev = rdev_root(rd);

        if (rdev->ops->writeat == NULL)
                return -1;

        return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
                        size_t size)
{
        const struct region_device *rdev;
        struct region req = {
                .offset = offset,
                .size = size,
        };

        if (!normalize_and_ok(&rd->region, &req))
                return -1;

        rdev = rdev_root(rd);

        /* If there is no eraseat implementation, assume the erase
         * completed successfully. */
        if (rdev->ops->eraseat == NULL)
                return size;

        return rdev->ops->eraseat(rdev, req.offset, req.size);
}
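
/*
 * Usage sketch (illustrative, not from the original file): the *_at accessors
 * return the number of bytes acted on, or -1 on a bad range or a missing
 * operation (eraseat is the exception and reports success when the backing
 * device has no eraseat implementation). The 'rdev' name is hypothetical.
 *
 *      uint8_t buf[64];
 *      if (rdev_readat(rdev, buf, 0, sizeof(buf)) != sizeof(buf))
 *              return -1;
 *      if (rdev_eraseat(rdev, 0, sizeof(buf)) != sizeof(buf))
 *              return -1;
 *      if (rdev_writeat(rdev, buf, 0, sizeof(buf)) != sizeof(buf))
 *              return -1;
 */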

int rdev_chain(struct region_device *child, const struct region_device *parent,
                size_t offset, size_t size)
{
        struct region req = {
                .offset = offset,
                .size = size,
        };

        if (!normalize_and_ok(&parent->region, &req))
                return -1;

        /* Keep track of root region device. Note the offsets are relative
         * to the root device. */
        child->root = rdev_root(parent);
        child->ops = NULL;
        child->region.offset = req.offset;
        child->region.size = req.size;

        return 0;
}
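
/*
 * Usage sketch (illustrative, not from the original file): carve a child
 * window out of a parent device. Offsets passed to the child are relative to
 * the child's own start; rdev_chain() rebases them onto the root device. The
 * 'parent' and 'buf' names are hypothetical.
 *
 *      struct region_device child;
 *      if (rdev_chain(&child, &parent, 0x1000, 0x800) < 0)
 *              return -1;
 *      // A read at child offset 0 hits parent offset 0x1000.
 *      rdev_readat(&child, buf, 0, 16);
 */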

static void mem_region_device_init(struct mem_region_device *mdev,
                const struct region_device_ops *ops, void *base, size_t size)
{
        memset(mdev, 0, sizeof(*mdev));
        mdev->base = base;
        mdev->rdev.ops = ops;
        mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
                size_t size)
{
        mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
                size_t size)
{
        mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}
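
/*
 * Usage sketch (illustrative, not from the original file): expose a plain
 * memory buffer as a region device. The 'blob' buffer is hypothetical.
 *
 *      static uint8_t blob[256];
 *      struct mem_region_device mdev;
 *
 *      mem_region_device_rw_init(&mdev, blob, sizeof(blob));
 *      rdev_writeat(&mdev.rdev, "abc", 0, 3);
 *      void *p = rdev_mmap(&mdev.rdev, 0, 16); // points into blob
 */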

void region_device_init(struct region_device *rdev,
                        const struct region_device_ops *ops, size_t offset,
                        size_t size)
{
        memset(rdev, 0, sizeof(*rdev));
        rdev->root = NULL;
        rdev->ops = ops;
        rdev->region.offset = offset;
        rdev->region.size = size;
}

static void xlate_region_device_init(struct xlate_region_device *xdev,
                        const struct region_device_ops *ops,
                        const struct region_device *access_dev,
                        size_t sub_offset, size_t sub_size,
                        size_t parent_size)
{
        memset(xdev, 0, sizeof(*xdev));
        xdev->access_dev = access_dev;
        xdev->sub_region.offset = sub_offset;
        xdev->sub_region.size = sub_size;
        region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
                        const struct region_device *access_dev,
                        size_t sub_offset, size_t sub_size,
                        size_t parent_size)
{
        xlate_region_device_init(xdev, &xlate_rdev_ro_ops, access_dev,
                        sub_offset, sub_size, parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
                        const struct region_device *access_dev,
                        size_t sub_offset, size_t sub_size,
                        size_t parent_size)
{
        xlate_region_device_init(xdev, &xlate_rdev_rw_ops, access_dev,
                        sub_offset, sub_size, parent_size);
}
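
/*
 * Usage sketch (illustrative, not from the original file): present a backing
 * device as a window that starts at a fixed offset inside a larger virtual
 * region, e.g. a sub-section beginning at 0x10000 of a notional 0x100000
 * parent space. Accesses outside the window fail. The 'backing' device and
 * 'buf' are hypothetical.
 *
 *      struct xlate_region_device xdev;
 *
 *      xlate_region_device_ro_init(&xdev, &backing, 0x10000,
 *                      region_device_sz(&backing), 0x100000);
 *      // Offset 0x10000 of xdev.rdev maps to offset 0 of 'backing'.
 *      rdev_readat(&xdev.rdev, buf, 0x10000, 64);
 */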

static void *mdev_mmap(const struct region_device *rd, size_t offset,
                        size_t size __unused)
{
        const struct mem_region_device *mdev;

        mdev = container_of(rd, __typeof__(*mdev), rdev);

        return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __unused,
                        void *mapping __unused)
{
        return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
                        size_t offset, size_t size)
{
        const struct mem_region_device *mdev;

        mdev = container_of(rd, __typeof__(*mdev), rdev);

        memcpy(b, &mdev->base[offset], size);

        return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
                        size_t offset, size_t size)
{
        const struct mem_region_device *mdev;

        mdev = container_of(rd, __typeof__(*mdev), rdev);

        memcpy(&mdev->base[offset], b, size);

        return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
                        size_t size)
{
        const struct mem_region_device *mdev;

        mdev = container_of(rd, __typeof__(*mdev), rdev);

        memset(&mdev->base[offset], 0, size);

        return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
        .mmap = mdev_mmap,
        .munmap = mdev_munmap,
        .readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
        .mmap = mdev_mmap,
        .munmap = mdev_munmap,
        .readat = mdev_readat,
        .writeat = mdev_writeat,
        .eraseat = mdev_eraseat,
};

void mmap_helper_device_init(struct mmap_helper_region_device *mdev,
                        void *cache, size_t cache_size)
{
        mem_pool_init(&mdev->pool, cache, cache_size);
}

void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
                        size_t size)
{
        struct mmap_helper_region_device *mdev;
        void *mapping;

        mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

        mapping = mem_pool_alloc(&mdev->pool, size);

        if (mapping == NULL)
                return NULL;

        if (rd->ops->readat(rd, mapping, offset, size) != size) {
                mem_pool_free(&mdev->pool, mapping);
                return NULL;
        }

        return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
        struct mmap_helper_region_device *mdev;

        mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

        mem_pool_free(&mdev->pool, mapping);

        return 0;
}
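
/*
 * Usage sketch (illustrative, not from the original file): a device that can
 * only read (e.g. behind a SPI controller) can still offer mmap by staging
 * data in a scratch buffer. Its ops would point at the
 * mmap_helper_rdev_mmap/munmap pair above; the 'scratch' buffer and
 * 'helper_ops' names are hypothetical.
 *
 *      static uint8_t scratch[1024];
 *      static struct mmap_helper_region_device helper;
 *
 *      mmap_helper_device_init(&helper, scratch, sizeof(scratch));
 *      // helper.rdev.ops = &helper_ops; (ops supply readat plus the
 *      // mmap_helper_rdev_mmap/munmap helpers)
 */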

static void *xlate_mmap(const struct region_device *rd, size_t offset,
                        size_t size)
{
        const struct xlate_region_device *xldev;
        struct region req = {
                .offset = offset,
                .size = size,
        };

        xldev = container_of(rd, __typeof__(*xldev), rdev);

        if (!is_subregion(&xldev->sub_region, &req))
                return NULL;

        offset -= region_offset(&xldev->sub_region);

        return rdev_mmap(xldev->access_dev, offset, size);
}

static int xlate_munmap(const struct region_device *rd, void *mapping)
{
        const struct xlate_region_device *xldev;

        xldev = container_of(rd, __typeof__(*xldev), rdev);

        return rdev_munmap(xldev->access_dev, mapping);
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
                        size_t offset, size_t size)
{
        struct region req = {
                .offset = offset,
                .size = size,
        };
        const struct xlate_region_device *xldev;

        xldev = container_of(rd, __typeof__(*xldev), rdev);

        if (!is_subregion(&xldev->sub_region, &req))
                return -1;

        offset -= region_offset(&xldev->sub_region);

        return rdev_readat(xldev->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
                        size_t offset, size_t size)
{
        struct region req = {
                .offset = offset,
                .size = size,
        };
        const struct xlate_region_device *xldev;

        xldev = container_of(rd, __typeof__(*xldev), rdev);

        if (!is_subregion(&xldev->sub_region, &req))
                return -1;

        offset -= region_offset(&xldev->sub_region);

        return rdev_writeat(xldev->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
                        size_t offset, size_t size)
{
        struct region req = {
                .offset = offset,
                .size = size,
        };
        const struct xlate_region_device *xldev;

        xldev = container_of(rd, __typeof__(*xldev), rdev);

        if (!is_subregion(&xldev->sub_region, &req))
                return -1;

        offset -= region_offset(&xldev->sub_region);

        return rdev_eraseat(xldev->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
        .mmap = xlate_mmap,
        .munmap = xlate_munmap,
        .readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
        .mmap = xlate_mmap,
        .munmap = xlate_munmap,
        .readat = xlate_readat,
        .writeat = xlate_writeat,
        .eraseat = xlate_eraseat,
};

static void *incoherent_mmap(const struct region_device *rd, size_t offset,
                        size_t size)
{
        const struct incoherent_rdev *irdev;

        irdev = container_of(rd, const struct incoherent_rdev, rdev);

        return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
        const struct incoherent_rdev *irdev;

        irdev = container_of(rd, const struct incoherent_rdev, rdev);

        return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
                        size_t offset, size_t size)
{
        const struct incoherent_rdev *irdev;

        irdev = container_of(rd, const struct incoherent_rdev, rdev);

        return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
                        size_t offset, size_t size)
{
        const struct incoherent_rdev *irdev;

        irdev = container_of(rd, const struct incoherent_rdev, rdev);

        return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
                        size_t size)
{
        const struct incoherent_rdev *irdev;

        irdev = container_of(rd, const struct incoherent_rdev, rdev);

        return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
        .mmap = incoherent_mmap,
        .munmap = incoherent_munmap,
        .readat = incoherent_readat,
        .writeat = incoherent_writeat,
        .eraseat = incoherent_eraseat,
};

const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
                        const struct region *r,
                        const struct region_device *read,
                        const struct region_device *write)
{
        const size_t size = region_sz(r);

        if (size != region_device_sz(read) || size != region_device_sz(write))
                return NULL;

        /* The region is represented as offset 0 to size. That way, the generic
         * rdev operations can be called on the read or write implementation
         * without any unnecessary translation because the offsets all start
         * at 0. */
        region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
        irdev->read = read;
        irdev->write = write;

        return &irdev->rdev;
}
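
/*
 * Usage sketch (illustrative, not from the original file): pair a read-side
 * device with a separate write-side device of the same size so that reads and
 * writes/erases take different paths. The 'ro_view', 'rw_view' and 'buf'
 * names are hypothetical.
 *
 *      struct incoherent_rdev irdev_storage;
 *      const struct region_device *rdev;
 *
 *      rdev = incoherent_rdev_init(&irdev_storage, &ro_view.region,
 *                      &ro_view, &rw_view);
 *      if (rdev != NULL)
 *              rdev_readat(rdev, buf, 0, 16);
 */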