/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic bounce buffer implementation
 */

#include <arch/cache.h>
#include "bouncebuf.h"
#include "storage.h"
#include <string.h>
#include <commonlib/stdlib.h>

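/*
 * Return 1 if the user buffer already satisfies the DMA alignment
 * constraints (both the start address and the transfer length are
 * multiples of ARCH_DMA_MINALIGN), 0 otherwise.
 */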
static int addr_aligned(struct bounce_buffer *state)
{
	const uint32_t align_mask = ARCH_DMA_MINALIGN - 1;

	// Check if start is aligned
	if ((uintptr_t)state->user_buffer & align_mask) {
		sdhc_debug("Unaligned buffer address %p\n", state->user_buffer);
		return 0;
	}

	// Check if length is aligned
	if (state->len != state->len_aligned) {
		sdhc_debug("Unaligned buffer length %zu\n", state->len);
		return 0;
	}

	// Aligned
	return 1;
}

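/*
 * Set up a DMA-safe buffer for a transfer. If the user buffer is already
 * aligned it is used directly; otherwise an aligned bounce buffer is
 * allocated and, when the device will read from memory (GEN_BB_READ),
 * pre-filled with the caller's data. The cache is cleaned/invalidated so
 * the device and CPU views of the buffer cannot diverge during DMA.
 */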
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = ROUND(len, ARCH_DMA_MINALIGN);
	state->flags = flags;

	if (!addr_aligned(state)) {
		state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -1;

		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes.
	 */
	dcache_clean_invalidate_by_mva(state->bounce_buffer,
				       state->len_aligned);
	return 0;
}

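/*
 * Finish a transfer started with bounce_buffer_start(). When the device
 * wrote to memory (GEN_BB_WRITE), the cache is invalidated and any bounced
 * data is copied back to the user buffer. The bounce buffer, if one was
 * allocated, is freed.
 */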
int bounce_buffer_stop(struct bounce_buffer *state)
{
	if (state->flags & GEN_BB_WRITE) {
		// Invalidate cache so that CPU can see any newly DMA'd data
		dcache_invalidate_by_mva(state->bounce_buffer,
					 state->len_aligned);
	}

	if (state->bounce_buffer == state->user_buffer)
		return 0;

	if (state->flags & GEN_BB_WRITE)
		memcpy(state->user_buffer, state->bounce_buffer, state->len);

	free(state->bounce_buffer);

	return 0;
}
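
/*
 * Typical call sequence (an illustrative sketch; sdhci_start_dma() and
 * its arguments are hypothetical driver code, not part of this file):
 *
 *	struct bounce_buffer bbstate;
 *
 *	// The device will write into memory, e.g. a block read from the card.
 *	if (bounce_buffer_start(&bbstate, buffer, len, GEN_BB_WRITE))
 *		return -1;
 *
 *	// Program the controller with the DMA-safe address and length.
 *	sdhci_start_dma(ctrlr, bbstate.bounce_buffer, bbstate.len_aligned);
 *
 *	// Copy any bounced data back to 'buffer' and free the bounce buffer.
 *	bounce_buffer_stop(&bbstate);
 */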