blob: 5d98c744895191e7578947d0a61cd1da81428f18 [file] [log] [blame]
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 * Copyright 2013 Google Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
17
#include <arch/cache.h>
#include "bouncebuf.h"
#include "storage.h"
#include <string.h>
#include <commonlib/stdlib.h>
23
24static int addr_aligned(struct bounce_buffer *state)
25{
26 const uint32_t align_mask = ARCH_DMA_MINALIGN - 1;
27
28 // Check if start is aligned
29 if ((uintptr_t)state->user_buffer & align_mask) {
30 sdhc_debug("Unaligned buffer address %p\n", state->user_buffer);
31 return 0;
32 }
33
34 // Check if length is aligned
35 if (state->len != state->len_aligned) {
36 sdhc_debug("Unaligned buffer length %zd\n", state->len);
37 return 0;
38 }
39
40 // Aligned
41 return 1;
42}
43
44int bounce_buffer_start(struct bounce_buffer *state, void *data,
45 size_t len, unsigned int flags)
46{
47 state->user_buffer = data;
48 state->bounce_buffer = data;
49 state->len = len;
50 state->len_aligned = ROUND(len, ARCH_DMA_MINALIGN);
51 state->flags = flags;
52
53 if (!addr_aligned(state)) {
54 state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
55 state->len_aligned);
56 if (!state->bounce_buffer)
57 return -1;
58
59 if (state->flags & GEN_BB_READ)
60 memcpy(state->bounce_buffer, state->user_buffer,
61 state->len);
62 }
63
64 /*
65 * Flush data to RAM so DMA reads can pick it up,
66 * and any CPU writebacks don't race with DMA writes
67 */
68 dcache_clean_invalidate_by_mva(state->bounce_buffer,
69 state->len_aligned);
70 return 0;
71}
72
73int bounce_buffer_stop(struct bounce_buffer *state)
74{
75 if (state->flags & GEN_BB_WRITE) {
76 // Invalidate cache so that CPU can see any newly DMA'd data
77 dcache_invalidate_by_mva(state->bounce_buffer,
78 state->len_aligned);
79 }
80
81 if (state->bounce_buffer == state->user_buffer)
82 return 0;
83
84 if (state->flags & GEN_BB_WRITE)
85 memcpy(state->user_buffer, state->bounce_buffer, state->len);
86
87 free(state->bounce_buffer);
88
89 return 0;
90}