Thaminda Edirisooriya | 8fad21d | 2015-07-29 17:43:20 -0700 | [diff] [blame] | 1 | // See LICENSE for license details. |
| 2 | |
| 3 | #ifndef _RISCV_ATOMIC_H |
| 4 | #define _RISCV_ATOMIC_H |
| 5 | |
Thaminda Edirisooriya | 8fad21d | 2015-07-29 17:43:20 -0700 | [diff] [blame] | 6 | #include <arch/encoding.h> |
| 7 | |
// Disable interrupts and return the previous sstatus value; the caller
// keeps it as "flags" for the matching enable_irqrestore().
// (clear_csr returns the CSR value from before the clear.)
#define disable_irqsave() clear_csr(sstatus, SSTATUS_IE)
// Restore the interrupt-enable bit saved by disable_irqsave(): interrupts
// are re-enabled only if SSTATUS_IE was set in the saved flags.
#define enable_irqrestore(flags) set_csr(sstatus, (flags) & SSTATUS_IE)
| 10 | |
// Simple test-and-set spinlock: lock == 0 means free, nonzero means held.
typedef struct { int lock; } spinlock_t;
// Static initializer for an unlocked spinlock.
#define SPINLOCK_INIT {0}
| 13 | |
// Full memory barrier (compiler barrier + hardware fence).
#define mb() __sync_synchronize()
// Volatile store/load: force a real memory access so spin loops and flag
// updates cannot be cached in a register or optimized away.
// __typeof__ (rather than typeof) keeps the header compiling under strict
// -std=c99/c11 modes; (val) is parenthesized per macro-argument hygiene.
#define atomic_set(ptr, val) (*(volatile __typeof__(*(ptr)) *)(ptr) = (val))
#define atomic_read(ptr) (*(volatile __typeof__(*(ptr)) *)(ptr))
| 17 | |
// Atomic read-modify-write primitives. All three return the value the
// target held BEFORE the operation (fetch-and-op semantics).
#ifdef PK_ENABLE_ATOMICS
// Hardware atomics via the GCC __sync builtins.
# define atomic_add(ptr, inc) __sync_fetch_and_add(ptr, inc)
# define atomic_swap(ptr, swp) __sync_lock_test_and_set(ptr, swp)
# define atomic_cas(ptr, cmp, swp) __sync_val_compare_and_swap(ptr, cmp, swp)
#else
// Uniprocessor fallback: masking interrupts makes the sequence atomic with
// respect to anything else that could run on this hart.
// Fixes vs. the previous revision:
//  * atomic_add/atomic_cas declared the result as typeof(ptr) (the POINTER
//    type) while assigning it the pointee value — a type error; the result
//    must have the pointee type.
//  * the cast (volatile typeof(ptr)) volatile-qualified the pointer itself,
//    not the pointee, so the memory accesses were not actually volatile.
//  * __typeof__ instead of typeof so strict -std= modes still compile.
//  * locals renamed to _irq_state/_res so argument expressions that mention
//    a caller variable named "flags" or "res" are not silently shadowed.
# define atomic_add(ptr, inc) ({ \
  long _irq_state = disable_irqsave(); \
  __typeof__(*(ptr)) _res = *(volatile __typeof__(*(ptr)) *)(ptr); \
  *(volatile __typeof__(*(ptr)) *)(ptr) = _res + (inc); \
  enable_irqrestore(_irq_state); \
  _res; })
# define atomic_swap(ptr, swp) ({ \
  long _irq_state = disable_irqsave(); \
  __typeof__(*(ptr)) _res = *(volatile __typeof__(*(ptr)) *)(ptr); \
  *(volatile __typeof__(*(ptr)) *)(ptr) = (swp); \
  enable_irqrestore(_irq_state); \
  _res; })
# define atomic_cas(ptr, cmp, swp) ({ \
  long _irq_state = disable_irqsave(); \
  __typeof__(*(ptr)) _res = *(volatile __typeof__(*(ptr)) *)(ptr); \
  if (_res == (cmp)) *(volatile __typeof__(*(ptr)) *)(ptr) = (swp); \
  enable_irqrestore(_irq_state); \
  _res; })
#endif
| 42 | |
| 43 | static inline void spinlock_lock(spinlock_t* lock) |
| 44 | { |
| 45 | do |
| 46 | { |
| 47 | while (atomic_read(&lock->lock)) |
| 48 | ; |
| 49 | } while (atomic_swap(&lock->lock, -1)); |
| 50 | mb(); |
| 51 | } |
| 52 | |
// Release the lock: fence first so the critical section's stores are
// visible before the lock is observed free, then clear the flag.
static inline void spinlock_unlock(spinlock_t* lock)
{
  mb();
  atomic_set(&lock->lock,0);
}
| 58 | |
| 59 | static inline long spinlock_lock_irqsave(spinlock_t* lock) |
| 60 | { |
| 61 | long flags = disable_irqsave(); |
| 62 | spinlock_lock(lock); |
| 63 | return flags; |
| 64 | } |
| 65 | |
// Release the lock, then restore the interrupt state captured by
// spinlock_lock_irqsave(). The lock is dropped before interrupts are
// re-enabled, mirroring the acquire-side ordering.
static inline void spinlock_unlock_irqrestore(spinlock_t* lock, long flags)
{
  spinlock_unlock(lock);
  enable_irqrestore(flags);
}
| 71 | |
| 72 | #endif |