blob: b7a7aafb238df38d5fb19ac6e92f74deb4e9a8de [file] [log] [blame]
#ifndef _VIRTIO_RING_H
#define _VIRTIO_RING_H

#include "types.h" // u64
#include "memmap.h" // PAGE_SIZE

#define PAGE_SHIFT 12
#define PAGE_MASK (PAGE_SIZE-1)

/* SeaBIOS runs 32bit flat with identity-mapped memory, so the
 * virtual<->physical conversions are plain casts. */
#define virt_to_phys(v) (unsigned long)(v)
#define phys_to_virt(p) (void*)(p)
/* Compiler barrier is enough as an x86 CPU does not reorder reads or writes */
#define smp_rmb() barrier()
#define smp_wmb() barrier()
Gleb Natapov89acfa32010-05-10 11:36:37 +030015
/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
/* We have found a driver for the device. */
#define VIRTIO_CONFIG_S_DRIVER 2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK 4
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED 0x80

/* Largest ring size this driver supports (entries per virtqueue). */
#define MAX_QUEUE_NUM (128)

/* vring_desc.flags: this descriptor chains to the one in 'next'. */
#define VRING_DESC_F_NEXT 1
/* vring_desc.flags: buffer is write-only for the device (read-only if clear). */
#define VRING_DESC_F_WRITE 2

/* vring_avail.flags: guest asks host not to interrupt on buffer consumption. */
#define VRING_AVAIL_F_NO_INTERRUPT 1

/* vring_used.flags: host asks guest not to kick when adding buffers. */
#define VRING_USED_F_NO_NOTIFY 1
34
/* Virtio ring descriptor: one guest buffer, as shared with the host.
 * Layout is fixed by the virtio specification - do not reorder fields. */
struct vring_desc
{
    u64 addr;    /* guest-physical address of the buffer */
    u32 len;     /* buffer length in bytes */
    u16 flags;   /* VRING_DESC_F_* bits */
    u16 next;    /* index of next descriptor, if VRING_DESC_F_NEXT set */
};
42
43struct vring_avail
44{
45 u16 flags;
46 u16 idx;
47 u16 ring[0];
48};
49
/* One entry of the used ring: a descriptor chain the host has finished with. */
struct vring_used_elem
{
    u32 id;    /* head index of the completed descriptor chain */
    u32 len;   /* total bytes written into the chain by the host */
};
55
/* Used ring: host returns completed descriptor chains to the guest.
 * Layout is fixed by the virtio specification - do not reorder fields. */
struct vring_used
{
    u16 flags;                      /* VRING_USED_F_* bits */
    u16 idx;                        /* next free slot (free-running counter) */
    struct vring_used_elem ring[];  /* 'num' completion entries */
};
62
/* Guest-side view of one virtqueue: ring size plus pointers into the
 * shared-memory area holding the three rings (set up by vring_init). */
struct vring {
    unsigned int num;             /* number of descriptors in the ring */
    struct vring_desc *desc;      /* descriptor table (page aligned) */
    struct vring_avail *avail;    /* available ring, right after desc[] */
    struct vring_used *used;      /* used ring (page aligned) */
};
69
70#define vring_size(num) \
71 (((((sizeof(struct vring_desc) * num) + \
72 (sizeof(struct vring_avail) + sizeof(u16) * num)) \
73 + PAGE_MASK) & ~PAGE_MASK) + \
74 (sizeof(struct vring_used) + sizeof(struct vring_used_elem) * num))
75
Kevin O'Connorea8ac632010-05-16 11:34:38 -040076typedef unsigned char virtio_queue_t[vring_size(MAX_QUEUE_NUM)];
Gleb Natapov89acfa32010-05-10 11:36:37 +030077
/* Driver state for one virtqueue: the shared ring storage plus the
 * bookkeeping needed to add and reclaim buffers. */
struct vring_virtqueue {
    virtio_queue_t queue;        /* backing storage for the three rings */
    struct vring vring;          /* pointers into 'queue' (see vring_init) */
    u16 free_head;               /* head of the free descriptor list */
    u16 last_used_idx;           /* last used-ring index we consumed */
    u16 vdata[MAX_QUEUE_NUM];    /* per-descriptor driver cookie */
    /* PCI */
    int queue_index;             /* queue selector for the PCI device */
};
87
/* One element of a scatter-gather list passed to vring_add_buf. */
struct vring_list {
    char *addr;            /* start of the buffer */
    unsigned int length;   /* buffer length in bytes */
};
92
93static inline void vring_init(struct vring *vr,
94 unsigned int num, unsigned char *queue)
95{
96 unsigned int i;
97 unsigned long pa;
98
99 ASSERT32FLAT();
100 vr->num = num;
101
102 /* physical address of desc must be page aligned */
103
104 pa = virt_to_phys(queue);
105 pa = (pa + PAGE_MASK) & ~PAGE_MASK;
106 vr->desc = phys_to_virt(pa);
107
108 vr->avail = (struct vring_avail *)&vr->desc[num];
Gleb Natapov4030db02010-05-17 16:27:27 +0300109 /* disable interrupts */
110 vr->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
Gleb Natapov89acfa32010-05-10 11:36:37 +0300111
112 /* physical address of used must be page aligned */
113
114 pa = virt_to_phys(&vr->avail->ring[num]);
115 pa = (pa + PAGE_MASK) & ~PAGE_MASK;
116 vr->used = phys_to_virt(pa);
117
118 for (i = 0; i < num - 1; i++)
119 vr->desc[i].next = i + 1;
120 vr->desc[i].next = 0;
121}
122
/* Check whether the host has placed new entries in the used ring. */
int vring_more_used(struct vring_virtqueue *vq);
/* Return the descriptor chain starting at 'head' to the free list. */
void vring_detach(struct vring_virtqueue *vq, unsigned int head);
/* Consume the next used-ring entry; 'len' receives the byte count. */
int vring_get_buf(struct vring_virtqueue *vq, unsigned int *len);
/* Queue a scatter-gather list: 'out' read-only then 'in' write-only buffers. */
void vring_add_buf(struct vring_virtqueue *vq, struct vring_list list[],
		   unsigned int out, unsigned int in,
		   int index, int num_added);
/* Publish added buffers and notify the device at 'ioaddr'. */
void vring_kick(unsigned int ioaddr, struct vring_virtqueue *vq, int num_added);

#endif /* _VIRTIO_RING_H */