/* virtio-pci.c - pci interface for virtio interface
 *
 * (c) Copyright 2008 Bull S.A.S.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * some parts from Linux Virtio PCI driver
 *
 *  Copyright IBM Corp. 2007
 *  Authors: Anthony Liguori <aliguori@us.ibm.com>
 *
 *  Adopted for Seabios: Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPLv3
 * See the COPYING file in the top-level directory.
 */

#include "config.h" // CONFIG_DEBUG_LEVEL
#include "malloc.h" // free
#include "output.h" // dprintf
#include "pci.h" // pci_config_readl
#include "pcidevice.h" // pci_find_capability
#include "pci_regs.h" // PCI_BASE_ADDRESS_0
#include "string.h" // memset
#include "virtio-pci.h"
#include "virtio-ring.h"

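/* Read a field of the given width (1/2/4/8 bytes) from a virtio
 * capability region, using port I/O, MMIO or the PCI cfg access
 * window depending on how the capability was mapped.  64-bit fields
 * are read as two 32-bit halves. */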
u64 _vp_read(struct vp_cap *cap, u32 offset, u8 size)
{
    u64 var = 0;

    switch (cap->mode) {
    case VP_ACCESS_IO:
    {
        u32 addr = cap->ioaddr + offset;
        switch (size) {
        case 8:
            var = inl(addr);
            var |= (u64)inl(addr+4) << 32;
            break;
        case 4:
            var = inl(addr);
            break;
        case 2:
            var = inw(addr);
            break;
        case 1:
            var = inb(addr);
            break;
        }
        break;
    }

    case VP_ACCESS_MMIO:
    {
        void *addr = cap->memaddr + offset;
        switch (size) {
        case 8:
            var = readl(addr);
            var |= (u64)readl(addr+4) << 32;
            break;
        case 4:
            var = readl(addr);
            break;
        case 2:
            var = readw(addr);
            break;
        case 1:
            var = readb(addr);
            break;
        }
        break;
    }

    case VP_ACCESS_PCICFG:
    {
        u32 addr = cap->baroff + offset;
        pci_config_writeb(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.bar),
                          cap->bar);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.offset),
                          addr);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.length),
                          (size > 4) ? 4 : size);
        switch (size) {
        case 8:
            var = pci_config_readl(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            pci_config_writel(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, cap.offset),
                      addr + 4);
            var |= (u64)pci_config_readl(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data)) << 32;
            break;
        case 4:
            var = pci_config_readl(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            break;
        case 2:
            var = pci_config_readw(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            break;
        case 1:
            var = pci_config_readb(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            break;
        }
    }
    }
    dprintf(9, "vp read %x (%d) -> 0x%llx\n", cap->ioaddr + offset, size, var);
    return var;
}

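/* Write a field of the given width (1/2/4 bytes) to a virtio
 * capability region, using the same access method selection as
 * _vp_read(). */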
void _vp_write(struct vp_cap *cap, u32 offset, u8 size, u64 var)
{
    dprintf(9, "vp write %x (%d) <- 0x%llx\n", cap->ioaddr + offset, size, var);

    switch (cap->mode) {
    case VP_ACCESS_IO:
    {
        u32 addr = cap->ioaddr + offset;
        switch (size) {
        case 4:
            outl(var, addr);
            break;
        case 2:
            outw(var, addr);
            break;
        case 1:
            outb(var, addr);
            break;
        }
        break;
    }

    case VP_ACCESS_MMIO:
    {
        void *addr = cap->memaddr + offset;
        switch (size) {
        case 4:
            writel(addr, var);
            break;
        case 2:
            writew(addr, var);
            break;
        case 1:
            writeb(addr, var);
            break;
        }
        break;
    }

    case VP_ACCESS_PCICFG:
    {
        u32 addr = cap->baroff + offset;
        pci_config_writeb(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.bar),
                          cap->bar);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.offset),
                          addr);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.length),
                          size);
        switch (size) {
        case 4:
            pci_config_writel(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        case 2:
            pci_config_writew(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        case 1:
            pci_config_writeb(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        }
    }
    }
}

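/* Return the 64-bit device feature bits.  Modern (virtio 1.0) devices
 * expose them as two 32-bit halves selected via device_feature_select;
 * legacy devices only provide the low 32 bits. */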
u64 vp_get_features(struct vp_device *vp)
{
    u32 f0, f1;

    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 0);
        f0 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature);
        vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 1);
        f1 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature);
    } else {
        f0 = vp_read(&vp->legacy, virtio_pci_legacy, host_features);
        f1 = 0;
    }
    return ((u64)f1 << 32) | f0;
}

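/* Write the driver's accepted feature bits back to the device. */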
void vp_set_features(struct vp_device *vp, u64 features)
{
    u32 f0, f1;

    f0 = features;
    f1 = features >> 32;

    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f0);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 1);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f1);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, guest_features, f0);
    }
}

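/* Read the device status byte. */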
u8 vp_get_status(struct vp_device *vp)
{
    if (vp->use_modern) {
        return vp_read(&vp->common, virtio_pci_common_cfg, device_status);
    } else {
        return vp_read(&vp->legacy, virtio_pci_legacy, status);
    }
}

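/* Update the device status byte.  Writing 0 would reset the device,
 * so that case is rejected here; use vp_reset() instead. */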
void vp_set_status(struct vp_device *vp, u8 status)
{
    if (status == 0) /* reset */
        return;
    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, device_status, status);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, status, status);
    }
}

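/* Read (and thereby acknowledge) the ISR status field. */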
u8 vp_get_isr(struct vp_device *vp)
{
    if (vp->use_modern) {
        return vp_read(&vp->isr, virtio_pci_isr, isr);
    } else {
        return vp_read(&vp->legacy, virtio_pci_legacy, isr);
    }
}

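/* Reset the device by writing status 0, then read the ISR field to
 * flush any pending interrupt. */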
void vp_reset(struct vp_device *vp)
{
    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, device_status, 0);
        vp_read(&vp->isr, virtio_pci_isr, isr);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, status, 0);
        vp_read(&vp->legacy, virtio_pci_legacy, isr);
    }
}

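/* Notify the device that new buffers are available in the given
 * virtqueue.  Modern devices are notified at queue_notify_off *
 * notify_off_multiplier within the notify capability region; legacy
 * devices use the queue_notify register. */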
void vp_notify(struct vp_device *vp, struct vring_virtqueue *vq)
{
    if (vp->use_modern) {
        u32 offset = vq->queue_notify_off * vp->notify_off_multiplier;
        switch (vp->notify.mode) {
        case VP_ACCESS_IO:
            outw(vq->queue_index, vp->notify.ioaddr + offset);
            break;
        case VP_ACCESS_MMIO:
            writew(vp->notify.memaddr + offset, vq->queue_index);
            break;
        case VP_ACCESS_PCICFG:
            pci_config_writeb(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, cap.bar),
                              vp->notify.bar);
            pci_config_writel(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, cap.offset),
                              vp->notify.baroff + offset);
            pci_config_writel(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, cap.length),
                              2);
            pci_config_writew(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              vq->queue_index);
        }
        dprintf(9, "vp notify %x (%d) -- 0x%x\n",
                vp->notify.ioaddr, 2, vq->queue_index);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, queue_notify, vq->queue_index);
    }
}

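/* Allocate and activate virtqueue 'queue_index'.  On success *p_vq
 * points to the new vring_virtqueue and the queue size is returned;
 * on failure -1 is returned and *p_vq is NULL. */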
int vp_find_vq(struct vp_device *vp, int queue_index,
               struct vring_virtqueue **p_vq)
{
    u16 num;

    ASSERT32FLAT();
    struct vring_virtqueue *vq = *p_vq = memalign_high(PAGE_SIZE, sizeof(*vq));
    if (!vq) {
        warn_noalloc();
        goto fail;
    }
    memset(vq, 0, sizeof(*vq));

    /* select the queue */
    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, queue_select, queue_index);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, queue_sel, queue_index);
    }

    /* check if the queue is available */
    if (vp->use_modern) {
        num = vp_read(&vp->common, virtio_pci_common_cfg, queue_size);
        if (num > MAX_QUEUE_NUM) {
            vp_write(&vp->common, virtio_pci_common_cfg, queue_size,
                     MAX_QUEUE_NUM);
            num = vp_read(&vp->common, virtio_pci_common_cfg, queue_size);
        }
    } else {
        num = vp_read(&vp->legacy, virtio_pci_legacy, queue_num);
    }
    if (!num) {
        dprintf(1, "ERROR: queue size is 0\n");
        goto fail;
    }
    if (num > MAX_QUEUE_NUM) {
        dprintf(1, "ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
        goto fail;
    }

    /* check if the queue is already active */
    if (vp->use_modern) {
        if (vp_read(&vp->common, virtio_pci_common_cfg, queue_enable)) {
            dprintf(1, "ERROR: queue already active\n");
            goto fail;
        }
    } else {
        if (vp_read(&vp->legacy, virtio_pci_legacy, queue_pfn)) {
            dprintf(1, "ERROR: queue already active\n");
            goto fail;
        }
    }
    vq->queue_index = queue_index;

    /* initialize the queue */
    struct vring *vr = &vq->vring;
    vring_init(vr, num, (unsigned char*)&vq->queue);

    /* activate the queue
     *
     * NOTE: vr->desc is initialized by vring_init()
     */

    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, queue_desc_lo,
                 (unsigned long)virt_to_phys(vr->desc));
        vp_write(&vp->common, virtio_pci_common_cfg, queue_desc_hi, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, queue_avail_lo,
                 (unsigned long)virt_to_phys(vr->avail));
        vp_write(&vp->common, virtio_pci_common_cfg, queue_avail_hi, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, queue_used_lo,
                 (unsigned long)virt_to_phys(vr->used));
        vp_write(&vp->common, virtio_pci_common_cfg, queue_used_hi, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, queue_enable, 1);
        vq->queue_notify_off = vp_read(&vp->common, virtio_pci_common_cfg,
                                       queue_notify_off);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, queue_pfn,
                 (unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT);
    }
    return num;

fail:
    free(vq);
    *p_vq = NULL;
    return -1;
}

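/* Scan the vendor-specific PCI capabilities to locate the common,
 * notify, isr and device config regions.  If all four are present the
 * device is driven in modern (virtio 1.0) mode, otherwise it falls
 * back to the legacy I/O bar.  Finally reset the device and announce
 * the driver with ACKNOWLEDGE|DRIVER status. */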
void vp_init_simple(struct vp_device *vp, struct pci_device *pci)
{
    u8 cap = pci_find_capability(pci, PCI_CAP_ID_VNDR, 0);
    struct vp_cap *vp_cap;
    const char *mode;
    u32 offset, base, mul;
    u64 addr;
    u8 type;

    memset(vp, 0, sizeof(*vp));
    while (cap != 0) {
        type = pci_config_readb(pci->bdf, cap +
                                offsetof(struct virtio_pci_cap, cfg_type));
        switch (type) {
        case VIRTIO_PCI_CAP_COMMON_CFG:
            vp_cap = &vp->common;
            break;
        case VIRTIO_PCI_CAP_NOTIFY_CFG:
            vp_cap = &vp->notify;
            mul = offsetof(struct virtio_pci_notify_cap, notify_off_multiplier);
            vp->notify_off_multiplier = pci_config_readl(pci->bdf, cap + mul);
            break;
        case VIRTIO_PCI_CAP_ISR_CFG:
            vp_cap = &vp->isr;
            break;
        case VIRTIO_PCI_CAP_DEVICE_CFG:
            vp_cap = &vp->device;
            break;
        case VIRTIO_PCI_CAP_PCI_CFG:
            vp->common.cfg = cap;
            vp->common.bdf = pci->bdf;
            vp->notify.cfg = cap;
            vp->notify.bdf = pci->bdf;
            vp->isr.cfg = cap;
            vp->isr.bdf = pci->bdf;
            vp->device.cfg = cap;
            vp->device.bdf = pci->bdf;
            vp_cap = NULL;
            dprintf(1, "pci dev %x:%x virtio cap at 0x%x type %d [pci cfg access]\n",
                    pci_bdf_to_bus(pci->bdf), pci_bdf_to_dev(pci->bdf),
                    cap, type);
            break;
        default:
            vp_cap = NULL;
            break;
        }
        if (vp_cap && !vp_cap->cap) {
            vp_cap->cap = cap;
            vp_cap->bar = pci_config_readb(pci->bdf, cap +
                                           offsetof(struct virtio_pci_cap, bar));
            offset = pci_config_readl(pci->bdf, cap +
                                      offsetof(struct virtio_pci_cap, offset));
            base = PCI_BASE_ADDRESS_0 + 4 * vp_cap->bar;
            addr = pci_config_readl(pci->bdf, base);
            if (addr & PCI_BASE_ADDRESS_SPACE_IO) {
                addr &= PCI_BASE_ADDRESS_IO_MASK;
                vp_cap->mode = VP_ACCESS_IO;
            } else if ((addr & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
                       PCI_BASE_ADDRESS_MEM_TYPE_64) {
                addr &= PCI_BASE_ADDRESS_MEM_MASK;
                addr |= (u64)pci_config_readl(pci->bdf, base + 4) << 32;
                vp_cap->mode = (addr > 0xffffffffll) ?
                    VP_ACCESS_PCICFG : VP_ACCESS_MMIO;
            } else {
                addr &= PCI_BASE_ADDRESS_MEM_MASK;
                vp_cap->mode = VP_ACCESS_MMIO;
            }
            switch (vp_cap->mode) {
            case VP_ACCESS_IO:
            {
                u32 addr = pci_enable_iobar(pci, base);
                if (!addr)
                    return;
                vp_cap->ioaddr = addr + offset;
                mode = "io";
                break;
            }
            case VP_ACCESS_MMIO:
            {
                void *addr = pci_enable_membar(pci, base);
                if (!addr)
                    return;
                vp_cap->memaddr = addr + offset;
                mode = "mmio";
                break;
            }
            case VP_ACCESS_PCICFG:
                mode = "pcicfg";
                vp_cap->baroff = offset;
                break;
            default:
                mode = "Huh?";
                break;
            }
            dprintf(1, "pci dev %x:%x virtio cap at 0x%x type %d "
                    "bar %d at 0x%08llx off +0x%04x [%s]\n",
                    pci_bdf_to_bus(pci->bdf), pci_bdf_to_dev(pci->bdf),
                    vp_cap->cap, type, vp_cap->bar, addr, offset, mode);
        }

        cap = pci_find_capability(pci, PCI_CAP_ID_VNDR, cap);
    }

    if (vp->common.cap && vp->notify.cap && vp->isr.cap && vp->device.cap) {
        dprintf(1, "pci dev %pP using modern (1.0) virtio mode\n", pci);
        vp->use_modern = 1;
    } else {
        dprintf(1, "pci dev %pP using legacy (0.9.5) virtio mode\n", pci);
        vp->legacy.bar = 0;
        vp->legacy.ioaddr = pci_enable_iobar(pci, PCI_BASE_ADDRESS_0);
        if (!vp->legacy.ioaddr)
            return;
        vp->legacy.mode = VP_ACCESS_IO;
    }

    vp_reset(vp);
    pci_enable_busmaster(pci);
    vp_set_status(vp, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                  VIRTIO_CONFIG_S_DRIVER);
}