/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/ioctl.h>
#include <sys/types.h>

#include <xen/io/xenbus.h>

#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
/* ------------------------------------------------------------- */
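/*
 * Tunables.  max_requests caps how many ioreq structs a device will
 * allocate, and use_aio picks the bdrv_aio_readv/writev path over the
 * synchronous bdrv_read/write one.  Judging by the mapping code below,
 * batch_maps maps all grant refs of a request with a single
 * xc_gnttab_map_grant_refs() call instead of one call per page;
 * syncwrite appears to request extra bdrv_flush() syncing around writes.
 */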
static int syncwrite    = 0;
static int batch_maps   = 0;

static int max_requests = 32;
static int use_aio      = 1;
/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
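/*
 * Per-request state (struct ioreq): up to BLKIF_MAX_SEGMENTS_PER_REQUEST
 * grant references handed over by the frontend, the local mappings
 * created for them, a backpointer to the owning device and a list entry
 * linking the request into the per-device queues below.
 */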
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];

    struct XenBlkDev    *blkdev;
    LIST_ENTRY(ioreq)   list;
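/*
 * Per-device state (struct XenBlkDev).  The generic XenDevice must stay
 * the first member so that container_of() in the callbacks below can
 * recover the XenBlkDev from the XenDevice pointer it is handed.
 */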
    struct XenDevice    xendev;  /* must be first */
    const char          *fileproto;
    blkif_back_rings_t  rings;

    LIST_HEAD(inflight_head, ioreq) inflight;
    LIST_HEAD(finished_head, ioreq) finished;
    LIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    BlockDriverState    *bs;
/* ------------------------------------------------------------- */
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (LIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests)
            goto out;
        /* allocate new struct */
        ioreq = qemu_mallocz(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = LIST_FIRST(&blkdev->freelist);
        LIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    LIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    LIST_REMOVE(ioreq, list);
    LIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    LIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    LIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
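/*
 * Grant mapping: ioreq_map() makes the frontend's segment pages visible in
 * our address space before doing I/O, ioreq_unmap() drops those mappings
 * again.  With batch_maps all refs of a request are (un)mapped in a single
 * xc_gnttab call, otherwise each page is handled individually; cnt_map
 * tracks the number of currently mapped pages.
 */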
static void ioreq_unmap(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return;
    if (batch_maps) {
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0)
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0)
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}
static int ioreq_map(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return 0;
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++)
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}
static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int i, rc, len = 0;
    off_t pos;

    if (ioreq_map(ioreq) == -1)
        goto err;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs);

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
                           ioreq->v.iov[i].iov_base,
                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
                            ioreq->v.iov[i].iov_base,
                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs);
    ioreq->status = BLKIF_RSP_OKAY;
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0)
        return;

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    qemu_bh_schedule(ioreq->blkdev->bh);
}
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq_map(ioreq) == -1)
        goto err;

    ioreq->aio_inflight++;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
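/*
 * Response path: build a blkif_response_t for a finished ioreq and place
 * it on the shared ring in whichever layout (native, x86_32, x86_64) the
 * frontend uses.  The return value tells the caller whether the frontend
 * needs an event-channel notification.
 */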
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests)
        blkdev->more_work++;
    return send_notify;
}
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!LIST_EMPTY(&blkdev->finished)) {
        ioreq = LIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify)
        xen_be_send_notify(&blkdev->xendev);
}
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}
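/*
 * Main request loop: pull requests off the shared ring, parse and map
 * them, then run the I/O through either the aio or the sync path.  If
 * more work remains than may be in flight at once, the bottom half is
 * rescheduled to continue later.
 */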
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio)
        blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc))
            break;
        ioreq = ioreq_start(blkdev);
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq))
                xen_be_send_notify(&blkdev->xendev);
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio)
        blk_send_response_all(blkdev);

    if (blkdev->more_work && blkdev->requests_inflight < max_requests)
        qemu_bh_schedule(blkdev->bh);
}
/* ------------------------------------------------------------- */
static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    LIST_INIT(&blkdev->inflight);
    LIST_INIT(&blkdev->finished);
    LIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE)
        batch_maps = 1;
}
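/*
 * blk_init: read the backend configuration (params, mode, type, dev,
 * device-type) from xenstore, open or look up the qemu block driver
 * instance, and publish size and feature information back to xenstore
 * for the frontend.
 */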
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int mode, qflags, have_barriers, info = 0;
    char *h;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        h = strchr(blkdev->params, ':');
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (blkdev->mode == NULL)
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    if (blkdev->type == NULL)
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    if (blkdev->dev == NULL)
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    if (blkdev->devtype == NULL)
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL)
        return -1;

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags = BDRV_O_RDWR;
    } else {
        qflags = BDRV_O_RDONLY;
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom"))
        info |= VDISK_CDROM;

    /* init qemu block driver */
    blkdev->index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->index = drive_get_index(IF_XEN, 0, blkdev->index);
    if (blkdev->index == -1) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (bdrv_open2(blkdev->bs, blkdev->filename, qflags,
                       bdrv_find_format(blkdev->fileproto)) != 0) {
            bdrv_delete(blkdev->bs);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = drives_table[blkdev->index].bdrv;
    }
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }
    have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0;

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers);
    xenstore_write_be_int(&blkdev->xendev, "info",            info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size",     blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;
}
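/*
 * blk_connect: read ring-ref and event-channel from the frontend, map the
 * shared ring page through the grant table, initialize the back ring for
 * the negotiated protocol variant and bind the event channel.
 */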
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1)
        return -1;
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1)
        return -1;

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring)
        return -1;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->index == -1) {
        /* close/delete only if we created it ourself */
        bdrv_close(blkdev->bs);
        bdrv_delete(blkdev->bs);
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->sring = NULL;
    }
}
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!LIST_EMPTY(&blkdev->freelist)) {
        ioreq = LIST_FIRST(&blkdev->freelist);
        LIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        qemu_free(ioreq);
    }

    qemu_free(blkdev->params);
    qemu_free(blkdev->mode);
    qemu_free(blkdev->type);
    qemu_free(blkdev->dev);
    qemu_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .connect    = blk_connect,
    .disconnect = blk_disconnect,
};