/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw.h"
#include "xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;
static int max_requests = 32;
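
/*
 * Tuning knobs: batch_maps selects one contiguous grant mapping per
 * request (xc_gnttab_map_grant_refs) instead of one mapping per page;
 * blk_alloc() below enables it whenever xen_mode != XEN_EMULATE.
 * max_requests caps how many struct ioreq are ever allocated per device
 * (see ioreq_start).
 */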

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};
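
/*
 * Request lifecycle, as implemented by the helpers below: ioreq_start()
 * takes an ioreq from the freelist (or allocates one, up to max_requests)
 * and puts it on the inflight list; ioreq_finish() moves a completed
 * request to the finished list; blk_send_response_all() answers the
 * frontend and ioreq_release() recycles the ioreq onto the freelist.
 */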

/* ------------------------------------------------------------- */

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
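
/*
 * Note: the finish flag selects which accounting counter to drop.
 * Requests that went through ioreq_finish() sit on the finished list
 * (finish == true); requests rejected by ioreq_parse() are released
 * straight off the inflight list (finish == false).
 */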

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
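
/*
 * Worked example (illustrative values, not from the source): a one-segment
 * read with sector_number 128, first_sect 0, last_sect 7 yields
 * start = 128 * 512 = 65536 and an iovec entry with offset 0 and
 * len = (7 - 0 + 1) * 512 = 4096, and passes the page-crossing check
 * since 7 * 512 < XC_PAGE_SIZE.  At this point iov_base only carries the
 * offset within the still-unmapped grant page.
 */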

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    ioreq->mapped = 1;
    return 0;
}
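
/*
 * ioreq_parse() stored only the intra-page offset in iov_base; ioreq_map()
 * rebases each entry onto the freshly mapped grant page, so after mapping
 * iov_base is a real pointer usable by bdrv_aio_readv/writev.
 */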

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
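
/*
 * aio_inflight acts as a reference count: ioreq_runio_qemu_aio() takes one
 * baseline reference before issuing I/O plus one per aio operation; the
 * direct qemu_aio_complete(ioreq, 0) call at the bottom drops the baseline,
 * so the response is only generated once the count reaches zero.
 */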

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
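
/*
 * more_work is raised when ring entries could not be consumed (no free
 * ioreq) or when blk_send_response_one() saw more requests pending.  The
 * bottom half is only rescheduled while below max_requests; otherwise
 * qemu_aio_complete() kicks it again as in-flight requests drain.
 */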

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
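
/*
 * For the defaults here (max_requests = 32, BLKIF_MAX_SEGMENTS_PER_REQUEST
 * assumed to be 11), the worst case works out to
 * 32*11 + 31*10 + 1 = 663 grants, safely below the 2 * 32 * 11 = 704
 * reserved by MAX_GRANTS.
 */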

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;

        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info",            info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size",     blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}
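
/*
 * The backend xenstore directory read above typically looks like this
 * (values illustrative):
 *     params      = "raw:/srv/images/guest.img"   (fileproto:filename)
 *     mode        = "w"
 *     type        = "phy"
 *     dev         = "xvda"
 *     device-type = "disk"
 * The index formula maps the virtual device number onto IF_XEN drives:
 * 202 is the xvd block major, so xvda (202 << 8) gives index 0, xvdb
 * gives index 1, and so on.
 */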

static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
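
/*
 * Protocol negotiation: a 32-bit frontend running against a 64-bit backend
 * (or vice versa) advertises XEN_IO_PROTO_ABI_X86_32/X86_64 in its
 * "protocol" xenstore node; the x86_32_part/x86_64_part shadow rings then
 * account for the differing ring layouts, while blkif_get_x86_*_req()
 * above converts each request into the native format.
 */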

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};