/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include <sys/ioctl.h>

#include "hw/xen/xen-legacy-backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/iothread.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "trace.h"
/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
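/*
 * Per-request state.  The fields below are the tail of struct ioreq: they
 * link a request back to its owning XenBlkDev and onto the inflight,
 * finished and freelist queues managed further down in this file.
 */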
    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
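/*
 * The shared request ring may span multiple grant pages.  With an order of
 * 4 the backend accepts up to 1 << 4 = 16 ring pages, which is advertised
 * to the frontend via the "max-ring-page-order" xenstore node in blk_init().
 */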
#define MAX_RING_PAGE_ORDER 4
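/*
 * Per-device state (members of struct XenBlkDev): the legacy xendev header
 * must come first so the generic backend code can treat the instance as a
 * XenLegacyDevice, followed by the ring references, the back-ring state,
 * the request queues with their accounting counters, and the negotiated
 * feature flags.
 */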
    struct XenLegacyDevice    xendev;  /* must be first */
    const char          *fileproto;
    unsigned int        ring_ref[1 << MAX_RING_PAGE_ORDER];
    unsigned int        nr_ring_ref;
    blkif_back_rings_t  rings;

    QLIST_HEAD(, ioreq) inflight;
    QLIST_HEAD(, ioreq) finished;
    QLIST_HEAD(, ioreq) freelist;
    int                 requests_inflight;
    int                 requests_finished;
    unsigned int        max_requests;

    gboolean            feature_discard;

    /* qemu block driver */
/* ------------------------------------------------------------- */
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->buf = NULL;
    ioreq->size = 0;
    ioreq->presync = 0;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
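/*
 * Request allocation: ioreq_start() hands out a request, reusing an entry
 * from the freelist when possible and allocating a new one (up to
 * max_requests) otherwise.  ioreq_finish() moves a completed request to the
 * finished list; ioreq_release() returns it to the freelist.
 */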
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= blkdev->max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, 1);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    struct XenLegacyDevice *xendev = &blkdev->xendev;
    size_t len;
    int i;

    xen_pv_printf(xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_pv_printf(xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_pv_printf(xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_pv_printf(xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_pv_printf(xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_pv_printf(xendev, 0, "error: page crossing\n");
            goto err;
        }

        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1)
            * blkdev->file_blk;
        ioreq->size += len;
    }
    if (ioreq->start + ioreq->size > blkdev->file_size) {
        xen_pv_printf(xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
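/*
 * Copy the payload between the guest's granted pages and the local bounce
 * buffer using grant copy operations.  For reads the buffer is copied out
 * to the frontend's segments; for writes (and flushes with payload) the
 * segments are copied into the buffer before the I/O is issued.
 */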
static int ioreq_grant_copy(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    struct XenLegacyDevice *xendev = &blkdev->xendev;
    XenGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count, rc;
    int64_t file_blk = blkdev->file_blk;
    bool to_domain = (ioreq->req.operation == BLKIF_OP_READ);
    void *virt = ioreq->buf;

    if (ioreq->req.nr_segments == 0) {
        return 0;
    }

    count = ioreq->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (ioreq->req.seg[i].last_sect
                       - ioreq->req.seg[i].first_sect + 1) * file_blk;
        virt += segs[i].len;
    }

    rc = xen_be_copy_grant_refs(xendev, to_domain, segs, count);

    if (rc) {
        xen_pv_printf(xendev, 0,
                      "failed to copy data %d\n", rc);
        ioreq->aio_errors++;
        return -1;
    }

    return rc;
}
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
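/*
 * AIO completion callback.  Runs in the device's AioContext: it accounts
 * errors, re-issues the request after a presync flush, copies read data
 * back to the guest once all in-flight AIO for the request has finished,
 * fills in the response status and kicks the bottom half so the response
 * can be placed on the ring.
 */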
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;
    struct XenBlkDev *blkdev = ioreq->blkdev;
    struct XenLegacyDevice *xendev = &blkdev->xendev;

    aio_context_acquire(blkdev->ctx);

    if (ret != 0) {
        xen_pv_printf(xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        goto done;
    }
    if (ioreq->aio_inflight > 0) {
        goto done;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure ioreq->aio_errors is increased */
        if (ret == 0) {
            ioreq_grant_copy(ioreq);
        }
        qemu_vfree(ioreq->buf);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        qemu_vfree(ioreq->buf);
        break;
    default:
        break;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_finish(ioreq);

    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(blkdev->bh);

done:
    aio_context_release(blkdev->ctx);
}
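/*
 * A discard request from the frontend may be larger than the block layer
 * accepts in one go, so split it into chunks of at most
 * BDRV_REQUEST_MAX_SECTORS and submit one blk_aio_pdiscard() per chunk.
 */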
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining, limit;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
        return false;
    }

    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
    byte_offset = sec_start << BDRV_SECTOR_BITS;
    byte_remaining = sec_count << BDRV_SECTOR_BITS;

    do {
        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
        ioreq->aio_inflight++;
        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
                         qemu_aio_complete, ioreq);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}
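/*
 * Issue the actual I/O: allocate a page-aligned bounce buffer, copy in the
 * payload for writes via grant copy, then submit the request to the
 * BlockBackend with qemu_aio_complete() as the completion callback.
 */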
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
    if (ioreq->req.nr_segments &&
        (ioreq->req.operation == BLKIF_OP_WRITE ||
         ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        ioreq_grant_copy(ioreq)) {
        qemu_vfree(ioreq->buf);
        goto err;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
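/*
 * Build the response on the shared ring, taking the ring layout of the
 * negotiated protocol (native, 32-bit or 64-bit x86 ABI) into account, and
 * report whether the frontend needs to be notified.
 */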
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *) RING_GET_RESPONSE(
            &blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *) RING_GET_RESPONSE(
            &blkdev->rings.x86_32_part, blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *) RING_GET_RESPONSE(
            &blkdev->rings.x86_64_part, blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id        = ioreq->req.id;
    resp->operation = ioreq->req.operation;
    resp->status    = ioreq->status;

    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_pv_send_notify(&blkdev->xendev);
    }
}
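/*
 * Fetch one request from the shared ring, converting from the 32-bit or
 * 64-bit x86 ABI layout to the native blkif_request_t where necessary.
 */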
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}
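/*
 * Main request loop: flush any pending responses, then pull requests off
 * the ring until it is empty or we run out of request slots, parse each one
 * and either fail it immediately or submit it as asynchronous I/O.
 */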
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        if (ioreq_parse(ioreq) != 0) {
            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
/* ------------------------------------------------------------- */
static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;

    aio_context_acquire(blkdev->ctx);
    blk_handle_requests(blkdev);
    aio_context_release(blkdev->ctx);
}
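/*
 * Device setup: each backend instance gets its own IOThread, and all ring
 * processing runs from a bottom half scheduled in that thread's AioContext.
 */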
static void blk_alloc(struct XenLegacyDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    Error *err = NULL;

    trace_xen_disk_alloc(xendev->name);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);

    blkdev->iothread = iothread_create(xendev->name, &err);
    assert(!err);

    blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
    blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);
}
static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    struct XenLegacyDevice *xendev = &blkdev->xendev;
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(xendev, "feature-discard", 1);
    }
}
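/*
 * blk_init() runs before the frontend connects: it reads the backend
 * configuration from xenstore (params, mode, type, dev, device-type,
 * direct-io-safe) and advertises the backend's features and disk info.
 */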
static int blk_init(struct XenLegacyDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    trace_xen_disk_init(xendev->name);

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;

        blkdev->params = xenstore_read_be_str(xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h + 1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /*
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(xendev, "info", info);

    xenstore_write_be_int(xendev, "max-ring-page-order",
                          MAX_RING_PAGE_ORDER);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}
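/*
 * blk_connect() runs when the frontend is ready: it opens or looks up the
 * BlockBackend, publishes the disk geometry, maps the shared ring supplied
 * by the frontend, selects the ring ABI and binds the event channel.
 */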
static int blk_connect(struct XenLegacyDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags;
    bool readonly = true;
    bool writethrough = true;
    int order, ring_ref;
    unsigned int ring_size, max_grants;
    unsigned int i;

    trace_xen_disk_connect(xendev->name);

    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = 0;
        writethrough = false;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (xendev->dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put_str(options, "driver", blkdev->fileproto);
        }

        /* setup via xenbus -> create new block driver instance */
        xen_pv_printf(xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_pv_printf(xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
        blk_set_enable_write_cache(blkdev->blk, !writethrough);
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_pv_printf(xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_pv_printf(xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_legacy(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;

        xen_pv_printf(xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_pv_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size / MiB);

    /* Fill in number of sector size and number of sectors */
    xenstore_write_be_int(xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);
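    /*
     * Ring negotiation: a frontend that supports multi-page rings publishes
     * "ring-page-order" and one "ring-ref%u" node per page; otherwise fall
     * back to the traditional single "ring-ref".
     */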
    if (xenstore_read_fe_int(xendev, "ring-page-order",
                             &order) == -1) {
        blkdev->nr_ring_ref = 1;

        if (xenstore_read_fe_int(xendev, "ring-ref",
                                 &ring_ref) == -1) {
            return -1;
        }
        blkdev->ring_ref[0] = ring_ref;

    } else if (order >= 0 && order <= MAX_RING_PAGE_ORDER) {
        blkdev->nr_ring_ref = 1 << order;

        for (i = 0; i < blkdev->nr_ring_ref; i++) {
            char *key;

            key = g_strdup_printf("ring-ref%u", i);
            if (!key) {
                return -1;
            }

            if (xenstore_read_fe_int(xendev, key,
                                     &ring_ref) == -1) {
                g_free(key);
                return -1;
            }
            blkdev->ring_ref[i] = ring_ref;

            g_free(key);
        }
    } else {
        xen_pv_printf(xendev, 0, "invalid ring-page-order: %d\n",
                      order);
        return -1;
    }

    if (xenstore_read_fe_int(xendev, "event-channel",
                             &xendev->remote_port) == -1) {
        return -1;
    }

    if (!xendev->protocol) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_32;
    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    } else {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    }
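    /*
     * The number of requests that fit on the ring depends on both the ring
     * size (number of pages) and the request layout of the selected ABI.
     */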
    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        return -1;
    }

    /* Add on the number needed for the ring pages */
    max_grants = blkdev->nr_ring_ref;

    xen_be_set_max_grant_refs(xendev, max_grants);
    blkdev->sring = xen_be_map_grant_refs(xendev, blkdev->ring_ref,
                                          blkdev->nr_ring_ref,
                                          PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, ring_size);
        break;
    }
    }

    blk_set_aio_context(blkdev->blk, blkdev->ctx);

    xen_be_bind_evtchn(xendev);

    xen_pv_printf(xendev, 1, "ok: proto %s, nr-ring-ref %u, "
                  "remote port %d, local port %d\n",
                  xendev->protocol, blkdev->nr_ring_ref,
                  xendev->remote_port, xendev->local_port);
    return 0;
}
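/*
 * Tear down the connection: detach and release the BlockBackend, move it
 * back to the main AioContext, unbind the event channel and unmap the
 * shared ring.
 */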
static void blk_disconnect(struct XenLegacyDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    trace_xen_disk_disconnect(xendev->name);

    aio_context_acquire(blkdev->ctx);

    if (blkdev->blk) {
        blk_set_aio_context(blkdev->blk, qemu_get_aio_context());
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_pv_unbind_evtchn(xendev);

    aio_context_release(blkdev->ctx);

    if (blkdev->sring) {
        xen_be_unmap_grant_refs(xendev, blkdev->sring,
                                blkdev->nr_ring_ref);
        blkdev->sring = NULL;
    }
}
static int blk_free(struct XenLegacyDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    trace_xen_disk_free(xendev->name);

    blk_disconnect(xendev);

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    iothread_destroy(blkdev->iothread);
    return 0;
}
static void blk_event(struct XenLegacyDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}
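/*
 * Hook the backend into the legacy Xen backend framework; these callbacks
 * are invoked as the frontend/backend state machine progresses.
 */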
struct XenDevOps xen_blkdev_ops = {
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .size       = sizeof(struct XenBlkDev),
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};