/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <sys/ioctl.h>
#include <sys/types.h>

#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};
/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    memset(ioreq->page, 0, sizeof(ioreq->page));

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
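/*
 * Helpers for the tree of persistently mapped grants: the tree is keyed by
 * grant reference (an unsigned int packed into a pointer), so the comparison
 * works on the unpacked values, and destroy_grant() below is the
 * GDestroyNotify that unmaps a grant's page when its node is dropped.
 */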
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}
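/*
 * ioreq lifecycle: ioreq_start() takes a request struct from the freelist
 * (allocating a new one while fewer than max_requests exist) and puts it on
 * the inflight list; ioreq_finish() moves it to the finished list once the
 * I/O completes; ioreq_release() resets it and returns it to the freelist.
 */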
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}
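/*
 * Map the grants backing this request into our address space.  Grants already
 * present in the persistent-grant tree are reused as-is; the remaining ones
 * are mapped either with a single batched xc_gnttab_map_grant_refs() call
 * (batch_maps) or with one xc_gnttab_map_grant_ref() call per page.
 */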
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));
            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->num_unmap = new_maps;
    ioreq->mapped = 1;
    return 0;
}
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
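/*
 * Completion callback for all AIO submitted on behalf of an ioreq.
 * aio_inflight counts outstanding operations (plus one artificial reference
 * held while submitting); the response is only built once it drops to zero
 * and any pre/post flush has been handled.
 */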
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        bdrv_aio_discard(blkdev->bs,
                         discard_req->sector_number, discard_req->nr_sectors,
                         qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
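/*
 * Build a blkif response for one finished ioreq and place it on the shared
 * ring, using the layout that matches the negotiated protocol (native,
 * x86_32 or x86_64).  Returns nonzero when the frontend should be notified.
 */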
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}
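/*
 * Copy one request off the shared ring into ioreq->req, converting from the
 * 32-bit or 64-bit x86 ABI layout when the frontend does not use the native
 * one.
 */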
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}
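/*
 * Main request loop: flush out finished responses, then consume new requests
 * from the ring, parse them and submit the resulting AIO.  Requests that fail
 * to parse are answered immediately with an error response.
 */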
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}
/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 * 2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
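/*
 * Worked example: with the max_requests default of 32 above and assuming the
 * protocol's usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11, the exact worst case
 * is 32 * 11 + 31 * 10 + 1 = 663 grants, while MAX_GRANTS(32, 11) =
 * 2 * 32 * 11 = 704, so the simplification only over-reserves slightly.
 */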
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}
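/*
 * Discard (TRIM/UNMAP) support is enabled by default and can be turned off
 * via the backend's "discard-enable" xenstore node; when enabled,
 * "feature-discard" is advertised to the frontend.
 */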
static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}
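/*
 * First initialisation phase: read the backend configuration from xenstore
 * ("params", "mode", "type", "dev", ...) and advertise the features we
 * support.  The image itself is only opened later, in blk_connect(), which
 * also fills in sector-size and sectors.
 */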
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}
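/*
 * Second phase, run when the frontend connects: open (or reuse) the block
 * backend, read ring-ref and event-channel from the frontend, map the shared
 * ring in the negotiated ABI and bind the event channel.
 */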
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        BlockDriver *drv;

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev, &local_err);
        if (!blkdev->bs) {
            return -1;
        }
        drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
        if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
                      drv, &local_err) != 0) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            bdrv_unref(blkdev->bs);
            blkdev->bs = NULL;
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        if (bdrv_is_read_only(blkdev->bs) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->bs = NULL;
            return -1;
        }
        /* blkdev->bs is not created by us, we get a reference
         * so we can bdrv_unref() unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in number of sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
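/*
 * Tear down the connection: drop our block-device reference, unbind the event
 * channel and unmap the shared ring.  Safe to call more than once; blk_free()
 * relies on that.
 */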
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->sring = NULL;
    }
}
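/*
 * Final cleanup when the backend is removed: disconnect if still connected,
 * drop persistently mapped grants, free the ioreq pool and the strings read
 * from xenstore.
 */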
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}
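/*
 * Backend registration: the xen backend core drives these callbacks as the
 * frontend moves through the xenbus state machine, roughly in the order
 * alloc, init, initialise (connect), then event/disconnect and finally free.
 */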
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};