/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */
#include <sys/ioctl.h>
#include <sys/types.h>

#include "xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
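/*
 * Descriptive note (added): BLOCK_SIZE is the 512-byte sector size used by
 * the Xen block protocol, max_requests caps the number of struct ioreq kept
 * per device, and batch_maps selects whether grants are mapped one at a time
 * or in a single batched xc_gnttab_map_grant_refs() call (see ioreq_map()).
 */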
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};
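/*
 * Request life cycle (descriptive note, added): an ioreq is taken from (or
 * added to) the freelist by ioreq_start(), kept on the inflight list while
 * its AIO is pending, moved to the finished list by ioreq_finish() once the
 * response status is known, and finally returned to the freelist by
 * ioreq_release() after the response has been placed on the ring.
 */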
/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}
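/*
 * Note (added): destroy_grant() is installed as the GDestroyNotify of the
 * persistent_gnts tree (see blk_connect()), so persistently mapped pages are
 * unmapped automatically when the tree is destroyed in blk_free().
 */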
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
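/*
 * Note on the checks above (added): each segment references a single granted
 * page, so a segment whose last_sect * BLOCK_SIZE reaches XC_PAGE_SIZE would
 * cross a page boundary and cannot be backed by one grant; likewise at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments fit in one blkif request.
 */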
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}
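/*
 * Note (added): the two branches above mirror ioreq_map(). With batch_maps
 * the grants were mapped as one contiguous region (ioreq->pages) and are
 * unmapped with a single call, otherwise each page in ioreq->page[] is
 * unmapped individually. Pages handed over to the persistent-grant tree are
 * excluded via ioreq->num_unmap and stay mapped.
 */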
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent this granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}
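/*
 * Rough sizing note (added, assuming the usual protocol value
 * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11): with max_requests = 32 the
 * persistent-grant cache is capped at max_grants = 32 * 11 = 352 pages of
 * XC_PAGE_SIZE (4096 bytes on x86), i.e. about 1.4 MiB kept mapped per
 * device when the frontend negotiates feature-persistent.
 */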
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}
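/*
 * Completion flow (descriptive note, added): a request may consist of up to
 * three AIO stages -- an optional pre-flush (presync), the data transfer
 * itself, and an optional post-flush (postsync) -- and qemu_aio_complete()
 * is the callback for all of them. Only when aio_inflight drops to zero is
 * the response status set and the bottom half scheduled to push it onto the
 * ring.
 */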
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
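/*
 * Note (added): RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() only requests an
 * event-channel notification when the frontend is actually waiting for one,
 * and the tail check above lets the backend pick up requests that arrived
 * while responses were being written, keeping the number of notifications
 * low under load.
 */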
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}
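/*
 * Note (added): the frontend may run a 32-bit or 64-bit kernel independently
 * of the backend, so requests are either copied verbatim (native layout) or
 * converted with blkif_get_x86_32_req()/blkif_get_x86_64_req() into the
 * native blkif_request_t before being parsed.
 */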
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 * 2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
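/*
 * Worked example (added, assuming BLKIF_MAX_SEGMENTS_PER_REQUEST == 11):
 * with max_requests = 32 the exact worst case above is
 * 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound
 * MAX_GRANTS(32, 11) = 2 * 32 * 11 = 704.
 */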
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}
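/*
 * Note (added): the xenstore keys written above (feature-flush-cache,
 * feature-persistent, info, sector-size, sectors) are what the frontend
 * driver reads to size the virtual disk and decide which optional features
 * to negotiate; blk_connect() later reads the frontend's answers (ring-ref,
 * event-channel, feature-persistent).
 */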
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers;

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};
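/*
 * Descriptive note (added): the generic Xen backend code drives these
 * callbacks roughly in this order -- blk_alloc() when the backend entry
 * appears in xenstore, blk_init() to publish the disk geometry and feature
 * flags, blk_connect() once the frontend has written its ring reference and
 * event channel, then blk_event()/blk_bh() during normal operation, and
 * blk_disconnect()/blk_free() on teardown.
 */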
,