/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */
22 #include "qemu/osdep.h"
23 #include <sys/ioctl.h>
27 #include "hw/xen/xen_backend.h"
28 #include "xen_blkif.h"
29 #include "sysemu/blockdev.h"
30 #include "sysemu/block-backend.h"
31 #include "qapi/error.h"
32 #include "qapi/qmp/qdict.h"
33 #include "qapi/qmp/qstring.h"
/* ------------------------------------------------------------- */

static int batch_maps   = 0;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
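/*
 * Note: BLOCK_SIZE is the 512-byte unit in which the blkif protocol
 * expresses sector_number, first_sect and last_sect; blk_init() stores it
 * in blkdev->file_blk and blk_connect() advertises it to the frontend as
 * "sector-size".
 */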
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
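    /*
     * Per-request grant bookkeeping: domids[]/refs[] identify the foreign
     * grant backing each segment, page[] holds the address that segment is
     * mapped (or copied) at, and blkdev/list link the ioreq into the
     * per-device inflight/finished/freelist queues.
     */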
#define MAX_RING_PAGE_ORDER 4

    struct XenDevice    xendev;  /* must be first */
    const char          *fileproto;
    unsigned int        ring_ref[1 << MAX_RING_PAGE_ORDER];
    unsigned int        nr_ring_ref;
    blkif_back_rings_t  rings;

    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_inflight;
    int                 requests_finished;
    unsigned int        max_requests;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    GSList              *persistent_regions;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    gboolean            feature_grant_copy;

    /* qemu block driver */
/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    memset(ioreq->page, 0, sizeof(ioreq->page));

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}
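/*
 * int_cmp() orders keys numerically; blk_connect() installs it as the
 * GCompareDataFunc for the persistent_gnts GTree, which is keyed by the
 * (integer) grant reference of each persistently mapped page.
 */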
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_pv_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_pv_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}
static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_pv_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_pv_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= blkdev->max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
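/*
 * Request lifecycle: ioreq_start() pulls an ioreq from the freelist (or
 * allocates a new one, up to max_requests), ioreq_finish() moves it from
 * the inflight list to the finished list once I/O completes, and
 * ioreq_release() returns it to the freelist after the response has been
 * sent (or immediately on a parse error).
 */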
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_pv_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
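/*
 * Worked example of the iovec math above: a segment with first_sect 1 and
 * last_sect 7 contributes (7 - 1 + 1) * 512 = 3584 bytes starting at byte
 * 512 of its grant page.  At this point iov_base only stores that in-page
 * offset; ioreq_map() later adds the mapped page address to turn it into a
 * real pointer.
 */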
static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}
static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));
            if (grant != NULL) {
                page[i] = grant->page;
                xen_pv_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent this granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}
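/*
 * Only the grants mapped in this call and not promoted to persistent ones
 * end up in num_unmap, so ioreq_unmap() tears down just those; pages that
 * made it into persistent_gnts/persistent_regions stay mapped until
 * blk_disconnect().
 */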
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800

static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    int i;

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = NULL;
    }

    qemu_vfree(ioreq->pages);
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
        ioreq->v.iov[i].iov_base = ioreq->page[i];
    }

    return 0;
}
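/*
 * With grant copy the backend never maps the frontend's pages at all:
 * ioreq_init_copy_buffers() allocates local page-aligned buffers and points
 * the iovec at them, and ioreq_grant_copy() below moves the data between
 * those buffers and the foreign grants.
 */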
static int ioreq_grant_copy(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count, rc;
    int64_t file_blk = ioreq->blkdev->file_blk;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    count = ioreq->v.niov;

    for (i = 0; i < count; i++) {
        if (ioreq->req.operation == BLKIF_OP_READ) {
            segs[i].flags = GNTCOPY_dest_gref;
            segs[i].dest.foreign.ref = ioreq->refs[i];
            segs[i].dest.foreign.domid = ioreq->domids[i];
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].source.virt = ioreq->v.iov[i].iov_base;
        } else {
            segs[i].flags = GNTCOPY_source_gref;
            segs[i].source.foreign.ref = ioreq->refs[i];
            segs[i].source.foreign.domid = ioreq->domids[i];
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
        }
        segs[i].len = (ioreq->req.seg[i].last_sect
                       - ioreq->req.seg[i].first_sect + 1) * file_blk;
    }

    rc = xengnttab_grant_copy(gnt, count, segs);

    if (rc) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0,
                      "failed to copy data %d\n", rc);
        ioreq->aio_errors++;
        return -1;
    }

    for (i = 0; i < count; i++) {
        if (segs[i].status != GNTST_okay) {
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "failed to copy data %d for gref %d, domid %d\n",
                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
            ioreq->aio_errors++;
            rc = -1;
        }
    }

    return rc;
}
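/*
 * Copy direction follows the request type: for BLKIF_OP_READ the foreign
 * grant is the destination (GNTCOPY_dest_gref) and the local buffer the
 * source, while writes and flushes use GNTCOPY_source_gref to pull the data
 * from the guest into the local buffer before it is written out.
 */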
#else

static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    abort();
}

#endif

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    if (ioreq->blkdev->feature_grant_copy) {
        switch (ioreq->req.operation) {
        case BLKIF_OP_READ:
            /* in case of failure ioreq->aio_errors is increased */
            if (ret == 0) {
                ioreq_grant_copy(ioreq);
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        case BLKIF_OP_WRITE:
        case BLKIF_OP_FLUSH_DISKCACHE:
            if (!ioreq->req.nr_segments) {
                break;
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        default:
            break;
        }
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    if (!ioreq->blkdev->feature_grant_copy) {
        ioreq_unmap(ioreq);
    }
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining, limit;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
        return false;
    }

    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
    byte_offset = sec_start << BDRV_SECTOR_BITS;
    byte_remaining = sec_count << BDRV_SECTOR_BITS;

    do {
        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
        ioreq->aio_inflight++;
        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
                         qemu_aio_complete, ioreq);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}
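/*
 * The loop above exists because a single blk_aio_pdiscard() call is capped
 * at BDRV_REQUEST_MAX_SECTORS worth of data (just under 2 GiB); a larger
 * discard from the frontend is simply issued as several back-to-back
 * chunks, each bumping aio_inflight so the response is only sent once all
 * of them have completed.
 */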
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->blkdev->feature_grant_copy) {
        ioreq_init_copy_buffers(ioreq);
        if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
            ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            ioreq_grant_copy(ioreq)) {
            ioreq_free_copy_buffers(ioreq);
            goto err;
        }
    } else {
        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
            goto err;
        }
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        if (!ioreq->blkdev->feature_grant_copy) {
            ioreq_unmap(ioreq);
        }
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
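/*
 * aio_inflight doubles as a reference count here: it is incremented once
 * before submission and once per AIO request issued, and the final
 * qemu_aio_complete(ioreq, 0) call drops the initial reference, so the
 * response is only built after every outstanding AIO for this ioreq has
 * completed.
 */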
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  *resp;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.native,
                                 blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id        = ioreq->req.id;
    resp->operation = ioreq->req.operation;
    resp->status    = ioreq->status;

    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_pv_send_notify(&blkdev->xendev);
    }
}
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            };

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
916 static void blk_bh(void *opaque
)
918 struct XenBlkDev
*blkdev
= opaque
;
919 blk_handle_requests(blkdev
);
922 static void blk_alloc(struct XenDevice
*xendev
)
924 struct XenBlkDev
*blkdev
= container_of(xendev
, struct XenBlkDev
, xendev
);
926 QLIST_INIT(&blkdev
->inflight
);
927 QLIST_INIT(&blkdev
->finished
);
928 QLIST_INIT(&blkdev
->freelist
);
929 blkdev
->bh
= qemu_bh_new(blk_bh
, blkdev
);
930 if (xen_mode
!= XEN_EMULATE
) {
935 static void blk_parse_discard(struct XenBlkDev
*blkdev
)
939 blkdev
->feature_discard
= true;
941 if (xenstore_read_be_int(&blkdev
->xendev
, "discard-enable", &enable
) == 0) {
942 blkdev
->feature_discard
= !!enable
;
945 if (blkdev
->feature_discard
) {
946 xenstore_write_be_int(&blkdev
->xendev
, "feature-discard", 1);
950 static int blk_init(struct XenDevice
*xendev
)
952 struct XenBlkDev
*blkdev
= container_of(xendev
, struct XenBlkDev
, xendev
);
954 char *directiosafe
= NULL
;
956 /* read xenstore entries */
957 if (blkdev
->params
== NULL
) {
959 blkdev
->params
= xenstore_read_be_str(&blkdev
->xendev
, "params");
960 if (blkdev
->params
!= NULL
) {
961 h
= strchr(blkdev
->params
, ':');
964 blkdev
->fileproto
= blkdev
->params
;
965 blkdev
->filename
= h
+1;
968 blkdev
->fileproto
= "<unset>";
969 blkdev
->filename
= blkdev
->params
;
972 if (!strcmp("aio", blkdev
->fileproto
)) {
973 blkdev
->fileproto
= "raw";
975 if (!strcmp("vhd", blkdev
->fileproto
)) {
976 blkdev
->fileproto
= "vpc";
978 if (blkdev
->mode
== NULL
) {
979 blkdev
->mode
= xenstore_read_be_str(&blkdev
->xendev
, "mode");
981 if (blkdev
->type
== NULL
) {
982 blkdev
->type
= xenstore_read_be_str(&blkdev
->xendev
, "type");
984 if (blkdev
->dev
== NULL
) {
985 blkdev
->dev
= xenstore_read_be_str(&blkdev
->xendev
, "dev");
987 if (blkdev
->devtype
== NULL
) {
988 blkdev
->devtype
= xenstore_read_be_str(&blkdev
->xendev
, "device-type");
990 directiosafe
= xenstore_read_be_str(&blkdev
->xendev
, "direct-io-safe");
991 blkdev
->directiosafe
= (directiosafe
&& atoi(directiosafe
));
993 /* do we have all we need? */
994 if (blkdev
->params
== NULL
||
995 blkdev
->mode
== NULL
||
996 blkdev
->type
== NULL
||
997 blkdev
->dev
== NULL
) {
1002 if (strcmp(blkdev
->mode
, "w")) {
1003 info
|= VDISK_READONLY
;
1007 if (blkdev
->devtype
&& !strcmp(blkdev
->devtype
, "cdrom")) {
1008 info
|= VDISK_CDROM
;
1011 blkdev
->file_blk
= BLOCK_SIZE
;
1013 blkdev
->feature_grant_copy
=
1014 (xengnttab_grant_copy(blkdev
->xendev
.gnttabdev
, 0, NULL
) == 0);
1016 xen_pv_printf(&blkdev
->xendev
, 3, "grant copy operation %s\n",
1017 blkdev
->feature_grant_copy
? "enabled" : "disabled");
1020 * blk_connect supplies sector-size and sectors
1022 xenstore_write_be_int(&blkdev
->xendev
, "feature-flush-cache", 1);
1023 xenstore_write_be_int(&blkdev
->xendev
, "feature-persistent",
1024 !blkdev
->feature_grant_copy
);
1025 xenstore_write_be_int(&blkdev
->xendev
, "info", info
);
1027 xenstore_write_be_int(&blkdev
->xendev
, "max-ring-page-order",
1028 MAX_RING_PAGE_ORDER
);
1030 blk_parse_discard(blkdev
);
1032 g_free(directiosafe
);
1036 g_free(blkdev
->params
);
1037 blkdev
->params
= NULL
;
1038 g_free(blkdev
->mode
);
1039 blkdev
->mode
= NULL
;
1040 g_free(blkdev
->type
);
1041 blkdev
->type
= NULL
;
1042 g_free(blkdev
->dev
);
1044 g_free(blkdev
->devtype
);
1045 blkdev
->devtype
= NULL
;
1046 g_free(directiosafe
);
1047 blkdev
->directiosafe
= false;
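/*
 * Note the negotiation above: "feature-persistent" is only offered when
 * grant copy is unavailable, since a grant-copy backend never maps frontend
 * pages and so has nothing to keep persistently mapped, while
 * "max-ring-page-order" lets the frontend ask for a multi-page ring (up to
 * 1 << MAX_RING_PAGE_ORDER pages, handled in blk_connect()).
 */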
/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
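/*
 * For example, with 32 requests of up to 11 segments each the worst case
 * above works out to 32 * 11 + 31 * 10 + 1 = 663 grants, while the
 * simplified bound used here reserves 2 * 32 * 11 = 704.
 */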
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;
    bool writethrough = true;
    int order, ring_ref;
    unsigned int ring_size, max_grants;
    unsigned int i;
    uint32_t *domids;

    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = 0;
        writethrough = false;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put_str(options, "driver", blkdev->fileproto);
        }

        /* setup via xenbus -> create new block driver instance */
        xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
        blk_set_enable_write_cache(blkdev->blk, !writethrough);
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_pv_printf(&blkdev->xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not create by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_legacy(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;

        xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_pv_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in number of sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);
    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
                             &order) == -1) {
        blkdev->nr_ring_ref = 1;

        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
                                 &ring_ref) == -1) {
            return -1;
        }
        blkdev->ring_ref[0] = ring_ref;

    } else if (order >= 0 && order <= MAX_RING_PAGE_ORDER) {
        blkdev->nr_ring_ref = 1 << order;

        for (i = 0; i < blkdev->nr_ring_ref; i++) {
            char *key;

            key = g_strdup_printf("ring-ref%u", i);

            if (xenstore_read_fe_int(&blkdev->xendev, key,
                                     &ring_ref) == -1) {
                g_free(key);
                return -1;
            }
            blkdev->ring_ref[i] = ring_ref;

            g_free(key);
        }
    } else {
        xen_pv_printf(xendev, 0, "invalid ring-page-order: %d\n",
                      order);
        return -1;
    }

    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    if (!blkdev->xendev.protocol) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_32;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    } else {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    }
    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        return -1;
    }
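    /*
     * max_requests scales with the negotiated ring size: a single 4 KiB
     * native-protocol ring gives roughly 32 request slots, and a frontend
     * using the full 1 << MAX_RING_PAGE_ORDER (16) pages gets
     * correspondingly more outstanding requests.
     */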
    /* Calculate the maximum number of grants needed by ioreqs */
    max_grants = MAX_GRANTS(blkdev->max_requests,
                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
    /* Add on the number needed for the ring pages */
    max_grants += blkdev->nr_ring_ref;

    if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
        return -1;
    }

    domids = g_new0(uint32_t, blkdev->nr_ring_ref);
    for (i = 0; i < blkdev->nr_ring_ref; i++) {
        domids[i] = blkdev->xendev.dom;
    }

    blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
                                             blkdev->nr_ring_ref,
                                             domids,
                                             blkdev->ring_ref,
                                             PROT_READ | PROT_WRITE);

    g_free(domids);

    if (!blkdev->sring) {
        return -1;
    }

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, ring_size);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = blkdev->max_requests *
                             BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->nr_ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);

    return 0;
}
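/*
 * The GDestroyNotify chosen above mirrors how grants are unmapped on
 * disconnect: with batch_maps the contiguous regions are torn down via
 * persistent_regions/remove_persistent_region(), so the tree only needs
 * g_free() for its values, whereas the non-batch case unmaps every grant
 * individually through destroy_grant().
 */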
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_pv_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
                        blkdev->nr_ring_ref);
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        blkdev->persistent_gnt_count = 0;
        g_slist_foreach(blkdev->persistent_regions,
                        (GFunc)remove_persistent_region, blkdev);
        g_slist_free(blkdev->persistent_regions);

        blkdev->feature_persistent = false;
    }
}
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
};