/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
/* ------------------------------------------------------------- */

static int batch_maps = 0;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
#define MAX_RING_PAGE_ORDER 4
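/*
 * For reference: an order of 4 lets the frontend use up to
 * 1 << 4 = 16 ring pages, i.e. a 64 KiB shared ring on 4 KiB pages.
 */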
    struct XenDevice    xendev;  /* must be first */
    const char          *fileproto;
    unsigned int        ring_ref[1 << MAX_RING_PAGE_ORDER];
    unsigned int        nr_ring_ref;
    blkif_back_rings_t  rings;

    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_inflight;
    int                 requests_finished;
    unsigned int        max_requests;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    GSList              *persistent_regions;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */

/* ------------------------------------------------------------- */
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    memset(ioreq->page, 0, sizeof(ioreq->page));

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_pv_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_pv_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}
static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_pv_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_pv_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= blkdev->max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_pv_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}
static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));
            if (grant != NULL) {
                page[i] = grant->page;
                xen_pv_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * still need to be mapped */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings, only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800

static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    int i;

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = NULL;
    }

    qemu_vfree(ioreq->pages);
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
        ioreq->v.iov[i].iov_base = ioreq->page[i];
    }

    return 0;
}
static int ioreq_grant_copy(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count, rc;
    int64_t file_blk = ioreq->blkdev->file_blk;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    count = ioreq->v.niov;

    for (i = 0; i < count; i++) {
        if (ioreq->req.operation == BLKIF_OP_READ) {
            segs[i].flags = GNTCOPY_dest_gref;
            segs[i].dest.foreign.ref = ioreq->refs[i];
            segs[i].dest.foreign.domid = ioreq->domids[i];
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].source.virt = ioreq->v.iov[i].iov_base;
        } else {
            segs[i].flags = GNTCOPY_source_gref;
            segs[i].source.foreign.ref = ioreq->refs[i];
            segs[i].source.foreign.domid = ioreq->domids[i];
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
        }
        segs[i].len = (ioreq->req.seg[i].last_sect
                       - ioreq->req.seg[i].first_sect + 1) * file_blk;
    }

    rc = xengnttab_grant_copy(gnt, count, segs);

    if (rc) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0,
                      "failed to copy data %d\n", rc);
        ioreq->aio_errors++;
        return -1;
    }

    for (i = 0; i < count; i++) {
        if (segs[i].status != GNTST_okay) {
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "failed to copy data %d for gref %d, domid %d\n",
                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
            ioreq->aio_errors++;
            rc = -1;
        }
    }

    return rc;
}
#else

static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    abort();
}

#endif

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    if (xen_feature_grant_copy) {
        switch (ioreq->req.operation) {
        case BLKIF_OP_READ:
            /* in case of failure ioreq->aio_errors is increased */
            if (ret == 0) {
                ioreq_grant_copy(ioreq);
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        case BLKIF_OP_WRITE:
        case BLKIF_OP_FLUSH_DISKCACHE:
            if (!ioreq->req.nr_segments) {
                break;
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        default:
            break;
        }
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    if (!xen_feature_grant_copy) {
        ioreq_unmap(ioreq);
    }
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining, limit;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
        return false;
    }

    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
    byte_offset = sec_start << BDRV_SECTOR_BITS;
    byte_remaining = sec_count << BDRV_SECTOR_BITS;

    do {
        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
        ioreq->aio_inflight++;
        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
                         qemu_aio_complete, ioreq);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}
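/*
 * Note on the split above: every chunk issued with blk_aio_pdiscard()
 * bumps ioreq->aio_inflight and completes through qemu_aio_complete(),
 * so the single response to the frontend is only sent once the last
 * chunk has finished.
 */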
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (xen_feature_grant_copy) {
        ioreq_init_copy_buffers(ioreq);
        if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
            ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            ioreq_grant_copy(ioreq)) {
            ioreq_free_copy_buffers(ioreq);
            goto err;
        }
    } else {
        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
            goto err;
        }
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        if (!xen_feature_grant_copy) {
            ioreq_unmap(ioreq);
        }
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.native,
                                        blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                        blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                        blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id        = ioreq->req.id;
    resp->operation = ioreq->req.operation;
    resp->status    = ioreq->status;

    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_pv_send_notify(&blkdev->xendev);
    }
}
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            };

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}
static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h + 1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
                  xen_feature_grant_copy ? "enabled" : "disabled");

    /*
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
                          !xen_feature_grant_copy);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
                          MAX_RING_PAGE_ORDER);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}
/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
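/*
 * Worked example (illustrative values, not taken from this file): with
 * max_req = 32 and max_seg = 11 the worst case above is
 * 32 * 11 + 31 * 10 + 1 = 663, which the simpler bound
 * 2 * 32 * 11 = 704 comfortably covers.
 */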
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;
    bool writethrough = true;
    int order, ring_ref;
    unsigned int ring_size, max_grants;
    unsigned int i;
    uint32_t *domids;

    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = 0;
        writethrough = false;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put_str(options, "driver", blkdev->fileproto);
        }

        /* setup via xenbus -> create new block driver instance */
        xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
        blk_set_enable_write_cache(blkdev->blk, !writethrough);
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_pv_printf(&blkdev->xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_legacy(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_pv_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
                             &order) == -1) {
        blkdev->nr_ring_ref = 1;

        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
                                 &ring_ref) == -1) {
            return -1;
        }
        blkdev->ring_ref[0] = ring_ref;
    } else if (order >= 0 && order <= MAX_RING_PAGE_ORDER) {
        blkdev->nr_ring_ref = 1 << order;

        for (i = 0; i < blkdev->nr_ring_ref; i++) {
            char *key;

            key = g_strdup_printf("ring-ref%u", i);
            if (xenstore_read_fe_int(&blkdev->xendev, key,
                                     &ring_ref) == -1) {
                g_free(key);
                return -1;
            }
            blkdev->ring_ref[i] = ring_ref;
            g_free(key);
        }
    } else {
        xen_pv_printf(xendev, 0, "invalid ring-page-order: %d\n",
                      order);
        return -1;
    }

    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    if (!blkdev->xendev.protocol) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_32;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    } else {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    }

    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        return -1;
    }
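    /*
     * For orientation (assumed figure, not stated in this file): a single
     * 4 KiB blkif ring page holds 32 request slots for the native protocol,
     * and each additional ring-page order doubles ring_size and hence
     * max_requests as computed by __CONST_RING_SIZE() above.
     */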
    /* Calculate the maximum number of grants needed by ioreqs */
    max_grants = MAX_GRANTS(blkdev->max_requests,
                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
    /* Add on the number needed for the ring pages */
    max_grants += blkdev->nr_ring_ref;

    blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
    if (blkdev->xendev.gnttabdev == NULL) {
        xen_pv_printf(xendev, 0, "xengnttab_open failed: %s\n",
                      strerror(errno));
        return -1;
    }
    if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
        return -1;
    }

    domids = g_new0(uint32_t, blkdev->nr_ring_ref);
    for (i = 0; i < blkdev->nr_ring_ref; i++) {
        domids[i] = blkdev->xendev.dom;
    }

    blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
                                             blkdev->nr_ring_ref,
                                             domids,
                                             blkdev->ring_ref,
                                             PROT_READ | PROT_WRITE);

    g_free(domids);

    if (!blkdev->sring) {
        return -1;
    }

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, ring_size);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = blkdev->max_requests *
            BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->nr_ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_pv_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
                        blkdev->nr_ring_ref);
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        blkdev->persistent_gnt_count = 0;
        g_slist_foreach(blkdev->persistent_regions,
                        (GFunc)remove_persistent_region, blkdev);
        g_slist_free(blkdev->persistent_regions);
        blkdev->feature_persistent = false;
    }

    if (blkdev->xendev.gnttabdev) {
        xengnttab_close(blkdev->xendev.gnttabdev);
        blkdev->xendev.gnttabdev = NULL;
    }
}
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    blk_disconnect(xendev);

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};