xen_disk: fix memory leak
[qemu/ar7.git] / hw / xen_disk.c
blob b7c7977870069546a249499d36356e397c7f7840
/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw.h"
#include "xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

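/*
 * Per-instance state of the block backend: the xenstore configuration,
 * the shared ring, the request lists and the qemu block driver handle.
 */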
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

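/*
 * Get an ioreq to work with: reuse one from the freelist if possible,
 * otherwise allocate a new one until max_requests is reached.  Returns
 * NULL when no request slot is available.
 */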
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

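/* Move a completed request from the inflight list to the finished list. */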
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

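/* Reset an ioreq and return it to the freelist for reuse. */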
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void *)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

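/* Undo the grant mappings established by ioreq_map(). */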
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

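/*
 * Map the frontend's grant references so each iovec entry points at a
 * locally accessible page, either with one batched call or one grant at
 * a time.
 */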
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    ioreq->mapped = 1;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

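/*
 * Completion callback for the block-layer AIO: re-issues the request after
 * a pre-flush, issues the post-flush when required, and once the last
 * sub-request has finished unmaps the grants, moves the ioreq to the
 * finished list and kicks the bottom half.
 */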
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

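/*
 * Map the request's grants and submit it to the qemu block layer as
 * asynchronous I/O (read, write, or a flush for an empty write barrier).
 */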
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

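/*
 * Put the response for one ioreq on the shared ring.  Returns nonzero if
 * the frontend needs to be notified, and bumps more_work when further
 * requests are already waiting on the ring.
 */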
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

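/*
 * Copy the next request off the shared ring, converting from the 32-bit
 * or 64-bit frontend layout when necessary.
 */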
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

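/*
 * Main request loop: flush finished responses, then consume new requests
 * from the ring, parse them and start the I/O.  Reschedules the bottom
 * half if more work is pending and request slots are available.
 */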
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}
/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
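/*
 * Worked example (a sketch, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST
 * value of 11 together with the max_requests default of 32 above): the exact
 * worst case would be 32 * 11 + 31 * 10 + 1 = 663 grants, while the
 * simplified bound reserves MAX_GRANTS(32, 11) = 704.
 */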
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

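/*
 * Read the backend configuration from xenstore, open (or pick up) the
 * block device and publish size and feature information back to xenstore.
 * Frees the xenstore strings again on the error path.
 */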
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;

        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

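/*
 * Map the frontend's shared ring, set up the back ring for the negotiated
 * protocol and bind the event channel.
 */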
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

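/*
 * Close the block device (only if we opened it ourselves), unbind the
 * event channel and unmap the shared ring.
 */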
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

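/*
 * Final teardown: disconnect if still connected, destroy the cached
 * ioreqs and free the strings read from xenstore.
 */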
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

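/* Event channel notification from the frontend: defer work to the bottom half. */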
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};