/* drivers/block/xen-blkfront.c */
/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>

#include <asm/xen/hypervisor.h>
enum blkif_state {
        BLKIF_STATE_DISCONNECTED,
        BLKIF_STATE_CONNECTED,
        BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
        struct blkif_request req;
        unsigned long request;
        unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
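
/*
 * __RING_SIZE() rounds the number of slots that fit in one page down to
 * a power of two; with 4 KiB pages this comes to 32 slots, so at most
 * 32 requests (each of up to BLKIF_MAX_SEGMENTS_PER_REQUEST segments)
 * can be in flight on the ring at once.
 */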
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
        struct xenbus_device *xbdev;
        dev_t dev;
        struct gendisk *gd;
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
        int ring_ref;
        struct blkif_front_ring ring;
        unsigned int evtchn, irq;
        struct request_queue *rq;
        struct work_struct work;
        struct gnttab_free_callback callback;
        struct blk_shadow shadow[BLK_RING_SIZE];
        unsigned long shadow_free;
        int feature_barrier;

        /**
         * The number of people holding this device open.  We won't allow a
         * hot-unplug unless this is 0.
         */
        int users;
};
static DEFINE_SPINLOCK(blkif_io_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
        (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF       0

#define PARTS_PER_DISK          16

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define DEV_NAME        "xvd"   /* name in /dev */
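
/*
 * The "virtual-device" number from xenstore packs a major in the high
 * bits and a minor in the low byte, e.g. vdevice 0x811 gives
 * BLKIF_MAJOR 8 and BLKIF_MINOR 17; only the minor is used below for
 * naming and partition layout (PARTS_PER_DISK minors per disk).
 */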
/* Information about our VBDs. */
#define MAX_VBDS 64
static LIST_HEAD(vbds_list);
static int get_id_from_freelist(struct blkfront_info *info)
{
        unsigned long free = info->shadow_free;
        BUG_ON(free > BLK_RING_SIZE);
        info->shadow_free = info->shadow[free].req.id;
        info->shadow[free].req.id = 0x0fffffee; /* debug */
        return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
{
        info->shadow[id].req.id  = info->shadow_free;
        info->shadow[id].request = 0;
        info->shadow_free = id;
}
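
/*
 * The shadow free list threads unused entries through the otherwise
 * idle req.id field: shadow_free holds the index of the first free
 * entry and each free entry's req.id holds the index of the next.
 * E.g. after allocating id 0 from a freshly initialised array,
 * shadow_free == 1 and shadow[1].req.id == 2, and so on up the chain.
 */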
static void blkif_restart_queue_callback(void *arg)
{
        struct blkfront_info *info = (struct blkfront_info *)arg;
        schedule_work(&info->work);
}
static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
        /* We don't have real geometry info, but let's at least return
           values consistent with the size of the device */
        sector_t nsect = get_capacity(bd->bd_disk);
        sector_t cylinders = nsect;

        hg->heads = 0xff;
        hg->sectors = 0x3f;
        sector_div(cylinders, hg->heads * hg->sectors);
        hg->cylinders = cylinders;
        if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
                hg->cylinders = 0xffff;
        return 0;
}
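
/*
 * The fake geometry fixes heads/sectors at 255 and 63, so each
 * "cylinder" covers 255 * 63 = 16065 sectors; a device too large to
 * describe with 65535 cylinders is simply clamped, which is the
 * conventional dodge for the legacy HDIO_GETGEO interface.
 */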
/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
        struct blkfront_info *info = req->rq_disk->private_data;
        unsigned long buffer_mfn;
        struct blkif_request *ring_req;
        struct req_iterator iter;
        struct bio_vec *bvec;
        unsigned long id;
        unsigned int fsect, lsect;
        int ref;
        grant_ref_t gref_head;

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                return 1;

        if (gnttab_alloc_grant_references(
                BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
                gnttab_request_free_callback(
                        &info->callback,
                        blkif_restart_queue_callback,
                        info,
                        BLKIF_MAX_SEGMENTS_PER_REQUEST);
                return 1;
        }

        /* Fill out a communications ring structure. */
        ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
        id = get_id_from_freelist(info);
        info->shadow[id].request = (unsigned long)req;

        ring_req->id = id;
        ring_req->sector_number = (blkif_sector_t)req->sector;
        ring_req->handle = info->handle;

        ring_req->operation = rq_data_dir(req) ?
                BLKIF_OP_WRITE : BLKIF_OP_READ;
        if (blk_barrier_rq(req))
                ring_req->operation = BLKIF_OP_WRITE_BARRIER;

        ring_req->nr_segments = 0;
        rq_for_each_segment(bvec, req, iter) {
                BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
                buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
                fsect = bvec->bv_offset >> 9;
                lsect = fsect + (bvec->bv_len >> 9) - 1;
                /* install a grant reference. */
                ref = gnttab_claim_grant_reference(&gref_head);
                BUG_ON(ref == -ENOSPC);

                gnttab_grant_foreign_access_ref(
                                ref,
                                info->xbdev->otherend_id,
                                buffer_mfn,
                                rq_data_dir(req));

                info->shadow[id].frame[ring_req->nr_segments] =
                                mfn_to_pfn(buffer_mfn);

                ring_req->seg[ring_req->nr_segments] =
                                (struct blkif_request_segment) {
                                        .gref       = ref,
                                        .first_sect = fsect,
                                        .last_sect  = lsect };

                ring_req->nr_segments++;
        }

        info->ring.req_prod_pvt++;

        /* Keep a private copy so we can reissue requests when recovering. */
        info->shadow[id].req = *ring_req;

        gnttab_free_grant_references(gref_head);

        return 0;
}
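
/*
 * Each segment describes a 512-byte-sector range within one granted
 * page: e.g. a bvec at page offset 1024 of length 2048 yields
 * first_sect = 1024 >> 9 = 2 and last_sect = 2 + (2048 >> 9) - 1 = 5,
 * i.e. sectors 2..5 of the page the backend maps via the grant ref.
 */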
static inline void flush_requests(struct blkfront_info *info)
{
        int notify;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

        if (notify)
                notify_remote_via_irq(info->irq);
}
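
/*
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes req_prod_pvt and sets
 * 'notify' only if the backend has not already noticed work queued
 * since it last checked (its req_event), so the event channel is
 * kicked once per batch rather than once per request.
 */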
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
        struct blkfront_info *info = NULL;
        struct request *req;
        int queued;

        pr_debug("Entered do_blkif_request\n");

        queued = 0;

        while ((req = elv_next_request(rq)) != NULL) {
                info = req->rq_disk->private_data;
                if (!blk_fs_request(req)) {
                        end_request(req, 0);
                        continue;
                }

                if (RING_FULL(&info->ring))
                        goto wait;

                pr_debug("do_blk_req %p: cmd %p, sec %lx, "
                         "(%u/%li) buffer:%p [%s]\n",
                         req, req->cmd, (unsigned long)req->sector,
                         req->current_nr_sectors,
                         req->nr_sectors, req->buffer,
                         rq_data_dir(req) ? "write" : "read");

                blkdev_dequeue_request(req);
                if (blkif_queue_request(req)) {
                        blk_requeue_request(rq, req);
wait:
                        /* Avoid pointless unplugs. */
                        blk_stop_queue(rq);
                        break;
                }

                queued++;
        }

        if (queued != 0)
                flush_requests(info);
}
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
        struct request_queue *rq;

        rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
        if (rq == NULL)
                return -1;

        elevator_init(rq, "noop");

        /* Hard sector size and max sectors impersonate the equiv. hardware. */
        blk_queue_hardsect_size(rq, sector_size);
        blk_queue_max_sectors(rq, 512);

        /* Each segment in a request is up to an aligned page in size. */
        blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
        blk_queue_max_segment_size(rq, PAGE_SIZE);

        /* Ensure a merged request will fit in a single I/O ring slot. */
        blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
        blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

        /* Make sure buffer addresses are sector-aligned. */
        blk_queue_dma_alignment(rq, 511);

        gd->queue = rq;

        return 0;
}
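
/*
 * The limits above guarantee that any request the block layer hands us
 * can be expressed in one ring slot: at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments, each confined to a single
 * page by the segment boundary and size limits, so
 * blkif_queue_request() can never overrun ring_req->seg[].
 */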
static int xlvbd_barrier(struct blkfront_info *info)
{
        int err;

        err = blk_queue_ordered(info->rq,
                info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
                NULL);

        if (err)
                return err;

        printk(KERN_INFO "blkfront: %s: barriers %s\n",
               info->gd->disk_name,
               info->feature_barrier ? "enabled" : "disabled");
        return 0;
}
static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
                               int vdevice, u16 vdisk_info, u16 sector_size,
                               struct blkfront_info *info)
{
        struct gendisk *gd;
        int nr_minors = 1;
        int err = -ENODEV;

        BUG_ON(info->gd != NULL);
        BUG_ON(info->rq != NULL);

        if ((minor % PARTS_PER_DISK) == 0)
                nr_minors = PARTS_PER_DISK;

        gd = alloc_disk(nr_minors);
        if (gd == NULL)
                goto out;

        if (nr_minors > 1)
                sprintf(gd->disk_name, "%s%c", DEV_NAME,
                        'a' + minor / PARTS_PER_DISK);
        else
                sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
                        'a' + minor / PARTS_PER_DISK,
                        minor % PARTS_PER_DISK);

        gd->major = XENVBD_MAJOR;
        gd->first_minor = minor;
        gd->fops = &xlvbd_block_fops;
        gd->private_data = info;
        gd->driverfs_dev = &(info->xbdev->dev);
        set_capacity(gd, capacity);

        if (xlvbd_init_blk_queue(gd, sector_size)) {
                del_gendisk(gd);
                goto out;
        }

        info->rq = gd->queue;
        info->gd = gd;

        if (info->feature_barrier)
                xlvbd_barrier(info);

        if (vdisk_info & VDISK_READONLY)
                set_disk_ro(gd, 1);

        if (vdisk_info & VDISK_REMOVABLE)
                gd->flags |= GENHD_FL_REMOVABLE;

        if (vdisk_info & VDISK_CDROM)
                gd->flags |= GENHD_FL_CD;

        return 0;

 out:
        return err;
}
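
/*
 * Device naming follows the classic IDE scheme on a 16-minor stride:
 * minor 0 is the whole disk "xvda" (and claims all 16 minors for its
 * partitions), while e.g. minor 17 names partition 1 of the second
 * disk, "xvdb1".
 */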
static void kick_pending_request_queues(struct blkfront_info *info)
{
        if (!RING_FULL(&info->ring)) {
                /* Re-enable calldowns. */
                blk_start_queue(info->rq);
                /* Kick things off immediately. */
                do_blkif_request(info->rq);
        }
}

static void blkif_restart_queue(struct work_struct *work)
{
        struct blkfront_info *info = container_of(work, struct blkfront_info, work);

        spin_lock_irq(&blkif_io_lock);
        if (info->connected == BLKIF_STATE_CONNECTED)
                kick_pending_request_queues(info);
        spin_unlock_irq(&blkif_io_lock);
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&blkif_io_lock);
        info->connected = suspend ?
                BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
        /* No more blkif_request(). */
        if (info->rq)
                blk_stop_queue(info->rq);
        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
        spin_unlock_irq(&blkif_io_lock);

        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_scheduled_work();

        /* Free resources associated with old device channel. */
        if (info->ring_ref != GRANT_INVALID_REF) {
                gnttab_end_foreign_access(info->ring_ref, 0,
                                          (unsigned long)info->ring.sring);
                info->ring_ref = GRANT_INVALID_REF;
                info->ring.sring = NULL;
        }
        if (info->irq)
                unbind_from_irqhandler(info->irq, info);
        info->evtchn = info->irq = 0;
}
static void blkif_completion(struct blk_shadow *s)
{
        int i;
        for (i = 0; i < s->req.nr_segments; i++)
                gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
        struct request *req;
        struct blkif_response *bret;
        RING_IDX i, rp;
        unsigned long flags;
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
        int error;

        spin_lock_irqsave(&blkif_io_lock, flags);

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
                spin_unlock_irqrestore(&blkif_io_lock, flags);
                return IRQ_HANDLED;
        }

 again:
        rp = info->ring.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */

        for (i = info->ring.rsp_cons; i != rp; i++) {
                unsigned long id;
                int ret;

                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
                req  = (struct request *)info->shadow[id].request;

                blkif_completion(&info->shadow[id]);

                add_id_to_freelist(info, id);

                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
                                       info->gd->disk_name);
                                error = -EOPNOTSUPP;
                                info->feature_barrier = 0;
                                xlvbd_barrier(info);
                        }
                        /* fall through */
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                        if (unlikely(bret->status != BLKIF_RSP_OKAY))
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);

                        ret = __blk_end_request(req, error, blk_rq_bytes(req));
                        BUG_ON(ret);
                        break;
                default:
                        BUG();
                }
        }

        info->ring.rsp_cons = i;

        if (i != info->ring.req_prod_pvt) {
                int more_to_do;
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
                if (more_to_do)
                        goto again;
        } else
                info->ring.sring->rsp_event = i + 1;

        kick_pending_request_queues(info);

        spin_unlock_irqrestore(&blkif_io_lock, flags);

        return IRQ_HANDLED;
}
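
/*
 * The 'again' loop closes the race between draining responses and the
 * backend producing more: RING_FINAL_CHECK_FOR_RESPONSES() re-arms
 * rsp_event before checking one last time, so a response that slips in
 * after the drain either shows up in more_to_do here or raises a fresh
 * event, but is never silently lost.
 */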
static int setup_blkring(struct xenbus_device *dev,
                         struct blkfront_info *info)
{
        struct blkif_sring *sring;
        int err;

        info->ring_ref = GRANT_INVALID_REF;

        sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
        if (!sring) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
        if (err < 0) {
                free_page((unsigned long)sring);
                info->ring.sring = NULL;
                goto fail;
        }
        info->ring_ref = err;

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err)
                goto fail;

        err = bind_evtchn_to_irqhandler(info->evtchn,
                                        blkif_interrupt,
                                        IRQF_SAMPLE_RANDOM, "blkif", info);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err,
                                 "bind_evtchn_to_irqhandler failed");
                goto fail;
        }
        info->irq = err;

        return 0;
fail:
        blkif_free(info, 0);
        return err;
}
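
/*
 * On success the frontend holds three shared resources: one page of
 * ring memory granted to the backend (ring_ref), an unbound event
 * channel (evtchn) for the backend to connect to, and a local IRQ
 * bound to that channel so blkif_interrupt() runs on each backend kick.
 */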
/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
                           struct blkfront_info *info)
{
        const char *message = NULL;
        struct xenbus_transaction xbt;
        int err;

        /* Create shared ring, alloc event channel. */
        err = setup_blkring(dev, info);
        if (err)
                goto out;

again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_blkring;
        }

        err = xenbus_printf(xbt, dev->nodename,
                            "ring-ref", "%u", info->ring_ref);
        if (err) {
                message = "writing ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", info->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_blkring;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        if (message)
                xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
        blkif_free(info, 0);
 out:
        return err;
}
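
/*
 * The transaction above leaves the connection details in xenstore under
 * the frontend's device node, roughly:
 *
 *   .../device/vbd/<id>/ring-ref      = "<grant ref>"
 *   .../device/vbd/<id>/event-channel = "<port>"
 *
 * which the backend reads when it sees us switch to Initialised.
 */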
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        int err, vdevice, i;
        struct blkfront_info *info;

        /* FIXME: Use dynamic device id if this is not set. */
        err = xenbus_scanf(XBT_NIL, dev->nodename,
                           "virtual-device", "%i", &vdevice);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading virtual-device");
                return err;
        }

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
                return -ENOMEM;
        }

        info->xbdev = dev;
        info->vdevice = vdevice;
        info->connected = BLKIF_STATE_DISCONNECTED;
        INIT_WORK(&info->work, blkif_restart_queue);

        for (i = 0; i < BLK_RING_SIZE; i++)
                info->shadow[i].req.id = i+1;
        info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
        dev->dev.driver_data = info;

        err = talk_to_backend(dev, info);
        if (err) {
                kfree(info);
                dev->dev.driver_data = NULL;
                return err;
        }

        return 0;
}
static int blkif_recover(struct blkfront_info *info)
{
        int i;
        struct blkif_request *req;
        struct blk_shadow *copy;
        int j;

        /* Stage 1: Make a safe copy of the shadow state. */
        copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
        if (!copy)
                return -ENOMEM;
        memcpy(copy, info->shadow, sizeof(info->shadow));

        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
        for (i = 0; i < BLK_RING_SIZE; i++)
                info->shadow[i].req.id = i+1;
        info->shadow_free = info->ring.req_prod_pvt;
        info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

        /* Stage 3: Find pending requests and requeue them. */
        for (i = 0; i < BLK_RING_SIZE; i++) {
                /* Not in use? */
                if (copy[i].request == 0)
                        continue;

                /* Grab a request slot and copy shadow state into it. */
                req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
                *req = copy[i].req;

                /* We get a new request id, and must reset the shadow state. */
                req->id = get_id_from_freelist(info);
                memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

                /* Rewrite any grant references invalidated by susp/resume. */
                for (j = 0; j < req->nr_segments; j++)
                        gnttab_grant_foreign_access_ref(
                                req->seg[j].gref,
                                info->xbdev->otherend_id,
                                pfn_to_mfn(info->shadow[req->id].frame[j]),
                                rq_data_dir(
                                        (struct request *)
                                        info->shadow[req->id].request));
                info->shadow[req->id].req = *req;

                info->ring.req_prod_pvt++;
        }

        kfree(copy);

        xenbus_switch_state(info->xbdev, XenbusStateConnected);

        spin_lock_irq(&blkif_io_lock);

        /* Now safe for us to use the shared ring */
        info->connected = BLKIF_STATE_CONNECTED;

        /* Send off requeued requests */
        flush_requests(info);

        /* Kick any other new requests queued since we resumed */
        kick_pending_request_queues(info);

        spin_unlock_irq(&blkif_io_lock);

        return 0;
}
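
/*
 * Recovery works because every in-flight request was mirrored into the
 * shadow array by blkif_queue_request(): after a migration the new
 * ring starts empty, so each shadow entry still marked in use is
 * replayed onto it verbatim, with only the grant table entries (made
 * stale by the move to a new backend domain) re-established.
 */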
/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev->dev.driver_data;
        int err;

        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

        err = talk_to_backend(dev, info);
        if (info->connected == BLKIF_STATE_SUSPENDED && !err)
                err = blkif_recover(info);

        return err;
}
/*
 * Invoked when the backend is finally 'ready' (and has provided the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
        unsigned long long sectors;
        unsigned long sector_size;
        unsigned int binfo;
        int err;

        if ((info->connected == BLKIF_STATE_CONNECTED) ||
            (info->connected == BLKIF_STATE_SUSPENDED))
                return;

        dev_dbg(&info->xbdev->dev, "%s:%s.\n",
                __func__, info->xbdev->otherend);

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "sectors", "%llu", &sectors,
                            "info", "%u", &binfo,
                            "sector-size", "%lu", &sector_size,
                            NULL);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err,
                                 "reading backend fields at %s",
                                 info->xbdev->otherend);
                return;
        }

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "feature-barrier", "%lu", &info->feature_barrier,
                            NULL);
        if (err)
                info->feature_barrier = 0;

        err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
                                  sectors, info->vdevice,
                                  binfo, sector_size, info);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
                                 info->xbdev->otherend);
                return;
        }

        xenbus_switch_state(info->xbdev, XenbusStateConnected);

        /* Kick pending requests. */
        spin_lock_irq(&blkif_io_lock);
        info->connected = BLKIF_STATE_CONNECTED;
        kick_pending_request_queues(info);
        spin_unlock_irq(&blkif_io_lock);

        add_disk(info->gd);
}
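
/*
 * The backend publishes the device parameters in its own xenstore dir
 * (info->xbdev->otherend): "sectors" is the capacity in 512-byte units
 * (it is passed straight to set_capacity()), "sector-size" the logical
 * block size, and "info" the VDISK_* flags; "feature-barrier" is
 * optional, hence the separate, non-fatal gather.
 */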
/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev->dev.driver_data;
        unsigned long flags;

        dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);

        if (info->rq == NULL)
                goto out;

        spin_lock_irqsave(&blkif_io_lock, flags);

        del_gendisk(info->gd);

        /* No more blkif_request(). */
        blk_stop_queue(info->rq);

        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
        spin_unlock_irqrestore(&blkif_io_lock, flags);

        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_scheduled_work();

        blk_cleanup_queue(info->rq);
        info->rq = NULL;

 out:
        xenbus_frontend_closed(dev);
}
/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
        struct blkfront_info *info = dev->dev.driver_data;
        struct block_device *bd;

        dev_dbg(&dev->dev, "blkfront:backend_changed.\n");

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitWait:
        case XenbusStateInitialised:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;

        case XenbusStateConnected:
                blkfront_connect(info);
                break;

        case XenbusStateClosing:
                bd = bdget(info->dev);
                if (bd == NULL) {
                        xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
                        break;
                }

                mutex_lock(&bd->bd_mutex);
                if (info->users > 0)
                        xenbus_dev_error(dev, -EBUSY,
                                         "Device in use; refusing to close");
                else
                        blkfront_closing(dev);
                mutex_unlock(&bd->bd_mutex);
                bdput(bd);
                break;
        }
}
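
/*
 * Only two backend transitions matter here: Connected, which triggers
 * disk creation via blkfront_connect(), and Closing, which we honour
 * only once nobody holds the device open (see blkif_release() for the
 * deferred close); the rest are benign intermediate states.
 */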
static int blkfront_remove(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev->dev.driver_data;

        dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);

        blkif_free(info, 0);

        kfree(info);

        return 0;
}

static int blkif_open(struct inode *inode, struct file *filep)
{
        struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
        info->users++;
        return 0;
}

static int blkif_release(struct inode *inode, struct file *filep)
{
        struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
        info->users--;
        if (info->users == 0) {
                /* Check whether we have been instructed to close.  We will
                   have ignored this request initially, as the device was
                   still mounted. */
                struct xenbus_device *dev = info->xbdev;
                enum xenbus_state state = xenbus_read_driver_state(dev->otherend);

                if (state == XenbusStateClosing)
                        blkfront_closing(dev);
        }
        return 0;
}
static struct block_device_operations xlvbd_block_fops =
{
        .owner = THIS_MODULE,
        .open = blkif_open,
        .release = blkif_release,
        .getgeo = blkif_getgeo,
};

static struct xenbus_device_id blkfront_ids[] = {
        { "vbd" },
        { "" }
};

static struct xenbus_driver blkfront = {
        .name = "vbd",
        .owner = THIS_MODULE,
        .ids = blkfront_ids,
        .probe = blkfront_probe,
        .remove = blkfront_remove,
        .resume = blkfront_resume,
        .otherend_changed = backend_changed,
};
static int __init xlblk_init(void)
{
        if (!is_running_on_xen())
                return -ENODEV;

        if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
                printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
                       XENVBD_MAJOR, DEV_NAME);
                return -ENODEV;
        }

        return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);

static void xlblk_exit(void)
{
        return xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);