/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/net.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/types.h>

#include <linux/nbd.h>
#define LO_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
#endif /* NDEBUG */
static unsigned int debugflags;

static unsigned int nbds_max = 16;
static struct nbd_device nbd_dev[MAX_NBD];
/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);
static const char *ioctl_cmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_SET_SOCK: return "set-sock";
	case NBD_SET_BLKSIZE: return "set-blksize";
	case NBD_SET_SIZE: return "set-size";
	case NBD_DO_IT: return "do-it";
	case NBD_CLEAR_SOCK: return "clear-sock";
	case NBD_CLEAR_QUE: return "clear-que";
	case NBD_PRINT_DEBUG: return "print-debug";
	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
	case NBD_DISCONNECT: return "disconnect";
	case BLKROSET: return "set-read-only";
	case BLKFLSBUF: return "flush-buffer-cache";
	}
	return "unknown";
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	}
	return "invalid";
}
static void nbd_end_request(struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
			req, error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request(req, error, req->nr_sectors << 9);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void sock_shutdown(struct nbd_device *lo, int lock)
{
	/* Forcibly shutdown the socket causing all listeners
	 * to error
	 *
	 * FIXME: This code is duplicated from sys_shutdown, but
	 * there should be a more generic interface rather than
	 * calling socket ops directly here */
	if (lock)
		mutex_lock(&lo->tx_lock);
	if (lo->sock) {
		printk(KERN_WARNING "%s: shutting down socket\n",
			lo->disk->disk_name);
		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
		lo->sock = NULL;
	}
	if (lock)
		mutex_unlock(&lo->tx_lock);
}
static void nbd_xmit_timeout(unsigned long arg)
{
	struct task_struct *task = (struct task_struct *)arg;

	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
		task->comm, task->pid);
	force_sig(SIGKILL, task);
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = lo->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (lo->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + lo->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (lo->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(lo, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);

	return result;
}
static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = req->nr_sectors << 9;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));
	request.from = cpu_to_be64((u64) req->sector << 9);
	request.len = htonl(size);
	memcpy(request.handle, &req, sizeof(req));

	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
			lo->disk->disk_name, req,
			nbdcmd_to_ascii(nbd_cmd(req)),
			(unsigned long long)req->sector << 9,
			req->nr_sectors << 9);
	result = sock_xmit(lo, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		printk(KERN_ERR "%s: Send control failed (result %d)\n",
				lo->disk->disk_name, result);
		goto error_out;
	}

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec *bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(req, iter))
				flags = MSG_MORE;
			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
					lo->disk->disk_name, req, bvec->bv_len);
			result = sock_send_bvec(lo, bvec, flags);
			if (result <= 0) {
				printk(KERN_ERR "%s: Send data failed (result %d)\n",
						lo->disk->disk_name, result);
				goto error_out;
			}
		}
	}
	return 0;

error_out:
	return 1;
}
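/*
 * For reference, the wire request written above is the fixed 28-byte
 * structure from <linux/nbd.h>; the field widths below are a sketch
 * inferred from that header and the BUILD_BUG_ON in nbd_init():
 *
 *	struct nbd_request {
 *		__be32 magic;		// NBD_REQUEST_MAGIC
 *		__be32 type;		// NBD_CMD_READ/WRITE/DISC
 *		char   handle[8];	// opaque cookie echoed by the server
 *		__be64 from;		// starting byte offset
 *		__be32 len;		// transfer length in bytes
 *	};
 *
 * Note the memcpy() above: the struct request pointer itself is stored
 * in the handle, which is how nbd_read_stat() matches replies to their
 * originating requests.
 */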
static struct request *nbd_find_request(struct nbd_device *lo,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
	if (unlikely(err))
		goto out;

	spin_lock(&lo->queue_lock);
	list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&lo->queue_lock);
		return req;
	}
	spin_unlock(&lo->queue_lock);

	err = -ENOENT;

out:
	return ERR_PTR(err);
}
static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *lo)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		printk(KERN_ERR "%s: Receive control failed (result %d)\n",
				lo->disk->disk_name, result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
				lo->disk->disk_name,
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	req = nbd_find_request(lo, *(struct request **)reply.handle);
	if (unlikely(IS_ERR(req))) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
				lo->disk->disk_name, reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		printk(KERN_ERR "%s: Other side returned error (%d)\n",
				lo->disk->disk_name, ntohl(reply.error));
		req->errors++;
		return req;
	}

	dprintk(DBG_RX, "%s: request %p: got reply\n",
			lo->disk->disk_name, req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		struct req_iterator iter;
		struct bio_vec *bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(lo, bvec);
			if (result <= 0) {
				printk(KERN_ERR "%s: Receive data failed (result %d)\n",
						lo->disk->disk_name, result);
				req->errors++;
				return req;
			}
			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
				lo->disk->disk_name, req, bvec->bv_len);
		}
	}
	return req;
harderror:
	lo->harderror = result;
	return NULL;
}
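/*
 * The reply parsed above is the matching 16-byte wire structure, again
 * sketched from <linux/nbd.h>:
 *
 *	struct nbd_reply {
 *		__be32 magic;		// NBD_REPLY_MAGIC
 *		__be32 error;		// 0 = success
 *		char   handle[8];	// cookie copied from the request
 *	};
 *
 * Casting handle back to a struct request * is safe here only because
 * this driver put that pointer there in nbd_send_req().
 */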
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO, .owner = THIS_MODULE },
	.show = pid_show,
};
static int nbd_do_it(struct nbd_device *lo)
{
	struct request *req;
	int ret;

	BUG_ON(lo->magic != LO_MAGIC);

	lo->pid = current->pid;
	ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr);
	if (ret) {
		printk(KERN_ERR "nbd: sysfs_create_file failed!");
		return ret;
	}

	while ((req = nbd_read_stat(lo)) != NULL)
		nbd_end_request(req);

	sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr);
	return 0;
}
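/*
 * The sysfs attribute registered above appears next to the disk, so
 * userspace can discover which process is serving a device, e.g.
 * (illustrative path for the first device):
 *
 *	cat /sys/block/nbd0/pid
 */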
static void nbd_clear_que(struct nbd_device *lo)
{
	struct request *req;

	BUG_ON(lo->magic != LO_MAGIC);

	/*
	 * Because we have set lo->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(lo->sock);
	BUG_ON(lo->active_req);

	while (!list_empty(&lo->queue_head)) {
		req = list_entry(lo->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}
}
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */
static void do_nbd_request(struct request_queue * q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct nbd_device *lo;

		blkdev_dequeue_request(req);
		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
				req->rq_disk->disk_name, req, req->cmd_type);

		if (!blk_fs_request(req))
			goto error_out;

		lo = req->rq_disk->private_data;

		BUG_ON(lo->magic != LO_MAGIC);

		nbd_cmd(req) = NBD_CMD_READ;
		if (rq_data_dir(req) == WRITE) {
			nbd_cmd(req) = NBD_CMD_WRITE;
			if (lo->flags & NBD_READ_ONLY) {
				printk(KERN_ERR "%s: Write on read-only\n",
						lo->disk->disk_name);
				goto error_out;
			}
		}

		req->errors = 0;

		spin_unlock_irq(q->queue_lock);

		mutex_lock(&lo->tx_lock);
		if (unlikely(!lo->sock)) {
			mutex_unlock(&lo->tx_lock);
			printk(KERN_ERR "%s: Attempted send on closed socket\n",
			       lo->disk->disk_name);
			req->errors++;
			nbd_end_request(req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		lo->active_req = req;

		if (nbd_send_req(lo, req) != 0) {
			printk(KERN_ERR "%s: Request send failed\n",
					lo->disk->disk_name);
			req->errors++;
			nbd_end_request(req);
		} else {
			spin_lock(&lo->queue_lock);
			list_add(&req->queuelist, &lo->queue_head);
			spin_unlock(&lo->queue_lock);
		}

		lo->active_req = NULL;
		mutex_unlock(&lo->tx_lock);
		wake_up_all(&lo->active_wq);

		spin_lock_irq(q->queue_lock);
		continue;

error_out:
		req->errors++;
		spin_unlock(q->queue_lock);
		nbd_end_request(req);
		spin_lock(q->queue_lock);
	}
}
static int nbd_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
	int error;
	struct request sreq;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(lo->magic != LO_MAGIC);

	/* Anyone capable of this syscall can do *real bad* things */
	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

	switch (cmd) {
	case NBD_DISCONNECT:
		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
		sreq.cmd_type = REQ_TYPE_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;
		/*
		 * Set these to sane values in case server implementation
		 * fails to check the request type first and also to keep
		 * debugging output cleaner.
		 */
		sreq.sector = 0;
		sreq.nr_sectors = 0;
		if (!lo->sock)
			return -EINVAL;
		mutex_lock(&lo->tx_lock);
		nbd_send_req(lo, &sreq);
		mutex_unlock(&lo->tx_lock);
		return 0;

	case NBD_CLEAR_SOCK:
		error = 0;
		mutex_lock(&lo->tx_lock);
		lo->sock = NULL;
		mutex_unlock(&lo->tx_lock);
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		BUG_ON(!list_empty(&lo->queue_head));
		if (file)
			fput(file);
		return error;

	case NBD_SET_SOCK:
		if (lo->file)
			return -EBUSY;
		error = -EINVAL;
		file = fget(arg);
		if (file) {
			inode = file->f_path.dentry->d_inode;
			if (S_ISSOCK(inode->i_mode)) {
				lo->file = file;
				lo->sock = SOCKET_I(inode);
				error = 0;
			} else {
				fput(file);
			}
		}
		return error;

	case NBD_SET_BLKSIZE:
		lo->blksize = arg;
		lo->bytesize &= ~(lo->blksize-1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		lo->bytesize = arg & ~(lo->blksize-1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		lo->xmit_timeout = arg * HZ;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		lo->bytesize = ((u64) arg) * lo->blksize;
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;

	case NBD_DO_IT:
		if (!lo->file)
			return -EINVAL;
		error = nbd_do_it(lo);
		if (error)
			return error;
		sock_shutdown(lo, 1);
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
		if (file)
			fput(file);
		lo->bytesize = 0;
		inode->i_bdev->bd_inode->i_size = 0;
		set_capacity(lo->disk, 0);
		return lo->harderror;

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
		return 0;

	case NBD_PRINT_DEBUG:
		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
			inode->i_bdev->bd_disk->disk_name,
			lo->queue_head.next, lo->queue_head.prev,
			&lo->queue_head);
		return 0;
	}
	return -EINVAL;
}
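/*
 * For illustration, a minimal userspace client would drive this ioctl
 * interface roughly as sketched below (error handling and the actual
 * server address omitted; nblocks is a made-up variable):
 *
 *	int sk = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(sk, ...);			// reach an nbd-server
 *	int nbd = open("/dev/nbd0", O_RDWR);
 *	ioctl(nbd, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, nblocks);
 *	ioctl(nbd, NBD_SET_SOCK, sk);		// hand the socket to the kernel
 *	ioctl(nbd, NBD_DO_IT);			// blocks until disconnect
 *	ioctl(nbd, NBD_CLEAR_QUE);
 *	ioctl(nbd, NBD_CLEAR_SOCK);
 */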
static struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (nbds_max > MAX_NBD) {
		printk(KERN_CRIT "nbd: cannot allocate more than %u nbds; %u requested.\n", MAX_NBD,
				nbds_max);
		return -EINVAL;
	}

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].file = NULL;
		nbd_dev[i].magic = LO_MAGIC;
		nbd_dev[i].flags = 0;
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize.");
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");