/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
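/*
 * A hedged sketch of the userspace side, for orientation only (assumes a
 * TCP socket already connected to an NBD server and a /dev/nbd0 node;
 * nbd-client does the real negotiation):
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_SOCK, sockfd);	// hand the socket to this driver
 *	ioctl(dev, NBD_SET_SIZE, size_bytes);	// advertise the export size
 *	ioctl(dev, NBD_DO_IT);			// blocks until disconnect
 *	ioctl(dev, NBD_DISCONNECT);		// from another thread/process
 */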
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
};

/* bit numbers in nbd_device.runtime_flags, used with {set,test}_bit() */
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3
struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}
static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			loff_t blocksize, loff_t nr_blocks)
{
	int ret;

	ret = set_blocksize(bdev, blocksize);
	if (ret)
		return ret;

	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;

	nbd_size_update(nbd, bdev);

	return 0;
}
static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}
/*
 * Forcibly shut down the sockets, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	/*
	 * If our disconnect packet times out then we're already holding the
	 * config_lock and could deadlock here, so just set an error and
	 * return; we'll handle shutting everything down later.
	 */
	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		return BLK_EH_HANDLED;
	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
		     int size, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/*
	 * Run as a memalloc task so the socket can dip into the emergency
	 * reserves; this device may sit under a filesystem doing writeback.
	 */
	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}
static inline int sock_send_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec, int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
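/*
 * A sketch of the on-wire request header built below, inferred from the
 * conversions in nbd_send_cmd() and the BUILD_BUG_ON() in nbd_init()
 * (all fields big-endian, 28 bytes total):
 *
 *	__be32 magic;		NBD_REQUEST_MAGIC
 *	__be32 type;		NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *	char   handle[8];	opaque; we stash the blk-mq unique tag here
 *	__be64 from;		byte offset into the export
 *	__be32 len;		payload length in bytes
 *
 * A write carries 'len' bytes of data after the header; the server echoes
 * 'handle' back in its reply so we can find the request again.
 */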
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}
static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
			   bvec->bv_len, MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
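/*
 * Note on the tag round-trip (an explanatory sketch, not new mechanism):
 * blk_mq_unique_tag() packs the hardware-queue index into the upper 16 bits
 * and the per-queue tag into the lower 16, e.g. hwq 1, tag 5 -> 0x00010005.
 * nbd_read_stat() below reverses this with blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() to recover the originating request.
 */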
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;

	reply.magic = 0;
	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, index, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error; shut everybody down unless this was the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (req->cmd_type != REQ_TYPE_FS &&
	    req->cmd_type != REQ_TYPE_DRV_PRIV)
		goto error_out;

	if (req->cmd_type == REQ_TYPE_FS &&
	    rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}
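/*
 * nbd_queue_rq() below may sleep (it takes mutexes and performs blocking
 * socket I/O via nbd_handle_cmd()); that is only safe because the tag set
 * is created with BLK_MQ_F_BLOCKING in nbd_init().
 */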
static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}
static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
	struct nbd_sock **socks;
	struct nbd_sock *nsock;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	return 0;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	int i;

	for (i = 0; i < nbd->num_connections; i++)
		kfree(nbd->socks[i]);
	kfree(nbd->socks);
	nbd->socks = NULL;
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	nbd->num_connections = 0;
	nbd->task_setup = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}
static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
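/*
 * Design note: NBD_FLAG_SEND_FLUSH maps to a volatile writeback cache with
 * flush support but no FUA (the second blk_queue_write_cache() argument),
 * so REQ_OP_FLUSH requests become NBD_CMD_FLUSH on the wire.
 */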
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {};
	int i, ret;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(NBD_CMD_DISC);

	for (i = 0; i < nbd->num_connections; i++) {
		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->socks)
			return -EINVAL;

		mutex_unlock(&nbd->config_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->config_lock);

		/* Check again after getting mutex back. */
		if (!nbd->socks)
			return -EINVAL;

		if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
				      &nbd->runtime_flags))
			send_disconnects(nbd);
		return 0;
	}
	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);
		/*
		 * We want to give the run thread a chance to wait for everybody
		 * to clean up and then do its own cleanup.
		 */
		if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
			int i;

			for (i = 0; i < nbd->num_connections; i++)
				kfree(nbd->socks[i]);
			kfree(nbd->socks);
			nbd->socks = NULL;
			nbd->num_connections = 0;
			nbd->task_setup = NULL;
		}
		return 0;
	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_add_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}
	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    div_s64(arg, nbd->blksize));

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;
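	/*
	 * Worked example for the three size ioctls above (all funnel into
	 * nbd_size_set()): with blksize 4096, NBD_SET_SIZE_BLOCKS 262144
	 * yields bytesize 4096 * 262144 = 1 GiB, the same device you would
	 * get from NBD_SET_SIZE with arg = 1073741824.
	 */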
	case NBD_DO_IT: {
		struct recv_thread_args *args;
		int num_connections = nbd->num_connections;
		int error = 0, i;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->socks)
			return -EINVAL;
		if (num_connections > 1 &&
		    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
			dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
			error = -EINVAL;
			goto out_err;
		}

		set_bit(NBD_RUNNING, &nbd->runtime_flags);
		blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
		args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
		if (!args) {
			error = -ENOMEM;
			goto out_err;
		}
		nbd->task_recv = current;
		mutex_unlock(&nbd->config_lock);

		nbd_parse_flags(nbd, bdev);

		error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
		if (error) {
			dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
			goto out_recv;
		}

		nbd_size_update(nbd, bdev);

		nbd_dev_dbg_init(nbd);
		for (i = 0; i < num_connections; i++) {
			sk_set_memalloc(nbd->socks[i]->sock->sk);
			atomic_inc(&nbd->recv_threads);
			INIT_WORK(&args[i].work, recv_work);
			args[i].nbd = nbd;
			args[i].index = i;
			queue_work(system_long_wq, &args[i].work);
		}
		wait_event_interruptible(nbd->recv_wq,
					 atomic_read(&nbd->recv_threads) == 0);
		for (i = 0; i < num_connections; i++)
			flush_work(&args[i].work);
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd, bdev);
		device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
		mutex_lock(&nbd->config_lock);
		nbd->task_recv = NULL;
out_err:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		/* user requested, ignore socket errors */
		if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			error = 0;
		if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
			error = -ETIMEDOUT;

		nbd_reset(nbd);
		return error;
	}
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only; we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that the user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
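	/*
	 * Worked example: max_part=15 gives part_shift=fls(15)=4, so each
	 * disk gets 1 << 4 = 16 minors and max_part is rewritten to
	 * 16 - 1 = 15 usable partitions (minor 0 being the whole disk).
	 */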
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct request_queue *q;
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;

		nbd_dev[i].tag_set.ops = &nbd_mq_ops;
		nbd_dev[i].tag_set.nr_hw_queues = 1;
		nbd_dev[i].tag_set.queue_depth = 128;
		nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
		nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
		nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
		nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

		err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
		if (err) {
			put_disk(disk);
			goto out;
		}

		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
		if (IS_ERR(q)) {
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
			goto out;
		}
		disk->queue = q;

		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		mutex_init(&nbd_dev[i].config_lock);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		init_waitqueue_head(&nbd_dev[i].recv_wq);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_mq_free_tag_set(&nbd_dev[i].tag_set);
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);
MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, uint, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");