18 #include <linux/module.h>
20 #include <linux/sched.h>
23 #include <linux/stat.h>
24 #include <linux/errno.h>
28 #include <linux/compiler.h>
30 #include <linux/kernel.h>
31 #include <linux/slab.h>
33 #include <linux/net.h>
36 #include <asm/uaccess.h>
37 #include <asm/types.h>
39 #include <linux/nbd.h>
/*
 * Sanity-check magic constant (ASCII "Huyh" as a little-endian u32).
 * NOTE(review): its consumer is not visible in this fragment — presumably
 * stored in struct nbd_device and validated before use; confirm against
 * the full driver source.
 */
41 #define NBD_MAGIC 0x68797548
44 #define dprintk(flags, fmt...)
46 #define dprintk(flags, fmt...) do { \
47 if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
/*
 * Per-subsystem debug bit masks. A dprintk(flags, ...) call is emitted
 * only when (debugflags & flags) is non-zero, so these select which
 * message classes the module-level 'debugflags' variable enables.
 */
49 #define DBG_IOCTL 0x0004
50 #define DBG_INIT 0x0010
51 #define DBG_EXIT 0x0020
52 #define DBG_BLKDEV 0x0100
/* Bitmask of DBG_* classes to print via dprintk(); zero disables all. */
55 static unsigned int debugflags;
/*
 * Number of /dev/nbdX devices to create at module load (default 16);
 * used as the loop bound when allocating, registering, and tearing down
 * the gendisk array in nbd_init()/nbd_cleanup().
 */
58 static unsigned int nbds_max = 16;
75 static const char *ioctl_cmd_to_ascii(
int cmd)
89 case BLKROSET:
return "set-read-only";
90 case BLKFLSBUF:
return "flush-buffer-cache";
95 static const char *nbdcmd_to_ascii(
int cmd)
107 static void nbd_end_request(
struct request *
req)
114 req, error ?
"failed" :
"done");
118 spin_unlock_irqrestore(q->queue_lock, flags);
121 static void sock_shutdown(
struct nbd_device *nbd,
int lock)
132 dev_warn(disk_to_dev(nbd->
disk),
"shutting down socket\n");
140 static void nbd_xmit_timeout(
unsigned long arg)
160 unsigned long pflags =
current->flags;
164 "Attempted %s on closed socket in sock_xmit\n",
165 (send ?
"send" :
"recv"));
182 msg.msg_controllen = 0;
190 ti.function = nbd_xmit_timeout;
208 sock_shutdown(nbd, !send);
227 static inline int sock_send_bvec(
struct nbd_device *nbd,
struct bio_vec *bvec,
231 void *kaddr =
kmap(bvec->bv_page);
232 result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
233 bvec->bv_len, flags);
243 unsigned long size = blk_rq_bytes(req);
251 dprintk(
DBG_TX,
"%s: request %p: sending control (%s@%llu,%uB)\n",
252 nbd->
disk->disk_name, req,
254 (
unsigned long long)blk_rq_pos(req) << 9,
260 "Send control failed (result %d)\n", result);
265 struct req_iterator iter;
266 struct bio_vec *bvec;
271 rq_for_each_segment(bvec, req, iter) {
273 if (!rq_iter_last(req, iter))
276 nbd->
disk->disk_name, req, bvec->bv_len);
277 result = sock_send_bvec(nbd, bvec, flags);
280 "Send data failed (result %d)\n",
306 list_del_init(&req->queuelist);
318 static inline int sock_recv_bvec(
struct nbd_device *nbd,
struct bio_vec *bvec)
321 void *kaddr =
kmap(bvec->bv_page);
322 result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
336 result = sock_xmit(nbd, 0, &reply,
sizeof(reply),
MSG_WAITALL);
339 "Receive control failed (result %d)\n", result);
344 dev_err(disk_to_dev(nbd->
disk),
"Wrong magic (0x%lx)\n",
345 (
unsigned long)
ntohl(reply.magic));
350 req = nbd_find_request(nbd, *(
struct request **)reply.handle);
352 result = PTR_ERR(req);
356 dev_err(disk_to_dev(nbd->
disk),
"Unexpected reply (%p)\n",
362 if (
ntohl(reply.error)) {
363 dev_err(disk_to_dev(nbd->
disk),
"Other side returned error (%d)\n",
370 nbd->
disk->disk_name, req);
372 struct req_iterator iter;
373 struct bio_vec *bvec;
375 rq_for_each_segment(bvec, req, iter) {
376 result = sock_recv_bvec(nbd, bvec);
378 dev_err(disk_to_dev(nbd->
disk),
"Receive data failed (result %d)\n",
384 nbd->
disk->disk_name, req, bvec->bv_len);
396 struct gendisk *disk = dev_to_disk(dev);
399 (
long) ((
struct nbd_device *)disk->private_data)->pid);
403 .attr = { .name =
"pid", .mode =
S_IRUGO},
418 dev_err(disk_to_dev(nbd->
disk),
"device_create_file failed!\n");
423 while ((req = nbd_read_stat(nbd)) !=
NULL)
424 nbd_end_request(req);
431 static void nbd_clear_que(
struct nbd_device *nbd)
451 list_del_init(&req->queuelist);
453 nbd_end_request(req);
459 list_del_init(&req->queuelist);
461 nbd_end_request(req);
468 if (req->cmd_type != REQ_TYPE_FS)
472 if (rq_data_dir(req) ==
WRITE) {
480 "Write on read-only\n");
491 "Attempted send on closed socket\n");
497 if (nbd_send_req(nbd, req) != 0) {
498 dev_err(disk_to_dev(nbd->
disk),
"Request send failed\n");
500 nbd_end_request(req);
515 nbd_end_request(req);
518 static int nbd_thread(
void *
data)
537 list_del_init(&req->queuelist);
541 nbd_handle_req(nbd, req);
560 spin_unlock_irq(q->queue_lock);
563 req->rq_disk->disk_name, req, req->cmd_type);
565 nbd = req->rq_disk->private_data;
571 "Attempted send on closed socket\n");
573 nbd_end_request(req);
574 spin_lock_irq(q->queue_lock);
584 spin_lock_irq(q->queue_lock);
591 unsigned int cmd,
unsigned long arg)
600 sreq.cmd_type = REQ_TYPE_SPECIAL;
604 nbd_send_req(nbd, &sreq);
631 nbd->
sock = SOCKET_I(inode);
685 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
689 if (IS_ERR(thread)) {
691 return PTR_ERR(thread);
694 error = nbd_do_it(nbd);
700 sock_shutdown(nbd, 0);
705 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->
disk->queue);
710 set_capacity(nbd->
disk, 0);
726 "next = %p, prev = %p, head = %p\n",
735 unsigned int cmd,
unsigned long arg)
747 nbd->
disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
750 error = __nbd_ioctl(bdev, nbd, cmd, arg);
756 static const struct block_device_operations nbd_fops =
767 static int __init nbd_init(
void)
780 nbd_dev = kcalloc(nbds_max,
sizeof(*nbd_dev),
GFP_KERNEL);
786 part_shift = fls(max_part);
796 max_part = (1
UL << part_shift) - 1;
799 if ((1
UL << part_shift) > DISK_MAX_PARTS)
805 for (i = 0; i < nbds_max; i++) {
806 struct gendisk *disk =
alloc_disk(1 << part_shift);
809 nbd_dev[
i].
disk = disk;
823 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
824 disk->queue->limits.discard_granularity = 512;
825 disk->queue->limits.max_discard_sectors =
UINT_MAX;
826 disk->queue->limits.discard_zeroes_data = 0;
837 for (i = 0; i < nbds_max; i++) {
838 struct gendisk *disk = nbd_dev[
i].
disk;
842 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
844 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
851 disk->first_minor = i << part_shift;
852 disk->fops = &nbd_fops;
853 disk->private_data = &nbd_dev[
i];
854 sprintf(disk->disk_name,
"nbd%d", i);
855 set_capacity(disk, 0);
869 static void __exit nbd_cleanup(
void)
872 for (i = 0; i < nbds_max; i++) {
873 struct gendisk *disk = nbd_dev[
i].
disk;
/* modinfo description for the 'nbds_max' module parameter declared above. */
893 MODULE_PARM_DESC(nbds_max,
"number of network block devices to initialize (default: 16)");