#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/slab.h>
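/*
 * Excerpts from the FUSE device implementation (fs/fuse/dev.c):
 * request allocation and queueing, the /dev/fuse read and write
 * paths, splice support, notify handling, poll, and connection
 * teardown.
 */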
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);

        fuse_request_init(req);
        fuse_request_init(req);
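/*
 * fuse_request_init() zeroes the request and initializes its list
 * head before every use; both request allocators (presumably the
 * GFP_KERNEL and the nofs variant in this era of the file) call it on
 * a freshly allocated struct fuse_req.
 */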
static void block_sigs(sigset_t *oldset)
static void restore_sigs(sigset_t *oldset)

static void __fuse_get_request(struct fuse_req *req)
static void __fuse_put_request(struct fuse_req *req)
static void fuse_req_init_context(struct fuse_req *req)
        restore_sigs(&oldset);
        fuse_req_init_context(req);

        spin_lock(&fc->lock);
        spin_unlock(&fc->lock);

        spin_lock(&fc->lock);
        fuse_request_init(req);
        spin_unlock(&fc->lock);

        req = get_reserved_req(fc, file);
        fuse_req_init_context(req);
        put_reserved_req(fc, req);
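/*
 * Reserved requests: each open fuse file keeps one request parked so
 * that paths which must not fail to allocate (the FLUSH path on
 * release) can always get one; get_reserved_req()/put_reserved_req()
 * hand it out and return it under fc->lock.
 */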
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}
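/*
 * len_args() sums the declared sizes of a request's in- or out-
 * arguments; the queueing code uses it to compute in.h.len, the total
 * message length userspace must read.
 */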
        spin_lock(&fc->lock);
        spin_unlock(&fc->lock);

static void flush_bg_queue(struct fuse_conn *fc)

                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
        spin_unlock(&fc->lock);

static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)

        spin_unlock(&fc->lock);
        spin_lock(&fc->lock);

        wait_answer_interruptible(fc, req);
        queue_interrupt(fc, req);
        wait_answer_interruptible(fc, req);
        restore_sigs(&oldset);
        __fuse_put_request(req);
        spin_unlock(&fc->lock);
        spin_lock(&fc->lock);

        spin_unlock(&fc->lock);
        spin_lock(&fc->lock);

        spin_lock(&fc->lock);
                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
                __fuse_get_request(req);
                request_wait_answer(fc, req);
        spin_unlock(&fc->lock);
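/*
 * The synchronous send path (fuse_request_send()): under fc->lock the
 * request gets a unique id, is queued on fc->pending, pinned with an
 * extra reference so an abort cannot free it while we sleep, and then
 * waited on until userspace writes the answer back.
 */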
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
                                            struct fuse_req *req)

        spin_lock(&fc->lock);
        fuse_request_send_nowait_locked(fc, req);
        spin_unlock(&fc->lock);
        request_end(fc, req);

        fuse_request_send_nowait(fc, req);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
                                           struct fuse_req *req, u64 unique)

        req->in.h.unique = unique;
        spin_lock(&fc->lock);
        queue_request(fc, req);
        spin_unlock(&fc->lock);

        fuse_request_send_nowait_locked(fc, req);
        spin_lock(&fc->lock);
        spin_unlock(&fc->lock);

        spin_lock(&fc->lock);
        spin_unlock(&fc->lock);
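/*
 * Background sends (the _nowait variants) only queue the request; the
 * answer is consumed later through the request's end() callback.
 * Notify replies reuse the unique id of the kernel-initiated
 * notification instead of allocating a fresh one.
 */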
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write,
                           const struct iovec *iov, unsigned long nr_segs)

        memset(cs, 0, sizeof(*cs));

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        err = buf->ops->confirm(cs->pipe, buf);

        return lock_request(cs->fc, cs->req);

        unsigned ncpy = min(*size, cs->len);
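/*
 * struct fuse_copy_state tracks an in-progress copy between a
 * request's args/pages and either a user iovec or a pipe buffer;
 * fuse_copy_do() moves min(*size, cs->len) bytes at a time while the
 * fill/finish helpers map and unmap the next chunk.
 */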
static int fuse_check_page(struct page *page)

        if (page_mapcount(page) ||
            page_count(page) != 1 ||

        printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, "
               "mapcount=%i, mapping=%p\n",
               page, page->index, page->flags, page_count(page),
               page_mapcount(page), page->mapping);
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)

        struct page *oldpage = *pagep;
        struct page *newpage;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        err = buf->ops->confirm(cs->pipe, buf);

        if (buf->ops->steal(cs->pipe, buf) != 0)

        if (WARN_ON(!PageUptodate(newpage)))

        ClearPageMappedToDisk(newpage);

        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;

        index = oldpage->index;

        if (WARN_ON(page_mapped(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(page_has_private(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;

        lru_cache_add_file(newpage);

        spin_lock(&cs->fc->lock);
        if (cs->req->aborted)
        spin_unlock(&cs->fc->lock);

        err = lock_request(cs->fc, cs->req);
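/*
 * Zero-copy path for splice(2) writes to /dev/fuse: rather than
 * copying the payload, try to steal the pipe buffer's page and swap
 * it into the page cache in place of the old one.  The old page must
 * be unmapped, clean, private-less and not mlocked, or the code falls
 * back to an ordinary copy via the out_fallback labels.
 */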
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                          unsigned offset, unsigned count, int zeroing)

        struct page *page = *pagep;

        if (page && zeroing && count < PAGE_SIZE)
                clear_highpage(page);

                return fuse_ref_page(cs, page, offset, count);
        } else if (!cs->len) {
                        err = fuse_try_move_page(cs, pagep);
                        err = fuse_copy_fill(cs);

                void *buf = mapaddr + offset;
                offset += fuse_copy_do(cs, &buf, &count);
                offset += fuse_copy_do(cs, NULL, &count);

        if (page && !cs->write)
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)

        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                err = fuse_copy_page(cs, &req->pages[i], offset, count,
                                     zeroing);
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)

                int err = fuse_copy_fill(cs);
                fuse_copy_do(cs, &val, &size);

static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)

        for (i = 0; !err && i < numargs; i++) {
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
static int forget_pending(struct fuse_conn *fc)
static int request_pending(struct fuse_conn *fc)

static void request_wait(struct fuse_conn *fc)

        while (fc->connected && !request_pending(fc)) {
                spin_unlock(&fc->lock);
                spin_lock(&fc->lock);
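/*
 * request_wait() sleeps on fc->waitq, dropping fc->lock around the
 * schedule, and wakes when a request or forget is queued or the
 * connection dies; this is what blocks a read of /dev/fuse while the
 * queues are empty.
 */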
static int fuse_read_interrupt(struct fuse_conn *fc,
                               struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)

        unsigned reqsize = sizeof(ih) + sizeof(arg);

        memset(&ih, 0, sizeof(ih));
        arg.unique = req->in.h.unique;

        spin_unlock(&fc->lock);
        if (nbytes < reqsize)

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        return err ? err : reqsize;
        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;
static int fuse_read_single_forget(struct fuse_conn *fc,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)

                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),

        spin_unlock(&fc->lock);
        if (nbytes < ih.len)

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);
static int fuse_read_batch_forget(struct fuse_conn *fc,
                                  struct fuse_copy_state *cs, size_t nbytes)

        unsigned max_forgets;

                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),

        if (nbytes < ih.len) {
                spin_unlock(&fc->lock);

        head = dequeue_forget(fc, max_forgets, &count);
        spin_unlock(&fc->lock);

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));

                head = forget->next;

        fuse_copy_finish(cs);

                return fuse_read_single_forget(fc, cs, nbytes);
        else
                return fuse_read_batch_forget(fc, cs, nbytes);
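/*
 * FORGET coalescing: connections that negotiated protocol minor >= 16
 * can receive one FUSE_BATCH_FORGET message carrying many
 * (nodeid, nlookup) pairs per read; older daemons get one FUSE_FORGET
 * per message.  Wire-layout sketch, using the UAPI types from
 * <linux/fuse.h>:
 *
 *      struct fuse_in_header        ih;     ih.opcode = FUSE_BATCH_FORGET
 *      struct fuse_batch_forget_in  arg;    arg.count = N
 *      struct fuse_forget_one       one[N]; (nodeid, nlookup) pairs
 */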
        spin_lock(&fc->lock);
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            !request_pending(fc))
                goto err_unlock;

        if (!request_pending(fc))

                return fuse_read_interrupt(fc, cs, nbytes, req);

        if (forget_pending(fc)) {
                        return fuse_read_forget(fc, cs, nbytes);

        list_move(&req->list, &fc->io);

        reqsize = in->h.len;

        if (nbytes < reqsize) {
                request_end(fc, req);

        spin_unlock(&fc->lock);
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        fuse_copy_finish(cs);
        spin_lock(&fc->lock);

                request_end(fc, req);
                request_end(fc, req);
                request_end(fc, req);
                queue_interrupt(fc, req);
        spin_unlock(&fc->lock);

        spin_unlock(&fc->lock);
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)

        struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);

        fuse_copy_init(&cs, fc, 1, iov, nr_segs);
        return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
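/*
 * For reference, a minimal userspace counterpart (not part of this
 * file): a daemon read loop over /dev/fuse.  Sketch only; names come
 * from the UAPI header <linux/fuse.h>, and the fd is assumed to come
 * from mount(2) with the "fd=" option.  A real daemon sizes the
 * buffer from the INIT exchange (max_write plus header) rather than
 * relying on FUSE_MIN_READ_BUFFER.
 */
#include <linux/fuse.h>
#include <unistd.h>

static void fuse_daemon_loop_sketch(int devfd)
{
        char buf[FUSE_MIN_READ_BUFFER];

        for (;;) {
                /* each read() returns exactly one request message */
                ssize_t n = read(devfd, buf, sizeof(buf));
                if (n <= 0)
                        break;  /* e.g. ENODEV after unmount/abort */
                struct fuse_in_header *in = (struct fuse_in_header *)buf;
                /* dispatch on in->opcode; reply by write()ing a
                 * fuse_out_header carrying the same in->unique */
                (void)in;
        }
}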
        .steal = fuse_dev_pipe_buf_steal,

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)

        struct fuse_conn *fc = fuse_get_conn(in);

        fuse_copy_init(&cs, fc, 1, NULL, 0);
        ret = fuse_dev_do_read(fc, in, &cs, len);

        while (page_nr < cs.nr_segs) {
                buf->len = bufs[page_nr].len;
                buf->ops = &fuse_dev_pipe_buf_ops;

        if (waitqueue_active(&pipe->wait))

        for (; page_nr < cs.nr_segs; page_nr++)
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)

        if (size != sizeof(outarg))

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));

        fuse_copy_finish(cs);
        fuse_copy_finish(cs);
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)

        if (size != sizeof(outarg))

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        fuse_copy_finish(cs);

                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);

        fuse_copy_finish(cs);
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)

        if (size < sizeof(outarg))

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));

        if (size != sizeof(outarg) + outarg.namelen + 1)

        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;

        fuse_copy_finish(cs);
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
                              struct fuse_copy_state *cs)

        if (size < sizeof(outarg))

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));

        if (size != sizeof(outarg) + outarg.namelen + 1)

        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;

                err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
                                               outarg.child, &name);

        fuse_copy_finish(cs);
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
                             struct fuse_copy_state *cs)

        if (size < sizeof(outarg))

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));

        if (size - sizeof(outarg) != outarg.size)

        nodeid = outarg.nodeid;

        file_size = i_size_read(inode);
        end = outarg.offset + outarg.size;
        if (end > file_size) {

                unsigned int this_num;

                page = find_or_create_page(mapping, index,
                                           mapping_gfp_mask(mapping));

                err = fuse_copy_page(cs, &page, offset, this_num, 0);
                if (!err && offset == 0 && (num != 0 || file_size == end))
                        SetPageUptodate(page);

        fuse_copy_finish(cs);
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                         struct fuse_notify_retrieve_out *outarg)

                return PTR_ERR(req);

        req->in.numargs = 2;
        req->in.argpages = 1;
        req->end = fuse_retrieve_end;

        file_size = i_size_read(inode);

        if (outarg->offset > file_size)
                num = 0;
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;

                unsigned int this_num;

                total_len += this_num;

        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
        if (err)
                fuse_retrieve_end(fc, req);
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
                                struct fuse_copy_state *cs)

        struct inode *inode;

        if (size != sizeof(outarg))

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        fuse_copy_finish(cs);

                err = fuse_retrieve(fc, inode, &outarg);

        fuse_copy_finish(cs);
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);
        case FUSE_NOTIFY_INVAL_INODE:
                return fuse_notify_inval_inode(fc, size, cs);
        case FUSE_NOTIFY_INVAL_ENTRY:
                return fuse_notify_inval_entry(fc, size, cs);
        case FUSE_NOTIFY_STORE:
                return fuse_notify_store(fc, size, cs);
        case FUSE_NOTIFY_RETRIEVE:
                return fuse_notify_retrieve(fc, size, cs);
        case FUSE_NOTIFY_DELETE:
                return fuse_notify_delete(fc, size, cs);
        default:
                fuse_copy_finish(cs);
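/*
 * fuse_notify() dispatches on the code the daemon places in
 * fuse_out_header.error when it writes a message with unique == 0;
 * unknown codes discard the payload and fail with -EINVAL.
 */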
                if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;

                return nbytes != reqsize ? -EINVAL : 0;

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                unsigned diffsize = reqsize - nbytes;

                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        err = fuse_copy_one(cs, &oh, sizeof(oh));

        if (oh.len != nbytes)

                err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
                return err ? err : nbytes;

        if (oh.error <= -1000 || oh.error > 0)

        spin_lock(&fc->lock);
        req = request_find(fc, oh.unique);

                spin_unlock(&fc->lock);
                fuse_copy_finish(cs);
                spin_lock(&fc->lock);
                request_end(fc, req);

                else if (oh.error == -EAGAIN)
                        queue_interrupt(fc, req);

                spin_unlock(&fc->lock);
                fuse_copy_finish(cs);

        list_move(&req->list, &fc->io);

        if (!req->out.page_replace)
        spin_unlock(&fc->lock);

        err = copy_out_args(cs, &req->out, nbytes);
        fuse_copy_finish(cs);

        spin_lock(&fc->lock);
        request_end(fc, req);

        return err ? err : nbytes;

        spin_unlock(&fc->lock);
        fuse_copy_finish(cs);
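/*
 * Write side: the daemon's reply begins with a fuse_out_header whose
 * unique field identifies the request.  unique == 0 is a
 * notification; a match on req->intr_unique answers an interrupt
 * (-EAGAIN requeues it); otherwise the request is looked up on
 * fc->processing, its out args are copied in, and request_end()
 * completes it.
 */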
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)

        fuse_copy_init(&cs, fc, 0, iov, nr_segs);
        return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                                     struct file *out, loff_t *ppos,
                                     size_t len, unsigned int flags)

        fc = fuse_get_conn(out);

        for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)

                if (rem >= ibuf->len) {
                        ibuf->ops->get(pipe, ibuf);

        fuse_copy_init(&cs, fc, 0, NULL, nbuf);
        ret = fuse_dev_do_write(fc, &cs, len);

        for (idx = 0; idx < nbuf; idx++) {
                buf->ops->release(pipe, buf);
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)

        struct fuse_conn *fc = fuse_get_conn(file);

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        else if (request_pending(fc))
        spin_unlock(&fc->lock);
        while (!list_empty(head)) {
                request_end(fc, req);
                spin_lock(&fc->lock);

static void end_io_requests(struct fuse_conn *fc)

        while (!list_empty(&fc->io)) {
                list_del_init(&req->list);
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        spin_lock(&fc->lock);

static void end_queued_requests(struct fuse_conn *fc)

        end_requests(fc, &fc->pending);

        while (forget_pending(fc))
static void end_polls(struct fuse_conn *fc)

        spin_lock(&fc->lock);
        end_io_requests(fc);
        end_queued_requests(fc);
        spin_unlock(&fc->lock);

        struct fuse_conn *fc = fuse_get_conn(file);

        spin_lock(&fc->lock);
        end_queued_requests(fc);
        spin_unlock(&fc->lock);

static int fuse_dev_fasync(int fd, struct file *file, int on)

        struct fuse_conn *fc = fuse_get_conn(file);
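/*
 * Teardown: fuse_abort_conn() (and device release) marks the
 * connection dead, forces errors on everything in fc->io, and fails
 * all queued requests and pending forgets so no caller is left
 * sleeping on an answer that will never come.
 */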
        .aio_read       = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
        .aio_write      = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .fasync         = fuse_dev_fasync,

        if (!fuse_req_cachep)

                goto out_cache_clean;