#include <linux/sched.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
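/*
 * ntfs_file_open - called when an inode is about to be opened
 *
 * On architectures where sizeof(unsigned long) < 8, a page cache index
 * cannot address files bigger than MAX_LFS_FILESIZE, so opening such
 * files is refused.
 */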
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
        if (sizeof(unsigned long) < 8) {
                if (i_size_read(vi) > MAX_LFS_FILESIZE)
                        return -EOVERFLOW;
        }
        return generic_file_open(vi, filp);
}
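/*
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 *
 * Extend the initialized size of the attribute described by @ni to
 * @new_init_size, zeroing the newly initialized region.  A resident
 * attribute value is zero-extended directly inside the MFT record; for a
 * non-resident attribute, data_size (and i_size) are bumped first, the
 * pages in between are read in (which zeroes them) and dirtied, and only
 * then is initialized_size raised in the attribute record.
 */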
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
        struct inode *vi = VFS_I(ni);
        MFT_RECORD *m = NULL;

        old_i_size = i_size_read(vi);
        ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
                        "old_initialized_size 0x%llx, "
                        "new_initialized_size 0x%llx, i_size 0x%llx.",
                        vi->i_ino, (unsigned)le32_to_cpu(ni->type),
                        (unsigned long long)old_init_size,
                        (unsigned long long)new_init_size, old_i_size);

        if (NInoNonResident(ni))
                goto do_non_resident_extend;
        BUG_ON(old_init_size != old_i_size);

        attr_len = le32_to_cpu(a->data.resident.value_length);
        BUG_ON(old_i_size != (loff_t)attr_len);

        memset(kattr + attr_len, 0, new_init_size - attr_len);

        i_size_write(vi, new_init_size);
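        /*
         * Non-resident case.  If the new initialized size exceeds the
         * current file size, grow data_size/i_size first, then bring each
         * page between the old and new initialized sizes uptodate and
         * dirty before raising initialized_size itself.
         */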
do_non_resident_extend:
        if (new_init_size > old_i_size) {
                BUG_ON(old_i_size != (loff_t)
                                sle64_to_cpu(a->data.non_resident.data_size));
                a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
                flush_dcache_mft_record_page(ctx->ntfs_ino);
                mark_mft_record_dirty(ctx->ntfs_ino);
                i_size_write(vi, new_init_size);
        }

        do {
                page = read_mapping_page(mapping, index, NULL);

                balance_dirty_pages_ratelimited(mapping);
        } while (++index < end_index);

        a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
        flush_dcache_mft_record_page(ctx->ntfs_ino);
        mark_mft_record_dirty(ctx->ntfs_ino);

        ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
                        (unsigned long long)new_init_size, i_size_read(vi));

        ntfs_debug("Failed. Returning error code %i.", err);
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
                int bytes)

static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
                size_t iov_ofs, int bytes)

                ntfs_fault_in_pages_readable(buf, len);
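/*
 * Grab and lock @nr_pages pages of @mapping starting at @index.  A page
 * allocated into *@cached_page is consumed when a page is missing from
 * the page cache, so a failed attempt can be retried without having to
 * reallocate.
 */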
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
                pgoff_t index, const unsigned nr_pages, struct page **pages,
                struct page **cached_page)
{
                        *cached_page = page_cache_alloc(mapping);

                        pages[nr] = *cached_page;

        } while (nr < nr_pages);
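/*
 * Queue a buffer head for read i/o; completion unlocks the buffer, and
 * callers wait for it later (see the wait_bh[] draining further down).
 */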
static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
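/*
 * Prepare the buffers of @nr_pages locked pages for a write of @bytes at
 * @pos into a non-resident attribute: map each buffer to disk, allocating
 * a cluster where the write hits a sparse region (a hole) and rebuilding
 * the runlist and mapping pairs array to match, read in blocks that are
 * only partially overwritten and lie inside the initialized size, and
 * zero buffers beyond it.  The error paths below undo these steps.
 */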
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                unsigned nr_pages, s64 pos, size_t bytes)
{
        struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
        MFT_RECORD *m = NULL;
        ATTR_RECORD *a = NULL;
        u32 attr_rec_len = 0;
        unsigned blocksize, u;
        bool rl_write_locked, was_hole, is_retry;
        unsigned char blocksize_bits;
        struct {
                u8 runlist_merged:1;
                u8 mft_attr_mapped:1;
                u8 mp_rebuilt:1;
                u8 attr_switched:1;
        } status = { 0, 0, 0, 0 };
        ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
                        "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
                        vi->i_ino, ni->type, pages[0]->index, nr_pages,
                        (long long)pos, bytes);
        blocksize = vol->sb->s_blocksize;
        blocksize_bits = vol->sb->s_blocksize_bits;
        u = 0;
        do {
                struct page *page = pages[u];

                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, blocksize, 0);
                        if (unlikely(!page_has_buffers(page)))
                                return -ENOMEM;
                }
        } while (++u < nr_pages);
        rl_write_locked = false;

        bh = head = page_buffers(page);
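        /*
         * Walk every buffer head of the current page and decide, block by
         * block, whether it is already mapped, must be read in, can simply
         * be zeroed, or needs a cluster allocated to back it.
         */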
                clear_buffer_new(bh);
                bh_end = bh_pos + blocksize;

                if (buffer_mapped(bh)) {

                        if (buffer_uptodate(bh))

                        if (PageUptodate(page)) {
                                set_buffer_uptodate(bh);

                        if ((bh_pos < pos && bh_end > pos) ||
                                        (bh_pos < end && bh_end > end)) {

                                if (bh_pos < initialized_size) {
                                        ntfs_submit_bh_for_read(bh);

                                        zero_user(page, bh_offset(bh),
                                                        blocksize);
                                        set_buffer_uptodate(bh);

                bh->b_bdev = vol->sb->s_bdev;

                cdelta = bh_cpos - vcn;
                if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
                        bh->b_blocknr = lcn_block +
                                        (cdelta << (vol->cluster_size_bits -
                                        blocksize_bits)) +
                                        (bh_cofs >> blocksize_bits);
                        set_buffer_mapped(bh);

                        if (PageUptodate(page)) {
                                if (!buffer_uptodate(bh))
                                        set_buffer_uptodate(bh);

                        if (bh_end <= pos || bh_pos >= end)

                        if (!buffer_uptodate(bh) && bh_pos < end &&

                        if (bh_pos < initialized_size) {
                                ntfs_submit_bh_for_read(bh);

                                zero_user(page, bh_offset(bh),
                                                blocksize);
                                set_buffer_uptodate(bh);

                        if (bh_end <= pos || bh_pos >= end) {
                                if (!buffer_uptodate(bh)) {
                                        zero_user(page, bh_offset(bh),
                                                        blocksize);
                                        set_buffer_uptodate(bh);

                        if (!buffer_uptodate(bh) &&
                                        (bh_pos < pos || bh_end > end)) {

                                memset(kaddr + pofs, 0, pos - bh_pos);

                                memset(kaddr + pofs, 0, bh_end - end);

                if (bh_pos > initialized_size) {
                        if (PageUptodate(page)) {
                                if (!buffer_uptodate(bh))
                                        set_buffer_uptodate(bh);
                        } else if (!buffer_uptodate(bh)) {
                                zero_user(page, bh_offset(bh), blocksize);
                                set_buffer_uptodate(bh);
                while (rl->length && rl[1].vcn <= bh_cpos)
                        rl++;

                vcn_len = rl[1].vcn - vcn;

                if (likely(vcn + vcn_len >= cend)) {
                        if (rl_write_locked) {

                                rl_write_locked = false;

                        goto map_buffer_cached;

                if (!rl_write_locked) {

                        rl_write_locked = true;

                goto rl_not_mapped_enoent;

                                "attribute type 0x%x, vcn 0x%llx, "
                                "vcn offset 0x%x, because its "
                                "location on disk could not be "
                                "determined%s (error code %i).",
                                (unsigned long long)bh_cpos,
                                is_retry ? " even after retrying" : "",

rl_not_mapped_enoent:

                if ((bh_cend <= cpos || bh_cpos >= cend)) {

                        if (PageUptodate(page)) {
                                if (!buffer_uptodate(bh))
                                        set_buffer_uptodate(bh);
                        } else if (!buffer_uptodate(bh)) {
                                zero_user(page, bh_offset(bh),
                                                blocksize);
                                set_buffer_uptodate(bh);

                if (!rl_write_locked) {

                        rl_write_locked = true;
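                /*
                 * The block lies in a hole: allocate one cluster to back
                 * it, merge the new run into the runlist (under the write
                 * lock taken above), and remember to rewrite the mapping
                 * pairs array of the attribute record to match.
                 */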
                rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
                                false);

                ntfs_debug("Failed to allocate cluster, error code %i.",
                                err);

                if (ntfs_cluster_free_from_rl(vol, rl2)) {
                        ntfs_error(vol->sb, "Failed to release "
                                        "allocated cluster in error "
                                        "code path. Run chkdsk to "
                                        "recover the lost cluster.");

                status.runlist_merged = 1;
                ntfs_debug("Allocated cluster, lcn 0x%llx.",
                                (unsigned long long)lcn);

                status.mft_attr_mapped = 1;
                vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
                rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);

                highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);

                highest_vcn = (sle64_to_cpu(
                                a->data.non_resident.allocated_size) >>
                                vol->cluster_size_bits) - 1;

                mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
                                highest_vcn);
                if (!(err = mp_size))
                        err = -EIO;
                ntfs_debug("Failed to get size for mapping pairs "
                                "array, error code %i.", err);

                err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
                                a->data.non_resident.mapping_pairs_offset));

                                "record for the extended attribute "
                                "record. This case is not "
                                "implemented yet.");

                err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
                                a->data.non_resident.mapping_pairs_offset),
                                mp_size, rl2, vcn, highest_vcn, NULL);
                if (unlikely(err)) {
                        ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
                                        "attribute type 0x%x, because building "
                                        "the mapping pairs failed with error "
                                        "code %i.", vi->i_ino,
                                        (unsigned)le32_to_cpu(ni->type), err);

                if (unlikely(!a->data.non_resident.highest_vcn))
                        a->data.non_resident.highest_vcn =
                                        cpu_to_sle64(highest_vcn);

                if (likely(NInoSparse(ni) || NInoCompressed(ni))) {

                        if (a->data.non_resident.lowest_vcn) {
                                flush_dcache_mft_record_page(ctx->ntfs_ino);
                                mark_mft_record_dirty(ctx->ntfs_ino);

                        status.attr_switched = 1;

                        a->data.non_resident.compressed_size =
                                        cpu_to_sle64(ni->itype.compressed.size);

                        flush_dcache_mft_record_page(ctx->ntfs_ino);
                        mark_mft_record_dirty(ctx->ntfs_ino);

                status.runlist_merged = 0;
                status.mft_attr_mapped = 0;
                if (likely(vcn + vcn_len >= cend)) {

                        rl_write_locked = false;

                        goto map_buffer_cached;
                }
        } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);

        if (likely(!err && ++u < nr_pages))
                goto do_next_page;

        rl_write_locked = false;
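        /*
         * Wait for the buffers queued for read above and zero whatever
         * part of a buffer extends beyond the initialized size, since the
         * on-disk contents past it are not defined.
         */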
        while (wait_bh > wait) {
                bh = *--wait_bh;
                wait_on_buffer(bh);
                if (likely(buffer_uptodate(bh))) {
                        page = bh->b_page;
                        bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
                                        bh_offset(bh);
                        if (unlikely(bh_pos + blocksize > initialized_size)) {
                                int ofs = 0;

                                if (likely(bh_pos < initialized_size))
                                        ofs = initialized_size - bh_pos;
                                zero_user_segment(page, bh_offset(bh) + ofs,
                                                blocksize);

        do {
                bh = head = page_buffers(pages[u]);
                do {
                        clear_buffer_new(bh);
                } while ((bh = bh->b_this_page) != head);
        } while (++u < nr_pages);
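        /*
         * Error path: undo the work done for the failing buffer in reverse
         * order: restore a switched attribute extent, punch the merged run
         * back out of the runlist, free the allocated cluster, shrink the
         * attribute record and rebuild the old mapping pairs array, then
         * clean up the buffer_new state of all the prepared pages.
         */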
        if (status.attr_switched) {

                                "attribute extent of attribute in "
                                "error code path. Run chkdsk to "

                flush_dcache_mft_record_page(ctx->ntfs_ino);
                mark_mft_record_dirty(ctx->ntfs_ino);

                status.attr_switched = 0;
        }

        BUG_ON(!rl_write_locked);

        if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
                                "attribute runlist in error code "
                                "path. Run chkdsk to recover the "

                status.runlist_merged = 0;
        }

        if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
                                "allocated cluster in error "
                                "code path. Run chkdsk to "
                                "recover the lost cluster.");

        if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
                                "record in error code path. Run "
                                "chkdsk to recover.");

        if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
                        a->data.non_resident.mapping_pairs_offset),
                        attr_rec_len - le16_to_cpu(
                        a->data.non_resident.mapping_pairs_offset),
                        ni->runlist.rl, vcn, highest_vcn, NULL)) {
                                "mapping pairs array in error "
                                "code path. Run chkdsk to "

        flush_dcache_mft_record_page(ctx->ntfs_ino);
        mark_mft_record_dirty(ctx->ntfs_ino);

        if (status.mft_attr_mapped) {

        if (rl_write_locked)

        do {
                bh = head = page_buffers(page);
                do {
                        if (u == nr_pages &&
                                        ((s64)page->index << PAGE_CACHE_SHIFT) +
                                        bh_offset(bh) >= end)
                                break;
                        if (!buffer_new(bh))
                                continue;
                        clear_buffer_new(bh);
                        if (!buffer_uptodate(bh)) {
                                if (PageUptodate(page))
                                        set_buffer_uptodate(bh);
                                else {
                                        zero_user(page, bh_offset(bh),
                                                        blocksize);
                                        set_buffer_uptodate(bh);
                                }
                        }
                } while ((bh = bh->b_this_page) != head);
        } while (++u <= nr_pages);

        ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
static inline size_t ntfs_copy_from_user(struct page **pages,
                unsigned nr_pages, unsigned ofs, const char __user *buf,
                size_t bytes)
{
        struct page **last_page = pages + nr_pages;

                addr = kmap(*pages);

        } while (++pages < last_page);

        total += len - left;

        while (++pages < last_page) {

                zero_user(*pages, 0, len);
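/*
 * Copy @bytes from the iovec segments, starting at @iov_ofs, into @vaddr
 * with pagefaults disabled; returns the number of bytes that could not be
 * copied, mirroring __copy_from_user_inatomic() semantics.
 */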
static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
                const struct iovec *iov, size_t iov_ofs, size_t bytes)
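/*
 * Advance *@iovp and *@iov_ofsp by @bytes, stepping to the next iovec
 * segment each time the current one is fully consumed.
 */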
static inline void ntfs_set_next_iovec(const struct iovec **iovp,
                size_t *iov_ofsp, size_t bytes)
{
        const struct iovec *iov = *iovp;
        size_t iov_ofs = *iov_ofsp;

                if (iov->iov_len == iov_ofs) {

        *iov_ofsp = iov_ofs;
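/*
 * As ntfs_copy_from_user(), but gathering from an iovec array.  The iovec
 * position is advanced past the bytes actually copied, and on a partial
 * copy the remainder of the page range is zeroed.
 */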
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
                unsigned nr_pages, unsigned ofs, const struct iovec **iov,
                size_t *iov_ofs, size_t bytes)
{
        struct page **last_page = pages + nr_pages;
        size_t copied, len, total = 0;

                copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
                                *iov, *iov_ofs, len);

                        addr = kmap(*pages);
                        copied = __ntfs_copy_from_user_iovec_inatomic(addr +
                                        ofs, *iov, *iov_ofs, len);

                ntfs_set_next_iovec(iov, iov_ofs, len);

        } while (++pages < last_page);

                memset(addr + ofs + copied, 0, len - copied);

        ntfs_set_next_iovec(iov, iov_ofs, copied);
        while (++pages < last_page) {

                zero_user(*pages, 0, len);
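/*
 * Flush the dcache for each of the @nr_pages pages, as needed on
 * architectures with virtually indexed caches after the pages have been
 * written through a kernel mapping.
 */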
static inline void ntfs_flush_dcache_pages(struct page **pages,
                unsigned nr_pages)

        } while (nr_pages > 0);
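/*
 * Commit a write into a non-resident attribute: mark the written buffers
 * uptodate and dirty, set each page uptodate once no buffer in it is left
 * partial, and, if the write extended the file, push initialized_size
 * (and data_size/i_size) forward in the attribute record as well.
 */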
static inline int ntfs_commit_pages_after_non_resident_write(
                struct page **pages, const unsigned nr_pages, s64 pos,
                size_t bytes)
{
        struct buffer_head *bh, *head;
        unsigned long flags;
        unsigned blocksize, u;

        blocksize = vi->i_sb->s_blocksize;

        do {
                bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
                bh = head = page_buffers(page);
                do {
                        bh_end = bh_pos + blocksize;
                        if (bh_end <= pos || bh_pos >= end) {
                                if (!buffer_uptodate(bh))
                                        partial = true;
                        } else {
                                set_buffer_uptodate(bh);
                                mark_buffer_dirty(bh);
                        }
                } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);

                if (!partial && !PageUptodate(page))
                        SetPageUptodate(page);
        } while (++u < nr_pages);
        if (end <= initialized_size) {

        BUG_ON(!NInoNonResident(ni));

        BUG_ON(!a->non_resident);

        a->data.non_resident.initialized_size = cpu_to_sle64(end);
        if (end > i_size_read(vi)) {
                i_size_write(vi, end);
                a->data.non_resident.data_size =
                                a->data.non_resident.initialized_size;
        }

        flush_dcache_mft_record_page(ctx->ntfs_ino);
        mark_mft_record_dirty(ctx->ntfs_ino);

        ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
                        "code %i).", err);
        NVolSetErrors(ni->vol);
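/*
 * Commit a completed write of @bytes at @pos.  Non-resident attributes
 * are handled by ntfs_commit_pages_after_non_resident_write(); for a
 * resident attribute, the data is copied from the page straight into the
 * attribute value inside the MFT record, growing the value length and
 * i_size when the write went past the old end.
 */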
static int ntfs_commit_pages_after_write(struct page **pages,
                const unsigned nr_pages, s64 pos, size_t bytes)
{
        char *kattr, *kaddr;
        unsigned long flags;

        ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
                        "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
                        vi->i_ino, ni->type, pages[0]->index, nr_pages,
                        (long long)pos, bytes);
        if (NInoNonResident(ni))
                return ntfs_commit_pages_after_non_resident_write(pages,
                                nr_pages, pos, bytes);
        BUG_ON(NInoNonResident(ni));

        attr_len = le32_to_cpu(a->data.resident.value_length);
        i_size = i_size_read(vi);
        BUG_ON(attr_len != i_size);

        kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);

        memcpy(kattr + pos, kaddr + pos, bytes);

        if (end > attr_len) {
                attr_len = end;
                a->data.resident.value_length = cpu_to_le32(attr_len);
        }

        if (!PageUptodate(page)) {

                memcpy(kaddr, kattr, pos);

                memcpy(kaddr + end, kattr + end, attr_len - end);

                SetPageUptodate(page);
        }

        BUG_ON(initialized_size != i_size);
        if (end > initialized_size) {

                i_size_write(vi, end);
        }

        flush_dcache_mft_record_page(ctx->ntfs_ino);
        mark_mft_record_dirty(ctx->ntfs_ino);

                        "commit the write.");
        if (PageUptodate(page)) {
                        "dirty so the write will be retried "
                        "later on by the VM.");

                        "data has been lost.");

                        "with error %i.", err);
        NVolSetErrors(ni->vol);
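/*
 * The core of the write path.  For each chunk: fault in the source user
 * pages, grab and (for non-resident attributes) prepare the destination
 * pages, copy the data in, and commit.  The allocated and initialized
 * sizes are extended up front when the write reaches past them.
 */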
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                const struct iovec *iov, unsigned long nr_segs,
                loff_t pos, loff_t *ppos, size_t count)
{
        struct page *cached_page = NULL;
        unsigned long flags;
        size_t bytes, iov_ofs = 0;

        ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
                        "pos 0x%llx, count 0x%lx.",
                        vi->i_ino, (unsigned)le32_to_cpu(ni->type),
                        (unsigned long long)pos, (unsigned long)count);
        BUG_ON(NInoMstProtected(ni));

        if (NInoEncrypted(ni)) {

                ntfs_debug("Denying write access to encrypted file.");
                return -EACCES;
        }
        if (NInoCompressed(ni)) {

                                "not implemented yet. Sorry.");

        if (unlikely(NInoTruncateFailed(ni))) {

                err = ntfs_truncate(vi);
                if (err || NInoTruncateFailed(ni)) {

                                        "0x%lx, attribute type 0x%x, because "
                                        "ntfs_truncate() failed (error code "

        ll = ntfs_attr_extend_allocation(ni, end, -1, pos);

                        ntfs_debug("Truncating write to inode 0x%lx, "
                                        "attribute type 0x%x, because "
                                        "the allocation was only "
                                        "partially extended.",
                                        vi->i_ino, (unsigned)

                        ntfs_debug("Truncating write to inode 0x%lx, "
                                        "attribute type 0x%x, because "
                                        "extending the allocation "
                                        "failed (error code %i).",
                                        vi->i_ino, (unsigned)

                                        "inode 0x%lx, attribute type "
                                        "0x%x, because extending the "
                                        "allocation failed (error "
                                        "code %i).", vi->i_ino,

        err = ntfs_attr_extend_initialized(ni, pos);

                                "0x%lx, attribute type 0x%x, because "
                                "extending the initialized size "
                                "failed (error code %i).", vi->i_ino,

        if (likely(nr_segs == 1))

                unsigned ofs, do_pages, u;

                if (vcn != last_vcn) {

                                                "attribute type 0x%x, "
                                                "because the attribute "
                                                vi->i_ino, (unsigned)

                        start_idx = (pos & ~(s64)vol->cluster_size_mask) >>
                                        PAGE_CACHE_SHIFT;

                        do_pages = nr_pages;

                if (likely(nr_segs == 1))
                        ntfs_fault_in_pages_readable(buf, bytes);
                else
                        ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
                status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
                                pages, &cached_page);

                if (NInoNonResident(ni)) {
                        status = ntfs_prepare_pages_for_non_resident_write(
                                        pages, do_pages, pos, bytes);

                i_size = i_size_read(vi);
                if (pos + bytes > i_size)

                if (likely(nr_segs == 1)) {
                        copied = ntfs_copy_from_user(pages + u, do_pages - u,
                                        ofs, buf, bytes);
                } else
                        copied = ntfs_copy_from_user_iovec(pages + u,
                                        do_pages - u, ofs, &iov, &iov_ofs,
                                        bytes);
                ntfs_flush_dcache_pages(pages + u, do_pages - u);
                status = ntfs_commit_pages_after_write(pages, do_pages, pos,
                                bytes);

                balance_dirty_pages_ratelimited(mapping);

        ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
                        written ? "written" : "status", (unsigned long)written,
                        (long)status);
        return written ? written : status;
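/*
 * Validate and set up the request, then feed the whole iovec to
 * ntfs_file_buffered_write(); the return value is the number of bytes
 * written, or the error if nothing was written.
 */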
static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
                const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
{
        struct file *file = iocb->ki_filp;

        written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
                        count);

        return written ? written : err;
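/*
 * Entry point for aio/write(2).  The buffered write is bracketed by
 * sb_start_write()/sb_end_write() so it blocks while the filesystem is
 * frozen.
 */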
static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;

        sb_start_write(inode->i_sb);

        ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);

        sb_end_write(inode->i_sb);
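/*
 * Sync a file to disk: write the dirty pages and then the inode itself.
 * For a datasync on a non-resident attribute the inode write is skipped,
 * as none of the data lives inside the MFT record.
 */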
static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct inode *vi = filp->f_mapping->host;

        if (!datasync || !NInoNonResident(NTFS_I(vi)))
                ret = __ntfs_write_inode(vi, 1);

                        "%u.", datasync ? "data" : "", vi->i_ino, -ret);
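/* Operation tables wiring the functions above into the VFS. */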
const struct file_operations ntfs_file_ops = {
        .aio_write      = ntfs_file_aio_write,
        .fsync          = ntfs_file_fsync,
        .open           = ntfs_file_open,
};

const struct inode_operations ntfs_file_inode_ops = {
        .truncate       = ntfs_truncate_vfs,
        .setattr        = ntfs_setattr,
};