28 #include <linux/slab.h>
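/*
 * ntfs_compression_buffer is a single, global scratch buffer into which a
 * whole compression block (cb) is copied before being decompressed.  It is
 * shared by all mounted volumes and serialized by the ntfs_cb_lock spinlock.
 */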
61 static u8 *ntfs_compression_buffer = NULL;
77 BUG_ON(ntfs_compression_buffer);
80 if (!ntfs_compression_buffer)
92 BUG_ON(!ntfs_compression_buffer);
93 vfree(ntfs_compression_buffer);
94 ntfs_compression_buffer = NULL;
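/*
 * allocate_compression_buffers() vmalloc()s the scratch buffer (NTFS_MAX_CB_SIZE,
 * i.e. 64kiB) and free_compression_buffers() releases it again; the BUG_ON()s
 * assert that the two are called in matched pairs.
 */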
100 static void zero_partial_compressed_page(struct page *page,
106 ntfs_debug("Zeroing page region outside initialized size.");
123 static inline void handle_bounds_compressed_page(struct page *page,
124 const loff_t i_size, const s64 initialized_size)
127 (initialized_size < i_size))
128 zero_partial_compressed_page(page, initialized_size);
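/*
 * handle_bounds_compressed_page() zeroes the tail of a page that extends
 * beyond the attribute's initialized size, so stale buffer contents never
 * reach user space.  ntfs_decompress() below unpacks one compression block
 * into the array of destination page cache pages, one 4096-byte
 * (NTFS_SB_SIZE) sub-block at a time.
 */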
169 static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
170 int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
171 const int xpage, char *xpage_done, u8 *const cb_start,
172 const u32 cb_size, const loff_t i_size,
173 const s64 initialized_size)
179 u8 *cb_end = cb_start + cb_size;
181 u8 *cb_sb_start = cb;
199 int completed_pages[dest_max_index - *dest_index + 1];
200 int nr_completed_pages = 0;
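/*
 * completed_pages[] collects the indices of destination pages that become
 * fully decompressed while ntfs_cb_lock is held; they are flushed, bounds
 * checked and unlocked in one go once the lock has been dropped.
 */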
205 ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
207 ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
216 (*dest_index == dest_max_index &&
217 *dest_ofs == dest_max_ofs)) {
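/*
 * Decompression is finished when the cb is exhausted, a zero sub-block
 * header is seen, or the destination range the caller asked for is full.
 */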
220 ntfs_debug("Completed. Returning success (0).");
224 spin_unlock(&ntfs_cb_lock);
226 if (nr_completed_pages > 0) {
227 for (i = 0; i < nr_completed_pages; i++) {
228 int di = completed_pages[i];
235 handle_bounds_compressed_page(dp, i_size,
245 dest_pages[di] = NULL;
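/*
 * Finalize every page that was completed above: flush the dcache, zero any
 * region beyond i_size/initialized_size, mark it uptodate and unlock it.
 * The page the caller actually wanted (xpage) is flagged via *xpage_done.
 */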
252 do_sb_start = *dest_ofs;
256 if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
257 goto return_overflow;
261 goto return_overflow;
267 if (cb_sb_end > cb_end)
268 goto return_overflow;
271 dp = dest_pages[*dest_index];
278 if (!*dest_ofs && (++*dest_index > dest_max_index))
279 goto return_overflow;
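/*
 * The 2-byte sub-block header encodes the stored size of the sub-block and
 * whether it is compressed at all.  An uncompressed sub-block must be
 * exactly NTFS_SB_SIZE bytes and is simply copied into the destination page.
 */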
295 if (cb_sb_end - cb != NTFS_SB_SIZE)
296 goto return_overflow;
299 memcpy(dp_addr, cb, NTFS_SB_SIZE);
310 completed_pages[nr_completed_pages++] = *dest_index;
311 if (++*dest_index > dest_max_index)
312 goto return_overflow;
320 dp_sb_start = dp_addr;
326 if (cb == cb_sb_end) {
328 if (dp_addr < dp_sb_end) {
329 int nr_bytes = do_sb_end - *dest_ofs;
331 ntfs_debug("Filling incomplete sub-block with "
334 memset(dp_addr, 0, nr_bytes);
335 *dest_ofs += nr_bytes;
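/*
 * If the compressed data for this sub-block runs out before a full
 * NTFS_SB_SIZE bytes of output have been produced, the remainder of the
 * destination sub-block is explicitly zero filled.
 */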
344 if (cb > cb_sb_end || dp_addr > dp_sb_end)
345 goto return_overflow;
357 if (cb >= cb_sb_end || dp_addr > dp_sb_end)
377 if (dp_addr == dp_sb_start)
378 goto return_overflow;
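/*
 * A set tag bit means a phrase token: a 16-bit value split into a back
 * pointer and a length.  How the 16 bits are divided depends on how far
 * into the sub-block the output already is (the lg computation below):
 * early in the sub-block the back pointer needs fewer bits, leaving more
 * bits for the length, and the split shifts as the position grows.
 */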
388 for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
399 dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
400 if (dp_back_addr < dp_sb_start)
401 goto return_overflow;
404 length = (pt & (0xfff >> lg)) + 3;
408 if (*dest_ofs > do_sb_end)
409 goto return_overflow;
412 max_non_overlap = dp_addr - dp_back_addr;
414 if (length <= max_non_overlap) {
416 memcpy(dp_addr, dp_back_addr, length);
427 memcpy(dp_addr, dp_back_addr, max_non_overlap);
428 dp_addr += max_non_overlap;
429 dp_back_addr += max_non_overlap;
430 length -= max_non_overlap;
432 *dp_addr++ = *dp_back_addr++;
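/*
 * The back pointer may point into bytes that this very copy is producing
 * (run-length style overlap), so only the non-overlapping prefix can be
 * memcpy()d; the rest has to be copied a byte at a time so that earlier
 * output bytes are reused as they are written.
 *
 * Illustrative sketch (not part of compress.c): a minimal user-space decoder
 * for the payload of a single LZNT1 sub-block, mirroring the tag/token loop
 * above.  The function name and the reduced error handling are mine; the
 * kernel code additionally tracks page boundaries, dest_ofs and the
 * initialized size.
 */
#include <stddef.h>
#include <stdint.h>

static int lznt1_decode_sb(const uint8_t *src, size_t src_len,
		uint8_t *dst, size_t dst_max, size_t *dst_len)
{
	size_t s = 0, d = 0;

	while (s < src_len) {
		uint8_t tag = src[s++];
		int bit;

		for (bit = 0; bit < 8 && s < src_len; bit++, tag >>= 1) {
			if (!(tag & 1)) {
				/* Clear bit: literal byte, copied verbatim. */
				if (d >= dst_max)
					return -1;
				dst[d++] = src[s++];
				continue;
			}
			/* Set bit: 16-bit little-endian phrase token follows. */
			if (s + 2 > src_len || !d)
				return -1;
			{
				uint16_t pt = src[s] | (src[s + 1] << 8);
				size_t i, back, len;
				int lg = 0;

				s += 2;
				/* Position-dependent split, as in the code above. */
				for (i = d - 1; i >= 0x10; i >>= 1)
					lg++;
				back = (pt >> (12 - lg)) + 1;
				len = (pt & (0xfff >> lg)) + 3;
				if (back > d || d + len > dst_max)
					return -1;
				/* Byte-wise copy so overlapping runs repeat. */
				while (len--) {
					dst[d] = dst[d - back];
					d++;
				}
			}
		}
	}
	*dst_len = d;
	return 0;
}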
493 u8 *cb, *cb_pos, *cb_end;
494 struct buffer_head **bhs;
497 u64 cb_size_mask = cb_size - 1UL;
517 unsigned int nr_pages = (end_vcn - start_vcn) <<
519 unsigned int xpage, max_page, cur_page, cur_ofs, i;
520 unsigned int cb_clusters, cb_max_ofs;
521 int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
523 unsigned char xpage_done = 0;
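/*
 * ntfs_read_compressed_block() is the read path for compressed attributes:
 * given the page the VFS asked for, it grabs as many of the other pages
 * belonging to the same compression block as it can, reads the whole cb
 * from disk and fills all of those pages in one go.  xpage is the index of
 * the page the caller actually wanted and xpage_done records whether it
 * was completed.
 */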
525 ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
526 "%i.", index, cb_size, nr_pages);
537 bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
544 ntfs_error(vol->sb, "Failed to allocate internal buffers.");
560 i_size = i_size_read(VFS_I(ni));
566 if (xpage >= max_page) {
570 ntfs_debug("Compressed read outside i_size - truncated?");
571 SetPageUptodate(page);
575 if (nr_pages < max_page)
577 for (i = 0; i < max_page; i++, offset++) {
587 if (!PageDirty(page) && (!PageUptodate(page) ||
589 ClearPageError(page);
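/*
 * Gather the remaining pages of the cb from the page cache without
 * blocking; pages that are already uptodate and error free, or that are
 * dirty, are skipped (and dropped from the array) so no data is
 * overwritten and no work is wasted.
 */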
612 for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
614 bool is_retry = false;
628 ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
629 (unsigned long long)vcn,
630 (unsigned long long)lcn);
647 goto lock_retry_remap;
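/*
 * Each cluster (VCN) of the cb is mapped to an on-disk LCN via the runlist.
 * LCN_HOLE marks a sparse cluster, while an unmapped runlist fragment is
 * mapped with ntfs_map_runlist() and the lookup retried via
 * lock_retry_remap.
 */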
652 max_block = block + (vol->cluster_size >> block_size_bits);
655 if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
658 } while (++block < max_block);
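/*
 * For every non-sparse cluster the constituent device blocks are collected
 * with sb_getblk() into bhs[]; afterwards the buffers that are not already
 * uptodate are locked, submitted for read and waited on, bailing out to
 * read_err on I/O failure.
 */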
666 for (i = 0; i < nr_bhs; i++) {
667 struct buffer_head *tbh = bhs[i];
669 if (!trylock_buffer(tbh))
671 if (unlikely(buffer_uptodate(tbh))) {
681 for (i = 0; i < nr_bhs; i++) {
682 struct buffer_head *tbh = bhs[i];
684 if (buffer_uptodate(tbh))
696 if (unlikely(!buffer_uptodate(tbh))) {
698 "uptodate! Unplugging the disk queue "
699 "and rescheduling.");
703 if (unlikely(!buffer_uptodate(tbh)))
713 spin_lock(&ntfs_cb_lock);
714 cb = ntfs_compression_buffer;
719 cb_end = cb + cb_size;
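/*
 * With ntfs_cb_lock held, the buffer heads are copied into the global
 * ntfs_compression_buffer so the decompressor can work on one contiguous
 * compression block.
 */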
722 for (i = 0; i < nr_bhs; i++) {
723 memcpy(cb_pos, bhs[i]->b_data, block_size);
728 if (cb_pos + 2 <= cb + cb_size)
735 ntfs_debug("Successfully read the compression block.");
743 if (cb_max_page > max_page)
744 cb_max_page = max_page;
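/*
 * Three cases per compression block: a sparse cb (the very first cluster
 * was a hole) is satisfied by zeroing the pages, a cb that occupies all of
 * its clusters is stored uncompressed and just copied out, and everything
 * else is handed to ntfs_decompress().
 */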
746 if (vcn == start_vcn - cb_clusters) {
748 ntfs_debug("Found sparse compression block.");
750 spin_unlock(&ntfs_cb_lock);
753 for (; cur_page < cb_max_page; cur_page++) {
754 page = pages[cur_page];
769 SetPageUptodate(page);
771 if (cur_page == xpage)
775 pages[cur_page] = NULL;
779 if (cb_pos >= cb_end)
783 if (cb_max_ofs && cb_pos < cb_end) {
784 page = pages[cur_page];
787 cb_max_ofs - cur_ofs);
792 cur_ofs = cb_max_ofs;
794 } else if (vcn == start_vcn) {
796 unsigned int cur2_page = cur_page;
797 unsigned int cur_ofs2 = cur_ofs;
798 u8 *cb_pos2 = cb_pos;
800 ntfs_debug("Found uncompressed compression block.");
815 for (; cur_page < cb_max_page; cur_page++) {
816 page = pages[cur_page];
822 if (cb_pos >= cb_end)
826 if (cb_max_ofs && cb_pos < cb_end) {
827 page = pages[cur_page];
830 cb_max_ofs - cur_ofs);
831 cb_pos += cb_max_ofs - cur_ofs;
832 cur_ofs = cb_max_ofs;
835 spin_unlock(&ntfs_cb_lock);
837 for (; cur2_page < cb_max_page; cur2_page++) {
838 page = pages[cur2_page];
844 handle_bounds_compressed_page(page, i_size,
848 SetPageUptodate(page);
850 if (cur2_page == xpage)
854 pages[cur2_page] = NULL;
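/*
 * For an uncompressed cb, the data is copied from the scratch buffer into
 * the pages while ntfs_cb_lock is still held; a second pass (cur2_page)
 * then runs without the lock to apply the i_size/initialized_size bounds,
 * mark the pages uptodate and unlock them.
 */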
858 if (cb_pos2 >= cb_end)
863 unsigned int prev_cur_page = cur_page;
865 ntfs_debug("Found compressed compression block.");
866 err = ntfs_decompress(pages, &cur_page, &cur_ofs,
867 cb_max_page, cb_max_ofs, xpage, &xpage_done,
868 cb_pos, cb_size - (cb_pos - cb), i_size,
875 ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
876 "0x%lx with error code %i. Skipping "
877 "this compression block.",
880 for (; prev_cur_page < cur_page; prev_cur_page++) {
881 page = pages[prev_cur_page];
886 if (prev_cur_page != xpage)
888 pages[prev_cur_page] = NULL;
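/*
 * If ntfs_decompress() fails, the pages belonging to this cb are released
 * and the loop carries on with the next compression block instead of
 * failing the whole read.
 */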
895 for (i = 0; i < nr_bhs; i++)
906 for (cur_page = 0; cur_page < max_page; cur_page++) {
907 page = pages[cur_page];
910 "Terminating them with extreme "
911 "prejudice. Inode 0x%lx, page index "
916 if (cur_page != xpage)
918 pages[cur_page] = NULL;
930 "EOVERFLOW" : (!err ?
"EIO" :
"unknown error"));
931 return err < 0 ? err : -
EIO;
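/*
 * The return value only reflects the page the caller asked for: if xpage
 * was completed the read succeeds even though other pages of the cb may
 * have failed; otherwise the recorded error (or -EIO) is returned.
 */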
934 ntfs_error(vol->sb, "IO error while reading compressed data.");
936 for (i = 0; i < nr_bhs; i++)
941 ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
942 "compression block.");
947 ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
948 "compression block.");
953 ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
957 for (i = cur_page; i < max_page; i++) {