Linux Kernel 3.7.1
attrib.c
23 #include <linux/buffer_head.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 
29 #include "attrib.h"
30 #include "debug.h"
31 #include "layout.h"
32 #include "lcnalloc.h"
33 #include "malloc.h"
34 #include "mft.h"
35 #include "ntfs.h"
36 #include "types.h"
37 
84 int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
85 {
86  VCN end_vcn;
87  unsigned long flags;
88  ntfs_inode *base_ni;
89  MFT_RECORD *m;
90  ATTR_RECORD *a;
91  runlist_element *rl;
92  struct page *put_this_page = NULL;
93  int err = 0;
94  bool ctx_is_temporary, ctx_needs_reset;
95  ntfs_attr_search_ctx old_ctx = { NULL, };
96 
97  ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
98  (unsigned long long)vcn);
99  if (!NInoAttr(ni))
100  base_ni = ni;
101  else
102  base_ni = ni->ext.base_ntfs_ino;
103  if (!ctx) {
104  ctx_is_temporary = ctx_needs_reset = true;
105  m = map_mft_record(base_ni);
106  if (IS_ERR(m))
107  return PTR_ERR(m);
108  ctx = ntfs_attr_get_search_ctx(base_ni, m);
109  if (unlikely(!ctx)) {
110  err = -ENOMEM;
111  goto err_out;
112  }
113  } else {
114  VCN allocated_size_vcn;
115 
116  BUG_ON(IS_ERR(ctx->mrec));
117  a = ctx->attr;
118  BUG_ON(!a->non_resident);
119  ctx_is_temporary = false;
120  end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
121  read_lock_irqsave(&ni->size_lock, flags);
122  allocated_size_vcn = ni->allocated_size >>
123  ni->vol->cluster_size_bits;
124  read_unlock_irqrestore(&ni->size_lock, flags);
125  if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
126  end_vcn = allocated_size_vcn - 1;
127  /*
128  * If we already have the attribute extent containing @vcn in
129  * @ctx, no need to look it up again. We slightly cheat in
130  * that if vcn exceeds the allocated size, we will refuse to
131  * map the runlist below, so there is definitely no need to get
132  * the right attribute extent.
133  */
134  if (vcn >= allocated_size_vcn || (a->type == ni->type &&
135  a->name_length == ni->name_len &&
136  !memcmp((u8*)a + le16_to_cpu(a->name_offset),
137  ni->name, ni->name_len) &&
138  sle64_to_cpu(a->data.non_resident.lowest_vcn)
139  <= vcn && end_vcn >= vcn))
140  ctx_needs_reset = false;
141  else {
142  /* Save the old search context. */
143  old_ctx = *ctx;
144  /*
145  * If the currently mapped (extent) inode is not the
146  * base inode we will unmap it when we reinitialize the
147  * search context which means we need to get a
148  * reference to the page containing the mapped mft
149  * record so we do not accidentally drop changes to the
150  * mft record when it has not been marked dirty yet.
151  */
152  if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
153  old_ctx.base_ntfs_ino) {
154  put_this_page = old_ctx.ntfs_ino->page;
155  page_cache_get(put_this_page);
156  }
157  /*
158  * Reinitialize the search context so we can lookup the
159  * needed attribute extent.
160  */
161  ntfs_attr_reinit_search_ctx(ctx);
162  ctx_needs_reset = true;
163  }
164  }
165  if (ctx_needs_reset) {
166  err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
167  CASE_SENSITIVE, vcn, NULL, 0, ctx);
168  if (unlikely(err)) {
169  if (err == -ENOENT)
170  err = -EIO;
171  goto err_out;
172  }
173  BUG_ON(!ctx->attr->non_resident);
174  }
175  a = ctx->attr;
176  /*
177  * Only decompress the mapping pairs if @vcn is inside it. Otherwise
178  * we get into problems when we try to map an out of bounds vcn because
179  * we then try to map the already mapped runlist fragment and
180  * ntfs_mapping_pairs_decompress() fails.
181  */
182  end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
183  if (unlikely(vcn && vcn >= end_vcn)) {
184  err = -ENOENT;
185  goto err_out;
186  }
187  rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
188  if (IS_ERR(rl))
189  err = PTR_ERR(rl);
190  else
191  ni->runlist.rl = rl;
192 err_out:
193  if (ctx_is_temporary) {
194  if (likely(ctx))
195  ntfs_attr_put_search_ctx(ctx);
196  unmap_mft_record(base_ni);
197  } else if (ctx_needs_reset) {
198  /*
199  * If there is no attribute list, restoring the search context
200  * is accomplished simply by copying the saved context back over
201  * the caller supplied context. If there is an attribute list,
202  * things are more complicated as we need to deal with mapping
203  * of mft records and resulting potential changes in pointers.
204  */
205  if (NInoAttrList(base_ni)) {
206  /*
207  * If the currently mapped (extent) inode is not the
208  * one we had before, we need to unmap it and map the
209  * old one.
210  */
211  if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
212  /*
213  * If the currently mapped inode is not the
214  * base inode, unmap it.
215  */
216  if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
217  ctx->base_ntfs_ino) {
218  unmap_extent_mft_record(ctx->ntfs_ino);
219  ctx->mrec = ctx->base_mrec;
220  BUG_ON(!ctx->mrec);
221  }
222  /*
223  * If the old mapped inode is not the base
224  * inode, map it.
225  */
226  if (old_ctx.base_ntfs_ino &&
227  old_ctx.ntfs_ino !=
228  old_ctx.base_ntfs_ino) {
229 retry_map:
230  ctx->mrec = map_mft_record(
231  old_ctx.ntfs_ino);
232  /*
233  * Something bad has happened. If out
234  * of memory retry till it succeeds.
235  * Any other errors are fatal and we
236  * return the error code in ctx->mrec.
237  * Let the caller deal with it... We
238  * just need to fudge things so the
239  * caller can reinit and/or put the
240  * search context safely.
241  */
242  if (IS_ERR(ctx->mrec)) {
243  if (PTR_ERR(ctx->mrec) ==
244  -ENOMEM) {
245  schedule();
246  goto retry_map;
247  } else
248  old_ctx.ntfs_ino =
249  old_ctx.
250  base_ntfs_ino;
251  }
252  }
253  }
254  /* Update the changed pointers in the saved context. */
255  if (ctx->mrec != old_ctx.mrec) {
256  if (!IS_ERR(ctx->mrec))
257  old_ctx.attr = (ATTR_RECORD*)(
258  (u8*)ctx->mrec +
259  ((u8*)old_ctx.attr -
260  (u8*)old_ctx.mrec));
261  old_ctx.mrec = ctx->mrec;
262  }
263  }
264  /* Restore the search context to the saved one. */
265  *ctx = old_ctx;
266  /*
267  * We drop the reference on the page we took earlier. In the
268  * case that IS_ERR(ctx->mrec) is true this means we might lose
269  * some changes to the mft record that had been made between
270  * the last time it was marked dirty/written out and now. This
271  * at this stage is not a problem as the mapping error is fatal
272  * enough that the mft record cannot be written out anyway and
273  * the caller is very likely to shutdown the whole inode
274  * immediately and mark the volume dirty for chkdsk to pick up
275  * the pieces anyway.
276  */
277  if (put_this_page)
278  page_cache_release(put_this_page);
279  }
280  return err;
281 }
282 
298 int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
299 {
300  int err = 0;
301 
302  down_write(&ni->runlist.lock);
303  /* Make sure someone else didn't do the work while we were sleeping. */
304  if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
305  LCN_RL_NOT_MAPPED))
306  err = ntfs_map_runlist_nolock(ni, vcn, NULL);
307  up_write(&ni->runlist.lock);
308  return err;
309 }
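
A minimal usage sketch (hypothetical helper, not part of attrib.c): a caller that only needs a one-off VCN to LCN translation can map the runlist fragment first with ntfs_map_runlist(), which takes the runlist lock itself, and then translate under the read lock using the runlist helper ntfs_rl_vcn_to_lcn().

/* Hypothetical example: map the fragment containing @vcn, then translate. */
static LCN example_map_and_translate(ntfs_inode *ni, VCN vcn)
{
	LCN lcn;
	int err;

	err = ntfs_map_runlist(ni, vcn);
	if (err)
		return LCN_EIO;
	down_read(&ni->runlist.lock);
	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
	up_read(&ni->runlist.lock);
	return lcn;
}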
310 
341 LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
342  const bool write_locked)
343 {
344  LCN lcn;
345  unsigned long flags;
346  bool is_retry = false;
347 
348  BUG_ON(!ni);
349  ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
350  ni->mft_no, (unsigned long long)vcn,
351  write_locked ? "write" : "read");
352  BUG_ON(!NInoNonResident(ni));
353  BUG_ON(vcn < 0);
354  if (!ni->runlist.rl) {
355  read_lock_irqsave(&ni->size_lock, flags);
356  if (!ni->allocated_size) {
357  read_unlock_irqrestore(&ni->size_lock, flags);
358  return LCN_ENOENT;
359  }
360  read_unlock_irqrestore(&ni->size_lock, flags);
361  }
362 retry_remap:
363  /* Convert vcn to lcn. If that fails map the runlist and retry once. */
364  lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
365  if (likely(lcn >= LCN_HOLE)) {
366  ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
367  return lcn;
368  }
369  if (lcn != LCN_RL_NOT_MAPPED) {
370  if (lcn != LCN_ENOENT)
371  lcn = LCN_EIO;
372  } else if (!is_retry) {
373  int err;
374 
375  if (!write_locked) {
376  up_read(&ni->runlist.lock);
377  down_write(&ni->runlist.lock);
378  if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
379  LCN_RL_NOT_MAPPED)) {
380  up_write(&ni->runlist.lock);
381  down_read(&ni->runlist.lock);
382  goto retry_remap;
383  }
384  }
385  err = ntfs_map_runlist_nolock(ni, vcn, NULL);
386  if (!write_locked) {
387  up_write(&ni->runlist.lock);
388  down_read(&ni->runlist.lock);
389  }
390  if (likely(!err)) {
391  is_retry = true;
392  goto retry_remap;
393  }
394  if (err == -ENOENT)
395  lcn = LCN_ENOENT;
396  else if (err == -ENOMEM)
397  lcn = LCN_ENOMEM;
398  else
399  lcn = LCN_EIO;
400  }
401  if (lcn != LCN_ENOENT)
402  ntfs_error(ni->vol->sb, "Failed with error code %lli.",
403  (long long)lcn);
404  return lcn;
405 }
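
A sketch of the intended calling convention (hypothetical helper, not part of attrib.c): the caller holds the runlist lock and tells the function which way it is held; with @write_locked false the function may temporarily upgrade the lock itself to remap, as seen in the retry logic above.

static LCN example_read_locked_translate(ntfs_inode *ni, VCN vcn)
{
	LCN lcn;

	down_read(&ni->runlist.lock);
	/* false: held for reading; the callee upgrades if it must remap. */
	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
	up_read(&ni->runlist.lock);
	return lcn;
}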
406 
464 runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
465  ntfs_attr_search_ctx *ctx)
466 {
467  unsigned long flags;
468  runlist_element *rl;
469  int err = 0;
470  bool is_retry = false;
471 
472  BUG_ON(!ni);
473  ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
474  ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
475  BUG_ON(!NInoNonResident(ni));
476  BUG_ON(vcn < 0);
477  if (!ni->runlist.rl) {
478  read_lock_irqsave(&ni->size_lock, flags);
479  if (!ni->allocated_size) {
480  read_unlock_irqrestore(&ni->size_lock, flags);
481  return ERR_PTR(-ENOENT);
482  }
483  read_unlock_irqrestore(&ni->size_lock, flags);
484  }
485 retry_remap:
486  rl = ni->runlist.rl;
487  if (likely(rl && vcn >= rl[0].vcn)) {
488  while (likely(rl->length)) {
489  if (unlikely(vcn < rl[1].vcn)) {
490  if (likely(rl->lcn >= LCN_HOLE)) {
491  ntfs_debug("Done.");
492  return rl;
493  }
494  break;
495  }
496  rl++;
497  }
498  if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
499  if (likely(rl->lcn == LCN_ENOENT))
500  err = -ENOENT;
501  else
502  err = -EIO;
503  }
504  }
505  if (!err && !is_retry) {
506  /*
507  * If the search context is invalid we cannot map the unmapped
508  * region.
509  */
510  if (IS_ERR(ctx->mrec))
511  err = PTR_ERR(ctx->mrec);
512  else {
513  /*
514  * The @vcn is in an unmapped region, map the runlist
515  * and retry.
516  */
517  err = ntfs_map_runlist_nolock(ni, vcn, ctx);
518  if (likely(!err)) {
519  is_retry = true;
520  goto retry_remap;
521  }
522  }
523  if (err == -EINVAL)
524  err = -EIO;
525  } else if (!err)
526  err = -EIO;
527  if (err != -ENOENT)
528  ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
529  return ERR_PTR(err);
530 }
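
A hedged caller sketch (hypothetical helper, not part of attrib.c), assuming the lock ordering used by ntfs_attr_extend_allocation() below: take the runlist lock first, then map the mft record, and look the attribute up so that @ctx describes it before calling ntfs_attr_find_vcn_nolock(). Note the returned element points into ni->runlist.rl and is only stable while the runlist lock is held; a real caller would consume it before unlocking, it is returned here purely for illustration.

static runlist_element *example_find_vcn(ntfs_inode *ni, VCN vcn)
{
	ntfs_inode *base_ni = NInoAttr(ni) ? ni->ext.base_ntfs_ino : ni;
	MFT_RECORD *m;
	ntfs_attr_search_ctx *ctx;
	runlist_element *rl;
	int err;

	down_write(&ni->runlist.lock);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		rl = ERR_CAST(m);
		goto unl_err;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (!ctx) {
		rl = ERR_PTR(-ENOMEM);
		goto unm_err;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (err)
		rl = ERR_PTR(err);
	else
		rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
	ntfs_attr_put_search_ctx(ctx);
unm_err:
	unmap_mft_record(base_ni);
unl_err:
	up_write(&ni->runlist.lock);
	return rl;
}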
531 
589 static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
590  const u32 name_len, const IGNORE_CASE_BOOL ic,
591  const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
592 {
593  ATTR_RECORD *a;
594  ntfs_volume *vol = ctx->ntfs_ino->vol;
595  ntfschar *upcase = vol->upcase;
596  u32 upcase_len = vol->upcase_len;
597 
598  /*
599  * Iterate over attributes in mft record starting at @ctx->attr, or the
600  * attribute following that, if @ctx->is_first is 'true'.
601  */
602  if (ctx->is_first) {
603  a = ctx->attr;
604  ctx->is_first = false;
605  } else
606  a = (ATTR_RECORD*)((u8*)ctx->attr +
607  le32_to_cpu(ctx->attr->length));
608  for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
609  if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
610  le32_to_cpu(ctx->mrec->bytes_allocated))
611  break;
612  ctx->attr = a;
613  if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
614  a->type == AT_END))
615  return -ENOENT;
616  if (unlikely(!a->length))
617  break;
618  if (a->type != type)
619  continue;
620  /*
621  * If @name is present, compare the two names. If @name is
622  * missing, assume we want an unnamed attribute.
623  */
624  if (!name) {
625  /* The search failed if the found attribute is named. */
626  if (a->name_length)
627  return -ENOENT;
628  } else if (!ntfs_are_names_equal(name, name_len,
629  (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
630  a->name_length, ic, upcase, upcase_len)) {
631  register int rc;
632 
633  rc = ntfs_collate_names(name, name_len,
634  (ntfschar*)((u8*)a +
635  le16_to_cpu(a->name_offset)),
636  a->name_length, 1, IGNORE_CASE,
637  upcase, upcase_len);
638  /*
639  * If @name collates before a->name, there is no
640  * matching attribute.
641  */
642  if (rc == -1)
643  return -ENOENT;
644  /* If the strings are not equal, continue search. */
645  if (rc)
646  continue;
647  rc = ntfs_collate_names(name, name_len,
648  (ntfschar*)((u8*)a +
649  le16_to_cpu(a->name_offset)),
650  a->name_length, 1, CASE_SENSITIVE,
651  upcase, upcase_len);
652  if (rc == -1)
653  return -ENOENT;
654  if (rc)
655  continue;
656  }
657  /*
658  * The names match or @name not present and attribute is
659  * unnamed. If no @val specified, we have found the attribute
660  * and are done.
661  */
662  if (!val)
663  return 0;
664  /* @val is present; compare values. */
665  else {
666  register int rc;
667 
668  rc = memcmp(val, (u8*)a + le16_to_cpu(
669  a->data.resident.value_offset),
670  min_t(u32, val_len, le32_to_cpu(
671  a->data.resident.value_length)));
672  /*
673  * If @val collates before the current attribute's
674  * value, there is no matching attribute.
675  */
676  if (!rc) {
677  register u32 avl;
678 
679  avl = le32_to_cpu(
680  a->data.resident.value_length);
681  if (val_len == avl)
682  return 0;
683  if (val_len < avl)
684  return -ENOENT;
685  } else if (rc < 0)
686  return -ENOENT;
687  }
688  }
689  ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk.");
690  NVolSetErrors(vol);
691  return -EIO;
692 }
693 
710 int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
711  const s64 size, const s64 initialized_size)
712 {
713  LCN lcn;
714  u8 *al = al_start;
715  u8 *al_end = al + initialized_size;
716  runlist_element *rl;
717  struct buffer_head *bh;
718  struct super_block *sb;
719  unsigned long block_size;
720  unsigned long block, max_block;
721  int err = 0;
722  unsigned char block_size_bits;
723 
724  ntfs_debug("Entering.");
725  if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
726  initialized_size > size)
727  return -EINVAL;
728  if (!initialized_size) {
729  memset(al, 0, size);
730  return 0;
731  }
732  sb = vol->sb;
733  block_size = sb->s_blocksize;
734  block_size_bits = sb->s_blocksize_bits;
735  down_read(&runlist->lock);
736  rl = runlist->rl;
737  if (!rl) {
738  ntfs_error(sb, "Cannot read attribute list since runlist is "
739  "missing.");
740  goto err_out;
741  }
742  /* Read all clusters specified by the runlist one run at a time. */
743  while (rl->length) {
744  lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
745  ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
746  (unsigned long long)rl->vcn,
747  (unsigned long long)lcn);
748  /* The attribute list cannot be sparse. */
749  if (lcn < 0) {
750  ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
751  "read attribute list.");
752  goto err_out;
753  }
754  block = lcn << vol->cluster_size_bits >> block_size_bits;
755  /* Read the run from device in chunks of block_size bytes. */
756  max_block = block + (rl->length << vol->cluster_size_bits >>
757  block_size_bits);
758  ntfs_debug("max_block = 0x%lx.", max_block);
759  do {
760  ntfs_debug("Reading block = 0x%lx.", block);
761  bh = sb_bread(sb, block);
762  if (!bh) {
763  ntfs_error(sb, "sb_bread() failed. Cannot "
764  "read attribute list.");
765  goto err_out;
766  }
767  if (al + block_size >= al_end)
768  goto do_final;
769  memcpy(al, bh->b_data, block_size);
770  brelse(bh);
771  al += block_size;
772  } while (++block < max_block);
773  rl++;
774  }
775  if (initialized_size < size) {
776 initialize:
777  memset(al_start + initialized_size, 0, size - initialized_size);
778  }
779 done:
780  up_read(&runlist->lock);
781  return err;
782 do_final:
783  if (al < al_end) {
784  /*
785  * Partial block.
786  *
787  * Note: The attribute list can be smaller than its allocation
788  * by multiple clusters. This has been encountered by at least
789  * two people running Windows XP, thus we cannot do any
790  * truncation sanity checking here. (AIA)
791  */
792  memcpy(al, bh->b_data, al_end - al);
793  brelse(bh);
794  if (initialized_size < size)
795  goto initialize;
796  goto done;
797  }
798  brelse(bh);
799  /* Real overflow! */
800  ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
801  "is truncated.");
802 err_out:
803  err = -EIO;
804  goto done;
805 }
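
A hedged sketch of a caller (hypothetical; the real caller lives in inode.c): allocate a buffer for the base inode's attribute list and fill it from the $ATTRIBUTE_LIST runlist. The attr_list and attr_list_size fields appear elsewhere in this file; attr_list_rl and the use of attr_list_size for both @size and @initialized_size are assumptions of this sketch.

static int example_load_attr_list(ntfs_volume *vol, ntfs_inode *base_ni)
{
	int err;

	base_ni->attr_list = ntfs_malloc_nofs(base_ni->attr_list_size);
	if (!base_ni->attr_list)
		return -ENOMEM;
	err = load_attribute_list(vol, &base_ni->attr_list_rl,
			base_ni->attr_list, base_ni->attr_list_size,
			base_ni->attr_list_size);
	if (err) {
		ntfs_free(base_ni->attr_list);
		base_ni->attr_list = NULL;
	}
	return err;
}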
806 
857 static int ntfs_external_attr_find(const ATTR_TYPE type,
858  const ntfschar *name, const u32 name_len,
859  const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
860  const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
861 {
862  ntfs_inode *base_ni, *ni;
863  ntfs_volume *vol;
864  ATTR_LIST_ENTRY *al_entry, *next_al_entry;
865  u8 *al_start, *al_end;
866  ATTR_RECORD *a;
867  ntfschar *al_name;
868  u32 al_name_len;
869  int err = 0;
870  static const char *es = " Unmount and run chkdsk.";
871 
872  ni = ctx->ntfs_ino;
873  base_ni = ctx->base_ntfs_ino;
874  ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
875  if (!base_ni) {
876  /* First call happens with the base mft record. */
877  base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
878  ctx->base_mrec = ctx->mrec;
879  }
880  if (ni == base_ni)
881  ctx->base_attr = ctx->attr;
882  if (type == AT_END)
883  goto not_found;
884  vol = base_ni->vol;
885  al_start = base_ni->attr_list;
886  al_end = al_start + base_ni->attr_list_size;
887  if (!ctx->al_entry)
888  ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
889  /*
890  * Iterate over entries in attribute list starting at @ctx->al_entry,
891  * or the entry following that, if @ctx->is_first is 'true'.
892  */
893  if (ctx->is_first) {
894  al_entry = ctx->al_entry;
895  ctx->is_first = false;
896  } else
897  al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
898  le16_to_cpu(ctx->al_entry->length));
899  for (;; al_entry = next_al_entry) {
900  /* Out of bounds check. */
901  if ((u8*)al_entry < base_ni->attr_list ||
902  (u8*)al_entry > al_end)
903  break; /* Inode is corrupt. */
904  ctx->al_entry = al_entry;
905  /* Catch the end of the attribute list. */
906  if ((u8*)al_entry == al_end)
907  goto not_found;
908  if (!al_entry->length)
909  break;
910  if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
911  le16_to_cpu(al_entry->length) > al_end)
912  break;
913  next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
914  le16_to_cpu(al_entry->length));
915  if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
916  goto not_found;
917  if (type != al_entry->type)
918  continue;
919  /*
920  * If @name is present, compare the two names. If @name is
921  * missing, assume we want an unnamed attribute.
922  */
923  al_name_len = al_entry->name_length;
924  al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
925  if (!name) {
926  if (al_name_len)
927  goto not_found;
928  } else if (!ntfs_are_names_equal(al_name, al_name_len, name,
929  name_len, ic, vol->upcase, vol->upcase_len)) {
930  register int rc;
931 
932  rc = ntfs_collate_names(name, name_len, al_name,
933  al_name_len, 1, IGNORE_CASE,
934  vol->upcase, vol->upcase_len);
935  /*
936  * If @name collates before al_name, there is no
937  * matching attribute.
938  */
939  if (rc == -1)
940  goto not_found;
941  /* If the strings are not equal, continue search. */
942  if (rc)
943  continue;
944  /*
945  * FIXME: Reverse engineering showed 0, IGNORE_CASE but
946  * that is inconsistent with ntfs_attr_find(). The
947  * subsequent rc checks were also different. Perhaps I
948  * made a mistake in one of the two. Need to recheck
949  * which is correct or at least see what is going on...
950  * (AIA)
951  */
952  rc = ntfs_collate_names(name, name_len, al_name,
953  al_name_len, 1, CASE_SENSITIVE,
954  vol->upcase, vol->upcase_len);
955  if (rc == -1)
956  goto not_found;
957  if (rc)
958  continue;
959  }
960  /*
961  * The names match or @name not present and attribute is
962  * unnamed. Now check @lowest_vcn. Continue search if the
963  * next attribute list entry still fits @lowest_vcn. Otherwise
964  * we have reached the right one or the search has failed.
965  */
966  if (lowest_vcn && (u8*)next_al_entry >= al_start &&
967  (u8*)next_al_entry + 6 < al_end &&
968  (u8*)next_al_entry + le16_to_cpu(
969  next_al_entry->length) <= al_end &&
970  sle64_to_cpu(next_al_entry->lowest_vcn) <=
971  lowest_vcn &&
972  next_al_entry->type == al_entry->type &&
973  next_al_entry->name_length == al_name_len &&
974  ntfs_are_names_equal((ntfschar*)((u8*)
975  next_al_entry +
976  next_al_entry->name_offset),
977  next_al_entry->name_length,
978  al_name, al_name_len, CASE_SENSITIVE,
979  vol->upcase, vol->upcase_len))
980  continue;
981  if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
982  if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
983  ntfs_error(vol->sb, "Found stale mft "
984  "reference in attribute list "
985  "of base inode 0x%lx.%s",
986  base_ni->mft_no, es);
987  err = -EIO;
988  break;
989  }
990  } else { /* Mft references do not match. */
991  /* If there is a mapped record unmap it first. */
992  if (ni != base_ni)
993  unmap_extent_mft_record(ni);
994  /* Do we want the base record back? */
995  if (MREF_LE(al_entry->mft_reference) ==
996  base_ni->mft_no) {
997  ni = ctx->ntfs_ino = base_ni;
998  ctx->mrec = ctx->base_mrec;
999  } else {
1000  /* We want an extent record. */
1001  ctx->mrec = map_extent_mft_record(base_ni,
1002  le64_to_cpu(
1003  al_entry->mft_reference), &ni);
1004  if (IS_ERR(ctx->mrec)) {
1005  ntfs_error(vol->sb, "Failed to map "
1006  "extent mft record "
1007  "0x%lx of base inode "
1008  "0x%lx.%s",
1009  MREF_LE(al_entry->
1010  mft_reference),
1011  base_ni->mft_no, es);
1012  err = PTR_ERR(ctx->mrec);
1013  if (err == -ENOENT)
1014  err = -EIO;
1015  /* Cause @ctx to be sanitized below. */
1016  ni = NULL;
1017  break;
1018  }
1019  ctx->ntfs_ino = ni;
1020  }
1021  ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1022  le16_to_cpu(ctx->mrec->attrs_offset));
1023  }
1024  /*
1025  * ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
1026  * mft record containing the attribute represented by the
1027  * current al_entry.
1028  */
1029  /*
1030  * We could call into ntfs_attr_find() to find the right
1031  * attribute in this mft record but this would be less
1032  * efficient and not quite accurate as ntfs_attr_find() ignores
1033  * the attribute instance numbers for example which become
1034  * important when one plays with attribute lists. Also,
1035  * because a proper match has been found in the attribute list
1036  * entry above, the comparison can now be optimized. So it is
1037  * worth re-implementing a simplified ntfs_attr_find() here.
1038  */
1039  a = ctx->attr;
1040  /*
1041  * Use a manual loop so we can still use break and continue
1042  * with the same meanings as above.
1043  */
1044 do_next_attr_loop:
1045  if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
1046  le32_to_cpu(ctx->mrec->bytes_allocated))
1047  break;
1048  if (a->type == AT_END)
1049  break;
1050  if (!a->length)
1051  break;
1052  if (al_entry->instance != a->instance)
1053  goto do_next_attr;
1054  /*
1055  * If the type and/or the name are mismatched between the
1056  * attribute list entry and the attribute record, there is
1057  * corruption so we break and return error EIO.
1058  */
1059  if (al_entry->type != a->type)
1060  break;
1061  if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
1062  le16_to_cpu(a->name_offset)), a->name_length,
1063  al_name, al_name_len, CASE_SENSITIVE,
1064  vol->upcase, vol->upcase_len))
1065  break;
1066  ctx->attr = a;
1067  /*
1068  * If no @val specified or @val specified and it matches, we
1069  * have found it!
1070  */
1071  if (!val || (!a->non_resident && le32_to_cpu(
1072  a->data.resident.value_length) == val_len &&
1073  !memcmp((u8*)a +
1074  le16_to_cpu(a->data.resident.value_offset),
1075  val, val_len))) {
1076  ntfs_debug("Done, found.");
1077  return 0;
1078  }
1079 do_next_attr:
1080  /* Proceed to the next attribute in the current mft record. */
1081  a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
1082  goto do_next_attr_loop;
1083  }
1084  if (!err) {
1085  ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
1086  "attribute list attribute.%s", base_ni->mft_no,
1087  es);
1088  err = -EIO;
1089  }
1090  if (ni != base_ni) {
1091  if (ni)
1092  unmap_extent_mft_record(ni);
1093  ctx->ntfs_ino = base_ni;
1094  ctx->mrec = ctx->base_mrec;
1095  ctx->attr = ctx->base_attr;
1096  }
1097  if (err != -ENOMEM)
1098  NVolSetErrors(vol);
1099  return err;
1100 not_found:
1101  /*
1102  * If we were looking for AT_END, we reset the search context @ctx and
1103  * use ntfs_attr_find() to seek to the end of the base mft record.
1104  */
1105  if (type == AT_END) {
1106  ntfs_attr_reinit_search_ctx(ctx);
1107  return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
1108  ctx);
1109  }
1110  /*
1111  * The attribute was not found. Before we return, we want to ensure
1112  * @ctx->mrec and @ctx->attr indicate the position at which the
1113  * attribute should be inserted in the base mft record. Since we also
1114  * want to preserve @ctx->al_entry we cannot reinitialize the search
1115  * context using ntfs_attr_reinit_search_ctx() as this would set
1116  * @ctx->al_entry to NULL. Thus we do the necessary bits manually (see
1117  * ntfs_attr_init_search_ctx() below). Note, we _only_ preserve
1118  * @ctx->al_entry as the remaining fields (base_*) are identical to
1119  * their non base_ counterparts and we cannot set @ctx->base_attr
1120  * correctly yet as we do not know what @ctx->attr will be set to by
1121  * the call to ntfs_attr_find() below.
1122  */
1123  if (ni != base_ni)
1124  unmap_extent_mft_record(ni);
1125  ctx->mrec = ctx->base_mrec;
1126  ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1127  le16_to_cpu(ctx->mrec->attrs_offset));
1128  ctx->is_first = true;
1129  ctx->ntfs_ino = base_ni;
1130  ctx->base_ntfs_ino = NULL;
1131  ctx->base_mrec = NULL;
1132  ctx->base_attr = NULL;
1133  /*
1134  * In case there are multiple matches in the base mft record, need to
1135  * keep enumerating until we get an attribute not found response (or
1136  * another error), otherwise we would keep returning the same attribute
1137  * over and over again and all programs using us for enumeration would
1138  * lock up in a tight loop.
1139  */
1140  do {
1141  err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
1142  ctx);
1143  } while (!err);
1144  ntfs_debug("Done, not found.");
1145  return err;
1146 }
1147 
1187 int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
1188  const u32 name_len, const IGNORE_CASE_BOOL ic,
1189  const VCN lowest_vcn, const u8 *val, const u32 val_len,
1190  ntfs_attr_search_ctx *ctx)
1191 {
1192  ntfs_inode *base_ni;
1193 
1194  ntfs_debug("Entering.");
1195  BUG_ON(IS_ERR(ctx->mrec));
1196  if (ctx->base_ntfs_ino)
1197  base_ni = ctx->base_ntfs_ino;
1198  else
1199  base_ni = ctx->ntfs_ino;
1200  /* Sanity check, just for debugging really. */
1201  BUG_ON(!base_ni);
1202  if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
1203  return ntfs_attr_find(type, name, name_len, ic, val, val_len,
1204  ctx);
1205  return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
1206  val, val_len, ctx);
1207 }
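
The canonical calling pattern (hypothetical helper shown; compare the lookups in ntfs_attr_make_non_resident() below): map the base mft record, get a search context, look the attribute up, use ctx->attr, then release everything in reverse order. This sketch reads the start of an unnamed resident $DATA value.

static int example_read_resident_data(ntfs_inode *ni, u8 *buf, u32 buf_len)
{
	ntfs_inode *base_ni = NInoAttr(ni) ? ni->ext.base_ntfs_ino : ni;
	MFT_RECORD *m;
	ntfs_attr_search_ctx *ctx;
	ATTR_RECORD *a;
	u32 len;
	int err;

	m = map_mft_record(base_ni);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (!ctx) {
		err = -ENOMEM;
		goto unm_err;
	}
	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
			ctx);
	if (err)
		goto put_err;
	a = ctx->attr;
	if (a->non_resident) {
		err = -EINVAL;
		goto put_err;
	}
	len = min(buf_len, le32_to_cpu(a->data.resident.value_length));
	memcpy(buf, (u8*)a + le16_to_cpu(a->data.resident.value_offset), len);
put_err:
	ntfs_attr_put_search_ctx(ctx);
unm_err:
	unmap_mft_record(base_ni);
	return err;
}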
1208 
1217 static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
1218  ntfs_inode *ni, MFT_RECORD *mrec)
1219 {
1220  *ctx = (ntfs_attr_search_ctx) {
1221  .mrec = mrec,
1222  /* Sanity checks are performed elsewhere. */
1223  .attr = (ATTR_RECORD*)((u8*)mrec +
1224  le16_to_cpu(mrec->attrs_offset)),
1225  .is_first = true,
1226  .ntfs_ino = ni,
1227  };
1228 }
1229 
1240 void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
1241 {
1242  if (likely(!ctx->base_ntfs_ino)) {
1243  /* No attribute list. */
1244  ctx->is_first = true;
1245  /* Sanity checks are performed elsewhere. */
1246  ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1247  le16_to_cpu(ctx->mrec->attrs_offset));
1248  /*
1249  * This needs resetting due to ntfs_external_attr_find() which
1250  * can leave it set despite having zeroed ctx->base_ntfs_ino.
1251  */
1252  ctx->al_entry = NULL;
1253  return;
1254  } /* Attribute list. */
1255  if (ctx->ntfs_ino != ctx->base_ntfs_ino)
1256  unmap_extent_mft_record(ctx->ntfs_ino);
1257  ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
1258  return;
1259 }
1260 
1269 ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
1270 {
1271  ntfs_attr_search_ctx *ctx;
1272 
1273  ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
1274  if (ctx)
1275  ntfs_attr_init_search_ctx(ctx, ni, mrec);
1276  return ctx;
1277 }
1278 
1286 void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
1287 {
1288  if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
1289  unmap_extent_mft_record(ctx->ntfs_ino);
1290  kmem_cache_free(ntfs_attr_ctx_cache, ctx);
1291  return;
1292 }
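
A lifecycle sketch (hypothetical helper, not part of attrib.c): one context can serve several lookups in the same mft record if it is reinitialized in between, which is cheaper than allocating a fresh context from the slab cache each time.

static int example_two_lookups(ntfs_inode *base_ni, MFT_RECORD *m)
{
	ntfs_attr_search_ctx *ctx;
	int err;

	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (!ctx)
		return -ENOMEM;
	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (!err) {
		/* ctx->attr points at $STANDARD_INFORMATION here. */
		ntfs_attr_reinit_search_ctx(ctx);
		err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE,
				0, NULL, 0, ctx);
		/* On success ctx->attr points at the first $FILE_NAME. */
	}
	ntfs_attr_put_search_ctx(ctx);
	return err;
}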
1293 
1294 #ifdef NTFS_RW
1295 
1306 static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
1307  const ATTR_TYPE type)
1308 {
1309  ATTR_DEF *ad;
1310 
1311  BUG_ON(!vol->attrdef);
1312  BUG_ON(!type);
1313  for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
1314  vol->attrdef_size && ad->type; ++ad) {
1315  /* We have not found it yet, carry on searching. */
1316  if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
1317  continue;
1318  /* We found the attribute; return it. */
1319  if (likely(ad->type == type))
1320  return ad;
1321  /* We have gone too far already. No point in continuing. */
1322  break;
1323  }
1324  /* Attribute not found. */
1325  ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
1326  le32_to_cpu(type));
1327  return NULL;
1328 }
1329 
1342 int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
1343  const s64 size)
1344 {
1345  ATTR_DEF *ad;
1346 
1347  BUG_ON(size < 0);
1348  /*
1349  * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
1350  * listed in $AttrDef.
1351  */
1352  if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
1353  return -ERANGE;
1354  /* Get the $AttrDef entry for the attribute @type. */
1355  ad = ntfs_attr_find_in_attrdef(vol, type);
1356  if (unlikely(!ad))
1357  return -ENOENT;
1358  /* Do the bounds check. */
1359  if (((sle64_to_cpu(ad->min_size) > 0) &&
1360  size < sle64_to_cpu(ad->min_size)) ||
1361  ((sle64_to_cpu(ad->max_size) > 0) && size >
1362  sle64_to_cpu(ad->max_size)))
1363  return -ERANGE;
1364  return 0;
1365 }
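
A short sketch (hypothetical helper) of the error translation callers typically perform, mirroring ntfs_attr_extend_allocation() below: -ERANGE becomes -EFBIG for POSIX write(2) conformance, and a type missing from $AttrDef is treated as corruption.

static int example_check_new_size(ntfs_volume *vol, ntfs_inode *ni,
		s64 new_size)
{
	int err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);

	if (err == -ERANGE)
		err = -EFBIG;	/* Attribute would exceed its maximum size. */
	else if (err == -ENOENT)
		err = -EIO;	/* Type not in $AttrDef: corrupt volume. */
	return err;
}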
1366 
1378 int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1379 {
1380  ATTR_DEF *ad;
1381 
1382  /* Find the attribute definition record in $AttrDef. */
1383  ad = ntfs_attr_find_in_attrdef(vol, type);
1384  if (unlikely(!ad))
1385  return -ENOENT;
1386  /* Check the flags and return the result. */
1387  if (ad->flags & ATTR_DEF_RESIDENT)
1388  return -EPERM;
1389  return 0;
1390 }
1391 
1410 int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1411 {
1412  if (type == AT_INDEX_ALLOCATION)
1413  return -EPERM;
1414  return 0;
1415 }
1416 
1435 int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
1436 {
1437  ntfs_debug("Entering for new_size %u.", new_size);
1438  /* Align to 8 bytes if it is not already done. */
1439  if (new_size & 7)
1440  new_size = (new_size + 7) & ~7;
1441  /* If the actual attribute length has changed, move things around. */
1442  if (new_size != le32_to_cpu(a->length)) {
1443  u32 new_muse = le32_to_cpu(m->bytes_in_use) -
1444  le32_to_cpu(a->length) + new_size;
1445  /* Not enough space in this mft record. */
1446  if (new_muse > le32_to_cpu(m->bytes_allocated))
1447  return -ENOSPC;
1448  /* Move attributes following @a to their new location. */
1449  memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
1450  le32_to_cpu(m->bytes_in_use) - ((u8*)a -
1451  (u8*)m) - le32_to_cpu(a->length));
1452  /* Adjust @m to reflect the change in used space. */
1453  m->bytes_in_use = cpu_to_le32(new_muse);
1454  /* Adjust @a to reflect the new size. */
1455  if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
1456  a->length = cpu_to_le32(new_size);
1457  }
1458  return 0;
1459 }
1460 
1479 int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
1480  const u32 new_size)
1481 {
1482  u32 old_size;
1483 
1484  /* Resize the resident part of the attribute record. */
1485  if (ntfs_attr_record_resize(m, a,
1486  le16_to_cpu(a->data.resident.value_offset) + new_size))
1487  return -ENOSPC;
1488  /*
1489  * The resize succeeded! If we made the attribute value bigger, clear
1490  * the area between the old size and @new_size.
1491  */
1492  old_size = le32_to_cpu(a->data.resident.value_length);
1493  if (new_size > old_size)
1494  memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
1495  old_size, 0, new_size - old_size);
1496  /* Finally update the length of the attribute value. */
1497  a->data.resident.value_length = cpu_to_le32(new_size);
1498  return 0;
1499 }
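
A hedged usage sketch (hypothetical helper, not part of attrib.c): after a successful lookup of a resident attribute, resize its value in place and mark the mft record dirty; -ENOSPC tells the caller to fall back to converting the attribute with ntfs_attr_make_non_resident() below.

static int example_grow_resident_value(ntfs_attr_search_ctx *ctx, u32 new_size)
{
	int err;

	/* @ctx must hold a successful lookup of a resident attribute. */
	err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, new_size);
	if (!err) {
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
	}
	return err;	/* -ENOSPC: convert the attribute to non-resident. */
}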
1500 
1535 int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1536 {
1537  s64 new_size;
1538  struct inode *vi = VFS_I(ni);
1539  ntfs_volume *vol = ni->vol;
1540  ntfs_inode *base_ni;
1541  MFT_RECORD *m;
1542  ATTR_RECORD *a;
1543  ntfs_attr_search_ctx *ctx;
1544  struct page *page;
1545  runlist_element *rl;
1546  u8 *kaddr;
1547  unsigned long flags;
1548  int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
1549  u32 attr_size;
1550  u8 old_res_attr_flags;
1551 
1552  /* Check that the attribute is allowed to be non-resident. */
1553  err = ntfs_attr_can_be_non_resident(vol, ni->type);
1554  if (unlikely(err)) {
1555  if (err == -EPERM)
1556  ntfs_debug("Attribute is not allowed to be "
1557  "non-resident.");
1558  else
1559  ntfs_debug("Attribute not defined on the NTFS "
1560  "volume!");
1561  return err;
1562  }
1563  /*
1564  * FIXME: Compressed and encrypted attributes are not supported when
1565  * writing and we should never have gotten here for them.
1566  */
1567  BUG_ON(NInoCompressed(ni));
1568  BUG_ON(NInoEncrypted(ni));
1569  /*
1570  * The size needs to be aligned to a cluster boundary for allocation
1571  * purposes.
1572  */
1573  new_size = (data_size + vol->cluster_size - 1) &
1574  ~(vol->cluster_size - 1);
1575  if (new_size > 0) {
1576  /*
1577  * Will need the page later and since the page lock nests
1578  * outside all ntfs locks, we need to get the page now.
1579  */
1580  page = find_or_create_page(vi->i_mapping, 0,
1581  mapping_gfp_mask(vi->i_mapping));
1582  if (unlikely(!page))
1583  return -ENOMEM;
1584  /* Start by allocating clusters to hold the attribute value. */
1585  rl = ntfs_cluster_alloc(vol, 0, new_size >>
1586  vol->cluster_size_bits, -1, DATA_ZONE, true);
1587  if (IS_ERR(rl)) {
1588  err = PTR_ERR(rl);
1589  ntfs_debug("Failed to allocate cluster%s, error code "
1590  "%i.", (new_size >>
1591  vol->cluster_size_bits) > 1 ? "s" : "",
1592  err);
1593  goto page_err_out;
1594  }
1595  } else {
1596  rl = NULL;
1597  page = NULL;
1598  }
1599  /* Determine the size of the mapping pairs array. */
1600  mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
1601  if (unlikely(mp_size < 0)) {
1602  err = mp_size;
1603  ntfs_debug("Failed to get size for mapping pairs array, error "
1604  "code %i.", err);
1605  goto rl_err_out;
1606  }
1607  down_write(&ni->runlist.lock);
1608  if (!NInoAttr(ni))
1609  base_ni = ni;
1610  else
1611  base_ni = ni->ext.base_ntfs_ino;
1612  m = map_mft_record(base_ni);
1613  if (IS_ERR(m)) {
1614  err = PTR_ERR(m);
1615  m = NULL;
1616  ctx = NULL;
1617  goto err_out;
1618  }
1619  ctx = ntfs_attr_get_search_ctx(base_ni, m);
1620  if (unlikely(!ctx)) {
1621  err = -ENOMEM;
1622  goto err_out;
1623  }
1624  err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1625  CASE_SENSITIVE, 0, NULL, 0, ctx);
1626  if (unlikely(err)) {
1627  if (err == -ENOENT)
1628  err = -EIO;
1629  goto err_out;
1630  }
1631  m = ctx->mrec;
1632  a = ctx->attr;
1633  BUG_ON(NInoNonResident(ni));
1634  BUG_ON(a->non_resident);
1635  /*
1636  * Calculate new offsets for the name and the mapping pairs array.
1637  */
1638  if (NInoSparse(ni) || NInoCompressed(ni))
1639  name_ofs = (offsetof(ATTR_REC,
1640  data.non_resident.compressed_size) +
1641  sizeof(a->data.non_resident.compressed_size) +
1642  7) & ~7;
1643  else
1644  name_ofs = (offsetof(ATTR_REC,
1645  data.non_resident.compressed_size) + 7) & ~7;
1646  mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1647  /*
1648  * Determine the size of the resident part of the now non-resident
1649  * attribute record.
1650  */
1651  arec_size = (mp_ofs + mp_size + 7) & ~7;
1652  /*
1653  * If the page is not uptodate bring it uptodate by copying from the
1654  * attribute value.
1655  */
1656  attr_size = le32_to_cpu(a->data.resident.value_length);
1657  BUG_ON(attr_size != data_size);
1658  if (page && !PageUptodate(page)) {
1659  kaddr = kmap_atomic(page);
1660  memcpy(kaddr, (u8*)a +
1661  le16_to_cpu(a->data.resident.value_offset),
1662  attr_size);
1663  memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
1664  kunmap_atomic(kaddr);
1665  flush_dcache_page(page);
1666  SetPageUptodate(page);
1667  }
1668  /* Backup the attribute flag. */
1669  old_res_attr_flags = a->data.resident.flags;
1670  /* Resize the resident part of the attribute record. */
1671  err = ntfs_attr_record_resize(m, a, arec_size);
1672  if (unlikely(err))
1673  goto err_out;
1674  /*
1675  * Convert the resident part of the attribute record to describe a
1676  * non-resident attribute.
1677  */
1678  a->non_resident = 1;
1679  /* Move the attribute name if it exists and update the offset. */
1680  if (a->name_length)
1681  memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1682  a->name_length * sizeof(ntfschar));
1683  a->name_offset = cpu_to_le16(name_ofs);
1684  /* Setup the fields specific to non-resident attributes. */
1685  a->data.non_resident.lowest_vcn = 0;
1686  a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
1687  vol->cluster_size_bits);
1688  a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
1689  memset(&a->data.non_resident.reserved, 0,
1690  sizeof(a->data.non_resident.reserved));
1691  a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
1692  a->data.non_resident.data_size =
1693  a->data.non_resident.initialized_size =
1694  cpu_to_sle64(attr_size);
1695  if (NInoSparse(ni) || NInoCompressed(ni)) {
1696  a->data.non_resident.compression_unit = 0;
1697  if (NInoCompressed(ni) || vol->major_ver < 3)
1698  a->data.non_resident.compression_unit = 4;
1699  a->data.non_resident.compressed_size =
1700  a->data.non_resident.allocated_size;
1701  } else
1702  a->data.non_resident.compression_unit = 0;
1703  /* Generate the mapping pairs array into the attribute record. */
1704  err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
1705  arec_size - mp_ofs, rl, 0, -1, NULL);
1706  if (unlikely(err)) {
1707  ntfs_debug("Failed to build mapping pairs, error code %i.",
1708  err);
1709  goto undo_err_out;
1710  }
1711  /* Setup the in-memory attribute structure to be non-resident. */
1712  ni->runlist.rl = rl;
1713  write_lock_irqsave(&ni->size_lock, flags);
1714  ni->allocated_size = new_size;
1715  if (NInoSparse(ni) || NInoCompressed(ni)) {
1716  ni->itype.compressed.size = ni->allocated_size;
1717  if (a->data.non_resident.compression_unit) {
1718  ni->itype.compressed.block_size = 1U << (a->data.
1719  non_resident.compression_unit +
1720  vol->cluster_size_bits);
1721  ni->itype.compressed.block_size_bits =
1722  ffs(ni->itype.compressed.block_size) -
1723  1;
1724  ni->itype.compressed.block_clusters = 1U <<
1725  a->data.non_resident.compression_unit;
1726  } else {
1727  ni->itype.compressed.block_size = 0;
1728  ni->itype.compressed.block_size_bits = 0;
1729  ni->itype.compressed.block_clusters = 0;
1730  }
1731  vi->i_blocks = ni->itype.compressed.size >> 9;
1732  } else
1733  vi->i_blocks = ni->allocated_size >> 9;
1734  write_unlock_irqrestore(&ni->size_lock, flags);
1735  /*
1736  * This needs to be last since the address space operations ->readpage
1737  * and ->writepage can run concurrently with us as they are not
1738  * serialized on i_mutex. Note, we are not allowed to fail once we flip
1739  * this switch, which is another reason to do this last.
1740  */
1741  NInoSetNonResident(ni);
1742  /* Mark the mft record dirty, so it gets written back. */
1743  flush_dcache_mft_record_page(ctx->ntfs_ino);
1744  mark_mft_record_dirty(ctx->ntfs_ino);
1745  ntfs_attr_put_search_ctx(ctx);
1746  unmap_mft_record(base_ni);
1747  up_write(&ni->runlist.lock);
1748  if (page) {
1749  set_page_dirty(page);
1750  unlock_page(page);
1751  mark_page_accessed(page);
1752  page_cache_release(page);
1753  }
1754  ntfs_debug("Done.");
1755  return 0;
1756 undo_err_out:
1757  /* Convert the attribute back into a resident attribute. */
1758  a->non_resident = 0;
1759  /* Move the attribute name if it exists and update the offset. */
1760  name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
1761  sizeof(a->data.resident.reserved) + 7) & ~7;
1762  if (a->name_length)
1763  memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1764  a->name_length * sizeof(ntfschar));
1765  mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1766  a->name_offset = cpu_to_le16(name_ofs);
1767  arec_size = (mp_ofs + attr_size + 7) & ~7;
1768  /* Resize the resident part of the attribute record. */
1769  err2 = ntfs_attr_record_resize(m, a, arec_size);
1770  if (unlikely(err2)) {
1771  /*
1772  * This cannot happen (well if memory corruption is at work it
1773  * could happen in theory), but deal with it as well as we can.
1774  * If the old size is too small, truncate the attribute,
1775  * otherwise simply give it a larger allocated size.
1776  * FIXME: Should check whether chkdsk complains when the
1777  * allocated size is much bigger than the resident value size.
1778  */
1779  arec_size = le32_to_cpu(a->length);
1780  if ((mp_ofs + attr_size) > arec_size) {
1781  err2 = attr_size;
1782  attr_size = arec_size - mp_ofs;
1783  ntfs_error(vol->sb, "Failed to undo partial resident "
1784  "to non-resident attribute "
1785  "conversion. Truncating inode 0x%lx, "
1786  "attribute type 0x%x from %i bytes to "
1787  "%i bytes to maintain metadata "
1788  "consistency. THIS MEANS YOU ARE "
1789  "LOSING %i BYTES DATA FROM THIS %s.",
1790  vi->i_ino,
1791  (unsigned)le32_to_cpu(ni->type),
1792  err2, attr_size, err2 - attr_size,
1793  ((ni->type == AT_DATA) &&
1794  !ni->name_len) ? "FILE": "ATTRIBUTE");
1795  write_lock_irqsave(&ni->size_lock, flags);
1796  ni->initialized_size = attr_size;
1797  i_size_write(vi, attr_size);
1798  write_unlock_irqrestore(&ni->size_lock, flags);
1799  }
1800  }
1801  /* Setup the fields specific to resident attributes. */
1802  a->data.resident.value_length = cpu_to_le32(attr_size);
1803  a->data.resident.value_offset = cpu_to_le16(mp_ofs);
1804  a->data.resident.flags = old_res_attr_flags;
1805  memset(&a->data.resident.reserved, 0,
1806  sizeof(a->data.resident.reserved));
1807  /* Copy the data from the page back to the attribute value. */
1808  if (page) {
1809  kaddr = kmap_atomic(page);
1810  memcpy((u8*)a + mp_ofs, kaddr, attr_size);
1811  kunmap_atomic(kaddr);
1812  }
1813  /* Setup the allocated size in the ntfs inode in case it changed. */
1814  write_lock_irqsave(&ni->size_lock, flags);
1815  ni->allocated_size = arec_size - mp_ofs;
1816  write_unlock_irqrestore(&ni->size_lock, flags);
1817  /* Mark the mft record dirty, so it gets written back. */
1818  flush_dcache_mft_record_page(ctx->ntfs_ino);
1819  mark_mft_record_dirty(ctx->ntfs_ino);
1820 err_out:
1821  if (ctx)
1822  ntfs_attr_put_search_ctx(ctx);
1823  if (m)
1824  unmap_mft_record(base_ni);
1825  ni->runlist.rl = NULL;
1826  up_write(&ni->runlist.lock);
1827 rl_err_out:
1828  if (rl) {
1829  if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
1830  ntfs_error(vol->sb, "Failed to release allocated "
1831  "cluster(s) in error code path. Run "
1832  "chkdsk to recover the lost "
1833  "cluster(s).");
1834  NVolSetErrors(vol);
1835  }
1836  ntfs_free(rl);
1837 page_err_out:
1838  unlock_page(page);
1839  page_cache_release(page);
1840  }
1841  if (err == -EINVAL)
1842  err = -EIO;
1843  return err;
1844 }
1845 
1905 s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
1906  const s64 new_data_size, const s64 data_start)
1907 {
1908  VCN vcn;
1909  s64 ll, allocated_size, start = data_start;
1910  struct inode *vi = VFS_I(ni);
1911  ntfs_volume *vol = ni->vol;
1912  ntfs_inode *base_ni;
1913  MFT_RECORD *m;
1914  ATTR_RECORD *a;
1915  ntfs_attr_search_ctx *ctx;
1916  runlist_element *rl, *rl2;
1917  unsigned long flags;
1918  int err, mp_size;
1919  u32 attr_len = 0; /* Silence stupid gcc warning. */
1920  bool mp_rebuilt;
1921 
1922 #ifdef DEBUG
1923  read_lock_irqsave(&ni->size_lock, flags);
1924  allocated_size = ni->allocated_size;
1925  read_unlock_irqrestore(&ni->size_lock, flags);
1926  ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1927  "old_allocated_size 0x%llx, "
1928  "new_allocated_size 0x%llx, new_data_size 0x%llx, "
1929  "data_start 0x%llx.", vi->i_ino,
1930  (unsigned)le32_to_cpu(ni->type),
1931  (unsigned long long)allocated_size,
1932  (unsigned long long)new_alloc_size,
1933  (unsigned long long)new_data_size,
1934  (unsigned long long)start);
1935 #endif
1936 retry_extend:
1937  /*
1938  * For non-resident attributes, @start and @new_size need to be aligned
1939  * to cluster boundaries for allocation purposes.
1940  */
1941  if (NInoNonResident(ni)) {
1942  if (start > 0)
1943  start &= ~(s64)vol->cluster_size_mask;
1944  new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
1945  ~(s64)vol->cluster_size_mask;
1946  }
1947  BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
1948  /* Check if new size is allowed in $AttrDef. */
1949  err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
1950  if (unlikely(err)) {
1951  /* Only emit errors when the write will fail completely. */
1952  read_lock_irqsave(&ni->size_lock, flags);
1953  allocated_size = ni->allocated_size;
1954  read_unlock_irqrestore(&ni->size_lock, flags);
1955  if (start < 0 || start >= allocated_size) {
1956  if (err == -ERANGE) {
1957  ntfs_error(vol->sb, "Cannot extend allocation "
1958  "of inode 0x%lx, attribute "
1959  "type 0x%x, because the new "
1960  "allocation would exceed the "
1961  "maximum allowed size for "
1962  "this attribute type.",
1963  vi->i_ino, (unsigned)
1964  le32_to_cpu(ni->type));
1965  } else {
1966  ntfs_error(vol->sb, "Cannot extend allocation "
1967  "of inode 0x%lx, attribute "
1968  "type 0x%x, because this "
1969  "attribute type is not "
1970  "defined on the NTFS volume. "
1971  "Possible corruption! You "
1972  "should run chkdsk!",
1973  vi->i_ino, (unsigned)
1974  le32_to_cpu(ni->type));
1975  }
1976  }
1977  /* Translate error code to be POSIX conformant for write(2). */
1978  if (err == -ERANGE)
1979  err = -EFBIG;
1980  else
1981  err = -EIO;
1982  return err;
1983  }
1984  if (!NInoAttr(ni))
1985  base_ni = ni;
1986  else
1987  base_ni = ni->ext.base_ntfs_ino;
1988  /*
1989  * We will be modifying both the runlist (if non-resident) and the mft
1990  * record so lock them both down.
1991  */
1992  down_write(&ni->runlist.lock);
1993  m = map_mft_record(base_ni);
1994  if (IS_ERR(m)) {
1995  err = PTR_ERR(m);
1996  m = NULL;
1997  ctx = NULL;
1998  goto err_out;
1999  }
2000  ctx = ntfs_attr_get_search_ctx(base_ni, m);
2001  if (unlikely(!ctx)) {
2002  err = -ENOMEM;
2003  goto err_out;
2004  }
2005  read_lock_irqsave(&ni->size_lock, flags);
2006  allocated_size = ni->allocated_size;
2007  read_unlock_irqrestore(&ni->size_lock, flags);
2008  /*
2009  * If non-resident, seek to the last extent. If resident, there is
2010  * only one extent, so seek to that.
2011  */
2012  vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
2013  0;
2014  /*
2015  * Abort if someone did the work whilst we waited for the locks. If we
2016  * just converted the attribute from resident to non-resident it is
2017  * likely that exactly this has happened already. We cannot quite
2018  * abort if we need to update the data size.
2019  */
2020  if (unlikely(new_alloc_size <= allocated_size)) {
2021  ntfs_debug("Allocated size already exceeds requested size.");
2022  new_alloc_size = allocated_size;
2023  if (new_data_size < 0)
2024  goto done;
2025  /*
2026  * We want the first attribute extent so that we can update the
2027  * data size.
2028  */
2029  vcn = 0;
2030  }
2031  err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2032  CASE_SENSITIVE, vcn, NULL, 0, ctx);
2033  if (unlikely(err)) {
2034  if (err == -ENOENT)
2035  err = -EIO;
2036  goto err_out;
2037  }
2038  m = ctx->mrec;
2039  a = ctx->attr;
2040  /* Use goto to reduce indentation. */
2041  if (a->non_resident)
2042  goto do_non_resident_extend;
2043  BUG_ON(NInoNonResident(ni));
2044  /* The total length of the attribute value. */
2045  attr_len = le32_to_cpu(a->data.resident.value_length);
2046  /*
2047  * Extend the attribute record to be able to store the new attribute
2048  * size. ntfs_attr_record_resize() will not do anything if the size is
2049  * not changing.
2050  */
2051  if (new_alloc_size < vol->mft_record_size &&
2052  !ntfs_attr_record_resize(m, a,
2053  le16_to_cpu(a->data.resident.value_offset) +
2054  new_alloc_size)) {
2055  /* The resize succeeded! */
2056  write_lock_irqsave(&ni->size_lock, flags);
2057  ni->allocated_size = le32_to_cpu(a->length) -
2058  le16_to_cpu(a->data.resident.value_offset);
2059  write_unlock_irqrestore(&ni->size_lock, flags);
2060  if (new_data_size >= 0) {
2061  BUG_ON(new_data_size < attr_len);
2062  a->data.resident.value_length =
2063  cpu_to_le32((u32)new_data_size);
2064  }
2065  goto flush_done;
2066  }
2067  /*
2068  * We have to drop all the locks so we can call
2069  * ntfs_attr_make_non_resident(). This could be optimised by try-
2070  * locking the first page cache page and only if that fails dropping
2071  * the locks, locking the page, and redoing all the locking and
2072  * lookups. While this would be a huge optimisation, it is not worth
2073  * it as this is definitely a slow code path.
2074  */
2075  ntfs_attr_put_search_ctx(ctx);
2076  unmap_mft_record(base_ni);
2077  up_write(&ni->runlist.lock);
2078  /*
2079  * Not enough space in the mft record, try to make the attribute
2080  * non-resident and if successful restart the extension process.
2081  */
2082  err = ntfs_attr_make_non_resident(ni, attr_len);
2083  if (likely(!err))
2084  goto retry_extend;
2085  /*
2086  * Could not make non-resident. If this is due to this not being
2087  * permitted for this attribute type or there not being enough space,
2088  * try to make other attributes non-resident. Otherwise fail.
2089  */
2090  if (unlikely(err != -EPERM && err != -ENOSPC)) {
2091  /* Only emit errors when the write will fail completely. */
2092  read_lock_irqsave(&ni->size_lock, flags);
2093  allocated_size = ni->allocated_size;
2094  read_unlock_irqrestore(&ni->size_lock, flags);
2095  if (start < 0 || start >= allocated_size)
2096  ntfs_error(vol->sb, "Cannot extend allocation of "
2097  "inode 0x%lx, attribute type 0x%x, "
2098  "because the conversion from resident "
2099  "to non-resident attribute failed "
2100  "with error code %i.", vi->i_ino,
2101  (unsigned)le32_to_cpu(ni->type), err);
2102  if (err != -ENOMEM)
2103  err = -EIO;
2104  goto conv_err_out;
2105  }
2106  /* TODO: Not implemented from here, abort. */
2107  read_lock_irqsave(&ni->size_lock, flags);
2108  allocated_size = ni->allocated_size;
2109  read_unlock_irqrestore(&ni->size_lock, flags);
2110  if (start < 0 || start >= allocated_size) {
2111  if (err == -ENOSPC)
2112  ntfs_error(vol->sb, "Not enough space in the mft "
2113  "record/on disk for the non-resident "
2114  "attribute value. This case is not "
2115  "implemented yet.");
2116  else /* if (err == -EPERM) */
2117  ntfs_error(vol->sb, "This attribute type may not be "
2118  "non-resident. This case is not "
2119  "implemented yet.");
2120  }
2121  err = -EOPNOTSUPP;
2122  goto conv_err_out;
2123 #if 0
2124  // TODO: Attempt to make other attributes non-resident.
2125  if (!err)
2126  goto do_resident_extend;
2127  /*
2128  * Both the attribute list attribute and the standard information
2129  * attribute must remain in the base inode. Thus, if this is one of
2130  * these attributes, we have to try to move other attributes out into
2131  * extent mft records instead.
2132  */
2133  if (ni->type == AT_ATTRIBUTE_LIST ||
2134  ni->type == AT_STANDARD_INFORMATION) {
2135  // TODO: Attempt to move other attributes into extent mft
2136  // records.
2137  err = -EOPNOTSUPP;
2138  if (!err)
2139  goto do_resident_extend;
2140  goto err_out;
2141  }
2142  // TODO: Attempt to move this attribute to an extent mft record, but
2143  // only if it is not already the only attribute in an mft record in
2144  // which case there would be nothing to gain.
2145  err = -EOPNOTSUPP;
2146  if (!err)
2147  goto do_resident_extend;
2148  /* There is nothing we can do to make enough space. )-: */
2149  goto err_out;
2150 #endif
2151 do_non_resident_extend:
2152  BUG_ON(!NInoNonResident(ni));
2153  if (new_alloc_size == allocated_size) {
2154  BUG_ON(vcn);
2155  goto alloc_done;
2156  }
2157  /*
2158  * If the data starts after the end of the old allocation, this is a
2159  * $DATA attribute and sparse attributes are enabled on the volume and
2160  * for this inode, then create a sparse region between the old
2161  * allocated size and the start of the data. Otherwise simply proceed
2162  * with filling the whole space between the old allocated size and the
2163  * new allocated size with clusters.
2164  */
2165  if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
2166  !NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
2167  goto skip_sparse;
2168  // TODO: This is not implemented yet. We just fill in with real
2169  // clusters for now...
2170  ntfs_debug("Inserting holes is not-implemented yet. Falling back to "
2171  "allocating real clusters instead.");
2172 skip_sparse:
2173  rl = ni->runlist.rl;
2174  if (likely(rl)) {
2175  /* Seek to the end of the runlist. */
2176  while (rl->length)
2177  rl++;
2178  }
2179  /* If this attribute extent is not mapped, map it now. */
2180  if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
2181  (rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
2182  (rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
2183  if (!rl && !allocated_size)
2184  goto first_alloc;
2185  rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
2186  if (IS_ERR(rl)) {
2187  err = PTR_ERR(rl);
2188  if (start < 0 || start >= allocated_size)
2189  ntfs_error(vol->sb, "Cannot extend allocation "
2190  "of inode 0x%lx, attribute "
2191  "type 0x%x, because the "
2192  "mapping of a runlist "
2193  "fragment failed with error "
2194  "code %i.", vi->i_ino,
2195  (unsigned)le32_to_cpu(ni->type),
2196  err);
2197  if (err != -ENOMEM)
2198  err = -EIO;
2199  goto err_out;
2200  }
2201  ni->runlist.rl = rl;
2202  /* Seek to the end of the runlist. */
2203  while (rl->length)
2204  rl++;
2205  }
2206  /*
2207  * We now know the runlist of the last extent is mapped and @rl is at
2208  * the end of the runlist. We want to begin allocating clusters
2209  * starting at the last allocated cluster to reduce fragmentation. If
2210  * there are no valid LCNs in the attribute we let the cluster
2211  * allocator choose the starting cluster.
2212  */
2213  /* If the last LCN is a hole or similar, seek back to the last real LCN. */
2214  while (rl->lcn < 0 && rl > ni->runlist.rl)
2215  rl--;
2216 first_alloc:
2217  // FIXME: Need to implement partial allocations so at least part of the
2218  // write can be performed when start >= 0. (Needed for POSIX write(2)
2219  // conformance.)
2220  rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
2221  (new_alloc_size - allocated_size) >>
2222  vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2223  rl->lcn + rl->length : -1, DATA_ZONE, true);
2224  if (IS_ERR(rl2)) {
2225  err = PTR_ERR(rl2);
2226  if (start < 0 || start >= allocated_size)
2227  ntfs_error(vol->sb, "Cannot extend allocation of "
2228  "inode 0x%lx, attribute type 0x%x, "
2229  "because the allocation of clusters "
2230  "failed with error code %i.", vi->i_ino,
2231  (unsigned)le32_to_cpu(ni->type), err);
2232  if (err != -ENOMEM && err != -ENOSPC)
2233  err = -EIO;
2234  goto err_out;
2235  }
2236  rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
2237  if (IS_ERR(rl)) {
2238  err = PTR_ERR(rl);
2239  if (start < 0 || start >= allocated_size)
2240  ntfs_error(vol->sb, "Cannot extend allocation of "
2241  "inode 0x%lx, attribute type 0x%x, "
2242  "because the runlist merge failed "
2243  "with error code %i.", vi->i_ino,
2244  (unsigned)le32_to_cpu(ni->type), err);
2245  if (err != -ENOMEM)
2246  err = -EIO;
2247  if (ntfs_cluster_free_from_rl(vol, rl2)) {
2248  ntfs_error(vol->sb, "Failed to release allocated "
2249  "cluster(s) in error code path. Run "
2250  "chkdsk to recover the lost "
2251  "cluster(s).");
2252  NVolSetErrors(vol);
2253  }
2254  ntfs_free(rl2);
2255  goto err_out;
2256  }
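 /*
  * On success the merge has taken ownership of the runs in @rl2 (the
  * failure path above frees @rl2 itself), so simply install the merged
  * runlist.
  */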
2257  ni->runlist.rl = rl;
2258  ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
2259  allocated_size) >> vol->cluster_size_bits);
2260  /* Find the runlist element with which the attribute extent starts. */
2261  ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
2262  rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
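 /* The extent start must be present, non-empty, and not an error LCN. */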
2263  BUG_ON(!rl2);
2264  BUG_ON(!rl2->length);
2265  BUG_ON(rl2->lcn < LCN_HOLE);
2266  mp_rebuilt = false;
2267  /* Get the size for the new mapping pairs array for this extent. */
2268  mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
2269  if (unlikely(mp_size <= 0)) {
2270  err = mp_size;
2271  if (start < 0 || start >= allocated_size)
2272  ntfs_error(vol->sb, "Cannot extend allocation of "
2273  "inode 0x%lx, attribute type 0x%x, "
2274  "because determining the size for the "
2275  "mapping pairs failed with error code "
2276  "%i.", vi->i_ino,
2277  (unsigned)le32_to_cpu(ni->type), err);
2278  err = -EIO;
2279  goto undo_alloc;
2280  }
2281  /* Extend the attribute record to fit the bigger mapping pairs array. */
2282  attr_len = le32_to_cpu(a->length);
2283  err = ntfs_attr_record_resize(m, a, mp_size +
2284  le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
2285  if (unlikely(err)) {
2286  BUG_ON(err != -ENOSPC);
2287  // TODO: Deal with this by moving this extent to a new mft
2288  // record or by starting a new extent in a new mft record,
2289  // possibly by extending this extent partially, filling it, and
2290  // creating a new extent for the remainder, or by making
2291  // other attributes non-resident and/or by moving other
2292  // attributes out of this mft record.
2293  if (start < 0 || start >= allocated_size)
2294  ntfs_error(vol->sb, "Not enough space in the mft "
2295  "record for the extended attribute "
2296  "record. This case is not "
2297  "implemented yet.");
2298  err = -EOPNOTSUPP;
2299  goto undo_alloc;
2300  }
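 /*
  * Record that the attribute record has been resized so that the error
  * path knows to shrink it back to @attr_len and rebuild the old
  * mapping pairs array.
  */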
2301  mp_rebuilt = true;
2302  /* Generate the mapping pairs array directly into the attr record. */
2303  err = ntfs_mapping_pairs_build(vol, (u8*)a +
2304  le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
2305  mp_size, rl2, ll, -1, NULL);
2306  if (unlikely(err)) {
2307  if (start < 0 || start >= allocated_size)
2308  ntfs_error(vol->sb, "Cannot extend allocation of "
2309  "inode 0x%lx, attribute type 0x%x, "
2310  "because building the mapping pairs "
2311  "failed with error code %i.", vi->i_ino,
2312  (unsigned)le32_to_cpu(ni->type), err);
2313  err = -EIO;
2314  goto undo_alloc;
2315  }
2316  /* Update the highest_vcn; it is inclusive, hence the minus one. */
2317  a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
2318  vol->cluster_size_bits) - 1);
2319  /*
2320  * We have now extended the allocated size of the attribute. Reflect
2321  * this in the ntfs_inode structure and the attribute record.
2322  */
2323  if (a->data.non_resident.lowest_vcn) {
2324  /*
2325  * We are not in the first attribute extent, so switch to it, but
2326  * first ensure the changes will make it to disk later.
2327  */
2328  flush_dcache_mft_record_page(ctx->ntfs_ino);
2329  mark_mft_record_dirty(ctx->ntfs_ino);
2330  ntfs_attr_reinit_search_ctx(ctx);
2331  err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2332  CASE_SENSITIVE, 0, NULL, 0, ctx);
2333  if (unlikely(err))
2334  goto restore_undo_alloc;
2335  /* @m is not used any more so no need to set it. */
2336  a = ctx->attr;
2337  }
2338  write_lock_irqsave(&ni->size_lock, flags);
2339  ni->allocated_size = new_alloc_size;
2340  a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
2341  /*
2342  * FIXME: This would fail if @ni is a directory, $MFT, or an index,
2343  * since those can have sparse/compressed set. For example, a directory
2344  * can be marked compressed even though it is not itself compressed; in
2345  * that case the bit means that files are to be created compressed in
2346  * that directory... At present this is ok as this code is only called
2347  * for regular files, and only for their $DATA attribute(s).
2348  * FIXME: The calculation is wrong if we created a hole above. For now
2349  * it does not matter as we never create holes.
2350  */
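 /* i_blocks counts 512-byte units, hence the shifts by 9 below. */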
2351  if (NInoSparse(ni) || NInoCompressed(ni)) {
2352  ni->itype.compressed.size += new_alloc_size - allocated_size;
2353  a->data.non_resident.compressed_size =
2354  cpu_to_sle64(ni->itype.compressed.size);
2355  vi->i_blocks = ni->itype.compressed.size >> 9;
2356  } else
2357  vi->i_blocks = new_alloc_size >> 9;
2358  write_unlock_irqrestore(&ni->size_lock, flags);
2359 alloc_done:
2360  if (new_data_size >= 0) {
2361  BUG_ON(new_data_size <
2362  sle64_to_cpu(a->data.non_resident.data_size));
2363  a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
2364  }
2365 flush_done:
2366  /* Ensure the changes make it to disk. */
2367  flush_dcache_mft_record_page(ctx->ntfs_ino);
2368  mark_mft_record_dirty(ctx->ntfs_ino);
2369 done:
2370  ntfs_attr_put_search_ctx(ctx);
2371  unmap_mft_record(base_ni);
2372  up_write(&ni->runlist.lock);
2373  ntfs_debug("Done, new_allocated_size 0x%llx.",
2374  (unsigned long long)new_alloc_size);
2375  return new_alloc_size;
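 /*
  * Error paths: restore_undo_alloc re-finds the attribute extent and
  * rolls its highest_vcn back, then falls through to undo_alloc which
  * frees the newly allocated clusters and truncates the runlist back
  * to the old allocated size.
  */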
2376 restore_undo_alloc:
2377  if (start < 0 || start >= allocated_size)
2378  ntfs_error(vol->sb, "Cannot complete extension of allocation "
2379  "of inode 0x%lx, attribute type 0x%x, because "
2380  "lookup of first attribute extent failed with "
2381  "error code %i.", vi->i_ino,
2382  (unsigned)le32_to_cpu(ni->type), err);
2383  if (err == -ENOENT)
2384  err = -EIO;
2385  ntfs_attr_reinit_search_ctx(ctx);
2386  if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
2387  allocated_size >> vol->cluster_size_bits, NULL, 0,
2388  ctx)) {
2389  ntfs_error(vol->sb, "Failed to find last attribute extent of "
2390  "attribute in error code path. Run chkdsk to "
2391  "recover.");
2392  write_lock_irqsave(&ni->size_lock, flags);
2393  ni->allocated_size = new_alloc_size;
2394  /*
2395  * FIXME: This would fail if @ni is a directory... See above.
2396  * FIXME: The calculation is wrong if we created a hole above.
2397  * For now it does not matter as we never create holes.
2398  */
2399  if (NInoSparse(ni) || NInoCompressed(ni)) {
2400  ni->itype.compressed.size += new_alloc_size -
2401  allocated_size;
2402  vi->i_blocks = ni->itype.compressed.size >> 9;
2403  } else
2404  vi->i_blocks = new_alloc_size >> 9;
2405  write_unlock_irqrestore(&ni->size_lock, flags);
2406  ntfs_attr_put_search_ctx(ctx);
2407  unmap_mft_record(base_ni);
2408  up_write(&ni->runlist.lock);
2409  /*
2410  * The only thing that is now wrong is the allocated size of the
2411  * base attribute extent, which chkdsk should be able to fix.
2412  */
2413  NVolSetErrors(vol);
2414  return err;
2415  }
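 /* Roll highest_vcn back to the last VCN of the old allocation. */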
2416  ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
2417  (allocated_size >> vol->cluster_size_bits) - 1);
2418 undo_alloc:
2419  ll = allocated_size >> vol->cluster_size_bits;
2420  if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
2421  ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
2422  "in error code path. Run chkdsk to recover "
2423  "the lost cluster(s).");
2424  NVolSetErrors(vol);
2425  }
2426  m = ctx->mrec;
2427  a = ctx->attr;
2428  /*
2429  * If the runlist truncation fails and/or the search context is no
2430  * longer valid, we cannot resize the attribute record or build the
2431  * mapping pairs array; thus we mark the volume as containing errors so
2432  * that no access to the freed clusters can happen until chkdsk has run.
2433  */
2434  if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
2435  ntfs_error(vol->sb, "Failed to %s in error code path. Run "
2436  "chkdsk to recover.", IS_ERR(m) ?
2437  "restore attribute search context" :
2438  "truncate attribute runlist");
2439  NVolSetErrors(vol);
2440  } else if (mp_rebuilt) {
2441  if (ntfs_attr_record_resize(m, a, attr_len)) {
2442  ntfs_error(vol->sb, "Failed to restore attribute "
2443  "record in error code path. Run "
2444  "chkdsk to recover.");
2445  NVolSetErrors(vol);
2446  } else /* if (success) */ {
2447  if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
2448  a->data.non_resident.
2449  mapping_pairs_offset), attr_len -
2450  le16_to_cpu(a->data.non_resident.
2451  mapping_pairs_offset), rl2, ll, -1,
2452  NULL)) {
2453  ntfs_error(vol->sb, "Failed to restore "
2454  "mapping pairs array in error "
2455  "code path. Run chkdsk to "
2456  "recover.");
2457  NVolSetErrors(vol);
2458  }
2459  flush_dcache_mft_record_page(ctx->ntfs_ino);
2460  mark_mft_record_dirty(ctx->ntfs_ino);
2461  }
2462  }
2463 err_out:
2464  if (ctx)
2465  ntfs_attr_put_search_ctx(ctx);
2466  if (m)
2467  unmap_mft_record(base_ni);
2468  up_write(&ni->runlist.lock);
2469 conv_err_out:
2470  ntfs_debug("Failed. Returning error code %i.", err);
2471  return err;
2472 }
2473 
2494 int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2495 {
2496  ntfs_volume *vol = ni->vol;
2497  struct address_space *mapping;
2498  struct page *page;
2499  u8 *kaddr;
2500  pgoff_t idx, end;
2501  unsigned start_ofs, end_ofs, size;
2502 
2503  ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
2504  (long long)ofs, (long long)cnt, val);
2505  BUG_ON(ofs < 0);
2506  BUG_ON(cnt < 0);
2507  if (!cnt)
2508  goto done;
2509  /*
2510  * FIXME: Compressed and encrypted attributes are not supported when
2511  * writing, and we should never have gotten here for them.
2512  */
2513  BUG_ON(NInoCompressed(ni));
2514  BUG_ON(NInoEncrypted(ni));
2515  mapping = VFS_I(ni)->i_mapping;
2516  /* Work out the starting index and page offset. */
2517  idx = ofs >> PAGE_CACHE_SHIFT;
2518  start_ofs = ofs & ~PAGE_CACHE_MASK;
2519  /* Work out the ending index and page offset. */
2520  end = ofs + cnt;
2521  end_ofs = end & ~PAGE_CACHE_MASK;
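 /*
  * For example, assuming 4kiB pages: ofs 0x1234 and cnt 0x3000 give
  * idx 1, start_ofs 0x234 and end 0x4234, i.e. end_ofs 0x234 and, once
  * @end is shifted below, a last page index of 4.
  */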
2522  /* If the end is outside the inode size, return -ESPIPE. */
2523  if (unlikely(end > i_size_read(VFS_I(ni)))) {
2524  ntfs_error(vol->sb, "Request exceeds end of attribute.");
2525  return -ESPIPE;
2526  }
2527  end >>= PAGE_CACHE_SHIFT;
2528  /* If there is a first partial page, we need to do it the slow way. */
2529  if (start_ofs) {
2530  page = read_mapping_page(mapping, idx, NULL);
2531  if (IS_ERR(page)) {
2532  ntfs_error(vol->sb, "Failed to read first partial "
2533  "page (error, index 0x%lx).", idx);
2534  return PTR_ERR(page);
2535  }
2536  /*
2537  * If the last page is the same as the first page, we need to
2538  * limit the write to the end offset.
2539  */
2540  size = PAGE_CACHE_SIZE;
2541  if (idx == end)
2542  size = end_ofs;
2543  kaddr = kmap_atomic(page);
2544  memset(kaddr + start_ofs, val, size - start_ofs);
2545  flush_dcache_page(page);
2546  kunmap_atomic(kaddr);
2547  set_page_dirty(page);
2548  page_cache_release(page);
2549  balance_dirty_pages_ratelimited(mapping);
2550  cond_resched();
2551  if (idx == end)
2552  goto done;
2553  idx++;
2554  }
2555  /* Do the whole pages the fast way. */
2556  for (; idx < end; idx++) {
2557  /* Find or create the current page. (The page is locked.) */
2558  page = grab_cache_page(mapping, idx);
2559  if (unlikely(!page)) {
2560  ntfs_error(vol->sb, "Insufficient memory to grab "
2561  "page (index 0x%lx).", idx);
2562  return -ENOMEM;
2563  }
2564  kaddr = kmap_atomic(page);
2565  memset(kaddr, val, PAGE_CACHE_SIZE);
2566  flush_dcache_page(page);
2567  kunmap_atomic(kaddr);
2568  /*
2569  * If the page has buffers, mark them uptodate since buffer
2570  * state and not page state is definitive in 2.6 kernels.
2571  */
2572  if (page_has_buffers(page)) {
2573  struct buffer_head *bh, *head;
2574 
2575  bh = head = page_buffers(page);
2576  do {
2577  set_buffer_uptodate(bh);
2578  } while ((bh = bh->b_this_page) != head);
2579  }
2580  /* Now that buffers are uptodate, set the page uptodate, too. */
2581  SetPageUptodate(page);
2582  /*
2583  * Set the page and all its buffers dirty and mark the inode
2584  * dirty, too. The VM will write the page later on.
2585  */
2586  set_page_dirty(page);
2587  /* Finally unlock and release the page. */
2588  unlock_page(page);
2589  page_cache_release(page);
2590  balance_dirty_pages_ratelimited(mapping);
2591  cond_resched();
2592  }
2593  /* If there is a last partial page, we need to do it the slow way. */
2594  if (end_ofs) {
2595  page = read_mapping_page(mapping, idx, NULL);
2596  if (IS_ERR(page)) {
2597  ntfs_error(vol->sb, "Failed to read last partial page "
2598  "(error, index 0x%lx).", idx);
2599  return PTR_ERR(page);
2600  }
2601  kaddr = kmap_atomic(page);
2602  memset(kaddr, val, end_ofs);
2603  flush_dcache_page(page);
2604  kunmap_atomic(kaddr);
2605  set_page_dirty(page);
2606  page_cache_release(page);
2607  balance_dirty_pages_ratelimited(mapping);
2608  cond_resched();
2609  }
2610 done:
2611  ntfs_debug("Done.");
2612  return 0;
2613 }
2614 
2615 #endif /* NTFS_RW */