Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
xattr.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/xattr.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <asm/uaccess.h>
17 
18 #include "gfs2.h"
19 #include "incore.h"
20 #include "acl.h"
21 #include "xattr.h"
22 #include "glock.h"
23 #include "inode.h"
24 #include "meta_io.h"
25 #include "quota.h"
26 #include "rgrp.h"
27 #include "trans.h"
28 #include "util.h"
29 
40 static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
41  unsigned int *size)
42 {
43  unsigned int jbsize = sdp->sd_jbsize;
44 
45  /* Stuffed */
46  *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);
47 
48  if (*size <= jbsize)
49  return 1;
50 
51  /* Unstuffed */
52  *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
53  (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);
54 
55  return 0;
56 }
57 
58 static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
59 {
60  unsigned int size;
61 
62  if (dsize > GFS2_EA_MAX_DATA_LEN)
63  return -ERANGE;
64 
65  ea_calc_size(sdp, nsize, dsize, &size);
66 
67  /* This can only happen with 512 byte blocks */
68  if (size > sdp->sd_jbsize)
69  return -ERANGE;
70 
71  return 0;
72 }
73 
/* Callback applied to each xattr record by ea_foreach_i().  Returning 0
   continues the walk; a non-zero value stops it and is propagated back
   to the caller. */
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

/**
 * ea_foreach_i - walk every xattr record in a single EA block
 * @ip: the inode
 * @bh: buffer holding an EA-type metadata block
 * @ea_call: callback invoked for each record
 * @data: opaque pointer passed through to @ea_call
 *
 * Each record is validated (non-zero record length, record fully inside
 * the buffer, recognised type) before the callback runs.  Any corruption
 * marks the inode inconsistent and returns -EIO.
 *
 * Returns: 0 on success, the callback's non-zero value, or -EIO
 */
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		/* A zero record length would make this loop spin forever. */
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		/* The whole record must lie within the buffer. */
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			/* The last record must end exactly at the block end. */
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
114 
/**
 * ea_foreach - iterate over every xattr record belonging to an inode
 * @ip: the inode
 * @ea_call: callback invoked for each record
 * @data: opaque pointer passed through to @ea_call
 *
 * Reads the inode's EA block.  With GFS2_DIF_EA_INDIRECT set that block
 * is an indirect list of pointers to EA blocks, each of which is walked
 * in turn; otherwise the block itself is walked directly.
 *
 * Returns: 0 on success, a callback's non-zero value, or errno
 */
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		/* A zero pointer terminates the indirect list. */
		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
157 
158 struct ea_find {
159  int type;
160  const char *name;
161  size_t namel;
163 };
164 
/*
 * ea_foreach callback: compare one record against the requested
 * (type, name) pair and, on a hit, record its location (buffer, record,
 * previous record) in the caller's gfs2_ea_location.  An extra reference
 * is taken on @bh; the caller of gfs2_ea_find() must brelse() it.
 */
static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == ef->type) {
		if (ea->ea_name_len == ef->namel &&
		    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			/* Non-zero return value stops the walk. */
			return 1;
		}
	}

	return 0;
}
188 
189 static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
190  struct gfs2_ea_location *el)
191 {
192  struct ea_find ef;
193  int error;
194 
195  ef.type = type;
196  ef.name = name;
197  ef.namel = strlen(name);
198  ef.ef_el = el;
199 
200  memset(el, 0, sizeof(struct gfs2_ea_location));
201 
202  error = ea_foreach(ip, ea_find_i, &ef);
203  if (error > 0)
204  return 0;
205 
206  return error;
207 }
208 
224 static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
225  struct gfs2_ea_header *ea,
226  struct gfs2_ea_header *prev, void *private)
227 {
228  int *leave = private;
229  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
230  struct gfs2_rgrpd *rgd;
231  struct gfs2_holder rg_gh;
232  struct buffer_head *dibh;
233  __be64 *dataptrs;
234  u64 bn = 0;
235  u64 bstart = 0;
236  unsigned int blen = 0;
237  unsigned int blks = 0;
238  unsigned int x;
239  int error;
240 
241  error = gfs2_rindex_update(sdp);
242  if (error)
243  return error;
244 
245  if (GFS2_EA_IS_STUFFED(ea))
246  return 0;
247 
248  dataptrs = GFS2_EA2DATAPTRS(ea);
249  for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
250  if (*dataptrs) {
251  blks++;
252  bn = be64_to_cpu(*dataptrs);
253  }
254  }
255  if (!blks)
256  return 0;
257 
258  rgd = gfs2_blk2rgrpd(sdp, bn, 1);
259  if (!rgd) {
260  gfs2_consist_inode(ip);
261  return -EIO;
262  }
263 
264  error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
265  if (error)
266  return error;
267 
268  error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
269  RES_EATTR + RES_STATFS + RES_QUOTA, blks);
270  if (error)
271  goto out_gunlock;
272 
273  gfs2_trans_add_bh(ip->i_gl, bh, 1);
274 
275  dataptrs = GFS2_EA2DATAPTRS(ea);
276  for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
277  if (!*dataptrs)
278  break;
279  bn = be64_to_cpu(*dataptrs);
280 
281  if (bstart + blen == bn)
282  blen++;
283  else {
284  if (bstart)
285  gfs2_free_meta(ip, bstart, blen);
286  bstart = bn;
287  blen = 1;
288  }
289 
290  *dataptrs = 0;
291  gfs2_add_inode_blocks(&ip->i_inode, -1);
292  }
293  if (bstart)
294  gfs2_free_meta(ip, bstart, blen);
295 
296  if (prev && !leave) {
297  u32 len;
298 
299  len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
300  prev->ea_rec_len = cpu_to_be32(len);
301 
302  if (GFS2_EA_IS_LAST(ea))
303  prev->ea_flags |= GFS2_EAFLAG_LAST;
304  } else {
306  ea->ea_num_ptrs = 0;
307  }
308 
309  error = gfs2_meta_inode_buffer(ip, &dibh);
310  if (!error) {
311  ip->i_inode.i_ctime = CURRENT_TIME;
312  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
313  gfs2_dinode_out(ip, dibh->b_data);
314  brelse(dibh);
315  }
316 
317  gfs2_trans_end(sdp);
318 
319 out_gunlock:
320  gfs2_glock_dq_uninit(&rg_gh);
321  return error;
322 }
323 
324 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
325  struct gfs2_ea_header *ea,
326  struct gfs2_ea_header *prev, int leave)
327 {
328  int error;
329 
330  error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
331  if (error)
332  return error;
333 
335  if (error)
336  goto out_alloc;
337 
338  error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);
339 
340  gfs2_quota_unhold(ip);
341 out_alloc:
342  return error;
343 }
344 
/* Accumulator state for gfs2_listxattr()/ea_list_i(). */
struct ea_list {
	struct gfs2_ea_request *ei_er;	/* output buffer + its length */
	unsigned int ei_size;		/* bytes of name list produced so far */
};
349 
350 static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
351 {
352  switch (ea->ea_type) {
353  case GFS2_EATYPE_USR:
354  return 5 + ea->ea_name_len + 1;
355  case GFS2_EATYPE_SYS:
356  return 7 + ea->ea_name_len + 1;
358  return 9 + ea->ea_name_len + 1;
359  default:
360  return 0;
361  }
362 }
363 
364 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
365  struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
366  void *private)
367 {
368  struct ea_list *ei = private;
369  struct gfs2_ea_request *er = ei->ei_er;
370  unsigned int ea_size = gfs2_ea_strlen(ea);
371 
372  if (ea->ea_type == GFS2_EATYPE_UNUSED)
373  return 0;
374 
375  if (er->er_data_len) {
376  char *prefix = NULL;
377  unsigned int l = 0;
378  char c = 0;
379 
380  if (ei->ei_size + ea_size > er->er_data_len)
381  return -ERANGE;
382 
383  switch (ea->ea_type) {
384  case GFS2_EATYPE_USR:
385  prefix = "user.";
386  l = 5;
387  break;
388  case GFS2_EATYPE_SYS:
389  prefix = "system.";
390  l = 7;
391  break;
393  prefix = "security.";
394  l = 9;
395  break;
396  }
397 
398  BUG_ON(l == 0);
399 
400  memcpy(er->er_data + ei->ei_size, prefix, l);
401  memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
402  ea->ea_name_len);
403  memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
404  }
405 
406  ei->ei_size += ea_size;
407 
408  return 0;
409 }
410 
/**
 * gfs2_listxattr - list GFS2 extended attribute names
 * @dentry: the dentry whose inode we are interested in
 * @buffer: the buffer to write the results into
 * @size: the size of @buffer; 0 queries the required size
 *
 * Returns: actual size of the name list on success, -errno on failure
 */
ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
	struct gfs2_ea_request er;
	struct gfs2_holder i_gh;
	int error;

	memset(&er, 0, sizeof(struct gfs2_ea_request));
	if (size) {
		er.er_data = buffer;
		er.er_data_len = size;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	/* No EA fork means no attributes: return 0 bytes. */
	if (ip->i_eattr) {
		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
449 
/**
 * gfs2_iter_unstuffed - copy data to/from the blocks of an unstuffed xattr
 * @ip: the inode
 * @ea: the xattr record whose data pointers are followed
 * @din: data to be copied in, or NULL
 * @dout: buffer for data to be copied out, or NULL
 *
 * Reads all data blocks asynchronously, then waits on and processes them
 * in order, copying sd_jbsize-sized chunks (the last one shorter).  When
 * writing (@din), each block is added to the current transaction, so a
 * transaction must already be open.
 *
 * Returns: errno
 */
static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			       const char *din, char *dout)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;
	unsigned char *pos;
	unsigned cp_size;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	/* Phase 1: start reads for every data block. */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			/* Release the buffers already obtained. */
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	/* Phase 2: wait for each read and copy the data. */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			/* Release this and all remaining buffers. */
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
		/* Final chunk may be shorter than a full journaled block. */
		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;

		if (dout) {
			memcpy(dout, pos, cp_size);
			dout += sdp->sd_jbsize;
		}

		if (din) {
			gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
			memcpy(pos, din, cp_size);
			din += sdp->sd_jbsize;
		}

		amount -= sdp->sd_jbsize;
		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}
525 
526 static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
527  char *data, size_t size)
528 {
529  int ret;
530  size_t len = GFS2_EA_DATA_LEN(el->el_ea);
531  if (len > size)
532  return -ERANGE;
533 
534  if (GFS2_EA_IS_STUFFED(el->el_ea)) {
535  memcpy(data, GFS2_EA2DATA(el->el_ea), len);
536  return len;
537  }
538  ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
539  if (ret < 0)
540  return ret;
541  return len;
542 }
543 
/**
 * gfs2_xattr_acl_get - read a system-namespace ACL xattr into a new buffer
 * @ip: the inode
 * @name: the ACL attribute name
 * @ppdata: on success (positive return) receives a kmalloc'd buffer that
 *          the caller must kfree
 *
 * Returns: length of the data, 0 if the attribute is absent or empty,
 *          or -errno
 */
int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
	struct gfs2_ea_location el;
	int error;
	int len;
	char *data;

	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
	if (error)
		return error;
	/* Absent or zero-length attribute: fall through with error == 0. */
	if (!el.el_ea)
		goto out;
	if (!GFS2_EA_DATA_LEN(el.el_ea))
		goto out;

	len = GFS2_EA_DATA_LEN(el.el_ea);
	data = kmalloc(len, GFP_NOFS);
	error = -ENOMEM;
	if (data == NULL)
		goto out;

	/* On success this returns len; ownership of @data moves to caller. */
	error = gfs2_ea_get_copy(ip, &el, data, len);
	if (error < 0)
		kfree(data);
	else
		*ppdata = data;
out:
	brelse(el.el_bh);
	return error;
}
574 
585 static int gfs2_xattr_get(struct dentry *dentry, const char *name,
586  void *buffer, size_t size, int type)
587 {
588  struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
589  struct gfs2_ea_location el;
590  int error;
591 
592  if (!ip->i_eattr)
593  return -ENODATA;
594  if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
595  return -EINVAL;
596 
597  error = gfs2_ea_find(ip, type, name, &el);
598  if (error)
599  return error;
600  if (!el.el_ea)
601  return -ENODATA;
602  if (size)
603  error = gfs2_ea_get_copy(ip, &el, buffer, size);
604  else
605  error = GFS2_EA_DATA_LEN(el.el_ea);
606  brelse(el.el_bh);
607 
608  return error;
609 }
610 
619 static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
620 {
621  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
622  struct gfs2_ea_header *ea;
623  unsigned int n = 1;
624  u64 block;
625  int error;
626 
627  error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
628  if (error)
629  return error;
630  gfs2_trans_add_unrevoke(sdp, block, 1);
631  *bhp = gfs2_meta_new(ip->i_gl, block);
632  gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
633  gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
634  gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
635 
636  ea = GFS2_EA_BH2FIRST(*bhp);
637  ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
640  ea->ea_num_ptrs = 0;
641 
642  gfs2_add_inode_blocks(&ip->i_inode, 1);
643 
644  return 0;
645 }
646 
659 static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
660  struct gfs2_ea_request *er)
661 {
662  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
663  int error;
664 
666  ea->ea_name_len = er->er_name_len;
667  ea->ea_type = er->er_type;
668  ea->__pad = 0;
669 
670  memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
671 
672  if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
673  ea->ea_num_ptrs = 0;
674  memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
675  } else {
676  __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
677  const char *data = er->er_data;
678  unsigned int data_len = er->er_data_len;
679  unsigned int copy;
680  unsigned int x;
681 
683  for (x = 0; x < ea->ea_num_ptrs; x++) {
684  struct buffer_head *bh;
685  u64 block;
686  int mh_size = sizeof(struct gfs2_meta_header);
687  unsigned int n = 1;
688 
689  error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
690  if (error)
691  return error;
692  gfs2_trans_add_unrevoke(sdp, block, 1);
693  bh = gfs2_meta_new(ip->i_gl, block);
694  gfs2_trans_add_bh(ip->i_gl, bh, 1);
695  gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
696 
697  gfs2_add_inode_blocks(&ip->i_inode, 1);
698 
699  copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
700  data_len;
701  memcpy(bh->b_data + mh_size, data, copy);
702  if (copy < sdp->sd_jbsize)
703  memset(bh->b_data + mh_size + copy, 0,
704  sdp->sd_jbsize - copy);
705 
706  *dataptr++ = cpu_to_be64(bh->b_blocknr);
707  data += copy;
708  data_len -= copy;
709 
710  brelse(bh);
711  }
712 
713  gfs2_assert_withdraw(sdp, !data_len);
714  }
715 
716  return 0;
717 }
718 
719 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
720  struct gfs2_ea_request *er, void *private);
721 
722 static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
723  unsigned int blks,
724  ea_skeleton_call_t skeleton_call, void *private)
725 {
726  struct buffer_head *dibh;
727  int error;
728 
729  error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
730  if (error)
731  return error;
732 
733  error = gfs2_quota_lock_check(ip);
734  if (error)
735  return error;
736 
737  error = gfs2_inplace_reserve(ip, blks);
738  if (error)
739  goto out_gunlock_q;
740 
741  error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
742  blks + gfs2_rg_blocks(ip, blks) +
744  if (error)
745  goto out_ipres;
746 
747  error = skeleton_call(ip, er, private);
748  if (error)
749  goto out_end_trans;
750 
751  error = gfs2_meta_inode_buffer(ip, &dibh);
752  if (!error) {
753  ip->i_inode.i_ctime = CURRENT_TIME;
754  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
755  gfs2_dinode_out(ip, dibh->b_data);
756  brelse(dibh);
757  }
758 
759 out_end_trans:
760  gfs2_trans_end(GFS2_SB(&ip->i_inode));
761 out_ipres:
763 out_gunlock_q:
764  gfs2_quota_unlock(ip);
765  return error;
766 }
767 
768 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
769  void *private)
770 {
771  struct buffer_head *bh;
772  int error;
773 
774  error = ea_alloc_blk(ip, &bh);
775  if (error)
776  return error;
777 
778  ip->i_eattr = bh->b_blocknr;
779  error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
780 
781  brelse(bh);
782 
783  return error;
784 }
785 
794 static int ea_init(struct gfs2_inode *ip, int type, const char *name,
795  const void *data, size_t size)
796 {
797  struct gfs2_ea_request er;
798  unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
799  unsigned int blks = 1;
800 
801  er.er_type = type;
802  er.er_name = name;
803  er.er_name_len = strlen(name);
804  er.er_data = (void *)data;
805  er.er_data_len = size;
806 
807  if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
808  blks += DIV_ROUND_UP(er.er_data_len, jbsize);
809 
810  return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
811 }
812 
/*
 * ea_split_ea - split an over-sized EA record in two
 * @ea: a record whose allocated length (rec_len) exceeds its used size
 *
 * Shrinks @ea to its used size and creates a new unused record in the
 * freed tail.  If @ea carried GFS2_EAFLAG_LAST, the flag migrates to the
 * new record so the chain stays correctly terminated.
 *
 * Returns: the new record
 */
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
							       ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;	/* clears LAST on @ea iff it was set */

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
829 
830 static void ea_set_remove_stuffed(struct gfs2_inode *ip,
831  struct gfs2_ea_location *el)
832 {
833  struct gfs2_ea_header *ea = el->el_ea;
834  struct gfs2_ea_header *prev = el->el_prev;
835  u32 len;
836 
837  gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
838 
839  if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
841  return;
842  } else if (GFS2_EA2NEXT(prev) != ea) {
843  prev = GFS2_EA2NEXT(prev);
844  gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
845  }
846 
847  len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
848  prev->ea_rec_len = cpu_to_be32(len);
849 
850  if (GFS2_EA_IS_LAST(ea))
851  prev->ea_flags |= GFS2_EAFLAG_LAST;
852 }
853 
/* State shared between ea_set_simple() and its allocation callbacks. */
struct ea_set {
	int ea_split;			/* record must be split before use */

	struct gfs2_ea_request *es_er;	/* the xattr being written */
	struct gfs2_ea_location *es_el;	/* old record to retire, or NULL */

	struct buffer_head *es_bh;	/* block holding the chosen record */
	struct gfs2_ea_header *es_ea;	/* the chosen record */
};
863 
/*
 * ea_set_simple_noalloc - write a stuffed xattr into an existing block
 * @ip: the inode
 * @bh: the EA block holding @ea
 * @ea: the record to (re)use, split first if es->ea_split is set
 * @es: shared ea_set state
 *
 * No block allocation is needed, so only a small local transaction is
 * opened.  Also retires the old record (es->es_el) if one was recorded,
 * and updates the dinode's ctime.
 *
 * Returns: errno
 */
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;
	ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
896 
897 static int ea_set_simple_alloc(struct gfs2_inode *ip,
898  struct gfs2_ea_request *er, void *private)
899 {
900  struct ea_set *es = private;
901  struct gfs2_ea_header *ea = es->es_ea;
902  int error;
903 
904  gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
905 
906  if (es->ea_split)
907  ea = ea_split_ea(ea);
908 
909  error = ea_write(ip, ea, er);
910  if (error)
911  return error;
912 
913  if (es->es_el)
914  ea_set_remove_stuffed(ip, es->es_el);
915 
916  return 0;
917 }
918 
/*
 * ea_foreach callback used by ea_set_i(): try to place the new xattr in
 * an existing EA block, either by reusing an unused record that is big
 * enough or by splitting a record with sufficient slack.
 *
 * Returns: 1 when the attribute was written (stops the walk), 0 to keep
 *          looking, or -errno
 */
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
			       es->es_er->er_data_len, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			/* Reclaim the stale data blocks before reuse. */
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		/* Unstuffed: record location is remembered and the write
		   happens via ea_alloc_skeleton (needs block allocation). */
		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
965 
966 static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
967  void *private)
968 {
969  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
970  struct buffer_head *indbh, *newbh;
971  __be64 *eablk;
972  int error;
973  int mh_size = sizeof(struct gfs2_meta_header);
974 
975  if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
976  __be64 *end;
977 
978  error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
979  &indbh);
980  if (error)
981  return error;
982 
983  if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
984  error = -EIO;
985  goto out;
986  }
987 
988  eablk = (__be64 *)(indbh->b_data + mh_size);
989  end = eablk + sdp->sd_inptrs;
990 
991  for (; eablk < end; eablk++)
992  if (!*eablk)
993  break;
994 
995  if (eablk == end) {
996  error = -ENOSPC;
997  goto out;
998  }
999 
1000  gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1001  } else {
1002  u64 blk;
1003  unsigned int n = 1;
1004  error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
1005  if (error)
1006  return error;
1007  gfs2_trans_add_unrevoke(sdp, blk, 1);
1008  indbh = gfs2_meta_new(ip->i_gl, blk);
1009  gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1010  gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
1011  gfs2_buffer_clear_tail(indbh, mh_size);
1012 
1013  eablk = (__be64 *)(indbh->b_data + mh_size);
1014  *eablk = cpu_to_be64(ip->i_eattr);
1015  ip->i_eattr = blk;
1017  gfs2_add_inode_blocks(&ip->i_inode, 1);
1018 
1019  eablk++;
1020  }
1021 
1022  error = ea_alloc_blk(ip, &newbh);
1023  if (error)
1024  goto out;
1025 
1026  *eablk = cpu_to_be64((u64)newbh->b_blocknr);
1027  error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
1028  brelse(newbh);
1029  if (error)
1030  goto out;
1031 
1032  if (private)
1033  ea_set_remove_stuffed(ip, private);
1034 
1035 out:
1036  brelse(indbh);
1037  return error;
1038 }
1039 
1040 static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
1041  const void *value, size_t size, struct gfs2_ea_location *el)
1042 {
1043  struct gfs2_ea_request er;
1044  struct ea_set es;
1045  unsigned int blks = 2;
1046  int error;
1047 
1048  er.er_type = type;
1049  er.er_name = name;
1050  er.er_data = (void *)value;
1051  er.er_name_len = strlen(name);
1052  er.er_data_len = size;
1053 
1054  memset(&es, 0, sizeof(struct ea_set));
1055  es.es_er = &er;
1056  es.es_el = el;
1057 
1058  error = ea_foreach(ip, ea_set_simple, &es);
1059  if (error > 0)
1060  return 0;
1061  if (error)
1062  return error;
1063 
1064  if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
1065  blks++;
1066  if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
1067  blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
1068 
1069  return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
1070 }
1071 
1072 static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1073  struct gfs2_ea_location *el)
1074 {
1075  if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1076  el->el_prev = GFS2_EA2NEXT(el->el_prev);
1077  gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
1078  GFS2_EA2NEXT(el->el_prev) == el->el_ea);
1079  }
1080 
1081  return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
1082 }
1083 
1084 static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1085 {
1086  struct gfs2_ea_header *ea = el->el_ea;
1087  struct gfs2_ea_header *prev = el->el_prev;
1088  struct buffer_head *dibh;
1089  int error;
1090 
1091  error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1092  if (error)
1093  return error;
1094 
1095  gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1096 
1097  if (prev) {
1098  u32 len;
1099 
1100  len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
1101  prev->ea_rec_len = cpu_to_be32(len);
1102 
1103  if (GFS2_EA_IS_LAST(ea))
1104  prev->ea_flags |= GFS2_EAFLAG_LAST;
1105  } else {
1107  }
1108 
1109  error = gfs2_meta_inode_buffer(ip, &dibh);
1110  if (!error) {
1111  ip->i_inode.i_ctime = CURRENT_TIME;
1112  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1113  gfs2_dinode_out(ip, dibh->b_data);
1114  brelse(dibh);
1115  }
1116 
1117  gfs2_trans_end(GFS2_SB(&ip->i_inode));
1118 
1119  return error;
1120 }
1121 
1135 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
1136 {
1137  struct gfs2_ea_location el;
1138  int error;
1139 
1140  if (!ip->i_eattr)
1141  return -ENODATA;
1142 
1143  error = gfs2_ea_find(ip, type, name, &el);
1144  if (error)
1145  return error;
1146  if (!el.el_ea)
1147  return -ENODATA;
1148 
1149  if (GFS2_EA_IS_STUFFED(el.el_ea))
1150  error = ea_remove_stuffed(ip, &el);
1151  else
1152  error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);
1153 
1154  brelse(el.el_bh);
1155 
1156  return error;
1157 }
1158 
/**
 * __gfs2_xattr_set - create, replace or delete an extended attribute
 * @inode: the inode
 * @name: attribute name (namespace prefix already stripped)
 * @value: attribute data, or NULL to delete the attribute
 * @size: length of @value
 * @flags: XATTR_CREATE / XATTR_REPLACE semantics from setxattr(2)
 * @type: GFS2_EATYPE_* code selected by the xattr handler
 *
 * Returns: 0 on success, -errno on failure
 */
int __gfs2_xattr_set(struct inode *inode, const char *name,
		     const void *value, size_t size, int flags, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	unsigned int namel = strlen(name);
	int error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (namel > GFS2_EA_MAX_NAME_LEN)
		return -ERANGE;

	/* NULL value means delete, per the VFS xattr convention. */
	if (value == NULL)
		return gfs2_xattr_remove(ip, type, name);

	if (ea_check_size(sdp, namel, size))
		return -ERANGE;

	/* No EA fork yet: CREATE (or default) starts a new one. */
	if (!ip->i_eattr) {
		if (flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, type, name, value, size);
	}

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, type, name, value, size, &el);
			/* Drop the old data blocks once the new value is in. */
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
		return error;
	}

	error = -ENODATA;
	if (!(flags & XATTR_REPLACE))
		error = ea_set_i(ip, type, name, value, size, NULL);

	return error;
}
1227 
1228 static int gfs2_xattr_set(struct dentry *dentry, const char *name,
1229  const void *value, size_t size, int flags, int type)
1230 {
1231  return __gfs2_xattr_set(dentry->d_inode, name, value,
1232  size, flags, type);
1233 }
1234 
1235 
1236 static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1237  struct gfs2_ea_header *ea, char *data)
1238 {
1239  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1240  unsigned int amount = GFS2_EA_DATA_LEN(ea);
1241  unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1242  int ret;
1243 
1244  ret = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
1245  if (ret)
1246  return ret;
1247 
1248  ret = gfs2_iter_unstuffed(ip, ea, data, NULL);
1249  gfs2_trans_end(sdp);
1250 
1251  return ret;
1252 }
1253 
/**
 * gfs2_xattr_acl_chmod - rewrite the access ACL and apply new attributes
 * @ip: the inode
 * @attr: the new attributes (mode) to apply
 * @data: the replacement ACL data
 *
 * Returns: errno
 */
int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	int error;

	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
	if (error)
		return error;

	if (GFS2_EA_IS_STUFFED(el.el_ea)) {
		/* Overwrite the inline data in place. */
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0);
		if (error == 0) {
			gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
			memcpy(GFS2_EA2DATA(el.el_ea), data,
			       GFS2_EA_DATA_LEN(el.el_ea));
		}
	} else {
		error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
	}

	brelse(el.el_bh);
	if (error)
		return error;

	/* NOTE(review): on the unstuffed path the transaction was already
	   ended inside ea_acl_chmod_unstuffed(); confirm that the
	   gfs2_trans_end() below is correctly paired for both paths. */
	error = gfs2_setattr_simple(inode, attr);
	gfs2_trans_end(sdp);
	return error;
}
1284 
1285 static int ea_dealloc_indirect(struct gfs2_inode *ip)
1286 {
1287  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1288  struct gfs2_rgrp_list rlist;
1289  struct buffer_head *indbh, *dibh;
1290  __be64 *eablk, *end;
1291  unsigned int rg_blocks = 0;
1292  u64 bstart = 0;
1293  unsigned int blen = 0;
1294  unsigned int blks = 0;
1295  unsigned int x;
1296  int error;
1297 
1298  error = gfs2_rindex_update(sdp);
1299  if (error)
1300  return error;
1301 
1302  memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1303 
1304  error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
1305  if (error)
1306  return error;
1307 
1308  if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1309  error = -EIO;
1310  goto out;
1311  }
1312 
1313  eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1314  end = eablk + sdp->sd_inptrs;
1315 
1316  for (; eablk < end; eablk++) {
1317  u64 bn;
1318 
1319  if (!*eablk)
1320  break;
1321  bn = be64_to_cpu(*eablk);
1322 
1323  if (bstart + blen == bn)
1324  blen++;
1325  else {
1326  if (bstart)
1327  gfs2_rlist_add(ip, &rlist, bstart);
1328  bstart = bn;
1329  blen = 1;
1330  }
1331  blks++;
1332  }
1333  if (bstart)
1334  gfs2_rlist_add(ip, &rlist, bstart);
1335  else
1336  goto out;
1337 
1339 
1340  for (x = 0; x < rlist.rl_rgrps; x++) {
1341  struct gfs2_rgrpd *rgd;
1342  rgd = rlist.rl_ghs[x].gh_gl->gl_object;
1343  rg_blocks += rgd->rd_length;
1344  }
1345 
1346  error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
1347  if (error)
1348  goto out_rlist_free;
1349 
1350  error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
1351  RES_STATFS + RES_QUOTA, blks);
1352  if (error)
1353  goto out_gunlock;
1354 
1355  gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1356 
1357  eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1358  bstart = 0;
1359  blen = 0;
1360 
1361  for (; eablk < end; eablk++) {
1362  u64 bn;
1363 
1364  if (!*eablk)
1365  break;
1366  bn = be64_to_cpu(*eablk);
1367 
1368  if (bstart + blen == bn)
1369  blen++;
1370  else {
1371  if (bstart)
1372  gfs2_free_meta(ip, bstart, blen);
1373  bstart = bn;
1374  blen = 1;
1375  }
1376 
1377  *eablk = 0;
1378  gfs2_add_inode_blocks(&ip->i_inode, -1);
1379  }
1380  if (bstart)
1381  gfs2_free_meta(ip, bstart, blen);
1382 
1384 
1385  error = gfs2_meta_inode_buffer(ip, &dibh);
1386  if (!error) {
1387  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1388  gfs2_dinode_out(ip, dibh->b_data);
1389  brelse(dibh);
1390  }
1391 
1392  gfs2_trans_end(sdp);
1393 
1394 out_gunlock:
1395  gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
1396 out_rlist_free:
1398 out:
1399  brelse(indbh);
1400  return error;
1401 }
1402 
1403 static int ea_dealloc_block(struct gfs2_inode *ip)
1404 {
1405  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1406  struct gfs2_rgrpd *rgd;
1407  struct buffer_head *dibh;
1408  struct gfs2_holder gh;
1409  int error;
1410 
1411  error = gfs2_rindex_update(sdp);
1412  if (error)
1413  return error;
1414 
1415  rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
1416  if (!rgd) {
1417  gfs2_consist_inode(ip);
1418  return -EIO;
1419  }
1420 
1421  error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1422  if (error)
1423  return error;
1424 
1426  RES_QUOTA, 1);
1427  if (error)
1428  goto out_gunlock;
1429 
1430  gfs2_free_meta(ip, ip->i_eattr, 1);
1431 
1432  ip->i_eattr = 0;
1433  gfs2_add_inode_blocks(&ip->i_inode, -1);
1434 
1435  error = gfs2_meta_inode_buffer(ip, &dibh);
1436  if (!error) {
1437  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1438  gfs2_dinode_out(ip, dibh->b_data);
1439  brelse(dibh);
1440  }
1441 
1442  gfs2_trans_end(sdp);
1443 
1444 out_gunlock:
1445  gfs2_glock_dq_uninit(&gh);
1446  return error;
1447 }
1448 
1457 {
1458  int error;
1459 
1460  error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
1461  if (error)
1462  return error;
1463 
1465  if (error)
1466  return error;
1467 
1468  error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1469  if (error)
1470  goto out_quota;
1471 
1472  if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
1473  error = ea_dealloc_indirect(ip);
1474  if (error)
1475  goto out_quota;
1476  }
1477 
1478  error = ea_dealloc_block(ip);
1479 
1480 out_quota:
1481  gfs2_quota_unhold(ip);
1482  return error;
1483 }
1484 
/* "user." namespace handler; ->flags carries the GFS2 on-disk type code
   that gfs2_xattr_get/set receive as their @type argument. */
static const struct xattr_handler gfs2_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags  = GFS2_EATYPE_USR,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};
1491 
/* "security." namespace handler; same get/set hooks, different on-disk
   type code. */
static const struct xattr_handler gfs2_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags  = GFS2_EATYPE_SECURITY,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};
1498 
1500  &gfs2_xattr_user_handler,
1501  &gfs2_xattr_security_handler,
1503  NULL,
1504 };
1505