Linux Kernel  3.7.1
xfs_trans_dquot.c
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"

STATIC void     xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        ASSERT(dqp->q_transp != tp);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(dqp->q_logitem.qli_dquot == dqp);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);

        /*
         * Initialize d_transp so we can later determine if this dquot is
         * associated with this transaction.
         */
        dqp->q_transp = tp;
}


/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        ASSERT(dqp->q_transp == tp);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        tp->t_flags |= XFS_TRANS_DIRTY;
        dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
        xfs_trans_t     *otp,
        xfs_trans_t     *ntp)
{
        xfs_dqtrx_t     *oq, *nq;
        int             i, j;
        xfs_dqtrx_t     *oqa, *nqa;

        if (!otp->t_dqinfo)
                return;

        xfs_trans_alloc_dqinfo(ntp);
        oqa = otp->t_dqinfo->dqa_usrdquots;
        nqa = ntp->t_dqinfo->dqa_usrdquots;

        /*
         * Because the quota blk reservation is carried forward,
         * it is also necessary to carry forward the DQ_DIRTY flag.
         */
        if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
                ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

        for (j = 0; j < 2; j++) {
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        if (oqa[i].qt_dquot == NULL)
                                break;
                        oq = &oqa[i];
                        nq = &nqa[i];

                        nq->qt_dquot = oq->qt_dquot;
                        nq->qt_bcount_delta = nq->qt_icount_delta = 0;
                        nq->qt_rtbcount_delta = 0;

                        /*
                         * Transfer whatever is left of the reservations.
                         */
                        nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
                        oq->qt_blk_res = oq->qt_blk_res_used;

                        nq->qt_rtblk_res = oq->qt_rtblk_res -
                                oq->qt_rtblk_res_used;
                        oq->qt_rtblk_res = oq->qt_rtblk_res_used;

                        nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
                        oq->qt_ino_res = oq->qt_ino_res_used;
                }
                oqa = otp->t_dqinfo->dqa_grpdquots;
                nqa = ntp->t_dqinfo->dqa_grpdquots;
        }
}

/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
void
xfs_trans_mod_dquot_byino(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            field,
        long            delta)
{
        xfs_mount_t     *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) ||
            !XFS_IS_QUOTA_ON(mp) ||
            ip->i_ino == mp->m_sb.sb_uquotino ||
            ip->i_ino == mp->m_sb.sb_gquotino)
                return;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
        if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
}

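/*
 * Usage note: callers hand xfs_trans_mod_dquot_byino() an inode and a
 * (field, delta) pair, and it simply fans the delta out to the inode's
 * attached user and group/project dquots. For example, block allocation
 * paths would pass a positive XFS_TRANS_DQ_BCOUNT delta once blocks have
 * been mapped into the inode; the per-dquot bookkeeping happens below in
 * xfs_trans_mod_dquot().
 */
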
STATIC xfs_dqtrx_t *
xfs_trans_get_dqtrx(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        int             i;
        xfs_dqtrx_t     *qa;

        qa = XFS_QM_ISUDQ(dqp) ?
                tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                if (qa[i].qt_dquot == NULL ||
                    qa[i].qt_dquot == dqp)
                        return &qa[i];
        }

        return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp,
        uint            field,
        long            delta)
{
        xfs_dqtrx_t     *qtrx;

        ASSERT(tp);
        ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
        qtrx = NULL;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);
        /*
         * Find either the first free slot or the slot that belongs
         * to this dquot.
         */
        qtrx = xfs_trans_get_dqtrx(tp, dqp);
        ASSERT(qtrx);
        if (qtrx->qt_dquot == NULL)
                qtrx->qt_dquot = dqp;

        switch (field) {

        /*
         * regular disk blk reservation
         */
        case XFS_TRANS_DQ_RES_BLKS:
                qtrx->qt_blk_res += (ulong)delta;
                break;

        /*
         * inode reservation
         */
        case XFS_TRANS_DQ_RES_INOS:
                qtrx->qt_ino_res += (ulong)delta;
                break;

        /*
         * disk blocks used.
         */
        case XFS_TRANS_DQ_BCOUNT:
                if (qtrx->qt_blk_res && delta > 0) {
                        qtrx->qt_blk_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
                }
                qtrx->qt_bcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELBCOUNT:
                qtrx->qt_delbcnt_delta += delta;
                break;

        /*
         * Inode Count
         */
        case XFS_TRANS_DQ_ICOUNT:
                if (qtrx->qt_ino_res && delta > 0) {
                        qtrx->qt_ino_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                }
                qtrx->qt_icount_delta += delta;
                break;

        /*
         * rtblk reservation
         */
        case XFS_TRANS_DQ_RES_RTBLKS:
                qtrx->qt_rtblk_res += (ulong)delta;
                break;

        /*
         * rtblk count
         */
        case XFS_TRANS_DQ_RTBCOUNT:
                if (qtrx->qt_rtblk_res && delta > 0) {
                        qtrx->qt_rtblk_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
                }
                qtrx->qt_rtbcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELRTBCOUNT:
                qtrx->qt_delrtb_delta += delta;
                break;

        default:
                ASSERT(0);
        }
        tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}


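/*
 * Illustrative sequence (values chosen only for the example): a transaction
 * that reserved 8 blocks against a dquot and then allocated 5 of them would
 * see
 *
 *      xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 8);
 *      xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 5);
 *
 * leaving qt_blk_res = 8, qt_blk_res_used = 5 and qt_bcount_delta = 5 in the
 * dqtrx slot. Nothing in the dquot itself changes here; the accumulated
 * deltas are folded into the incore dquot by xfs_trans_apply_dquot_deltas()
 * at commit time.
 */
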
/*
 * Given an array of dqtrx structures, lock all the dquots associated
 * and join them to the transaction, provided they have been modified.
 * We know that the highest number of dquots (of one type - usr OR grp)
 * involved in a transaction is 2, and that both usr and grp combined is 3.
 * So, we don't attempt to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
        xfs_trans_t     *tp,
        xfs_dqtrx_t     *q)
{
        ASSERT(q[0].qt_dquot != NULL);
        if (q[1].qt_dquot == NULL) {
                xfs_dqlock(q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
        } else {
                ASSERT(XFS_QM_TRANS_MAXDQS == 2);
                xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[1].qt_dquot);
        }
}


/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
        xfs_trans_t             *tp)
{
        int                     i, j;
        xfs_dquot_t             *dqp;
        xfs_dqtrx_t             *qtrx, *qa;
        xfs_disk_dquot_t        *d;
        long                    totalbdelta;
        long                    totalrtbdelta;

        if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        ASSERT(tp->t_dqinfo);
        qa = tp->t_dqinfo->dqa_usrdquots;
        for (j = 0; j < 2; j++) {
                if (qa[0].qt_dquot == NULL) {
                        qa = tp->t_dqinfo->dqa_grpdquots;
                        continue;
                }

                /*
                 * Lock all of the dquots and join them to the transaction.
                 */
                xfs_trans_dqlockedjoin(tp, qa);

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * The array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;

                        ASSERT(XFS_DQ_IS_LOCKED(dqp));
                        ASSERT(dqp->q_transp == tp);

                        /*
                         * adjust the actual number of blocks used
                         */
                        d = &dqp->q_core;

                        /*
                         * The issue here is - sometimes we don't make a blkquota
                         * reservation intentionally to be fair to users
                         * (when the amount is small). On the other hand,
                         * delayed allocs do make reservations, but that's
                         * outside of a transaction, so we have no
                         * idea how much was really reserved.
                         * So, here we've accumulated delayed allocation blks and
                         * non-delay blks. The assumption is that the
                         * delayed ones are always reserved (outside of a
                         * transaction), and the others may or may not have
                         * quota reservations.
                         */
                        totalbdelta = qtrx->qt_bcount_delta +
                                qtrx->qt_delbcnt_delta;
                        totalrtbdelta = qtrx->qt_rtbcount_delta +
                                qtrx->qt_delrtb_delta;
#ifdef DEBUG
                        if (totalbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_bcount) >=
                                       -totalbdelta);

                        if (totalrtbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_rtbcount) >=
                                       -totalrtbdelta);

                        if (qtrx->qt_icount_delta < 0)
                                ASSERT(be64_to_cpu(d->d_icount) >=
                                       -qtrx->qt_icount_delta);
#endif
                        if (totalbdelta)
                                be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

                        if (qtrx->qt_icount_delta)
                                be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

                        if (totalrtbdelta)
                                be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

                        /*
                         * Get any default limits in use.
                         * Start/reset the timer(s) if needed.
                         */
                        if (d->d_id) {
                                xfs_qm_adjust_dqlimits(tp->t_mountp, d);
                                xfs_qm_adjust_dqtimers(tp->t_mountp, d);
                        }

                        dqp->dq_flags |= XFS_DQ_DIRTY;
                        /*
                         * add this to the list of items to get logged
                         */
                        xfs_trans_log_dquot(tp, dqp);
                        /*
                         * Take off what's left of the original reservation.
                         * In case of delayed allocations, there's no
                         * reservation that a transaction structure knows of.
                         */
                        if (qtrx->qt_blk_res != 0) {
                                if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
                                        if (qtrx->qt_blk_res >
                                            qtrx->qt_blk_res_used)
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_blk_res -
                                                         qtrx->qt_blk_res_used);
                                        else
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_blk_res_used -
                                                         qtrx->qt_blk_res);
                                }
                        } else {
                                /*
                                 * These blks were never reserved, either inside
                                 * a transaction or outside one (in a delayed
                                 * allocation). Also, this isn't always a
                                 * negative number since we sometimes
                                 * deliberately skip quota reservations.
                                 */
                                if (qtrx->qt_bcount_delta) {
                                        dqp->q_res_bcount +=
                                              (xfs_qcnt_t)qtrx->qt_bcount_delta;
                                }
                        }
                        /*
                         * Adjust the RT reservation.
                         */
                        if (qtrx->qt_rtblk_res != 0) {
                                if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
                                        if (qtrx->qt_rtblk_res >
                                            qtrx->qt_rtblk_res_used)
                                                dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_rtblk_res -
                                                         qtrx->qt_rtblk_res_used);
                                        else
                                                dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_rtblk_res_used -
                                                         qtrx->qt_rtblk_res);
                                }
                        } else {
                                if (qtrx->qt_rtbcount_delta)
                                        dqp->q_res_rtbcount +=
                                            (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
                        }

                        /*
                         * Adjust the inode reservation.
                         */
                        if (qtrx->qt_ino_res != 0) {
                                ASSERT(qtrx->qt_ino_res >=
                                       qtrx->qt_ino_res_used);
                                if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
                                        dqp->q_res_icount -= (xfs_qcnt_t)
                                                (qtrx->qt_ino_res -
                                                 qtrx->qt_ino_res_used);
                        } else {
                                if (qtrx->qt_icount_delta)
                                        dqp->q_res_icount +=
                                            (xfs_qcnt_t)qtrx->qt_icount_delta;
                        }

                        ASSERT(dqp->q_res_bcount >=
                                be64_to_cpu(dqp->q_core.d_bcount));
                        ASSERT(dqp->q_res_icount >=
                                be64_to_cpu(dqp->q_core.d_icount));
                        ASSERT(dqp->q_res_rtbcount >=
                                be64_to_cpu(dqp->q_core.d_rtbcount));
                }
                /*
                 * Do the group quotas next
                 */
                qa = tp->t_dqinfo->dqa_grpdquots;
        }
}

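/*
 * Worked example of the accounting above, continuing the illustrative
 * numbers from xfs_trans_mod_dquot(): with qt_blk_res = 8,
 * qt_blk_res_used = 5, qt_bcount_delta = 5 and qt_delbcnt_delta = 0, commit
 * adds totalbdelta = 5 to d_bcount and hands back the 3 unused reserved
 * blocks by subtracting (qt_blk_res - qt_blk_res_used) from q_res_bcount,
 * which preserves the invariant q_res_bcount >= d_bcount asserted above.
 */
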
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
        xfs_trans_t             *tp)
{
        int                     i, j;
        xfs_dquot_t             *dqp;
        xfs_dqtrx_t             *qtrx, *qa;
        boolean_t               locked;

        if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        qa = tp->t_dqinfo->dqa_usrdquots;

        for (j = 0; j < 2; j++) {
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * We assume that the array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;
                        /*
                         * Unreserve the original reservation. We don't care
                         * about the number of blocks used field, or deltas.
                         * Also we don't bother to zero the fields.
                         */
                        locked = B_FALSE;
                        if (qtrx->qt_blk_res) {
                                xfs_dqlock(dqp);
                                locked = B_TRUE;
                                dqp->q_res_bcount -=
                                        (xfs_qcnt_t)qtrx->qt_blk_res;
                        }
                        if (qtrx->qt_ino_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = B_TRUE;
                                }
                                dqp->q_res_icount -=
                                        (xfs_qcnt_t)qtrx->qt_ino_res;
                        }

                        if (qtrx->qt_rtblk_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = B_TRUE;
                                }
                                dqp->q_res_rtbcount -=
                                        (xfs_qcnt_t)qtrx->qt_rtblk_res;
                        }
                        if (locked)
                                xfs_dqunlock(dqp);
                }
                qa = tp->t_dqinfo->dqa_grpdquots;
        }
}

STATIC void
xfs_quota_warn(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        int                     type)
{
        /* no warnings for project quotas - we just return ENOSPC later */
        if (dqp->dq_flags & XFS_DQ_PROJ)
                return;
        quota_send_warning(make_kqid(&init_user_ns,
                                     (dqp->dq_flags & XFS_DQ_USER) ?
                                     USRQUOTA : GRPQUOTA,
                                     be32_to_cpu(dqp->q_core.d_id)),
                           mp->m_super->s_dev, type);
}

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *dqp,
        long            nblks,
        long            ninos,
        uint            flags)
{
        xfs_qcnt_t      hardlimit;
        xfs_qcnt_t      softlimit;
        time_t          timer;
        xfs_qwarncnt_t  warns;
        xfs_qwarncnt_t  warnlimit;
        xfs_qcnt_t      total_count;
        xfs_qcnt_t      *resbcountp;
        xfs_quotainfo_t *q = mp->m_quotainfo;


        xfs_dqlock(dqp);

        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
                if (!hardlimit)
                        hardlimit = q->qi_bhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
                if (!softlimit)
                        softlimit = q->qi_bsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_btimer);
                warns = be16_to_cpu(dqp->q_core.d_bwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
                resbcountp = &dqp->q_res_bcount;
        } else {
                ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
                hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
                if (!hardlimit)
                        hardlimit = q->qi_rtbhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
                if (!softlimit)
                        softlimit = q->qi_rtbsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
                warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
                resbcountp = &dqp->q_res_rtbcount;
        }

        if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
            dqp->q_core.d_id &&
            ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
             (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
              (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
                if (nblks > 0) {
                        /*
                         * dquot is locked already. See if we'd go over the
                         * hardlimit or exceed the timelimit if we allocate
                         * nblks.
                         */
                        total_count = *resbcountp + nblks;
                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
                                goto error_return;
                        }
                        if (softlimit && total_count > softlimit) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                    (warns != 0 && warns >= warnlimit)) {
                                        xfs_quota_warn(mp, dqp,
                                                       QUOTA_NL_BSOFTLONGWARN);
                                        goto error_return;
                                }

                                xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
                        }
                }
                if (ninos > 0) {
                        total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
                        timer = be32_to_cpu(dqp->q_core.d_itimer);
                        warns = be16_to_cpu(dqp->q_core.d_iwarns);
                        warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
                        hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
                        if (!hardlimit)
                                hardlimit = q->qi_ihardlimit;
                        softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
                        if (!softlimit)
                                softlimit = q->qi_isoftlimit;

                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
                                goto error_return;
                        }
                        if (softlimit && total_count > softlimit) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                    (warns != 0 && warns >= warnlimit)) {
                                        xfs_quota_warn(mp, dqp,
                                                       QUOTA_NL_ISOFTLONGWARN);
                                        goto error_return;
                                }
                                xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
                        }
                }
        }

        /*
         * Change the reservation, but not the actual usage.
         * Note that q_res_bcount = q_core.d_bcount + resv
         */
        (*resbcountp) += (xfs_qcnt_t)nblks;
        if (ninos != 0)
                dqp->q_res_icount += (xfs_qcnt_t)ninos;

        /*
         * note the reservation amt in the trans struct too,
         * so that the transaction knows how much was reserved by
         * it against this particular dquot.
         * We don't do this when we are reserving for a delayed allocation,
         * because we don't have the luxury of a transaction envelope then.
         */
        if (tp) {
                ASSERT(tp->t_dqinfo);
                ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
                if (nblks != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            flags & XFS_QMOPT_RESBLK_MASK,
                                            nblks);
                if (ninos != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            XFS_TRANS_DQ_RES_INOS,
                                            ninos);
        }
        ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
        ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
        ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

        xfs_dqunlock(dqp);
        return 0;

error_return:
        xfs_dqunlock(dqp);
        if (flags & XFS_QMOPT_ENOSPC)
                return ENOSPC;
        return EDQUOT;
}

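/*
 * On success xfs_trans_dqresv() returns 0 with the reservation recorded in
 * the dquot and, when tp is non-NULL, in the transaction's dqtrx slot as
 * well. On failure it returns EDQUOT, or ENOSPC when the caller passed
 * XFS_QMOPT_ENOSPC (as is done for project quotas). A rough sketch of a
 * forced block reservation that bypasses limit enforcement:
 *
 *      error = xfs_trans_dqresv(tp, mp, dqp, nblks, 0,
 *                               XFS_TRANS_DQ_RES_BLKS | XFS_QMOPT_FORCE_RES);
 */
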
/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against both the usr and
 * grp/prj quotas is important, because this follows a both-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *         XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *         XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *         XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *udqp,
        xfs_dquot_t     *gdqp,
        long            nblks,
        long            ninos,
        uint            flags)
{
        int             resvd = 0, error;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        if (tp && tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

        if (udqp) {
                error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
                                        (flags & ~XFS_QMOPT_ENOSPC));
                if (error)
                        return error;
                resvd = 1;
        }

        if (gdqp) {
                error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
                if (error) {
                        /*
                         * can't do it, so backout previous reservation
                         */
                        if (resvd) {
                                flags |= XFS_QMOPT_FORCE_RES;
                                xfs_trans_dqresv(tp, mp, udqp,
                                                 -nblks, -ninos, flags);
                        }
                        return error;
                }
        }

        /*
         * Didn't change anything critical, so, no need to log
         */
        return 0;
}


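/*
 * The both-or-nothing behaviour above is what callers rely on: if the user
 * dquot reservation succeeds but the group/project one fails, the user
 * reservation is backed out by calling xfs_trans_dqresv() again with the
 * counts negated and XFS_QMOPT_FORCE_RES set, so no partial reservation can
 * escape this function.
 */
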
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        long                    nblks,
        long                    ninos,
        uint                    flags)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
        if (XFS_IS_PQUOTA_ON(mp))
                flags |= XFS_QMOPT_ENOSPC;

        ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_RTBLKS ||
               (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_BLKS);

        /*
         * Reserve nblks against these dquots, with trans as the mediator.
         */
        return xfs_trans_reserve_quota_bydquots(tp, mp,
                                                ip->i_udquot, ip->i_gdquot,
                                                nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *startqoff,
        uint                    flags)
{
        xfs_qoff_logitem_t      *q;

        ASSERT(tp != NULL);

        q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
        ASSERT(q != NULL);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &q->qql_item);
        return q;
}


/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *qlp)
{
        tp->t_flags |= XFS_TRANS_DIRTY;
        qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}

STATIC void
xfs_trans_alloc_dqinfo(
        xfs_trans_t     *tp)
{
        tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
}

void
xfs_trans_free_dqinfo(
        xfs_trans_t     *tp)
{
        if (!tp->t_dqinfo)
                return;
        kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
        tp->t_dqinfo = NULL;
}
889 }