xfs_alloc_btree.c (Linux kernel 3.7.1)
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"

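/*
 * Build a duplicate cursor pointing at the same btree; the generic btree
 * code uses this to checkpoint and restore cursor state.
 */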
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

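/*
 * Point the AGF at a new root block for this btree and adjust the recorded
 * tree height, both on disk and in the in-core per-AG structure.
 */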
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

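/*
 * Allocate a new block for the btree from the AG freelist; *stat is set
 * to 0 when the freelist cannot supply one.
 */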
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

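/*
 * Return a btree block to the AG freelist, mark the extent busy so it is
 * not immediately reused for user data, and invalidate its buffer.
 */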
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, cur->bc_private.a.agno, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	xfs_trans_binval(cur->bc_tp, bp);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

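/* Minimum number of records per block, for leaf (level 0) and node blocks. */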
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

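/* Maximum number of records per block, for leaf (level 0) and node blocks. */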
STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

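/* Build an on-disk key from an on-disk record. */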
STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(rec->alloc.ar_startblock != 0);

	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

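/* Build an on-disk record from an on-disk key. */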
STATIC void
xfs_allocbt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(key->alloc.ar_startblock != 0);

	rec->alloc.ar_startblock = key->alloc.ar_startblock;
	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
}

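/* Build an on-disk record from the in-core record held in the cursor. */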
STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	ASSERT(cur->bc_rec.a.ar_startblock != 0);

	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

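/* Fetch the root block pointer for this cursor's btree from the AGF. */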
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

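/*
 * Compare the in-core record in the cursor against a key: by startblock for
 * the by-bno btree, by blockcount and then startblock for the by-size btree.
 */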
STATIC __int64_t
xfs_allocbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	__int64_t		diff;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
				rec->ar_startblock;
	}

	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

#ifdef DEBUG
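/* Debug check that two adjacent keys are ordered correctly for this btree. */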
STATIC int
xfs_allocbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(k1->alloc.ar_startblock) <
		       be32_to_cpu(k2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(k1->alloc.ar_blockcount) <
			be32_to_cpu(k2->alloc.ar_blockcount) ||
			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
			 be32_to_cpu(k1->alloc.ar_startblock) <
			 be32_to_cpu(k2->alloc.ar_startblock));
	}
}

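/*
 * Debug check that two adjacent records are ordered correctly; by-bno
 * records must not overlap.
 */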
STATIC int
xfs_allocbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(r1->alloc.ar_startblock) +
			be32_to_cpu(r1->alloc.ar_blockcount) <=
			be32_to_cpu(r2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(r1->alloc.ar_blockcount) <
			be32_to_cpu(r2->alloc.ar_blockcount) ||
			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
			 be32_to_cpu(r1->alloc.ar_startblock) <
			 be32_to_cpu(r2->alloc.ar_startblock));
	}
}
#endif	/* DEBUG */

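/*
 * Operations vector handed to the generic btree code; it is shared by the
 * by-bno and by-size allocation btrees.
 */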
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,
#ifdef DEBUG
	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	xfs_agnumber_t		agno,	/* allocation group number */
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_allocbt_ops;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

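/*
 * Illustrative call pattern (a sketch, not part of this file): callers such
 * as the extent allocation code typically do
 *
 *	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
 *	...
 *	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 *
 * where agbp is the locked AGF buffer for allocation group agno.
 */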
/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}
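
/*
 * Worked example (assuming a 4096-byte block and a 16-byte short-form btree
 * block header, with 8-byte records and keys and 4-byte pointers): a leaf
 * holds (4096 - 16) / 8 = 510 records, while a node holds
 * (4096 - 16) / (8 + 4) = 340 key/pointer pairs.
 */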