Linux Kernel 3.7.1
localalloc.c
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * localalloc.c
5  *
6  * Node local data allocation
7  *
8  * Copyright (C) 2002, 2004 Oracle. All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 021110-1307, USA.
24  */
25 
26 #include <linux/fs.h>
27 #include <linux/types.h>
28 #include <linux/slab.h>
29 #include <linux/highmem.h>
30 #include <linux/bitops.h>
31 
32 #include <cluster/masklog.h>
33 
34 #include "ocfs2.h"
35 
36 #include "alloc.h"
37 #include "blockcheck.h"
38 #include "dlmglue.h"
39 #include "inode.h"
40 #include "journal.h"
41 #include "localalloc.h"
42 #include "suballoc.h"
43 #include "super.h"
44 #include "sysfile.h"
45 #include "ocfs2_trace.h"
46 
47 #include "buffer_head_io.h"
48 
49 #define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab))
50 
51 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);
52 
53 static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
54  struct ocfs2_dinode *alloc,
55  u32 *numbits,
56  struct ocfs2_alloc_reservation *resv);
57 
58 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
59 
60 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
61  handle_t *handle,
62  struct ocfs2_dinode *alloc,
63  struct inode *main_bm_inode,
64  struct buffer_head *main_bm_bh);
65 
66 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
67  struct ocfs2_alloc_context **ac,
68  struct inode **bitmap_inode,
69  struct buffer_head **bitmap_bh);
70 
71 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
72  handle_t *handle,
73  struct ocfs2_alloc_context *ac);
74 
75 static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
76  struct inode *local_alloc_inode);
77 
78 /*
79  * ocfs2_la_default_mb() - determine a default size, in megabytes of
80  * the local alloc.
81  *
82  * Generally, we'd like to pick as large a local alloc as
83  * possible. Performance on large workloads tends to scale
84  * proportionally to la size. In addition to that, the reservations
85  * code functions more efficiently as it can reserve more windows for
86  * write.
87  *
88  * Some things work against us when trying to choose a large local alloc:
89  *
90  * - We need to ensure our sizing is picked to leave enough space in
91  * group descriptors for other allocations (such as block groups,
92  * etc). Picking default sizes which are a multiple of 4 could help
93  * - block groups are allocated in 2mb and 4mb chunks.
94  *
95  * - Likewise, we don't want to starve other nodes of bits on small
96  * file systems. This can easily be taken care of by limiting our
97  * default to a reasonable size (256M) on larger cluster sizes.
98  *
99  * - Some file systems can't support very large sizes - 4k and 8k in
100  * particular are limited to less than 128 and 256 megabytes respectively.
101  *
102  * The following reference table shows group descriptor and local
103  * alloc maximums at various cluster sizes (4k blocksize)
104  *
105  * csize: 4K group: 126M la: 121M
106  * csize: 8K group: 252M la: 243M
107  * csize: 16K group: 504M la: 486M
108  * csize: 32K group: 1008M la: 972M
109  * csize: 64K group: 2016M la: 1944M
110  * csize: 128K group: 4032M la: 3888M
111  * csize: 256K group: 8064M la: 7776M
112  * csize: 512K group: 16128M la: 15552M
113  * csize: 1024K group: 32256M la: 31104M
114  */
115 #define OCFS2_LA_MAX_DEFAULT_MB 256
116 #define OCFS2_LA_OLD_DEFAULT 8
117 unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
118 {
119  unsigned int la_mb;
120  unsigned int gd_mb;
121  unsigned int la_max_mb;
122  unsigned int megs_per_slot;
123  struct super_block *sb = osb->sb;
124 
125  gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
126  8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat));
127 
128  /*
129  * This takes care of file systems with very small group
130  * descriptors - 512 byte blocksize at cluster sizes lower
131  * than 16K and also 1k blocksize with 4k cluster size.
132  */
133  if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
134  || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
135  return OCFS2_LA_OLD_DEFAULT;
136 
137  /*
138  * Leave enough room for some block groups and make the final
139  * value we work from a multiple of 4.
140  */
141  gd_mb -= 16;
142  gd_mb &= 0xFFFFFFFB;
143 
144  la_mb = gd_mb;
145 
146  /*
147  * Keep window sizes down to a reasonable default
148  */
149  if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
150  /*
151  * Some clustersize / blocksize combinations will have
152  * given us a larger than OCFS2_LA_MAX_DEFAULT_MB
153  * default size, but get poor distribution when
154  * limited to exactly 256 megabytes.
155  *
156  * As an example, 16K clustersize at 4K blocksize
157  * gives us a cluster group size of 504M. Paring the
158  * local alloc size down to 256 however, would give us
159  * only one window and around 200MB left in the
160  * cluster group. Instead, find the first size below
161  * 256 which would give us an even distribution.
162  *
163  * Larger cluster group sizes actually work out pretty
164  * well when pared to 256, so we don't have to do this
165  * for any group that fits more than two
166  * OCFS2_LA_MAX_DEFAULT_MB windows.
167  */
168  if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
169  la_mb = 256;
170  else {
171  unsigned int gd_mult = gd_mb;
172 
173  while (gd_mult > 256)
174  gd_mult = gd_mult >> 1;
175 
176  la_mb = gd_mult;
177  }
178  }
179 
180  megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
181  megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
182  /* Too many nodes, too few disk clusters. */
183  if (megs_per_slot < la_mb)
184  la_mb = megs_per_slot;
185 
186  /* We can't store more bits than we can in a block. */
187  la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
188  ocfs2_local_alloc_size(sb) * 8);
189  if (la_mb > la_max_mb)
190  la_mb = la_max_mb;
191 
192  return la_mb;
193 }
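The sizing policy above can be hard to follow from the reference table alone. The following standalone sketch is not part of the kernel source; the function and main() driver are illustrative only, and the per-slot and on-disk caps that ocfs2_la_default_mb() applies at the end are omitted. It only mirrors the subtract/mask/halve steps to show how a default window falls out of a group descriptor size:

	#include <stdio.h>

	/* Userspace mirror of the capping logic in ocfs2_la_default_mb(). */
	static unsigned int sketch_default_la_mb(unsigned int gd_mb)
	{
		unsigned int la_mb;

		gd_mb -= 16;		/* leave room for block group allocations */
		gd_mb &= 0xFFFFFFFB;	/* same mask the function above applies */

		la_mb = gd_mb;
		if (la_mb > 256) {
			if (gd_mb > 2 * 256) {
				/* group fits more than two 256M windows */
				la_mb = 256;
			} else {
				unsigned int gd_mult = gd_mb;

				while (gd_mult > 256)	/* halve until it fits */
					gd_mult >>= 1;
				la_mb = gd_mult;
			}
		}
		return la_mb;
	}

	int main(void)
	{
		/* 16K clusters, 4K blocks: 504M group -> 488M -> halved to 244M */
		printf("csize 16K: default la = %uM\n", sketch_default_la_mb(504));
		/* 64K clusters, 4K blocks: 2016M group -> simply capped at 256M */
		printf("csize 64K: default la = %uM\n", sketch_default_la_mb(2016));
		return 0;
	}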
194 
195 void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
196 {
197  struct super_block *sb = osb->sb;
198  unsigned int la_default_mb = ocfs2_la_default_mb(osb);
199  unsigned int la_max_mb;
200 
201  la_max_mb = ocfs2_clusters_to_megabytes(sb,
202  ocfs2_local_alloc_size(sb) * 8);
203 
204  trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);
205 
206  if (requested_mb == -1) {
207  /* No user request - use defaults */
208  osb->local_alloc_default_bits =
209  ocfs2_megabytes_to_clusters(sb, la_default_mb);
210  } else if (requested_mb > la_max_mb) {
211  /* Request is too big, we give the maximum available */
212  osb->local_alloc_default_bits =
213  ocfs2_megabytes_to_clusters(sb, la_max_mb);
214  } else {
215  osb->local_alloc_default_bits =
216  ocfs2_megabytes_to_clusters(sb, requested_mb);
217  }
218 
219  osb->local_alloc_bits = osb->local_alloc_default_bits;
220 }
221 
222 static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
223 {
224  return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
225  osb->local_alloc_state == OCFS2_LA_ENABLED);
226 }
227 
228 void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
229  unsigned int num_clusters)
230 {
231  spin_lock(&osb->osb_lock);
232  if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
233  osb->local_alloc_state == OCFS2_LA_THROTTLED)
234  if (num_clusters >= osb->local_alloc_default_bits) {
235  cancel_delayed_work(&osb->la_enable_wq);
236  osb->local_alloc_state = OCFS2_LA_ENABLED;
237  }
238  spin_unlock(&osb->osb_lock);
239 }
240 
241 void ocfs2_la_enable_worker(struct work_struct *work)
242 {
243  struct ocfs2_super *osb =
244  container_of(work, struct ocfs2_super,
245  la_enable_wq.work);
246  spin_lock(&osb->osb_lock);
247  osb->local_alloc_state = OCFS2_LA_ENABLED;
248  spin_unlock(&osb->osb_lock);
249 }
250 
251 /*
252  * Tell us whether a given allocation should use the local alloc
253  * file. Otherwise, it has to go to the main bitmap.
254  *
255  * This function does semi-dirty reads of local alloc size and state!
256  * This is ok however, as the values are re-checked once under mutex.
257  */
258 int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
259 {
260  int ret = 0;
261  int la_bits;
262 
263  spin_lock(&osb->osb_lock);
264  la_bits = osb->local_alloc_bits;
265 
266  if (!ocfs2_la_state_enabled(osb))
267  goto bail;
268 
269  /* la_bits should be at least twice the size (in clusters) of
270  * a new block group. We want to be sure block group
271  * allocations go through the local alloc, so allow an
272  * allocation to take up to half the bitmap. */
273  if (bits > (la_bits / 2))
274  goto bail;
275 
276  ret = 1;
277 bail:
278  trace_ocfs2_alloc_should_use_local(
279  (unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
280  spin_unlock(&osb->osb_lock);
281  return ret;
282 }
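As a toy illustration of the policy above (userspace only; the helper name and the figures in main() are hypothetical): an allocation is steered to the local alloc only while it fits in half the window, so block-group-sized requests still leave room.

	#include <stdio.h>

	static int should_use_local(unsigned long long bits, int enabled, int la_bits)
	{
		/* mirrors the "bits > (la_bits / 2)" bail-out above */
		return enabled && bits <= (unsigned long long)(la_bits / 2);
	}

	int main(void)
	{
		/* a 256MB window at 4K clusters is 65536 bits */
		printf("%d\n", should_use_local(1024, 1, 65536));	/* 1: small request */
		printf("%d\n", should_use_local(40000, 1, 65536));	/* 0: over half the window */
		return 0;
	}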
283 
284 int ocfs2_load_local_alloc(struct ocfs2_super *osb)
285 {
286  int status = 0;
287  struct ocfs2_dinode *alloc = NULL;
288  struct buffer_head *alloc_bh = NULL;
289  u32 num_used;
290  struct inode *inode = NULL;
291  struct ocfs2_local_alloc *la;
292 
293  if (osb->local_alloc_bits == 0)
294  goto bail;
295 
296  if (osb->local_alloc_bits >= osb->bitmap_cpg) {
297  mlog(ML_NOTICE, "Requested local alloc window %d is larger "
298  "than max possible %u. Using defaults.\n",
299  osb->local_alloc_bits, (osb->bitmap_cpg - 1));
300  osb->local_alloc_bits =
301  ocfs2_megabytes_to_clusters(osb->sb,
302  ocfs2_la_default_mb(osb));
303  }
304 
305  /* read the alloc off disk */
306  inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
307  osb->slot_num);
308  if (!inode) {
309  status = -EINVAL;
310  mlog_errno(status);
311  goto bail;
312  }
313 
314  status = ocfs2_read_inode_block_full(inode, &alloc_bh,
315  OCFS2_BH_IGNORE_CACHE);
316  if (status < 0) {
317  mlog_errno(status);
318  goto bail;
319  }
320 
321  alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
322  la = OCFS2_LOCAL_ALLOC(alloc);
323 
324  if (!(le32_to_cpu(alloc->i_flags) &
325  (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
326  mlog(ML_ERROR, "Invalid local alloc inode, %llu\n",
327  (unsigned long long)OCFS2_I(inode)->ip_blkno);
328  status = -EINVAL;
329  goto bail;
330  }
331 
332  if ((la->la_size == 0) ||
333  (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
334  mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
335  le16_to_cpu(la->la_size));
336  status = -EINVAL;
337  goto bail;
338  }
339 
340  /* do a little verification. */
341  num_used = ocfs2_local_alloc_count_bits(alloc);
342 
343  /* hopefully the local alloc has always been recovered before
344  * we load it. */
345  if (num_used
346  || alloc->id1.bitmap1.i_used
347  || alloc->id1.bitmap1.i_total
348  || la->la_bm_off)
349  mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
350  "found = %u, set = %u, taken = %u, off = %u\n",
351  num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
352  le32_to_cpu(alloc->id1.bitmap1.i_total),
353  OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
354 
355  osb->local_alloc_bh = alloc_bh;
356  osb->local_alloc_state = OCFS2_LA_ENABLED;
357 
358 bail:
359  if (status < 0)
360  brelse(alloc_bh);
361  if (inode)
362  iput(inode);
363 
364  trace_ocfs2_load_local_alloc(osb->local_alloc_bits);
365 
366  if (status)
367  mlog_errno(status);
368  return status;
369 }
370 
371 /*
372  * return any unused bits to the bitmap and write out a clean
373  * local_alloc.
374  *
375  * local_alloc_bh is optional. If not passed, we will simply use the
376  * one off osb. If you do pass it however, be warned that it *will* be
377  * returned brelse'd and NULL'd out.*/
378 void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
379 {
380  int status;
381  handle_t *handle;
382  struct inode *local_alloc_inode = NULL;
383  struct buffer_head *bh = NULL;
384  struct buffer_head *main_bm_bh = NULL;
385  struct inode *main_bm_inode = NULL;
386  struct ocfs2_dinode *alloc_copy = NULL;
387  struct ocfs2_dinode *alloc = NULL;
388 
389  cancel_delayed_work(&osb->la_enable_wq);
390  flush_workqueue(ocfs2_wq);
391 
392  if (osb->local_alloc_state == OCFS2_LA_UNUSED)
393  goto out;
394 
395  local_alloc_inode =
396  ocfs2_get_system_file_inode(osb,
397  LOCAL_ALLOC_SYSTEM_INODE,
398  osb->slot_num);
399  if (!local_alloc_inode) {
400  status = -ENOENT;
401  mlog_errno(status);
402  goto out;
403  }
404 
405  osb->local_alloc_state = OCFS2_LA_DISABLED;
406 
407  ocfs2_resmap_uninit(&osb->osb_la_resmap);
408 
409  main_bm_inode = ocfs2_get_system_file_inode(osb,
410  GLOBAL_BITMAP_SYSTEM_INODE,
411  OCFS2_INVALID_SLOT);
412  if (!main_bm_inode) {
413  status = -EINVAL;
414  mlog_errno(status);
415  goto out;
416  }
417 
418  mutex_lock(&main_bm_inode->i_mutex);
419 
420  status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
421  if (status < 0) {
422  mlog_errno(status);
423  goto out_mutex;
424  }
425 
426  /* WINDOW_MOVE_CREDITS is a bit heavy... */
427  handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
428  if (IS_ERR(handle)) {
429  mlog_errno(PTR_ERR(handle));
430  handle = NULL;
431  goto out_unlock;
432  }
433 
434  bh = osb->local_alloc_bh;
435  alloc = (struct ocfs2_dinode *) bh->b_data;
436 
437  alloc_copy = kmalloc(bh->b_size, GFP_NOFS);
438  if (!alloc_copy) {
439  status = -ENOMEM;
440  goto out_commit;
441  }
442  memcpy(alloc_copy, alloc, bh->b_size);
443 
444  status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
445  bh, OCFS2_JOURNAL_ACCESS_WRITE);
446  if (status < 0) {
447  mlog_errno(status);
448  goto out_commit;
449  }
450 
451  ocfs2_clear_local_alloc(alloc);
452  ocfs2_journal_dirty(handle, bh);
453 
454  brelse(bh);
455  osb->local_alloc_bh = NULL;
456  osb->local_alloc_state = OCFS2_LA_UNUSED;
457 
458  status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
459  main_bm_inode, main_bm_bh);
460  if (status < 0)
461  mlog_errno(status);
462 
463 out_commit:
464  ocfs2_commit_trans(osb, handle);
465 
466 out_unlock:
467  brelse(main_bm_bh);
468 
469  ocfs2_inode_unlock(main_bm_inode, 1);
470 
471 out_mutex:
472  mutex_unlock(&main_bm_inode->i_mutex);
473  iput(main_bm_inode);
474 
475 out:
476  if (local_alloc_inode)
477  iput(local_alloc_inode);
478 
479  if (alloc_copy)
480  kfree(alloc_copy);
481 }
482 
483 /*
484  * We want to free the bitmap bits outside of any recovery context as
485  * we'll need a cluster lock to do so, but we must clear the local
486  * alloc before giving up the recovered node's journal. To solve this,
487  * we kmalloc a copy of the local alloc before it's changed for the
488  * caller to process with ocfs2_complete_local_alloc_recovery
489  */
490 int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
491  int slot_num,
492  struct ocfs2_dinode **alloc_copy)
493 {
494  int status = 0;
495  struct buffer_head *alloc_bh = NULL;
496  struct inode *inode = NULL;
497  struct ocfs2_dinode *alloc;
498 
499  trace_ocfs2_begin_local_alloc_recovery(slot_num);
500 
501  *alloc_copy = NULL;
502 
503  inode = ocfs2_get_system_file_inode(osb,
504  LOCAL_ALLOC_SYSTEM_INODE,
505  slot_num);
506  if (!inode) {
507  status = -EINVAL;
508  mlog_errno(status);
509  goto bail;
510  }
511 
512  mutex_lock(&inode->i_mutex);
513 
514  status = ocfs2_read_inode_block_full(inode, &alloc_bh,
515  OCFS2_BH_IGNORE_CACHE);
516  if (status < 0) {
517  mlog_errno(status);
518  goto bail;
519  }
520 
521  *alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL);
522  if (!(*alloc_copy)) {
523  status = -ENOMEM;
524  goto bail;
525  }
526  memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size);
527 
528  alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
529  ocfs2_clear_local_alloc(alloc);
530 
531  ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check);
532  status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode));
533  if (status < 0)
534  mlog_errno(status);
535 
536 bail:
537  if ((status < 0) && (*alloc_copy)) {
538  kfree(*alloc_copy);
539  *alloc_copy = NULL;
540  }
541 
542  brelse(alloc_bh);
543 
544  if (inode) {
545  mutex_unlock(&inode->i_mutex);
546  iput(inode);
547  }
548 
549  if (status)
550  mlog_errno(status);
551  return status;
552 }
553 
554 /*
555  * Step 2: By now, we've completed the journal recovery, we've stamped
556  * a clean local alloc on disk and dropped the node out of the
557  * recovery map. Dlm locks will no longer stall, so let's clear out the
558  * main bitmap.
559  */
560 int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
561  struct ocfs2_dinode *alloc)
562 {
563  int status;
564  handle_t *handle;
565  struct buffer_head *main_bm_bh = NULL;
566  struct inode *main_bm_inode;
567 
568  main_bm_inode = ocfs2_get_system_file_inode(osb,
569  GLOBAL_BITMAP_SYSTEM_INODE,
570  OCFS2_INVALID_SLOT);
571  if (!main_bm_inode) {
572  status = -EINVAL;
573  mlog_errno(status);
574  goto out;
575  }
576 
577  mutex_lock(&main_bm_inode->i_mutex);
578 
579  status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
580  if (status < 0) {
581  mlog_errno(status);
582  goto out_mutex;
583  }
584 
585  handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
586  if (IS_ERR(handle)) {
587  status = PTR_ERR(handle);
588  handle = NULL;
589  mlog_errno(status);
590  goto out_unlock;
591  }
592 
593  /* we want the bitmap change to be recorded on disk asap */
594  handle->h_sync = 1;
595 
596  status = ocfs2_sync_local_to_main(osb, handle, alloc,
597  main_bm_inode, main_bm_bh);
598  if (status < 0)
599  mlog_errno(status);
600 
601  ocfs2_commit_trans(osb, handle);
602 
603 out_unlock:
604  ocfs2_inode_unlock(main_bm_inode, 1);
605 
606 out_mutex:
607  mutex_unlock(&main_bm_inode->i_mutex);
608 
609  brelse(main_bm_bh);
610 
611  iput(main_bm_inode);
612 
613 out:
614  if (!status)
615  ocfs2_init_steal_slots(osb);
616  if (status)
617  mlog_errno(status);
618  return status;
619 }
620 
621 /*
622  * make sure we've got at least bits_wanted contiguous bits in the
623  * local alloc. You lose them when you drop i_mutex.
624  *
625  * We will add ourselves to the transaction passed in, but may start
626  * our own in order to shift windows.
627  */
628 int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
629  u32 bits_wanted,
630  struct ocfs2_alloc_context *ac)
631 {
632  int status;
633  struct ocfs2_dinode *alloc;
634  struct inode *local_alloc_inode;
635  unsigned int free_bits;
636 
637  BUG_ON(!ac);
638 
639  local_alloc_inode =
640  ocfs2_get_system_file_inode(osb,
641  LOCAL_ALLOC_SYSTEM_INODE,
642  osb->slot_num);
643  if (!local_alloc_inode) {
644  status = -ENOENT;
645  mlog_errno(status);
646  goto bail;
647  }
648 
649  mutex_lock(&local_alloc_inode->i_mutex);
650 
651  /*
652  * We must double check state and allocator bits because
653  * another process may have changed them while holding i_mutex.
654  */
655  spin_lock(&osb->osb_lock);
656  if (!ocfs2_la_state_enabled(osb) ||
657  (bits_wanted > osb->local_alloc_bits)) {
658  spin_unlock(&osb->osb_lock);
659  status = -ENOSPC;
660  goto bail;
661  }
662  spin_unlock(&osb->osb_lock);
663 
664  alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
665 
666 #ifdef CONFIG_OCFS2_DEBUG_FS
667  if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
668  ocfs2_local_alloc_count_bits(alloc)) {
669  ocfs2_error(osb->sb, "local alloc inode %llu says it has "
670  "%u free bits, but a count shows %u",
671  (unsigned long long)le64_to_cpu(alloc->i_blkno),
672  le32_to_cpu(alloc->id1.bitmap1.i_used),
673  ocfs2_local_alloc_count_bits(alloc));
674  status = -EIO;
675  goto bail;
676  }
677 #endif
678 
679  free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
680  le32_to_cpu(alloc->id1.bitmap1.i_used);
681  if (bits_wanted > free_bits) {
682  /* uhoh, window change time. */
683  status =
684  ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
685  if (status < 0) {
686  if (status != -ENOSPC)
687  mlog_errno(status);
688  goto bail;
689  }
690 
691  /*
692  * Under certain conditions, the window slide code
693  * might have reduced the number of bits available or
694  * disabled the local alloc entirely. Re-check
695  * here and return -ENOSPC if necessary.
696  */
697  status = -ENOSPC;
698  if (!ocfs2_la_state_enabled(osb))
699  goto bail;
700 
701  free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
702  le32_to_cpu(alloc->id1.bitmap1.i_used);
703  if (bits_wanted > free_bits)
704  goto bail;
705  }
706 
707  ac->ac_inode = local_alloc_inode;
708  /* We should never use localalloc from another slot */
709  ac->ac_alloc_slot = osb->slot_num;
710  ac->ac_which = OCFS2_AC_USE_LOCAL;
711  get_bh(osb->local_alloc_bh);
712  ac->ac_bh = osb->local_alloc_bh;
713  status = 0;
714 bail:
715  if (status < 0 && local_alloc_inode) {
716  mutex_unlock(&local_alloc_inode->i_mutex);
717  iput(local_alloc_inode);
718  }
719 
720  trace_ocfs2_reserve_local_alloc_bits(
721  (unsigned long long)ac->ac_max_block,
722  bits_wanted, osb->slot_num, status);
723 
724  if (status)
725  mlog_errno(status);
726  return status;
727 }
728 
729 int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
730  handle_t *handle,
731  struct ocfs2_alloc_context *ac,
732  u32 bits_wanted,
733  u32 *bit_off,
734  u32 *num_bits)
735 {
736  int status, start;
737  struct inode *local_alloc_inode;
738  void *bitmap;
739  struct ocfs2_dinode *alloc;
740  struct ocfs2_local_alloc *la;
741 
742  BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
743 
744  local_alloc_inode = ac->ac_inode;
745  alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
746  la = OCFS2_LOCAL_ALLOC(alloc);
747 
748  start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
749  ac->ac_resv);
750  if (start == -1) {
751  /* TODO: Shouldn't we just BUG here? */
752  status = -ENOSPC;
753  mlog_errno(status);
754  goto bail;
755  }
756 
757  bitmap = la->la_bitmap;
758  *bit_off = le32_to_cpu(la->la_bm_off) + start;
759  *num_bits = bits_wanted;
760 
761  status = ocfs2_journal_access_di(handle,
762  INODE_CACHE(local_alloc_inode),
763  osb->local_alloc_bh,
764  OCFS2_JOURNAL_ACCESS_WRITE);
765  if (status < 0) {
766  mlog_errno(status);
767  goto bail;
768  }
769 
770  ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
771  bits_wanted);
772 
773  while(bits_wanted--)
774  ocfs2_set_bit(start++, bitmap);
775 
776  le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
777  ocfs2_journal_dirty(handle, osb->local_alloc_bh);
778 
779 bail:
780  if (status)
781  mlog_errno(status);
782  return status;
783 }
784 
785 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
786 {
787  u32 count;
788  struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
789 
790  count = memweight(la->la_bitmap, le16_to_cpu(la->la_size));
791 
792  trace_ocfs2_local_alloc_count_bits(count);
793  return count;
794 }
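memweight() simply counts the set bits in the la_size bytes of the bitmap. A rough userspace equivalent, shown only to make the verification done in ocfs2_load_local_alloc() concrete (the helper name and sample data are illustrative):

	#include <stdio.h>
	#include <stddef.h>

	static unsigned int count_bits(const unsigned char *bitmap, size_t bytes)
	{
		unsigned int count = 0;
		size_t i;

		/* one popcount per byte, same result as memweight() */
		for (i = 0; i < bytes; i++)
			count += __builtin_popcount(bitmap[i]);
		return count;
	}

	int main(void)
	{
		unsigned char la_bitmap[4] = { 0xff, 0x0f, 0x00, 0x01 };

		printf("%u bits set\n", count_bits(la_bitmap, sizeof(la_bitmap)));	/* 13 */
		return 0;
	}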
795 
796 static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
797  struct ocfs2_dinode *alloc,
798  u32 *numbits,
799  struct ocfs2_alloc_reservation *resv)
800 {
801  int numfound, bitoff, left, startoff, lastzero;
802  int local_resv = 0;
803  struct ocfs2_alloc_reservation r;
804  void *bitmap = NULL;
805  struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
806 
807  if (!alloc->id1.bitmap1.i_total) {
808  bitoff = -1;
809  goto bail;
810  }
811 
812  if (!resv) {
813  local_resv = 1;
814  ocfs2_resv_init_once(&r);
815  ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
816  resv = &r;
817  }
818 
819  numfound = *numbits;
820  if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
821  if (numfound < *numbits)
822  *numbits = numfound;
823  goto bail;
824  }
825 
826  /*
827  * Code error. While reservations are enabled, local
828  * allocation should _always_ go through them.
829  */
830  BUG_ON(osb->osb_resv_level != 0);
831 
832  /*
833  * Reservations are disabled. Handle this the old way.
834  */
835 
836  bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
837 
838  numfound = bitoff = startoff = 0;
839  lastzero = -1;
840  left = le32_to_cpu(alloc->id1.bitmap1.i_total);
841  while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
842  if (bitoff == left) {
843  /* mlog(0, "bitoff (%d) == left", bitoff); */
844  break;
845  }
846  /* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
847  "numfound = %d\n", bitoff, startoff, numfound);*/
848 
849  /* Ok, we found a zero bit... is it contig. or do we
850  * start over?*/
851  if (bitoff == startoff) {
852  /* we found a zero */
853  numfound++;
854  startoff++;
855  } else {
856  /* got a zero after some ones */
857  numfound = 1;
858  startoff = bitoff+1;
859  }
860  /* we got everything we needed */
861  if (numfound == *numbits) {
862  /* mlog(0, "Found it all!\n"); */
863  break;
864  }
865  }
866 
867  trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);
868 
869  if (numfound == *numbits)
870  bitoff = startoff - numfound;
871  else
872  bitoff = -1;
873 
874 bail:
875  if (local_resv)
876  ocfs2_resv_discard(resmap, resv);
877 
878  trace_ocfs2_local_alloc_find_clear_bits(*numbits,
879  le32_to_cpu(alloc->id1.bitmap1.i_total),
880  bitoff, numfound);
881 
882  return bitoff;
883 }
884 
885 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
886 {
887  struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
888  int i;
889 
890  alloc->id1.bitmap1.i_total = 0;
891  alloc->id1.bitmap1.i_used = 0;
892  la->la_bm_off = 0;
893  for(i = 0; i < le16_to_cpu(la->la_size); i++)
894  la->la_bitmap[i] = 0;
895 }
896 
897 #if 0
898 /* turn this on and uncomment below to aid debugging window shifts. */
899 static void ocfs2_verify_zero_bits(unsigned long *bitmap,
900  unsigned int start,
901  unsigned int count)
902 {
903  unsigned int tmp = count;
904  while(tmp--) {
905  if (ocfs2_test_bit(start + tmp, bitmap)) {
906  printk("ocfs2_verify_zero_bits: start = %u, count = "
907  "%u\n", start, count);
908  printk("ocfs2_verify_zero_bits: bit %u is set!",
909  start + tmp);
910  BUG();
911  }
912  }
913 }
914 #endif
915 
916 /*
917  * sync the local alloc to main bitmap.
918  *
919  * assumes you've already locked the main bitmap -- the bitmap inode
920  * passed is used for caching.
921  */
922 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
923  handle_t *handle,
924  struct ocfs2_dinode *alloc,
925  struct inode *main_bm_inode,
926  struct buffer_head *main_bm_bh)
927 {
928  int status = 0;
929  int bit_off, left, count, start;
930  u64 la_start_blk;
931  u64 blkno;
932  void *bitmap;
933  struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
934 
935  trace_ocfs2_sync_local_to_main(
936  le32_to_cpu(alloc->id1.bitmap1.i_total),
937  le32_to_cpu(alloc->id1.bitmap1.i_used));
938 
939  if (!alloc->id1.bitmap1.i_total) {
940  goto bail;
941  }
942 
943  if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
944  le32_to_cpu(alloc->id1.bitmap1.i_total)) {
945  goto bail;
946  }
947 
948  la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
949  le32_to_cpu(la->la_bm_off));
950  bitmap = la->la_bitmap;
951  start = count = bit_off = 0;
952  left = le32_to_cpu(alloc->id1.bitmap1.i_total);
953 
954  while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
955  != -1) {
956  if ((bit_off < left) && (bit_off == start)) {
957  count++;
958  start++;
959  continue;
960  }
961  if (count) {
962  blkno = la_start_blk +
963  ocfs2_clusters_to_blocks(osb->sb,
964  start - count);
965 
966  trace_ocfs2_sync_local_to_main_free(
967  count, start - count,
968  (unsigned long long)la_start_blk,
969  (unsigned long long)blkno);
970 
971  status = ocfs2_release_clusters(handle,
972  main_bm_inode,
973  main_bm_bh, blkno,
974  count);
975  if (status < 0) {
976  mlog_errno(status);
977  goto bail;
978  }
979  }
980  if (bit_off >= left)
981  break;
982  count = 1;
983  start = bit_off + 1;
984  }
985 
986 bail:
987  if (status)
988  mlog_errno(status);
989  return status;
990 }
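The loop above walks the local alloc bitmap with find-next-zero-bit semantics: consecutive zero (unused) bits are accumulated in count and each completed run is released back to the main bitmap. A minimal userspace sketch of that run detection, assuming a simplified byte-wise bit helper and with ocfs2_release_clusters() replaced by a printf:

	#include <stdio.h>

	static int find_next_zero_bit8(const unsigned char *bm, int size, int off)
	{
		for (; off < size; off++)
			if (!(bm[off / 8] & (1 << (off % 8))))
				return off;
		return size;	/* "not found", like bit_off == left above */
	}

	static void release_unused_runs(const unsigned char *bm, int left)
	{
		int start = 0, count = 0, bit_off;

		/* the helper never returns -1; the loop exits via the break,
		 * mirroring the structure of the kernel loop above */
		while ((bit_off = find_next_zero_bit8(bm, left, start)) != -1) {
			if (bit_off < left && bit_off == start) {
				count++;	/* extend the current run of free bits */
				start++;
				continue;
			}
			if (count)		/* a run ended; give it back */
				printf("release %d clusters at bit %d\n",
				       count, start - count);
			if (bit_off >= left)
				break;
			count = 1;		/* a new run starts at bit_off */
			start = bit_off + 1;
		}
	}

	int main(void)
	{
		/* bits 1-2 and 4-6 are zero (unused), the rest are in use */
		unsigned char bm[1] = { 0x89 };	/* 1000 1001 */

		release_unused_runs(bm, 8);
		return 0;
	}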
991 
992 enum ocfs2_la_event {
993  OCFS2_LA_EVENT_SLIDE, /* Normal window slide. */
994  OCFS2_LA_EVENT_FRAGMENTED, /* The global bitmap has
995  * enough bits theoretically
996  * free, but a contiguous
997  * allocation could not be
998  * found. */
999  OCFS2_LA_EVENT_ENOSPC, /* Global bitmap doesn't have
1000  * enough bits free to satisfy
1001  * our request. */
1002 };
1003 #define OCFS2_LA_ENABLE_INTERVAL (30 * HZ)
1004 /*
1005  * Given an event, calculate the size of our next local alloc window.
1006  *
1007  * This should always be called under i_mutex of the local alloc inode
1008  * so that local alloc disabling doesn't race with processes trying to
1009  * use the allocator.
1010  *
1011  * Returns the state which the local alloc was left in. This value can
1012  * be ignored by some paths.
1013  */
1014 static int ocfs2_recalc_la_window(struct ocfs2_super *osb,
1015  enum ocfs2_la_event event)
1016 {
1017  unsigned int bits;
1018  int state;
1019 
1020  spin_lock(&osb->osb_lock);
1021  if (osb->local_alloc_state == OCFS2_LA_DISABLED) {
1022  WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED);
1023  goto out_unlock;
1024  }
1025 
1026  /*
1027  * ENOSPC and fragmentation are treated similarly for now.
1028  */
1029  if (event == OCFS2_LA_EVENT_ENOSPC ||
1030  event == OCFS2_LA_EVENT_FRAGMENTED) {
1031  /*
1032  * We ran out of contiguous space in the primary
1033  * bitmap. Drastically reduce the number of bits used
1034  * by local alloc until we have to disable it.
1035  */
1036  bits = osb->local_alloc_bits >> 1;
1037  if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) {
1038  /*
1039  * By setting state to THROTTLED, we'll keep
1040  * the number of local alloc bits used down
1041  * until an event occurs which would give us
1042  * reason to assume the bitmap situation might
1043  * have changed.
1044  */
1045  osb->local_alloc_state = OCFS2_LA_THROTTLED;
1046  osb->local_alloc_bits = bits;
1047  } else {
1048  osb->local_alloc_state = OCFS2_LA_DISABLED;
1049  }
1050  queue_delayed_work(ocfs2_wq, &osb->la_enable_wq,
1051  OCFS2_LA_ENABLE_INTERVAL);
1052  goto out_unlock;
1053  }
1054 
1055  /*
1056  * Don't increase the size of the local alloc window until we
1057  * know we might be able to fulfill the request. Otherwise, we
1058  * risk bouncing around the global bitmap during periods of
1059  * low space.
1060  */
1061  if (osb->local_alloc_state != OCFS2_LA_THROTTLED)
1062  osb->local_alloc_bits = osb->local_alloc_default_bits;
1063 
1064 out_unlock:
1065  state = osb->local_alloc_state;
1066  spin_unlock(&osb->osb_lock);
1067 
1068  return state;
1069 }
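A minimal sketch of the throttling decision above (illustrative names, not kernel API): on fragmentation or ENOSPC the window is halved and the allocator throttled; once halving would leave only a megabyte or less worth of clusters, local alloc is disabled instead, with a delayed re-enable queued either way.

	#include <stdio.h>

	enum la_state { LA_ENABLED, LA_THROTTLED, LA_DISABLED };

	static enum la_state shrink_window(unsigned int *bits, unsigned int one_mb)
	{
		unsigned int halved = *bits >> 1;

		if (halved > one_mb) {
			*bits = halved;	/* keep allocating, but throttled */
			return LA_THROTTLED;
		}
		return LA_DISABLED;	/* too small to be useful */
	}

	int main(void)
	{
		/* 256MB window at 4K clusters = 65536 bits; 1MB = 256 bits */
		unsigned int bits = 65536;

		while (shrink_window(&bits, 256) == LA_THROTTLED)
			printf("throttled, window now %u bits\n", bits);
		printf("disabled\n");
		return 0;
	}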
1070 
1071 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
1072  struct ocfs2_alloc_context **ac,
1073  struct inode **bitmap_inode,
1074  struct buffer_head **bitmap_bh)
1075 {
1076  int status;
1077 
1078  *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
1079  if (!(*ac)) {
1080  status = -ENOMEM;
1081  mlog_errno(status);
1082  goto bail;
1083  }
1084 
1085 retry_enospc:
1086  (*ac)->ac_bits_wanted = osb->local_alloc_default_bits;
1087  status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
1088  if (status == -ENOSPC) {
1089  if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
1090  OCFS2_LA_DISABLED)
1091  goto bail;
1092 
1093  ocfs2_free_ac_resource(*ac);
1094  memset(*ac, 0, sizeof(struct ocfs2_alloc_context));
1095  goto retry_enospc;
1096  }
1097  if (status < 0) {
1098  mlog_errno(status);
1099  goto bail;
1100  }
1101 
1102  *bitmap_inode = (*ac)->ac_inode;
1103  igrab(*bitmap_inode);
1104  *bitmap_bh = (*ac)->ac_bh;
1105  get_bh(*bitmap_bh);
1106  status = 0;
1107 bail:
1108  if ((status < 0) && *ac) {
1109  ocfs2_free_alloc_context(*ac);
1110  *ac = NULL;
1111  }
1112 
1113  if (status)
1114  mlog_errno(status);
1115  return status;
1116 }
1117 
1118 /*
1119  * pass it the bitmap lock in lock_bh if you have it.
1120  */
1121 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
1122  handle_t *handle,
1123  struct ocfs2_alloc_context *ac)
1124 {
1125  int status = 0;
1126  u32 cluster_off, cluster_count;
1127  struct ocfs2_dinode *alloc = NULL;
1128  struct ocfs2_local_alloc *la;
1129 
1130  alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
1131  la = OCFS2_LOCAL_ALLOC(alloc);
1132 
1133  trace_ocfs2_local_alloc_new_window(
1134  le32_to_cpu(alloc->id1.bitmap1.i_total),
1135  osb->local_alloc_bits);
1136 
1137  /* Instruct the allocation code to try the most recently used
1138  * cluster group. We'll re-record the group used this pass
1139  * below. */
1140  ac->ac_last_group = osb->la_last_gd;
1141 
1142  /* we used the generic suballoc reserve function, but we set
1143  * everything up nicely, so there's no reason why we can't use
1144  * the more specific cluster api to claim bits. */
1145  status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
1146  &cluster_off, &cluster_count);
1147  if (status == -ENOSPC) {
1148 retry_enospc:
1149  /*
1150  * Note: We could also try syncing the journal here to
1151  * allow use of any free bits which the current
1152  * transaction can't give us access to. --Mark
1153  */
1154  if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) ==
1155  OCFS2_LA_DISABLED)
1156  goto bail;
1157 
1158  ac->ac_bits_wanted = osb->local_alloc_default_bits;
1159  status = ocfs2_claim_clusters(handle, ac,
1160  osb->local_alloc_bits,
1161  &cluster_off,
1162  &cluster_count);
1163  if (status == -ENOSPC)
1164  goto retry_enospc;
1165  /*
1166  * We only shrunk the *minimum* number of bits in our
1167  * request - it's entirely possible that the allocator
1168  * might give us more than we asked for.
1169  */
1170  if (status == 0) {
1171  spin_lock(&osb->osb_lock);
1172  osb->local_alloc_bits = cluster_count;
1173  spin_unlock(&osb->osb_lock);
1174  }
1175  }
1176  if (status < 0) {
1177  if (status != -ENOSPC)
1178  mlog_errno(status);
1179  goto bail;
1180  }
1181 
1182  osb->la_last_gd = ac->ac_last_group;
1183 
1184  la->la_bm_off = cpu_to_le32(cluster_off);
1185  alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
1186  /* just in case... In the future when we find space ourselves,
1187  * we don't have to get all contiguous -- but we'll have to
1188  * set all previously used bits in bitmap and update
1189  * la_bits_set before setting the bits in the main bitmap. */
1190  alloc->id1.bitmap1.i_used = 0;
1191  memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
1192  le16_to_cpu(la->la_size));
1193 
1194  ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
1195  OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
1196 
1197  trace_ocfs2_local_alloc_new_window_result(
1198  OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
1199  le32_to_cpu(alloc->id1.bitmap1.i_total));
1200 
1201 bail:
1202  if (status)
1203  mlog_errno(status);
1204  return status;
1205 }
1206 
1207 /* Note that we do *NOT* lock the local alloc inode here as
1208  * it's been locked already for us. */
1209 static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
1210  struct inode *local_alloc_inode)
1211 {
1212  int status = 0;
1213  struct buffer_head *main_bm_bh = NULL;
1214  struct inode *main_bm_inode = NULL;
1215  handle_t *handle = NULL;
1216  struct ocfs2_dinode *alloc;
1217  struct ocfs2_dinode *alloc_copy = NULL;
1218  struct ocfs2_alloc_context *ac = NULL;
1219 
1220  ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);
1221 
1222  /* This will lock the main bitmap for us. */
1223  status = ocfs2_local_alloc_reserve_for_window(osb,
1224  &ac,
1225  &main_bm_inode,
1226  &main_bm_bh);
1227  if (status < 0) {
1228  if (status != -ENOSPC)
1229  mlog_errno(status);
1230  goto bail;
1231  }
1232 
1233  handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
1234  if (IS_ERR(handle)) {
1235  status = PTR_ERR(handle);
1236  handle = NULL;
1237  mlog_errno(status);
1238  goto bail;
1239  }
1240 
1241  alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
1242 
1243  /* We want to clear the local alloc before doing anything
1244  * else, so that if we error later during this operation,
1245  * local alloc shutdown won't try to double free main bitmap
1246  * bits. Make a copy so the sync function knows which bits to
1247  * free. */
1248  alloc_copy = kmalloc(osb->local_alloc_bh->b_size, GFP_NOFS);
1249  if (!alloc_copy) {
1250  status = -ENOMEM;
1251  mlog_errno(status);
1252  goto bail;
1253  }
1254  memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size);
1255 
1256  status = ocfs2_journal_access_di(handle,
1257  INODE_CACHE(local_alloc_inode),
1258  osb->local_alloc_bh,
1259  OCFS2_JOURNAL_ACCESS_WRITE);
1260  if (status < 0) {
1261  mlog_errno(status);
1262  goto bail;
1263  }
1264 
1265  ocfs2_clear_local_alloc(alloc);
1266  ocfs2_journal_dirty(handle, osb->local_alloc_bh);
1267 
1268  status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
1269  main_bm_inode, main_bm_bh);
1270  if (status < 0) {
1271  mlog_errno(status);
1272  goto bail;
1273  }
1274 
1275  status = ocfs2_local_alloc_new_window(osb, handle, ac);
1276  if (status < 0) {
1277  if (status != -ENOSPC)
1278  mlog_errno(status);
1279  goto bail;
1280  }
1281 
1282  atomic_inc(&osb->alloc_stats.moves);
1283 
1284 bail:
1285  if (handle)
1286  ocfs2_commit_trans(osb, handle);
1287 
1288  brelse(main_bm_bh);
1289 
1290  if (main_bm_inode)
1291  iput(main_bm_inode);
1292 
1293  if (alloc_copy)
1294  kfree(alloc_copy);
1295 
1296  if (ac)
1297  ocfs2_free_alloc_context(ac);
1298 
1299  if (status)
1300  mlog_errno(status);
1301  return status;
1302 }
1303