direct.c
/*
 * direct.c - NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <[email protected]>.
 */

#include <linux/errno.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"

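/*
 * Overview: a "direct" bmap keeps its block pointers inline in the
 * inode's bmap area (direct->b_u.u_data).  The leading slot is a small
 * header (struct nilfs_direct_node); the slots after it hold one pointer
 * per file block for keys NILFS_DIRECT_KEY_MIN..NILFS_DIRECT_KEY_MAX.
 * Files that outgrow this range are handled by the B-tree bmap instead
 * (see btree.c).
 */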
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}

static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}

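/*
 * nilfs_direct_lookup_contig() resolves the block at @key and then keeps
 * scanning the following slots, stopping at the first pointer that is
 * invalid or not physically contiguous.  When the bmap uses virtual block
 * numbers, each pointer is translated through the DAT file first.  On
 * success it stores the starting disk block number in *ptrp and returns
 * the number of contiguous blocks found (at least 1).
 */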
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			return ret;
		ptr = blocknr;
	}

	maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
		     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				return ret;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;
}

static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;
	else
		/* block group */
		return nilfs_bmap_find_target_in_group(direct);
}

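/*
 * nilfs_direct_insert() maps @key to a newly allocated pointer.  With
 * virtual block numbers it picks a target near previously written blocks
 * (nilfs_direct_find_target_v) and allocates through the DAT.  The caller
 * passes the data buffer_head cast into @ptr, so the buffer can be marked
 * volatile until the segment constructor assigns a real block address.
 */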
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}

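/*
 * nilfs_direct_delete() releases the pointer recorded at @key (ending its
 * DAT entry when virtual block numbers are in use) and resets the slot to
 * NILFS_BMAP_INVALID_PTR.
 */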
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}

static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;

	return 0;
}

static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}

static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}

/*
 * Rebuild a direct mapping from the (key, ptr) pairs gathered from a
 * larger bmap; used when a B-tree bmap has shrunk enough to fit back
 * into the direct representation.
 */
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}

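/*
 * nilfs_direct_propagate() is reached through nilfs_bmap_propagate() for
 * dirty data buffers.  For bmaps backed by virtual block numbers it
 * prepares an update of the DAT entry the first time a buffer is dirtied
 * (and marks the buffer volatile); on later calls it only marks the
 * existing DAT entry dirty.
 */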
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}

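/*
 * The two assign helpers below are invoked from nilfs_direct_assign()
 * when the segment constructor fixes the final disk location of a block:
 * the _v variant starts the DAT lifetime of the virtual block number and
 * records it in the binfo, while the _p variant stores the physical block
 * number directly in the bmap slot.
 */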
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}

static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;

	return 0;
}

static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		printk(KERN_CRIT "%s: invalid key: %llu\n", __func__,
		       (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__,
		       (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}

static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup		=	nilfs_direct_lookup,
	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
	.bop_insert		=	nilfs_direct_insert,
	.bop_delete		=	nilfs_direct_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_direct_propagate,

	.bop_lookup_dirty_buffers	=	NULL,

	.bop_assign		=	nilfs_direct_assign,
	.bop_mark		=	NULL,

	.bop_last_key		=	nilfs_direct_last_key,
	.bop_check_insert	=	nilfs_direct_check_insert,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	nilfs_direct_gather_data,
};


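/*
 * nilfs_direct_init() installs the operation table above.  It is called
 * when an inode whose bmap is still in direct form is set up, and by
 * nilfs_direct_delete_and_convert() after a larger bmap has been
 * converted back into this representation.
 */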
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}
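
/*
 * Usage sketch (not part of this file): callers in bmap.c dispatch through
 * the installed operation table rather than calling these statics
 * directly.  A leaf-level lookup looks roughly like:
 *
 *	__u64 blocknr;
 *	int err = bmap->b_ops->bop_lookup(bmap, key, 1, &blocknr);
 *
 * where err is 0 on success and blocknr then holds the pointer recorded
 * for key.
 */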