#include <linux/slab.h>
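/*
 * readdir cookies are derived from the on-disk name hash:
 * gfs2_disk_hash2offset() drops the least significant hash bit and
 * gfs2_dir_offset2hash() shifts it back, so a cookie round-trips to the
 * original hash with its low bit cleared (presumably to keep offsets in a
 * 31-bit range for readdir/NFS cookie purposes).
 */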
#define MAX_RA_BLOCKS 32

#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))

typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
                            const struct qstr *name, void *opaque);
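/*
 * Scan callbacks of this type are handed to gfs2_dirent_scan().  By
 * convention a callback returns 0 to keep scanning, 1 to return the
 * current entry, 2 to return the previous entry (used when unlinking),
 * and a negative errno to abort the scan.
 */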
static int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
                                   struct buffer_head **bhp)
{
        struct buffer_head *bh;

static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
                                        struct buffer_head **bhp)
{
        struct buffer_head *bh;
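/*
 * gfs2_dir_write_stuffed() copies directory data straight into the tail
 * of the on-disk dinode while the directory is still "stuffed" (inline),
 * growing i_size if the write extends past the current end of data.
 */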
static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
                                  unsigned int offset, unsigned int size)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);

        if (ip->i_inode.i_size < offset + size)
                i_size_write(&ip->i_inode, offset + size);
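/*
 * gfs2_dir_write_data() is the unstuffed counterpart: it walks the data
 * blocks backing the directory, copying at most one filesystem block
 * (minus the offset within it) per iteration, and bumps i_size when the
 * write extends the directory.
 */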
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
                               u64 offset, unsigned int size)
{
        struct buffer_head *dibh;

        if (gfs2_is_stuffed(ip) &&
            offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
                return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
                                              size);

        if (gfs2_is_stuffed(ip)) {

        while (copied < size) {
                struct buffer_head *bh;

                amount = size - copied;
                if (amount > sdp->sd_sb.sb_bsize - o)
                        amount = sdp->sd_sb.sb_bsize - o;

                error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);

                memcpy(bh->b_data + o, buf, amount);

        error = gfs2_meta_inode_buffer(ip, &dibh);

        if (ip->i_inode.i_size < offset + copied)
                i_size_write(&ip->i_inode, offset + copied);
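/*
 * The read side mirrors the write side: gfs2_dir_read_stuffed() copies out
 * of the dinode for inline directories and gfs2_dir_read_data() walks the
 * data blocks.  The __be64 buffer type reflects that the only caller here
 * is the hash-table code in gfs2_dir_get_hash_table().
 */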
static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf,
                                 unsigned int size)
{
        struct buffer_head *dibh;

        error = gfs2_meta_inode_buffer(ip, &dibh);

        return (error) ? error : size;
}

static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf,
                              unsigned int size)
{
        if (gfs2_is_stuffed(ip))
                return gfs2_dir_read_stuffed(ip, buf, size);

        while (copied < size) {
                struct buffer_head *bh;

                amount = size - copied;
                if (amount > sdp->sd_sb.sb_bsize - o)
                        amount = sdp->sd_sb.sb_bsize - o;

                if (error || !dblock)

                memcpy(buf, bh->b_data + o, amount);

                buf += (amount/sizeof(__be64));

        return (copied) ? copied : error;
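/*
 * gfs2_dir_get_hash_table() returns the directory's cached exhash table,
 * reading it from disk on first use.  The i_size check guards against a
 * corrupt on-disk table, and the i_lock section publishes the freshly read
 * copy in i_hash_cache unless a racing reader got there first.
 */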
static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
{
        if (hsize != i_size_read(&ip->i_inode)) {
                return ERR_PTR(-EIO);

        ret = gfs2_dir_read_data(ip, hc, hsize);

        spin_lock(&inode->i_lock);

        spin_unlock(&inode->i_lock);
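/*
 * A "sentinel" dirent is an allocated slot whose inode number fields are
 * zero: it holds space in the rec_len chain (typically as the first entry
 * in a block) but does not name a real inode.
 */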
static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
{
        return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
}
static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
                                     const struct qstr *name, int ret)
{
        if (!gfs2_dirent_sentinel(dent) &&
static int gfs2_dirent_find(const struct gfs2_dirent *dent,
                            const struct qstr *name,
                            void *opaque)
{
        return __gfs2_dirent_find(dent, name, 1);
}
static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
                            const struct qstr *name,
                            void *opaque)
{
        return __gfs2_dirent_find(dent, name, 2);
}
static int gfs2_dirent_last(const struct gfs2_dirent *dent,
                            const struct qstr *name,
                            void *opaque)
{
        if (name->len == (end - start))
static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
                                  const struct qstr *name,
                                  void *opaque)
{
        if (gfs2_dirent_sentinel(dent))

        if (totlen - actual >= required)
static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
                              const struct qstr *name,
                              void *opaque)
{
        if (!gfs2_dirent_sentinel(dent)) {
static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
                             unsigned int size, unsigned int len, int first)
{
        const char *msg = "gfs2_dirent too small";

        msg = "gfs2_dirent misaligned";

        msg = "gfs2_dirent points beyond end of block";

        msg = "zero inode number";
        if (unlikely(!first && gfs2_dirent_sentinel(dent)))

        msg = "name length is greater than space in dirent";
        if (!gfs2_dirent_sentinel(dent) &&

                first ? "first in block" : "not first in block");
static int gfs2_dirent_offset(const void *buf)
static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
                                            unsigned int len, gfs2_dscan_t scan,
                                            const struct qstr *name,
                                            void *opaque)
{
        ret = gfs2_dirent_offset(buf);

        if (gfs2_check_dirent(dent, offset, size, len, 1))

        ret = scan(dent, name, opaque);

        if (gfs2_check_dirent(dent, offset, size, len, 0))

        return prev ? prev : dent;

        return ERR_PTR(-EIO);
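/*
 * Entries within a block are chained by de_rec_len.  dirent_check_reclen()
 * validates a single record length against the end of the buffer and
 * dirent_next() steps to the following entry, so a corrupt rec_len cannot
 * walk a scan outside the block.
 */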
static int dirent_check_reclen(struct gfs2_inode *dip,
static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
                       struct gfs2_dirent **dent)
{
        char *bh_end = bh->b_data + bh->b_size;

        ret = dirent_check_reclen(dip, cur, bh_end);

        tmp = (void *)cur + ret;
        ret = dirent_check_reclen(dip, tmp, bh_end);

        if (gfs2_dirent_sentinel(tmp)) {
static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
                       struct gfs2_dirent *prev, struct gfs2_dirent *cur)
{
        u16 cur_rec_len, prev_rec_len;

        if (gfs2_dirent_sentinel(cur)) {

        cur->de_inum.no_formal_ino = 0;

        if ((char *)prev + prev_rec_len != (char *)cur)
        if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)

        prev_rec_len += cur_rec_len;
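/*
 * Deletion never compacts a block: dirent_del() either turns the victim
 * into a sentinel (when it is first in the block) or folds its record
 * length into the previous entry.  gfs2_init_dirent() does the reverse,
 * carving a new entry out of the unused tail of an existing one.
 */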
static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
                                            struct gfs2_dirent *dent,
                                            const struct qstr *name,
                                            struct buffer_head *bh)
{
        unsigned offset = 0, totlen;

        if (!gfs2_dirent_sentinel(dent))

        BUG_ON(offset + name->len > totlen);

        gfs2_qstr2dirent(name, totlen - offset, ndent);
static struct gfs2_dirent *gfs2_dirent_alloc(struct inode *inode,
                                             struct buffer_head *bh,
                                             const struct qstr *name)
{
        dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
                                gfs2_dirent_find_space, name, NULL);
        if (!dent || IS_ERR(dent))

        return gfs2_init_dirent(inode, dent, name, bh);
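/*
 * For an exhash directory the dinode holds a table of __be64 leaf block
 * numbers indexed by the top i_depth bits of the name hash.  get_leaf()
 * reads a leaf block, get_leaf_nr() looks a block number up in the hash
 * table, and get_first_leaf() combines the two.
 */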
static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
                    struct buffer_head **bhp)
static int get_leaf_nr(struct gfs2_inode *dip, u32 index, u64 *leaf_out)
{
        hash = gfs2_dir_get_hash_table(dip);

                return PTR_ERR(hash);
static int get_first_leaf(struct gfs2_inode *dip, u32 index,
                          struct buffer_head **bh_out)
{
        error = get_leaf_nr(dip, index, &leaf_no);

                error = get_leaf(dip, leaf_no, bh_out);
static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
                                              const struct qstr *name,
                                              gfs2_dscan_t scan,
                                              struct buffer_head **pbh)
{
        struct buffer_head *bh;

        unsigned hsize = 1 << ip->i_depth;

        if (hsize * sizeof(u64) != i_size_read(inode)) {
                return ERR_PTR(-EIO);

        index = name->hash >> (32 - ip->i_depth);
        error = get_first_leaf(ip, index, &bh);
                return ERR_PTR(error);
        dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,

        error = get_leaf(ip, ln, &bh);

        return error ? ERR_PTR(error) : NULL;

        error = gfs2_meta_inode_buffer(ip, &bh);
                return ERR_PTR(error);
        dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name,
                                NULL);
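/*
 * dir_make_exhash() converts a stuffed directory into exhash form: the
 * entries in the dinode are copied into a freshly allocated leaf block
 * (new_leaf()), the last entry's record length is stretched to cover the
 * larger block, and the dinode body becomes the hash table, every slot of
 * which initially points at that single leaf (hence the new i_size of half
 * a block).
 */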
static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
                                  u16 depth)
{
        struct buffer_head *bh;

        gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
static int dir_make_exhash(struct inode *inode)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        struct buffer_head *bh, *dibh;

        error = gfs2_meta_inode_buffer(dip, &dibh);

        leaf = new_leaf(inode, &bh, 0);

        gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
                              sizeof(struct gfs2_dinode));

        args.name = bh->b_data;
        dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
                                gfs2_dirent_last, &args, NULL);
                return PTR_ERR(dent);

        gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

        i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
        gfs2_add_inode_blocks(&dip->i_inode, 1);
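/*
 * dir_split_leaf() handles a full leaf: a new leaf one level deeper is
 * allocated, the lower half of the hash-table slots that referenced the
 * old leaf are re-pointed at it, and every entry whose hash falls below
 * the resulting divider is reallocated in the new leaf and deleted from
 * the old one.
 */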
static int dir_split_leaf(struct inode *inode, const struct qstr *name)
{
        struct buffer_head *nbh, *obh, *dibh;

        index = name->hash >> (32 - dip->i_depth);
        error = get_leaf_nr(dip, index, &leaf_no);

        error = get_leaf(dip, leaf_no, &obh);

        start = (index & ~(len - 1));

        for (x = 0; x < half_len; x++)

        error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
                                    half_len * sizeof(u64));
        if (error != half_len * sizeof(u64)) {

        divider = (start + half_len) << (32 - dip->i_depth);

                if (dirent_next(dip, obh, &next))

                if (!gfs2_dirent_sentinel(dent) &&

                        str.name = (char*)(dent+1);

                        new = gfs2_dirent_alloc(inode, nbh, &str);
                                error = PTR_ERR(new);

                        dirent_del(dip, obh, prev, dent);

        error = gfs2_meta_inode_buffer(dip, &dibh);

        gfs2_add_inode_blocks(&dip->i_inode, 1);
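/*
 * When the leaf that needs splitting is referenced by a single slot, the
 * table itself must grow first: dir_double_exhash() writes every hash
 * table pointer out twice, doubling the table and i_depth.  The trailing
 * gfs2_dir_write_data()/i_size_write() pair shown here is the failure
 * path, which restores the original table and size.
 */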
static int dir_double_exhash(struct gfs2_inode *dip)
{
        struct buffer_head *dibh;

        hsize_bytes = hsize * sizeof(__be64);

        hc = gfs2_dir_get_hash_table(dip);

        error = gfs2_meta_inode_buffer(dip, &dibh);

        for (x = 0; x < hsize; x++) {

        error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2);
        if (error != (hsize_bytes * 2))

        gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes);
        i_size_write(&dip->i_inode, hsize_bytes);
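/*
 * readdir must return entries in hash order so that the f_pos cookie stays
 * stable while leaves split.  compare_dents() orders gathered entries by
 * hash (then by name length and name), and do_filldir_main() walks the
 * sorted array, emitting each entry at its hash-derived offset.
 */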
static int compare_dents(const void *a, const void *b)
{
        if (hash_a > hash_b)

        else if (hash_a < hash_b)

        else if (len_a < len_b)

        ret = memcmp(dent_a + 1, dent_b + 1, len_a);
static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
                           void *opaque, filldir_t filldir,
                           const struct gfs2_dirent **darr, u32 entries,
                           int *copied)
{
        dent_next = darr[0];

        for (x = 0, y = 1; x < entries; x++, y++) {

                        dent_next = darr[y];

                if (off_next == off) {
                        if (*copied && !run)

                error = filldir(opaque, (const char *)(dent + 1),
static void *gfs2_alloc_sort_buffer(unsigned size)
static void gfs2_free_sort_buffer(void *ptr)
{
        if (is_vmalloc_addr(ptr))
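/*
 * gfs2_dir_read_leaf() reads an entire chain of leaf blocks for one hash
 * bucket, gathers pointers to every live entry into a sort buffer (kmalloc
 * with a vmalloc fallback, hence the helpers above), cross-checks the
 * per-leaf entry counts, and hands the sorted array to do_filldir_main().
 */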
static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
                              filldir_t filldir, int *copied, unsigned *depth,
                              u64 leaf_no)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *bh;

        unsigned entries = 0, entries2 = 0;
        unsigned leaves = 0;

        struct buffer_head **larr;

        error = get_leaf(ip, lfn, &bh);

        larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));

        darr = (const struct gfs2_dirent **)(larr + leaves);

                error = get_leaf(ip, lfn, &bh);

                        dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
                                                gfs2_dirent_gather, NULL, &g);
                        error = PTR_ERR(dent);

                        if (entries2 != g.offset) {
                                fs_warn(sdp, "Number of entries corrupt in dir "
                                        "leaf %llu, entries2 (%u) != "
                                        "g.offset (%u)\n",
                                        (unsigned long long)bh->b_blocknr,
                                        entries2, g.offset);

        BUG_ON(entries2 != entries);
        error = do_filldir_main(ip, offset, opaque, filldir, darr,
                                entries, copied);

        for(i = 0; i < leaf; i++)

        gfs2_free_sort_buffer(larr);
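/*
 * gfs2_dir_readahead() issues asynchronous reads for the leaf blocks of up
 * to MAX_RA_BLOCKS hash-table slots ahead of the current readdir position,
 * skipping buffers that are already up to date or locked, so the per-slot
 * reads issued by dir_e_read() mostly hit cached data.
 */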
static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
                               struct file_ra_state *f_ra)
{
        struct buffer_head *bh;

        if (index + MAX_RA_BLOCKS < f_ra->start)

                if (f_ra->start >= hsize)

                if (blocknr == last)

                if (trylock_buffer(bh)) {
                        if (buffer_uptodate(bh)) {
static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
                      filldir_t filldir, struct file_ra_state *f_ra)
{
        index = hash >> (32 - dip->i_depth);

        lp = gfs2_dir_get_hash_table(dip);

        gfs2_dir_readahead(inode, hsize, index, f_ra);

        while (index < hsize) {
                error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,

                index = (index & ~(len - 1)) + len;
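/*
 * The fragment below is from gfs2_dir_read(), the readdir entry point: an
 * exhash directory is handed off to dir_e_read(), while a stuffed one is
 * gathered and sorted directly from the dinode block, with the same
 * entry-count consistency check as the leaf path.
 */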
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        struct buffer_head *dibh;

                return dir_e_read(inode, offset, opaque, filldir, f_ra);

        if (!gfs2_is_stuffed(dip)) {

        error = gfs2_meta_inode_buffer(dip, &dibh);

        dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
                                gfs2_dirent_gather, NULL, &g);
        error = PTR_ERR(dent);

        fs_warn(sdp, "Number of entries corrupt in dir %llu, "
                "ip->i_entries (%u) != g.offset (%u)\n",

        error = do_filldir_main(dip, offset, opaque, filldir, darr,
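/*
 * The next two fragments are from the lookup side (gfs2_dir_search() and
 * gfs2_dir_check() in mainline GFS2): both resolve a name via
 * gfs2_dirent_search() with gfs2_dirent_find(); the first instantiates the
 * inode for the match, the second only verifies that the entry refers to
 * the expected inode.
 */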
        struct buffer_head *bh;

        struct inode *inode;

        dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
                return ERR_CAST(dent);
        struct buffer_head *bh;

        dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
                return PTR_ERR(dent);
static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
        struct buffer_head *bh, *obh;

        index = name->hash >> (32 - ip->i_depth);
        error = get_first_leaf(ip, index, &obh);

                oleaf = (struct gfs2_leaf *)obh->b_data;

                error = get_leaf(ip, bn, &obh);

        error = gfs2_meta_inode_buffer(ip, &bh);

        gfs2_add_inode_blocks(&ip->i_inode, 1);
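/*
 * The fragment below is from gfs2_dir_add(): it first looks for free space
 * for the new entry, and if none exists it grows the directory step by
 * step (unstuff via dir_make_exhash(), split the full leaf, double the
 * hash table when the leaf cannot be split further, or chain on a new leaf
 * with dir_new_leaf()) and then retries.
 */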
        struct buffer_head *bh;

        dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space,
                                  &bh);

                return PTR_ERR(dent);
        dent = gfs2_init_dirent(inode, dent, name, bh);
        gfs2_inum_out(nip, dent);

        error = gfs2_meta_inode_buffer(ip, &bh);

        error = dir_make_exhash(inode);

        error = dir_split_leaf(inode, name);

        error = dir_double_exhash(ip);

        error = dir_split_leaf(inode, name);

        error = dir_new_leaf(inode, name);
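/*
 * gfs2_dir_del() (fragment below) looks the name up with gfs2_dirent_prev
 * so that it also gets the preceding entry, re-checks that the entry
 * really matches the name, and removes it with dirent_del().
 */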
        struct buffer_head *bh;

        dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);

                return PTR_ERR(dent);

        if (gfs2_dirent_find(dent, name, NULL) == 0) {

        dirent_del(dip, bh, prev, dent);

        mark_inode_dirty(&dip->i_inode);
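/*
 * gfs2_dir_mvino() re-points an existing entry at a different inode (used
 * by rename), rewriting the stored inum and entry type in place.
 */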
int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
                   const struct gfs2_inode *nip, unsigned int new_type)
{
        struct buffer_head *bh;

        dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);

                return PTR_ERR(dent);

        gfs2_inum_out(nip, dent);

        error = gfs2_meta_inode_buffer(dip, &bh);
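/*
 * leaf_dealloc() frees one chain of leaf blocks: it walks the chain twice
 * (once to build the resource-group list and size the transaction, once to
 * actually free the blocks), then clears the corresponding hash-table
 * slots through gfs2_dir_write_data().
 */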
static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
                        u64 leaf_no, struct buffer_head *leaf_bh,
                        int last_dealloc)
{
        struct buffer_head *bh, *dibh;

        unsigned int rg_blocks = 0, l_blocks = 0;

        unsigned int x, size = len * sizeof(u64);

        for (blk = leaf_no; blk; blk = nblk) {
                if (blk != leaf_no) {
                        error = get_leaf(dip, blk, &bh);

                tmp_leaf = (struct gfs2_leaf *)bh->b_data;

        for (x = 0; x < rlist.rl_rgrps; x++) {

                rgd = rlist.rl_ghs[x].gh_gl->gl_object;

                        goto out_rg_gunlock;

        for (blk = leaf_no; blk; blk = nblk) {
                if (blk != leaf_no) {
                        error = get_leaf(dip, blk, &bh);

                tmp_leaf = (struct gfs2_leaf *)bh->b_data;

                gfs2_add_inode_blocks(&dip->i_inode, -1);

        error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
        if (error != size) {

        error = gfs2_meta_inode_buffer(dip, &dibh);
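/*
 * gfs2_dir_exhash_dealloc() (fragment below) tears the whole directory
 * down at deallocation time, walking the hash table and calling
 * leaf_dealloc() once per distinct leaf chain.
 */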
        struct buffer_head *bh;

        u32 index = 0, next_index;

        int error = 0, last;

        lp = gfs2_dir_get_hash_table(dip);

        while (index < hsize) {

                error = get_leaf(dip, leaf_no, &bh);

                next_index = (index & ~(len - 1)) + len;

                error = leaf_dealloc(dip, index, len, leaf_no, bh,

        if (index != hsize) {
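/*
 * The final fragment appears to be from gfs2_diradd_alloc_required(),
 * which probes for free space with gfs2_dirent_find_space() to decide
 * whether adding "name" will require a block allocation.
 */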
        struct buffer_head *bh;

        dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);

                return PTR_ERR(dent);