31 #define o2info_from_user(a, b) \
32 copy_from_user(&(a), (b), sizeof(a))
33 #define o2info_to_user(a, b) \
34 copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
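/* These macros copy a fixed-size o2info request struct between kernel and user space; the (typeof(a) __user *) cast carries the __user address-space annotation for the destination buffer. */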
73 *flags = OCFS2_I(inode)->ip_attr;
79 static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
85 struct buffer_head *bh = NULL;
105 if (IS_ERR(handle)) {
106 status = PTR_ERR(handle);
111 oldflags = ocfs2_inode->ip_attr;
112 flags = flags & mask;
113 flags |= oldflags & ~mask;
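/* Masked update: flag bits covered by 'mask' are taken from the caller, while bits outside the mask keep their previous values from ip_attr. */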
156 o2info_set_request_filled(&oib.ib_req);
164 o2info_set_request_error(&oib.ib_req, req);
181 o2info_set_request_filled(&oic.ic_req);
189 o2info_set_request_error(&oic.ic_req, req);
206 o2info_set_request_filled(&oim.im_req);
214 o2info_set_request_error(&oim.im_req, req);
231 o2info_set_request_filled(&oil.il_req);
239 o2info_set_request_error(&oil.il_req, req);
256 o2info_set_request_filled(&oiu.iu_req);
264 o2info_set_request_error(&oiu.iu_req, req);
283 o2info_set_request_filled(&oif.if_req);
291 o2info_set_request_error(&oif.if_req, req);
308 o2info_set_request_filled(&oij.ij_req);
316 o2info_set_request_error(&oij.ij_req, req);
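/* Pattern repeated by each simple request handler above: mark the request filled on success, otherwise store the error code back into the user's request. */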
322 struct inode *inode_alloc, u64 blkno,
325 int status = 0, unlock = 0;
327 struct buffer_head *bh = NULL;
333 if (o2info_coherent(&fi->ifi_req)) {
377 struct inode *inode_alloc = NULL;
392 if (o2info_coherent(&oifi->ifi_req)) {
401 ocfs2_sprintf_system_inode_name(namebuf,
422 o2info_set_request_filled(&oifi->ifi_req);
430 o2info_set_request_error(&oifi->ifi_req, req);
437 static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
438 unsigned int chunksize)
442 index = __ilog2_u32(chunksize);
446 hist->fc_chunks[index]++;
447 hist->fc_clusters[index] += chunksize;
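/* The free-chunk histogram is indexed by log2 of the chunk size: fc_chunks counts chunks per bucket and fc_clusters accumulates their cluster totals. */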
450 static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
451 unsigned int chunksize)
453 if (chunksize > stats->ffs_max)
454 stats->ffs_max = chunksize;
456 if (chunksize < stats->ffs_min)
457 stats->ffs_min = chunksize;
459 stats->ffs_avg += chunksize;
460 stats->ffs_free_chunks_real++;
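/* ffs_avg accumulates the raw cluster total here; judging from line 637 below, it is later divided by ffs_free_chunks_real to yield the average chunk size. */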
464 unsigned int chunksize)
466 o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
467 o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
471 struct inode *gb_inode,
477 int status = 0, used;
480 struct buffer_head *bh = NULL;
483 unsigned int max_bits, num_clusters;
485 unsigned int chunk_free, last_chunksize = 0;
501 if (o2info_coherent(&ffg->iff_req))
510 "%llu from device.", (unsigned long long)blkno);
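/* o2info_coherent() appears to choose between a cluster-coherent (locked) read and a direct read of the group descriptor from the device, which the "from device" error message above refers to. */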
523 for (chunk = 0; chunk < chunks_in_group; chunk++) {
528 num_clusters = max_bits - offset;
546 if (used && last_chunksize) {
556 ffg->iff_ffs.ffs_free_chunks++;
574 struct inode *gb_inode, u64 blkno,
578 int status = 0, unlock = 0, i;
580 struct buffer_head *bh = NULL;
588 if (o2info_coherent(&ffg->iff_req)) {
615 memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));
635 if (ffg->iff_ffs.ffs_free_chunks_real)
637 ffg->iff_ffs.ffs_free_chunks_real);
662 struct inode *gb_inode = NULL;
682 if (o2info_coherent(&oiff->iff_req)) {
691 ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
707 o2info_set_request_filled(&oiff->iff_req);
715 o2info_set_request_error(&oiff->iff_req, req);
731 o2info_clear_request_filled(&oir);
739 o2info_set_request_error(&oir, req);
811 u64 *req_addr, int compat_flag)
861 for (i = 0; i < info->oi_count; i++) {
884 struct inode *inode = filp->f_path.dentry->d_inode;
899 status = ocfs2_get_inode_attr(inode, &flags);
912 status = ocfs2_set_inode_attr(inode, flags,
997 struct inode *inode = file->f_path.dentry->d_inode;
1020 preserve = (args.preserve != 0);
1023 compat_ptr(args.new_path), preserve);
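/* Compat ioctl path: the 32-bit user pointer in args.new_path is converted with compat_ptr() before the call, and 'preserve' is normalized to 0/1 at line 1020. */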