39 #include <linux/sched.h>
40 #include <linux/slab.h>
51 #include <linux/quota.h>
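/* Shrinker path: under qd_lru_lock, walk the global quota-data LRU and
 * release entries that are no longer referenced, one per nr_to_scan step. */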
93 spin_lock(&qd_lru_lock);
94 while (nr_to_scan && !list_empty(&qd_lru_list)) {
97 sdp = qd->qd_gl->gl_sbd;
112 spin_unlock(&qd_lru_lock);
114 spin_lock(&qd_lru_lock);
117 spin_unlock(&qd_lru_lock);
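/* qd_alloc(): carve a new gfs2_quota_data out of the slab cache; GFP_NOFS
 * keeps memory reclaim from re-entering the filesystem. */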
139 qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
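/* qd_get(): look up an in-core quota_data for (user, id) under qd_lru_lock,
 * falling back to qd_alloc() for a fresh entry when none is found. */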
164 static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
174 spin_lock(&qd_lru_lock);
176 if (qd->qd_id == id &&
200 spin_unlock(&qd_lru_lock);
211 error = qd_alloc(sdp, user, id, &new_qd);
230 spin_unlock(&qd_lru_lock);
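/* slot_get(): scan the superblock's quota bitmap byte by byte for a clear
 * bit; the bit index becomes this qd's slot in the quota_change file. */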
237 unsigned int c, o = 0, b;
238 unsigned char byte = 0;
240 spin_lock(&qd_lru_lock);
243 spin_unlock(&qd_lru_lock);
257 for (b = 0; b < 8; b++)
258 if (!(byte & (1 << b)))
267 spin_unlock(&qd_lru_lock);
273 spin_unlock(&qd_lru_lock);
281 spin_lock(&qd_lru_lock);
284 spin_unlock(&qd_lru_lock);
291 spin_lock(&qd_lru_lock);
297 spin_unlock(&qd_lru_lock);
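/* bh_get(): map and read the quota_change block backing this qd's slot so
 * its change record can be updated in place. */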
305 struct buffer_head *bh;
307 struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
319 bh_map.b_size = 1 << ip->i_inode.i_blkbits;
372 spin_lock(&qd_lru_lock);
396 spin_unlock(&qd_lru_lock);
421 spin_lock(&qd_lru_lock);
425 spin_unlock(&qd_lru_lock);
438 spin_unlock(&qd_lru_lock);
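/* qdsb_get(): qd_get() + slot_get() + bh_get() in sequence, unwinding the
 * earlier steps if a later one fails. */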
461 static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
466 error = qd_get(sdp, user, id, qdp);
470 error = slot_get(*qdp);
474 error = bh_get(*qdp);
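/* gfs2_quota_hold(): attach the quota_data this inode will need (owner
 * uid/gid, plus any new ids being assigned) to ip->i_res, bumping
 * rs_qa_qd_num for each one acquired. */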
506 qd = ip->i_res->rs_qa_qd;
518 ip->i_res->rs_qa_qd_num++;
524 ip->i_res->rs_qa_qd_num++;
531 ip->i_res->rs_qa_qd_num++;
539 ip->i_res->rs_qa_qd_num++;
558 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
559 qdsb_put(ip->i_res->rs_qa_qd[x]);
562 ip->i_res->rs_qa_qd_num = 0;
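/* sort_qd(): comparison helper so arrays of quota_data are processed in a
 * stable order, which keeps multi-qd glock acquisition deadlock-free. */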
565 static int sort_qd(const void *a, const void *b)
606 spin_lock(&qd_lru_lock);
608 spin_unlock(&qd_lru_lock);
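/* gfs2_adjust_quota(): rewrite the struct gfs2_quota at offset 'loc' in the
 * quota inode, applying the accumulated change and, when the caller supplies
 * new limits, those as well. */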
639 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
644 struct gfs2_sbd *sdp = GFS2_SB(inode);
648 unsigned blocksize, iblock,
pos;
649 struct buffer_head *bh;
656 if (gfs2_is_stuffed(ip)) {
696 blocksize = inode->i_sb->s_blocksize;
697 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
699 if (!page_has_buffers(page))
702 bh = page_buffers(page);
704 while (offset >= pos) {
705 bh = bh->b_this_page;
710 if (!buffer_mapped(bh)) {
712 if (!buffer_mapped(bh))
716 zero_user(page, pos - blocksize, bh->b_size);
719 if (PageUptodate(page))
720 set_buffer_uptodate(bh);
722 if (!buffer_uptodate(bh)) {
725 if (!buffer_uptodate(bh))
734 memcpy(kaddr + offset, ptr, nbytes);
752 i_size_write(inode, size);
754 mark_inode_dirty(inode);
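/* do_sync(): flush 'num_qd' locally accumulated changes into the master
 * quota file in a single transaction; the reservation below sizes the
 * transaction for the data, indirect and dinode blocks touched. */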
765 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
767 unsigned int data_blocks, ind_blocks;
773 unsigned int nalloc = 0, blocks;
780 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
781 &data_blocks, &ind_blocks);
789 for (qx = 0; qx < num_qd; qx++) {
800 for (x = 0; x < num_qd; x++) {
801 offset = qd2offset(qda[x]);
816 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
818 reserved = 1 + (nalloc * (data_blocks + ind_blocks));
824 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
830 for (x = 0; x < num_qd; x++) {
832 offset = qd2offset(qd);
909 error = update_qd(sdp, qd);
946 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
948 qd = ip->i_res->rs_qa_qd[x];
951 error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
975 if (!qd->qd_qb.qb_limit)
978 spin_lock(&qd_lru_lock);
980 spin_unlock(&qd_lru_lock);
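/* need_sync() heuristic: project the local change across every journal
 * (node), scaled by quota_scale_num/den, and only force a sync if that
 * projection would push the value over the limit. */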
993 value *= gfs2_jindex_size(sdp) * num;
994 value = div_s64(value, den);
1006 unsigned int count = 0;
1012 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1016 qd = ip->i_res->rs_qa_qd[x];
1017 sync = need_sync(qd);
1021 if (sync && qd_trylock(qd))
1026 do_sync(count, qda);
1027 for (x = 0; x < count; x++)
1035 #define MAX_LINE 256
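/* gfs2_quota_check(): for each held qd, compare the synced value plus the
 * pending local change against the limits; crossing the hard limit fails
 * with "exceeded", crossing the warn level prints a rate-limited "warning". */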
1063 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1064 qd = ip->i_res->rs_qa_qd[x];
1071 spin_lock(&qd_lru_lock);
1073 spin_unlock(&qd_lru_lock);
1076 print_message(qd, "exceeded");
1089 gt_quota_warn_period) * HZ)) {
1095 error = print_message(qd, "warning");
1114 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1115 qd = ip->i_res->rs_qa_qd[x];
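/* gfs2_quota_sync(): repeatedly pull up to quota_simul_sync dirty quota_data
 * off the list with qd_fish() and write each batch out via do_sync(). */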
1128 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1129 unsigned int num_qd;
1143 error = qd_fish(sdp, qda + num_qd);
1144 if (error || !qda[num_qd])
1146 if (++num_qd == max_qd)
1152 error = do_sync(num_qd, qda);
1154 for (x = 0; x < num_qd; x++)
1158 for (x = 0; x < num_qd; x++)
1161 } while (!error && num_qd == max_qd);
1179 error = qd_get(sdp, user, id, &qd);
1183 error = do_glock(qd, FORCE, &q_gh);
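/* gfs2_quota_init(): at mount time, walk every block of the per-node
 * quota_change file and rebuild an in-core quota_data (with its slot) for
 * each non-zero change record found. */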
1204 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1205 unsigned int x, slot = 0;
1206 unsigned int found = 0;
1211 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1220 sizeof(unsigned char *), GFP_NOFS);
1230 for (x = 0; x < blocks; x++) {
1231 struct buffer_head *bh;
1254 gfs2_quota_change_in(&qc, bh->b_data +
1272 spin_lock(&qd_lru_lock);
1276 spin_unlock(&qd_lru_lock);
1287 fs_info(sdp, "found %u quota changes\n", found);
1302 spin_lock(&qd_lru_lock);
1303 while (!list_empty(head)) {
1309 list_move(&qd->qd_list, head);
1310 spin_unlock(&qd_lru_lock);
1312 spin_lock(&qd_lru_lock);
1323 spin_unlock(&qd_lru_lock);
1335 spin_lock(&qd_lru_lock);
1337 spin_unlock(&qd_lru_lock);
1348 static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1350 if (error == 0 || error == -EROFS)
1353 fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1356 static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1358 unsigned long t, unsigned long *timeo,
1359 unsigned int *new_timeo)
1362 int error = fxn(sdp->sd_vfs, 0);
1363 quotad_error(sdp, msg, error);
1364 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1370 static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
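/* gfs2_quotad(): per-superblock kernel thread that periodically syncs statfs
 * and quota changes, services the truncate list, and then sleeps for the
 * shorter of the two timeouts. */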
1407 unsigned long statfs_timeo = 0;
1408 unsigned long quotad_timeo = 0;
1409 unsigned long t = 0;
1418 quotad_error(sdp, "statfs", error);
1427 quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
1431 quotad_check_trunc_list(sdp);
1435 t = min(quotad_timeo, statfs_timeo);
1451 static int gfs2_quota_get_xstate(struct super_block *sb,
1459 switch (sdp->sd_args.ar_quota) {
1505 error = do_glock(qd, FORCE, &q_gh);
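/* gfs2_set_dqblk(): only the fields in GFS2_FIELDMASK (block soft/hard
 * limits and block count) can be set through this quotactl interface. */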
1524 #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
1533 unsigned int data_blocks, ind_blocks;
1534 unsigned int blocks = 0;
1580 error = update_qd(sdp, qd);
1600 offset = qd2offset(qd);
1602 if (gfs2_is_stuffed(ip))
1604 if (alloc_required) {
1605 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1606 &data_blocks, &ind_blocks);
1607 blocks = 1 + data_blocks + ind_blocks;
1611 blocks += gfs2_rg_blocks(ip, blocks);
1621 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1640 .get_xstate = gfs2_quota_get_xstate,
1641 .get_dqblk = gfs2_get_dqblk,
1642 .set_dqblk = gfs2_set_dqblk,