/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */
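/*
 * Worked example of the scaling (illustrative numbers only): take a user
 * whose limit is 1000 blocks, whose LVB-cached value is 900 blocks, and who
 * has an unsynced local change of +20 blocks on this node in a four-journal
 * cluster.  need_sync() below scales the local change by the journal count
 * times quota_scale: with the default scale of 1/1 that is 20 * 4 = 80, and
 * 900 + 80 < 1000, so no early sync is forced.  With quota_scale = 2/1 the
 * scaled change is 160, 900 + 160 >= 1000, and the change is synced
 * immediately.
 */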
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
	u32 qu_ll_next;
};
struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags;		/* GFS2_QCF_... */
	u32 qc_id;
};
static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);
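/*
 * Lifecycle note: a gfs2_quota_data stays on its superblock's sd_quota_list
 * for as long as it exists; when its reference count drops to zero,
 * qd_put() additionally chains it onto the global qd_lru_list, from which
 * gfs2_shrink_qd_memory() below frees entries under memory pressure.
 * qd_lru_lock protects both lists and the associated counters.
 */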
int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr--;
	}
	spin_unlock(&qd_lru_lock);

	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}
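/*
 * Sketch of how this shrinker callback would be registered elsewhere in the
 * filesystem (for example at module init); the qd_shrinker name here is
 * illustrative, not something defined in this file:
 *
 *	static struct shrinker qd_shrinker = {
 *		.shrink = gfs2_shrink_qd_memory,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&qd_shrinker);
 */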
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
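/*
 * Worked example: user and group quotas interleave in the quota file, so
 * UID 17 occupies record index 2 * 17 + 0 = 34 and GID 17 occupies index
 * 2 * 17 + 1 = 35; multiplying the index by sizeof(struct gfs2_quota)
 * turns it into a byte offset.
 */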
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			new_qd = NULL;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
		}

		spin_unlock(&qd_lru_lock);

		if (qd || !create) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}
static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}
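/*
 * Slot arithmetic, illustrated with 4096-byte pages: each bitmap chunk
 * covers 8 * 4096 = 32768 slots, so chunk 1, byte 2, bit 3 maps to slot
 * 1 * 32768 + 2 * 8 + 3 = 32787.
 */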
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
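/*
 * Example (illustrative numbers): if sd_qc_per_block were 120 and qd_slot
 * were 250, the change record would live in quota-change block
 * 250 / 120 = 2, at entry 250 % 120 = 10 within that block, just past the
 * gfs2_meta_header.
 */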
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;
		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
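/*
 * Example of the reference pairing above: the first do_qc(qd, +10) for an
 * ID sets QDF_CHANGE and takes a qd hold plus a slot hold; when a later
 * sync applies do_qc(qd, -10) and qc_change returns to zero, QDF_CHANGE is
 * cleared and both holds are dropped, letting the qd become reclaimable.
 */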
static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}
static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}
/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which
 * was in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +1 in the end for block requested above for unstuffing */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	int error;
	struct gfs2_quota_lvb *qlvb;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;

		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, NULL, buf, &pos,
					   sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}
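/*
 * Sample output (hypothetical fsid and ID):
 *
 *	GFS2: fsid=mycluster:gfs0.1: quota exceeded for user 1000
 */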
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}
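/*
 * Typical calling sequence around an allocation, as a sketch (error
 * handling elided; nblocks is a placeholder for the caller's block count):
 *
 *	gfs2_quota_lock(ip, uid, gid);         (also takes the quota holds)
 *	if (gfs2_quota_check(ip, uid, gid) == 0) {
 *		... allocate nblocks ...
 *		gfs2_quota_change(ip, nblocks, uid, gid);
 *	}
 *	gfs2_quota_unlock(ip);                 (syncs and drops the holds)
 */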
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}
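/*
 * gfs2_quota_sync() is also what the quota daemon below drives on a timer:
 * gfs2_quotad() invokes it through quotad_check_timeo() roughly every
 * quota_quantum seconds.
 */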
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}
static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
	    ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}
static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct gfs2_sbd *sdp),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
				   &statfs_timeo, &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}
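/*
 * Sketch of how the daemon would be started at mount time (illustrative;
 * the task-pointer field name follows the sd_ naming convention used above
 * and is an assumption, not something defined in this file):
 *
 *	struct task_struct *p;
 *
 *	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	sdp->sd_quotad_process = p;
 */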