/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
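
/*
 * Worked example of the bound above (editorial note, not from the original
 * source): with quota_scale = 1, need_sync() below only forces a sync once a
 * node's local pending change, multiplied by the number of journals N, would
 * push the last-synced value V past the limit L. Each node can therefore
 * defer at most (L - V)/N worth of change, so all N nodes together can
 * overrun by at most L - V <= L, which is where the "twice the limit"
 * theoretical maximum comes from.
 */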
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "ops_file.h"
#include "ops_address.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0
struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
};

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
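
/*
 * Layout note (editorial): user and group quota records for the same numeric
 * ID are interleaved in the quota file, e.g. user ID 7 sits at record index
 * 2 * 7 + 0 = 14 and group ID 7 at index 2 * 7 + 1 = 15, each record being
 * sizeof(struct gfs2_quota) bytes wide.
 */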
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
	if (!qd)
		return -ENOMEM;

	qd->qd_count = 1;
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kfree(qd);
	return error;
}
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&sdp->sd_quota_spin);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				qd->qd_count++;
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			new_qd = NULL;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
		}

		spin_unlock(&sdp->sd_quota_spin);

		if (qd || !create) {
			if (new_qd) {
				gfs2_lvb_unhold(new_qd->qd_gl);
				kfree(new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}
static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	if (!--qd->qd_count)
		qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}
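
/*
 * Bitmap math above, unpacked (editorial note): each chunk is one page
 * tracking 8 * PAGE_SIZE slots, so slot = c * (8 * PAGE_SIZE) + o * 8 + b
 * decomposes into chunk c, byte o within that chunk, and bit b within that
 * byte. With 4KB pages, slot 32771 is chunk 1, byte 0, bit 3, for example.
 */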
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
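
/*
 * Mapping note (editorial, illustrative numbers): quota-change records are
 * laid out linearly, sd_qc_per_block to a block after the metadata header.
 * If sd_qc_per_block were 254 (roughly what a 4KB block holds with 16-byte
 * records), slot 300 would live in block 1 at record offset 46.
 */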
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_count++;
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;

		found = 1;
		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&sdp->sd_quota_spin);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_count++;
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
}
static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}
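
/*
 * Design note (editorial): the _in/_out pair above converts between the
 * big-endian on-disk struct gfs2_quota and the CPU-endian
 * struct gfs2_quota_host, so gfs2_adjust_quota() below can do plain s64
 * arithmetic on qu_value before writing the record back.
 */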
/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	if (nalloc) {
		al = gfs2_alloc_get(ip);

		al->al_requested = nalloc * (data_blocks + ind_blocks);

		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_length +
					 num_qd * data_blocks +
					 nalloc * ind_blocks +
					 RES_DINODE + num_qd +
					 RES_STATFS, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp,
					 num_qd * data_blocks +
					 RES_DINODE + num_qd, 0);
		if (error)
			goto out_gunlock;
	}

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
					  (struct gfs2_quota_data *)
					  qd->qd_gl->gl_lvb);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (nalloc)
		gfs2_inplace_release(ip);
out_alloc:
	if (nalloc)
		gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
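
/*
 * Reservation math above, worked through (editorial, illustrative numbers):
 * syncing 3 IDs of which 2 need block allocation, with data_blocks = 1 and
 * ind_blocks = 2, reserves rd_length rgrp blocks + 3 * 1 data blocks +
 * 2 * 2 indirect blocks + RES_DINODE + 3 quota-change buffers (+ statfs),
 * i.e. one reserved block for every buffer the transaction may dirty.
 */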
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	struct file_ra_state ra_state;
	int error;
	struct gfs2_quota_lvb *qlvb;

	file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, &ra_state, buf,
					   &pos, sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->__pad = 0;
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
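
/*
 * Worked example (editorial, illustrative numbers): limit 1000 blocks, last
 * synced value 900, 4 journals, scale 1/1. A local change of 20 projects to
 * 20 * 4 * 1 / 1 + 900 = 980 < 1000, so no sync yet; a local change of 30
 * projects to 1020 >= 1000 and marks this ID as needing a sync.
 */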
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;
	unsigned int found = 0;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
			found++;
		}
	}
}
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}
static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc,
				 const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
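	/*
	 * Sizing example (editorial, illustrative numbers): a 1MB quota-change
	 * file on a 4KB-block filesystem is 256 blocks; at roughly 254
	 * gfs2_quota_change records per block that is ~65024 slots, needing
	 * DIV_ROUND_UP(65024, 8 * 4096) = 2 one-page bitmap chunks.
	 */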
	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
		}
	}
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);
	}
}
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			schedule();
			spin_lock(&sdp->sd_quota_spin);
			continue;
		}

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);

		spin_lock(&sdp->sd_quota_spin);
	}
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}