fs/dquot.c (linux-2.6/linux-mips.git)
/*
 * Implementation of the diskquota system for the LINUX operating
 * system. QUOTA is implemented using the BSD system call interface as
 * the means of communication with the user level. Currently only the
 * ext2 filesystem has support for disk quotas. Other filesystems may
 * be added in the future. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or
 * block. These routines take care of the administration needed to
 * have a consistent diskquota tracking system. The ideas of both
 * user and group quotas are based on the Melbourne quota system as
 * used on BSD derived systems. The internal implementation is
 * based on one of the several variants of the LINUX inode-subsystem
 * with added complexity of the diskquota system.
 *
 * Version: $Id: dquot.c,v 6.3 1996/11/17 18:35:34 mvw Exp mvw $
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As the consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/kmod.h>

#include <asm/uaccess.h>
#define __DQUOT_PARANOIA

/*
 * There are two quota SMP locks. dq_list_lock protects all lists with quotas
 * and quota formats and also the dqstats structure containing statistics about
 * the lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures
 * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
 * Note that we don't have to do the locking of i_blocks and i_bytes when the
 * quota is disabled - i_sem should serialize the access. dq_data_lock should
 * always be grabbed before dq_list_lock.
 *
 * Note that some things (eg. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock.
 */
spinlock_t dq_list_lock = SPIN_LOCK_UNLOCKED;
spinlock_t dq_data_lock = SPIN_LOCK_UNLOCKED;
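
/*
 * A minimal sketch of the ordering rule described above, assuming a caller
 * that needs both locks (illustrative only; most paths in this file take
 * just one of the two):
 *
 *	spin_lock(&dq_data_lock);
 *	spin_lock(&dq_list_lock);
 *	... update dqstats and/or dquot->dq_dqb ...
 *	spin_unlock(&dq_list_lock);
 *	spin_unlock(&dq_data_lock);
 */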
static char *quotatypes[] = INITQFNAMES;
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
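
/*
 * A quota format module typically registers itself from its module_init()
 * routine; a sketch modelled on the v2 format (field values illustrative):
 *
 *	static struct quota_format_type quota_format_2 = {
 *		.qf_fmt_id = QFMT_VFS_V0,
 *		.qf_ops = &v2_format_ops,
 *		.qf_owner = THIS_MODULE,
 *	};
 *
 *	register_quota_format(&quota_format_2);
 */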
void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) {
			actqf = NULL;
			goto out;
		}
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
out:
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
/*
 * Dquot List Management:
 * The quota code uses three lists for dquot management: the inuse_list,
 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 * on all three lists, depending on its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for the sync and invalidate operations, which must look
 * at every dquot.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 * and this list is searched whenever we need an available dquot. Dquots are
 * removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When
 * a dquot is invalidated it's completely released from memory.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */

/*
 * Note that any operation which operates on dquot data (ie. dq_dqb) must
 * hold dq_data_lock.
 *
 * Any operation working with dquots must hold dqptr_sem. If the operation is
 * just reading pointers from inodes then the read lock is enough. If pointers
 * are altered the function must hold the write lock.
 *
 * Locked dquots might not be referenced in inodes. Currently a dquot is locked
 * only once in its existence - when it's being read to memory on first dqget()
 * and at that time it can't be referenced from an inode. Write operations on
 * dquots don't hold the dquot lock as they copy data to internal buffers before
 * writing anyway and copying as well as any data update should be atomic. Also
 * nobody can change used entries in the dquot structure as this is done only when
 * quota is destroyed and invalidate_dquots() is called only when dq_count == 0.
 */

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static struct list_head dquot_hash[NR_DQHASH];

struct dqstats dqstats;

static inline int const hashfn(struct super_block *sb, unsigned int id, int type)
{
	return((((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type)) % NR_DQHASH;
}
/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct list_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	list_add(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	list_del_init(&dquot->dq_hash);
}

static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
{
	struct list_head *head;
	struct dquot *dquot;

	for (head = dquot_hash[hashent].next; head != dquot_hash+hashent; head = head->next) {
		dquot = list_entry(head, struct dquot, dq_hash);
		if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
			return dquot;
	}
	return NODQUOT;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add(&dquot->dq_free, free_dquots.prev);
	dqstats.free_dquots++;
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	dqstats.free_dquots--;
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add(&dquot->dq_inuse, inuse_list.prev);
	dqstats.allocated_dquots++;
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats.allocated_dquots--;
	list_del(&dquot->dq_inuse);
}

static void wait_on_dquot(struct dquot *dquot)
{
	down(&dquot->dq_lock);
	up(&dquot->dq_lock);
}
static int read_dqblk(struct dquot *dquot)
{
	int ret;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	down(&dquot->dq_lock);
	down(&dqopt->dqio_sem);
	ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
	up(&dqopt->dqio_sem);
	up(&dquot->dq_lock);
	return ret;
}

static int commit_dqblk(struct dquot *dquot)
{
	int ret;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	down(&dqopt->dqio_sem);
	ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
	up(&dqopt->dqio_sem);
	return ret;
}
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled so no new quota might be created. Because we hold dqptr_sem
 * for writing and pointers were already removed from inodes we actually know that
 * no quota for this sb+type should be held. */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot;
	struct list_head *head;

	spin_lock(&dq_list_lock);
	for (head = inuse_list.next; head != &inuse_list;) {
		dquot = list_entry(head, struct dquot, dq_inuse);
		head = head->next;
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_type != type)
			continue;
#ifdef __DQUOT_PARANOIA
		/* There should be no users of quota - we hold dqptr_sem for writing */
		if (atomic_read(&dquot->dq_count))
			BUG();
#endif
		/* Quota now has no users and it has been written on last dqput() */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		kmem_cache_free(dquot_cachep, dquot);
	}
	spin_unlock(&dq_list_lock);
}
static int vfs_quota_sync(struct super_block *sb, int type)
{
	struct list_head *head;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;

	down_read(&dqopt->dqptr_sem);
restart:
	/* At this point any dirty dquot will definitely be written so we can clear
	   dirty flag from info */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt))
			clear_bit(DQF_ANY_DQUOT_DIRTY_B, &dqopt->info[cnt].dqi_flags);
	spin_lock(&dq_list_lock);
	list_for_each(head, &inuse_list) {
		dquot = list_entry(head, struct dquot, dq_inuse);
		if (sb && dquot->dq_sb != sb)
			continue;
		if (type != -1 && dquot->dq_type != type)
			continue;
		if (!dquot->dq_sb)	/* Invalidated? */
			continue;
		if (!dquot_dirty(dquot))
			continue;
		spin_unlock(&dq_list_lock);
		sb->dq_op->sync_dquot(dquot);
		goto restart;
	}
	spin_unlock(&dq_list_lock);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt) && info_dirty(&dqopt->info[cnt])) {
			down(&dqopt->dqio_sem);
			dqopt->ops[cnt]->write_file_info(sb, cnt);
			up(&dqopt->dqio_sem);
		}
	spin_lock(&dq_list_lock);
	dqstats.syncs++;
	spin_unlock(&dq_list_lock);
	up_read(&dqopt->dqptr_sem);

	return 0;
}
/* Free unused dquots from cache */
static void prune_dqcache(int count)
{
	struct list_head *head;
	struct dquot *dquot;

	head = free_dquots.prev;
	while (head != &free_dquots && count) {
		dquot = list_entry(head, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		kmem_cache_free(dquot_cachep, dquot);
		count--;
		head = free_dquots.prev;
	}
}

/*
 * This is called from kswapd when we think we need some
 * more memory
 */
static int shrink_dqcache_memory(int nr, unsigned int gfp_mask)
{
	int ret;

	spin_lock(&dq_list_lock);
	if (nr)
		prune_dqcache(nr);
	ret = dqstats.allocated_dquots;
	spin_unlock(&dq_list_lock);
	return ret;
}
/*
 * Put reference to dquot
 * NOTE: If you change this function please check whether dqput_blocks() works right...
 * MUST be called with dqptr_sem held
 */
static void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef __DQUOT_PARANOIA
	if (!atomic_read(&dquot->dq_count)) {
		printk("VFS: dqput: trying to free free dquot\n");
		printk("VFS: device %s, dquot of %s %d\n",
			dquot->dq_sb->s_id,
			quotatypes[dquot->dq_type],
			dquot->dq_id);
		BUG();
	}
#endif

	spin_lock(&dq_list_lock);
	dqstats.drops++;
	spin_unlock(&dq_list_lock);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		return;
	}
	if (dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		commit_dqblk(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef __DQUOT_PARANOIA
	/* sanity check */
	if (!list_empty(&dquot->dq_free))
		BUG();
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = kmem_cache_alloc(dquot_cachep, SLAB_KERNEL);
	if (!dquot)
		return NODQUOT;

	memset((caddr_t)dquot, 0, sizeof(struct dquot));
	sema_init(&dquot->dq_lock, 1);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_LIST_HEAD(&dquot->dq_hash);
	dquot->dq_sb = sb;
	dquot->dq_type = type;
	atomic_set(&dquot->dq_count, 1);

	return dquot;
}
/*
 * Get reference to dquot
 * MUST be called with dqptr_sem held
 */
static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
	unsigned int hashent = hashfn(sb, id, type);
	struct dquot *dquot, *empty = NODQUOT;

	if (!sb_has_quota_enabled(sb, type))
		return NODQUOT;
we_slept:
	spin_lock(&dq_list_lock);
	if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
		if (empty == NODQUOT) {
			spin_unlock(&dq_list_lock);
			if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		dquot->dq_id = id;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
		read_dqblk(dquot);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		dqstats.cache_hits++;
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
		wait_on_dquot(dquot);
		if (empty)
			kmem_cache_free(dquot_cachep, empty);
	}

#ifdef __DQUOT_PARANOIA
	if (!dquot->dq_sb)	/* Has somebody invalidated entry under us? */
		BUG();
#endif

	return dquot;
}
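
/*
 * Typical use of the dqget()/dqput() pair, shown as a sketch; this is the
 * pattern that dquot_initialize() and vfs_get_dqblk() below follow:
 *
 *	down_read(&sb_dqopt(sb)->dqptr_sem);
 *	dquot = dqget(sb, id, USRQUOTA);
 *	if (dquot != NODQUOT) {
 *		... read or update dquot->dq_dqb under dq_data_lock ...
 *		dqput(dquot);
 *	}
 *	up_read(&sb_dqopt(sb)->dqptr_sem);
 */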
static int dqinit_needed(struct inode *inode, int type)
{
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;
	if (type != -1)
		return inode->i_dquot[type] == NODQUOT;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (inode->i_dquot[cnt] == NODQUOT)
			return 1;
	return 0;
}

/* This routine is guarded by dqptr_sem semaphore */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct list_head *p;

restart:
	file_list_lock();
	list_for_each(p, &sb->s_files) {
		struct file *filp = list_entry(p, struct file, f_list);
		struct inode *inode = filp->f_dentry->d_inode;
		if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) {
			struct vfsmount *mnt = mntget(filp->f_vfsmnt);
			struct dentry *dentry = dget(filp->f_dentry);
			file_list_unlock();
			sb->dq_op->initialize(inode, type);
			dput(dentry);
			mntput(mnt);
			/* As we may have blocked we had better restart... */
			goto restart;
		}
	}
	file_list_unlock();
}
/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
static inline int dqput_blocks(struct dquot *dquot)
{
	if (atomic_read(&dquot->dq_count) <= 1 && dquot_dirty(dquot))
		return 1;
	return 0;
}

/* Remove references to dquots from inode - add dquot to list for freeing if needed */
/* We can't race with anybody because we hold dqptr_sem for writing... */
int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head)
{
	struct dquot *dquot = inode->i_dquot[type];
	int cnt;

	inode->i_dquot[type] = NODQUOT;
	/* any other quota in use? */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] != NODQUOT)
			goto put_it;
	}
	inode->i_flags &= ~S_QUOTA;
put_it:
	if (dquot != NODQUOT) {
		if (dqput_blocks(dquot)) {
#ifdef __DQUOT_PARANOIA
			if (atomic_read(&dquot->dq_count) != 1)
				printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
			spin_lock(&dq_list_lock);
			list_add(&dquot->dq_free, tofree_head);	/* As dquot must have currently users it can't be on the free list... */
			spin_unlock(&dq_list_lock);
			return 1;
		}
		else
			dqput(dquot);	/* We have guaranteed we won't block */
	}
	return 0;
}

/* Free list of dquots - called from inode.c */
/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */
void put_dquot_list(struct list_head *tofree_head)
{
	struct list_head *act_head;
	struct dquot *dquot;

	act_head = tofree_head->next;
	/* So now we have dquots on the list... Just free them */
	while (act_head != tofree_head) {
		dquot = list_entry(act_head, struct dquot, dq_free);
		act_head = act_head->next;
		list_del_init(&dquot->dq_free);	/* Remove dquot from the list so we won't have problems... */
		dqput(dquot);
	}
}
static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number)
{
	dquot->dq_dqb.dqb_curinodes += number;
	mark_dquot_dirty(dquot);
}

static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
	mark_dquot_dirty(dquot);
}

static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number)
{
	if (dquot->dq_dqb.dqb_curinodes > number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes < dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
	mark_dquot_dirty(dquot);
}

static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_curspace > number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (toqb(dquot->dq_dqb.dqb_curspace) < dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	mark_dquot_dirty(dquot);
}
static inline int need_print_warning(struct dquot *dquot)
{
	switch (dquot->dq_type) {
		case USRQUOTA:
			return current->fsuid == dquot->dq_id;
		case GRPQUOTA:
			return in_group_p(dquot->dq_id);
	}
	return 0;
}

/* Values of warnings */
#define NOWARN 0
#define IHARDWARN 1
#define ISOFTLONGWARN 2
#define ISOFTWARN 3
#define BHARDWARN 4
#define BSOFTLONGWARN 5
#define BSOFTWARN 6

/* Print warning to the user who exceeded quota */
static void print_warning(struct dquot *dquot, const char warntype)
{
	char *msg = NULL;
	int flag = (warntype == BHARDWARN || warntype == BSOFTLONGWARN) ? DQ_BLKS_B :
	  ((warntype == IHARDWARN || warntype == ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!need_print_warning(dquot) || (flag && test_and_set_bit(flag, &dquot->dq_flags)))
		return;
	tty_write_message(current->tty, dquot->dq_sb->s_id);
	if (warntype == ISOFTWARN || warntype == BSOFTWARN)
		tty_write_message(current->tty, ": warning, ");
	else
		tty_write_message(current->tty, ": write failed, ");
	tty_write_message(current->tty, quotatypes[dquot->dq_type]);
	switch (warntype) {
		case IHARDWARN:
			msg = " file limit reached.\n";
			break;
		case ISOFTLONGWARN:
			msg = " file quota exceeded too long.\n";
			break;
		case ISOFTWARN:
			msg = " file quota exceeded.\n";
			break;
		case BHARDWARN:
			msg = " block limit reached.\n";
			break;
		case BSOFTLONGWARN:
			msg = " block quota exceeded too long.\n";
			break;
		case BSOFTWARN:
			msg = " block quota exceeded.\n";
			break;
	}
	tty_write_message(current->tty, msg);
}

static inline void flush_warnings(struct dquot **dquots, char *warntype)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++)
		if (dquots[i] != NODQUOT && warntype[i] != NOWARN)
			print_warning(dquots[i], warntype[i]);
}

static inline char ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	return capable(CAP_SYS_RESOURCE) &&
	    (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
}
/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, ulong inodes, char *warntype)
{
	*warntype = NOWARN;
	if (inodes <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		*warntype = IHARDWARN;
		return NO_QUOTA;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		*warntype = ISOFTLONGWARN;
		return NO_QUOTA;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		*warntype = ISOFTWARN;
		dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}

	return QUOTA_OK;
}

/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
	*warntype = 0;
	if (space <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = BHARDWARN;
		return NO_QUOTA;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = BSOFTLONGWARN;
		return NO_QUOTA;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			*warntype = BSOFTWARN;
			dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
		}
		else
			/*
			 * We don't allow preallocation to exceed softlimit so exceeding will
			 * be always printed
			 */
			return NO_QUOTA;
	}

	return QUOTA_OK;
}
/*
 * Externally referenced functions through dquot_operations in inode.
 *
 * Note: this is a blocking operation.
 */
void dquot_initialize(struct inode *inode, int type)
{
	unsigned int id = 0;
	int cnt;

	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Having dqptr_sem we know NOQUOTA flags can't be altered... */
	if (IS_NOQUOTA(inode)) {
		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
		return;
	}
	/* Build list of quotas to initialize... */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (inode->i_dquot[cnt] == NODQUOT) {
			switch (cnt) {
				case USRQUOTA:
					id = inode->i_uid;
					break;
				case GRPQUOTA:
					id = inode->i_gid;
					break;
			}
			inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
			if (inode->i_dquot[cnt])
				inode->i_flags |= S_QUOTA;
		}
	}
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
}

/*
 * Release all quota for the specified inode.
 *
 * Note: this is a blocking operation.
 */
static void dquot_drop_nolock(struct inode *inode)
{
	int cnt;

	inode->i_flags &= ~S_QUOTA;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		dqput(inode->i_dquot[cnt]);
		inode->i_dquot[cnt] = NODQUOT;
	}
}

void dquot_drop(struct inode *inode)
{
	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	dquot_drop_nolock(inode);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
}
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
	int cnt, ret = NO_QUOTA;
	char warntype[MAXQUOTAS];

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = NOWARN;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
			goto warn_put_all;
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		dquot_incr_space(inode->i_dquot[cnt], number);
	}
	inode_add_bytes(inode, number);
	ret = QUOTA_OK;
warn_put_all:
	spin_unlock(&dq_data_lock);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(const struct inode *inode, unsigned long number)
{
	int cnt, ret = NO_QUOTA;
	char warntype[MAXQUOTAS];

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = NOWARN;
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
			goto warn_put_all;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		dquot_incr_inodes(inode->i_dquot[cnt], number);
	}
	ret = QUOTA_OK;
warn_put_all:
	spin_unlock(&dq_data_lock);
	flush_warnings((struct dquot **)inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
/*
 * This is a non-blocking operation.
 */
void dquot_free_space(struct inode *inode, qsize_t number)
{
	unsigned int cnt;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		dquot_decr_space(inode->i_dquot[cnt], number);
	}
	inode_sub_bytes(inode, number);
	spin_unlock(&dq_data_lock);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
}

/*
 * This is a non-blocking operation.
 */
void dquot_free_inode(const struct inode *inode, unsigned long number)
{
	unsigned int cnt;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		dquot_decr_inodes(inode->i_dquot[cnt], number);
	}
	spin_unlock(&dq_data_lock);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
}
/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 *
 * This operation can block, but only after everything is updated
 */
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
	qsize_t space;
	struct dquot *transfer_from[MAXQUOTAS];
	struct dquot *transfer_to[MAXQUOTAS];
	int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
	    chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
	char warntype[MAXQUOTAS];

	/* Clear the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
		warntype[cnt] = NOWARN;
	}
	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode))	/* File without quota accounting? */
		goto warn_put_all;
	/* First build the transfer_to list - here we can block on reading of dquots... */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		switch (cnt) {
			case USRQUOTA:
				if (!chuid)
					continue;
				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
				break;
			case GRPQUOTA:
				if (!chgid)
					continue;
				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
				break;
		}
	}
	spin_lock(&dq_data_lock);
	space = inode_get_bytes(inode);
	/* Build the transfer_from list and check the limits */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (transfer_to[cnt] == NODQUOT)
			continue;
		transfer_from[cnt] = inode->i_dquot[cnt];
		if (check_idq(transfer_to[cnt], 1, warntype+cnt) == NO_QUOTA ||
		    check_bdq(transfer_to[cnt], space, 0, warntype+cnt) == NO_QUOTA)
			goto warn_put_all;
	}

	/*
	 * Finally perform the needed transfer from transfer_from to transfer_to
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (transfer_to[cnt] == NODQUOT)
			continue;

		dquot_decr_inodes(transfer_from[cnt], 1);
		dquot_decr_space(transfer_from[cnt], space);

		dquot_incr_inodes(transfer_to[cnt], 1);
		dquot_incr_space(transfer_to[cnt], space);

		inode->i_dquot[cnt] = transfer_to[cnt];
	}
	ret = QUOTA_OK;
warn_put_all:
	spin_unlock(&dq_data_lock);
	flush_warnings(transfer_to, warntype);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
			dqput(transfer_from[cnt]);
		if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
			dqput(transfer_to[cnt]);
	}
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
/*
 * Definitions of diskquota operations.
 */
struct dquot_operations dquot_operations = {
	.initialize	= dquot_initialize,		/* mandatory */
	.drop		= dquot_drop,			/* mandatory */
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.sync_dquot	= commit_dqblk
};

/* Function used by filesystems for initializing the dquot_operations structure */
void init_dquot_operations(struct dquot_operations *fsdqops)
{
	memcpy(fsdqops, &dquot_operations, sizeof(dquot_operations));
}
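
/*
 * Sketch of how a filesystem might use init_dquot_operations(); the
 * "myfs" names are purely illustrative, not taken from this source:
 *
 *	static struct dquot_operations myfs_dquot_operations;
 *
 *	static int __init init_myfs_fs(void)
 *	{
 *		init_dquot_operations(&myfs_dquot_operations);
 *		...
 *	}
 *
 *	and in its fill_super() path:
 *
 *	sb->dq_op = &myfs_dquot_operations;
 *	sb->s_qcop = &vfs_quotactl_ops;
 */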
static inline void set_enable_flags(struct quota_info *dqopt, int type)
{
	switch (type) {
		case USRQUOTA:
			dqopt->flags |= DQUOT_USR_ENABLED;
			break;
		case GRPQUOTA:
			dqopt->flags |= DQUOT_GRP_ENABLED;
			break;
	}
}

static inline void reset_enable_flags(struct quota_info *dqopt, int type)
{
	switch (type) {
		case USRQUOTA:
			dqopt->flags &= ~DQUOT_USR_ENABLED;
			break;
		case GRPQUOTA:
			dqopt->flags &= ~DQUOT_GRP_ENABLED;
			break;
	}
}

/* Function in inode.c - remove pointers to dquots in icache */
extern void remove_dquot_ref(struct super_block *, int);
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int vfs_quota_off(struct super_block *sb, int type)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!sb)
		goto out;

	/* We need to serialize quota_off() for device */
	down(&dqopt->dqonoff_sem);
	down_write(&dqopt->dqptr_sem);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_enabled(sb, cnt))
			continue;
		reset_enable_flags(dqopt, cnt);

		/* Note: these are blocking operations */
		remove_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we should be
		 * the only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt])) {
			down(&dqopt->dqio_sem);
			dqopt->ops[cnt]->write_file_info(sb, cnt);
			up(&dqopt->dqio_sem);
		}
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);

		fput(dqopt->files[cnt]);
		dqopt->files[cnt] = (struct file *)NULL;
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}
	up_write(&dqopt->dqptr_sem);
	up(&dqopt->dqonoff_sem);
out:
	return 0;
}
int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
{
	struct file *f;
	struct inode *inode;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct quota_format_type *fmt = find_quota_format(format_id);
	int error;
	unsigned int oldflags;

	if (!fmt)
		return -ESRCH;
	f = filp_open(path, O_RDWR, 0600);
	if (IS_ERR(f)) {
		error = PTR_ERR(f);
		goto out_fmt;
	}
	error = -EIO;
	if (!f->f_op || !f->f_op->read || !f->f_op->write)
		goto out_f;
	error = security_quota_on(f);
	if (error)
		goto out_f;
	inode = f->f_dentry->d_inode;
	error = -EACCES;
	if (!S_ISREG(inode->i_mode))
		goto out_f;

	down(&dqopt->dqonoff_sem);
	down_write(&dqopt->dqptr_sem);
	if (sb_has_quota_enabled(sb, type)) {
		error = -EBUSY;
		goto out_lock;
	}
	oldflags = inode->i_flags;
	dqopt->files[type] = f;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;
	/* We don't want quota on quota files */
	dquot_drop_nolock(inode);
	inode->i_flags |= S_NOQUOTA;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	down(&dqopt->dqio_sem);
	if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
		up(&dqopt->dqio_sem);
		goto out_file_init;
	}
	up(&dqopt->dqio_sem);
	set_enable_flags(dqopt, type);
	up_write(&dqopt->dqptr_sem);

	add_dquot_ref(sb, type);
	up(&dqopt->dqonoff_sem);

	return 0;

out_file_init:
	inode->i_flags = oldflags;
	dqopt->files[type] = NULL;
out_lock:
	up_write(&dqopt->dqptr_sem);
	up(&dqopt->dqonoff_sem);
out_f:
	filp_close(f, NULL);
out_fmt:
	put_quota_format(fmt);

	return error;
}
/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	spin_lock(&dq_data_lock);
	di->dqb_bhardlimit = dm->dqb_bhardlimit;
	di->dqb_bsoftlimit = dm->dqb_bsoftlimit;
	di->dqb_curspace = dm->dqb_curspace;
	di->dqb_ihardlimit = dm->dqb_ihardlimit;
	di->dqb_isoftlimit = dm->dqb_isoftlimit;
	di->dqb_curinodes = dm->dqb_curinodes;
	di->dqb_btime = dm->dqb_btime;
	di->dqb_itime = dm->dqb_itime;
	di->dqb_valid = QIF_ALL;
	spin_unlock(&dq_data_lock);
}

int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
{
	struct dquot *dquot;

	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!(dquot = dqget(sb, id, type))) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
		return -ESRCH;
	}
	do_get_dqblk(dquot, di);
	dqput(dquot);
	up_read(&sb_dqopt(sb)->dqptr_sem);
	return 0;
}

/* Generic routine for setting common part of quota structure */
static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;

	spin_lock(&dq_data_lock);
	if (di->dqb_valid & QIF_SPACE) {
		dm->dqb_curspace = di->dqb_curspace;
		check_blim = 1;
	}
	if (di->dqb_valid & QIF_BLIMITS) {
		dm->dqb_bsoftlimit = di->dqb_bsoftlimit;
		dm->dqb_bhardlimit = di->dqb_bhardlimit;
		check_blim = 1;
	}
	if (di->dqb_valid & QIF_INODES) {
		dm->dqb_curinodes = di->dqb_curinodes;
		check_ilim = 1;
	}
	if (di->dqb_valid & QIF_ILIMITS) {
		dm->dqb_isoftlimit = di->dqb_isoftlimit;
		dm->dqb_ihardlimit = di->dqb_ihardlimit;
		check_ilim = 1;
	}
	if (di->dqb_valid & QIF_BTIME)
		dm->dqb_btime = di->dqb_btime;
	if (di->dqb_valid & QIF_ITIME)
		dm->dqb_itime = di->dqb_itime;

	if (check_blim) {
		if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_BTIME))	/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_ITIME))	/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	mark_dquot_dirty(dquot);
	spin_unlock(&dq_data_lock);
}

int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
{
	struct dquot *dquot;

	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!(dquot = dqget(sb, id, type))) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
		return -ESRCH;
	}
	do_set_dqblk(dquot, di);
	dqput(dquot);
	up_read(&sb_dqopt(sb)->dqptr_sem);
	return 0;
}
/* Generic routine for getting common part of quota file information */
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;

	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!sb_has_quota_enabled(sb, type)) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
		return -ESRCH;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	ii->dqi_bgrace = mi->dqi_bgrace;
	ii->dqi_igrace = mi->dqi_igrace;
	ii->dqi_flags = mi->dqi_flags & DQF_MASK;
	ii->dqi_valid = IIF_ALL;
	spin_unlock(&dq_data_lock);
	up_read(&sb_dqopt(sb)->dqptr_sem);
	return 0;
}

/* Generic routine for setting common part of quota file information */
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;

	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!sb_has_quota_enabled(sb, type)) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
		return -ESRCH;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	if (ii->dqi_valid & IIF_BGRACE)
		mi->dqi_bgrace = ii->dqi_bgrace;
	if (ii->dqi_valid & IIF_IGRACE)
		mi->dqi_igrace = ii->dqi_igrace;
	if (ii->dqi_valid & IIF_FLAGS)
		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
	mark_info_dirty(mi);
	spin_unlock(&dq_data_lock);
	up_read(&sb_dqopt(sb)->dqptr_sem);
	return 0;
}

struct quotactl_ops vfs_quotactl_ops = {
	.quota_on	= vfs_quota_on,
	.quota_off	= vfs_quota_off,
	.quota_sync	= vfs_quota_sync,
	.get_info	= vfs_get_dqinfo,
	.set_info	= vfs_set_dqinfo,
	.get_dqblk	= vfs_get_dqblk,
	.set_dqblk	= vfs_set_dqblk
};
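
/*
 * For orientation: these methods are reached from the quotactl(2) system
 * call via sb->s_qcop. A userspace call such as (illustrative only; the
 * device path is made up)
 *
 *	quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/hda1", uid, (caddr_t)&dqblk);
 *
 * ends up in vfs_get_dqblk() above for filesystems that use these generic
 * operations.
 */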
static ctl_table fs_dqstats_table[] = {
	{
		.ctl_name	= FS_DQ_LOOKUPS,
		.procname	= "lookups",
		.data		= &dqstats.lookups,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_DROPS,
		.procname	= "drops",
		.data		= &dqstats.drops,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_READS,
		.procname	= "reads",
		.data		= &dqstats.reads,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_WRITES,
		.procname	= "writes",
		.data		= &dqstats.writes,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_CACHE_HITS,
		.procname	= "cache_hits",
		.data		= &dqstats.cache_hits,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_ALLOCATED,
		.procname	= "allocated_dquots",
		.data		= &dqstats.allocated_dquots,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_FREE,
		.procname	= "free_dquots",
		.data		= &dqstats.free_dquots,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_SYNCS,
		.procname	= "syncs",
		.data		= &dqstats.syncs,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 },
};

static ctl_table fs_table[] = {
	{
		.ctl_name	= FS_DQSTATS,
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ .ctl_name = 0 },
};

static ctl_table sys_table[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ .ctl_name = 0 },
};
/* SLAB cache for dquot structures */
kmem_cache_t *dquot_cachep;

static int __init dquot_init(void)
{
	int i;

	register_sysctl_table(sys_table, 0);
	for (i = 0; i < NR_DQHASH; i++)
		INIT_LIST_HEAD(dquot_hash + i);
	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, NULL, NULL);
	if (!dquot_cachep)
		panic("Cannot create dquot SLAB cache");

	set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);

	return 0;
}
module_init(dquot_init);

EXPORT_SYMBOL(register_quota_format);
EXPORT_SYMBOL(unregister_quota_format);
EXPORT_SYMBOL(dqstats);
EXPORT_SYMBOL(dq_list_lock);
EXPORT_SYMBOL(dq_data_lock);
EXPORT_SYMBOL(init_dquot_operations);