Linux-2.3.3 and a short hiatus..
[davej-history.git] / fs / dquot.c
blobdfef0a63ab415375c1bf371bb9042336c481d98c
1 /*
2 * Implementation of the diskquota system for the LINUX operating
3 * system. QUOTA is implemented using the BSD system call interface as
4 * the means of communication with the user level. Currently only the
5 * ext2 filesystem has support for disk quotas. Other filesystems may
6 * be added in the future. This file contains the generic routines
7 * called by the different filesystems on allocation of an inode or
8 * block. These routines take care of the administration needed to
9 * have a consistent diskquota tracking system. The ideas of both
10 * user and group quotas are based on the Melbourne quota system as
11 * used on BSD derived systems. The internal implementation is
12 * based on one of the several variants of the LINUX inode-subsystem
13 * with added complexity of the diskquota system.
15 * Version: $Id: dquot.c,v 6.3 1996/11/17 18:35:34 mvw Exp mvw $
17 * Author: Marco van Wieringen <mvw@planets.elm.net>
19 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
21 * Revised list management to avoid races
22 * -- Bill Hawes, <whawes@star.net>, 9/98
24 * (C) Copyright 1994 - 1997 Marco van Wieringen
27 #include <linux/errno.h>
28 #include <linux/kernel.h>
29 #include <linux/sched.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/fcntl.h>
34 #include <linux/stat.h>
35 #include <linux/tty.h>
36 #include <linux/file.h>
37 #include <linux/malloc.h>
38 #include <linux/mount.h>
39 #include <linux/smp.h>
40 #include <linux/smp_lock.h>
41 #include <linux/init.h>
42 #include <linux/slab.h>
44 #include <asm/uaccess.h>
46 #define __DQUOT_VERSION__ "dquot_6.4.0"
/* Total dquots ever allocated / dquots currently on the free list. */
48 int nr_dquots = 0, nr_free_dquots = 0;
/* Upper bound on dquot allocation; see get_empty_dquot(). */
49 int max_dquots = NR_DQUOTS;
/* Scratch buffer for quota warning messages written to the user's tty. */
51 static char quotamessage[MAX_QUOTA_MESSAGE];
/* Printable names of the quota types (user/group), for diagnostics. */
52 static char *quotatypes[] = INITQFNAMES;
/* SLAB cache backing all struct dquot allocations. */
54 static kmem_cache_t *dquot_cachep;
57 * Dquot List Management:
58 * The quota code uses three lists for dquot management: the inuse_list,
59 * free_dquots, and dquot_hash[] array. A single dquot structure may be
60 * on all three lists, depending on its current state.
62 * All dquots are placed on the inuse_list when first created, and this
63 * list is used for the sync and invalidate operations, which must look
64 * at every dquot.
66 * Unused dquots (dq_count == 0) are added to the free_dquots list when
67 * freed, and this list is searched whenever we need an available dquot.
68 * Dquots are removed from the list as soon as they are used again, and
69 * nr_free_dquots gives the number of dquots on the list.
71 * Dquots with a specific identity (device, type and id) are placed on
72 * one of the dquot_hash[] hash chains. This provides an efficient search
73 * mechanism to locate a specific dquot.
/* Singly-linked list of every allocated dquot (see comment above). */
76 static struct dquot *inuse_list = NULL;
/* LRU list of unreferenced dquots; length is tracked in nr_free_dquots. */
77 LIST_HEAD(free_dquots);
/* Hash chains keyed on (device, id, type) for fast identity lookup. */
78 static struct dquot *dquot_hash[NR_DQHASH];
/* Per-bucket count of in-progress dqget() insertions; guards lookups. */
79 static int dquot_updating[NR_DQHASH];
81 static struct dqstats dqstats;
/* Sleepers waiting for a free dquot / for a bucket update to finish. */
82 static DECLARE_WAIT_QUEUE_HEAD(dquot_wait);
83 static DECLARE_WAIT_QUEUE_HEAD(update_wait);
85 static inline char is_enabled(struct vfsmount *vfsmnt, short type)
87 switch (type) {
88 case USRQUOTA:
89 return((vfsmnt->mnt_dquot.flags & DQUOT_USR_ENABLED) != 0);
90 case GRPQUOTA:
91 return((vfsmnt->mnt_dquot.flags & DQUOT_GRP_ENABLED) != 0);
93 return(0);
96 static inline char sb_has_quota_enabled(struct super_block *sb, short type)
98 struct vfsmount *vfsmnt;
100 return((vfsmnt = lookup_vfsmnt(sb->s_dev)) != (struct vfsmount *)NULL && is_enabled(vfsmnt, type));
103 static inline char dev_has_quota_enabled(kdev_t dev, short type)
105 struct vfsmount *vfsmnt;
107 return((vfsmnt = lookup_vfsmnt(dev)) != (struct vfsmount *)NULL && is_enabled(vfsmnt, type));
110 static inline int const hashfn(kdev_t dev, unsigned int id, short type)
112 return((HASHDEV(dev) ^ id) * (MAXQUOTAS - type)) % NR_DQHASH;
/*
 * Link a dquot at the head of its hash chain.  Chains are doubly
 * linked via dq_hash_next and the back-pointer dq_hash_pprev, which
 * points at the field that points at this dquot.
 */
115 static inline void insert_dquot_hash(struct dquot *dquot)
117 struct dquot **htable;
119 htable = &dquot_hash[hashfn(dquot->dq_dev, dquot->dq_id, dquot->dq_type)];
120 if ((dquot->dq_hash_next = *htable) != NULL)
121 (*htable)->dq_hash_pprev = &dquot->dq_hash_next;
122 *htable = dquot;
123 dquot->dq_hash_pprev = htable;
/* Thin alias kept for symmetry with unhash_dquot(). */
126 static inline void hash_dquot(struct dquot *dquot)
128 insert_dquot_hash(dquot);
/*
 * Unlink a dquot from its hash chain, if hashed.  Clearing
 * dq_hash_pprev marks the dquot as unhashed (safe to call twice).
 */
131 static inline void unhash_dquot(struct dquot *dquot)
133 if (dquot->dq_hash_pprev) {
134 if (dquot->dq_hash_next)
135 dquot->dq_hash_next->dq_hash_pprev = dquot->dq_hash_pprev;
136 *(dquot->dq_hash_pprev) = dquot->dq_hash_next;
137 dquot->dq_hash_pprev = NULL;
/*
 * Walk the given hash chain for an exact (dev, id, type) match.
 * Returns NULL when no such dquot is cached.
 */
141 static inline struct dquot *find_dquot(unsigned int hashent, kdev_t dev, unsigned int id, short type)
143 struct dquot *dquot;
145 for (dquot = dquot_hash[hashent]; dquot; dquot = dquot->dq_hash_next)
146 if (dquot->dq_dev == dev && dquot->dq_id == id && dquot->dq_type == type)
147 break;
148 return dquot;
151 /* Add a dquot to the head of the free list */
152 static inline void put_dquot_head(struct dquot *dquot)
154 list_add(&dquot->dq_free, &free_dquots);
155 nr_free_dquots++;
158 /* Add a dquot to the tail of the free list */
/* Used for LRU ordering: recently released dquots go last. */
159 static inline void put_dquot_last(struct dquot *dquot)
161 list_add(&dquot->dq_free, free_dquots.prev);
162 nr_free_dquots++;
/*
 * Take a dquot off the free list; dq_free is re-initialized so
 * list_empty() can later tell whether the dquot is free.
 */
165 static inline void remove_free_dquot(struct dquot *dquot)
167 /* sanity check */
168 if (list_empty(&dquot->dq_free)) {
169 printk("remove_free_dquot: dquot not on free list??\n");
171 list_del(&dquot->dq_free);
172 INIT_LIST_HEAD(&dquot->dq_free);
173 nr_free_dquots--;
/* Link a newly created dquot at the head of the global inuse_list. */
176 static inline void put_inuse(struct dquot *dquot)
178 if ((dquot->dq_next = inuse_list) != NULL)
179 inuse_list->dq_pprev = &dquot->dq_next;
180 inuse_list = dquot;
181 dquot->dq_pprev = &inuse_list;
/* Inverse of put_inuse(); dquots are currently never destroyed. */
184 #if 0 /* currently not needed */
185 static inline void remove_inuse(struct dquot *dquot)
187 if (dquot->dq_pprev) {
188 if (dquot->dq_next)
189 dquot->dq_next->dq_pprev = dquot->dq_pprev;
190 *dquot->dq_pprev = dquot->dq_next;
191 dquot->dq_pprev = NULL;
194 #endif
/*
 * Sleep until the dquot is no longer DQ_LOCKED.  Classic pre-wait_event
 * pattern: set task state before re-testing the flag so a wake-up
 * between test and schedule() is not lost.
 */
196 static void __wait_on_dquot(struct dquot *dquot)
198 DECLARE_WAITQUEUE(wait, current);
200 add_wait_queue(&dquot->dq_wait, &wait);
201 repeat:
202 current->state = TASK_UNINTERRUPTIBLE;
203 if (dquot->dq_flags & DQ_LOCKED) {
204 schedule();
205 goto repeat;
207 remove_wait_queue(&dquot->dq_wait, &wait);
208 current->state = TASK_RUNNING;
/* Fast-path wrapper: avoid the waitqueue setup when not locked. */
211 static inline void wait_on_dquot(struct dquot *dquot)
213 if (dquot->dq_flags & DQ_LOCKED)
214 __wait_on_dquot(dquot);
/* Wait for the current holder, then take the (sleeping) dquot lock. */
217 static inline void lock_dquot(struct dquot *dquot)
219 wait_on_dquot(dquot);
220 dquot->dq_flags |= DQ_LOCKED;
/* Release the dquot lock and wake all waiters. */
223 static inline void unlock_dquot(struct dquot *dquot)
225 dquot->dq_flags &= ~DQ_LOCKED;
226 wake_up(&dquot->dq_wait);
/*
 * Write a dquot's disk block back to the quota file at its fixed
 * offset.  Takes the dquot lock and the per-mount quota-file
 * semaphore; uses set_fs(KERNEL_DS) since the buffer is kernel memory.
 */
229 static void write_dquot(struct dquot *dquot)
231 short type = dquot->dq_type;
232 struct file *filp = dquot->dq_mnt->mnt_dquot.files[type];
233 mm_segment_t fs;
234 loff_t offset;
235 ssize_t ret;
237 lock_dquot(dquot);
238 down(&dquot->dq_mnt->mnt_dquot.semaphore);
239 offset = dqoff(dquot->dq_id);
240 fs = get_fs();
241 set_fs(KERNEL_DS);
244 * Note: clear the DQ_MOD flag unconditionally,
245 * so we don't loop forever on failure.
247 dquot->dq_flags &= ~DQ_MOD;
248 ret = 0;
/* The quota file may have been closed by quota_off meanwhile. */
249 if (filp)
250 ret = filp->f_op->write(filp, (char *)&dquot->dq_dqb,
251 sizeof(struct dqblk), &offset);
252 if (ret != sizeof(struct dqblk))
253 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
254 kdevname(dquot->dq_dev));
256 up(&dquot->dq_mnt->mnt_dquot.semaphore);
257 set_fs(fs);
259 unlock_dquot(dquot);
260 dqstats.writes++;
/*
 * Read a dquot's disk block from the quota file into dq_dqb.
 * A dquot with all four limits zero is marked DQ_FAKE: it is
 * tracked but never enforced.
 */
263 static void read_dquot(struct dquot *dquot)
265 short type;
266 struct file *filp;
267 mm_segment_t fs;
268 loff_t offset;
270 type = dquot->dq_type;
271 filp = dquot->dq_mnt->mnt_dquot.files[type];
/* No quota file for this type -- nothing to read. */
273 if (filp == (struct file *)NULL)
274 return;
276 lock_dquot(dquot);
277 down(&dquot->dq_mnt->mnt_dquot.semaphore);
278 offset = dqoff(dquot->dq_id);
279 fs = get_fs();
280 set_fs(KERNEL_DS);
281 filp->f_op->read(filp, (char *)&dquot->dq_dqb, sizeof(struct dqblk), &offset);
282 up(&dquot->dq_mnt->mnt_dquot.semaphore);
283 set_fs(fs);
285 if (dquot->dq_bhardlimit == 0 && dquot->dq_bsoftlimit == 0 &&
286 dquot->dq_ihardlimit == 0 && dquot->dq_isoftlimit == 0)
287 dquot->dq_flags |= DQ_FAKE;
288 unlock_dquot(dquot);
289 dqstats.reads++;
293 * Unhash and selectively clear the dquot structure,
294 * but preserve the use count, list pointers, and
295 * wait queue.
297 void clear_dquot(struct dquot *dquot)
299 /* unhash it first */
300 unhash_dquot(dquot);
/* Detach from the mount and reset accounting state. */
301 dquot->dq_mnt = NULL;
302 dquot->dq_flags = 0;
303 dquot->dq_referenced = 0;
304 memset(&dquot->dq_dqb, 0, sizeof(struct dqblk));
/*
 * Drop the cached identity of every dquot belonging to (dev, type),
 * e.g. at quota-off.  Waiting on a locked dquot may sleep, during
 * which the inuse_list can change, so the scan restarts when anything
 * blocked.  The (dev, type) checks are repeated after the wait because
 * the dquot may have been recycled for a different identity meanwhile.
 */
307 void invalidate_dquots(kdev_t dev, short type)
309 struct dquot *dquot, *next = inuse_list;
310 int need_restart;
312 restart:
313 need_restart = 0;
314 while ((dquot = next) != NULL) {
315 next = dquot->dq_next;
316 if (dquot->dq_dev != dev)
317 continue;
318 if (dquot->dq_type != type)
319 continue;
320 if (dquot->dq_flags & DQ_LOCKED) {
321 __wait_on_dquot(dquot);
323 /* Set the flag for another pass. */
324 need_restart = 1;
326 * Make sure it's still the same dquot.
328 if (dquot->dq_dev != dev)
329 continue;
330 if (dquot->dq_type != type)
331 continue;
333 clear_dquot(dquot);
336 * If anything blocked, restart the operation
337 * to ensure we don't miss any dquots.
339 if (need_restart)
340 goto restart;
/*
 * Write every modified dquot for (dev, type) back to disk.
 * dev == 0 matches all devices, type == -1 all quota types.
 * Same restart-on-block protocol as invalidate_dquots() above.
 */
343 int sync_dquots(kdev_t dev, short type)
345 struct dquot *dquot, *next = inuse_list;
346 int need_restart;
348 restart:
349 need_restart = 0;
350 while ((dquot = next) != NULL) {
351 next = dquot->dq_next;
352 if (dev && dquot->dq_dev != dev)
353 continue;
354 if (type != -1 && dquot->dq_type != type)
355 continue;
356 if (!(dquot->dq_flags & (DQ_LOCKED | DQ_MOD)))
357 continue;
359 wait_on_dquot(dquot);
360 if (dquot->dq_flags & DQ_MOD)
361 write_dquot(dquot);
362 /* Set the flag for another pass. */
363 need_restart = 1;
366 * If anything blocked, restart the operation
367 * to ensure we don't miss any dquots.
369 if (need_restart)
370 goto restart;
372 dqstats.syncs++;
373 return(0);
/*
 * Release one reference to a dquot.  When the last reference is
 * dropped, any pending modification is flushed first (which can
 * sleep, hence the we_slept recheck) and the dquot is parked at
 * the tail of the LRU free list.
 */
376 void dqput(struct dquot *dquot)
378 if (!dquot)
379 return;
380 if (!dquot->dq_count) {
381 printk("VFS: dqput: trying to free free dquot\n");
382 printk("VFS: device %s, dquot of %s %d\n",
383 kdevname(dquot->dq_dev), quotatypes[dquot->dq_type],
384 dquot->dq_id);
385 return;
389 * If the dq_mnt pointer isn't initialized this entry needs no
390 * checking and doesn't need to be written. It's just an empty
391 * dquot that is put back on to the freelist.
393 if (dquot->dq_mnt != (struct vfsmount *)NULL) {
394 dqstats.drops++;
395 we_slept:
396 wait_on_dquot(dquot);
/* Not the last reference: just drop the count and leave. */
397 if (dquot->dq_count > 1) {
398 dquot->dq_count--;
399 return;
/* Flush a dirty dquot; write_dquot sleeps, so re-run the checks. */
401 if (dquot->dq_flags & DQ_MOD) {
402 write_dquot(dquot);
403 goto we_slept;
407 /* sanity check */
408 if (!list_empty(&dquot->dq_free)) {
409 printk("dqput: dquot already on free list??\n");
411 if (--dquot->dq_count == 0) {
412 /* Place at end of LRU free queue */
413 put_dquot_last(dquot);
/* Wake anyone stalled in get_empty_dquot() waiting for a dquot. */
414 wake_up(&dquot_wait);
417 return;
/*
 * Allocate a batch of 32 fresh dquots from the SLAB cache and put
 * them on both the inuse list and the head of the free list.
 * Stops silently if the allocator runs dry.
 */
420 static void grow_dquots(void)
422 struct dquot *dquot;
423 int cnt = 32;
425 while (cnt > 0) {
426 dquot = kmem_cache_alloc(dquot_cachep, SLAB_KERNEL);
427 if(!dquot)
428 return;
430 nr_dquots++;
431 memset((caddr_t)dquot, 0, sizeof(struct dquot));
432 init_waitqueue_head(&dquot->dq_wait);
433 /* all dquots go on the inuse_list */
434 put_inuse(dquot);
435 put_dquot_head(dquot);
436 cnt--;
/*
 * Scan a bounded prefix of the free list for the reclaim candidate
 * with the fewest references, skipping locked or dirty dquots.
 * Returns NULL if nothing suitable is found within the limit.
 */
440 static struct dquot *find_best_candidate_weighted(void)
442 struct list_head *tmp = &free_dquots;
443 struct dquot *dquot, *best = NULL;
444 unsigned long myscore, bestscore = ~0U;
445 int limit = (nr_free_dquots > 128) ? nr_free_dquots >> 2 : 32;
447 while ((tmp = tmp->next) != &free_dquots && --limit) {
448 dquot = list_entry(tmp, struct dquot, dq_free);
449 if (dquot->dq_flags & (DQ_LOCKED | DQ_MOD))
450 continue;
451 myscore = dquot->dq_referenced;
452 if (myscore < bestscore) {
453 bestscore = myscore;
454 best = dquot;
457 return best;
/*
 * Cheap first pass: return the first completely unreferenced dquot
 * within a bounded prefix of the free list, or NULL.
 */
460 static inline struct dquot *find_best_free(void)
462 struct list_head *tmp = &free_dquots;
463 struct dquot *dquot;
464 int limit = (nr_free_dquots > 1024) ? nr_free_dquots >> 5 : 32;
466 while ((tmp = tmp->next) != &free_dquots && --limit) {
467 dquot = list_entry(tmp, struct dquot, dq_free);
468 if (dquot->dq_referenced == 0)
469 return dquot;
471 return NULL;
/*
 * Obtain a dquot with no identity and dq_count == 1.  Tries, in
 * order: an unreferenced free dquot, growing the pool, a weighted
 * reclaim candidate, pruning the dcache, and finally sleeping until
 * dqput() frees one.  Any step that may have slept restarts the
 * search, since the chosen dquot can be taken meanwhile.
 */
474 struct dquot *get_empty_dquot(void)
476 struct dquot *dquot;
477 int count;
479 repeat:
480 dquot = find_best_free();
481 if (!dquot)
482 goto pressure;
483 got_it:
484 if (dquot->dq_flags & (DQ_LOCKED | DQ_MOD)) {
485 wait_on_dquot(dquot);
486 if (dquot->dq_flags & DQ_MOD)
488 if(dquot->dq_mnt != (struct vfsmount *)NULL)
489 write_dquot(dquot);
492 * The dquot may be back in use now, so we
493 * must recheck the free list.
495 goto repeat;
497 /* sanity check ... */
498 if (dquot->dq_count != 0)
499 printk(KERN_ERR "VFS: free dquot count=%d\n", dquot->dq_count);
501 remove_free_dquot(dquot);
502 dquot->dq_count = 1;
503 /* unhash and selectively clear the structure */
504 clear_dquot(dquot);
505 return dquot;
507 pressure:
508 if (nr_dquots < max_dquots) {
509 grow_dquots();
510 goto repeat;
513 dquot = find_best_candidate_weighted();
514 if (dquot)
515 goto got_it;
517 * Try pruning the dcache to free up some dquots ...
519 count = select_dcache(128, 0);
520 if (count) {
521 printk(KERN_DEBUG "get_empty_dquot: pruning %d\n", count);
522 prune_dcache(count);
523 free_inode_memory(count);
524 goto repeat;
/* Last resort: sleep until dqput() releases a dquot. */
527 printk("VFS: No free dquots, contact mvw@planets.elm.net\n");
528 sleep_on(&dquot_wait);
529 goto repeat;
/*
 * Look up (or create and read in) the dquot for (dev, id, type),
 * returning it with an extra reference, or NODQUOT when quotas are
 * not enabled.  dquot_updating[] counts in-flight insertions per
 * bucket so that concurrent lookups wait until the new entry has
 * been fully read from disk.
 */
532 struct dquot *dqget(kdev_t dev, unsigned int id, short type)
534 unsigned int hashent = hashfn(dev, id, type);
535 struct dquot *dquot, *empty = NULL;
536 struct vfsmount *vfsmnt;
538 if ((vfsmnt = lookup_vfsmnt(dev)) == (struct vfsmount *)NULL || is_enabled(vfsmnt, type) == 0)
539 return(NODQUOT);
541 we_slept:
542 if ((dquot = find_dquot(hashent, dev, id, type)) == NULL) {
/* get_empty_dquot() may sleep; retry the lookup with it in hand. */
543 if (empty == NULL) {
544 dquot_updating[hashent]++;
545 empty = get_empty_dquot();
546 if (!--dquot_updating[hashent])
547 wake_up(&update_wait);
548 goto we_slept;
550 dquot = empty;
551 dquot->dq_id = id;
552 dquot->dq_type = type;
553 dquot->dq_dev = dev;
554 dquot->dq_mnt = vfsmnt;
555 /* hash it first so it can be found */
556 hash_dquot(dquot);
557 read_dquot(dquot);
558 } else {
/* Found in cache; pull it off the free list if it was idle. */
559 if (!dquot->dq_count++) {
560 remove_free_dquot(dquot);
561 } else
562 dqstats.cache_hits++;
563 wait_on_dquot(dquot);
/* We raced with another inserter; drop our spare dquot. */
564 if (empty)
565 dqput(empty);
/* Wait out any insertion still in progress on this bucket. */
568 while (dquot_updating[hashent])
569 sleep_on(&update_wait);
571 dquot->dq_referenced++;
572 dqstats.lookups++;
574 return dquot;
/*
 * After quota-on: walk every in-use file on the superblock and
 * attach quota state to inodes open for writing.
 */
577 static void add_dquot_ref(kdev_t dev, short type)
579 struct super_block *sb = get_super(dev);
580 struct file *filp;
581 struct inode *inode;
583 if (!sb || !sb->dq_op)
584 return; /* nothing to do */
586 for (filp = inuse_filps; filp; filp = filp->f_next) {
587 if (!filp->f_dentry)
588 continue;
589 if (filp->f_dentry->d_sb != sb)
590 continue;
591 inode = filp->f_dentry->d_inode;
592 if (!inode)
593 continue;
594 /* N.B. race problem -- filp could become unused */
595 if (filp->f_mode & FMODE_WRITE) {
596 sb->dq_op->initialize(inode, type);
597 inode->i_flags |= S_QUOTA;
/*
 * At quota-off: detach the given quota type from every open inode
 * on the superblock and drop the references.  dqput() can sleep,
 * so the walk restarts from scratch after each release.
 */
602 static void reset_dquot_ptrs(kdev_t dev, short type)
604 struct super_block *sb = get_super(dev);
605 struct file *filp;
606 struct inode *inode;
607 struct dquot *dquot;
608 int cnt;
610 if (!sb || !sb->dq_op)
611 return; /* nothing to do */
613 restart:
614 /* free any quota for unused dentries */
615 shrink_dcache_sb(sb);
617 for (filp = inuse_filps; filp; filp = filp->f_next) {
618 if (!filp->f_dentry)
619 continue;
620 if (filp->f_dentry->d_sb != sb)
621 continue;
622 inode = filp->f_dentry->d_inode;
623 if (!inode)
624 continue;
626 * Note: we restart after each blocking operation,
627 * as the inuse_filps list may have changed.
629 if (IS_QUOTAINIT(inode)) {
630 dquot = inode->i_dquot[type];
631 inode->i_dquot[type] = NODQUOT;
632 /* any other quota in use? */
633 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
634 if (inode->i_dquot[cnt] != NODQUOT)
635 goto put_it;
/* No quota type left on this inode: clear the marker flag. */
637 inode->i_flags &= ~S_QUOTA;
638 put_it:
639 if (dquot != NODQUOT) {
640 dqput(dquot);
641 /* we may have blocked ... */
642 goto restart;
/* Charge "number" inodes to the dquot and mark it dirty. */
648 static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number)
650 lock_dquot(dquot);
651 dquot->dq_curinodes += number;
652 dquot->dq_flags |= DQ_MOD;
653 unlock_dquot(dquot);
/* Charge "number" blocks to the dquot and mark it dirty. */
656 static inline void dquot_incr_blocks(struct dquot *dquot, unsigned long number)
658 lock_dquot(dquot);
659 dquot->dq_curblocks += number;
660 dquot->dq_flags |= DQ_MOD;
661 unlock_dquot(dquot);
/*
 * Release "number" inodes from the dquot, clamping at zero.  When
 * back under the soft limit, the grace timer and the warned flag
 * (DQ_INODES) are reset.
 */
664 static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number)
666 lock_dquot(dquot);
667 if (dquot->dq_curinodes > number)
668 dquot->dq_curinodes -= number;
669 else
670 dquot->dq_curinodes = 0;
671 if (dquot->dq_curinodes < dquot->dq_isoftlimit)
672 dquot->dq_itime = (time_t) 0;
673 dquot->dq_flags &= ~DQ_INODES;
674 dquot->dq_flags |= DQ_MOD;
675 unlock_dquot(dquot);
/* Block-count counterpart of dquot_decr_inodes() above. */
678 static inline void dquot_decr_blocks(struct dquot *dquot, unsigned long number)
680 lock_dquot(dquot);
681 if (dquot->dq_curblocks > number)
682 dquot->dq_curblocks -= number;
683 else
684 dquot->dq_curblocks = 0;
685 if (dquot->dq_curblocks < dquot->dq_bsoftlimit)
686 dquot->dq_btime = (time_t) 0;
687 dquot->dq_flags &= ~DQ_BLKS;
688 dquot->dq_flags |= DQ_MOD;
689 unlock_dquot(dquot);
692 static inline char need_print_warning(short type, uid_t initiator, struct dquot *dquot)
694 switch (type) {
695 case USRQUOTA:
696 return(initiator == dquot->dq_id);
697 case GRPQUOTA:
698 return(initiator == dquot->dq_id);
700 return(0);
/* Root (uid 0) may exceed hard limits unless root-squash quotas
 * are enabled for this quota type on the mount. */
703 static inline char ignore_hardlimit(struct dquot *dquot, uid_t initiator)
705 return(initiator == 0 && dquot->dq_mnt->mnt_dquot.rsquash[dquot->dq_type] == 0);
708 static int check_idq(struct dquot *dquot, short type, u_long short inodes, uid_t initiator,
709 struct tty_struct *tty)
711 if (inodes <= 0 || dquot->dq_flags & DQ_FAKE)
712 return(QUOTA_OK);
714 if (dquot->dq_ihardlimit &&
715 (dquot->dq_curinodes + inodes) > dquot->dq_ihardlimit &&
716 !ignore_hardlimit(dquot, initiator)) {
717 if ((dquot->dq_flags & DQ_INODES) == 0 &&
718 need_print_warning(type, initiator, dquot)) {
719 sprintf(quotamessage, "%s: write failed, %s file limit reached\n",
720 dquot->dq_mnt->mnt_dirname, quotatypes[type]);
721 tty_write_message(tty, quotamessage);
722 dquot->dq_flags |= DQ_INODES;
724 return(NO_QUOTA);
727 if (dquot->dq_isoftlimit &&
728 (dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit &&
729 dquot->dq_itime && CURRENT_TIME >= dquot->dq_itime &&
730 !ignore_hardlimit(dquot, initiator)) {
731 if (need_print_warning(type, initiator, dquot)) {
732 sprintf(quotamessage, "%s: warning, %s file quota exceeded too long.\n",
733 dquot->dq_mnt->mnt_dirname, quotatypes[type]);
734 tty_write_message(tty, quotamessage);
736 return(NO_QUOTA);
739 if (dquot->dq_isoftlimit &&
740 (dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit &&
741 dquot->dq_itime == 0) {
742 if (need_print_warning(type, initiator, dquot)) {
743 sprintf(quotamessage, "%s: warning, %s file quota exceeded\n",
744 dquot->dq_mnt->mnt_dirname, quotatypes[type]);
745 tty_write_message(tty, quotamessage);
747 dquot->dq_itime = CURRENT_TIME + dquot->dq_mnt->mnt_dquot.inode_expire[type];
750 return(QUOTA_OK);
/*
 * Check whether "blocks" more blocks may be charged to "dquot".
 * Mirrors check_idq() for block limits; "warn" additionally gates
 * all tty messages (suppressed during quota transfers).
 */
753 static int check_bdq(struct dquot *dquot, short type, u_long blocks, uid_t initiator,
754 struct tty_struct *tty, char warn)
756 if (blocks <= 0 || dquot->dq_flags & DQ_FAKE)
757 return(QUOTA_OK);
/* Hard limit: refuse (unless root and no root-squash). */
759 if (dquot->dq_bhardlimit &&
760 (dquot->dq_curblocks + blocks) > dquot->dq_bhardlimit &&
761 !ignore_hardlimit(dquot, initiator)) {
762 if (warn && (dquot->dq_flags & DQ_BLKS) == 0 &&
763 need_print_warning(type, initiator, dquot)) {
764 sprintf(quotamessage, "%s: write failed, %s disk limit reached.\n",
765 dquot->dq_mnt->mnt_dirname, quotatypes[type]);
766 tty_write_message(tty, quotamessage);
767 dquot->dq_flags |= DQ_BLKS;
769 return(NO_QUOTA);
/* Soft limit with expired grace period: enforce it. */
772 if (dquot->dq_bsoftlimit &&
773 (dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit &&
774 dquot->dq_btime && CURRENT_TIME >= dquot->dq_btime &&
775 !ignore_hardlimit(dquot, initiator)) {
776 if (warn && need_print_warning(type, initiator, dquot)) {
777 sprintf(quotamessage, "%s: write failed, %s disk quota exceeded too long.\n",
778 dquot->dq_mnt->mnt_dirname, quotatypes[type]);
779 tty_write_message(tty, quotamessage);
781 return(NO_QUOTA);
/* First trip over the soft limit: warn and start the grace timer. */
784 if (dquot->dq_bsoftlimit &&
785 (dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit &&
786 dquot->dq_btime == 0) {
787 if (warn && need_print_warning(type, initiator, dquot)) {
788 sprintf(quotamessage, "%s: warning, %s disk quota exceeded\n",
789 dquot->dq_mnt->mnt_dirname, quotatypes[type]);
790 tty_write_message(tty, quotamessage);
792 dquot->dq_btime = CURRENT_TIME + dquot->dq_mnt->mnt_dquot.block_expire[type];
795 return(QUOTA_OK);
799 * Initialize a dquot-struct with new quota info. This is used by the
800 * system call interface functions.
/*
 * "flags" selects what to copy: SET_QLIMIT (limits), SET_USE (usage),
 * SET_QUOTA (both).  QUOTA_SYSCALL means "dqblk" is a userspace
 * pointer.  id == 0 updates the per-mount grace periods instead.
 */
802 static int set_dqblk(kdev_t dev, int id, short type, int flags, struct dqblk *dqblk)
804 struct dquot *dquot;
805 int error = -EFAULT;
806 struct dqblk dq_dqblk;
808 if (dqblk == (struct dqblk *)NULL)
809 return error;
811 if (flags & QUOTA_SYSCALL) {
812 if (copy_from_user(&dq_dqblk, dqblk, sizeof(struct dqblk)))
813 return(error);
814 } else
815 memcpy((caddr_t)&dq_dqblk, (caddr_t)dqblk, sizeof(struct dqblk));
817 if ((dquot = dqget(dev, id, type)) != NODQUOT) {
818 lock_dquot(dquot);
/* Update the four limits. */
820 if (id > 0 && ((flags & SET_QUOTA) || (flags & SET_QLIMIT))) {
821 dquot->dq_bhardlimit = dq_dqblk.dqb_bhardlimit;
822 dquot->dq_bsoftlimit = dq_dqblk.dqb_bsoftlimit;
823 dquot->dq_ihardlimit = dq_dqblk.dqb_ihardlimit;
824 dquot->dq_isoftlimit = dq_dqblk.dqb_isoftlimit;
/* Update current usage; (re)start or clear grace timers as the
 * usage crosses the soft limits. */
827 if ((flags & SET_QUOTA) || (flags & SET_USE)) {
828 if (dquot->dq_isoftlimit &&
829 dquot->dq_curinodes < dquot->dq_isoftlimit &&
830 dq_dqblk.dqb_curinodes >= dquot->dq_isoftlimit)
831 dquot->dq_itime = CURRENT_TIME + dquot->dq_mnt->mnt_dquot.inode_expire[type];
832 dquot->dq_curinodes = dq_dqblk.dqb_curinodes;
833 if (dquot->dq_curinodes < dquot->dq_isoftlimit)
834 dquot->dq_flags &= ~DQ_INODES;
835 if (dquot->dq_bsoftlimit &&
836 dquot->dq_curblocks < dquot->dq_bsoftlimit &&
837 dq_dqblk.dqb_curblocks >= dquot->dq_bsoftlimit)
838 dquot->dq_btime = CURRENT_TIME + dquot->dq_mnt->mnt_dquot.block_expire[type];
839 dquot->dq_curblocks = dq_dqblk.dqb_curblocks;
840 if (dquot->dq_curblocks < dquot->dq_bsoftlimit)
841 dquot->dq_flags &= ~DQ_BLKS;
/* id 0 carries the mount-wide grace periods, not real quotas. */
844 if (id == 0) {
845 dquot->dq_mnt->mnt_dquot.block_expire[type] = dquot->dq_btime = dq_dqblk.dqb_btime;
846 dquot->dq_mnt->mnt_dquot.inode_expire[type] = dquot->dq_itime = dq_dqblk.dqb_itime;
/* All limits zero means accounting-only (DQ_FAKE). */
849 if (dq_dqblk.dqb_bhardlimit == 0 && dq_dqblk.dqb_bsoftlimit == 0 &&
850 dq_dqblk.dqb_ihardlimit == 0 && dq_dqblk.dqb_isoftlimit == 0)
851 dquot->dq_flags |= DQ_FAKE;
852 else
853 dquot->dq_flags &= ~DQ_FAKE;
855 dquot->dq_flags |= DQ_MOD;
856 unlock_dquot(dquot);
857 dqput(dquot);
859 return(0);
/*
 * Copy the dqblk for (dev, id, type) to userspace.  Returns -ESRCH
 * when quotas are off, -EFAULT on a bad user pointer.
 */
862 static int get_quota(kdev_t dev, int id, short type, struct dqblk *dqblk)
864 struct dquot *dquot;
865 int error = -ESRCH;
867 if (!dev_has_quota_enabled(dev, type))
868 goto out;
869 dquot = dqget(dev, id, type);
870 if (dquot == NODQUOT)
871 goto out;
873 error = -EFAULT;
874 if (dqblk && !copy_to_user(dqblk, &dquot->dq_dqb, sizeof(struct dqblk)))
875 error = 0;
876 dqput(dquot);
877 out:
878 return error;
/* Copy the global dquot statistics to userspace. */
881 static int get_stats(caddr_t addr)
883 int error = -EFAULT;
884 struct dqstats stats;
886 dqstats.allocated_dquots = nr_dquots;
887 dqstats.free_dquots = nr_free_dquots;
889 /* make a copy, in case we page-fault in user space */
890 memcpy(&stats, &dqstats, sizeof(struct dqstats));
891 if (!copy_to_user(addr, &stats, sizeof(struct dqstats)))
892 error = 0;
893 return error;
/* Set the per-mount root-squash flag for a quota type from *addr. */
896 static int quota_root_squash(kdev_t dev, short type, int *addr)
898 struct vfsmount *vfsmnt;
899 int new_value, error;
901 if ((vfsmnt = lookup_vfsmnt(dev)) == (struct vfsmount *)NULL)
902 return(-ENODEV);
904 error = -EFAULT;
905 if (!copy_from_user(&new_value, addr, sizeof(int))) {
906 vfsmnt->mnt_dquot.rsquash[type] = new_value;
907 error = 0;
909 return error;
913 * This is a simple algorithm that calculates the size of a file in blocks.
914 * This is only used on filesystems that do not have an i_blocks count.
916 static u_long isize_to_blocks(size_t isize, size_t blksize)
918 u_long blocks;
919 u_long indirect;
921 if (!blksize)
922 blksize = BLOCK_SIZE;
923 blocks = (isize / blksize) + ((isize % blksize) ? 1 : 0);
924 if (blocks > 10) {
925 indirect = ((blocks - 11) >> 8) + 1; /* single indirect blocks */
926 if (blocks > (10 + 256)) {
927 indirect += ((blocks - 267) >> 16) + 1; /* double indirect blocks */
928 if (blocks > (10 + 256 + (256 << 8)))
929 indirect++; /* triple indirect blocks */
931 blocks += indirect;
933 return(blocks);
937 * Externally referenced functions through dquot_operations in inode.
939 * Note: this is a blocking operation.
/*
 * Attach dquots to an inode for each enabled quota type (or just
 * "type" if != -1).  Only regular files, directories and symlinks
 * are accounted.  dqget() may sleep, so i_dquot[cnt] is rechecked
 * afterwards in case another task initialized it first.
 */
941 void dquot_initialize(struct inode *inode, short type)
943 struct dquot *dquot;
944 unsigned int id = 0;
945 short cnt;
947 if (S_ISREG(inode->i_mode) ||
948 S_ISDIR(inode->i_mode) ||
949 S_ISLNK(inode->i_mode)) {
950 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
951 if (type != -1 && cnt != type)
952 continue;
954 if (!sb_has_quota_enabled(inode->i_sb, cnt))
955 continue;
957 if (inode->i_dquot[cnt] == NODQUOT) {
958 switch (cnt) {
959 case USRQUOTA:
960 id = inode->i_uid;
961 break;
962 case GRPQUOTA:
963 id = inode->i_gid;
964 break;
966 dquot = dqget(inode->i_dev, id, cnt);
/* Lost the race: someone else filled the slot while we slept. */
967 if (inode->i_dquot[cnt] != NODQUOT) {
968 dqput(dquot);
969 continue;
971 inode->i_dquot[cnt] = dquot;
972 inode->i_flags |= S_QUOTA;
979 * Release all quota for the specified inode.
981 * Note: this is a blocking operation.
983 void dquot_drop(struct inode *inode)
985 struct dquot *dquot;
986 short cnt;
988 inode->i_flags &= ~S_QUOTA;
989 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
990 if (inode->i_dquot[cnt] == NODQUOT)
991 continue;
/* Clear the slot before dqput(), which can sleep. */
992 dquot = inode->i_dquot[cnt];
993 inode->i_dquot[cnt] = NODQUOT;
994 dqput(dquot);
999 * Note: this is a blocking operation.
/*
 * Charge "number" blocks against every quota on the inode.
 * All types are checked first, so either all or none are charged.
 * Returns QUOTA_OK or NO_QUOTA.
 */
1001 int dquot_alloc_block(const struct inode *inode, unsigned long number, uid_t initiator,
1002 char warn)
1004 unsigned short cnt;
1005 struct tty_struct *tty = current->tty;
1007 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1008 if (inode->i_dquot[cnt] == NODQUOT)
1009 continue;
1010 if (check_bdq(inode->i_dquot[cnt], cnt, number, initiator, tty, warn))
1011 return(NO_QUOTA);
1014 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1015 if (inode->i_dquot[cnt] == NODQUOT)
1016 continue;
1017 dquot_incr_blocks(inode->i_dquot[cnt], number);
1020 return(QUOTA_OK);
1024 * Note: this is a blocking operation.
/*
 * Charge "number" inodes against every quota on the inode, with
 * the same check-all-then-charge-all pattern as dquot_alloc_block().
 */
1026 int dquot_alloc_inode(const struct inode *inode, unsigned long number, uid_t initiator)
1028 unsigned short cnt;
1029 struct tty_struct *tty = current->tty;
1031 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1032 if (inode->i_dquot[cnt] == NODQUOT)
1033 continue;
1034 if (check_idq(inode->i_dquot[cnt], cnt, number, initiator, tty))
1035 return(NO_QUOTA);
1038 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1039 if (inode->i_dquot[cnt] == NODQUOT)
1040 continue;
1041 dquot_incr_inodes(inode->i_dquot[cnt], number);
1044 return(QUOTA_OK);
1048 * Note: this is a blocking operation.
1050 void dquot_free_block(const struct inode *inode, unsigned long number)
1052 unsigned short cnt;
1054 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1055 if (inode->i_dquot[cnt] == NODQUOT)
1056 continue;
1057 dquot_decr_blocks(inode->i_dquot[cnt], number);
1062 * Note: this is a blocking operation.
1064 void dquot_free_inode(const struct inode *inode, unsigned long number)
1066 unsigned short cnt;
1068 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1069 if (inode->i_dquot[cnt] == NODQUOT)
1070 continue;
1071 dquot_decr_inodes(inode->i_dquot[cnt], number);
1076 * Transfer the number of inode and blocks from one diskquota to an other.
1078 * Note: this is a blocking operation.
/*
 * Move the inode's accounted usage between the old and new owner's
 * dquots on a chown/chgrp ("direction" swaps source and target).
 * Both targets are checked (warnings suppressed for blocks) before
 * anything is charged, so failure leaves all quotas untouched.
 */
1080 int dquot_transfer(struct inode *inode, struct iattr *iattr, char direction, uid_t initiator)
1082 unsigned long blocks;
1083 struct dquot *transfer_from[MAXQUOTAS];
1084 struct dquot *transfer_to[MAXQUOTAS];
1085 struct tty_struct *tty = current->tty;
1086 short cnt, disc;
1089 * Find out if this filesystem uses i_blocks.
1091 if (inode->i_blksize == 0)
1092 blocks = isize_to_blocks(inode->i_size, BLOCK_SIZE)
1093 else
/* i_blocks counts 512-byte sectors; quota counts 1K blocks. */
1094 blocks = (inode->i_blocks / 2);
1097 * Build the transfer_from and transfer_to lists and check quotas to see
1098 * if operation is permitted.
1100 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1101 transfer_from[cnt] = NODQUOT;
1102 transfer_to[cnt] = NODQUOT;
1104 if (!sb_has_quota_enabled(inode->i_sb, cnt))
1105 continue;
1107 switch (cnt) {
1108 case USRQUOTA:
1109 if (inode->i_uid == iattr->ia_uid)
1110 continue;
1111 transfer_from[cnt] = dqget(inode->i_dev, (direction) ? iattr->ia_uid : inode->i_uid, cnt);
1112 transfer_to[cnt] = dqget(inode->i_dev, (direction) ? inode->i_uid : iattr->ia_uid, cnt);
1113 break;
1114 case GRPQUOTA:
1115 if (inode->i_gid == iattr->ia_gid)
1116 continue;
1117 transfer_from[cnt] = dqget(inode->i_dev, (direction) ? iattr->ia_gid : inode->i_gid, cnt);
1118 transfer_to[cnt] = dqget(inode->i_dev, (direction) ? inode->i_gid : iattr->ia_gid, cnt);
1119 break;
/* Refused: drop every reference taken so far and bail out. */
1122 if (check_idq(transfer_to[cnt], cnt, 1, initiator, tty) == NO_QUOTA ||
1123 check_bdq(transfer_to[cnt], cnt, blocks, initiator, tty, 0) == NO_QUOTA) {
1124 for (disc = 0; disc <= cnt; disc++) {
1125 dqput(transfer_from[disc]);
1126 dqput(transfer_to[disc]);
1128 return(NO_QUOTA);
1133 * Finally perform the needed transfer from transfer_from to transfer_to,
1134 * and release any pointers to dquots not needed anymore.
1136 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1138 * Skip changes for same uid or gid or for non-existing quota-type.
1140 if (transfer_from[cnt] == NODQUOT && transfer_to[cnt] == NODQUOT)
1141 continue;
1143 if (transfer_from[cnt] != NODQUOT) {
1144 dquot_decr_inodes(transfer_from[cnt], 1);
1145 dquot_decr_blocks(transfer_from[cnt], blocks);
1148 if (transfer_to[cnt] != NODQUOT) {
1149 dquot_incr_inodes(transfer_to[cnt], 1);
1150 dquot_incr_blocks(transfer_to[cnt], blocks);
/* The inode keeps the reference to transfer_to; release the rest. */
1153 if (inode->i_dquot[cnt] != NODQUOT) {
1154 struct dquot *temp = inode->i_dquot[cnt];
1155 inode->i_dquot[cnt] = transfer_to[cnt];
1156 dqput(temp);
1157 dqput(transfer_from[cnt]);
1158 } else {
1159 dqput(transfer_from[cnt]);
1160 dqput(transfer_to[cnt]);
1164 return(QUOTA_OK);
/*
 * Boot-time initialization: create the dquot SLAB cache and zero
 * the hash table and statistics.
 */
1168 void __init dquot_init_hash(void)
1170 printk(KERN_NOTICE "VFS: Diskquotas version %s initialized\n", __DQUOT_VERSION__);
1172 dquot_cachep = kmem_cache_create("dquot", sizeof(struct dquot),
1173 sizeof(unsigned long) * 4,
1174 SLAB_HWCACHE_ALIGN, NULL, NULL);
1176 if (!dquot_cachep)
1177 panic("Cannot create dquot SLAB cache\n");
1179 memset(dquot_hash, 0, sizeof(dquot_hash));
1180 memset((caddr_t)&dqstats, 0, sizeof(dqstats));
1184 * Definitions of diskquota operations.
/* Operation table installed in sb->dq_op by quota_on(). */
1186 struct dquot_operations dquot_operations = {
1187 dquot_initialize, /* mandatory */
1188 dquot_drop, /* mandatory */
1189 dquot_alloc_block,
1190 dquot_alloc_inode,
1191 dquot_free_block,
1192 dquot_free_inode,
1193 dquot_transfer
1196 static inline void set_enable_flags(struct vfsmount *vfsmnt, short type)
1198 switch (type) {
1199 case USRQUOTA:
1200 vfsmnt->mnt_dquot.flags |= DQUOT_USR_ENABLED;
1201 break;
1202 case GRPQUOTA:
1203 vfsmnt->mnt_dquot.flags |= DQUOT_GRP_ENABLED;
1204 break;
1208 static inline void reset_enable_flags(struct vfsmount *vfsmnt, short type)
1210 switch (type) {
1211 case USRQUOTA:
1212 vfsmnt->mnt_dquot.flags &= ~DQUOT_USR_ENABLED;
1213 break;
1214 case GRPQUOTA:
1215 vfsmnt->mnt_dquot.flags &= ~DQUOT_GRP_ENABLED;
1216 break;
1221 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1223 int quota_off(kdev_t dev, short type)
1225 struct vfsmount *vfsmnt;
1226 struct file *filp;
1227 short cnt;
1229 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1230 if (type != -1 && cnt != type)
1231 continue;
/* The mount is re-looked-up each pass: the blocking calls below
 * may sleep and the mount table can change meanwhile. */
1233 vfsmnt = lookup_vfsmnt(dev);
1234 if (!vfsmnt)
1235 goto out;
1236 if (!vfsmnt->mnt_sb)
1237 goto out;
1238 if (!is_enabled(vfsmnt, cnt))
1239 continue;
/* Disable first so no new dquots are handed out for this type. */
1240 reset_enable_flags(vfsmnt, cnt);
1242 /* Note: these are blocking operations */
1243 reset_dquot_ptrs(dev, cnt);
1244 invalidate_dquots(dev, cnt);
/* Detach and close the quota file, and clear the grace periods. */
1246 filp = vfsmnt->mnt_dquot.files[cnt];
1247 vfsmnt->mnt_dquot.files[cnt] = (struct file *)NULL;
1248 vfsmnt->mnt_dquot.inode_expire[cnt] = 0;
1249 vfsmnt->mnt_dquot.block_expire[cnt] = 0;
1250 fput(filp);
1254 * Check whether any quota is still enabled,
1255 * and if not clear the dq_op pointer.
1257 vfsmnt = lookup_vfsmnt(dev);
1258 if (vfsmnt && vfsmnt->mnt_sb) {
1259 int enabled = 0;
1260 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1261 enabled |= is_enabled(vfsmnt, cnt);
1262 if (!enabled)
1263 vfsmnt->mnt_sb->dq_op = NULL;
1266 out:
1267 return(0);
1270 int quota_on(kdev_t dev, short type, char *path)
1272 struct file *f;
1273 struct vfsmount *vfsmnt;
1274 struct inode *inode;
1275 struct dquot *dquot;
1276 struct quota_mount_options *mnt_dquot;
1277 char *tmp;
1278 int error;
1280 vfsmnt = lookup_vfsmnt(dev);
1281 if (vfsmnt == (struct vfsmount *)NULL)
1282 return -ENODEV;
1284 if (is_enabled(vfsmnt, type))
1285 return -EBUSY;
1286 mnt_dquot = &vfsmnt->mnt_dquot;
1288 tmp = getname(path);
1289 error = PTR_ERR(tmp);
1290 if (IS_ERR(tmp))
1291 return error;
1293 f = filp_open(tmp, O_RDWR, 0600);
1294 putname(tmp);
1295 if (IS_ERR(f))
1296 return PTR_ERR(f);
1298 /* sanity checks */
1299 error = -EIO;
1300 if (!f->f_op->read && !f->f_op->write)
1301 goto cleanup;
1302 inode = f->f_dentry->d_inode;
1303 error = -EACCES;
1304 if (!S_ISREG(inode->i_mode))
1305 goto cleanup;
1306 error = -EINVAL;
1307 if (inode->i_size == 0 || (inode->i_size % sizeof(struct dqblk)) != 0)
1308 goto cleanup;
1310 /* OK, there we go */
1311 set_enable_flags(vfsmnt, type);
1312 mnt_dquot->files[type] = f;
1314 dquot = dqget(dev, 0, type);
1315 mnt_dquot->inode_expire[type] = (dquot) ? dquot->dq_itime : MAX_IQ_TIME;
1316 mnt_dquot->block_expire[type] = (dquot) ? dquot->dq_btime : MAX_DQ_TIME;
1317 dqput(dquot);
1319 vfsmnt->mnt_sb->dq_op = &dquot_operations;
1320 add_dquot_ref(dev, type);
1322 return(0);
1324 cleanup:
1325 fput(f);
1326 return error;
/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 *
 * cmd packs the sub-command in its high bits (SUBCMDSHIFT) and the
 * quota type in its low bits (SUBCMDMASK).  'special' names the block
 * device, 'id' the uid/gid being queried/changed, and 'addr' an
 * operation-specific user buffer.  Runs under the big kernel lock.
 */
asmlinkage int sys_quotactl(int cmd, const char *special, int id, caddr_t addr)
{
	int cmds = 0, type = 0, flags = 0;
	kdev_t dev;
	int ret = -EINVAL;

	lock_kernel();
	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	if ((u_int) type >= MAXQUOTAS)
		goto out;
	/* Permission checks, by sub-command. */
	ret = -EPERM;
	switch (cmds) {
		case Q_SYNC:
		case Q_GETSTATS:
			/* Anyone may sync or read global stats. */
			break;
		case Q_GETQUOTA:
			/*
			 * A user may read his own quota (euid/egid match);
			 * reading others needs CAP_SYS_RESOURCE.
			 */
			if (((type == USRQUOTA && current->euid != id) ||
			     (type == GRPQUOTA && current->egid != id)) &&
			    !capable(CAP_SYS_RESOURCE))
				goto out;
			break;
		default:
			/* All modifying sub-commands are privileged. */
			if (!capable(CAP_SYS_RESOURCE))
				goto out;
	}

	ret = -EINVAL;
	dev = 0;
	/*
	 * Resolve 'special' to a block device, except for Q_SYNC and
	 * Q_GETSTATS with a NULL path, which apply globally.  Note the
	 * '||': a non-NULL path is resolved even for those commands.
	 */
	if (special != NULL || (cmds != Q_SYNC && cmds != Q_GETSTATS)) {
		mode_t mode;
		struct dentry * dentry;

		dentry = namei(special);
		if (IS_ERR(dentry))
			goto out;

		dev = dentry->d_inode->i_rdev;
		mode = dentry->d_inode->i_mode;
		dput(dentry);

		ret = -ENOTBLK;
		if (!S_ISBLK(mode))
			goto out;
	}

	/* Dispatch.  The Q_SET* cases fall through to set_dqblk(). */
	ret = -EINVAL;
	switch (cmds) {
		case Q_QUOTAON:
			ret = quota_on(dev, type, (char *) addr);
			goto out;
		case Q_QUOTAOFF:
			ret = quota_off(dev, type);
			goto out;
		case Q_GETQUOTA:
			ret = get_quota(dev, id, type, (struct dqblk *) addr);
			goto out;
		case Q_SETQUOTA:
			flags |= SET_QUOTA;
			break;
		case Q_SETUSE:
			flags |= SET_USE;
			break;
		case Q_SETQLIM:
			flags |= SET_QLIMIT;
			break;
		case Q_SYNC:
			ret = sync_dquots(dev, type);
			goto out;
		case Q_GETSTATS:
			ret = get_stats(addr);
			goto out;
		case Q_RSQUASH:
			ret = quota_root_squash(dev, type, (int *) addr);
			goto out;
		default:
			goto out;
	}

	flags |= QUOTA_SYSCALL;

	/* Only reached for the Q_SET* commands accumulated above. */
	ret = -ESRCH;
	if (dev_has_quota_enabled(dev, type))
		ret = set_dqblk(dev, id, type, flags, (struct dqblk *) addr);
out:
	unlock_kernel();
	return ret;
}