/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}
/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}
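/*
 * Usage note (illustrative): the flags select which trees get purged, so
 * unmount passes the union of all three,
 *
 *	xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 *
 * while a quotaoff of only user quotas would pass XFS_QMOPT_UQUOTA.
 */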
/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
	}
}
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
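/*
 * Illustrative call pattern (not part of the original source): attach the
 * user dquot first, then the group dquot with i_udquot as the hint, so the
 * radix tree lookup is skipped whenever the cached hint is still accurate:
 *
 *	error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
 *				    doalloc, NULL, &ip->i_udquot);
 *	error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
 *				    doalloc, ip->i_udquot, &ip->i_gdquot);
 */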
/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on the i_gdquot to be accurate
		 * 100% all the time. It is just a hint, and this will
		 * succeed in general.
		 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}
int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
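/*
 * Usage sketch (illustrative): callers about to charge quota against an
 * unlocked inode typically do
 *
 *	error = xfs_qm_dqattach(ip, 0);
 *	if (error)
 *		return error;
 *
 * before starting a transaction; XFS_QMOPT_DQALLOC is passed instead of 0
 * when missing dquots should be allocated on disk as a side effect.
 */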
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by ddelete.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}
int
xfs_qm_calc_dquots_per_chunk(
	struct xfs_mount	*mp,
	unsigned int		nbblks)	/* basic block units */
{
	unsigned int	ndquots;

	ASSERT(nbblks > 0);
	ndquots = BBTOB(nbblks);
	do_div(ndquots, sizeof(xfs_dqblk_t));

	return ndquots;
}
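/*
 * Worked example (illustrative): a basic block is 512 bytes, so
 * BBTOB(nbblks) == nbblks << 9. For a one-FSB dquot cluster on a
 * 4096-byte block filesystem, nbblks is 8, BBTOB(8) is 4096, and this
 * returns 4096 / sizeof(xfs_dqblk_t) dquots per chunk.
 */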
/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	INIT_LIST_HEAD(&qinf->qi_lru_list);
	qinf->qi_lru_count = 0;
	mutex_init(&qinf->qi_lru_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
							qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.shrink = xfs_qm_shake;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
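/*
 * Worked example (illustrative, with a hypothetical qi_dqperchunk of 30):
 * the chunk at bno covers dquot IDs [firstid, firstid + 29], and the loop
 * advances firstid by 30 for the next block, so dquot IDs map 1:1 onto
 * successive file system blocks of the quota inode.
 */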
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}
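/*
 * Illustrative example: for a realtime inode whose data fork holds two
 * in-core extents of 10 and 20 realtime blocks, the loop above sums the
 * per-extent block counts and yields *O_rtblks == 30.
 */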
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
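/*
 * Bulkstat callback contract, summarizing the code above: *res is set to
 * BULKSTAT_RV_NOTHING when the inode was skipped, BULKSTAT_RV_DIDONE on
 * success, and BULKSTAT_RV_GIVEUP to abort the whole walk on error.
 */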
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error, error2;
	xfs_ino_t	lastino;
	size_t		structsz;
	uint		flags;
	LIST_HEAD	(buffer_list);
	struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					 XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_IS_GQUOTA_ON(mp) ?
					XFS_GQUOTA_CHKD : XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	int			error;
	__int64_t		sbflags = 0;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	return XFS_ERROR(error);
}
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}
STATIC void
xfs_qm_dqreclaim_one(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list,
	struct list_head	*dispose_list)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error;

	if (!xfs_dqlock_nowait(dqp))
		goto out_move_tail;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);

		trace_xfs_dqreclaim_want(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		list_del_init(&dqp->q_lru);
		qi->qi_lru_count--;
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return;
	}

	/*
	 * Try to grab the flush lock. If this dquot is in the process of
	 * getting flushed to disk, we don't want to reclaim it.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_unlock_move_tail;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		trace_xfs_dqreclaim_dirty(dqp);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_move_tail;
		}

		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Give the dquot another try on the freelist, as the
		 * flushing will take some time.
		 */
		goto out_unlock_move_tail;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, dispose_list);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return;

	/*
	 * Move the dquot to the tail of the list so that we don't spin on it.
	 */
out_unlock_move_tail:
	xfs_dqunlock(dqp);
out_move_tail:
	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
}
STATIC int
xfs_qm_shake(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi =
		container_of(shrink, struct xfs_quotainfo, qi_shrinker);
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD		(buffer_list);
	LIST_HEAD		(dispose_list);
	struct xfs_dquot	*dqp;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;
	if (!nr_to_scan)
		goto out;

	mutex_lock(&qi->qi_lru_lock);
	while (!list_empty(&qi->qi_lru_list)) {
		if (nr_to_scan-- <= 0)
			break;
		dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
				       q_lru);
		xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
	}
	mutex_unlock(&qi->qi_lru_lock);

	error = xfs_buf_delwri_submit(&buffer_list);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&dispose_list)) {
		dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

out:
	return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
}
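/*
 * Worked example (illustrative): with 4200 unused dquots on the LRU and
 * the default sysctl_vfs_cache_pressure of 100, the shrinker reports
 * (4200 / 100) * 100 == 4200 reclaimable objects; raising the pressure
 * above 100 overstates the count so dquots are reclaimed more eagerly.
 */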
/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}
/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;

error_rele:
	if (gq)
		xfs_qm_dqrele(gq);
	if (uq)
		xfs_qm_dqrele(uq);
	return error;
}
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
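/*
 * Worked example (illustrative): for an inode owning 100 data blocks, the
 * two xfs_trans_mod_dquot() pairs above log -100 blocks/-1 inode against
 * prevdq and +100 blocks/+1 inode against newdq, so the totals across both
 * dquots are unchanged by the ownership transfer.
 */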
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	int			error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		    xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			gdq_delblks = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				gdq_unres = ip->i_gdquot;
			}
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks);
		ASSERT(udq_unres || gdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return 0;
}
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_OQUOTA_ON(mp));
		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
			ip->i_d.di_gid : xfs_get_projid(ip)) ==
				be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}