fs/xfs/quota/xfs_qm_syscalls.c
1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 #include <linux/capability.h>
21 #include "xfs.h"
22 #include "xfs_fs.h"
23 #include "xfs_bit.h"
24 #include "xfs_log.h"
25 #include "xfs_inum.h"
26 #include "xfs_trans.h"
27 #include "xfs_sb.h"
28 #include "xfs_ag.h"
29 #include "xfs_alloc.h"
30 #include "xfs_quota.h"
31 #include "xfs_mount.h"
32 #include "xfs_bmap_btree.h"
33 #include "xfs_inode.h"
34 #include "xfs_itable.h"
35 #include "xfs_bmap.h"
36 #include "xfs_rtalloc.h"
37 #include "xfs_error.h"
38 #include "xfs_attr.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_utils.h"
41 #include "xfs_qm.h"
42 #include "xfs_trace.h"
44 #ifdef DEBUG
45 # define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
46 #else
47 # define qdprintk(s, args...) do { } while (0)
48 #endif
50 STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
51 STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
52 uint);
53 STATIC uint xfs_qm_export_flags(uint);
54 STATIC uint xfs_qm_export_qtype_flags(uint);
55 STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
56 fs_disk_quota_t *);
60 * Turn off quota accounting and/or enforcement for all udquots and/or
61 * gdquots. Called only at unmount time.
63 * This assumes that there are no dquots of this file system cached
64 * incore, and modifies the ondisk dquot directly. Therefore, for example,
65 * it is an error to call this twice, without purging the cache.
67 int
68 xfs_qm_scall_quotaoff(
69 xfs_mount_t *mp,
70 uint flags)
72 struct xfs_quotainfo *q = mp->m_quotainfo;
73 uint dqtype;
74 int error;
75 uint inactivate_flags;
76 xfs_qoff_logitem_t *qoffstart;
77 int nculprits;
80 * No file system can have quotas enabled on disk but not in core.
81 * Note that quota utilities (like quotaoff) _expect_
82 * errno == EEXIST here.
84 if ((mp->m_qflags & flags) == 0)
85 return XFS_ERROR(EEXIST);
86 error = 0;
88 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
91 * We don't want to deal with two quotaoffs messing up each other,
92 * so we're going to serialize it. quotaoff isn't exactly a performance
93 * critical thing.
94 * If quotaoff, then we must be dealing with the root filesystem.
96 ASSERT(q);
97 mutex_lock(&q->qi_quotaofflock);
100 * If we're just turning off quota enforcement, change mp and go.
102 if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
103 mp->m_qflags &= ~(flags);
105 spin_lock(&mp->m_sb_lock);
106 mp->m_sb.sb_qflags = mp->m_qflags;
107 spin_unlock(&mp->m_sb_lock);
108 mutex_unlock(&q->qi_quotaofflock);
110 /* XXX what to do if error ? Revert back to old vals incore ? */
111 error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
112 return (error);
115 dqtype = 0;
116 inactivate_flags = 0;
118 * If accounting is turned off, we must also turn enforcement off and
119 * clear the quota 'CHKD' certificate, so that it is known that a
120 * quotacheck is needed the next time this quota type is turned on.
122 if (flags & XFS_UQUOTA_ACCT) {
123 dqtype |= XFS_QMOPT_UQUOTA;
124 flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
125 inactivate_flags |= XFS_UQUOTA_ACTIVE;
127 if (flags & XFS_GQUOTA_ACCT) {
128 dqtype |= XFS_QMOPT_GQUOTA;
129 flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
130 inactivate_flags |= XFS_GQUOTA_ACTIVE;
131 } else if (flags & XFS_PQUOTA_ACCT) {
132 dqtype |= XFS_QMOPT_PQUOTA;
133 flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
134 inactivate_flags |= XFS_PQUOTA_ACTIVE;
138 * Nothing to do? Don't complain. This happens when we're just
139 * turning off quota enforcement.
141 if ((mp->m_qflags & flags) == 0)
142 goto out_unlock;
145 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
146 * and synchronously. If we fail to write, we should abort the
147 * operation as it cannot be recovered safely if we crash.
149 error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
150 if (error)
151 goto out_unlock;
154 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
155 * to take care of the race between dqget and quotaoff. We don't take
156 * any special locks to reset these bits. All processes need to check
157 * these bits *after* taking inode lock(s) to see if the particular
158 * quota type is in the process of being turned off. If *ACTIVE, it is
159 * guaranteed that all the dquot structures and quotainode pointers will
160 * stay valid as long as that inode is kept locked.
162 * There is no turning back after this.
164 mp->m_qflags &= ~inactivate_flags;
167 * Give back all the dquot reference(s) held by inodes.
168 * Here we go thru every single incore inode in this file system, and
169 * do a dqrele on the i_udquot/i_gdquot that it may have.
170 * Essentially, as long as somebody has an inode locked, this guarantees
171 * that quotas will not be turned off. This is handy because in a
172 * transaction once we lock the inode(s) and check for quotaon, we can
173 * depend on the quota inodes (and other things) being valid as long as
174 * we keep the lock(s).
176 xfs_qm_dqrele_all_inodes(mp, flags);
179 * Next we make the changes in the quota flag in the mount struct.
180 * This isn't protected by a particular lock directly, because we
181 * don't want to take a mrlock every time we depend on quotas being on.
183 mp->m_qflags &= ~(flags);
186 * Go through all the dquots of this file system and purge them,
187 * according to what was turned off. We may not be able to get rid
188 * of all dquots, because dquots can have temporary references that
189 * are not attached to inodes. eg. xfs_setattr, xfs_create.
190 * So, if we couldn't purge all the dquots from the filesystem,
191 * we can't get rid of the incore data structures.
193 while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
194 delay(10 * nculprits);
197 * Transactions that had started before ACTIVE state bit was cleared
198 * could have logged many dquots, so they'd have higher LSNs than
199 * the first QUOTAOFF log record does. If we happen to crash when
200 * the tail of the log has gone past the QUOTAOFF record, but
201 * before the last dquot modification, those dquots __will__
202 * recover, and that's not good.
204 * So, we have QUOTAOFF start and end logitems; the start
205 * logitem won't get overwritten until the end logitem appears...
207 error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
208 if (error) {
209 /* We're screwed now. Shutdown is the only option. */
210 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
211 goto out_unlock;
215 * If quotas are completely disabled, close shop.
217 if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
218 ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
219 mutex_unlock(&q->qi_quotaofflock);
220 xfs_qm_destroy_quotainfo(mp);
221 return (0);
225 * Release our quotainode references if we don't need them anymore.
227 if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
228 IRELE(q->qi_uquotaip);
229 q->qi_uquotaip = NULL;
231 if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
232 IRELE(q->qi_gquotaip);
233 q->qi_gquotaip = NULL;
236 out_unlock:
237 mutex_unlock(&q->qi_quotaofflock);
238 return error;
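/*
 * Truncate a single quota inode back to zero length, freeing the
 * on-disk dquot blocks it holds.  A NULLFSINO inode number is a no-op.
 */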
241 STATIC int
242 xfs_qm_scall_trunc_qfile(
243 struct xfs_mount *mp,
244 xfs_ino_t ino)
246 struct xfs_inode *ip;
247 struct xfs_trans *tp;
248 int error;
250 if (ino == NULLFSINO)
251 return 0;
253 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
254 if (error)
255 return error;
257 xfs_ilock(ip, XFS_IOLOCK_EXCL);
259 tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
260 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
261 XFS_TRANS_PERM_LOG_RES,
262 XFS_ITRUNCATE_LOG_COUNT);
263 if (error) {
264 xfs_trans_cancel(tp, 0);
265 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
266 goto out_put;
269 xfs_ilock(ip, XFS_ILOCK_EXCL);
270 xfs_trans_ijoin(tp, ip);
272 error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
273 if (error) {
274 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
275 XFS_TRANS_ABORT);
276 goto out_unlock;
279 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
280 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
282 out_unlock:
283 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
284 out_put:
285 IRELE(ip);
286 return error;
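/*
 * Truncate the user and/or group/project quota files, as selected by
 * the XFS_DQ_* flags.  Returns the first error encountered.
 */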
289 int
290 xfs_qm_scall_trunc_qfiles(
291 xfs_mount_t *mp,
292 uint flags)
294 int error = 0, error2 = 0;
296 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
297 qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
298 return XFS_ERROR(EINVAL);
301 if (flags & XFS_DQ_USER)
302 error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
303 if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
304 error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
306 return error ? error : error2;
310 * Switch on (a given) quota enforcement for a filesystem. This takes
311 * effect immediately.
312 * (Switching on quota accounting must be done at mount time.)
314 int
315 xfs_qm_scall_quotaon(
316 xfs_mount_t *mp,
317 uint flags)
319 int error;
320 uint qf;
321 uint accflags;
322 __int64_t sbflags;
324 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
326 * Switching on quota accounting must be done at mount time.
328 accflags = flags & XFS_ALL_QUOTA_ACCT;
329 flags &= ~(XFS_ALL_QUOTA_ACCT);
331 sbflags = 0;
333 if (flags == 0) {
334 qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags);
335 return XFS_ERROR(EINVAL);
338 /* No fs can turn on quotas with a delayed effect */
339 ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
342 * Can't enforce without accounting. We check the superblock
343 * qflags here instead of m_qflags because the root filesystem can have
344 * quota accounting enabled on disk without m_qflags knowing about it.
346 if (((flags & XFS_UQUOTA_ACCT) == 0 &&
347 (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
348 (flags & XFS_UQUOTA_ENFD))
349 ||
350 ((flags & XFS_PQUOTA_ACCT) == 0 &&
351 (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
352 (flags & XFS_GQUOTA_ACCT) == 0 &&
353 (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
354 (flags & XFS_OQUOTA_ENFD))) {
355 qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n",
356 flags, mp->m_sb.sb_qflags);
357 return XFS_ERROR(EINVAL);
360 * If everything's up-to-date incore, then don't waste time.
362 if ((mp->m_qflags & flags) == flags)
363 return XFS_ERROR(EEXIST);
366 * Change sb_qflags on disk but not incore mp->qflags
367 * if this is the root filesystem.
369 spin_lock(&mp->m_sb_lock);
370 qf = mp->m_sb.sb_qflags;
371 mp->m_sb.sb_qflags = qf | flags;
372 spin_unlock(&mp->m_sb_lock);
375 * There's nothing to change if it's the same.
377 if ((qf & flags) == flags && sbflags == 0)
378 return XFS_ERROR(EEXIST);
379 sbflags |= XFS_SB_QFLAGS;
381 if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
382 return (error);
384 * If we aren't trying to switch on quota enforcement, we are done.
386 if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
387 (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
388 ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
389 (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
390 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
391 (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
392 (flags & XFS_ALL_QUOTA_ENFD) == 0)
393 return (0);
395 if (! XFS_IS_QUOTA_RUNNING(mp))
396 return XFS_ERROR(ESRCH);
399 * Switch on quota enforcement in core.
401 mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
402 mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
403 mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
405 return (0);
410 * Return quota status information, such as uquota-off, enforcements, etc.
412 int
413 xfs_qm_scall_getqstat(
414 struct xfs_mount *mp,
415 struct fs_quota_stat *out)
417 struct xfs_quotainfo *q = mp->m_quotainfo;
418 struct xfs_inode *uip, *gip;
419 boolean_t tempuqip, tempgqip;
421 uip = gip = NULL;
422 tempuqip = tempgqip = B_FALSE;
423 memset(out, 0, sizeof(fs_quota_stat_t));
425 out->qs_version = FS_QSTAT_VERSION;
426 if (!xfs_sb_version_hasquota(&mp->m_sb)) {
427 out->qs_uquota.qfs_ino = NULLFSINO;
428 out->qs_gquota.qfs_ino = NULLFSINO;
429 return (0);
431 out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
432 (XFS_ALL_QUOTA_ACCT|
433 XFS_ALL_QUOTA_ENFD));
434 out->qs_pad = 0;
435 out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
436 out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
438 if (q) {
439 uip = q->qi_uquotaip;
440 gip = q->qi_gquotaip;
442 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
443 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
444 0, 0, &uip) == 0)
445 tempuqip = B_TRUE;
447 if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
448 if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
449 0, 0, &gip) == 0)
450 tempgqip = B_TRUE;
452 if (uip) {
453 out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
454 out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
455 if (tempuqip)
456 IRELE(uip);
458 if (gip) {
459 out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
460 out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
461 if (tempgqip)
462 IRELE(gip);
464 if (q) {
465 out->qs_incoredqs = q->qi_dquots;
466 out->qs_btimelimit = q->qi_btimelimit;
467 out->qs_itimelimit = q->qi_itimelimit;
468 out->qs_rtbtimelimit = q->qi_rtbtimelimit;
469 out->qs_bwarnlimit = q->qi_bwarnlimit;
470 out->qs_iwarnlimit = q->qi_iwarnlimit;
472 return 0;
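/*
 * The only fields a setqlim call may modify: limits, timers and
 * warning counts.
 */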
475 #define XFS_DQ_MASK \
476 (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
479 * Adjust quota limits, and start/stop timers accordingly.
481 int
482 xfs_qm_scall_setqlim(
483 xfs_mount_t *mp,
484 xfs_dqid_t id,
485 uint type,
486 fs_disk_quota_t *newlim)
488 struct xfs_quotainfo *q = mp->m_quotainfo;
489 xfs_disk_dquot_t *ddq;
490 xfs_dquot_t *dqp;
491 xfs_trans_t *tp;
492 int error;
493 xfs_qcnt_t hard, soft;
495 if (newlim->d_fieldmask & ~XFS_DQ_MASK)
496 return EINVAL;
497 if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
498 return 0;
500 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
501 if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
502 0, 0, XFS_DEFAULT_LOG_COUNT))) {
503 xfs_trans_cancel(tp, 0);
504 return (error);
508 * We don't want to race with a quotaoff so take the quotaoff lock.
509 * (We don't hold an inode lock, so there's nothing else to stop
510 * a quotaoff from happening). (XXXThis doesn't currently happen
511 * because we take the vfslock before calling xfs_qm_sysent).
513 mutex_lock(&q->qi_quotaofflock);
516 * Get the dquot (locked), and join it to the transaction.
517 * Allocate the dquot if this doesn't exist.
519 if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
520 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
521 ASSERT(error != ENOENT);
522 goto out_unlock;
524 xfs_trans_dqjoin(tp, dqp);
525 ddq = &dqp->q_core;
528 * Make sure that hardlimits are >= soft limits before changing.
530 hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
531 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
532 be64_to_cpu(ddq->d_blk_hardlimit);
533 soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
534 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
535 be64_to_cpu(ddq->d_blk_softlimit);
536 if (hard == 0 || hard >= soft) {
537 ddq->d_blk_hardlimit = cpu_to_be64(hard);
538 ddq->d_blk_softlimit = cpu_to_be64(soft);
539 if (id == 0) {
540 q->qi_bhardlimit = hard;
541 q->qi_bsoftlimit = soft;
543 } else {
544 qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
546 hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
547 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
548 be64_to_cpu(ddq->d_rtb_hardlimit);
549 soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
550 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
551 be64_to_cpu(ddq->d_rtb_softlimit);
552 if (hard == 0 || hard >= soft) {
553 ddq->d_rtb_hardlimit = cpu_to_be64(hard);
554 ddq->d_rtb_softlimit = cpu_to_be64(soft);
555 if (id == 0) {
556 q->qi_rtbhardlimit = hard;
557 q->qi_rtbsoftlimit = soft;
559 } else {
560 qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
563 hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
564 (xfs_qcnt_t) newlim->d_ino_hardlimit :
565 be64_to_cpu(ddq->d_ino_hardlimit);
566 soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
567 (xfs_qcnt_t) newlim->d_ino_softlimit :
568 be64_to_cpu(ddq->d_ino_softlimit);
569 if (hard == 0 || hard >= soft) {
570 ddq->d_ino_hardlimit = cpu_to_be64(hard);
571 ddq->d_ino_softlimit = cpu_to_be64(soft);
572 if (id == 0) {
573 q->qi_ihardlimit = hard;
574 q->qi_isoftlimit = soft;
576 } else {
577 qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
581 * Update warnings counter(s) if requested
583 if (newlim->d_fieldmask & FS_DQ_BWARNS)
584 ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
585 if (newlim->d_fieldmask & FS_DQ_IWARNS)
586 ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
587 if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
588 ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
590 if (id == 0) {
592 * Timelimits for the super user set the relative time
593 * the other users can be over quota for this file system.
594 * If it is zero a default is used. Ditto for the default
595 * soft and hard limit values (already done, above), and
596 * for warnings.
598 if (newlim->d_fieldmask & FS_DQ_BTIMER) {
599 q->qi_btimelimit = newlim->d_btimer;
600 ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
602 if (newlim->d_fieldmask & FS_DQ_ITIMER) {
603 q->qi_itimelimit = newlim->d_itimer;
604 ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
606 if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
607 q->qi_rtbtimelimit = newlim->d_rtbtimer;
608 ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
610 if (newlim->d_fieldmask & FS_DQ_BWARNS)
611 q->qi_bwarnlimit = newlim->d_bwarns;
612 if (newlim->d_fieldmask & FS_DQ_IWARNS)
613 q->qi_iwarnlimit = newlim->d_iwarns;
614 if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
615 q->qi_rtbwarnlimit = newlim->d_rtbwarns;
616 } else {
618 * If the user is now over quota, start the timelimit.
619 * The user will not be 'warned'.
620 * Note that we keep the timers ticking, whether enforcement
621 * is on or off. We don't really want to bother with iterating
622 * over all ondisk dquots and turning the timers on/off.
624 xfs_qm_adjust_dqtimers(mp, ddq);
626 dqp->dq_flags |= XFS_DQ_DIRTY;
627 xfs_trans_log_dquot(tp, dqp);
629 error = xfs_trans_commit(tp, 0);
630 xfs_qm_dqprint(dqp);
631 xfs_qm_dqrele(dqp);
633 out_unlock:
634 mutex_unlock(&q->qi_quotaofflock);
635 return error;
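/*
 * Return the quota information for a single dquot, identified by id and
 * type, in the exportable fs_disk_quota format.
 */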
638 int
639 xfs_qm_scall_getquota(
640 xfs_mount_t *mp,
641 xfs_dqid_t id,
642 uint type,
643 fs_disk_quota_t *out)
645 xfs_dquot_t *dqp;
646 int error;
649 * Try to get the dquot. We don't want it allocated on disk, so
650 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
651 * exist, we'll get ENOENT back.
653 if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
654 return (error);
658 * If everything's NULL, this dquot doesn't quite exist as far as
659 * our utility programs are concerned.
661 if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
662 xfs_qm_dqput(dqp);
663 return XFS_ERROR(ENOENT);
665 /* xfs_qm_dqprint(dqp); */
667 * Convert the disk dquot to the exportable format
669 xfs_qm_export_dquot(mp, &dqp->q_core, out);
670 xfs_qm_dqput(dqp);
671 return (error ? XFS_ERROR(EFAULT) : 0);
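/*
 * Log the QUOTAOFF end record.  This pairs with the start record written
 * by xfs_qm_log_quotaoff(); once it is safely on disk the quotaoff is
 * complete.
 */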
675 STATIC int
676 xfs_qm_log_quotaoff_end(
677 xfs_mount_t *mp,
678 xfs_qoff_logitem_t *startqoff,
679 uint flags)
681 xfs_trans_t *tp;
682 int error;
683 xfs_qoff_logitem_t *qoffi;
685 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
687 if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
688 0, 0, XFS_DEFAULT_LOG_COUNT))) {
689 xfs_trans_cancel(tp, 0);
690 return (error);
693 qoffi = xfs_trans_get_qoff_item(tp, startqoff,
694 flags & XFS_ALL_QUOTA_ACCT);
695 xfs_trans_log_quotaoff_item(tp, qoffi);
698 * We have to make sure that the transaction is secure on disk before we
699 * return and actually stop quota accounting. So, make it synchronous.
700 * We don't care about quotaoff's performance.
702 xfs_trans_set_sync(tp);
703 error = xfs_trans_commit(tp, 0);
704 return (error);
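/*
 * Log the QUOTAOFF start record and the matching superblock qflags change
 * in one synchronous transaction.  On failure the old sb_qflags value is
 * restored.
 */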
708 STATIC int
709 xfs_qm_log_quotaoff(
710 xfs_mount_t *mp,
711 xfs_qoff_logitem_t **qoffstartp,
712 uint flags)
714 xfs_trans_t *tp;
715 int error;
716 xfs_qoff_logitem_t *qoffi=NULL;
717 uint oldsbqflag=0;
719 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
720 if ((error = xfs_trans_reserve(tp, 0,
721 sizeof(xfs_qoff_logitem_t) * 2 +
722 mp->m_sb.sb_sectsize + 128,
725 XFS_DEFAULT_LOG_COUNT))) {
726 goto error0;
729 qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
730 xfs_trans_log_quotaoff_item(tp, qoffi);
732 spin_lock(&mp->m_sb_lock);
733 oldsbqflag = mp->m_sb.sb_qflags;
734 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
735 spin_unlock(&mp->m_sb_lock);
737 xfs_mod_sb(tp, XFS_SB_QFLAGS);
740 * We have to make sure that the transaction is secure on disk before we
741 * return and actually stop quota accounting. So, make it synchronous.
742 * We don't care about quotaoff's performance.
744 xfs_trans_set_sync(tp);
745 error = xfs_trans_commit(tp, 0);
747 error0:
748 if (error) {
749 xfs_trans_cancel(tp, 0);
751 * No one else is modifying sb_qflags, so this is OK.
752 * We still hold the quotaofflock.
754 spin_lock(&mp->m_sb_lock);
755 mp->m_sb.sb_qflags = oldsbqflag;
756 spin_unlock(&mp->m_sb_lock);
758 *qoffstartp = qoffi;
759 return (error);
764 * Translate an internal style on-disk-dquot to the exportable format.
765 * The main differences are that the counters/limits are all in Basic
766 * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
767 * to be converted to the native endianness.
769 STATIC void
770 xfs_qm_export_dquot(
771 xfs_mount_t *mp,
772 xfs_disk_dquot_t *src,
773 struct fs_disk_quota *dst)
775 memset(dst, 0, sizeof(*dst));
776 dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */
777 dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
778 dst->d_id = be32_to_cpu(src->d_id);
779 dst->d_blk_hardlimit =
780 XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
781 dst->d_blk_softlimit =
782 XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
783 dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
784 dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
785 dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
786 dst->d_icount = be64_to_cpu(src->d_icount);
787 dst->d_btimer = be32_to_cpu(src->d_btimer);
788 dst->d_itimer = be32_to_cpu(src->d_itimer);
789 dst->d_iwarns = be16_to_cpu(src->d_iwarns);
790 dst->d_bwarns = be16_to_cpu(src->d_bwarns);
791 dst->d_rtb_hardlimit =
792 XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
793 dst->d_rtb_softlimit =
794 XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
795 dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
796 dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
797 dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);
800 * Internally, we don't reset all the timers when quota enforcement
801 * gets turned off. No need to confuse the user level code,
802 * so return zeroes in that case.
804 if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
805 (!XFS_IS_OQUOTA_ENFORCED(mp) &&
806 (src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
807 dst->d_btimer = 0;
808 dst->d_itimer = 0;
809 dst->d_rtbtimer = 0;
812 #ifdef DEBUG
813 if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
814 (XFS_IS_OQUOTA_ENFORCED(mp) &&
815 (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
816 dst->d_id != 0) {
817 if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
818 (dst->d_blk_softlimit > 0)) {
819 ASSERT(dst->d_btimer != 0);
821 if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
822 (dst->d_ino_softlimit > 0)) {
823 ASSERT(dst->d_itimer != 0);
826 #endif
829 STATIC uint
830 xfs_qm_export_qtype_flags(
831 uint flags)
834 * Exactly one quota type flag (user, group or project) must be set.
836 ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
837 (FS_PROJ_QUOTA | FS_USER_QUOTA));
838 ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
839 (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
840 ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
841 (FS_USER_QUOTA | FS_GROUP_QUOTA));
842 ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
844 return (flags & XFS_DQ_USER) ?
845 FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
846 FS_PROJ_QUOTA : FS_GROUP_QUOTA;
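/*
 * Translate the internal XFS_*QUOTA_ACCT/ENFD mount flags into the
 * FS_QUOTA_* flags reported to user space.
 */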
849 STATIC uint
850 xfs_qm_export_flags(
851 uint flags)
853 uint uflags;
855 uflags = 0;
856 if (flags & XFS_UQUOTA_ACCT)
857 uflags |= FS_QUOTA_UDQ_ACCT;
858 if (flags & XFS_PQUOTA_ACCT)
859 uflags |= FS_QUOTA_PDQ_ACCT;
860 if (flags & XFS_GQUOTA_ACCT)
861 uflags |= FS_QUOTA_GDQ_ACCT;
862 if (flags & XFS_UQUOTA_ENFD)
863 uflags |= FS_QUOTA_UDQ_ENFD;
864 if (flags & (XFS_OQUOTA_ENFD)) {
865 uflags |= (flags & XFS_GQUOTA_ACCT) ?
866 FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
868 return (uflags);
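/*
 * Release the user and group/project dquot references held by a single
 * inode.  Used as the callback for xfs_inode_ag_iterator() in
 * xfs_qm_dqrele_all_inodes() below.
 */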
872 STATIC int
873 xfs_dqrele_inode(
874 struct xfs_inode *ip,
875 struct xfs_perag *pag,
876 int flags)
878 int error;
880 /* skip quota inodes */
881 if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
882 ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
883 ASSERT(ip->i_udquot == NULL);
884 ASSERT(ip->i_gdquot == NULL);
885 read_unlock(&pag->pag_ici_lock);
886 return 0;
889 error = xfs_sync_inode_valid(ip, pag);
890 if (error)
891 return error;
893 xfs_ilock(ip, XFS_ILOCK_EXCL);
894 if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
895 xfs_qm_dqrele(ip->i_udquot);
896 ip->i_udquot = NULL;
898 if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
899 xfs_qm_dqrele(ip->i_gdquot);
900 ip->i_gdquot = NULL;
902 xfs_iunlock(ip, XFS_ILOCK_EXCL);
904 IRELE(ip);
905 return 0;
910 * Go thru all the inodes in the file system, releasing their dquots.
912 * Note that the mount structure gets modified to indicate that quotas are off
913 * AFTER this, in the case of quotaoff.
915 void
916 xfs_qm_dqrele_all_inodes(
917 struct xfs_mount *mp,
918 uint flags)
920 ASSERT(mp->m_quotainfo);
921 xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
922 XFS_ICI_NO_TAG, 0, NULL);
925 /*------------------------------------------------------------------------*/
926 #ifdef DEBUG
928 * This contains all the test functions for XFS disk quotas.
929 * Currently it does a quota accounting check, i.e. it walks through
930 * all inodes in the file system, calculating the dquot accounting fields,
931 * and prints out any inconsistencies.
933 xfs_dqhash_t *qmtest_udqtab;
934 xfs_dqhash_t *qmtest_gdqtab;
935 int qmtest_hashmask;
936 int qmtest_nfails;
937 struct mutex qcheck_lock;
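/*
 * Hash a (mount, id) pair into the shadow dquot tables used by the
 * internal quotacheck below.
 */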
939 #define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
940 (__psunsigned_t)(id)) & \
941 (qmtest_hashmask - 1))
943 #define DQTEST_HASH(mp, id, type) ((type & XFS_DQ_USER) ? \
944 (qmtest_udqtab + \
945 DQTEST_HASHVAL(mp, id)) : \
946 (qmtest_gdqtab + \
947 DQTEST_HASHVAL(mp, id)))
949 #define DQTEST_LIST_PRINT(l, NXT, title) \
951 xfs_dqtest_t *dqp; int i = 0;\
952 cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
953 for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \
954 dqp = (xfs_dqtest_t *)dqp->NXT) { \
955 cmn_err(CE_DEBUG, " %d. \"%d (%s)\" bcnt = %d, icnt = %d", \
956 ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \
957 dqp->d_bcount, dqp->d_icount); } \
960 typedef struct dqtest {
961 uint dq_flags; /* various flags (XFS_DQ_*) */
962 struct list_head q_hashlist;
963 xfs_dqhash_t *q_hash; /* the hashchain header */
964 xfs_mount_t *q_mount; /* filesystem this relates to */
965 xfs_dqid_t d_id; /* user id or group id */
966 xfs_qcnt_t d_bcount; /* # disk blocks owned by the user */
967 xfs_qcnt_t d_icount; /* # inodes owned by the user */
968 } xfs_dqtest_t;
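/* Add a shadow dquot to its hash chain. */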
970 STATIC void
971 xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
973 list_add(&dqp->q_hashlist, &h->qh_list);
974 h->qh_version++;
975 h->qh_nelems++;
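/* Dump one shadow dquot for debugging. */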
977 STATIC void
978 xfs_qm_dqtest_print(
979 xfs_dqtest_t *d)
981 cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------");
982 cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id);
983 cmn_err(CE_DEBUG, "---- fs = 0x%p", d->q_mount);
984 cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
985 d->d_bcount, (int)d->d_bcount);
986 cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)",
987 d->d_icount, (int)d->d_icount);
988 cmn_err(CE_DEBUG, "---------------------------");
991 STATIC void
992 xfs_qm_dqtest_failed(
993 xfs_dqtest_t *d,
994 xfs_dquot_t *dqp,
995 char *reason,
996 xfs_qcnt_t a,
997 xfs_qcnt_t b,
998 int error)
1000 qmtest_nfails++;
1001 if (error)
1002 cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s",
1003 d->d_id, error, reason);
1004 else
1005 cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]",
1006 d->d_id, reason, (int)a, (int)b);
1007 xfs_qm_dqtest_print(d);
1008 if (dqp)
1009 xfs_qm_dqprint(dqp);
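/*
 * Compare a shadow dquot against the corresponding incore dquot: the
 * block and inode counts must match, and the timers must be running for
 * anything that is over its soft limit.  Returns the number of mismatches.
 */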
1012 STATIC int
1013 xfs_dqtest_cmp2(
1014 xfs_dqtest_t *d,
1015 xfs_dquot_t *dqp)
1017 int err = 0;
1018 if (be64_to_cpu(dqp->q_core.d_icount) != d->d_icount) {
1019 xfs_qm_dqtest_failed(d, dqp, "icount mismatch",
1020 be64_to_cpu(dqp->q_core.d_icount),
1021 d->d_icount, 0);
1022 err++;
1024 if (be64_to_cpu(dqp->q_core.d_bcount) != d->d_bcount) {
1025 xfs_qm_dqtest_failed(d, dqp, "bcount mismatch",
1026 be64_to_cpu(dqp->q_core.d_bcount),
1027 d->d_bcount, 0);
1028 err++;
1030 if (dqp->q_core.d_blk_softlimit &&
1031 be64_to_cpu(dqp->q_core.d_bcount) >=
1032 be64_to_cpu(dqp->q_core.d_blk_softlimit)) {
1033 if (!dqp->q_core.d_btimer && dqp->q_core.d_id) {
1034 cmn_err(CE_DEBUG,
1035 "%d [%s] [0x%p] BLK TIMER NOT STARTED",
1036 d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
1037 err++;
1040 if (dqp->q_core.d_ino_softlimit &&
1041 be64_to_cpu(dqp->q_core.d_icount) >=
1042 be64_to_cpu(dqp->q_core.d_ino_softlimit)) {
1043 if (!dqp->q_core.d_itimer && dqp->q_core.d_id) {
1044 cmn_err(CE_DEBUG,
1045 "%d [%s] [0x%p] INO TIMER NOT STARTED",
1046 d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
1047 err++;
1050 #ifdef QUOTADEBUG
1051 if (!err) {
1052 cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked",
1053 d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
1055 #endif
1056 return (err);
1059 STATIC void
1060 xfs_dqtest_cmp(
1061 xfs_dqtest_t *d)
1063 xfs_dquot_t *dqp;
1064 int error;
1066 /* xfs_qm_dqtest_print(d); */
1067 if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0,
1068 &dqp))) {
1069 xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error);
1070 return;
1072 xfs_dqtest_cmp2(d, dqp);
1073 xfs_qm_dqput(dqp);
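/*
 * Look up (or create) the shadow dquot for the given id and type in the
 * quotacheck hash tables.
 */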
1076 STATIC int
1077 xfs_qm_internalqcheck_dqget(
1078 xfs_mount_t *mp,
1079 xfs_dqid_t id,
1080 uint type,
1081 xfs_dqtest_t **O_dq)
1083 xfs_dqtest_t *d;
1084 xfs_dqhash_t *h;
1086 h = DQTEST_HASH(mp, id, type);
1087 list_for_each_entry(d, &h->qh_list, q_hashlist) {
1088 if (d->d_id == id && mp == d->q_mount) {
1089 *O_dq = d;
1090 return (0);
1093 d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP);
1094 d->dq_flags = type;
1095 d->d_id = id;
1096 d->q_mount = mp;
1097 d->q_hash = h;
1098 INIT_LIST_HEAD(&d->q_hashlist);
1099 xfs_qm_hashinsert(h, d);
1100 *O_dq = d;
1101 return (0);
1104 STATIC void
1105 xfs_qm_internalqcheck_get_dquots(
1106 xfs_mount_t *mp,
1107 xfs_dqid_t uid,
1108 xfs_dqid_t projid,
1109 xfs_dqid_t gid,
1110 xfs_dqtest_t **ud,
1111 xfs_dqtest_t **gd)
1113 if (XFS_IS_UQUOTA_ON(mp))
1114 xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud);
1115 if (XFS_IS_GQUOTA_ON(mp))
1116 xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd);
1117 else if (XFS_IS_PQUOTA_ON(mp))
1118 xfs_qm_internalqcheck_dqget(mp, projid, XFS_DQ_PROJ, gd);
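/*
 * Account one inode against a shadow dquot: one inode plus the disk
 * blocks it owns.
 */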
1122 STATIC void
1123 xfs_qm_internalqcheck_dqadjust(
1124 xfs_inode_t *ip,
1125 xfs_dqtest_t *d)
1127 d->d_icount++;
1128 d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks;
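/*
 * bulkstat callback: charge each inode in the filesystem against the
 * shadow user and group/project dquots.
 */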
1131 STATIC int
1132 xfs_qm_internalqcheck_adjust(
1133 xfs_mount_t *mp, /* mount point for filesystem */
1134 xfs_ino_t ino, /* inode number to get data for */
1135 void __user *buffer, /* not used */
1136 int ubsize, /* not used */
1137 int *ubused, /* not used */
1138 int *res) /* bulkstat result code */
1140 xfs_inode_t *ip;
1141 xfs_dqtest_t *ud, *gd;
1142 uint lock_flags;
1143 boolean_t ipreleased;
1144 int error;
1146 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1148 if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
1149 *res = BULKSTAT_RV_NOTHING;
1150 qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n",
1151 (unsigned long long) ino,
1152 (unsigned long long) mp->m_sb.sb_uquotino,
1153 (unsigned long long) mp->m_sb.sb_gquotino);
1154 return XFS_ERROR(EINVAL);
1156 ipreleased = B_FALSE;
1157 again:
1158 lock_flags = XFS_ILOCK_SHARED;
1159 if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
1160 *res = BULKSTAT_RV_NOTHING;
1161 return (error);
1165 * This inode can have blocks after eof which can get released
1166 * when we send it to inactive. Since we don't check the dquot
1167 * until after all our calculations are done, we must get rid
1168 * of those now.
1170 if (! ipreleased) {
1171 xfs_iunlock(ip, lock_flags);
1172 IRELE(ip);
1173 ipreleased = B_TRUE;
1174 goto again;
1176 xfs_qm_internalqcheck_get_dquots(mp,
1177 (xfs_dqid_t) ip->i_d.di_uid,
1178 (xfs_dqid_t) ip->i_d.di_projid,
1179 (xfs_dqid_t) ip->i_d.di_gid,
1180 &ud, &gd);
1181 if (XFS_IS_UQUOTA_ON(mp)) {
1182 ASSERT(ud);
1183 xfs_qm_internalqcheck_dqadjust(ip, ud);
1185 if (XFS_IS_OQUOTA_ON(mp)) {
1186 ASSERT(gd);
1187 xfs_qm_internalqcheck_dqadjust(ip, gd);
1189 xfs_iunlock(ip, lock_flags);
1190 IRELE(ip);
1191 *res = BULKSTAT_RV_DIDONE;
1192 return (0);
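/*
 * Walk every inode with bulkstat, rebuild shadow dquot counters, and
 * compare them against the real dquots.  Returns the number of failures.
 */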
1196 /* PRIVATE, debugging */
1197 int
1198 xfs_qm_internalqcheck(
1199 xfs_mount_t *mp)
1201 xfs_ino_t lastino;
1202 int done, count;
1203 int i;
1204 int error;
1206 lastino = 0;
1207 qmtest_hashmask = 32;
1208 count = 5;
1209 done = 0;
1210 qmtest_nfails = 0;
1212 if (! XFS_IS_QUOTA_ON(mp))
1213 return XFS_ERROR(ESRCH);
1215 xfs_log_force(mp, XFS_LOG_SYNC);
1216 XFS_bflush(mp->m_ddev_targp);
1217 xfs_log_force(mp, XFS_LOG_SYNC);
1218 XFS_bflush(mp->m_ddev_targp);
1220 mutex_lock(&qcheck_lock);
1221 /* There should be absolutely no quota activity while this
1222 is going on. */
1223 qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
1224 sizeof(xfs_dqhash_t), KM_SLEEP);
1225 qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
1226 sizeof(xfs_dqhash_t), KM_SLEEP);
1227 do {
1229 * Iterate thru all the inodes in the file system,
1230 * adjusting the corresponding dquot counters
1232 error = xfs_bulkstat(mp, &lastino, &count,
1233 xfs_qm_internalqcheck_adjust,
1234 0, NULL, &done);
1235 if (error) {
1236 cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
1237 break;
1239 } while (!done);
1241 cmn_err(CE_DEBUG, "Checking results against system dquots");
1242 for (i = 0; i < qmtest_hashmask; i++) {
1243 xfs_dqtest_t *d, *n;
1244 xfs_dqhash_t *h;
1246 h = &qmtest_udqtab[i];
1247 list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
1248 xfs_dqtest_cmp(d);
1249 kmem_free(d);
1251 h = &qmtest_gdqtab[i];
1252 list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
1253 xfs_dqtest_cmp(d);
1254 kmem_free(d);
1258 if (qmtest_nfails) {
1259 cmn_err(CE_DEBUG, "******** quotacheck failed ********");
1260 cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails);
1261 } else {
1262 cmn_err(CE_DEBUG, "******** quotacheck successful! ********");
1264 kmem_free(qmtest_udqtab);
1265 kmem_free(qmtest_gdqtab);
1266 mutex_unlock(&qcheck_lock);
1267 return (qmtest_nfails);
1270 #endif /* DEBUG */