/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */
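
/*
 * For reference (paraphrased from xfs_dquot.h as of this writing -- see
 * that header for the authoritative definitions), the q_qlock primitives
 * named above are thin mutex wrappers:
 *
 *	xfs_dqlock(dqp)		-> mutex_lock(&dqp->q_qlock)
 *	xfs_dqunlock(dqp)	-> mutex_unlock(&dqp->q_qlock)
 *
 * The q_flush "lock" is not a mutex at all; see the comment above the
 * init_completion() call in xfs_qm_dqread() below.
 */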

#ifdef DEBUG
xfs_buftarg_t	*xfs_dqerror_target;
int		xfs_do_dqerror;
int		xfs_dqreq_num;
int		xfs_dqerror_mod = 33;
#endif

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_other_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	xfs_quotainfo_t		*q = mp->m_quotainfo;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit)
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
	if (q->qi_bhardlimit && !d->d_blk_hardlimit)
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we just don't reject any quota
 * reservations in that case. We also return 0 as the values of the
 * timers in Q_GETQUOTA calls when enforcement is off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded. They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
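
/*
 * A worked example of the timer logic above (numbers are illustrative):
 * with qi_btimelimit set to one week and a soft block limit of 100, a
 * dquot whose d_bcount first exceeds 100 gets d_btimer set to "now + one
 * week". If a later call finds d_bcount back at or under 100 (and under
 * the hard limit), d_btimer is reset to 0 and the grace period is
 * forgotten. Note that d_bwarns is only ever cleared here, never raised.
 */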

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
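
/*
 * To illustrate the curid computation above with made-up numbers: if
 * qi_dqperchunk is 30 and we are asked to initialize the chunk containing
 * id 73, then curid = 73 - (73 % 30) = 60, and the loop stamps ids 60..89
 * into the thirty on-disk dquots of the chunk.
 */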

static void
xfs_dquot_buf_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	struct xfs_disk_dquot	*ddq;
	xfs_dqid_t		id = 0;
	int			i;

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		int	error;

		ddq = &d[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
				       "xfs_dquot_read_verify");
		if (error) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, d);
			xfs_buf_ioerror(bp, EFSCORRUPTED);
			break;
		}
	}
}

static void
xfs_dquot_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_dquot_buf_verify(bp);
}

void
xfs_dquot_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_dquot_buf_verify(bp);
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t	flist;
	xfs_bmbt_irec_t	map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quotas was turned off while we didn't
	 * have the inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);

	error = xfs_buf_geterror(bp);
	if (error)
		goto error1;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */

	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}

STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. Make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);

	if (error) {
		ASSERT(*bpp == NULL);
		return XFS_ERROR(error);
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
				       dqp->dq_flags & XFS_DQ_ALLTYPES,
				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return XFS_ERROR(EIO);
		}
	}

	return 0;
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	xfs_bmbt_irec_t map;
	int		nmaps = 1, error;
	xfs_buf_t	*bp;
	xfs_inode_t	*quotip = XFS_DQ_TO_QIP(dqp);
	xfs_mount_t	*mp = dqp->q_mount;
	xfs_dqid_t	id = be32_to_cpu(dqp->q_core.d_id);
	xfs_trans_t	*tp = (tpp ? *tpp : NULL);

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	xfs_ilock(quotip, XFS_ILOCK_SHARED);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas was turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		return ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return XFS_ERROR(error);
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return (0);
}

/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);
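
	/*
	 * This is what makes the q_flush completion behave like a
	 * trylock-able flush "lock" (paraphrased from xfs_dquot.h as of
	 * this writing; see that header for the authoritative definitions):
	 *
	 *	xfs_dqflock(dqp)	 -> wait_for_completion(&dqp->q_flush)
	 *	xfs_dqflock_nowait(dqp)	 -> try_wait_for_completion(&dqp->q_flush)
	 *	xfs_dqfunlock(dqp)	 -> complete(&dqp->q_flush)
	 *
	 * The complete() call above leaves exactly one "unlock" pending,
	 * so the first locker gets in without blocking.
	 */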

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
					  XFS_WRITE_LOG_RES(mp) +
					  /*
					   * Round the chunklen up to the next
					   * multiple of 128 (buf log item
					   * chunk size).
					   */
					  BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
					  0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_WRITE_LOG_COUNT);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
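
	/*
	 * In other words (illustrative numbers): if d_bcount on disk is 40
	 * and a transaction later reserves 10 more blocks, q_res_bcount
	 * becomes 50, and enforcement can compare q_res_bcount against the
	 * limits directly instead of computing usage + reservation on
	 * every check.
	 */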

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
	 * so we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
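
/*
 * A minimal usage sketch (illustrative only; "udqp" is a made-up name and
 * not part of this file). A caller that wants the user dquot for a locked
 * inode would do something like:
 *
 *	struct xfs_dquot	*udqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &udqp);
 *	if (error)
 *		return error;
 *	... use udqp, which comes back locked with one reference held ...
 *	xfs_qm_dqput(udqp);		(drops the reference and unlocks)
 */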

STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs > 0)
		xfs_dqunlock(dqp);
	else
		xfs_qm_dqput_final(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		spin_lock(&mp->m_ail->xa_lock);
		if (lip->li_flags & XFS_LI_IN_AIL)
			xfs_trans_ail_delete(mp->m_ail, lip,
					     SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&mp->m_ail->xa_lock);
		error = XFS_ERROR(EIO);
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			       XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
			       &dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
			      &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return XFS_ERROR(EIO);
}
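
/*
 * A minimal caller-side sketch (illustrative; compare the real callers in
 * xfs_qm.c and xfs_dquot_item.c -- "buffer_list" here stands for whatever
 * delwri list the caller maintains). The flush lock is taken first, the
 * returned buffer is queued for delayed write, and then released:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 */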

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
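
/*
 * Illustrative use (the names "udqp" and "gdqp" are made up): to modify a
 * user and a group dquot atomically, lock both in one call and unlock
 * them individually:
 *
 *	xfs_dqlock2(udqp, gdqp);
 *	... modify both dquots ...
 *	xfs_dqunlock(udqp);
 *	xfs_dqunlock(gdqp);
 *
 * Either pointer may be NULL, in which case only the other dquot is
 * locked.
 */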

int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}