/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */
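
/*
 * Concretely (as implemented below): xfs_qm_dqget() takes qi_tree_lock and
 * then xfs_dqlock() on the dquot it finds, and xfs_qm_dqput_final() takes
 * qi_lru_lock while already holding the dquot's q_qlock.
 */
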
#ifdef DEBUG
xfs_buftarg_t		*xfs_dqerror_target;
int			xfs_do_dqerror;
int			xfs_dqreq_num;
int			xfs_dqerror_mod = 33;
#endif

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_other_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
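/*
 * For example (values illustrative only): with a default block soft limit of
 * 1000 and an on-disk dquot whose d_blk_softlimit is 0, the dquot picks up
 * the 1000-block default below; an explicit non-zero per-id limit is left
 * untouched.
 */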
void
xfs_qm_adjust_dqlimits(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	xfs_quotainfo_t		*q = mp->m_quotainfo;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit)
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
	if (q->qi_bhardlimit && !d->d_blk_hardlimit)
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case.)
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
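	/*
	 * Illustrative only: if qi_dqperchunk were 30, an id of 65 would give
	 * curid 60 above, i.e. the id of the first dquot stored in this chunk.
	 */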
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t	flist;
	xfs_bmbt_irec_t	map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota is turned off while we didn't
	 * have the inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);

	error = xfs_buf_geterror(bp);
	if (error)
		goto error1;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	xfs_bmbt_irec_t	map;
	int		nmaps = 1, error;
	xfs_buf_t	*bp;
	xfs_inode_t	*quotip = XFS_DQ_TO_QIP(dqp);
	xfs_mount_t	*mp = dqp->q_mount;
	xfs_disk_dquot_t *ddq;
	xfs_dqid_t	id = be32_to_cpu(dqp->q_core.d_id);
	xfs_trans_t	*tp = (tpp ? *tpp : NULL);

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	xfs_ilock(quotip, XFS_ILOCK_SHARED);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quota is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		return ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);
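	/*
	 * Illustrative only: with qi_dqperchunk of 30 and an xfs_dqblk_t of
	 * 136 bytes, id 65 would sit at offset (65 % 30) * 136 = 680 bytes
	 * into its chunk.
	 */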
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp);
		if (error || !bp)
			return XFS_ERROR(error);
	}

	ASSERT(xfs_buf_islocked(bp));

	/*
	 * calculate the location of the dquot inside the buffer.
	 */
	ddq = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot...
	 */
	error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
			   flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
			   "dqtobp");
	if (error) {
		if (!(flags & XFS_QMOPT_DQREPAIR)) {
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EIO);
		}
	}

	*O_bpp = bp;
	*O_ddpp = ddq;

	return (0);
}

/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);
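	/*
	 * With this counting completion, taking the flush "lock" amounts to
	 * waiting on q_flush and releasing it amounts to completing it again,
	 * so only one flusher at a time can hold it.
	 */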

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
				XFS_WRITE_LOG_RES(mp) +
				/*
				 * Round the chunklen up to the next multiple
				 * of 128 (buf log item chunk size).
				 */
				BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
				0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
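	/*
	 * Illustrative only: a dquot read in with d_bcount of 100 blocks
	 * starts with q_res_bcount == 100; a later reservation of 8 blocks
	 * raises it to 108 before any of those blocks are actually allocated.
	 */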

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp()),
	 * so we need to release it with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = XFS_DQUOT_TREE(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
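	/*
	 * radix_tree_insert() returns a negative errno (e.g. -EEXIST); the
	 * leading minus below converts it to the positive error convention
	 * used by this code.
	 */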
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}

STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs > 0)
		xfs_dqunlock(dqp);
	else
		xfs_qm_dqput_final(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock must be held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	xfs_dquot_t		*dqp,
	uint			flags)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	/*
	 * If not dirty, or it's pinned and we are not supposed to block, nada.
	 */
	if (!XFS_DQ_IS_DIRTY(dqp) ||
	    ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
		xfs_dqfunlock(dqp);
		return 0;
	}
	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk!
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		dqp->dq_flags &= ~XFS_DQ_DIRTY;
		xfs_dqfunlock(dqp);
		return XFS_ERROR(EIO);
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error) {
		ASSERT(error != ENOENT);
		xfs_dqfunlock(dqp);
		return error;
	}

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	if (flags & SYNC_WAIT)
		error = xfs_bwrite(bp);
	else
		xfs_buf_delwri_queue(bp);

	xfs_buf_relse(bp);

	trace_xfs_dqflush_done(dqp);

	/*
	 * dqp is still locked, but caller is free to unlock it now.
	 */
	return error;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
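/*
 * The second lock below is taken with mutex_lock_nested() and the
 * XFS_QLOCK_NESTED subclass so lockdep does not flag the intentional
 * acquisition of two locks of the same class.
 */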
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}

/*
 * Give the buffer a little push if it is incore and
 * wait on the flush lock.
 */
void
xfs_dqflock_pushbuf_wait(
	xfs_dquot_t	*dqp)
{
	xfs_mount_t	*mp = dqp->q_mount;
	xfs_buf_t	*bp;

	/*
	 * Check to see if the dquot has been flushed delayed
	 * write.  If so, grab its buffer and send it
	 * out immediately.  We'll be able to acquire
	 * the flush lock when the I/O completes.
	 */
	bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	if (!bp)
		goto out_lock;

	if (XFS_BUF_ISDELAYWRITE(bp)) {
		if (xfs_buf_ispinned(bp))
			xfs_log_force(mp, 0);
		xfs_buf_delwri_promote(bp);
		wake_up_process(bp->b_target->bt_task);
	}
	xfs_buf_relse(bp);
out_lock:
	xfs_dqflock(dqp);
}

int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}