/* Implementation of operations over global quota file */

#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "blockcheck.h"
static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);
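/*
 * Callbacks for the generic quota tree code (qtree): convert dquots between
 * the in-memory format and the on-disk ocfs2_global_disk_dqblk format, and
 * match on-disk entries by id. They are hooked up via ocfs2_global_ops below.
 */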
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
	d->dqb_pad1 = d->dqb_pad2 = 0;
}
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}
struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk	= ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk	= ocfs2_global_disk2memdqb,
	.is_id		= ocfs2_global_is_id,
};
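/* Check the ecc trailer of a quota block that has just been read from disk */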
static int ocfs2_validate_quota_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	mlog(0, "Validating quota block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
			   struct buffer_head **bh)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
		ocfs2_error(inode->i_sb,
			    "Quota file %llu is probably corrupted! Requested "
			    "to read block %Lu but file has size only %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)v_block,
			    (unsigned long long)i_size_read(inode));
		return -EIO;
	}
	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
				    ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}
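/*
 * Get a buffer_head for a block of the global quota file without reading it
 * from disk - used when the caller is about to overwrite the whole block.
 */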
static int ocfs2_get_quota_block(struct inode *inode, int block,
				 struct buffer_head **bh)
{
	u64 pblock, pcount;
	int err;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (err) {
		mlog_errno(err);
		return err;
	}
	*bh = sb_getblk(inode->i_sb, pblock);
	if (!*bh) {
		err = -EIO;
		mlog_errno(err);
	}
	return err;
}
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
	if (gqinode->i_size < off + len) {
		loff_t rounded_end =
				ocfs2_align_bytes_to_blocks(sb, off + len);

		/* Space is already allocated in ocfs2_global_read_dquot() */
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       rounded_end);
		if (err < 0)
			goto out;
		new = 1;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		err = ocfs2_get_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
	err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
				      ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	err = ocfs2_journal_dirty(handle, bh);
	brelse(bh);
	if (err < 0)
		goto out;
out:
	if (err) {
		mutex_unlock(&gqinode->i_mutex);
		mlog_errno(err);
		return err;
	}
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	mutex_unlock(&gqinode->i_mutex);
	return len;
}
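/*
 * Take the cluster lock protecting the global quota file. The buffer head of
 * the quota inode is stashed in dqi_gqi_bh so that nested users of the lock
 * share a single reference.
 */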
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
					      OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		goto out_err;
	}
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
						OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   msecs_to_jiffies(oinfo->dqi_syncms));

	return 0;
out_err:
	mlog_errno(status);
	return status;
}
/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		return -EIO;
	}
	return 0;
}
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		goto out;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
out:
	return err;
}
static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We may need to allocate tree blocks and a leaf block but not the
	 * root block
	 */
	return oinfo->dqi_gi.dqi_qtree_depth;
}
static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
	/* We modify all the allocated blocks, tree root, and info block */
	return (ocfs2_global_qinit_alloc(sb, type) + 2) *
			OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
}
/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct inode *gqinode = info->dqi_gqinode;
	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
	handle_t *handle = NULL;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	ocfs2_qinfo_unlock(info, 0);

	if (!dquot->dq_off) {	/* No real quota entry? */
		ex = 1;
		/*
		 * Add blocks to quota file before we start a transaction since
		 * locking allocators ranks above a transaction start
		 */
		WARN_ON(journal_current_handle());
		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		err = ocfs2_extend_no_holes(gqinode,
			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
			gqinode->i_size);
		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		if (err < 0)
			goto out;
	}

	handle = ocfs2_start_trans(osb,
				   ocfs2_calc_global_qinit_credits(sb, type));
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out;
	}
	err = ocfs2_qinfo_lock(info, ex);
	if (err < 0)
		goto out_trans;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	else
		ocfs2_qinfo_unlock(info, 0);
out_trans:
	if (handle)
		ocfs2_commit_trans(osb, handle);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0)
			mlog(ML_ERROR, "Short read from global quota file "
				       "(%u read)\n", err);
		err = -EIO;
		goto out;
	}

	/* Update space and inode usage. Get also other information from
	 * global quota file so that we don't overwrite any changes there.
	 */
	spin_lock(&dq_data_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
	     dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
	     dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set properly space grace time... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set properly inode grace time... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_type,
			       (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}
/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}
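/*
 * Delayed-work handler: push locally cached quota changes of all active
 * dquots to the global quota file and re-arm itself.
 */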
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
 * Wrappers for generic quota functions
 */
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	status = dquot_commit(dquot);
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}
static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	/*
	 * We modify tree, leaf block, global info, local chunk header,
	 * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
	 * accounts for inode update
	 */
	return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
	       OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
	       OCFS2_QINFO_WRITE_CREDITS +
	       OCFS2_INODE_UPDATE_CREDITS;
}
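/*
 * Drop a dquot reference: run the generic dquot_release() inside a
 * transaction while holding the global quota file lock.
 */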
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	status = dquot_acquire(dquot);
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}
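/*
 * Mark the dquot dirty and, if the admin changed limits or usage, push it to
 * the global quota file right away so the change is visible cluster-wide.
 */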
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	/* This is a slight hack but we can't afford getting global quota
	 * lock if we already have a transaction started. */
	if (!sync || journal_current_handle()) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}
static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
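/*
 * Quota callbacks ocfs2 plugs into the VFS dquot layer; entries that need no
 * cluster awareness just use the generic dquot_* helpers.
 */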
const struct dquot_operations ocfs2_quota_operations = {
	.initialize	= dquot_initialize,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};
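/* Create / destroy the workqueue used for periodic quota syncing */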
int ocfs2_quota_setup(void)
{
	ocfs2_quota_wq = create_workqueue("o2quot");
	if (!ocfs2_quota_wq)
		return -ENOMEM;
	return 0;
}
void ocfs2_quota_shutdown(void)
{
	if (ocfs2_quota_wq) {
		flush_workqueue(ocfs2_quota_wq);
		destroy_workqueue(ocfs2_quota_wq);
		ocfs2_quota_wq = NULL;
	}
}