/*
 * Implementation of operations over global quota file
 */

#include <linux/spinlock.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "blockcheck.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

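/* Copy an on-disk global dquot entry into the in-memory dquot, preserving
 * any fields the administrator has just set locally (DQ_LASTSET_B bits). */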
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

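/* Convert the in-memory dquot into its on-disk global-file format. */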
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        d->dqb_pad1 = d->dqb_pad2 = 0;
}

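/* Return whether the on-disk entry at dp belongs to the id of this dquot. */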
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

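/* Format operations the generic quota-tree code uses for the global file. */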
struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id          = ocfs2_global_is_id,
};

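/* Validation callback run on each quota block read through
 * ocfs2_read_virt_blocks(); it only checks the ecc trailer. */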
static int ocfs2_validate_quota_block(struct super_block *sb,
                                      struct buffer_head *bh)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        mlog(0, "Validating quota block %llu\n",
             (unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running.  We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

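/* Read one logical block of the global quota file, validating its trailer. */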
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
                           struct buffer_head **bh)
{
        int rc = 0;
        struct buffer_head *tmp = *bh;

        if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
                ocfs2_error(inode->i_sb,
                            "Quota file %llu is probably corrupted! Requested "
                            "to read block %Lu but file has size only %Lu\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)v_block,
                            (unsigned long long)i_size_read(inode));
                return -EIO;
        }
        rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
                                    ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);

        /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
        if (!rc && !*bh)
                *bh = tmp;

        return rc;
}

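/* Map a quota file block to its physical location and get a buffer head for
 * it without reading its current contents (the caller will overwrite it). */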
static int ocfs2_get_quota_block(struct inode *inode, int block,
                                 struct buffer_head **bh)
{
        u64 pblock, pcount;
        int err;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);
        err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        if (err) {
                mlog_errno(err);
                return err;
        }
        *bh = sb_getblk(inode->i_sb, pblock);
        if (!*bh) {
                err = -EIO;
                mlog_errno(err);
        }
        return err;
}

/* Read data from the global quota file. Avoid the page cache because we
 * cannot afford acquiring the locks; the quota cluster lock serializes
 * operations and the caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                toread = i_size - off;
        else
                toread = len;
        while (toread > 0) {
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                bh = NULL;
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}

/* Write to the global quota file. The caller must already have started a
 * transaction with enough credits. */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
        if (gqinode->i_size < off + len) {
                loff_t rounded_end =
                                ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_global_read_dquot() */
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               rounded_end);
                if (err < 0)
                        goto out;
                new = 1;
        }
        /* Not rewriting the whole block? Then read it in first. */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                err = ocfs2_get_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err) {
                mlog_errno(err);
                goto out;
        }
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        ocfs2_set_buffer_uptodate(gqinode, bh);
        err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        err = ocfs2_journal_dirty(handle, bh);
        brelse(bh);
out:
        if (err) {
                mutex_unlock(&gqinode->i_mutex);
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        mutex_unlock(&gqinode->i_mutex);
        return len;
}

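/* Take the cluster lock protecting the global quota file and stash the
 * inode buffer head so that later updates can reuse it. */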
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        return 0;
}

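/* Drop the global quota file cluster lock and release the cached buffer
 * head once the last holder is gone. */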
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        int status;

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }

        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                goto out_err;
        }

        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));
        return 0;
out_err:
        return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                return -EIO;
        }
        return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

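/* Number of quota tree blocks we may need to allocate when inserting a new
 * dquot; the root block never needs allocating here. */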
static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         * root block
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
        /* We modify all the allocated blocks, tree root, and info block */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
        int err, err2, ex = 0;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle = NULL;

        err = ocfs2_qinfo_lock(info, 0);
        if (err < 0)
                goto out;
        err = qtree_read_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        ocfs2_qinfo_unlock(info, 0);

        if (!dquot->dq_off) {   /* No real quota entry? */
                ex = 1;
                /*
                 * Add blocks to quota file before we start a transaction since
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                err = ocfs2_extend_no_holes(gqinode,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                if (err < 0)
                        goto out;
        }

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto out;
        }
        err = ocfs2_qinfo_lock(info, ex);
        if (err < 0)
                goto out_trans;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
                err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
                if (!err)
                        err = err2;
        }
out_qlock:
        if (ex)
                ocfs2_qinfo_unlock(info, 1);
        else
                ocfs2_qinfo_unlock(info, 0);
out_trans:
        if (handle)
                ocfs2_commit_trans(osb, handle);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                mlog(ML_ERROR, "Short read from global quota file "
                               "(%u read)\n", err);
                err = -EIO;
                goto out;
        }

        /* Update space and inode usage. Also pick up other information from
         * the global quota file so that we don't overwrite changes made
         * there. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
             dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
             dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Properly set the space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Properly set the inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_type,
                               (unsigned)dquot->dq_id);
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        if (type != dquot->dq_type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        if (status < 0)
                mlog_errno(status);
        /* We have to write the local structure as well... */
        dquot_mark_dquot_dirty(dquot);
        status = dquot_commit(dquot);
        if (status < 0)
                mlog_errno(status);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}

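/* Delayed-work handler: push all active dquots of this filesystem to the
 * global quota file and re-arm itself. */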
static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */

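/* Write a dquot inside a freshly started transaction. */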
static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We modify tree, leaf block, global info, local chunk header,
         * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
         * accounts for inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
}

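/* Drop the on-disk structures of a dquot whose last reference went away.
 * Runs inside its own transaction under the global quota file lock. */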
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_release(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}

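/* Read a dquot from disk, creating its global entry if needed. */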
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        /* We need an exclusive lock, because we're going to update the use
         * count and possibly instantiate a new dquot structure */
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        status = dquot_acquire(dquot);
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}

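/* Mark a dquot dirty and, when the administrator just changed limits or
 * usage, sync it to the global quota file right away so the change
 * propagates quickly across the cluster. */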
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        dquot_mark_dquot_dirty(dquot);

        /* In case the user set some limits, sync the dquot immediately to the
         * global quota file so that the information propagates more quickly */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        /* This is a slight hack, but we can't afford getting the global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        /* Now write updated local dquot structure */
        status = dquot_commit(dquot);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}

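/* Allocate and free the ocfs2-private dquot structure that wraps the
 * generic struct dquot. */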
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
        .initialize     = dquot_initialize,
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
        .transfer       = dquot_transfer,
        .write_dquot    = ocfs2_write_dquot,
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
};

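/* Create and tear down the workqueue used for periodic dquot syncing. */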
int ocfs2_quota_setup(void)
{
        ocfs2_quota_wq = create_workqueue("o2quot");
        if (!ocfs2_quota_wq)
                return -ENOMEM;
        return 0;
}

void ocfs2_quota_shutdown(void)
{
        if (ocfs2_quota_wq) {
                flush_workqueue(ocfs2_quota_wq);
                destroy_workqueue(ocfs2_quota_wq);
                ocfs2_quota_wq = NULL;
        }
}