ocfs2: Add missing initialization
fs/ocfs2/quota_global.c
/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
}

static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id = ocfs2_global_is_id,
};
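/*
 * The three callbacks above are what the generic quota-tree helpers
 * (the qtree_* functions from <linux/dqblk_qtree.h>) use to convert
 * between the in-memory mem_dqblk and the little-endian on-disk
 * ocfs2_global_disk_dqblk, and to match a leaf entry against a dquot
 * id while searching the tree.
 */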
struct buffer_head *ocfs2_read_quota_block(struct inode *inode,
                                           int block, int *err)
{
        struct buffer_head *tmp = NULL;

        *err = ocfs2_read_virt_blocks(inode, block, 1, &tmp, 0, NULL);
        if (*err)
                mlog_errno(*err);

        return tmp;
}

static struct buffer_head *ocfs2_get_quota_block(struct inode *inode,
                                                 int block, int *err)
{
        u64 pblock, pcount;
        struct buffer_head *bh;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);
        *err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount,
                                           NULL);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        if (*err) {
                mlog_errno(*err);
                return NULL;
        }
        bh = sb_getblk(inode->i_sb, pblock);
        if (!bh) {
                *err = -EIO;
                mlog_errno(*err);
        }
        return bh;
}
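/*
 * Note the split above: ocfs2_read_quota_block() actually reads the
 * block contents via ocfs2_read_virt_blocks(), while
 * ocfs2_get_quota_block() only maps the virtual block to a physical one
 * and grabs a buffer head without reading it, which is enough for a
 * block that is about to be completely overwritten.
 */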
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min((size_t)(sb->s_blocksize - offset), toread);
                bh = ocfs2_read_quota_block(gqinode, blk, &err);
                if (!bh) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}
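/*
 * This hook is wired up as sb->s_op->quota_read and is how the rest of
 * the quota code pulls raw bytes out of the global quota file.  As an
 * illustration (the same pattern ocfs2_global_read_info() below uses),
 * a caller holding the quota cluster lock does roughly:
 *
 *      struct ocfs2_global_disk_dqinfo dinfo;
 *      ssize_t ret = sb->s_op->quota_read(sb, type, (char *)&dinfo,
 *                                         sizeof(dinfo),
 *                                         OCFS2_GLOBAL_INFO_OFF);
 *      if (ret != sizeof(dinfo))
 *              ... treat as an I/O error ...
 */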
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0;
        struct buffer_head *bh;
        handle_t *handle = journal_current_handle();

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
        if (gqinode->i_size < off + len) {
                down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                err = ocfs2_extend_no_holes(gqinode, off + len, off);
                up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                if (err < 0)
                        goto out;
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               off + len);
                if (err < 0)
                        goto out;
                new = 1;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                bh = ocfs2_read_quota_block(gqinode, blk, &err);
                if (!bh) {
                        mlog_errno(err);
                        return err;
                }
                err = ocfs2_journal_access(handle, gqinode, bh,
                                           OCFS2_JOURNAL_ACCESS_WRITE);
        } else {
                bh = ocfs2_get_quota_block(gqinode, blk, &err);
                if (!bh) {
                        mlog_errno(err);
                        return err;
                }
                err = ocfs2_journal_access(handle, gqinode, bh,
                                           OCFS2_JOURNAL_ACCESS_CREATE);
        }
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(gqinode, bh);
        err = ocfs2_journal_dirty(handle, bh);
        brelse(bh);
        if (err < 0)
                goto out;
out:
        if (err) {
                mutex_unlock(&gqinode->i_mutex);
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        mutex_unlock(&gqinode->i_mutex);
        return len;
}
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}
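/*
 * The lock/unlock pair above keeps a single cached buffer head for the
 * global quota inode: dqi_gqi_count, protected by dq_data_lock, counts
 * nested ocfs2_lock_global_qf() holders; the first holder caches the
 * inode's buffer head in dqi_gqi_bh and the last one drops the cache.
 */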
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        int status;

        mlog_entry_void();

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           oinfo->dqi_syncjiff);

out_err:
        mlog_exit(status);
        return status;
}
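/*
 * Reading the header above also primes the periodic sync machinery: the
 * on-disk sync interval (dqi_syncms, in milliseconds) is converted to
 * jiffies and the delayed work item is queued on ocfs2_quota_wq, so that
 * locally cached dquots get pushed back to the global file regularly.
 */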
/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}
int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
        int err, err2, ex = 0;
        struct ocfs2_mem_dqinfo *info =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 0);
        if (err < 0)
                goto out;
        err = qtree_read_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        if (!dquot->dq_off) {   /* No real quota entry? */
                /* Upgrade to exclusive lock for allocation */
                err = ocfs2_qinfo_lock(info, 1);
                if (err < 0)
                        goto out_qlock;
                ex = 1;
        }
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
                err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
                if (!err)
                        err = err2;
        }
out_qlock:
        if (ex)
                ocfs2_qinfo_unlock(info, 1);
        ocfs2_qinfo_unlock(info, 0);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                                       "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Also pick up other information from
         * the global quota file so that we don't overwrite changes made
         * there by other nodes. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        mlog(0, "Syncing global dquot %d space %lld+%lld, inodes %lld+%lld\n",
             dquot->dq_id, dquot->dq_dqb.dqb_curspace, spacechange,
             dquot->dq_dqb.dqb_curinodes, inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set the space grace time properly... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set the inode grace time properly... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_type,
                               (unsigned)dquot->dq_id);
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}
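/*
 * A worked example of the merge above, purely illustrative: suppose the
 * last sync recorded dq_origspace = 10 MB, local usage has since grown
 * to 14 MB (spacechange = +4 MB), and another node has meanwhile pushed
 * the value stored in the global file to 20 MB.  After
 * ocfs2_global_disk2memdqb() loads the 20 MB and the local delta is
 * re-applied, the merged usage written back is 24 MB, and dq_origspace
 * is reset to 24 MB for the next sync period.
 */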
/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        if (type != dquot->dq_type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        dquot_mark_dquot_dirty(dquot);
        status = dquot_commit(dquot);
        if (status < 0)
                mlog_errno(status);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           oinfo->dqi_syncjiff);
}
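/*
 * The work item above re-arms itself: every dqi_syncjiff jiffies,
 * dquot_scan_active() walks the active dquots of this superblock and
 * ocfs2_sync_dquot_helper() pushes each one back to the global file
 * inside its own transaction, so usage updates from this node become
 * visible to the rest of the cluster without an explicit quota sync.
 */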
/*
 * Wrappers for generic quota functions
 */

static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(status);
        return status;
}

int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo;
        int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
                                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

        if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
                return 0;

        oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /* We modify tree, leaf block, global info, local chunk header,
         * global and local inode */
        return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
               2 * OCFS2_INODE_UPDATE_CREDITS;
}
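/*
 * To make the credit arithmetic above concrete (illustrative only, and
 * assuming OCFS2_INODE_UPDATE_CREDITS is 1, as defined in journal.h of
 * this era): with a quota tree depth of 3 this reserves
 * 3 (tree blocks) + 2 (leaf block + global info) + 1 (local chunk
 * header) + 2 * 1 (global and local inode) = 8 journal credits.
 */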
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_release(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo;
        int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
                                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
        struct ocfs2_dinode *lfe, *gfe;

        if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
                return 0;

        oinfo = sb_dqinfo(sb, type)->dqi_priv;
        gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
        lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
        /* We can extend local file + global file. In local file we
         * can modify info, chunk header block and dquot block. In
         * global file we can modify info, tree and leaf block */
        return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
               ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
               3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}
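/*
 * Same idea as the qdel calculation: the two ocfs2_calc_extend_credits()
 * terms cover a possible extension of the local and the global quota
 * file, "3" covers the local info, chunk header and dquot blocks, and
 * "dqi_qtree_depth + 2" covers the global tree blocks plus the global
 * info and leaf block, as listed in the comment above.
 */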
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        /* We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure */
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_acquire(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        dquot_mark_dquot_dirty(dquot);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        if (!sync) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        /* Now write updated local dquot structure */
        status = dquot_commit(dquot);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}
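/*
 * The mask above collects the DQ_LASTSET_B bits, which the quota core
 * sets when an administrator explicitly changes limits or usage (for
 * example via Q_SETQUOTA).  If any of them is set, the dquot is synced
 * to the global file immediately so the new values reach other nodes
 * quickly; otherwise a plain local commit is enough.
 */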
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        mlog_entry_void();

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

/* This is difficult. We have to lock the quota inode and start a transaction
 * in this function, but we don't want to take the penalty of an exclusive
 * quota file lock when we are just going to use cached structures. So we
 * take the read lock, check whether we have the dquot cached, and if so,
 * we don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
        handle_t *handle = NULL;
        int status = 0;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;
        int exclusive = 0;
        int cnt;
        qid_t id;

        mlog_entry_void();

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 0);
                if (status < 0)
                        goto out;
                /* This is just a performance optimization, not a reliable
                 * test. Since we hold an inode lock, no one can actually
                 * release the structure until we are finished with
                 * initialization. */
                if (inode->i_dquot[cnt] != NODQUOT) {
                        ocfs2_unlock_global_qf(oinfo, 0);
                        continue;
                }
                /* When we have the inode lock, we know that no dquot_release()
                 * can run and thus we can safely check whether we need to
                 * read+modify the global file to get quota information or
                 * whether our node already has it. */
                if (cnt == USRQUOTA)
                        id = inode->i_uid;
                else if (cnt == GRPQUOTA)
                        id = inode->i_gid;
                else
                        BUG();
                /* Obtain exclusion from quota off... */
                down_write(&sb_dqopt(sb)->dqptr_sem);
                exclusive = !dquot_is_cached(sb, id, cnt);
                up_write(&sb_dqopt(sb)->dqptr_sem);
                if (exclusive) {
                        status = ocfs2_lock_global_qf(oinfo, 1);
                        if (status < 0) {
                                exclusive = 0;
                                mlog_errno(status);
                                goto out_ilock;
                        }
                        handle = ocfs2_start_trans(OCFS2_SB(sb),
                                        ocfs2_calc_qinit_credits(sb, cnt));
                        if (IS_ERR(handle)) {
                                status = PTR_ERR(handle);
                                mlog_errno(status);
                                goto out_ilock;
                        }
                }
                dquot_initialize(inode, cnt);
                if (exclusive) {
                        ocfs2_commit_trans(OCFS2_SB(sb), handle);
                        ocfs2_unlock_global_qf(oinfo, 1);
                }
                ocfs2_unlock_global_qf(oinfo, 0);
        }
        mlog_exit(0);
        return 0;
out_ilock:
        if (exclusive)
                ocfs2_unlock_global_qf(oinfo, 1);
        ocfs2_unlock_global_qf(oinfo, 0);
out:
        mlog_exit(status);
        return status;
}
static int ocfs2_dquot_drop_slow(struct inode *inode)
{
        int status = 0;
        int cnt;
        int got_lock[MAXQUOTAS] = {0, 0};
        handle_t *handle;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 1);
                if (status < 0)
                        goto out;
                got_lock[cnt] = 1;
        }
        handle = ocfs2_start_trans(OCFS2_SB(sb),
                        ocfs2_calc_qinit_credits(sb, USRQUOTA) +
                        ocfs2_calc_qinit_credits(sb, GRPQUOTA));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        dquot_drop(inode);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (got_lock[cnt]) {
                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                        ocfs2_unlock_global_qf(oinfo, 1);
                }
        return status;
}

/* See the comment before ocfs2_dquot_initialize. */
static int ocfs2_dquot_drop(struct inode *inode)
{
        int status = 0;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;
        int exclusive = 0;
        int cnt;
        int got_lock[MAXQUOTAS] = {0, 0};

        mlog_entry_void();
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 0);
                if (status < 0)
                        goto out;
                got_lock[cnt] = 1;
        }
        /* Lock against anyone releasing references so that when we check
         * we know we are not going to be the last ones to release the dquot */
        down_write(&sb_dqopt(sb)->dqptr_sem);
        /* Urgh, this is a terrible hack :( */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] != NODQUOT &&
                    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
                        exclusive = 1;
                        break;
                }
        }
        if (!exclusive)
                dquot_drop_locked(inode);
        up_write(&sb_dqopt(sb)->dqptr_sem);
out:
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (got_lock[cnt]) {
                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                        ocfs2_unlock_global_qf(oinfo, 0);
                }
        /* In case we bailed out because we had to do expensive locking
         * do it now... */
        if (exclusive)
                status = ocfs2_dquot_drop_slow(inode);
        mlog_exit(status);
        return status;
}
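/*
 * ocfs2_dquot_drop() is the fast path: it takes only the shared global
 * quota file locks and, under dqptr_sem, decides whether the references
 * can be dropped cheaply.  When they cannot, it defers to
 * ocfs2_dquot_drop_slow() above, which retakes the quota file locks
 * exclusively and starts a transaction before calling dquot_drop().
 */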
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
        .initialize     = ocfs2_dquot_initialize,
        .drop           = ocfs2_dquot_drop,
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
        .transfer       = dquot_transfer,
        .write_dquot    = ocfs2_write_dquot,
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
};

int ocfs2_quota_setup(void)
{
        ocfs2_quota_wq = create_workqueue("o2quot");
        if (!ocfs2_quota_wq)
                return -ENOMEM;
        return 0;
}

void ocfs2_quota_shutdown(void)
{
        if (ocfs2_quota_wq) {
                flush_workqueue(ocfs2_quota_wq);
                destroy_workqueue(ocfs2_quota_wq);
                ocfs2_quota_wq = NULL;
        }
}