/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"
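
/*
 * Each checkpoint file block holds an array of fixed-size checkpoint
 * entries; the helpers below map a checkpoint number to the block offset
 * within the file and to the entry offset within that block.
 */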
static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
        return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
        __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
        do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
        return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
        __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
        return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}

static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
                                  __u64 curr,
                                  __u64 max)
{
        return min_t(__u64,
                     nilfs_cpfile_checkpoints_per_block(cpfile) -
                     nilfs_cpfile_get_offset(cpfile, curr),
                     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
                                           __u64 cno)
{
        return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}
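
/*
 * cp_checkpoints_count of the first entry in a block caches the number of
 * valid checkpoints stored in that block.  It is not maintained for the
 * first block of the file, which also holds the cpfile header (see
 * nilfs_cpfile_is_in_first()).
 */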
static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
                                         struct buffer_head *bh,
                                         void *kaddr,
                                         unsigned int n)
{
        struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
        unsigned int count;

        count = le32_to_cpu(cp->cp_checkpoints_count) + n;
        cp->cp_checkpoints_count = cpu_to_le32(count);
        return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
                                         struct buffer_head *bh,
                                         void *kaddr,
                                         unsigned int n)
{
        struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
        unsigned int count;

        WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
        count = le32_to_cpu(cp->cp_checkpoints_count) - n;
        cp->cp_checkpoints_count = cpu_to_le32(count);
        return count;
}

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
                              struct buffer_head *bh,
                              void *kaddr)
{
        return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
                                  struct buffer_head *bh,
                                  void *kaddr)
{
        return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
                NILFS_MDT(cpfile)->mi_entry_size;
}
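
/* Initialize a newly allocated checkpoint block: mark every entry invalid. */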
static void nilfs_cpfile_block_init(struct inode *cpfile,
                                    struct buffer_head *bh,
                                    void *kaddr)
{
        struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
        size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
        int n = nilfs_cpfile_checkpoints_per_block(cpfile);

        while (n-- > 0) {
                nilfs_checkpoint_set_invalid(cp);
                cp = (void *)cp + cpsz;
        }
}

static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}
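
/*
 * Read (or, if @create is nonzero, create) the block that holds checkpoint
 * @cno; newly created blocks are initialized with nilfs_cpfile_block_init().
 */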
static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
                                                    __u64 cno,
                                                    int create,
                                                    struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(cpfile,
                                   nilfs_cpfile_get_blkoff(cpfile, cno),
                                   create, nilfs_cpfile_block_init, bhp);
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
                                                       __u64 cno)
{
        return nilfs_mdt_delete_block(cpfile,
                                      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
                                __u64 cno,
                                int create,
                                struct nilfs_checkpoint **cpp,
                                struct buffer_head **bhp)
{
        struct buffer_head *header_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        void *kaddr;
        int ret;

        if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
                     (cno < nilfs_mdt_cno(cpfile) && create)))
                return -EINVAL;

        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_sem;
        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
        if (ret < 0)
                goto out_header;
        kaddr = kmap(cp_bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                if (!create) {
                        kunmap(cp_bh->b_page);
                        brelse(cp_bh);
                        ret = -ENOENT;
                        goto out_header;
                }
                /* a newly-created checkpoint */
                nilfs_checkpoint_clear_invalid(cp);
                if (!nilfs_cpfile_is_in_first(cpfile, cno))
                        nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
                                                                 kaddr, 1);
                nilfs_mdt_mark_buffer_dirty(cp_bh);

                kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, 1);
                kunmap_atomic(kaddr, KM_USER0);
                nilfs_mdt_mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
        }

        if (cpp != NULL)
                *cpp = cp;
        *bhp = cp_bh;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
                                 struct buffer_head *bh)
{
        kunmap(bh->b_page);
        brelse(bh);
}

/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                                    __u64 start,
                                    __u64 end)
{
        struct buffer_head *header_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
        __u64 cno;
        void *kaddr;
        unsigned long tnicps;
        int ret, ncps, nicps, count, i;

        if (unlikely(start == 0 || start > end)) {
                printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
                       "[%llu, %llu)\n", __func__,
                       (unsigned long long)start, (unsigned long long)end);
                return -EINVAL;
        }

        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_sem;
        tnicps = 0;

        for (cno = start; cno < end; cno += ncps) {
                ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                break;
                        /* skip hole */
                        ret = 0;
                        continue;
                }

                kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
                cp = nilfs_cpfile_block_get_checkpoint(
                        cpfile, cno, cp_bh, kaddr);
                nicps = 0;
                for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
                        WARN_ON(nilfs_checkpoint_snapshot(cp));
                        if (!nilfs_checkpoint_invalid(cp)) {
                                nilfs_checkpoint_set_invalid(cp);
                                nicps++;
                        }
                }
                if (nicps > 0) {
                        tnicps += nicps;
                        nilfs_mdt_mark_buffer_dirty(cp_bh);
                        nilfs_mdt_mark_dirty(cpfile);
                        if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
                            (count = nilfs_cpfile_block_sub_valid_checkpoints(
                                    cpfile, cp_bh, kaddr, nicps)) == 0) {
                                /* make hole */
                                kunmap_atomic(kaddr, KM_USER0);
                                brelse(cp_bh);
                                ret = nilfs_cpfile_delete_checkpoint_block(
                                        cpfile, cno);
                                if (ret == 0)
                                        continue;
                                printk(KERN_ERR "%s: cannot delete block\n",
                                       __func__);
                                break;
                        }
                }

                kunmap_atomic(kaddr, KM_USER0);
                brelse(cp_bh);
        }

        if (tnicps > 0) {
                kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
                nilfs_mdt_mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
                kunmap_atomic(kaddr, KM_USER0);
        }

        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
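
/* Convert an on-disk checkpoint entry into the nilfs_cpinfo form. */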
static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
                                              struct nilfs_checkpoint *cp,
                                              struct nilfs_cpinfo *ci)
{
        ci->ci_flags = le32_to_cpu(cp->cp_flags);
        ci->ci_cno = le64_to_cpu(cp->cp_cno);
        ci->ci_create = le64_to_cpu(cp->cp_create);
        ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
        ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
        ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
        ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}
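
/*
 * Copy information on valid checkpoints, starting from *cnop, into the
 * caller-supplied buffer in ascending checkpoint-number order; holes and
 * invalid entries are skipped, and *cnop is advanced past the last
 * checkpoint returned.
 */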
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
                                          void *buf, unsigned cisz, size_t nci)
{
        struct nilfs_checkpoint *cp;
        struct nilfs_cpinfo *ci = buf;
        struct buffer_head *bh;
        size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
        __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
        void *kaddr;
        int n, ret;
        int ncps, i;

        if (cno == 0)
                return -ENOENT; /* checkpoint number 0 is invalid */
        down_read(&NILFS_MDT(cpfile)->mi_sem);

        for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
                ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        continue; /* skip hole */
                }

                kaddr = kmap_atomic(bh->b_page, KM_USER0);
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
                for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
                        if (!nilfs_checkpoint_invalid(cp)) {
                                nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
                                                                  ci);
                                ci = (void *)ci + cisz;
                                n++;
                        }
                }
                kunmap_atomic(kaddr, KM_USER0);
                brelse(bh);
        }

        ret = n;
        if (n > 0) {
                ci = (void *)ci - cisz;
                *cnop = ci->ci_cno + 1;
        }

 out:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
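
/*
 * Copy information on snapshots by following the on-disk snapshot list,
 * starting from *cnop (or from the list head in the cpfile header when
 * *cnop is zero).  On return, *cnop holds the next snapshot number to
 * continue from, or the terminator value ~0 when the list is exhausted.
 */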
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
                                          void *buf, unsigned cisz, size_t nci)
{
        struct buffer_head *bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_cpinfo *ci = buf;
        __u64 curr = *cnop, next;
        unsigned long curr_blkoff, next_blkoff;
        void *kaddr;
        int n = 0, ret;

        down_read(&NILFS_MDT(cpfile)->mi_sem);

        if (curr == 0) {
                ret = nilfs_cpfile_get_header_block(cpfile, &bh);
                if (ret < 0)
                        goto out;
                kaddr = kmap_atomic(bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
                curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
                kunmap_atomic(kaddr, KM_USER0);
                brelse(bh);
                if (curr == 0) {
                        ret = 0;
                        goto out;
                }
        } else if (unlikely(curr == ~(__u64)0)) {
                ret = 0;
                goto out;
        }

        curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
        ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
        if (unlikely(ret < 0)) {
                if (ret == -ENOENT)
                        ret = 0; /* No snapshots (started from a hole block) */
                goto out;
        }
        kaddr = kmap_atomic(bh->b_page, KM_USER0);
        while (n < nci) {
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
                curr = ~(__u64)0; /* Terminator */
                if (unlikely(nilfs_checkpoint_invalid(cp) ||
                             !nilfs_checkpoint_snapshot(cp)))
                        break;
                nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
                ci = (void *)ci + cisz;
                n++;
                next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
                if (next == 0)
                        break; /* reach end of the snapshot list */

                next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
                if (curr_blkoff != next_blkoff) {
                        kunmap_atomic(kaddr, KM_USER0);
                        brelse(bh);
                        ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
                                                                0, &bh);
                        if (unlikely(ret < 0)) {
                                WARN_ON(ret == -ENOENT);
                                goto out;
                        }
                        kaddr = kmap_atomic(bh->b_page, KM_USER0);
                }
                curr = next;
                curr_blkoff = next_blkoff;
        }
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh);
        *cnop = curr;
        ret = n;

 out:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}

/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass a starting checkpoint number and to receive a
 *	continuation number
 * @mode: NILFS_CHECKPOINT or NILFS_SNAPSHOT
 * @buf: buffer in which checkpoint information is stored
 * @cisz: byte size of one checkpoint info item in @buf
 * @nci: maximum number of items to store in @buf
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
                                void *buf, unsigned cisz, size_t nci)
{
        switch (mode) {
        case NILFS_CHECKPOINT:
                return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
        case NILFS_SNAPSHOT:
                return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
        default:
                return -EINVAL;
        }
}

/**
 * nilfs_cpfile_delete_checkpoint - delete a single checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
        struct nilfs_cpinfo ci;
        __u64 tcno = cno;
        ssize_t nci;

        nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
        if (nci < 0)
                return nci;
        else if (nci == 0 || ci.ci_cno != cno)
                return -ENOENT;
        else if (nilfs_cpinfo_snapshot(&ci))
                return -EBUSY;

        return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
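
/*
 * Return the snapshot list anchor for @cno: the per-checkpoint list entry,
 * or the list head in the cpfile header when @cno is zero.
 */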
static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
                                     __u64 cno,
                                     struct buffer_head *bh,
                                     void *kaddr)
{
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_snapshot_list *list;

        if (cno != 0) {
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
                list = &cp->cp_snapshot_list;
        } else {
                header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
                list = &header->ch_snapshot_list;
        }
        return list;
}
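
/*
 * Link checkpoint @cno into the doubly linked snapshot list, whose head
 * lives in the cpfile header and which is kept sorted by checkpoint number,
 * and set the snapshot flag of the checkpoint.
 */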
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
        struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_snapshot_list *list;
        __u64 curr, prev;
        unsigned long curr_blkoff, prev_blkoff;
        void *kaddr;
        int ret;

        if (cno == 0)
                return -ENOENT; /* checkpoint number 0 is invalid */
        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                ret = -ENOENT;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }
        if (nilfs_checkpoint_snapshot(cp)) {
                ret = 0;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }
        kunmap_atomic(kaddr, KM_USER0);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_cp;
        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        list = &header->ch_snapshot_list;
        curr_bh = header_bh;
        get_bh(curr_bh);
        curr = 0;
        curr_blkoff = 0;
        prev = le64_to_cpu(list->ssl_prev);
        while (prev > cno) {
                prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
                curr = prev;
                if (curr_blkoff != prev_blkoff) {
                        kunmap_atomic(kaddr, KM_USER0);
                        brelse(curr_bh);
                        ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
                                                                0, &curr_bh);
                        if (ret < 0)
                                goto out_header;
                        kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
                }
                curr_blkoff = prev_blkoff;
                cp = nilfs_cpfile_block_get_checkpoint(
                        cpfile, curr, curr_bh, kaddr);
                list = &cp->cp_snapshot_list;
                prev = le64_to_cpu(list->ssl_prev);
        }
        kunmap_atomic(kaddr, KM_USER0);

        if (prev != 0) {
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
                                                        &prev_bh);
                if (ret < 0)
                        goto out_curr;
        } else {
                prev_bh = header_bh;
                get_bh(prev_bh);
        }

        kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, curr, curr_bh, kaddr);
        list->ssl_prev = cpu_to_le64(cno);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
        cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
        nilfs_checkpoint_set_snapshot(cp);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, prev, prev_bh, kaddr);
        list->ssl_next = cpu_to_le64(cno);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        le64_add_cpu(&header->ch_nsnapshots, 1);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(prev_bh);
        nilfs_mdt_mark_buffer_dirty(curr_bh);
        nilfs_mdt_mark_buffer_dirty(cp_bh);
        nilfs_mdt_mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);

        brelse(prev_bh);

 out_curr:
        brelse(curr_bh);

 out_header:
        brelse(header_bh);

 out_cp:
        brelse(cp_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
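
/*
 * Unlink checkpoint @cno from the snapshot list and clear its snapshot
 * flag; returns 0 without changes if the checkpoint is not a snapshot.
 */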
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
        struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_snapshot_list *list;
        __u64 next, prev;
        void *kaddr;
        int ret;

        if (cno == 0)
                return -ENOENT; /* checkpoint number 0 is invalid */
        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                ret = -ENOENT;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }
        if (!nilfs_checkpoint_snapshot(cp)) {
                ret = 0;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }

        list = &cp->cp_snapshot_list;
        next = le64_to_cpu(list->ssl_next);
        prev = le64_to_cpu(list->ssl_prev);
        kunmap_atomic(kaddr, KM_USER0);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_cp;
        if (next != 0) {
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
                                                        &next_bh);
                if (ret < 0)
                        goto out_header;
        } else {
                next_bh = header_bh;
                get_bh(next_bh);
        }
        if (prev != 0) {
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
                                                        &prev_bh);
                if (ret < 0)
                        goto out_next;
        } else {
                prev_bh = header_bh;
                get_bh(prev_bh);
        }

        kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, next, next_bh, kaddr);
        list->ssl_prev = cpu_to_le64(prev);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, prev, prev_bh, kaddr);
        list->ssl_next = cpu_to_le64(next);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
        cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
        nilfs_checkpoint_clear_snapshot(cp);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        le64_add_cpu(&header->ch_nsnapshots, -1);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(next_bh);
        nilfs_mdt_mark_buffer_dirty(prev_bh);
        nilfs_mdt_mark_buffer_dirty(cp_bh);
        nilfs_mdt_mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);

        brelse(prev_bh);

 out_next:
        brelse(next_bh);

 out_header:
        brelse(header_bh);

 out_cp:
        brelse(cp_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}

/**
 * nilfs_cpfile_is_snapshot - test whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() checks whether the checkpoint
 * specified by @cno is a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
        struct buffer_head *bh;
        struct nilfs_checkpoint *cp;
        void *kaddr;
        int ret;

        /*
         * CP number is invalid if it's zero or larger than the
         * largest existing one.
         */
        if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
                return -ENOENT;
        down_read(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
        if (ret < 0)
                goto out;
        kaddr = kmap_atomic(bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
        if (nilfs_checkpoint_invalid(cp))
                ret = -ENOENT;
        else
                ret = nilfs_checkpoint_snapshot(cp);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh);

 out:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
        struct the_nilfs *nilfs;
        int ret;

        nilfs = NILFS_MDT(cpfile)->mi_nilfs;

        switch (mode) {
        case NILFS_CHECKPOINT:
                /*
                 * Check for protecting existing snapshot mounts:
                 * ns_mount_mutex is used to make this operation atomic and
                 * exclusive with a new mount job. Though it doesn't cover
                 * umount, it's enough for the purpose.
                 */
                mutex_lock(&nilfs->ns_mount_mutex);
                if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
                        /*
                         * Current implementation does not have to protect
                         * plain read-only mounts since they are exclusive
                         * with a read/write mount and are protected from the
                         * cleaner.
                         */
                        ret = -EBUSY;
                } else
                        ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
                mutex_unlock(&nilfs->ns_mount_mutex);
                return ret;
        case NILFS_SNAPSHOT:
                return nilfs_cpfile_set_snapshot(cpfile, cno);
        default:
                return -EINVAL;
        }
}

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
        struct buffer_head *bh;
        struct nilfs_cpfile_header *header;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_header_block(cpfile, &bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
        cpstat->cs_cno = nilfs_mdt_cno(cpfile);
        cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
        cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh);

 out_sem:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}