/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"
static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}
/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}
/* offset of the checkpoint entry within its block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}
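/*
 * Worked example (illustrative only; the values are assumed, not taken from
 * an actual filesystem): with mi_first_entry_offset == 1 and
 * mi_entries_per_block == 64, checkpoint number cno == 130 gives
 * tcno == 130 + 1 - 1 == 130, so the checkpoint lives in cpfile block
 * 130 / 64 == 2 at slot 130 % 64 == 2 within that block.
 */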
/*
 * Number of checkpoint entries from @curr to the end of its block,
 * clamped so that the count never runs past @max.
 */
static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}
static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}
static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}
static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}
static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}
static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}
static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	/* mark every checkpoint slot in a newly allocated block as invalid */
	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}
static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}
static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}
static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
							__u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}
/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		nilfs_mdt_mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr, KM_USER0);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
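/*
 * Illustrative sketch (not part of the original file): a caller is expected
 * to pair nilfs_cpfile_get_checkpoint() with nilfs_cpfile_put_checkpoint()
 * so that the page mapping and buffer reference taken above are released.
 * The wrapper below is hypothetical and not compiled in; only the
 * nilfs_cpfile_* calls and the nilfs_checkpoint field are real.
 */
#if 0
static int example_read_checkpoint_create_time(struct inode *cpfile,
					       __u64 cno, __u64 *ctimep)
{
	struct nilfs_checkpoint *cp;
	struct buffer_head *bh;
	int ret;

	ret = nilfs_cpfile_get_checkpoint(cpfile, cno, 0, &cp, &bh);
	if (ret < 0)
		return ret;	/* -ENOENT, -EINVAL, -EIO or -ENOMEM */

	*ctimep = le64_to_cpu(cp->cp_create);	/* checkpoint creation time */

	nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
	return 0;
}
#endif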
/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			nilfs_mdt_mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
				  nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr, KM_USER0);
					brelse(cp_bh);
					ret =
					  nilfs_cpfile_delete_checkpoint_block(
								   cpfile, cno);
					if (ret == 0)
						continue;
					printk(KERN_ERR
					       "%s: cannot delete block\n",
					       __func__);
					break;
				}
			}
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr, KM_USER0);
	}

	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
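/*
 * Illustrative sketch (not part of the original file): the deletion range is
 * half-open, so removing a single checkpoint @cno means passing
 * [cno, cno + 1).  Already-invalid checkpoints in the range are skipped.
 * The helper below is hypothetical and not compiled in.
 */
#if 0
static int example_delete_one_checkpoint(struct inode *cpfile, __u64 cno)
{
	/* like nilfs_cpfile_delete_checkpoint(), minus the snapshot check */
	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
#endif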
static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		/* advance the cursor past the last checkpoint returned */
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page, KM_USER0);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass a starting checkpoint number and receive a cursor
 *	  for the next call
 * @mode: NILFS_CHECKPOINT or NILFS_SNAPSHOT
 * @buf: buffer to receive the checkpoint information
 * @cisz: byte size of one entry in @buf
 * @nci: maximum number of entries to store in @buf
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}
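/*
 * Illustrative sketch (not part of the original file): *cnop acts as a
 * cursor.  In NILFS_CHECKPOINT mode it is advanced past the last returned
 * entry, so repeated calls walk all valid checkpoints in batches.  The loop
 * below is hypothetical and not compiled in; NILFS_CHECKPOINT,
 * struct nilfs_cpinfo and nilfs_cpfile_get_cpinfo() are real.
 */
#if 0
static void example_walk_checkpoints(struct inode *cpfile)
{
	struct nilfs_cpinfo ci[16];
	__u64 cno = 1;		/* checkpoint numbers start at 1 */
	ssize_t n;
	int i;

	while ((n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
					    ci, sizeof(ci[0]),
					    ARRAY_SIZE(ci))) > 0) {
		for (i = 0; i < n; i++)
			printk(KERN_DEBUG "cno=%llu flags=%u\n",
			       (unsigned long long)ci[i].ci_cno,
			       ci[i].ci_flags);
	}
}
#endif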
/**
 * nilfs_cpfile_delete_checkpoint - delete a single checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		/* checkpoint number 0 stands for the list head in the header */
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	/* walk the snapshot list backwards to find the insertion point */
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr, KM_USER0);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(curr_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* unlink the checkpoint from the doubly linked snapshot list */
	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(next_bh);
	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
/**
 * nilfs_cpfile_is_snapshot - test whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: new mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	int ret;

	switch (mode) {
	case NILFS_CHECKPOINT:
		if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
			/*
			 * Current implementation does not have to protect
			 * plain read-only mounts since they are exclusive
			 * with a read/write mount and are protected from the
			 * cleaner.
			 */
			ret = -EBUSY;
		else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
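/*
 * Illustrative sketch (not part of the original file): turning a checkpoint
 * into a snapshot and verifying the result.  The wrapper is hypothetical and
 * not compiled in; NILFS_SNAPSHOT, nilfs_cpfile_change_cpmode() and
 * nilfs_cpfile_is_snapshot() are real.
 */
#if 0
static int example_pin_checkpoint(struct inode *cpfile, __u64 cno)
{
	int ret;

	ret = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_SNAPSHOT);
	if (ret < 0)
		return ret;

	ret = nilfs_cpfile_is_snapshot(cpfile, cno);	/* 1 expected here */
	return ret < 0 ? ret : 0;
}
#endif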
/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
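/*
 * Illustrative sketch (not part of the original file): reading the checkpoint
 * statistics and printing them.  The struct nilfs_cpstat fields give the next
 * checkpoint number and the numbers of checkpoints and snapshots; the wrapper
 * itself is hypothetical and not compiled in.
 */
#if 0
static void example_print_cpstat(struct inode *cpfile)
{
	struct nilfs_cpstat cpstat;

	if (nilfs_cpfile_get_stat(cpfile, &cpstat) == 0)
		printk(KERN_DEBUG "cno=%llu ncps=%llu nsss=%llu\n",
		       (unsigned long long)cpstat.cs_cno,
		       (unsigned long long)cpstat.cs_ncps,
		       (unsigned long long)cpstat.cs_nsss);
}
#endif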
/**
 * nilfs_cpfile_read - read or get cpfile inode
 * @sb: super block instance
 * @cpsize: size of a checkpoint entry
 * @raw_inode: on-disk cpfile inode
 * @inodep: buffer to store the inode
 */
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *cpfile;
	int err;

	cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
	if (unlikely(!cpfile))
		return -ENOMEM;
	if (!(cpfile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(cpfile, cpsize,
				 sizeof(struct nilfs_cpfile_header));

	err = nilfs_read_inode_common(cpfile, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(cpfile);
 out:
	*inodep = cpfile;
	return 0;
 failed:
	iget_failed(cpfile);
	return err;
}
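/*
 * Illustrative sketch (not part of the original file): how a mount-time
 * caller might load the cpfile from its on-disk inode.  The wrapper and its
 * call site are assumptions; only nilfs_cpfile_read() and its error
 * convention are taken from the function above.
 */
#if 0
static int example_load_cpfile(struct super_block *sb, size_t cpsize,
			       struct nilfs_inode *raw_inode,
			       struct inode **cpfilep)
{
	int err;

	err = nilfs_cpfile_read(sb, cpsize, raw_inode, cpfilep);
	if (err)
		return err;	/* *cpfilep is left untouched on failure */

	/* *cpfilep now refers to the cpfile inode; iput() it on teardown */
	return 0;
}
#endif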