2 * sufile.c - NILFS segment usage file.
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Written by Koji Sato <koji@osrg.net>.
21 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
24 #include <linux/kernel.h>
26 #include <linux/string.h>
27 #include <linux/buffer_head.h>
28 #include <linux/errno.h>
29 #include <linux/nilfs2_fs.h>
34 struct nilfs_sufile_info
{
35 struct nilfs_mdt_info mi
;
36 unsigned long ncleansegs
;/* number of clean segments */
37 __u64 allocmin
; /* lower limit of allocatable segment range */
38 __u64 allocmax
; /* upper limit of allocatable segment range */
/* Return the sufile's private info; relies on mi being the first member. */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
46 static inline unsigned long
47 nilfs_sufile_segment_usages_per_block(const struct inode
*sufile
)
49 return NILFS_MDT(sufile
)->mi_entries_per_block
;
53 nilfs_sufile_get_blkoff(const struct inode
*sufile
, __u64 segnum
)
55 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
56 do_div(t
, nilfs_sufile_segment_usages_per_block(sufile
));
57 return (unsigned long)t
;
61 nilfs_sufile_get_offset(const struct inode
*sufile
, __u64 segnum
)
63 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
64 return do_div(t
, nilfs_sufile_segment_usages_per_block(sufile
));
68 nilfs_sufile_segment_usages_in_block(const struct inode
*sufile
, __u64 curr
,
71 return min_t(unsigned long,
72 nilfs_sufile_segment_usages_per_block(sufile
) -
73 nilfs_sufile_get_offset(sufile
, curr
),
77 static struct nilfs_segment_usage
*
78 nilfs_sufile_block_get_segment_usage(const struct inode
*sufile
, __u64 segnum
,
79 struct buffer_head
*bh
, void *kaddr
)
81 return kaddr
+ bh_offset(bh
) +
82 nilfs_sufile_get_offset(sufile
, segnum
) *
83 NILFS_MDT(sufile
)->mi_entry_size
;
86 static inline int nilfs_sufile_get_header_block(struct inode
*sufile
,
87 struct buffer_head
**bhp
)
89 return nilfs_mdt_get_block(sufile
, 0, 0, NULL
, bhp
);
93 nilfs_sufile_get_segment_usage_block(struct inode
*sufile
, __u64 segnum
,
94 int create
, struct buffer_head
**bhp
)
96 return nilfs_mdt_get_block(sufile
,
97 nilfs_sufile_get_blkoff(sufile
, segnum
),
101 static int nilfs_sufile_delete_segment_usage_block(struct inode
*sufile
,
104 return nilfs_mdt_delete_block(sufile
,
105 nilfs_sufile_get_blkoff(sufile
, segnum
));
108 static void nilfs_sufile_mod_counter(struct buffer_head
*header_bh
,
109 u64 ncleanadd
, u64 ndirtyadd
)
111 struct nilfs_sufile_header
*header
;
114 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
115 header
= kaddr
+ bh_offset(header_bh
);
116 le64_add_cpu(&header
->sh_ncleansegs
, ncleanadd
);
117 le64_add_cpu(&header
->sh_ndirtysegs
, ndirtyadd
);
118 kunmap_atomic(kaddr
, KM_USER0
);
120 mark_buffer_dirty(header_bh
);
124 * nilfs_sufile_get_ncleansegs - return the number of clean segments
125 * @sufile: inode of segment usage file
127 unsigned long nilfs_sufile_get_ncleansegs(struct inode
*sufile
)
129 return NILFS_SUI(sufile
)->ncleansegs
;
133 * nilfs_sufile_updatev - modify multiple segment usages at a time
134 * @sufile: inode of segment usage file
135 * @segnumv: array of segment numbers
136 * @nsegs: size of @segnumv array
137 * @create: creation flag
138 * @ndone: place to store number of modified segments on @segnumv
139 * @dofunc: primitive operation for the update
141 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
142 * against the given array of segments. The @dofunc is called with
143 * buffers of a header block and the sufile block in which the target
144 * segment usage entry is contained. If @ndone is given, the number
145 * of successfully modified segments from the head is stored in the
146 * place @ndone points to.
148 * Return Value: On success, zero is returned. On error, one of the
149 * following negative error codes is returned.
153 * %-ENOMEM - Insufficient amount of memory available.
155 * %-ENOENT - Given segment usage is in hole block (may be returned if
158 * %-EINVAL - Invalid segment usage number
160 int nilfs_sufile_updatev(struct inode
*sufile
, __u64
*segnumv
, size_t nsegs
,
161 int create
, size_t *ndone
,
162 void (*dofunc
)(struct inode
*, __u64
,
163 struct buffer_head
*,
164 struct buffer_head
*))
166 struct buffer_head
*header_bh
, *bh
;
167 unsigned long blkoff
, prev_blkoff
;
169 size_t nerr
= 0, n
= 0;
172 if (unlikely(nsegs
== 0))
175 down_write(&NILFS_MDT(sufile
)->mi_sem
);
176 for (seg
= segnumv
; seg
< segnumv
+ nsegs
; seg
++) {
177 if (unlikely(*seg
>= nilfs_sufile_get_nsegments(sufile
))) {
179 "%s: invalid segment number: %llu\n", __func__
,
180 (unsigned long long)*seg
);
189 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
194 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
195 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
200 dofunc(sufile
, *seg
, header_bh
, bh
);
202 if (++seg
>= segnumv
+ nsegs
)
204 prev_blkoff
= blkoff
;
205 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
206 if (blkoff
== prev_blkoff
)
209 /* get different block */
211 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
212 if (unlikely(ret
< 0))
221 up_write(&NILFS_MDT(sufile
)->mi_sem
);
228 int nilfs_sufile_update(struct inode
*sufile
, __u64 segnum
, int create
,
229 void (*dofunc
)(struct inode
*, __u64
,
230 struct buffer_head
*,
231 struct buffer_head
*))
233 struct buffer_head
*header_bh
, *bh
;
236 if (unlikely(segnum
>= nilfs_sufile_get_nsegments(sufile
))) {
237 printk(KERN_WARNING
"%s: invalid segment number: %llu\n",
238 __func__
, (unsigned long long)segnum
);
241 down_write(&NILFS_MDT(sufile
)->mi_sem
);
243 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
247 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, create
, &bh
);
249 dofunc(sufile
, segnum
, header_bh
, bh
);
255 up_write(&NILFS_MDT(sufile
)->mi_sem
);
260 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
261 * @sufile: inode of segment usage file
262 * @start: minimum segment number of allocatable region (inclusive)
263 * @end: maximum segment number of allocatable region (inclusive)
265 * Return Value: On success, 0 is returned. On error, one of the
266 * following negative error codes is returned.
268 * %-ERANGE - invalid segment region
270 int nilfs_sufile_set_alloc_range(struct inode
*sufile
, __u64 start
, __u64 end
)
272 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
276 down_write(&NILFS_MDT(sufile
)->mi_sem
);
277 nsegs
= nilfs_sufile_get_nsegments(sufile
);
279 if (start
<= end
&& end
< nsegs
) {
280 sui
->allocmin
= start
;
284 up_write(&NILFS_MDT(sufile
)->mi_sem
);
289 * nilfs_sufile_alloc - allocate a segment
290 * @sufile: inode of segment usage file
291 * @segnump: pointer to segment number
293 * Description: nilfs_sufile_alloc() allocates a clean segment.
295 * Return Value: On success, 0 is returned and the segment number of the
296 * allocated segment is stored in the place pointed by @segnump. On error, one
297 * of the following negative error codes is returned.
301 * %-ENOMEM - Insufficient amount of memory available.
303 * %-ENOSPC - No clean segment left.
305 int nilfs_sufile_alloc(struct inode
*sufile
, __u64
*segnump
)
307 struct buffer_head
*header_bh
, *su_bh
;
308 struct nilfs_sufile_header
*header
;
309 struct nilfs_segment_usage
*su
;
310 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
311 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
312 __u64 segnum
, maxsegnum
, last_alloc
;
314 unsigned long nsegments
, ncleansegs
, nsus
, cnt
;
317 down_write(&NILFS_MDT(sufile
)->mi_sem
);
319 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
322 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
323 header
= kaddr
+ bh_offset(header_bh
);
324 ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
325 last_alloc
= le64_to_cpu(header
->sh_last_alloc
);
326 kunmap_atomic(kaddr
, KM_USER0
);
328 nsegments
= nilfs_sufile_get_nsegments(sufile
);
329 maxsegnum
= sui
->allocmax
;
330 segnum
= last_alloc
+ 1;
331 if (segnum
< sui
->allocmin
|| segnum
> sui
->allocmax
)
332 segnum
= sui
->allocmin
;
334 for (cnt
= 0; cnt
< nsegments
; cnt
+= nsus
) {
335 if (segnum
> maxsegnum
) {
336 if (cnt
< sui
->allocmax
- sui
->allocmin
+ 1) {
338 * wrap around in the limited region.
339 * if allocation started from
340 * sui->allocmin, this never happens.
342 segnum
= sui
->allocmin
;
343 maxsegnum
= last_alloc
;
344 } else if (segnum
> sui
->allocmin
&&
345 sui
->allocmax
+ 1 < nsegments
) {
346 segnum
= sui
->allocmax
+ 1;
347 maxsegnum
= nsegments
- 1;
348 } else if (sui
->allocmin
> 0) {
350 maxsegnum
= sui
->allocmin
- 1;
352 break; /* never happens */
355 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 1,
359 kaddr
= kmap_atomic(su_bh
->b_page
, KM_USER0
);
360 su
= nilfs_sufile_block_get_segment_usage(
361 sufile
, segnum
, su_bh
, kaddr
);
363 nsus
= nilfs_sufile_segment_usages_in_block(
364 sufile
, segnum
, maxsegnum
);
365 for (j
= 0; j
< nsus
; j
++, su
= (void *)su
+ susz
, segnum
++) {
366 if (!nilfs_segment_usage_clean(su
))
368 /* found a clean segment */
369 nilfs_segment_usage_set_dirty(su
);
370 kunmap_atomic(kaddr
, KM_USER0
);
372 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
373 header
= kaddr
+ bh_offset(header_bh
);
374 le64_add_cpu(&header
->sh_ncleansegs
, -1);
375 le64_add_cpu(&header
->sh_ndirtysegs
, 1);
376 header
->sh_last_alloc
= cpu_to_le64(segnum
);
377 kunmap_atomic(kaddr
, KM_USER0
);
380 mark_buffer_dirty(header_bh
);
381 mark_buffer_dirty(su_bh
);
382 nilfs_mdt_mark_dirty(sufile
);
388 kunmap_atomic(kaddr
, KM_USER0
);
392 /* no segments left */
399 up_write(&NILFS_MDT(sufile
)->mi_sem
);
403 void nilfs_sufile_do_cancel_free(struct inode
*sufile
, __u64 segnum
,
404 struct buffer_head
*header_bh
,
405 struct buffer_head
*su_bh
)
407 struct nilfs_segment_usage
*su
;
410 kaddr
= kmap_atomic(su_bh
->b_page
, KM_USER0
);
411 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
412 if (unlikely(!nilfs_segment_usage_clean(su
))) {
413 printk(KERN_WARNING
"%s: segment %llu must be clean\n",
414 __func__
, (unsigned long long)segnum
);
415 kunmap_atomic(kaddr
, KM_USER0
);
418 nilfs_segment_usage_set_dirty(su
);
419 kunmap_atomic(kaddr
, KM_USER0
);
421 nilfs_sufile_mod_counter(header_bh
, -1, 1);
422 NILFS_SUI(sufile
)->ncleansegs
--;
424 mark_buffer_dirty(su_bh
);
425 nilfs_mdt_mark_dirty(sufile
);
428 void nilfs_sufile_do_scrap(struct inode
*sufile
, __u64 segnum
,
429 struct buffer_head
*header_bh
,
430 struct buffer_head
*su_bh
)
432 struct nilfs_segment_usage
*su
;
436 kaddr
= kmap_atomic(su_bh
->b_page
, KM_USER0
);
437 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
438 if (su
->su_flags
== cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY
) &&
439 su
->su_nblocks
== cpu_to_le32(0)) {
440 kunmap_atomic(kaddr
, KM_USER0
);
443 clean
= nilfs_segment_usage_clean(su
);
444 dirty
= nilfs_segment_usage_dirty(su
);
446 /* make the segment garbage */
447 su
->su_lastmod
= cpu_to_le64(0);
448 su
->su_nblocks
= cpu_to_le32(0);
449 su
->su_flags
= cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY
);
450 kunmap_atomic(kaddr
, KM_USER0
);
452 nilfs_sufile_mod_counter(header_bh
, clean
? (u64
)-1 : 0, dirty
? 0 : 1);
453 NILFS_SUI(sufile
)->ncleansegs
-= clean
;
455 mark_buffer_dirty(su_bh
);
456 nilfs_mdt_mark_dirty(sufile
);
459 void nilfs_sufile_do_free(struct inode
*sufile
, __u64 segnum
,
460 struct buffer_head
*header_bh
,
461 struct buffer_head
*su_bh
)
463 struct nilfs_segment_usage
*su
;
467 kaddr
= kmap_atomic(su_bh
->b_page
, KM_USER0
);
468 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
469 if (nilfs_segment_usage_clean(su
)) {
470 printk(KERN_WARNING
"%s: segment %llu is already clean\n",
471 __func__
, (unsigned long long)segnum
);
472 kunmap_atomic(kaddr
, KM_USER0
);
475 WARN_ON(nilfs_segment_usage_error(su
));
476 WARN_ON(!nilfs_segment_usage_dirty(su
));
478 sudirty
= nilfs_segment_usage_dirty(su
);
479 nilfs_segment_usage_set_clean(su
);
480 kunmap_atomic(kaddr
, KM_USER0
);
481 mark_buffer_dirty(su_bh
);
483 nilfs_sufile_mod_counter(header_bh
, 1, sudirty
? (u64
)-1 : 0);
484 NILFS_SUI(sufile
)->ncleansegs
++;
486 nilfs_mdt_mark_dirty(sufile
);
490 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
491 * @sufile: inode of segment usage file
492 * @segnum: segment number
494 int nilfs_sufile_mark_dirty(struct inode
*sufile
, __u64 segnum
)
496 struct buffer_head
*bh
;
499 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
501 mark_buffer_dirty(bh
);
502 nilfs_mdt_mark_dirty(sufile
);
509 * nilfs_sufile_set_segment_usage - set usage of a segment
510 * @sufile: inode of segment usage file
511 * @segnum: segment number
512 * @nblocks: number of live blocks in the segment
513 * @modtime: modification time (option)
515 int nilfs_sufile_set_segment_usage(struct inode
*sufile
, __u64 segnum
,
516 unsigned long nblocks
, time_t modtime
)
518 struct buffer_head
*bh
;
519 struct nilfs_segment_usage
*su
;
523 down_write(&NILFS_MDT(sufile
)->mi_sem
);
524 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
528 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
529 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, bh
, kaddr
);
530 WARN_ON(nilfs_segment_usage_error(su
));
532 su
->su_lastmod
= cpu_to_le64(modtime
);
533 su
->su_nblocks
= cpu_to_le32(nblocks
);
534 kunmap_atomic(kaddr
, KM_USER0
);
536 mark_buffer_dirty(bh
);
537 nilfs_mdt_mark_dirty(sufile
);
541 up_write(&NILFS_MDT(sufile
)->mi_sem
);
546 * nilfs_sufile_get_stat - get segment usage statistics
547 * @sufile: inode of segment usage file
548 * @stat: pointer to a structure of segment usage statistics
550 * Description: nilfs_sufile_get_stat() returns information about segment
553 * Return Value: On success, 0 is returned, and segment usage information is
554 * stored in the place pointed by @stat. On error, one of the following
555 * negative error codes is returned.
559 * %-ENOMEM - Insufficient amount of memory available.
561 int nilfs_sufile_get_stat(struct inode
*sufile
, struct nilfs_sustat
*sustat
)
563 struct buffer_head
*header_bh
;
564 struct nilfs_sufile_header
*header
;
565 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
569 down_read(&NILFS_MDT(sufile
)->mi_sem
);
571 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
575 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
576 header
= kaddr
+ bh_offset(header_bh
);
577 sustat
->ss_nsegs
= nilfs_sufile_get_nsegments(sufile
);
578 sustat
->ss_ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
579 sustat
->ss_ndirtysegs
= le64_to_cpu(header
->sh_ndirtysegs
);
580 sustat
->ss_ctime
= nilfs
->ns_ctime
;
581 sustat
->ss_nongc_ctime
= nilfs
->ns_nongc_ctime
;
582 spin_lock(&nilfs
->ns_last_segment_lock
);
583 sustat
->ss_prot_seq
= nilfs
->ns_prot_seq
;
584 spin_unlock(&nilfs
->ns_last_segment_lock
);
585 kunmap_atomic(kaddr
, KM_USER0
);
589 up_read(&NILFS_MDT(sufile
)->mi_sem
);
593 void nilfs_sufile_do_set_error(struct inode
*sufile
, __u64 segnum
,
594 struct buffer_head
*header_bh
,
595 struct buffer_head
*su_bh
)
597 struct nilfs_segment_usage
*su
;
601 kaddr
= kmap_atomic(su_bh
->b_page
, KM_USER0
);
602 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
603 if (nilfs_segment_usage_error(su
)) {
604 kunmap_atomic(kaddr
, KM_USER0
);
607 suclean
= nilfs_segment_usage_clean(su
);
608 nilfs_segment_usage_set_error(su
);
609 kunmap_atomic(kaddr
, KM_USER0
);
612 nilfs_sufile_mod_counter(header_bh
, -1, 0);
613 NILFS_SUI(sufile
)->ncleansegs
--;
615 mark_buffer_dirty(su_bh
);
616 nilfs_mdt_mark_dirty(sufile
);
620 * nilfs_sufile_truncate_range - truncate range of segment array
621 * @sufile: inode of segment usage file
622 * @start: start segment number (inclusive)
623 * @end: end segment number (inclusive)
625 * Return Value: On success, 0 is returned. On error, one of the
626 * following negative error codes is returned.
630 * %-ENOMEM - Insufficient amount of memory available.
632 * %-EINVAL - Invalid number of segments specified
634 * %-EBUSY - Dirty or active segments are present in the range
636 static int nilfs_sufile_truncate_range(struct inode
*sufile
,
637 __u64 start
, __u64 end
)
639 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
640 struct buffer_head
*header_bh
;
641 struct buffer_head
*su_bh
;
642 struct nilfs_segment_usage
*su
, *su2
;
643 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
644 unsigned long segusages_per_block
;
645 unsigned long nsegs
, ncleaned
;
652 nsegs
= nilfs_sufile_get_nsegments(sufile
);
655 if (start
> end
|| start
>= nsegs
)
658 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
662 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
665 for (segnum
= start
; segnum
<= end
; segnum
+= n
) {
666 n
= min_t(unsigned long,
667 segusages_per_block
-
668 nilfs_sufile_get_offset(sufile
, segnum
),
670 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
678 kaddr
= kmap_atomic(su_bh
->b_page
, KM_USER0
);
679 su
= nilfs_sufile_block_get_segment_usage(
680 sufile
, segnum
, su_bh
, kaddr
);
682 for (j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
683 if ((le32_to_cpu(su
->su_flags
) &
684 ~(1UL << NILFS_SEGMENT_USAGE_ERROR
)) ||
685 nilfs_segment_is_active(nilfs
, segnum
+ j
)) {
687 kunmap_atomic(kaddr
, KM_USER0
);
693 for (su
= su2
, j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
694 if (nilfs_segment_usage_error(su
)) {
695 nilfs_segment_usage_set_clean(su
);
699 kunmap_atomic(kaddr
, KM_USER0
);
701 mark_buffer_dirty(su_bh
);
706 if (n
== segusages_per_block
) {
708 nilfs_sufile_delete_segment_usage_block(sufile
, segnum
);
715 NILFS_SUI(sufile
)->ncleansegs
+= ncleaned
;
716 nilfs_sufile_mod_counter(header_bh
, ncleaned
, 0);
717 nilfs_mdt_mark_dirty(sufile
);
725 * nilfs_sufile_resize - resize segment array
726 * @sufile: inode of segment usage file
727 * @newnsegs: new number of segments
729 * Return Value: On success, 0 is returned. On error, one of the
730 * following negative error codes is returned.
734 * %-ENOMEM - Insufficient amount of memory available.
736 * %-ENOSPC - Enough free space is not left for shrinking
738 * %-EBUSY - Dirty or active segments exist in the region to be truncated
740 int nilfs_sufile_resize(struct inode
*sufile
, __u64 newnsegs
)
742 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
743 struct buffer_head
*header_bh
;
744 struct nilfs_sufile_header
*header
;
745 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
747 unsigned long nsegs
, nrsvsegs
;
750 down_write(&NILFS_MDT(sufile
)->mi_sem
);
752 nsegs
= nilfs_sufile_get_nsegments(sufile
);
753 if (nsegs
== newnsegs
)
757 nrsvsegs
= nilfs_nrsvsegs(nilfs
, newnsegs
);
758 if (newnsegs
< nsegs
&& nsegs
- newnsegs
+ nrsvsegs
> sui
->ncleansegs
)
761 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
765 if (newnsegs
> nsegs
) {
766 sui
->ncleansegs
+= newnsegs
- nsegs
;
767 } else /* newnsegs < nsegs */ {
768 ret
= nilfs_sufile_truncate_range(sufile
, newnsegs
, nsegs
- 1);
772 sui
->ncleansegs
-= nsegs
- newnsegs
;
775 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
776 header
= kaddr
+ bh_offset(header_bh
);
777 header
->sh_ncleansegs
= cpu_to_le64(sui
->ncleansegs
);
778 kunmap_atomic(kaddr
, KM_USER0
);
780 mark_buffer_dirty(header_bh
);
781 nilfs_mdt_mark_dirty(sufile
);
782 nilfs_set_nsegments(nilfs
, newnsegs
);
787 up_write(&NILFS_MDT(sufile
)->mi_sem
);
792 * nilfs_sufile_get_suinfo -
793 * @sufile: inode of segment usage file
794 * @segnum: segment number to start looking
795 * @buf: array of suinfo
796 * @sisz: byte size of suinfo
797 * @nsi: size of suinfo array
801 * Return Value: On success, 0 is returned and .... On error, one of the
802 * following negative error codes is returned.
806 * %-ENOMEM - Insufficient amount of memory available.
808 ssize_t
nilfs_sufile_get_suinfo(struct inode
*sufile
, __u64 segnum
, void *buf
,
809 unsigned sisz
, size_t nsi
)
811 struct buffer_head
*su_bh
;
812 struct nilfs_segment_usage
*su
;
813 struct nilfs_suinfo
*si
= buf
;
814 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
815 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
817 unsigned long nsegs
, segusages_per_block
;
821 down_read(&NILFS_MDT(sufile
)->mi_sem
);
823 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
824 nsegs
= min_t(unsigned long,
825 nilfs_sufile_get_nsegments(sufile
) - segnum
,
827 for (i
= 0; i
< nsegs
; i
+= n
, segnum
+= n
) {
828 n
= min_t(unsigned long,
829 segusages_per_block
-
830 nilfs_sufile_get_offset(sufile
, segnum
),
832 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
838 memset(si
, 0, sisz
* n
);
839 si
= (void *)si
+ sisz
* n
;
843 kaddr
= kmap_atomic(su_bh
->b_page
, KM_USER0
);
844 su
= nilfs_sufile_block_get_segment_usage(
845 sufile
, segnum
, su_bh
, kaddr
);
847 j
++, su
= (void *)su
+ susz
, si
= (void *)si
+ sisz
) {
848 si
->sui_lastmod
= le64_to_cpu(su
->su_lastmod
);
849 si
->sui_nblocks
= le32_to_cpu(su
->su_nblocks
);
850 si
->sui_flags
= le32_to_cpu(su
->su_flags
) &
851 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE
);
852 if (nilfs_segment_is_active(nilfs
, segnum
+ j
))
854 (1UL << NILFS_SEGMENT_USAGE_ACTIVE
);
856 kunmap_atomic(kaddr
, KM_USER0
);
862 up_read(&NILFS_MDT(sufile
)->mi_sem
);
867 * nilfs_sufile_read - read or get sufile inode
868 * @sb: super block instance
869 * @susize: size of a segment usage entry
870 * @raw_inode: on-disk sufile inode
871 * @inodep: buffer to store the inode
873 int nilfs_sufile_read(struct super_block
*sb
, size_t susize
,
874 struct nilfs_inode
*raw_inode
, struct inode
**inodep
)
876 struct inode
*sufile
;
877 struct nilfs_sufile_info
*sui
;
878 struct buffer_head
*header_bh
;
879 struct nilfs_sufile_header
*header
;
883 sufile
= nilfs_iget_locked(sb
, NULL
, NILFS_SUFILE_INO
);
884 if (unlikely(!sufile
))
886 if (!(sufile
->i_state
& I_NEW
))
889 err
= nilfs_mdt_init(sufile
, NILFS_MDT_GFP
, sizeof(*sui
));
893 nilfs_mdt_set_entry_size(sufile
, susize
,
894 sizeof(struct nilfs_sufile_header
));
896 err
= nilfs_read_inode_common(sufile
, raw_inode
);
900 err
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
904 sui
= NILFS_SUI(sufile
);
905 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
906 header
= kaddr
+ bh_offset(header_bh
);
907 sui
->ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
908 kunmap_atomic(kaddr
, KM_USER0
);
911 sui
->allocmax
= nilfs_sufile_get_nsegments(sufile
) - 1;
914 unlock_new_inode(sufile
);