/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * resize.c
 *
 * volume resize.
 * Inspired by ext3/resize.c.
 *
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/types.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "suballoc.h"
#include "resize.h"
/*
 * Check whether any new backup superblocks exist in the last group.
 * If there are some, mark them or clear them in the bitmap.
 *
 * Return how many backups we find in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       int new_clusters,
				       u32 first_new_cluster,
				       u16 cl_cpg,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);
		backups++;
	}

	return backups;
}
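
/*
 * Grow the last cluster group by 'new_clusters' and reflect the change in
 * the global bitmap inode, all inside the caller's transaction.  The group
 * descriptor is updated and dirtied first; if dirtying the bitmap inode
 * then fails, the group descriptor changes are rolled back so the two
 * structures stay consistent on disk.
 */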
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;
	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);
	/*
	 * Check whether any new backup superblocks exist in this group
	 * and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						      group,
						      new_clusters,
						      first_new_cluster,
						      cl_cpg, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	ocfs2_journal_dirty(handle, group_bh);
	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}
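
	/*
	 * Credit the new bits to the chain record that owns the last group
	 * and to the bitmap inode's totals, then grow i_clusters itself.
	 */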
	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}
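
	/* Keep the in-memory inode in step with the new on-disk size. */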
	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);
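
	/*
	 * If the bitmap inode could not be dirtied, back out the changes
	 * made to the last group descriptor above so the two stay in sync.
	 */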
out_rollback:
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    new_clusters,
					    first_new_cluster,
					    cl_cpg, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
	}
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}
static int update_backups(struct inode * inode, u32 clusters, char *data)
{
	int i, ret = 0;
	u32 cluster;
	u64 blkno;
	struct buffer_head *backup = NULL;
	struct ocfs2_dinode *backup_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	/* calculate the real backups we need to update. */
	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
		if (cluster > clusters)
			break;

		ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}

		memcpy(backup->b_data, data, inode->i_sb->s_blocksize);

		backup_di = (struct ocfs2_dinode *)backup->b_data;
		backup_di->i_blkno = cpu_to_le64(blkno);

		ret = ocfs2_write_super_or_backup(osb, backup);
		brelse(backup);
		backup = NULL;
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
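
/*
 * Bump i_clusters in the on-disk superblock and, when the BACKUP_SB
 * feature is enabled, propagate the updated superblock to the backup
 * locations.  Failures here are only warned about, since fsck.ocfs2 can
 * repair the superblock counts later.
 */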
static void ocfs2_update_super_and_backups(struct inode *inode,
					   int new_clusters)
{
	int ret;
	u32 clusters = 0;
	struct buffer_head *super_bh = NULL;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	/*
	 * update the superblock last.
	 * It doesn't matter if the write failed.
	 */
	ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				     &super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	super_di = (struct ocfs2_dinode *)super_bh->b_data;
	le32_add_cpu(&super_di->i_clusters, new_clusters);
	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
		ret = update_backups(inode, clusters, super_bh->b_data);

out:
	brelse(super_bh);
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
		       " during fs resize. This condition is not fatal,"
		       " but fsck.ocfs2 should be run to fix it\n",
		       osb->dev_str);
}
/*
 * Extend the filesystem to the new number of clusters specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 */
int ocfs2_group_extend(struct inode * inode, int new_clusters)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	if (new_clusters < 0)
		return -EINVAL;
	else if (new_clusters == 0)
		return 0;
	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);
	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * so any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
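
	/*
	 * Online extend only understands the current group bitmap layout;
	 * volumes whose cl_cpg does not match it must be resized offline.
	 */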
	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}
	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;
	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
		le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	trace_ocfs2_group_extend(
	     (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);
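
	/* All checks passed; do the actual extend in one transaction. */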
	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						new_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_update_super_and_backups(main_bm_inode, new_clusters);
out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}
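
/*
 * Sanity-check the pre-written group descriptor at input->group against
 * the chain, cluster count and free count supplied by userspace.
 */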
static int ocfs2_check_new_group(struct inode *inode,
				 struct ocfs2_dinode *di,
				 struct ocfs2_new_group_input *input,
				 struct buffer_head *group_bh)
{
	int ret;
	struct ocfs2_group_desc *gd =
		(struct ocfs2_group_desc *)group_bh->b_data;
	u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);

	ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (le16_to_cpu(gd->bg_chain) != input->chain)
		mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
		     "while input has %u set.\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_chain), input->chain);
	else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
		     "input has %u clusters set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits), input->clusters);
	else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
		     "but it should have %u set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_free_bits_count),
		     input->frees * cl_bpc);
	else
		ret = 0;

out:
	return ret;
}
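
/*
 * Validate the resize request before anything touches the global bitmap:
 * the new group must lie beyond the current volume, hang off a valid chain,
 * carry sane cluster and free counts, and its descriptor must pass
 * ocfs2_check_new_group().
 */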
static int ocfs2_verify_group_and_input(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_new_group_input *input,
					struct buffer_head *group_bh)
{
	u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
	u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
	u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
	u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
	u32 total_clusters = le32_to_cpu(di->i_clusters);
	int ret = -EINVAL;
	if (cluster < total_clusters)
		mlog(ML_ERROR, "add a group which is in the current volume.\n");
	else if (input->chain >= cl_count)
		mlog(ML_ERROR, "input chain exceeds the limit.\n");
	else if (next_free != cl_count && next_free != input->chain)
		mlog(ML_ERROR,
		     "the add group should be in chain %u\n", next_free);
	else if (total_clusters + input->clusters < total_clusters)
		mlog(ML_ERROR, "add group's clusters overflow.\n");
	else if (input->clusters > cl_cpg)
		mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n");
	else if (input->frees > input->clusters)
		mlog(ML_ERROR, "the free cluster exceeds the total clusters\n");
	else if (total_clusters % cl_cpg != 0)
		mlog(ML_ERROR,
		     "the last group isn't full. Use group extend first.\n");
	else if (input->group != ocfs2_which_cluster_group(inode, cluster))
		mlog(ML_ERROR, "group blkno is invalid\n");
	else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
		mlog(ML_ERROR, "group descriptor check failed.\n");
	else
		ret = 0;

	return ret;
}
/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);
	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_free_group_bh;
	}
	trace_ocfs2_group_add((unsigned long long)input->group,
			      input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free_group_bh;
	}
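
	/*
	 * Link the new group into its chain: the group's bg_next_group takes
	 * the chain's current head, and the chain record is then pointed at
	 * the new group.
	 */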
	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}
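
	/*
	 * If the target chain record was previously unused, bring it into
	 * service before crediting the new group's clusters to it.
	 */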
	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}
	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);
	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_free_group_bh:
	brelse(group_bh);

out_unlock:
	brelse(main_bm_bh);
	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}