/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Inspired by ext3/resize.c.
 *
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>

#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"

#include "buffer_head_io.h"
#include "suballoc.h"
#include "resize.h"

/*
 * Check whether new backup superblocks exist in the last group.  If
 * there are some, mark them or clear them in the bitmap.
 *
 * Return how many backups we find in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       int new_clusters,
				       u32 first_new_cluster,
				       u16 cl_cpg,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);
		backups++;
	}

	return backups;
}
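
/*
 * Grow the last cluster group in place: widen the group descriptor's
 * bit counts, reserve any backup superblock bits that now fall inside
 * the group, and fold the new space into the global bitmap inode
 * (chain record, bitmap totals, i_clusters and i_size).  If the inode
 * update fails, the group descriptor changes are rolled back.
 */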
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);

	mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n",
		   new_clusters, first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * check whether some new backup superblocks now fall inside
	 * this group and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						      group,
						      new_clusters,
						      first_new_cluster,
						      cl_cpg, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

out_rollback:
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    new_clusters,
					    first_new_cluster,
					    cl_cpg, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
	}
out:
	mlog_exit(ret);
	return ret;
}
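
/*
 * Copy the updated superblock image into every backup superblock slot
 * that lies inside the given cluster count and write each one out
 * synchronously.
 */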
static int update_backups(struct inode * inode, u32 clusters, char *data)
{
	int i, ret = 0;
	u32 cluster;
	u64 blkno;
	struct buffer_head *backup = NULL;
	struct ocfs2_dinode *backup_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* calculate the real backups we need to update. */
	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
		if (cluster > clusters)
			break;

		ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}

		memcpy(backup->b_data, data, inode->i_sb->s_blocksize);

		backup_di = (struct ocfs2_dinode *)backup->b_data;
		backup_di->i_blkno = cpu_to_le64(blkno);

		ret = ocfs2_write_super_or_backup(osb, backup);
		brelse(backup);
		backup = NULL;
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
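
/*
 * Reflect the new cluster count in the on-disk superblock and, when the
 * BACKUP_SB feature is enabled, in the backup superblocks.  Failures are
 * only warned about here: the bitmap has already been updated, and
 * fsck.ocfs2 can repair the superblocks later.
 */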
static void ocfs2_update_super_and_backups(struct inode *inode,
					   int new_clusters)
{
	int ret;
	u32 clusters = 0;
	struct buffer_head *super_bh = NULL;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/*
	 * update the superblock last.
	 * It doesn't matter if the write failed.
	 */
	ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				     &super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	super_di = (struct ocfs2_dinode *)super_bh->b_data;
	le32_add_cpu(&super_di->i_clusters, new_clusters);
	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
		ret = update_backups(inode, clusters, super_bh->b_data);

out:
	brelse(super_bh);
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
			" during fs resize. This condition is not fatal,"
			" but fsck.ocfs2 should be run to fix it\n",
			osb->dev_str);
	return;
}

/*
 * Extend the filesystem to the new number of clusters specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 */
int ocfs2_group_extend(struct inode * inode, int new_clusters)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	if (new_clusters < 0)
		return -EINVAL;
	else if (new_clusters == 0)
		return 0;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * so any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
	    le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	mlog(0, "extend the last group at %llu, new clusters = %d\n",
	     (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						new_clusters);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_update_super_and_backups(main_bm_inode, new_clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}
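
/*
 * Check that the preformatted group descriptor passed in by userspace is
 * valid and matches the chain, bit count and free count claimed in the
 * ocfs2_new_group_input.
 */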
static int ocfs2_check_new_group(struct inode *inode,
				 struct ocfs2_dinode *di,
				 struct ocfs2_new_group_input *input,
				 struct buffer_head *group_bh)
{
	int ret;
	struct ocfs2_group_desc *gd =
		(struct ocfs2_group_desc *)group_bh->b_data;
	u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);

	ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (le16_to_cpu(gd->bg_chain) != input->chain)
		mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
		     "while input has %u set.\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_chain), input->chain);
	else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
		     "input has %u clusters set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits), input->clusters);
	else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
		     "but it should have %u set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_free_bits_count),
		     input->frees * cl_bpc);
	else
		ret = 0;

out:
	return ret;
}
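
/*
 * Validate a group-add request before splicing the new group into the
 * global bitmap: the group must lie beyond the current volume end, the
 * last existing group must be full, the target chain must be legal, and
 * the new group descriptor itself must pass ocfs2_check_new_group().
 */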
static int ocfs2_verify_group_and_input(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_new_group_input *input,
					struct buffer_head *group_bh)
{
	u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
	u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
	u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
	u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
	u32 total_clusters = le32_to_cpu(di->i_clusters);
	int ret = -EINVAL;

	if (cluster < total_clusters)
		mlog(ML_ERROR, "add a group which is in the current volume.\n");
	else if (input->chain >= cl_count)
		mlog(ML_ERROR, "input chain exceeds the limit.\n");
	else if (next_free != cl_count && next_free != input->chain)
		mlog(ML_ERROR,
		     "the add group should be in chain %u\n", next_free);
	else if (total_clusters + input->clusters < total_clusters)
		mlog(ML_ERROR, "add group's clusters overflow.\n");
	else if (input->clusters > cl_cpg)
		mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n");
	else if (input->frees > input->clusters)
		mlog(ML_ERROR, "the free cluster exceeds the total clusters\n");
	else if (total_clusters % cl_cpg != 0)
		mlog(ML_ERROR,
		     "the last group isn't full. Use group extend first.\n");
	else if (input->group != ocfs2_which_cluster_group(inode, cluster))
		mlog(ML_ERROR, "group blkno is invalid\n");
	else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
		mlog(ML_ERROR, "group descriptor check failed.\n");
	else
		ret = 0;

	return ret;
}

/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_free_group_bh;
	}

	mlog(0, "Add a new group %llu in chain = %u, length = %u\n",
	     (unsigned long long)input->group, input->chain, input->clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free_group_bh;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_free_group_bh:
	brelse(group_bh);
out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}