/*
 *  linux/fs/ufs/cylinder.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  ext2 - inode (block) bitmap caching inspired
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

#undef UFS_CYLINDER_DEBUG

#ifdef UFS_CYLINDER_DEBUG
#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif
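
/*
 * With UFS_CYLINDER_DEBUG defined, a call such as
 * UFSD(("ENTER, cgno %u\n", cgno)) expands to two printk()s: the
 * file/line/function prefix followed by the formatted message.  With the
 * switch left undefined, UFSD() compiles away to nothing.
 */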

/*
 * Read cylinder group into cache. The memory space for ufs_cg_private_info
 * structure is already allocated during ufs_read_super.
 */
static void ufs_read_cylinder (struct super_block * sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr))
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
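
	/*
	 * UCPI_UBH (from util.h) refers to the ufs_buffer_head view of the
	 * ucpi in scope: the cylinder group may span several device blocks,
	 * so its fragments are tracked as the array of buffer_heads filled
	 * in below.
	 */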
	UCPI_UBH->fragment = ufs_cgcmin(cgno);
	UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * We already have the first fragment of the cylinder group block in the buffer
	 */
	UCPI_UBH->bh[0] = sbi->s_ucg[cgno];
	for (i = 1; i < UCPI_UBH->count; i++)
		if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
			goto failed;
	sbi->s_cgno[bitmap_nr] = cgno;

	ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk = fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk = fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor = fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor = fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor = fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff = fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff = fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff = fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD(("EXIT\n"))
	return;

failed:
	for (j = 1; j < i; j++)
		brelse (sbi->s_ucg[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}

/*
 * Remove cylinder group from cache, doesn't release memory
 * allocated for cylinder group (this is done at ufs_put_super only).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr))

	uspi = sbi->s_uspi;
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD(("EXIT\n"))
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH);

	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}
	/*
	 * rotor is not so important data, so we put it to disk
	 * at the end of working with cylinder
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH);
	for (i = 1; i < UCPI_UBH->count; i++) {
		brelse (UCPI_UBH->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD(("EXIT\n"))
}
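
/*
 * ufs_load_cylinder() below relies on ufs_put_cylinder() to write back and
 * drop the least recently used group when the LRU cache already holds
 * UFS_MAX_GROUP_LOADED entries.
 */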

/*
 * Find cylinder group in cache and return it as pointer.
 * If cylinder group is not in cache, we will load it from disk.
 *
 * The cache is managed by LRU algorithm.
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD(("ENTER, cgno %u\n", cgno))

	uspi = sbi->s_uspi;
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group number cg is in cache and it was last used
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD(("EXIT\n"))
		return sbi->s_ucpi[0];
	}
	/*
	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD(("EXIT (FAILED)\n"))
				return NULL;
			}
			else {
				UFSD(("EXIT\n"))
				return sbi->s_ucpi[cgno];
			}
		} else {
			ufs_read_cylinder (sb, cgno, cgno);
			UFSD(("EXIT\n"))
			return sbi->s_ucpi[cgno];
		}
	}
	/*
	 * Cylinder group number cg is in cache but it was not last used,
	 * we will move to the first position
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group number cg is not in cache, we will read it from disk
	 * and put it to the first position
	 */
	} else {
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		ufs_read_cylinder (sb, cgno, 0);
	}
	UFSD(("EXIT\n"))
	return sbi->s_ucpi[0];
}
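
/*
 * Usage sketch (illustration only, never compiled): a caller such as the
 * block or inode allocator is expected to pin the in-core group through the
 * LRU cache before touching its fields.  The helper name
 * ufs_example_group_rotor() and the lock_super()/unlock_super() locking shown
 * here are assumptions for the sketch, not code that exists in this driver.
 */
#if 0
static unsigned ufs_example_group_rotor (struct super_block * sb, unsigned cgno)
{
	struct ufs_cg_private_info * ucpi;
	unsigned rotor = 0;

	lock_super (sb);
	ucpi = ufs_load_cylinder (sb, cgno);
	if (ucpi)
		/* fields were converted to CPU byte order in ufs_read_cylinder() */
		rotor = ucpi->c_rotor;
	unlock_super (sb);
	return rotor;
}
#endif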