/*
 * partition.c
 *
 * PURPOSE
 *	Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 *  12/06/98 blf  Created file.
 *
 */

#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/udf_fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
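
/*
 * Map a (partition, block, offset) triple to an absolute block number on
 * the medium.  An out-of-range partition reference fails with 0xFFFFFFFF.
 * Partition maps that need their own translation (virtual, sparable)
 * provide it via s_partition_func; plain maps just add the partition root.
 */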
inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;

	if (partition >= sbi->s_partitions) {
		udf_debug("block=%d, partition=%d, offset=%d: "
			  "invalid partition\n", block, partition, offset);
		return 0xFFFFFFFF;
	}

	map = &sbi->s_partmaps[partition];
	if (map->s_partition_func)
		return map->s_partition_func(sb, block, partition, offset);
	else
		return map->s_partition_root + block + offset;
}
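
/*
 * UDF 1.50 virtual partition: translate a virtual block number through the
 * Virtual Allocation Table (VAT) held in sbi->s_vat_inode.  Entries in the
 * first VAT block begin at s_start_offset; later VAT blocks are packed
 * with 32-bit entries.  The looked-up location is finally resolved through
 * the partition that holds the VAT inode itself.
 */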
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct buffer_head *bh = NULL;
	uint32_t newblock;
	uint32_t index;
	uint32_t loc;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_virtual_data *vdata;
	struct udf_inode_info *iinfo;

	map = &sbi->s_partmaps[partition];
	vdata = &map->s_type_specific.s_virtual;
	/* number of VAT entries that fit in the first VAT block */
	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);

	if (block > vdata->s_num_entries) {
		udf_debug("Trying to access block beyond end of VAT "
			  "(%d max %d)\n", block, vdata->s_num_entries);
		return 0xFFFFFFFF;
	}

	if (block >= index) {
		block -= index;
		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
		index = block % (sb->s_blocksize / sizeof(uint32_t));
	} else {
		newblock = 0;
		index = vdata->s_start_offset / sizeof(uint32_t) + block;
	}

	loc = udf_block_map(sbi->s_vat_inode, newblock);

	bh = sb_bread(sb, loc);
	if (!bh) {
		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
			  sb, block, partition, loc, index);
		return 0xFFFFFFFF;
	}

	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);

	brelse(bh);

	iinfo = UDF_I(sbi->s_vat_inode);
	if (iinfo->i_location.partitionReferenceNum == partition) {
		udf_debug("recursive call to udf_get_pblock!\n");
		return 0xFFFFFFFF;
	}

	return udf_get_pblock(sb, loc,
			      iinfo->i_location.partitionReferenceNum,
			      offset);
}
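
/*
 * UDF 2.00 virtual partitions reuse the 1.50 VAT lookup; differences in the
 * on-disk VAT layout are absorbed by s_start_offset when the map is set up.
 */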
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
				      uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}
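
/*
 * UDF 1.50 sparable partition: if the packet containing the requested block
 * has an entry in the sparing table, return the spared location instead.
 * Entries are sorted by original location, so the scan stops at the first
 * reserved marker (>= 0xFFFFFFF0) or once past the packet; unmapped packets
 * fall back to a plain partition-root offset.
 */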
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	int i;
	struct sparingTable *st = NULL;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	uint32_t packet;
	struct udf_sparing_data *sdata;

	map = &sbi->s_partmaps[partition];
	sdata = &map->s_type_specific.s_sparing;
	packet = (block + offset) & ~(sdata->s_packet_len - 1);

	/* use the first available copy of the sparing table */
	for (i = 0; i < 4; i++) {
		if (sdata->s_spar_map[i] != NULL) {
			st = (struct sparingTable *)
				sdata->s_spar_map[i]->b_data;
			break;
		}
	}

	if (st) {
		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
			struct sparingEntry *entry = &st->mapEntry[i];
			u32 origLoc = le32_to_cpu(entry->origLocation);

			if (origLoc >= 0xFFFFFFF0)
				break;
			else if (origLoc == packet)
				return le32_to_cpu(entry->mappedLocation) +
					((block + offset) &
					 (sdata->s_packet_len - 1));
			else if (origLoc > packet)
				break;
		}
	}

	return map->s_partition_root + block + offset;
}
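
/*
 * Remap a bad physical block to a spare one.  The sparing table of the
 * partition containing old_block is searched for the block's packet: a free
 * entry (origLocation == 0xFFFFFFFF) is claimed for it, or an existing
 * mapping is reused.  When a new entry is claimed, each on-disk copy of the
 * sparing table from the first valid one onward is updated and marked
 * dirty.  On success *new_block holds the spared location and 0 is
 * returned; 1 means no mapping could be established.
 */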
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
	struct udf_sparing_data *sdata;
	struct sparingTable *st = NULL;
	struct sparingEntry mapEntry;
	uint32_t packet;
	int i, j, k, l;
	struct udf_sb_info *sbi = UDF_SB(sb);
	u16 reallocationTableLen;
	struct buffer_head *bh;

	for (i = 0; i < sbi->s_partitions; i++) {
		struct udf_part_map *map = &sbi->s_partmaps[i];
		if (old_block > map->s_partition_root &&
		    old_block < map->s_partition_root + map->s_partition_len) {
			sdata = &map->s_type_specific.s_sparing;
			packet = (old_block - map->s_partition_root) &
				 ~(sdata->s_packet_len - 1);

			/* find the first available copy of the sparing table */
			for (j = 0; j < 4; j++)
				if (sdata->s_spar_map[j] != NULL) {
					st = (struct sparingTable *)
						sdata->s_spar_map[j]->b_data;
					break;
				}

			if (!st)
				return 1;

			reallocationTableLen =
				le16_to_cpu(st->reallocationTableLen);
			for (k = 0; k < reallocationTableLen; k++) {
				struct sparingEntry *entry = &st->mapEntry[k];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc == 0xFFFFFFFF) {
					/* free entry: claim it for this packet
					 * in every remaining table copy */
					for (; j < 4; j++) {
						int len;
						bh = sdata->s_spar_map[j];
						if (!bh)
							continue;

						st = (struct sparingTable *)
							bh->b_data;
						entry->origLocation =
							cpu_to_le32(packet);
						len =
						  sizeof(struct sparingTable) +
						  reallocationTableLen *
						  sizeof(struct sparingEntry);
						udf_update_tag((char *)st, len);
						mark_buffer_dirty(bh);
					}
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
						  map->s_partition_root) &
						 (sdata->s_packet_len - 1));
					return 0;
				} else if (origLoc == packet) {
					/* packet already spared */
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
						  map->s_partition_root) &
						 (sdata->s_packet_len - 1));
					return 0;
				} else if (origLoc > packet)
					break;
			}

			/* no entry for this packet yet: claim a free entry
			 * further down and insert it at position k so the
			 * table stays sorted by original location */
			for (l = k; l < reallocationTableLen; l++) {
				struct sparingEntry *entry = &st->mapEntry[l];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc != 0xFFFFFFFF)
					continue;

				for (; j < 4; j++) {
					bh = sdata->s_spar_map[j];
					if (!bh)
						continue;

					st = (struct sparingTable *)bh->b_data;
					mapEntry = st->mapEntry[l];
					mapEntry.origLocation =
							cpu_to_le32(packet);
					memmove(&st->mapEntry[k + 1],
						&st->mapEntry[k],
						(l - k) *
						sizeof(struct sparingEntry));
					st->mapEntry[k] = mapEntry;
					udf_update_tag((char *)st,
						sizeof(struct sparingTable) +
						reallocationTableLen *
						sizeof(struct sparingEntry));
					mark_buffer_dirty(bh);
				}

				*new_block = le32_to_cpu(
					st->mapEntry[k].mappedLocation) +
					((old_block - map->s_partition_root) &
					 (sdata->s_packet_len - 1));
				return 0;
			}

			return 1;
		}
	}

	if (i == sbi->s_partitions) {
		/* outside of partitions */
		/* for now, fail =) */
		return 1;
	}

	return 0;
}