/*
 * Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1998-2001 Ben Fennema
 *
 * 12/06/98 blf Created file.
 */
#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/udf_fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
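
/*
 * Map a partition-relative block number to an absolute block number on
 * the medium. Partition types that need special translation (virtual,
 * sparable) provide a mapping function; plain partitions simply add the
 * partition root.
 */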
inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
{
	if (partition >= UDF_SB_NUMPARTS(sb))
	{
		udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
			block, partition, offset);
		return 0xFFFFFFFF;
	}
	if (UDF_SB_PARTFUNC(sb, partition))
		return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset);
	else
		return UDF_SB_PARTROOT(sb, partition) + block + offset;
}
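
/*
 * Translate a block in a UDF 1.50 virtual partition by looking up the
 * Virtual Allocation Table (VAT): read the VAT block that holds the
 * entry for 'block', fetch the 32-bit location stored there, and
 * resolve it through the VAT inode's own partition.
 */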
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
{
	struct buffer_head *bh = NULL;
	uint32_t newblock;
	uint32_t index;
	uint32_t loc;

	/* Number of VAT entries that fit in the first VAT block */
	index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t);

	if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries)
	{
		udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
			block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
		return 0xFFFFFFFF;
	}

	if (block >= index)
	{
		block -= index;
		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
		index = block % (sb->s_blocksize / sizeof(uint32_t));
	}
	else
	{
		newblock = 0;
		index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block;
	}

	loc = udf_block_map(UDF_SB_VAT(sb), newblock);

	if (!(bh = sb_bread(sb, loc)))
	{
		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
			sb, block, partition, loc, index);
		return 0xFFFFFFFF;
	}

	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);

	brelse(bh);

	if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition)
	{
		udf_debug("recursive call to udf_get_pblock!\n");
		return 0xFFFFFFFF;
	}

	return udf_get_pblock(sb, loc, UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum, offset);
}
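
/* UDF 2.00 virtual partitions use the same VAT lookup as UDF 1.50. */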
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}
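
/*
 * Translate a block in a sparable partition: if the packet containing
 * the block has an entry in the sparing table, return the remapped
 * location; otherwise fall back to the normal partition mapping.
 */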
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
{
	int i;
	struct sparingTable *st = NULL;
	uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1);

	for (i=0; i<4; i++)
	{
		if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL)
		{
			st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data;
			break;
		}
	}

	if (st)
	{
		for (i=0; i<le16_to_cpu(st->reallocationTableLen); i++)
		{
			if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0)
				break;
			else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet)
			{
				return le32_to_cpu(st->mapEntry[i].mappedLocation) +
					((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1));
			}
			else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet)
				break;
		}
	}
	return UDF_SB_PARTROOT(sb,partition) + block + offset;
}
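
/*
 * Remap a defective packet to a spare area. Either reuse the sparing
 * table entry that already covers the packet, or claim a free entry
 * (origLocation == 0xFFFFFFFF), update every copy of the sparing table
 * on disk, and report the new location through *new_block.
 */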
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
	struct udf_sparing_data *sdata;
	struct sparingTable *st = NULL;
	struct sparingEntry mapEntry;
	uint32_t packet;
	int i, j, k, l;

	for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
	{
		if (old_block > UDF_SB_PARTROOT(sb,i) &&
		    old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i))
		{
			sdata = &UDF_SB_TYPESPAR(sb,i);
			packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1);

			for (j=0; j<4; j++)
			{
				if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL)
				{
					st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
					break;
				}
			}

			if (!st)
				return 1;

			for (k=0; k<le16_to_cpu(st->reallocationTableLen); k++)
			{
				if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF)
				{
					/* Free entry: claim it for this packet in every copy of the sparing table */
					for (; j<4; j++)
					{
						if (sdata->s_spar_map[j])
						{
							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
							st->mapEntry[k].origLocation = cpu_to_le32(packet);
							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
							mark_buffer_dirty(sdata->s_spar_map[j]);
						}
					}
					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
					return 0;
				}
				else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet)
				{
					/* Packet is already remapped */
					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
					return 0;
				}
				else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet)
					break;
			}

			for (l=k; l<le16_to_cpu(st->reallocationTableLen); l++)
			{
				if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF)
				{
					/* Free entry found further down: move it to slot k so the table stays sorted */
					for (; j<4; j++)
					{
						if (sdata->s_spar_map[j])
						{
							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
							mapEntry = st->mapEntry[l];
							mapEntry.origLocation = cpu_to_le32(packet);
							memmove(&st->mapEntry[k+1], &st->mapEntry[k], (l-k)*sizeof(struct sparingEntry));
							st->mapEntry[k] = mapEntry;
							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
							mark_buffer_dirty(sdata->s_spar_map[j]);
						}
					}
					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
					return 0;
				}
			}

			return 1;
		}
	}
	if (i == UDF_SB_NUMPARTS(sb))
	{
		/* outside of partitions */
		/* for now, fail =) */
		return 1;
	}

	return 0;
}