// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       test_index_hash.c
/// \brief      Tests src/liblzma/common/index_hash.c API functions
///
/// \note       No test included for lzma_index_hash_end since it
///             would be trivial unless tested for memory leaks
///             with something like valgrind
//
//  Author:     Jia Tan
//
///////////////////////////////////////////////////////////////////////////////

#include "tests.h"

// Needed for UNPADDED_SIZE_MIN and UNPADDED_SIZE_MAX macro definitions
// and index_size and vli_ceil4 helper functions.
#include "common/index.h"
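
// (For reference: UNPADDED_SIZE_MIN and UNPADDED_SIZE_MAX bound the valid
// range of the Unpadded Size field, and vli_ceil4() is assumed to round a
// size up to the next multiple of four, e.g. vli_ceil4(6) == 8 and
// vli_ceil4(8) == 8, which matches the Index Padding rule used in
// generate_index() below.)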


static void
test_lzma_index_hash_init(void)
{
#ifndef HAVE_DECODERS
	assert_skip("Decoder support disabled");
#else
	// First test with NULL index_hash.
	// This should create a fresh index_hash.
	lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
	assert_true(index_hash != NULL);
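	// (The second argument of lzma_index_hash_init() is the optional
	// lzma_allocator; passing NULL selects the standard malloc()/free().)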

	// Next test with non-NULL index_hash.
	lzma_index_hash *second_hash = lzma_index_hash_init(index_hash, NULL);

	// It should not create a new index_hash pointer.
	// Instead it must just re-init the first index_hash.
	assert_true(index_hash == second_hash);

	lzma_index_hash_end(index_hash, NULL);
#endif
}


static void
test_lzma_index_hash_append(void)
{
#ifndef HAVE_DECODERS
	assert_skip("Decoder support disabled");
#else
	// Test all invalid parameters
	assert_lzma_ret(lzma_index_hash_append(NULL, 0, 0),
			LZMA_PROG_ERROR);

	// Test NULL index_hash
	assert_lzma_ret(lzma_index_hash_append(NULL, UNPADDED_SIZE_MIN,
			LZMA_VLI_MAX), LZMA_PROG_ERROR);

	// Test with invalid Unpadded Size
	lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
	assert_true(index_hash != NULL);
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			UNPADDED_SIZE_MIN - 1, LZMA_VLI_MAX),
			LZMA_PROG_ERROR);

	// Test with invalid Uncompressed Size
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			UNPADDED_SIZE_MIN, LZMA_VLI_MAX + 1),
			LZMA_PROG_ERROR);

	// First append a Record describing a small Block.
	// This should succeed.
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			UNPADDED_SIZE_MIN, 1), LZMA_OK);

	// Append another small Record.
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			UNPADDED_SIZE_MIN, 1), LZMA_OK);

	// Append a Record that would cause the compressed size to grow
	// too big
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			UNPADDED_SIZE_MAX, 1), LZMA_DATA_ERROR);

	lzma_index_hash_end(index_hash, NULL);
#endif
}


#if defined(HAVE_ENCODERS) && defined(HAVE_DECODERS)
// Fill an index_hash with unpadded and uncompressed VLIs
// by calling lzma_index_hash_append
static void
fill_index_hash(lzma_index_hash *index_hash, const lzma_vli *unpadded_sizes,
		const lzma_vli *uncomp_sizes, uint32_t block_count)
{
	for (uint32_t i = 0; i < block_count; ++i)
		assert_lzma_ret(lzma_index_hash_append(index_hash,
				unpadded_sizes[i], uncomp_sizes[i]), LZMA_OK);
}


// Set the contents of buf to the expected Index based on the
// .xz specification. This needs the unpadded and uncompressed VLIs
// to correctly create the Index.
static void
generate_index(uint8_t *buf, const lzma_vli *unpadded_sizes,
		const lzma_vli *uncomp_sizes, uint32_t block_count,
		size_t index_max_size)
{
	size_t in_pos = 0;
	size_t out_pos = 0;

	// First set Index Indicator
	buf[out_pos++] = INDEX_INDICATOR;

	// Next write out Number of Records
	assert_lzma_ret(lzma_vli_encode(block_count, &in_pos, buf,
			&out_pos, index_max_size), LZMA_STREAM_END);

	// Next write out each Record.
	// A Record consists of Unpadded Size and Uncompressed Size
	// written next to each other as VLIs.
	for (uint32_t i = 0; i < block_count; ++i) {
		in_pos = 0;
		assert_lzma_ret(lzma_vli_encode(unpadded_sizes[i], &in_pos,
				buf, &out_pos, index_max_size),
				LZMA_STREAM_END);
		in_pos = 0;
		assert_lzma_ret(lzma_vli_encode(uncomp_sizes[i], &in_pos,
				buf, &out_pos, index_max_size),
				LZMA_STREAM_END);
	}

	// Add Index Padding
	lzma_vli rounded_out_pos = vli_ceil4(out_pos);
	memzero(buf + out_pos, rounded_out_pos - out_pos);
	out_pos = rounded_out_pos;

	// Add the CRC32
	write32le(buf + out_pos, lzma_crc32(buf, out_pos, 0));
	out_pos += 4;

	assert_uint_eq(out_pos, index_max_size);
}
#endif
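
// For reference, the buffer that generate_index() fills follows the Index
// layout from the .xz file format specification:
//
//     Index Indicator (0x00)   1 byte
//     Number of Records        VLI
//     List of Records          Unpadded Size + Uncompressed Size as VLIs
//                              for each Record
//     Index Padding            0-3 null bytes up to a multiple of four
//     CRC32                    4 bytes, calculated over everything above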


static void
test_lzma_index_hash_decode(void)
{
#if !defined(HAVE_ENCODERS) || !defined(HAVE_DECODERS)
	assert_skip("Encoder or decoder support disabled");
#else
	lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
	assert_true(index_hash != NULL);

	size_t in_pos = 0;

	// Six valid values for the Unpadded Size fields in an Index
	const lzma_vli unpadded_sizes[6] = {
		UNPADDED_SIZE_MIN,
		1000,
		4000,
		8000,
		16000,
		32000
	};

	// Six valid values for the Uncompressed Size fields in an Index.
	// (The exact values are not significant; any valid Uncompressed
	// Sizes work here.)
	const lzma_vli uncomp_sizes[6] = {
		1,
		500,
		8000,
		20,
		1,
		500
	};

	// Add two Records to an index_hash
	fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 2);

	const lzma_vli size_two_records = lzma_index_hash_size(index_hash);
	assert_uint(size_two_records, >, 0);
	uint8_t *index_two_records = tuktest_malloc(size_two_records);

	generate_index(index_two_records, unpadded_sizes, uncomp_sizes, 2,
			size_two_records);

	// First test for basic buffer size error
	in_pos = size_two_records + 1;
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_two_records, &in_pos,
			size_two_records), LZMA_BUF_ERROR);
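	// (in_pos was set past the end of the buffer above, so there is
	// nothing to read; lzma_index_hash_decode() is expected to catch
	// this itself and report LZMA_BUF_ERROR, since applications call
	// it directly rather than through lzma_code().)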

	// Next test for invalid Index Indicator
	in_pos = 0;
	index_two_records[0] ^= 1;
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_two_records, &in_pos,
			size_two_records), LZMA_DATA_ERROR);
	index_two_records[0] ^= 1;

	// Next decode the valid Index; it should match the index_hash.
	in_pos = 0;
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_two_records, &in_pos,
			size_two_records), LZMA_STREAM_END);

	// Next test an index_hash with three Records
	index_hash = lzma_index_hash_init(index_hash, NULL);
	fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 3);

	const lzma_vli size_three_records = lzma_index_hash_size(
			index_hash);
	assert_uint(size_three_records, >, 0);
	uint8_t *index_three_records = tuktest_malloc(size_three_records);

	generate_index(index_three_records, unpadded_sizes, uncomp_sizes,
			3, size_three_records);

	in_pos = 0;
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_three_records, &in_pos,
			size_three_records), LZMA_STREAM_END);

	// Next test an index_hash with five Records
	index_hash = lzma_index_hash_init(index_hash, NULL);
	fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 5);

	const lzma_vli size_five_records = lzma_index_hash_size(
			index_hash);
	assert_uint(size_five_records, >, 0);
	uint8_t *index_five_records = tuktest_malloc(size_five_records);

	generate_index(index_five_records, unpadded_sizes, uncomp_sizes, 5,
			size_five_records);

	// Instead of providing all of the input at once, give it to the
	// decoder one byte at a time.
	in_pos = 0;
	for (lzma_vli i = 0; i < size_five_records - 1; ++i) {
		assert_lzma_ret(lzma_index_hash_decode(index_hash,
				index_five_records, &in_pos, in_pos + 1),
				LZMA_OK);
	}

	// The last byte should return LZMA_STREAM_END.
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_five_records, &in_pos,
			in_pos + 1), LZMA_STREAM_END);
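	// (Each call above may consume at most one new byte because in_size
	// is capped at in_pos + 1; a return value of LZMA_OK means that
	// more input is still needed.)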

	// Next test an index_hash that was given an incorrect Unpadded
	// Size. The decoder should detect this and report LZMA_DATA_ERROR.
	index_hash = lzma_index_hash_init(index_hash, NULL);
	fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 5);
	// The sixth Record gets an Unpadded Size that will not match the
	// Index generated below.
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			unpadded_sizes[5] + 1,
			uncomp_sizes[5]), LZMA_OK);

	const lzma_vli size_six_records = lzma_index_hash_size(
			index_hash);

	assert_uint(size_six_records, >, 0);
	uint8_t *index_six_records = tuktest_malloc(size_six_records);

	generate_index(index_six_records, unpadded_sizes, uncomp_sizes, 6,
			size_six_records);
	in_pos = 0;
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_six_records, &in_pos,
			size_six_records), LZMA_DATA_ERROR);

	// Next test an Index that is corrupt (invalid CRC32).
	// The decoder should detect this and report LZMA_DATA_ERROR.
	index_hash = lzma_index_hash_init(index_hash, NULL);
	fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 2);

	index_two_records[size_two_records - 1] ^= 1;

	in_pos = 0;
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_two_records, &in_pos,
			size_two_records), LZMA_DATA_ERROR);

	// Next test with an Index and an index_hash that do not match
	// in one Record.
	index_hash = lzma_index_hash_init(index_hash, NULL);
	fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 2);

	// Regenerate the Index with a mismatched Unpadded Size.
	const lzma_vli unpadded_sizes_invalid[2] = {
		unpadded_sizes[0],
		unpadded_sizes[1] + 1
	};

	generate_index(index_two_records, unpadded_sizes_invalid,
			uncomp_sizes, 2, size_two_records);

	in_pos = 0;
	assert_lzma_ret(lzma_index_hash_decode(index_hash,
			index_two_records, &in_pos,
			size_two_records), LZMA_DATA_ERROR);

	lzma_index_hash_end(index_hash, NULL);
#endif
}


static void
test_lzma_index_hash_size(void)
{
#ifndef HAVE_DECODERS
	assert_skip("Decoder support disabled");
#else
	lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
	assert_true(index_hash != NULL);

	// First test empty index_hash.
	// Expected size should be:
	// Index Indicator - 1 byte
	// Number of Records - 1 byte
	// List of Records - 0 bytes
	// Index Padding - 2 bytes
	// CRC32 - 4 bytes
	// Total - 8 bytes
	assert_uint_eq(lzma_index_hash_size(index_hash), 8);

	// Append a Record describing a small Block to the index_hash
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			UNPADDED_SIZE_MIN, 1), LZMA_OK);
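	// (UNPADDED_SIZE_MIN and the Uncompressed Size of 1 are both below
	// 128, so each encodes as a single-byte VLI and this Record adds
	// two bytes to the List of Records, as broken down below.)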

	// Expected size should be:
	// Index Indicator - 1 byte
	// Number of Records - 1 byte
	// List of Records - 2 bytes
	// Index Padding - 0 bytes
	// CRC32 - 4 bytes
	// Total - 8 bytes
	lzma_vli expected_size = 8;
	assert_uint_eq(lzma_index_hash_size(index_hash), expected_size);

	// Append additional small Record
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			UNPADDED_SIZE_MIN, 1), LZMA_OK);

	// Expected size should be:
	// Index Indicator - 1 byte
	// Number of Records - 1 byte
	// List of Records - 4 bytes
	// Index Padding - 2 bytes
	// CRC32 - 4 bytes
	// Total - 12 bytes
	expected_size = 12;
	assert_uint_eq(lzma_index_hash_size(index_hash), expected_size);

	// Append a larger Record to the index_hash (3 bytes for each VLI)
	const lzma_vli three_byte_vli = 0x10000;
	assert_lzma_ret(lzma_index_hash_append(index_hash,
			three_byte_vli, three_byte_vli), LZMA_OK);
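	// (0x10000 needs 17 bits, which does not fit in the 14 bits covered
	// by a two-byte VLI, so both fields of this Record encode as
	// three-byte VLIs.)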

	// Expected size should be:
	// Index Indicator - 1 byte
	// Number of Records - 1 byte
	// List of Records - 10 bytes
	// Index Padding - 0 bytes
	// CRC32 - 4 bytes
	// Total - 16 bytes
	expected_size = 16;
	assert_uint_eq(lzma_index_hash_size(index_hash), expected_size);

	lzma_index_hash_end(index_hash, NULL);
#endif
}


extern int
main(int argc, char **argv)
{
	tuktest_start(argc, argv);
	tuktest_run(test_lzma_index_hash_init);
	tuktest_run(test_lzma_index_hash_append);
	tuktest_run(test_lzma_index_hash_decode);
	tuktest_run(test_lzma_index_hash_size);
	return tuktest_end();
}