pack-revindex.c

#include "cache.h"
#include "gettext.h"
#include "pack-revindex.h"
#include "object-store.h"
#include "packfile.h"
#include "trace2.h"
#include "config.h"
#include "midx.h"

struct revindex_entry {
	off_t offset;
	unsigned int nr;
};

/*
 * The pack index for existing packs gives us easy access to the offsets into
 * the corresponding pack file where each object's data starts, but the entries
 * do not store the size of the compressed representation (uncompressed
 * size is easily available by examining the pack entry header). It is
 * also rather expensive to find the sha1 for an object given its offset.
 *
 * The pack index file is sorted by object name mapping to offset;
 * this revindex array is a list of offset/index_nr pairs
 * ordered by offset, so if you know the offset of an object, the next offset
 * is where its packed representation ends and the index_nr can be used to
 * get the object sha1 from the main index.
 */
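
/*
 * Illustrative example (not part of the on-disk format): for a pack whose
 * index lists objects A, B and C starting at offsets 4000, 12 and 1500, the
 * revindex is sorted by offset as
 *
 *	{ offset = 12,   nr = <index position of B> }
 *	{ offset = 1500, nr = <index position of C> }
 *	{ offset = 4000, nr = <index position of A> }
 *
 * so, for instance, the compressed data for C occupies bytes [1500, 4000)
 * of the pack.
 */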

/*
 * This is a least-significant-digit radix sort.
 *
 * It sorts each of the "n" items in "entries" by its offset field. The "max"
 * parameter must be at least as large as the largest offset in the array,
 * and lets us quit the sort early.
 */
static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
{
	/*
	 * We use a "digit" size of 16 bits. That keeps our memory
	 * usage reasonable, and we can generally (for a 4G or smaller
	 * packfile) quit after two rounds of radix-sorting.
	 */
#define DIGIT_SIZE (16)
#define BUCKETS (1 << DIGIT_SIZE)
	/*
	 * We want to know the bucket that a[i] will go into when we are using
	 * the digit that is N bits from the (least significant) end.
	 */
#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
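	/*
	 * For illustration only: with BUCKET_FOR as defined above, an entry
	 * at offset 0x12345678 lands in bucket 0x5678 on the first pass
	 * (bits == 0) and in bucket 0x1234 on the second pass (bits == 16).
	 */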

	/*
	 * We need O(n) temporary storage. Rather than do an extra copy of the
	 * partial results into "entries", we sort back and forth between the
	 * real array and temporary storage. In each iteration of the loop, we
	 * keep track of them with alias pointers, always sorting from "from"
	 * to "to".
	 */
	struct revindex_entry *tmp, *from, *to;
	int bits;
	unsigned *pos;

	ALLOC_ARRAY(pos, BUCKETS);
	ALLOC_ARRAY(tmp, n);
	from = entries;
	to = tmp;

	/*
	 * If (max >> bits) is zero, then we know that the radix digit we are
	 * on (and any higher) will be zero for all entries, and our loop will
	 * be a no-op, as everybody lands in the same zero-th bucket.
	 */
	for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
		unsigned i;

		memset(pos, 0, BUCKETS * sizeof(*pos));

		/*
		 * We want pos[i] to store the index of the last element that
		 * will go in bucket "i" (actually one past the last element).
		 * To do this, we first count the items that will go in each
		 * bucket, which gives us a relative offset from the last
		 * bucket. We can then cumulatively add the index from the
		 * previous bucket to get the true index.
		 */
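		/*
		 * A small worked example: if buckets 0, 1 and 2 receive 3, 0
		 * and 2 items respectively, the counting pass below leaves
		 * pos[] as { 3, 0, 2, ... } and the cumulative pass turns it
		 * into { 3, 3, 5, ... }, i.e. one past the last slot that
		 * each bucket will occupy.
		 */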
		for (i = 0; i < n; i++)
			pos[BUCKET_FOR(from, i, bits)]++;
		for (i = 1; i < BUCKETS; i++)
			pos[i] += pos[i-1];

		/*
		 * Now we can drop the elements into their correct buckets (in
		 * our temporary array). We iterate the pos counter backwards
		 * to avoid using an extra index to count up. And since we are
		 * going backwards there, we must also go backwards through the
		 * array itself, to keep the sort stable.
		 *
		 * Note that we use an unsigned iterator to make sure we can
		 * handle 2^32-1 objects, even on a 32-bit system. But this
		 * means we cannot use the more obvious "i >= 0" loop condition
		 * for counting backwards, and must instead check for
		 * wrap-around with UINT_MAX.
		 */
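		/*
		 * For instance, with n == 3 the loop below visits i = 2, 1, 0;
		 * decrementing the unsigned 0 then wraps to UINT_MAX, which is
		 * what terminates the loop.
		 */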
		for (i = n - 1; i != UINT_MAX; i--)
			to[--pos[BUCKET_FOR(from, i, bits)]] = from[i];

		/*
		 * Now "to" contains the most sorted list, so we swap "from" and
		 * "to" for the next iteration.
		 */
		SWAP(from, to);
	}

	/*
	 * If we ended with our data in the original array, great. If not,
	 * we have to move it back from the temporary storage.
	 */
	if (from != entries)
		COPY_ARRAY(entries, tmp, n);
	free(tmp);
	free(pos);

#undef BUCKET_FOR
#undef BUCKETS
#undef DIGIT_SIZE
}

/*
 * Ordered list of offsets of objects in the pack.
 */
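/*
 * Orientation note on the pack .idx format that the function below parses:
 * a version-2 index stores 31-bit offsets in its offset table, and an entry
 * whose most-significant bit is set instead indexes a following table of
 * 64-bit offsets. The 0x80000000 check below relies on that encoding.
 */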
static void create_pack_revindex(struct packed_git *p)
{
	const unsigned num_ent = p->num_objects;
	unsigned i;
	const char *index = p->index_data;
	const unsigned hashsz = the_hash_algo->rawsz;

	ALLOC_ARRAY(p->revindex, num_ent + 1);
	index += 4 * 256;

	if (p->index_version > 1) {
		const uint32_t *off_32 =
			(uint32_t *)(index + 8 + (size_t)p->num_objects * (hashsz + 4));
		const uint32_t *off_64 = off_32 + p->num_objects;
		for (i = 0; i < num_ent; i++) {
			const uint32_t off = ntohl(*off_32++);
			if (!(off & 0x80000000)) {
				p->revindex[i].offset = off;
			} else {
				p->revindex[i].offset = get_be64(off_64);
				off_64 += 2;
			}
			p->revindex[i].nr = i;
		}
	} else {
		for (i = 0; i < num_ent; i++) {
			const uint32_t hl = *((uint32_t *)(index + (hashsz + 4) * i));
			p->revindex[i].offset = ntohl(hl);
			p->revindex[i].nr = i;
		}
	}

	/*
	 * This knows the pack format -- the hash trailer
	 * follows immediately after the last object data.
	 */
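	/*
	 * Because of the extra trailer entry added below, the compressed size
	 * of the object at position i can always be computed as
	 * revindex[i + 1].offset - revindex[i].offset, even for the last
	 * object in the pack.
	 */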
	p->revindex[num_ent].offset = p->pack_size - hashsz;
	p->revindex[num_ent].nr = -1;
	sort_revindex(p->revindex, num_ent, p->pack_size);
}

static int create_pack_revindex_in_memory(struct packed_git *p)
{
	if (git_env_bool(GIT_TEST_REV_INDEX_DIE_IN_MEMORY, 0))
		die("dying as requested by '%s'",
		    GIT_TEST_REV_INDEX_DIE_IN_MEMORY);
	if (open_pack_index(p))
		return -1;
	create_pack_revindex(p);
	return 0;
}

static char *pack_revindex_filename(struct packed_git *p)
{
	size_t len;
	if (!strip_suffix(p->pack_name, ".pack", &len))
		BUG("pack_name does not end in .pack");
	return xstrfmt("%.*s.rev", (int)len, p->pack_name);
}

#define RIDX_HEADER_SIZE (12)
#define RIDX_MIN_SIZE (RIDX_HEADER_SIZE + (2 * the_hash_algo->rawsz))

struct revindex_header {
	uint32_t signature;
	uint32_t version;
	uint32_t hash_id;
};
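
/*
 * Rough sketch of the on-disk ".rev" layout the loader below expects (see the
 * reverse-index section of the pack format documentation for the
 * authoritative description): a 12-byte header (the "RIDX" signature, a
 * version, and a hash-id field), followed by one network-order uint32_t per
 * object giving index positions in pack order, followed by a trailer of two
 * hashes. RIDX_MIN_SIZE above is therefore the size of a reverse index for a
 * pack with no objects.
 */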

static int load_revindex_from_disk(char *revindex_name,
				   uint32_t num_objects,
				   const uint32_t **data_p, size_t *len_p)
{
	int fd, ret = 0;
	struct stat st;
	void *data = NULL;
	size_t revindex_size;
	struct revindex_header *hdr;

	fd = git_open(revindex_name);

	if (fd < 0) {
		ret = -1;
		goto cleanup;
	}
	if (fstat(fd, &st)) {
		ret = error_errno(_("failed to read %s"), revindex_name);
		goto cleanup;
	}

	revindex_size = xsize_t(st.st_size);

	if (revindex_size < RIDX_MIN_SIZE) {
		ret = error(_("reverse-index file %s is too small"), revindex_name);
		goto cleanup;
	}

	if (revindex_size - RIDX_MIN_SIZE != st_mult(sizeof(uint32_t), num_objects)) {
		ret = error(_("reverse-index file %s is corrupt"), revindex_name);
		goto cleanup;
	}

	data = xmmap(NULL, revindex_size, PROT_READ, MAP_PRIVATE, fd, 0);
	hdr = data;

	if (ntohl(hdr->signature) != RIDX_SIGNATURE) {
		ret = error(_("reverse-index file %s has unknown signature"), revindex_name);
		goto cleanup;
	}
	if (ntohl(hdr->version) != 1) {
		ret = error(_("reverse-index file %s has unsupported version %"PRIu32),
			    revindex_name, ntohl(hdr->version));
		goto cleanup;
	}
	if (!(ntohl(hdr->hash_id) == 1 || ntohl(hdr->hash_id) == 2)) {
		ret = error(_("reverse-index file %s has unsupported hash id %"PRIu32),
			    revindex_name, ntohl(hdr->hash_id));
		goto cleanup;
	}

cleanup:
	if (ret) {
		if (data)
			munmap(data, revindex_size);
	} else {
		*len_p = revindex_size;
		*data_p = (const uint32_t *)data;
	}

	if (fd >= 0)
		close(fd);
	return ret;
}

static int load_pack_revindex_from_disk(struct packed_git *p)
{
	char *revindex_name;
	int ret;
	if (open_pack_index(p))
		return -1;

	revindex_name = pack_revindex_filename(p);

	ret = load_revindex_from_disk(revindex_name,
				      p->num_objects,
				      &p->revindex_map,
				      &p->revindex_size);
	if (ret)
		goto cleanup;

	p->revindex_data = (const uint32_t *)((const char *)p->revindex_map + RIDX_HEADER_SIZE);

cleanup:
	free(revindex_name);
	return ret;
}

int load_pack_revindex(struct packed_git *p)
{
	if (p->revindex || p->revindex_data)
		return 0;

	if (!load_pack_revindex_from_disk(p))
		return 0;
	else if (!create_pack_revindex_in_memory(p))
		return 0;
	return -1;
}
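
/*
 * Typical usage sketch (callers live elsewhere in git; shown only for
 * illustration): after a successful load_pack_revindex(p), a caller can map
 * between pack order and index order, e.g.
 *
 *	uint32_t pos;
 *	if (!offset_to_pack_pos(p, ofs, &pos)) {
 *		uint32_t index_nr = pack_pos_to_index(p, pos);
 *		off_t end = pack_pos_to_offset(p, pos + 1);
 *	}
 *
 * where "ofs" is assumed to be the starting offset of some object in "p",
 * and "end" is where that object's packed representation stops.
 */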

int load_midx_revindex(struct multi_pack_index *m)
{
	struct strbuf revindex_name = STRBUF_INIT;
	int ret;

	if (m->revindex_data)
		return 0;

	if (m->chunk_revindex) {
		/*
		 * If the MIDX `m` has a `RIDX` chunk, then use its contents for
		 * the reverse index instead of trying to load a separate `.rev`
		 * file.
		 *
		 * Note that we do *not* set `m->revindex_map` here, since we do
		 * not want to accidentally call munmap() in the middle of the
		 * MIDX.
		 */
		trace2_data_string("load_midx_revindex", the_repository,
				   "source", "midx");
		m->revindex_data = (const uint32_t *)m->chunk_revindex;
		return 0;
	}

	trace2_data_string("load_midx_revindex", the_repository,
			   "source", "rev");

	get_midx_rev_filename(&revindex_name, m);

	ret = load_revindex_from_disk(revindex_name.buf,
				      m->num_objects,
				      &m->revindex_map,
				      &m->revindex_len);
	if (ret)
		goto cleanup;

	m->revindex_data = (const uint32_t *)((const char *)m->revindex_map + RIDX_HEADER_SIZE);

cleanup:
	strbuf_release(&revindex_name);
	return ret;
}

int close_midx_revindex(struct multi_pack_index *m)
{
	if (!m || !m->revindex_map)
		return 0;

	munmap((void*)m->revindex_map, m->revindex_len);

	m->revindex_map = NULL;
	m->revindex_data = NULL;
	m->revindex_len = 0;

	return 0;
}

int offset_to_pack_pos(struct packed_git *p, off_t ofs, uint32_t *pos)
{
	unsigned lo, hi;

	if (load_pack_revindex(p) < 0)
		return -1;

	lo = 0;
	hi = p->num_objects + 1;

	do {
		const unsigned mi = lo + (hi - lo) / 2;
		off_t got = pack_pos_to_offset(p, mi);

		if (got == ofs) {
			*pos = mi;
			return 0;
		} else if (ofs < got)
			hi = mi;
		else
			lo = mi + 1;
	} while (lo < hi);

	error("bad offset for revindex");
	return -1;
}

uint32_t pack_pos_to_index(struct packed_git *p, uint32_t pos)
{
	if (!(p->revindex || p->revindex_data))
		BUG("pack_pos_to_index: reverse index not yet loaded");
	if (p->num_objects <= pos)
		BUG("pack_pos_to_index: out-of-bounds object at %"PRIu32, pos);

	if (p->revindex)
		return p->revindex[pos].nr;
	else
		return get_be32(p->revindex_data + pos);
}

off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos)
{
	if (!(p->revindex || p->revindex_data))
		BUG("pack_pos_to_offset: reverse index not yet loaded");
	if (p->num_objects < pos)
		BUG("pack_pos_to_offset: out-of-bounds object at %"PRIu32, pos);

	if (p->revindex)
		return p->revindex[pos].offset;
	else if (pos == p->num_objects)
		return p->pack_size - the_hash_algo->rawsz;
	else
		return nth_packed_object_offset(p, pack_pos_to_index(p, pos));
}

uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos)
{
	if (!m->revindex_data)
		BUG("pack_pos_to_midx: reverse index not yet loaded");
	if (m->num_objects <= pos)
		BUG("pack_pos_to_midx: out-of-bounds object at %"PRIu32, pos);
	return get_be32(m->revindex_data + pos);
}

struct midx_pack_key {
	uint32_t pack;
	off_t offset;

	uint32_t preferred_pack;
	struct multi_pack_index *midx;
};

static int midx_pack_order_cmp(const void *va, const void *vb)
{
	const struct midx_pack_key *key = va;
	struct multi_pack_index *midx = key->midx;

	uint32_t versus = pack_pos_to_midx(midx, (uint32_t*)vb - (const uint32_t *)midx->revindex_data);
	uint32_t versus_pack = nth_midxed_pack_int_id(midx, versus);
	off_t versus_offset;

	uint32_t key_preferred = key->pack == key->preferred_pack;
	uint32_t versus_preferred = versus_pack == key->preferred_pack;

	/*
	 * First, compare the preferred-ness, noting that the preferred pack
	 * comes first.
	 */
	if (key_preferred && !versus_preferred)
		return -1;
	else if (!key_preferred && versus_preferred)
		return 1;

	/* Then, break ties first by comparing the pack IDs. */
	if (key->pack < versus_pack)
		return -1;
	else if (key->pack > versus_pack)
		return 1;

	/* Finally, break ties by comparing offsets within a pack. */
	versus_offset = nth_midxed_offset(midx, versus);
	if (key->offset < versus_offset)
		return -1;
	else if (key->offset > versus_offset)
		return 1;

	return 0;
}

int midx_to_pack_pos(struct multi_pack_index *m, uint32_t at, uint32_t *pos)
{
	struct midx_pack_key key;
	uint32_t *found;

	if (!m->revindex_data)
		BUG("midx_to_pack_pos: reverse index not yet loaded");
	if (m->num_objects <= at)
		BUG("midx_to_pack_pos: out-of-bounds object at %"PRIu32, at);

	key.pack = nth_midxed_pack_int_id(m, at);
	key.offset = nth_midxed_offset(m, at);
	key.midx = m;
	/*
	 * The preferred pack sorts first, so determine its identifier by
	 * looking at the first object in pseudo-pack order.
	 *
	 * Note that if no --preferred-pack is explicitly given when writing a
	 * multi-pack index, then whichever pack has the lowest identifier
	 * implicitly is preferred (and includes all its objects, since ties are
	 * broken first by pack identifier).
	 */
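	/*
	 * Illustration (not taken from the code above): with three packs whose
	 * integer IDs are 0, 1 and 2, and pack 1 preferred, pseudo-pack order
	 * lists pack 1's objects first (sorted by offset), then pack 0's,
	 * then pack 2's, which is exactly the ordering midx_pack_order_cmp()
	 * encodes.
	 */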
	key.preferred_pack = nth_midxed_pack_int_id(m, pack_pos_to_midx(m, 0));

	found = bsearch(&key, m->revindex_data, m->num_objects,
			sizeof(*m->revindex_data), midx_pack_order_cmp);

	if (!found)
		return error("bad offset for revindex");

	*pos = found - m->revindex_data;
	return 0;
}