7 #include "object-store.h"
/* Multi-pack-index (MIDX) on-disk format constants. */
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1

/* Byte offsets of individual header fields within the mapped file. */
#define MIDX_BYTE_FILE_VERSION 4
#define MIDX_BYTE_HASH_VERSION 5
#define MIDX_BYTE_NUM_CHUNKS 6
#define MIDX_BYTE_NUM_PACKS 8

#define MIDX_HASH_VERSION 1
#define MIDX_HEADER_SIZE 12
#define MIDX_HASH_LEN 20 /* bytes per object id */

/* Smallest valid file: header plus one trailing hash. */
#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + MIDX_HASH_LEN)

#define MIDX_MAX_CHUNKS 5
#define MIDX_CHUNK_ALIGNMENT 4

/* Four-letter chunk identifiers, stored big-endian in the chunk lookup table. */
#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */

/* One chunk-lookup row: 4-byte chunk id + 8-byte file offset. */
#define MIDX_CHUNKLOOKUP_WIDTH (sizeof(uint32_t) + sizeof(uint64_t))
#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)

/* One object-offset row: 4-byte pack id + 4-byte offset word. */
#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))

/* High bit set in an offset word: the rest indexes the LOFF chunk. */
#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
/*
 * Return a newly allocated path to the multi-pack-index file inside
 * the given object directory. The caller owns the returned string.
 */
static char *get_midx_filename(const char *object_dir)
{
	return xstrfmt("%s/pack/multi-pack-index", object_dir);
}
40 struct multi_pack_index
*load_multi_pack_index(const char *object_dir
)
42 struct multi_pack_index
*m
= NULL
;
46 void *midx_map
= NULL
;
47 uint32_t hash_version
;
48 char *midx_name
= get_midx_filename(object_dir
);
50 const char *cur_pack_name
;
52 fd
= git_open(midx_name
);
57 error_errno(_("failed to read %s"), midx_name
);
61 midx_size
= xsize_t(st
.st_size
);
63 if (midx_size
< MIDX_MIN_SIZE
) {
64 error(_("multi-pack-index file %s is too small"), midx_name
);
68 FREE_AND_NULL(midx_name
);
70 midx_map
= xmmap(NULL
, midx_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
72 FLEX_ALLOC_MEM(m
, object_dir
, object_dir
, strlen(object_dir
));
75 m
->data_len
= midx_size
;
77 m
->signature
= get_be32(m
->data
);
78 if (m
->signature
!= MIDX_SIGNATURE
) {
79 error(_("multi-pack-index signature 0x%08x does not match signature 0x%08x"),
80 m
->signature
, MIDX_SIGNATURE
);
84 m
->version
= m
->data
[MIDX_BYTE_FILE_VERSION
];
85 if (m
->version
!= MIDX_VERSION
) {
86 error(_("multi-pack-index version %d not recognized"),
91 hash_version
= m
->data
[MIDX_BYTE_HASH_VERSION
];
92 if (hash_version
!= MIDX_HASH_VERSION
) {
93 error(_("hash version %u does not match"), hash_version
);
96 m
->hash_len
= MIDX_HASH_LEN
;
98 m
->num_chunks
= m
->data
[MIDX_BYTE_NUM_CHUNKS
];
100 m
->num_packs
= get_be32(m
->data
+ MIDX_BYTE_NUM_PACKS
);
102 for (i
= 0; i
< m
->num_chunks
; i
++) {
103 uint32_t chunk_id
= get_be32(m
->data
+ MIDX_HEADER_SIZE
+
104 MIDX_CHUNKLOOKUP_WIDTH
* i
);
105 uint64_t chunk_offset
= get_be64(m
->data
+ MIDX_HEADER_SIZE
+ 4 +
106 MIDX_CHUNKLOOKUP_WIDTH
* i
);
109 case MIDX_CHUNKID_PACKNAMES
:
110 m
->chunk_pack_names
= m
->data
+ chunk_offset
;
113 case MIDX_CHUNKID_OIDFANOUT
:
114 m
->chunk_oid_fanout
= (uint32_t *)(m
->data
+ chunk_offset
);
117 case MIDX_CHUNKID_OIDLOOKUP
:
118 m
->chunk_oid_lookup
= m
->data
+ chunk_offset
;
121 case MIDX_CHUNKID_OBJECTOFFSETS
:
122 m
->chunk_object_offsets
= m
->data
+ chunk_offset
;
125 case MIDX_CHUNKID_LARGEOFFSETS
:
126 m
->chunk_large_offsets
= m
->data
+ chunk_offset
;
130 die(_("terminating multi-pack-index chunk id appears earlier than expected"));
135 * Do nothing on unrecognized chunks, allowing future
136 * extensions to add optional chunks.
142 if (!m
->chunk_pack_names
)
143 die(_("multi-pack-index missing required pack-name chunk"));
144 if (!m
->chunk_oid_fanout
)
145 die(_("multi-pack-index missing required OID fanout chunk"));
146 if (!m
->chunk_oid_lookup
)
147 die(_("multi-pack-index missing required OID lookup chunk"));
148 if (!m
->chunk_object_offsets
)
149 die(_("multi-pack-index missing required object offsets chunk"));
151 m
->num_objects
= ntohl(m
->chunk_oid_fanout
[255]);
153 m
->pack_names
= xcalloc(m
->num_packs
, sizeof(*m
->pack_names
));
155 cur_pack_name
= (const char *)m
->chunk_pack_names
;
156 for (i
= 0; i
< m
->num_packs
; i
++) {
157 m
->pack_names
[i
] = cur_pack_name
;
159 cur_pack_name
+= strlen(cur_pack_name
) + 1;
161 if (i
&& strcmp(m
->pack_names
[i
], m
->pack_names
[i
- 1]) <= 0) {
162 error(_("multi-pack-index pack names out of order: '%s' before '%s'"),
163 m
->pack_names
[i
- 1],
175 munmap(midx_map
, midx_size
);
181 int prepare_multi_pack_index_one(struct repository
*r
, const char *object_dir
)
183 struct multi_pack_index
*m
= r
->objects
->multi_pack_index
;
184 struct multi_pack_index
*m_search
;
187 if (repo_config_get_bool(r
, "core.multipackindex", &config_value
) ||
191 for (m_search
= m
; m_search
; m_search
= m_search
->next
)
192 if (!strcmp(object_dir
, m_search
->object_dir
))
195 r
->objects
->multi_pack_index
= load_multi_pack_index(object_dir
);
197 if (r
->objects
->multi_pack_index
) {
198 r
->objects
->multi_pack_index
->next
= m
;
205 static size_t write_midx_header(struct hashfile
*f
,
206 unsigned char num_chunks
,
209 unsigned char byte_values
[4];
211 hashwrite_be32(f
, MIDX_SIGNATURE
);
212 byte_values
[0] = MIDX_VERSION
;
213 byte_values
[1] = MIDX_HASH_VERSION
;
214 byte_values
[2] = num_chunks
;
215 byte_values
[3] = 0; /* unused */
216 hashwrite(f
, byte_values
, sizeof(byte_values
));
217 hashwrite_be32(f
, num_packs
);
219 return MIDX_HEADER_SIZE
;
/*
 * Scratch state accumulated while scanning the pack directory for the
 * packfiles that a new multi-pack-index will cover.
 */
struct pack_list {
	struct packed_git **list; /* opened packs, parallel to names */
	char **names;             /* pack-index file names, owned strings */
	uint32_t nr;
	uint32_t alloc_list;
	uint32_t alloc_names;
	size_t pack_name_concat_len; /* total name bytes incl. NUL terminators */
};
231 static void add_pack_to_midx(const char *full_path
, size_t full_path_len
,
232 const char *file_name
, void *data
)
234 struct pack_list
*packs
= (struct pack_list
*)data
;
236 if (ends_with(file_name
, ".idx")) {
237 ALLOC_GROW(packs
->list
, packs
->nr
+ 1, packs
->alloc_list
);
238 ALLOC_GROW(packs
->names
, packs
->nr
+ 1, packs
->alloc_names
);
240 packs
->list
[packs
->nr
] = add_packed_git(full_path
,
244 if (!packs
->list
[packs
->nr
]) {
245 warning(_("failed to add packfile '%s'"),
250 if (open_pack_index(packs
->list
[packs
->nr
])) {
251 warning(_("failed to open pack-index '%s'"),
253 close_pack(packs
->list
[packs
->nr
]);
254 FREE_AND_NULL(packs
->list
[packs
->nr
]);
258 packs
->names
[packs
->nr
] = xstrdup(file_name
);
259 packs
->pack_name_concat_len
+= strlen(file_name
) + 1;
/* Pairs a pack's original integer id with its name, for sorting. */
struct pack_pair {
	uint32_t pack_int_id;
	char *pack_name;
};
269 static int pack_pair_compare(const void *_a
, const void *_b
)
271 struct pack_pair
*a
= (struct pack_pair
*)_a
;
272 struct pack_pair
*b
= (struct pack_pair
*)_b
;
273 return strcmp(a
->pack_name
, b
->pack_name
);
276 static void sort_packs_by_name(char **pack_names
, uint32_t nr_packs
, uint32_t *perm
)
279 struct pack_pair
*pairs
;
281 ALLOC_ARRAY(pairs
, nr_packs
);
283 for (i
= 0; i
< nr_packs
; i
++) {
284 pairs
[i
].pack_int_id
= i
;
285 pairs
[i
].pack_name
= pack_names
[i
];
288 QSORT(pairs
, nr_packs
, pack_pair_compare
);
290 for (i
= 0; i
< nr_packs
; i
++) {
291 pack_names
[i
] = pairs
[i
].pack_name
;
292 perm
[pairs
[i
].pack_int_id
] = i
;
298 struct pack_midx_entry
{
299 struct object_id oid
;
300 uint32_t pack_int_id
;
305 static int midx_oid_compare(const void *_a
, const void *_b
)
307 const struct pack_midx_entry
*a
= (const struct pack_midx_entry
*)_a
;
308 const struct pack_midx_entry
*b
= (const struct pack_midx_entry
*)_b
;
309 int cmp
= oidcmp(&a
->oid
, &b
->oid
);
314 if (a
->pack_mtime
> b
->pack_mtime
)
316 else if (a
->pack_mtime
< b
->pack_mtime
)
319 return a
->pack_int_id
- b
->pack_int_id
;
322 static void fill_pack_entry(uint32_t pack_int_id
,
323 struct packed_git
*p
,
325 struct pack_midx_entry
*entry
)
327 if (!nth_packed_object_oid(&entry
->oid
, p
, cur_object
))
328 die(_("failed to locate object %d in packfile"), cur_object
);
330 entry
->pack_int_id
= pack_int_id
;
331 entry
->pack_mtime
= p
->mtime
;
333 entry
->offset
= nth_packed_object_offset(p
, cur_object
);
337 * It is possible to artificially get into a state where there are many
338 * duplicate copies of objects. That can create high memory pressure if
339 * we are to create a list of all objects before de-duplication. To reduce
340 * this memory pressure without a significant performance drop, automatically
341 * group objects by the first byte of their object id. Use the IDX fanout
342 * tables to group the data, copy to a local array, then sort.
344 * Copy only the de-duplicated entries (selected by most-recent modified time
345 * of a packfile containing the object).
347 static struct pack_midx_entry
*get_sorted_entries(struct packed_git
**p
,
350 uint32_t *nr_objects
)
352 uint32_t cur_fanout
, cur_pack
, cur_object
;
353 uint32_t alloc_fanout
, alloc_objects
, total_objects
= 0;
354 struct pack_midx_entry
*entries_by_fanout
= NULL
;
355 struct pack_midx_entry
*deduplicated_entries
= NULL
;
357 for (cur_pack
= 0; cur_pack
< nr_packs
; cur_pack
++)
358 total_objects
+= p
[cur_pack
]->num_objects
;
361 * As we de-duplicate by fanout value, we expect the fanout
362 * slices to be evenly distributed, with some noise. Hence,
363 * allocate slightly more than one 256th.
365 alloc_objects
= alloc_fanout
= total_objects
> 3200 ? total_objects
/ 200 : 16;
367 ALLOC_ARRAY(entries_by_fanout
, alloc_fanout
);
368 ALLOC_ARRAY(deduplicated_entries
, alloc_objects
);
371 for (cur_fanout
= 0; cur_fanout
< 256; cur_fanout
++) {
372 uint32_t nr_fanout
= 0;
374 for (cur_pack
= 0; cur_pack
< nr_packs
; cur_pack
++) {
375 uint32_t start
= 0, end
;
378 start
= get_pack_fanout(p
[cur_pack
], cur_fanout
- 1);
379 end
= get_pack_fanout(p
[cur_pack
], cur_fanout
);
381 for (cur_object
= start
; cur_object
< end
; cur_object
++) {
382 ALLOC_GROW(entries_by_fanout
, nr_fanout
+ 1, alloc_fanout
);
383 fill_pack_entry(perm
[cur_pack
], p
[cur_pack
], cur_object
, &entries_by_fanout
[nr_fanout
]);
388 QSORT(entries_by_fanout
, nr_fanout
, midx_oid_compare
);
391 * The batch is now sorted by OID and then mtime (descending).
392 * Take only the first duplicate.
394 for (cur_object
= 0; cur_object
< nr_fanout
; cur_object
++) {
395 if (cur_object
&& !oidcmp(&entries_by_fanout
[cur_object
- 1].oid
,
396 &entries_by_fanout
[cur_object
].oid
))
399 ALLOC_GROW(deduplicated_entries
, *nr_objects
+ 1, alloc_objects
);
400 memcpy(&deduplicated_entries
[*nr_objects
],
401 &entries_by_fanout
[cur_object
],
402 sizeof(struct pack_midx_entry
));
407 free(entries_by_fanout
);
408 return deduplicated_entries
;
411 static size_t write_midx_pack_names(struct hashfile
*f
,
416 unsigned char padding
[MIDX_CHUNK_ALIGNMENT
];
419 for (i
= 0; i
< num_packs
; i
++) {
420 size_t writelen
= strlen(pack_names
[i
]) + 1;
422 if (i
&& strcmp(pack_names
[i
], pack_names
[i
- 1]) <= 0)
423 BUG("incorrect pack-file order: %s before %s",
427 hashwrite(f
, pack_names
[i
], writelen
);
431 /* add padding to be aligned */
432 i
= MIDX_CHUNK_ALIGNMENT
- (written
% MIDX_CHUNK_ALIGNMENT
);
433 if (i
< MIDX_CHUNK_ALIGNMENT
) {
434 memset(padding
, 0, sizeof(padding
));
435 hashwrite(f
, padding
, i
);
442 static size_t write_midx_oid_fanout(struct hashfile
*f
,
443 struct pack_midx_entry
*objects
,
446 struct pack_midx_entry
*list
= objects
;
447 struct pack_midx_entry
*last
= objects
+ nr_objects
;
452 * Write the first-level table (the list is sorted,
453 * but we use a 256-entry lookup to be able to avoid
454 * having to do eight extra binary search iterations).
456 for (i
= 0; i
< 256; i
++) {
457 struct pack_midx_entry
*next
= list
;
459 while (next
< last
&& next
->oid
.hash
[0] == i
) {
464 hashwrite_be32(f
, count
);
468 return MIDX_CHUNK_FANOUT_SIZE
;
471 static size_t write_midx_oid_lookup(struct hashfile
*f
, unsigned char hash_len
,
472 struct pack_midx_entry
*objects
,
475 struct pack_midx_entry
*list
= objects
;
479 for (i
= 0; i
< nr_objects
; i
++) {
480 struct pack_midx_entry
*obj
= list
++;
482 if (i
< nr_objects
- 1) {
483 struct pack_midx_entry
*next
= list
;
484 if (oidcmp(&obj
->oid
, &next
->oid
) >= 0)
485 BUG("OIDs not in order: %s >= %s",
486 oid_to_hex(&obj
->oid
),
487 oid_to_hex(&next
->oid
));
490 hashwrite(f
, obj
->oid
.hash
, (int)hash_len
);
497 static size_t write_midx_object_offsets(struct hashfile
*f
, int large_offset_needed
,
498 struct pack_midx_entry
*objects
, uint32_t nr_objects
)
500 struct pack_midx_entry
*list
= objects
;
501 uint32_t i
, nr_large_offset
= 0;
504 for (i
= 0; i
< nr_objects
; i
++) {
505 struct pack_midx_entry
*obj
= list
++;
507 hashwrite_be32(f
, obj
->pack_int_id
);
509 if (large_offset_needed
&& obj
->offset
>> 31)
510 hashwrite_be32(f
, MIDX_LARGE_OFFSET_NEEDED
| nr_large_offset
++);
511 else if (!large_offset_needed
&& obj
->offset
>> 32)
512 BUG("object %s requires a large offset (%"PRIx64
") but the MIDX is not writing large offsets!",
513 oid_to_hex(&obj
->oid
),
516 hashwrite_be32(f
, (uint32_t)obj
->offset
);
518 written
+= MIDX_CHUNK_OFFSET_WIDTH
;
524 static size_t write_midx_large_offsets(struct hashfile
*f
, uint32_t nr_large_offset
,
525 struct pack_midx_entry
*objects
, uint32_t nr_objects
)
527 struct pack_midx_entry
*list
= objects
;
530 while (nr_large_offset
) {
531 struct pack_midx_entry
*obj
= list
++;
532 uint64_t offset
= obj
->offset
;
537 hashwrite_be32(f
, offset
>> 32);
538 hashwrite_be32(f
, offset
& 0xffffffffUL
);
539 written
+= 2 * sizeof(uint32_t);
547 int write_midx_file(const char *object_dir
)
549 unsigned char cur_chunk
, num_chunks
= 0;
552 struct hashfile
*f
= NULL
;
554 struct pack_list packs
;
555 uint32_t *pack_perm
= NULL
;
556 uint64_t written
= 0;
557 uint32_t chunk_ids
[MIDX_MAX_CHUNKS
+ 1];
558 uint64_t chunk_offsets
[MIDX_MAX_CHUNKS
+ 1];
559 uint32_t nr_entries
, num_large_offsets
= 0;
560 struct pack_midx_entry
*entries
= NULL
;
561 int large_offsets_needed
= 0;
563 midx_name
= get_midx_filename(object_dir
);
564 if (safe_create_leading_directories(midx_name
)) {
566 die_errno(_("unable to create leading directories of %s"),
571 packs
.alloc_list
= 16;
572 packs
.alloc_names
= 16;
574 packs
.pack_name_concat_len
= 0;
575 ALLOC_ARRAY(packs
.list
, packs
.alloc_list
);
576 ALLOC_ARRAY(packs
.names
, packs
.alloc_names
);
578 for_each_file_in_pack_dir(object_dir
, add_pack_to_midx
, &packs
);
580 if (packs
.pack_name_concat_len
% MIDX_CHUNK_ALIGNMENT
)
581 packs
.pack_name_concat_len
+= MIDX_CHUNK_ALIGNMENT
-
582 (packs
.pack_name_concat_len
% MIDX_CHUNK_ALIGNMENT
);
584 ALLOC_ARRAY(pack_perm
, packs
.nr
);
585 sort_packs_by_name(packs
.names
, packs
.nr
, pack_perm
);
587 entries
= get_sorted_entries(packs
.list
, pack_perm
, packs
.nr
, &nr_entries
);
588 for (i
= 0; i
< nr_entries
; i
++) {
589 if (entries
[i
].offset
> 0x7fffffff)
591 if (entries
[i
].offset
> 0xffffffff)
592 large_offsets_needed
= 1;
595 hold_lock_file_for_update(&lk
, midx_name
, LOCK_DIE_ON_ERROR
);
596 f
= hashfd(lk
.tempfile
->fd
, lk
.tempfile
->filename
.buf
);
597 FREE_AND_NULL(midx_name
);
600 num_chunks
= large_offsets_needed
? 5 : 4;
602 written
= write_midx_header(f
, num_chunks
, packs
.nr
);
604 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_PACKNAMES
;
605 chunk_offsets
[cur_chunk
] = written
+ (num_chunks
+ 1) * MIDX_CHUNKLOOKUP_WIDTH
;
608 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_OIDFANOUT
;
609 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + packs
.pack_name_concat_len
;
612 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_OIDLOOKUP
;
613 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + MIDX_CHUNK_FANOUT_SIZE
;
616 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_OBJECTOFFSETS
;
617 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + nr_entries
* MIDX_HASH_LEN
;
620 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + nr_entries
* MIDX_CHUNK_OFFSET_WIDTH
;
621 if (large_offsets_needed
) {
622 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_LARGEOFFSETS
;
625 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] +
626 num_large_offsets
* MIDX_CHUNK_LARGE_OFFSET_WIDTH
;
629 chunk_ids
[cur_chunk
] = 0;
631 for (i
= 0; i
<= num_chunks
; i
++) {
632 if (i
&& chunk_offsets
[i
] < chunk_offsets
[i
- 1])
633 BUG("incorrect chunk offsets: %"PRIu64
" before %"PRIu64
,
634 chunk_offsets
[i
- 1],
637 if (chunk_offsets
[i
] % MIDX_CHUNK_ALIGNMENT
)
638 BUG("chunk offset %"PRIu64
" is not properly aligned",
641 hashwrite_be32(f
, chunk_ids
[i
]);
642 hashwrite_be32(f
, chunk_offsets
[i
] >> 32);
643 hashwrite_be32(f
, chunk_offsets
[i
]);
645 written
+= MIDX_CHUNKLOOKUP_WIDTH
;
648 for (i
= 0; i
< num_chunks
; i
++) {
649 if (written
!= chunk_offsets
[i
])
650 BUG("incorrect chunk offset (%"PRIu64
" != %"PRIu64
") for chunk id %"PRIx32
,
655 switch (chunk_ids
[i
]) {
656 case MIDX_CHUNKID_PACKNAMES
:
657 written
+= write_midx_pack_names(f
, packs
.names
, packs
.nr
);
660 case MIDX_CHUNKID_OIDFANOUT
:
661 written
+= write_midx_oid_fanout(f
, entries
, nr_entries
);
664 case MIDX_CHUNKID_OIDLOOKUP
:
665 written
+= write_midx_oid_lookup(f
, MIDX_HASH_LEN
, entries
, nr_entries
);
668 case MIDX_CHUNKID_OBJECTOFFSETS
:
669 written
+= write_midx_object_offsets(f
, large_offsets_needed
, entries
, nr_entries
);
672 case MIDX_CHUNKID_LARGEOFFSETS
:
673 written
+= write_midx_large_offsets(f
, num_large_offsets
, entries
, nr_entries
);
677 BUG("trying to write unknown chunk id %"PRIx32
,
682 if (written
!= chunk_offsets
[num_chunks
])
683 BUG("incorrect final offset %"PRIu64
" != %"PRIu64
,
685 chunk_offsets
[num_chunks
]);
687 finalize_hashfile(f
, NULL
, CSUM_FSYNC
| CSUM_HASH_IN_STREAM
);
688 commit_lock_file(&lk
);
690 for (i
= 0; i
< packs
.nr
; i
++) {
692 close_pack(packs
.list
[i
]);
695 free(packs
.names
[i
]);