7 #include "object-store.h"
8 #include "sha1-lookup.h"
/*
 * On-disk multi-pack-index (MIDX) format constants.
 * The file is: 12-byte header, chunk lookup table, chunks, trailing hash.
 */
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1
/* Byte offsets of header fields within the mapped file. */
#define MIDX_BYTE_FILE_VERSION 4
#define MIDX_BYTE_HASH_VERSION 5
#define MIDX_BYTE_NUM_CHUNKS 6
#define MIDX_BYTE_NUM_PACKS 8
#define MIDX_HASH_VERSION 1
#define MIDX_HEADER_SIZE 12
#define MIDX_HASH_LEN 20 /* SHA-1 raw length */
/* Smallest possible valid file: header plus trailing checksum. */
#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + MIDX_HASH_LEN)

/* Chunk table: up to five chunks, each entry is a 4-byte id + 8-byte offset. */
#define MIDX_MAX_CHUNKS 5
#define MIDX_CHUNK_ALIGNMENT 4
#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
#define MIDX_CHUNKLOOKUP_WIDTH (sizeof(uint32_t) + sizeof(uint64_t))
#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
/* High bit of a 32-bit offset entry: real offset lives in the LOFF chunk. */
#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
/*
 * Return the path of the multi-pack-index file under the given object
 * directory. Caller owns (and must free) the returned string.
 */
static char *get_midx_filename(const char *object_dir)
{
	return xstrfmt("%s/pack/multi-pack-index", object_dir);
}
40 struct multi_pack_index
*load_multi_pack_index(const char *object_dir
, int local
)
42 struct multi_pack_index
*m
= NULL
;
46 void *midx_map
= NULL
;
47 uint32_t hash_version
;
48 char *midx_name
= get_midx_filename(object_dir
);
50 const char *cur_pack_name
;
52 fd
= git_open(midx_name
);
57 error_errno(_("failed to read %s"), midx_name
);
61 midx_size
= xsize_t(st
.st_size
);
63 if (midx_size
< MIDX_MIN_SIZE
) {
64 error(_("multi-pack-index file %s is too small"), midx_name
);
68 FREE_AND_NULL(midx_name
);
70 midx_map
= xmmap(NULL
, midx_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
72 FLEX_ALLOC_MEM(m
, object_dir
, object_dir
, strlen(object_dir
));
75 m
->data_len
= midx_size
;
78 m
->signature
= get_be32(m
->data
);
79 if (m
->signature
!= MIDX_SIGNATURE
) {
80 error(_("multi-pack-index signature 0x%08x does not match signature 0x%08x"),
81 m
->signature
, MIDX_SIGNATURE
);
85 m
->version
= m
->data
[MIDX_BYTE_FILE_VERSION
];
86 if (m
->version
!= MIDX_VERSION
) {
87 error(_("multi-pack-index version %d not recognized"),
92 hash_version
= m
->data
[MIDX_BYTE_HASH_VERSION
];
93 if (hash_version
!= MIDX_HASH_VERSION
) {
94 error(_("hash version %u does not match"), hash_version
);
97 m
->hash_len
= MIDX_HASH_LEN
;
99 m
->num_chunks
= m
->data
[MIDX_BYTE_NUM_CHUNKS
];
101 m
->num_packs
= get_be32(m
->data
+ MIDX_BYTE_NUM_PACKS
);
103 for (i
= 0; i
< m
->num_chunks
; i
++) {
104 uint32_t chunk_id
= get_be32(m
->data
+ MIDX_HEADER_SIZE
+
105 MIDX_CHUNKLOOKUP_WIDTH
* i
);
106 uint64_t chunk_offset
= get_be64(m
->data
+ MIDX_HEADER_SIZE
+ 4 +
107 MIDX_CHUNKLOOKUP_WIDTH
* i
);
110 case MIDX_CHUNKID_PACKNAMES
:
111 m
->chunk_pack_names
= m
->data
+ chunk_offset
;
114 case MIDX_CHUNKID_OIDFANOUT
:
115 m
->chunk_oid_fanout
= (uint32_t *)(m
->data
+ chunk_offset
);
118 case MIDX_CHUNKID_OIDLOOKUP
:
119 m
->chunk_oid_lookup
= m
->data
+ chunk_offset
;
122 case MIDX_CHUNKID_OBJECTOFFSETS
:
123 m
->chunk_object_offsets
= m
->data
+ chunk_offset
;
126 case MIDX_CHUNKID_LARGEOFFSETS
:
127 m
->chunk_large_offsets
= m
->data
+ chunk_offset
;
131 die(_("terminating multi-pack-index chunk id appears earlier than expected"));
136 * Do nothing on unrecognized chunks, allowing future
137 * extensions to add optional chunks.
143 if (!m
->chunk_pack_names
)
144 die(_("multi-pack-index missing required pack-name chunk"));
145 if (!m
->chunk_oid_fanout
)
146 die(_("multi-pack-index missing required OID fanout chunk"));
147 if (!m
->chunk_oid_lookup
)
148 die(_("multi-pack-index missing required OID lookup chunk"));
149 if (!m
->chunk_object_offsets
)
150 die(_("multi-pack-index missing required object offsets chunk"));
152 m
->num_objects
= ntohl(m
->chunk_oid_fanout
[255]);
154 m
->pack_names
= xcalloc(m
->num_packs
, sizeof(*m
->pack_names
));
155 m
->packs
= xcalloc(m
->num_packs
, sizeof(*m
->packs
));
157 cur_pack_name
= (const char *)m
->chunk_pack_names
;
158 for (i
= 0; i
< m
->num_packs
; i
++) {
159 m
->pack_names
[i
] = cur_pack_name
;
161 cur_pack_name
+= strlen(cur_pack_name
) + 1;
163 if (i
&& strcmp(m
->pack_names
[i
], m
->pack_names
[i
- 1]) <= 0) {
164 error(_("multi-pack-index pack names out of order: '%s' before '%s'"),
165 m
->pack_names
[i
- 1],
177 munmap(midx_map
, midx_size
);
183 static void close_midx(struct multi_pack_index
*m
)
186 munmap((unsigned char *)m
->data
, m
->data_len
);
190 for (i
= 0; i
< m
->num_packs
; i
++) {
192 close_pack(m
->packs
[i
]);
196 FREE_AND_NULL(m
->packs
);
197 FREE_AND_NULL(m
->pack_names
);
200 static int prepare_midx_pack(struct multi_pack_index
*m
, uint32_t pack_int_id
)
202 struct strbuf pack_name
= STRBUF_INIT
;
204 if (pack_int_id
>= m
->num_packs
)
205 BUG("bad pack-int-id");
207 if (m
->packs
[pack_int_id
])
210 strbuf_addf(&pack_name
, "%s/pack/%s", m
->object_dir
,
211 m
->pack_names
[pack_int_id
]);
213 m
->packs
[pack_int_id
] = add_packed_git(pack_name
.buf
, pack_name
.len
, m
->local
);
214 strbuf_release(&pack_name
);
215 return !m
->packs
[pack_int_id
];
218 int bsearch_midx(const struct object_id
*oid
, struct multi_pack_index
*m
, uint32_t *result
)
220 return bsearch_hash(oid
->hash
, m
->chunk_oid_fanout
, m
->chunk_oid_lookup
,
221 MIDX_HASH_LEN
, result
);
224 struct object_id
*nth_midxed_object_oid(struct object_id
*oid
,
225 struct multi_pack_index
*m
,
228 if (n
>= m
->num_objects
)
231 hashcpy(oid
->hash
, m
->chunk_oid_lookup
+ m
->hash_len
* n
);
235 static off_t
nth_midxed_offset(struct multi_pack_index
*m
, uint32_t pos
)
237 const unsigned char *offset_data
;
240 offset_data
= m
->chunk_object_offsets
+ pos
* MIDX_CHUNK_OFFSET_WIDTH
;
241 offset32
= get_be32(offset_data
+ sizeof(uint32_t));
243 if (m
->chunk_large_offsets
&& offset32
& MIDX_LARGE_OFFSET_NEEDED
) {
244 if (sizeof(offset32
) < sizeof(uint64_t))
245 die(_("multi-pack-index stores a 64-bit offset, but off_t is too small"));
247 offset32
^= MIDX_LARGE_OFFSET_NEEDED
;
248 return get_be64(m
->chunk_large_offsets
+ sizeof(uint64_t) * offset32
);
254 static uint32_t nth_midxed_pack_int_id(struct multi_pack_index
*m
, uint32_t pos
)
256 return get_be32(m
->chunk_object_offsets
+ pos
* MIDX_CHUNK_OFFSET_WIDTH
);
259 static int nth_midxed_pack_entry(struct multi_pack_index
*m
, struct pack_entry
*e
, uint32_t pos
)
261 uint32_t pack_int_id
;
262 struct packed_git
*p
;
264 if (pos
>= m
->num_objects
)
267 pack_int_id
= nth_midxed_pack_int_id(m
, pos
);
269 if (prepare_midx_pack(m
, pack_int_id
))
270 die(_("error preparing packfile from multi-pack-index"));
271 p
= m
->packs
[pack_int_id
];
274 * We are about to tell the caller where they can locate the
275 * requested object. We better make sure the packfile is
276 * still here and can be accessed before supplying that
277 * answer, as it may have been deleted since the MIDX was
280 if (!is_pack_valid(p
))
283 if (p
->num_bad_objects
) {
285 struct object_id oid
;
286 nth_midxed_object_oid(&oid
, m
, pos
);
287 for (i
= 0; i
< p
->num_bad_objects
; i
++)
288 if (!hashcmp(oid
.hash
,
289 p
->bad_object_sha1
+ the_hash_algo
->rawsz
* i
))
293 e
->offset
= nth_midxed_offset(m
, pos
);
/*
 * Look up oid in the multi-pack-index and, if present, fill *e with its
 * pack location. Returns 1 if found, 0 otherwise.
 */
int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m)
{
	uint32_t pos;

	if (!bsearch_midx(oid, m, &pos))
		return 0;

	return nth_midxed_pack_entry(m, e, pos);
}
309 int midx_contains_pack(struct multi_pack_index
*m
, const char *idx_name
)
311 uint32_t first
= 0, last
= m
->num_packs
;
313 while (first
< last
) {
314 uint32_t mid
= first
+ (last
- first
) / 2;
318 current
= m
->pack_names
[mid
];
319 cmp
= strcmp(idx_name
, current
);
332 int prepare_multi_pack_index_one(struct repository
*r
, const char *object_dir
, int local
)
334 struct multi_pack_index
*m
= r
->objects
->multi_pack_index
;
335 struct multi_pack_index
*m_search
;
338 if (repo_config_get_bool(r
, "core.multipackindex", &config_value
) ||
342 for (m_search
= m
; m_search
; m_search
= m_search
->next
)
343 if (!strcmp(object_dir
, m_search
->object_dir
))
346 r
->objects
->multi_pack_index
= load_multi_pack_index(object_dir
, local
);
348 if (r
->objects
->multi_pack_index
) {
349 r
->objects
->multi_pack_index
->next
= m
;
356 static size_t write_midx_header(struct hashfile
*f
,
357 unsigned char num_chunks
,
360 unsigned char byte_values
[4];
362 hashwrite_be32(f
, MIDX_SIGNATURE
);
363 byte_values
[0] = MIDX_VERSION
;
364 byte_values
[1] = MIDX_HASH_VERSION
;
365 byte_values
[2] = num_chunks
;
366 byte_values
[3] = 0; /* unused */
367 hashwrite(f
, byte_values
, sizeof(byte_values
));
368 hashwrite_be32(f
, num_packs
);
370 return MIDX_HEADER_SIZE
;
/*
 * Accumulator used while gathering packfiles for a new MIDX:
 * parallel arrays of opened packs and their .idx names, plus the
 * existing MIDX (if any) whose packs are carried forward.
 */
struct pack_list {
	struct packed_git **list;
	char **names;
	uint32_t nr;
	uint32_t alloc_list;
	uint32_t alloc_names;
	size_t pack_name_concat_len; /* total bytes of all names incl. NULs */
	struct multi_pack_index *m;
};
383 static void add_pack_to_midx(const char *full_path
, size_t full_path_len
,
384 const char *file_name
, void *data
)
386 struct pack_list
*packs
= (struct pack_list
*)data
;
388 if (ends_with(file_name
, ".idx")) {
389 if (packs
->m
&& midx_contains_pack(packs
->m
, file_name
))
392 ALLOC_GROW(packs
->list
, packs
->nr
+ 1, packs
->alloc_list
);
393 ALLOC_GROW(packs
->names
, packs
->nr
+ 1, packs
->alloc_names
);
395 packs
->list
[packs
->nr
] = add_packed_git(full_path
,
399 if (!packs
->list
[packs
->nr
]) {
400 warning(_("failed to add packfile '%s'"),
405 if (open_pack_index(packs
->list
[packs
->nr
])) {
406 warning(_("failed to open pack-index '%s'"),
408 close_pack(packs
->list
[packs
->nr
]);
409 FREE_AND_NULL(packs
->list
[packs
->nr
]);
413 packs
->names
[packs
->nr
] = xstrdup(file_name
);
414 packs
->pack_name_concat_len
+= strlen(file_name
) + 1;
/*
 * Temporary (id, name) pair used to sort pack names while remembering
 * each pack's original integer id.
 */
struct pack_pair {
	uint32_t pack_int_id;
	char *pack_name;
};

/* qsort comparator: order pack_pairs by pack name. */
static int pack_pair_compare(const void *_a, const void *_b)
{
	struct pack_pair *a = (struct pack_pair *)_a;
	struct pack_pair *b = (struct pack_pair *)_b;
	return strcmp(a->pack_name, b->pack_name);
}
431 static void sort_packs_by_name(char **pack_names
, uint32_t nr_packs
, uint32_t *perm
)
434 struct pack_pair
*pairs
;
436 ALLOC_ARRAY(pairs
, nr_packs
);
438 for (i
= 0; i
< nr_packs
; i
++) {
439 pairs
[i
].pack_int_id
= i
;
440 pairs
[i
].pack_name
= pack_names
[i
];
443 QSORT(pairs
, nr_packs
, pack_pair_compare
);
445 for (i
= 0; i
< nr_packs
; i
++) {
446 pack_names
[i
] = pairs
[i
].pack_name
;
447 perm
[pairs
[i
].pack_int_id
] = i
;
453 struct pack_midx_entry
{
454 struct object_id oid
;
455 uint32_t pack_int_id
;
460 static int midx_oid_compare(const void *_a
, const void *_b
)
462 const struct pack_midx_entry
*a
= (const struct pack_midx_entry
*)_a
;
463 const struct pack_midx_entry
*b
= (const struct pack_midx_entry
*)_b
;
464 int cmp
= oidcmp(&a
->oid
, &b
->oid
);
469 if (a
->pack_mtime
> b
->pack_mtime
)
471 else if (a
->pack_mtime
< b
->pack_mtime
)
474 return a
->pack_int_id
- b
->pack_int_id
;
477 static int nth_midxed_pack_midx_entry(struct multi_pack_index
*m
,
479 struct pack_midx_entry
*e
,
482 if (pos
>= m
->num_objects
)
485 nth_midxed_object_oid(&e
->oid
, m
, pos
);
486 e
->pack_int_id
= pack_perm
[nth_midxed_pack_int_id(m
, pos
)];
487 e
->offset
= nth_midxed_offset(m
, pos
);
489 /* consider objects in midx to be from "old" packs */
494 static void fill_pack_entry(uint32_t pack_int_id
,
495 struct packed_git
*p
,
497 struct pack_midx_entry
*entry
)
499 if (!nth_packed_object_oid(&entry
->oid
, p
, cur_object
))
500 die(_("failed to locate object %d in packfile"), cur_object
);
502 entry
->pack_int_id
= pack_int_id
;
503 entry
->pack_mtime
= p
->mtime
;
505 entry
->offset
= nth_packed_object_offset(p
, cur_object
);
509 * It is possible to artificially get into a state where there are many
510 * duplicate copies of objects. That can create high memory pressure if
511 * we are to create a list of all objects before de-duplication. To reduce
512 * this memory pressure without a significant performance drop, automatically
513 * group objects by the first byte of their object id. Use the IDX fanout
514 * tables to group the data, copy to a local array, then sort.
516 * Copy only the de-duplicated entries (selected by most-recent modified time
517 * of a packfile containing the object).
519 static struct pack_midx_entry
*get_sorted_entries(struct multi_pack_index
*m
,
520 struct packed_git
**p
,
523 uint32_t *nr_objects
)
525 uint32_t cur_fanout
, cur_pack
, cur_object
;
526 uint32_t alloc_fanout
, alloc_objects
, total_objects
= 0;
527 struct pack_midx_entry
*entries_by_fanout
= NULL
;
528 struct pack_midx_entry
*deduplicated_entries
= NULL
;
529 uint32_t start_pack
= m
? m
->num_packs
: 0;
531 for (cur_pack
= start_pack
; cur_pack
< nr_packs
; cur_pack
++)
532 total_objects
+= p
[cur_pack
]->num_objects
;
535 * As we de-duplicate by fanout value, we expect the fanout
536 * slices to be evenly distributed, with some noise. Hence,
537 * allocate slightly more than one 256th.
539 alloc_objects
= alloc_fanout
= total_objects
> 3200 ? total_objects
/ 200 : 16;
541 ALLOC_ARRAY(entries_by_fanout
, alloc_fanout
);
542 ALLOC_ARRAY(deduplicated_entries
, alloc_objects
);
545 for (cur_fanout
= 0; cur_fanout
< 256; cur_fanout
++) {
546 uint32_t nr_fanout
= 0;
549 uint32_t start
= 0, end
;
552 start
= ntohl(m
->chunk_oid_fanout
[cur_fanout
- 1]);
553 end
= ntohl(m
->chunk_oid_fanout
[cur_fanout
]);
555 for (cur_object
= start
; cur_object
< end
; cur_object
++) {
556 ALLOC_GROW(entries_by_fanout
, nr_fanout
+ 1, alloc_fanout
);
557 nth_midxed_pack_midx_entry(m
, perm
,
558 &entries_by_fanout
[nr_fanout
],
564 for (cur_pack
= start_pack
; cur_pack
< nr_packs
; cur_pack
++) {
565 uint32_t start
= 0, end
;
568 start
= get_pack_fanout(p
[cur_pack
], cur_fanout
- 1);
569 end
= get_pack_fanout(p
[cur_pack
], cur_fanout
);
571 for (cur_object
= start
; cur_object
< end
; cur_object
++) {
572 ALLOC_GROW(entries_by_fanout
, nr_fanout
+ 1, alloc_fanout
);
573 fill_pack_entry(perm
[cur_pack
], p
[cur_pack
], cur_object
, &entries_by_fanout
[nr_fanout
]);
578 QSORT(entries_by_fanout
, nr_fanout
, midx_oid_compare
);
581 * The batch is now sorted by OID and then mtime (descending).
582 * Take only the first duplicate.
584 for (cur_object
= 0; cur_object
< nr_fanout
; cur_object
++) {
585 if (cur_object
&& !oidcmp(&entries_by_fanout
[cur_object
- 1].oid
,
586 &entries_by_fanout
[cur_object
].oid
))
589 ALLOC_GROW(deduplicated_entries
, *nr_objects
+ 1, alloc_objects
);
590 memcpy(&deduplicated_entries
[*nr_objects
],
591 &entries_by_fanout
[cur_object
],
592 sizeof(struct pack_midx_entry
));
597 free(entries_by_fanout
);
598 return deduplicated_entries
;
601 static size_t write_midx_pack_names(struct hashfile
*f
,
606 unsigned char padding
[MIDX_CHUNK_ALIGNMENT
];
609 for (i
= 0; i
< num_packs
; i
++) {
610 size_t writelen
= strlen(pack_names
[i
]) + 1;
612 if (i
&& strcmp(pack_names
[i
], pack_names
[i
- 1]) <= 0)
613 BUG("incorrect pack-file order: %s before %s",
617 hashwrite(f
, pack_names
[i
], writelen
);
621 /* add padding to be aligned */
622 i
= MIDX_CHUNK_ALIGNMENT
- (written
% MIDX_CHUNK_ALIGNMENT
);
623 if (i
< MIDX_CHUNK_ALIGNMENT
) {
624 memset(padding
, 0, sizeof(padding
));
625 hashwrite(f
, padding
, i
);
632 static size_t write_midx_oid_fanout(struct hashfile
*f
,
633 struct pack_midx_entry
*objects
,
636 struct pack_midx_entry
*list
= objects
;
637 struct pack_midx_entry
*last
= objects
+ nr_objects
;
642 * Write the first-level table (the list is sorted,
643 * but we use a 256-entry lookup to be able to avoid
644 * having to do eight extra binary search iterations).
646 for (i
= 0; i
< 256; i
++) {
647 struct pack_midx_entry
*next
= list
;
649 while (next
< last
&& next
->oid
.hash
[0] == i
) {
654 hashwrite_be32(f
, count
);
658 return MIDX_CHUNK_FANOUT_SIZE
;
661 static size_t write_midx_oid_lookup(struct hashfile
*f
, unsigned char hash_len
,
662 struct pack_midx_entry
*objects
,
665 struct pack_midx_entry
*list
= objects
;
669 for (i
= 0; i
< nr_objects
; i
++) {
670 struct pack_midx_entry
*obj
= list
++;
672 if (i
< nr_objects
- 1) {
673 struct pack_midx_entry
*next
= list
;
674 if (oidcmp(&obj
->oid
, &next
->oid
) >= 0)
675 BUG("OIDs not in order: %s >= %s",
676 oid_to_hex(&obj
->oid
),
677 oid_to_hex(&next
->oid
));
680 hashwrite(f
, obj
->oid
.hash
, (int)hash_len
);
687 static size_t write_midx_object_offsets(struct hashfile
*f
, int large_offset_needed
,
688 struct pack_midx_entry
*objects
, uint32_t nr_objects
)
690 struct pack_midx_entry
*list
= objects
;
691 uint32_t i
, nr_large_offset
= 0;
694 for (i
= 0; i
< nr_objects
; i
++) {
695 struct pack_midx_entry
*obj
= list
++;
697 hashwrite_be32(f
, obj
->pack_int_id
);
699 if (large_offset_needed
&& obj
->offset
>> 31)
700 hashwrite_be32(f
, MIDX_LARGE_OFFSET_NEEDED
| nr_large_offset
++);
701 else if (!large_offset_needed
&& obj
->offset
>> 32)
702 BUG("object %s requires a large offset (%"PRIx64
") but the MIDX is not writing large offsets!",
703 oid_to_hex(&obj
->oid
),
706 hashwrite_be32(f
, (uint32_t)obj
->offset
);
708 written
+= MIDX_CHUNK_OFFSET_WIDTH
;
714 static size_t write_midx_large_offsets(struct hashfile
*f
, uint32_t nr_large_offset
,
715 struct pack_midx_entry
*objects
, uint32_t nr_objects
)
717 struct pack_midx_entry
*list
= objects
;
720 while (nr_large_offset
) {
721 struct pack_midx_entry
*obj
= list
++;
722 uint64_t offset
= obj
->offset
;
727 hashwrite_be32(f
, offset
>> 32);
728 hashwrite_be32(f
, offset
& 0xffffffffUL
);
729 written
+= 2 * sizeof(uint32_t);
737 int write_midx_file(const char *object_dir
)
739 unsigned char cur_chunk
, num_chunks
= 0;
742 struct hashfile
*f
= NULL
;
744 struct pack_list packs
;
745 uint32_t *pack_perm
= NULL
;
746 uint64_t written
= 0;
747 uint32_t chunk_ids
[MIDX_MAX_CHUNKS
+ 1];
748 uint64_t chunk_offsets
[MIDX_MAX_CHUNKS
+ 1];
749 uint32_t nr_entries
, num_large_offsets
= 0;
750 struct pack_midx_entry
*entries
= NULL
;
751 int large_offsets_needed
= 0;
753 midx_name
= get_midx_filename(object_dir
);
754 if (safe_create_leading_directories(midx_name
)) {
756 die_errno(_("unable to create leading directories of %s"),
760 packs
.m
= load_multi_pack_index(object_dir
, 1);
763 packs
.alloc_list
= packs
.m
? packs
.m
->num_packs
: 16;
764 packs
.alloc_names
= packs
.alloc_list
;
767 packs
.pack_name_concat_len
= 0;
768 ALLOC_ARRAY(packs
.list
, packs
.alloc_list
);
769 ALLOC_ARRAY(packs
.names
, packs
.alloc_names
);
772 for (i
= 0; i
< packs
.m
->num_packs
; i
++) {
773 ALLOC_GROW(packs
.list
, packs
.nr
+ 1, packs
.alloc_list
);
774 ALLOC_GROW(packs
.names
, packs
.nr
+ 1, packs
.alloc_names
);
776 packs
.list
[packs
.nr
] = NULL
;
777 packs
.names
[packs
.nr
] = xstrdup(packs
.m
->pack_names
[i
]);
778 packs
.pack_name_concat_len
+= strlen(packs
.names
[packs
.nr
]) + 1;
783 for_each_file_in_pack_dir(object_dir
, add_pack_to_midx
, &packs
);
785 if (packs
.m
&& packs
.nr
== packs
.m
->num_packs
)
788 if (packs
.pack_name_concat_len
% MIDX_CHUNK_ALIGNMENT
)
789 packs
.pack_name_concat_len
+= MIDX_CHUNK_ALIGNMENT
-
790 (packs
.pack_name_concat_len
% MIDX_CHUNK_ALIGNMENT
);
792 ALLOC_ARRAY(pack_perm
, packs
.nr
);
793 sort_packs_by_name(packs
.names
, packs
.nr
, pack_perm
);
795 entries
= get_sorted_entries(packs
.m
, packs
.list
, pack_perm
, packs
.nr
, &nr_entries
);
797 for (i
= 0; i
< nr_entries
; i
++) {
798 if (entries
[i
].offset
> 0x7fffffff)
800 if (entries
[i
].offset
> 0xffffffff)
801 large_offsets_needed
= 1;
804 hold_lock_file_for_update(&lk
, midx_name
, LOCK_DIE_ON_ERROR
);
805 f
= hashfd(lk
.tempfile
->fd
, lk
.tempfile
->filename
.buf
);
806 FREE_AND_NULL(midx_name
);
812 num_chunks
= large_offsets_needed
? 5 : 4;
814 written
= write_midx_header(f
, num_chunks
, packs
.nr
);
816 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_PACKNAMES
;
817 chunk_offsets
[cur_chunk
] = written
+ (num_chunks
+ 1) * MIDX_CHUNKLOOKUP_WIDTH
;
820 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_OIDFANOUT
;
821 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + packs
.pack_name_concat_len
;
824 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_OIDLOOKUP
;
825 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + MIDX_CHUNK_FANOUT_SIZE
;
828 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_OBJECTOFFSETS
;
829 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + nr_entries
* MIDX_HASH_LEN
;
832 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] + nr_entries
* MIDX_CHUNK_OFFSET_WIDTH
;
833 if (large_offsets_needed
) {
834 chunk_ids
[cur_chunk
] = MIDX_CHUNKID_LARGEOFFSETS
;
837 chunk_offsets
[cur_chunk
] = chunk_offsets
[cur_chunk
- 1] +
838 num_large_offsets
* MIDX_CHUNK_LARGE_OFFSET_WIDTH
;
841 chunk_ids
[cur_chunk
] = 0;
843 for (i
= 0; i
<= num_chunks
; i
++) {
844 if (i
&& chunk_offsets
[i
] < chunk_offsets
[i
- 1])
845 BUG("incorrect chunk offsets: %"PRIu64
" before %"PRIu64
,
846 chunk_offsets
[i
- 1],
849 if (chunk_offsets
[i
] % MIDX_CHUNK_ALIGNMENT
)
850 BUG("chunk offset %"PRIu64
" is not properly aligned",
853 hashwrite_be32(f
, chunk_ids
[i
]);
854 hashwrite_be32(f
, chunk_offsets
[i
] >> 32);
855 hashwrite_be32(f
, chunk_offsets
[i
]);
857 written
+= MIDX_CHUNKLOOKUP_WIDTH
;
860 for (i
= 0; i
< num_chunks
; i
++) {
861 if (written
!= chunk_offsets
[i
])
862 BUG("incorrect chunk offset (%"PRIu64
" != %"PRIu64
") for chunk id %"PRIx32
,
867 switch (chunk_ids
[i
]) {
868 case MIDX_CHUNKID_PACKNAMES
:
869 written
+= write_midx_pack_names(f
, packs
.names
, packs
.nr
);
872 case MIDX_CHUNKID_OIDFANOUT
:
873 written
+= write_midx_oid_fanout(f
, entries
, nr_entries
);
876 case MIDX_CHUNKID_OIDLOOKUP
:
877 written
+= write_midx_oid_lookup(f
, MIDX_HASH_LEN
, entries
, nr_entries
);
880 case MIDX_CHUNKID_OBJECTOFFSETS
:
881 written
+= write_midx_object_offsets(f
, large_offsets_needed
, entries
, nr_entries
);
884 case MIDX_CHUNKID_LARGEOFFSETS
:
885 written
+= write_midx_large_offsets(f
, num_large_offsets
, entries
, nr_entries
);
889 BUG("trying to write unknown chunk id %"PRIx32
,
894 if (written
!= chunk_offsets
[num_chunks
])
895 BUG("incorrect final offset %"PRIu64
" != %"PRIu64
,
897 chunk_offsets
[num_chunks
]);
899 finalize_hashfile(f
, NULL
, CSUM_FSYNC
| CSUM_HASH_IN_STREAM
);
900 commit_lock_file(&lk
);
903 for (i
= 0; i
< packs
.nr
; i
++) {
905 close_pack(packs
.list
[i
]);
908 free(packs
.names
[i
]);
/*
 * Delete the multi-pack-index file for object_dir, dying on failure
 * (a missing file is not an error for remove_path()).
 */
void clear_midx_file(const char *object_dir)
{
	char *midx = get_midx_filename(object_dir);

	if (remove_path(midx))
		die(_("failed to clear multi-pack-index at %s"), midx);

	free(midx);
}