7 #include "object-store.h"
8 #include "sha1-lookup.h"
12 #include "run-command.h"
14 #define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
15 #define MIDX_VERSION 1
16 #define MIDX_BYTE_FILE_VERSION 4
17 #define MIDX_BYTE_HASH_VERSION 5
18 #define MIDX_BYTE_NUM_CHUNKS 6
19 #define MIDX_BYTE_NUM_PACKS 8
20 #define MIDX_HASH_VERSION 1
21 #define MIDX_HEADER_SIZE 12
22 #define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + the_hash_algo->rawsz)
24 #define MIDX_MAX_CHUNKS 5
25 #define MIDX_CHUNK_ALIGNMENT 4
26 #define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
27 #define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
28 #define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
29 #define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
30 #define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
31 #define MIDX_CHUNKLOOKUP_WIDTH (sizeof(uint32_t) + sizeof(uint64_t))
32 #define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
33 #define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
34 #define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
35 #define MIDX_LARGE_OFFSET_NEEDED 0x80000000
37 #define PACK_EXPIRED UINT_MAX
static char *get_midx_filename(const char *object_dir)
{
	return xstrfmt("%s/pack/multi-pack-index", object_dir);
}
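
/*
 * Map the multi-pack-index file at object_dir/pack/multi-pack-index into
 * memory, validate its header, and record the locations of the chunks it
 * contains. Returns NULL if the file is missing or unreadable.
 */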
struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local)
{
	struct multi_pack_index *m = NULL;
	int fd;
	struct stat st;
	size_t midx_size;
	void *midx_map = NULL;
	uint32_t hash_version;
	char *midx_name = get_midx_filename(object_dir);
	uint32_t i;
	const char *cur_pack_name;

	fd = git_open(midx_name);

	if (fd < 0)
		goto cleanup_fail;
	if (fstat(fd, &st)) {
		error_errno(_("failed to read %s"), midx_name);
		goto cleanup_fail;
	}

	midx_size = xsize_t(st.st_size);

	if (midx_size < MIDX_MIN_SIZE) {
		error(_("multi-pack-index file %s is too small"), midx_name);
		goto cleanup_fail;
	}

	FREE_AND_NULL(midx_name);

	midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);

	FLEX_ALLOC_STR(m, object_dir, object_dir);
	m->data = midx_map;
	m->data_len = midx_size;
	m->local = local;

	m->signature = get_be32(m->data);
	if (m->signature != MIDX_SIGNATURE)
		die(_("multi-pack-index signature 0x%08x does not match signature 0x%08x"),
		    m->signature, MIDX_SIGNATURE);

	m->version = m->data[MIDX_BYTE_FILE_VERSION];
	if (m->version != MIDX_VERSION)
		die(_("multi-pack-index version %d not recognized"),
		    m->version);

	hash_version = m->data[MIDX_BYTE_HASH_VERSION];
	if (hash_version != MIDX_HASH_VERSION)
		die(_("hash version %u does not match"), hash_version);
	m->hash_len = the_hash_algo->rawsz;

	m->num_chunks = m->data[MIDX_BYTE_NUM_CHUNKS];

	m->num_packs = get_be32(m->data + MIDX_BYTE_NUM_PACKS);

	for (i = 0; i < m->num_chunks; i++) {
		uint32_t chunk_id = get_be32(m->data + MIDX_HEADER_SIZE +
					     MIDX_CHUNKLOOKUP_WIDTH * i);
		uint64_t chunk_offset = get_be64(m->data + MIDX_HEADER_SIZE + 4 +
						 MIDX_CHUNKLOOKUP_WIDTH * i);

		if (chunk_offset >= m->data_len)
			die(_("invalid chunk offset (too large)"));

		switch (chunk_id) {
		case MIDX_CHUNKID_PACKNAMES:
			m->chunk_pack_names = m->data + chunk_offset;
			break;

		case MIDX_CHUNKID_OIDFANOUT:
			m->chunk_oid_fanout = (uint32_t *)(m->data + chunk_offset);
			break;

		case MIDX_CHUNKID_OIDLOOKUP:
			m->chunk_oid_lookup = m->data + chunk_offset;
			break;

		case MIDX_CHUNKID_OBJECTOFFSETS:
			m->chunk_object_offsets = m->data + chunk_offset;
			break;

		case MIDX_CHUNKID_LARGEOFFSETS:
			m->chunk_large_offsets = m->data + chunk_offset;
			break;

		case 0:
			die(_("terminating multi-pack-index chunk id appears earlier than expected"));
			break;

		default:
			/*
			 * Do nothing on unrecognized chunks, allowing future
			 * extensions to add optional chunks.
			 */
			break;
		}
	}

	if (!m->chunk_pack_names)
		die(_("multi-pack-index missing required pack-name chunk"));
	if (!m->chunk_oid_fanout)
		die(_("multi-pack-index missing required OID fanout chunk"));
	if (!m->chunk_oid_lookup)
		die(_("multi-pack-index missing required OID lookup chunk"));
	if (!m->chunk_object_offsets)
		die(_("multi-pack-index missing required object offsets chunk"));

	m->num_objects = ntohl(m->chunk_oid_fanout[255]);

	m->pack_names = xcalloc(m->num_packs, sizeof(*m->pack_names));
	m->packs = xcalloc(m->num_packs, sizeof(*m->packs));

	cur_pack_name = (const char *)m->chunk_pack_names;
	for (i = 0; i < m->num_packs; i++) {
		m->pack_names[i] = cur_pack_name;

		cur_pack_name += strlen(cur_pack_name) + 1;

		if (i && strcmp(m->pack_names[i], m->pack_names[i - 1]) <= 0)
			die(_("multi-pack-index pack names out of order: '%s' before '%s'"),
			    m->pack_names[i - 1],
			    m->pack_names[i]);
	}

	trace2_data_intmax("midx", the_repository, "load/num_packs", m->num_packs);
	trace2_data_intmax("midx", the_repository, "load/num_objects", m->num_objects);

	return m;

cleanup_fail:
	free(m);
	free(midx_name);
	if (midx_map)
		munmap(midx_map, midx_size);
	if (0 <= fd)
		close(fd);
	return NULL;
}
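
/*
 * Release the mmap'd data and per-pack bookkeeping owned by a
 * multi-pack-index structure.
 */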
void close_midx(struct multi_pack_index *m)
{
	uint32_t i;

	if (!m)
		return;

	munmap((unsigned char *)m->data, m->data_len);

	for (i = 0; i < m->num_packs; i++) {
		if (m->packs[i])
			m->packs[i]->multi_pack_index = 0;
	}
	FREE_AND_NULL(m->packs);
	FREE_AND_NULL(m->pack_names);
}
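
/*
 * Open the pack-file with the given pack-int-id, if it has not been
 * opened already, and register it in the repository's pack list.
 */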
int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id)
{
	struct strbuf pack_name = STRBUF_INIT;
	struct packed_git *p;

	if (pack_int_id >= m->num_packs)
		die(_("bad pack-int-id: %u (%u total packs)"),
		    pack_int_id, m->num_packs);

	if (m->packs[pack_int_id])
		return 0;

	strbuf_addf(&pack_name, "%s/pack/%s", m->object_dir,
		    m->pack_names[pack_int_id]);

	p = add_packed_git(pack_name.buf, pack_name.len, m->local);
	strbuf_release(&pack_name);

	if (!p)
		return 1;

	p->multi_pack_index = 1;
	m->packs[pack_int_id] = p;
	install_packed_git(r, p);
	list_add_tail(&p->mru, &r->objects->packed_git_mru);

	return 0;
}
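
/*
 * Binary-search the OID lookup chunk for the given object id, using the
 * fanout table to narrow the range. Stores the lexicographic position in
 * *result and returns non-zero when the object is present.
 */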
int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result)
{
	return bsearch_hash(oid->hash, m->chunk_oid_fanout, m->chunk_oid_lookup,
			    the_hash_algo->rawsz, result);
}
struct object_id *nth_midxed_object_oid(struct object_id *oid,
					struct multi_pack_index *m,
					uint32_t n)
{
	if (n >= m->num_objects)
		return NULL;

	hashcpy(oid->hash, m->chunk_oid_lookup + m->hash_len * n);
	return oid;
}
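
/*
 * Read the pack offset of the object in position 'pos', consulting the
 * large-offset chunk when the 31-bit offset field is not enough.
 */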
static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
{
	const unsigned char *offset_data;
	uint32_t offset32;

	offset_data = m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH;
	offset32 = get_be32(offset_data + sizeof(uint32_t));

	if (m->chunk_large_offsets && offset32 & MIDX_LARGE_OFFSET_NEEDED) {
		if (sizeof(off_t) < sizeof(uint64_t))
			die(_("multi-pack-index stores a 64-bit offset, but off_t is too small"));

		offset32 ^= MIDX_LARGE_OFFSET_NEEDED;
		return get_be64(m->chunk_large_offsets + sizeof(uint64_t) * offset32);
	}

	return offset32;
}
static uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
{
	return get_be32(m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH);
}
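
/*
 * Fill a pack_entry for the object in position 'pos', opening the
 * containing packfile on demand.
 */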
static int nth_midxed_pack_entry(struct repository *r,
				 struct multi_pack_index *m,
				 struct pack_entry *e,
				 uint32_t pos)
{
	uint32_t pack_int_id;
	struct packed_git *p;

	if (pos >= m->num_objects)
		return 0;

	pack_int_id = nth_midxed_pack_int_id(m, pos);

	if (prepare_midx_pack(r, m, pack_int_id))
		die(_("error preparing packfile from multi-pack-index"));
	p = m->packs[pack_int_id];

	/*
	 * We are about to tell the caller where they can locate the
	 * requested object. We better make sure the packfile is
	 * still here and can be accessed before supplying that
	 * answer, as it may have been deleted since the MIDX was
	 * loaded!
	 */
	if (!is_pack_valid(p))
		return 0;

	if (p->num_bad_objects) {
		uint32_t i;
		struct object_id oid;
		nth_midxed_object_oid(&oid, m, pos);
		for (i = 0; i < p->num_bad_objects; i++)
			if (hasheq(oid.hash,
				   p->bad_object_sha1 + the_hash_algo->rawsz * i))
				return 0;
	}

	e->offset = nth_midxed_offset(m, pos);
	e->p = p;

	return 1;
}
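
/*
 * Look up an object id in the multi-pack-index and, if found, fill the
 * pack_entry describing where it is stored.
 */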
int fill_midx_entry(struct repository *r,
		    const struct object_id *oid,
		    struct pack_entry *e,
		    struct multi_pack_index *m)
{
	uint32_t pos;

	if (!bsearch_midx(oid, m, &pos))
		return 0;

	return nth_midxed_pack_entry(r, m, e, pos);
}
330 /* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
331 static int cmp_idx_or_pack_name(const char *idx_or_pack_name
,
332 const char *idx_name
)
334 /* Skip past any initial matching prefix. */
335 while (*idx_name
&& *idx_name
== *idx_or_pack_name
) {
341 * If we didn't match completely, we may have matched "pack-1234." and
342 * be left with "idx" and "pack" respectively, which is also OK. We do
343 * not have to check for "idx" and "idx", because that would have been
344 * a complete match (and in that case these strcmps will be false, but
345 * we'll correctly return 0 from the final strcmp() below.
347 * Technically this matches "fooidx" and "foopack", but we'd never have
348 * such names in the first place.
350 if (!strcmp(idx_name
, "idx") && !strcmp(idx_or_pack_name
, "pack"))
354 * This not only checks for a complete match, but also orders based on
355 * the first non-identical character, which means our ordering will
356 * match a raw strcmp(). That makes it OK to use this to binary search
357 * a naively-sorted list.
359 return strcmp(idx_or_pack_name
, idx_name
);
int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name)
{
	uint32_t first = 0, last = m->num_packs;

	while (first < last) {
		uint32_t mid = first + (last - first) / 2;
		const char *current;
		int cmp;

		current = m->pack_names[mid];
		cmp = cmp_idx_or_pack_name(idx_or_pack_name, current);
		if (!cmp)
			return 1;
		if (cmp > 0) {
			first = mid + 1;
			continue;
		}

		last = mid;
	}

	return 0;
}
int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local)
{
	struct multi_pack_index *m;
	struct multi_pack_index *m_search;
	int config_value;
	static int env_value = -1;

	if (env_value < 0)
		env_value = git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0);

	if (!env_value &&
	    (repo_config_get_bool(r, "core.multipackindex", &config_value) ||
	     !config_value))
		return 0;

	for (m_search = r->objects->multi_pack_index; m_search; m_search = m_search->next)
		if (!strcmp(object_dir, m_search->object_dir))
			return 1;

	m = load_multi_pack_index(object_dir, local);

	if (m) {
		m->next = r->objects->multi_pack_index;
		r->objects->multi_pack_index = m;
		return 1;
	}

	return 0;
}
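
/*
 * Write the 12-byte header: the "MIDX" signature, the file and hash
 * versions, the chunk count, and the number of packs.
 */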
static size_t write_midx_header(struct hashfile *f,
				unsigned char num_chunks,
				uint32_t num_packs)
{
	unsigned char byte_values[4];

	hashwrite_be32(f, MIDX_SIGNATURE);
	byte_values[0] = MIDX_VERSION;
	byte_values[1] = MIDX_HASH_VERSION;
	byte_values[2] = num_chunks;
	byte_values[3] = 0; /* unused */
	hashwrite(f, byte_values, sizeof(byte_values));
	hashwrite_be32(f, num_packs);

	return MIDX_HEADER_SIZE;
}
struct pack_info {
	uint32_t orig_pack_int_id;
	char *pack_name;
	struct packed_git *p;
	unsigned expired : 1;
};
static int pack_info_compare(const void *_a, const void *_b)
{
	struct pack_info *a = (struct pack_info *)_a;
	struct pack_info *b = (struct pack_info *)_b;
	return strcmp(a->pack_name, b->pack_name);
}
struct pack_list {
	struct pack_info *info;
	uint32_t nr;
	uint32_t alloc;
	struct multi_pack_index *m;
	struct progress *progress;
	unsigned pack_paths_checked;
};
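
/*
 * Callback for for_each_file_in_pack_dir(): add any ".idx" file that is
 * not already covered by the existing multi-pack-index to the pack list.
 */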
static void add_pack_to_midx(const char *full_path, size_t full_path_len,
			     const char *file_name, void *data)
{
	struct pack_list *packs = (struct pack_list *)data;

	if (ends_with(file_name, ".idx")) {
		display_progress(packs->progress, ++packs->pack_paths_checked);
		if (packs->m && midx_contains_pack(packs->m, file_name))
			return;

		ALLOC_GROW(packs->info, packs->nr + 1, packs->alloc);

		packs->info[packs->nr].p = add_packed_git(full_path,
							  full_path_len,
							  0);

		if (!packs->info[packs->nr].p) {
			warning(_("failed to add packfile '%s'"),
				full_path);
			return;
		}

		if (open_pack_index(packs->info[packs->nr].p)) {
			warning(_("failed to open pack-index '%s'"),
				full_path);
			close_pack(packs->info[packs->nr].p);
			FREE_AND_NULL(packs->info[packs->nr].p);
			return;
		}

		packs->info[packs->nr].pack_name = xstrdup(file_name);
		packs->info[packs->nr].orig_pack_int_id = packs->nr;
		packs->info[packs->nr].expired = 0;
		packs->nr++;
	}
}
struct pack_midx_entry {
	struct object_id oid;
	uint32_t pack_int_id;
	time_t pack_mtime;
	uint64_t offset;
};
static int midx_oid_compare(const void *_a, const void *_b)
{
	const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
	const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
	int cmp = oidcmp(&a->oid, &b->oid);

	if (cmp)
		return cmp;

	if (a->pack_mtime > b->pack_mtime)
		return -1;
	else if (a->pack_mtime < b->pack_mtime)
		return 1;

	return a->pack_int_id - b->pack_int_id;
}
static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
				      struct pack_midx_entry *e,
				      uint32_t pos)
{
	if (pos >= m->num_objects)
		return 1;

	nth_midxed_object_oid(&e->oid, m, pos);
	e->pack_int_id = nth_midxed_pack_int_id(m, pos);
	e->offset = nth_midxed_offset(m, pos);

	/* consider objects in midx to be from "old" packs */
	e->pack_mtime = 0;
	return 0;
}
static void fill_pack_entry(uint32_t pack_int_id,
			    struct packed_git *p,
			    uint32_t cur_object,
			    struct pack_midx_entry *entry)
{
	if (!nth_packed_object_oid(&entry->oid, p, cur_object))
		die(_("failed to locate object %d in packfile"), cur_object);

	entry->pack_int_id = pack_int_id;
	entry->pack_mtime = p->mtime;

	entry->offset = nth_packed_object_offset(p, cur_object);
}
/*
 * It is possible to artificially get into a state where there are many
 * duplicate copies of objects. That can create high memory pressure if
 * we are to create a list of all objects before de-duplication. To reduce
 * this memory pressure without a significant performance drop, automatically
 * group objects by the first byte of their object id. Use the IDX fanout
 * tables to group the data, copy to a local array, then sort.
 *
 * Copy only the de-duplicated entries (selected by most-recent modified time
 * of a packfile containing the object).
 */
static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
						  struct pack_info *info,
						  uint32_t nr_packs,
						  uint32_t *nr_objects)
{
	uint32_t cur_fanout, cur_pack, cur_object;
	uint32_t alloc_fanout, alloc_objects, total_objects = 0;
	struct pack_midx_entry *entries_by_fanout = NULL;
	struct pack_midx_entry *deduplicated_entries = NULL;
	uint32_t start_pack = m ? m->num_packs : 0;

	for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
		total_objects += info[cur_pack].p->num_objects;

	/*
	 * As we de-duplicate by fanout value, we expect the fanout
	 * slices to be evenly distributed, with some noise. Hence,
	 * allocate slightly more than one 256th.
	 */
	alloc_objects = alloc_fanout = total_objects > 3200 ? total_objects / 200 : 16;

	ALLOC_ARRAY(entries_by_fanout, alloc_fanout);
	ALLOC_ARRAY(deduplicated_entries, alloc_objects);
	*nr_objects = 0;

	for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
		uint32_t nr_fanout = 0;

		if (m) {
			uint32_t start = 0, end;

			if (cur_fanout)
				start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
			end = ntohl(m->chunk_oid_fanout[cur_fanout]);

			for (cur_object = start; cur_object < end; cur_object++) {
				ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
				nth_midxed_pack_midx_entry(m,
							   &entries_by_fanout[nr_fanout],
							   cur_object);
				nr_fanout++;
			}
		}

		for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
			uint32_t start = 0, end;

			if (cur_fanout)
				start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1);
			end = get_pack_fanout(info[cur_pack].p, cur_fanout);

			for (cur_object = start; cur_object < end; cur_object++) {
				ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
				fill_pack_entry(cur_pack, info[cur_pack].p, cur_object, &entries_by_fanout[nr_fanout]);
				nr_fanout++;
			}
		}

		QSORT(entries_by_fanout, nr_fanout, midx_oid_compare);

		/*
		 * The batch is now sorted by OID and then mtime (descending).
		 * Take only the first duplicate.
		 */
		for (cur_object = 0; cur_object < nr_fanout; cur_object++) {
			if (cur_object && oideq(&entries_by_fanout[cur_object - 1].oid,
						&entries_by_fanout[cur_object].oid))
				continue;

			ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects);
			memcpy(&deduplicated_entries[*nr_objects],
			       &entries_by_fanout[cur_object],
			       sizeof(struct pack_midx_entry));
			(*nr_objects)++;
		}
	}

	free(entries_by_fanout);
	return deduplicated_entries;
}
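
/*
 * Write the PNAM chunk: the NUL-terminated pack names in lexicographic
 * order, padded to a multiple of MIDX_CHUNK_ALIGNMENT bytes.
 */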
static size_t write_midx_pack_names(struct hashfile *f,
				    struct pack_info *info,
				    uint32_t num_packs)
{
	uint32_t i;
	unsigned char padding[MIDX_CHUNK_ALIGNMENT];
	size_t written = 0;

	for (i = 0; i < num_packs; i++) {
		size_t writelen;

		if (info[i].expired)
			continue;

		if (i && strcmp(info[i].pack_name, info[i - 1].pack_name) <= 0)
			BUG("incorrect pack-file order: %s before %s",
			    info[i - 1].pack_name,
			    info[i].pack_name);

		writelen = strlen(info[i].pack_name) + 1;
		hashwrite(f, info[i].pack_name, writelen);
		written += writelen;
	}

	/* add padding to be aligned */
	i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
	if (i < MIDX_CHUNK_ALIGNMENT) {
		memset(padding, 0, sizeof(padding));
		hashwrite(f, padding, i);
		written += i;
	}

	return written;
}
static size_t write_midx_oid_fanout(struct hashfile *f,
				    struct pack_midx_entry *objects,
				    uint32_t nr_objects)
{
	struct pack_midx_entry *list = objects;
	struct pack_midx_entry *last = objects + nr_objects;
	uint32_t count = 0;
	uint32_t i;

	/*
	 * Write the first-level table (the list is sorted,
	 * but we use a 256-entry lookup to be able to avoid
	 * having to do eight extra binary search iterations).
	 */
	for (i = 0; i < 256; i++) {
		struct pack_midx_entry *next = list;

		while (next < last && next->oid.hash[0] == i) {
			count++;
			next++;
		}

		hashwrite_be32(f, count);
		list = next;
	}

	return MIDX_CHUNK_FANOUT_SIZE;
}
static size_t write_midx_oid_lookup(struct hashfile *f, unsigned char hash_len,
				    struct pack_midx_entry *objects,
				    uint32_t nr_objects)
{
	struct pack_midx_entry *list = objects;
	uint32_t i;
	size_t written = 0;

	for (i = 0; i < nr_objects; i++) {
		struct pack_midx_entry *obj = list++;

		if (i < nr_objects - 1) {
			struct pack_midx_entry *next = list;
			if (oidcmp(&obj->oid, &next->oid) >= 0)
				BUG("OIDs not in order: %s >= %s",
				    oid_to_hex(&obj->oid),
				    oid_to_hex(&next->oid));
		}

		hashwrite(f, obj->oid.hash, (int)hash_len);
		written += hash_len;
	}

	return written;
}
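
/*
 * Write the OOFF chunk: for each object, the new pack-int-id and either
 * its 31-bit offset or an index into the large-offset chunk.
 */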
static size_t write_midx_object_offsets(struct hashfile *f, int large_offset_needed,
					uint32_t *perm,
					struct pack_midx_entry *objects, uint32_t nr_objects)
{
	struct pack_midx_entry *list = objects;
	uint32_t i, nr_large_offset = 0;
	size_t written = 0;

	for (i = 0; i < nr_objects; i++) {
		struct pack_midx_entry *obj = list++;

		if (perm[obj->pack_int_id] == PACK_EXPIRED)
			BUG("object %s is in an expired pack with int-id %d",
			    oid_to_hex(&obj->oid),
			    obj->pack_int_id);

		hashwrite_be32(f, perm[obj->pack_int_id]);

		if (large_offset_needed && obj->offset >> 31)
			hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
		else if (!large_offset_needed && obj->offset >> 32)
			BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
			    oid_to_hex(&obj->oid),
			    obj->offset);
		else
			hashwrite_be32(f, (uint32_t)obj->offset);

		written += MIDX_CHUNK_OFFSET_WIDTH;
	}

	return written;
}
static size_t write_midx_large_offsets(struct hashfile *f, uint32_t nr_large_offset,
				       struct pack_midx_entry *objects, uint32_t nr_objects)
{
	struct pack_midx_entry *list = objects, *end = objects + nr_objects;
	size_t written = 0;

	while (nr_large_offset) {
		struct pack_midx_entry *obj;
		uint64_t offset;

		if (list >= end)
			BUG("too many large-offset objects");

		obj = list++;
		offset = obj->offset;

		if (!(offset >> 31))
			continue;

		hashwrite_be32(f, offset >> 32);
		hashwrite_be32(f, offset & 0xffffffffUL);
		written += 2 * sizeof(uint32_t);

		nr_large_offset--;
	}

	return written;
}
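
/*
 * Write a new multi-pack-index for object_dir. The chunks are laid out in
 * the order PNAM, OIDF, OIDL, OOFF and, when any offset exceeds 32 bits,
 * LOFF. Packs listed in packs_to_drop are left out of the new file.
 */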
static int write_midx_internal(const char *object_dir, struct multi_pack_index *m,
			       struct string_list *packs_to_drop, unsigned flags)
{
	unsigned char cur_chunk, num_chunks = 0;
	char *midx_name;
	uint32_t i;
	struct hashfile *f = NULL;
	struct lock_file lk;
	struct pack_list packs;
	uint32_t *pack_perm = NULL;
	uint64_t written = 0;
	uint32_t chunk_ids[MIDX_MAX_CHUNKS + 1];
	uint64_t chunk_offsets[MIDX_MAX_CHUNKS + 1];
	uint32_t nr_entries, num_large_offsets = 0;
	struct pack_midx_entry *entries = NULL;
	struct progress *progress = NULL;
	int large_offsets_needed = 0;
	int pack_name_concat_len = 0;
	int dropped_packs = 0;
	int result = 0;

	midx_name = get_midx_filename(object_dir);
	if (safe_create_leading_directories(midx_name))
		die_errno(_("unable to create leading directories of %s"),
			  midx_name);

	if (m)
		packs.m = m;
	else
		packs.m = load_multi_pack_index(object_dir, 1);

	packs.nr = 0;
	packs.alloc = packs.m ? packs.m->num_packs : 16;
	packs.info = NULL;
	ALLOC_ARRAY(packs.info, packs.alloc);

	if (packs.m) {
		for (i = 0; i < packs.m->num_packs; i++) {
			ALLOC_GROW(packs.info, packs.nr + 1, packs.alloc);

			packs.info[packs.nr].orig_pack_int_id = i;
			packs.info[packs.nr].pack_name = xstrdup(packs.m->pack_names[i]);
			packs.info[packs.nr].p = NULL;
			packs.info[packs.nr].expired = 0;
			packs.nr++;
		}
	}

	packs.pack_paths_checked = 0;
	if (flags & MIDX_PROGRESS)
		packs.progress = start_progress(_("Adding packfiles to multi-pack-index"), 0);
	else
		packs.progress = NULL;

	for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &packs);
	stop_progress(&packs.progress);

	if (packs.m && packs.nr == packs.m->num_packs && !packs_to_drop)
		goto cleanup;

	entries = get_sorted_entries(packs.m, packs.info, packs.nr, &nr_entries);

	for (i = 0; i < nr_entries; i++) {
		if (entries[i].offset > 0x7fffffff)
			num_large_offsets++;
		if (entries[i].offset > 0xffffffff)
			large_offsets_needed = 1;
	}

	QSORT(packs.info, packs.nr, pack_info_compare);

	if (packs_to_drop && packs_to_drop->nr) {
		int drop_index = 0;
		int missing_drops = 0;

		for (i = 0; i < packs.nr && drop_index < packs_to_drop->nr; i++) {
			int cmp = strcmp(packs.info[i].pack_name,
					 packs_to_drop->items[drop_index].string);

			if (!cmp) {
				drop_index++;
				packs.info[i].expired = 1;
			} else if (cmp > 0) {
				error(_("did not see pack-file %s to drop"),
				      packs_to_drop->items[drop_index].string);
				drop_index++;
				missing_drops++;
				i--;
			} else {
				packs.info[i].expired = 0;
			}
		}

		if (missing_drops) {
			result = 1;
			goto cleanup;
		}
	}

	/*
	 * pack_perm stores a permutation between pack-int-ids from the
	 * previous multi-pack-index to the new one we are writing:
	 *
	 * pack_perm[old_id] = new_id
	 */
	ALLOC_ARRAY(pack_perm, packs.nr);
	for (i = 0; i < packs.nr; i++) {
		if (packs.info[i].expired) {
			dropped_packs++;
			pack_perm[packs.info[i].orig_pack_int_id] = PACK_EXPIRED;
		} else {
			pack_perm[packs.info[i].orig_pack_int_id] = i - dropped_packs;
		}
	}

	for (i = 0; i < packs.nr; i++) {
		if (!packs.info[i].expired)
			pack_name_concat_len += strlen(packs.info[i].pack_name) + 1;
	}

	if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
		pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
					(pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);

	hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
	f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
	FREE_AND_NULL(midx_name);

	cur_chunk = 0;
	num_chunks = large_offsets_needed ? 5 : 4;

	written = write_midx_header(f, num_chunks, packs.nr - dropped_packs);

	chunk_ids[cur_chunk] = MIDX_CHUNKID_PACKNAMES;
	chunk_offsets[cur_chunk] = written + (num_chunks + 1) * MIDX_CHUNKLOOKUP_WIDTH;

	cur_chunk++;
	chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDFANOUT;
	chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + pack_name_concat_len;

	cur_chunk++;
	chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDLOOKUP;
	chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + MIDX_CHUNK_FANOUT_SIZE;

	cur_chunk++;
	chunk_ids[cur_chunk] = MIDX_CHUNKID_OBJECTOFFSETS;
	chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * the_hash_algo->rawsz;

	cur_chunk++;
	chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_CHUNK_OFFSET_WIDTH;
	if (large_offsets_needed) {
		chunk_ids[cur_chunk] = MIDX_CHUNKID_LARGEOFFSETS;

		cur_chunk++;
		chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] +
					   num_large_offsets * MIDX_CHUNK_LARGE_OFFSET_WIDTH;
	}

	chunk_ids[cur_chunk] = 0;

	for (i = 0; i <= num_chunks; i++) {
		if (i && chunk_offsets[i] < chunk_offsets[i - 1])
			BUG("incorrect chunk offsets: %"PRIu64" before %"PRIu64,
			    chunk_offsets[i - 1],
			    chunk_offsets[i]);

		if (chunk_offsets[i] % MIDX_CHUNK_ALIGNMENT)
			BUG("chunk offset %"PRIu64" is not properly aligned",
			    chunk_offsets[i]);

		hashwrite_be32(f, chunk_ids[i]);
		hashwrite_be32(f, chunk_offsets[i] >> 32);
		hashwrite_be32(f, chunk_offsets[i]);

		written += MIDX_CHUNKLOOKUP_WIDTH;
	}

	if (flags & MIDX_PROGRESS)
		progress = start_progress(_("Writing chunks to multi-pack-index"),
					  num_chunks);
	for (i = 0; i < num_chunks; i++) {
		if (written != chunk_offsets[i])
			BUG("incorrect chunk offset (%"PRIu64" != %"PRIu64") for chunk id %"PRIx32,
			    chunk_offsets[i],
			    written,
			    chunk_ids[i]);

		switch (chunk_ids[i]) {
		case MIDX_CHUNKID_PACKNAMES:
			written += write_midx_pack_names(f, packs.info, packs.nr);
			break;

		case MIDX_CHUNKID_OIDFANOUT:
			written += write_midx_oid_fanout(f, entries, nr_entries);
			break;

		case MIDX_CHUNKID_OIDLOOKUP:
			written += write_midx_oid_lookup(f, the_hash_algo->rawsz, entries, nr_entries);
			break;

		case MIDX_CHUNKID_OBJECTOFFSETS:
			written += write_midx_object_offsets(f, large_offsets_needed, pack_perm, entries, nr_entries);
			break;

		case MIDX_CHUNKID_LARGEOFFSETS:
			written += write_midx_large_offsets(f, num_large_offsets, entries, nr_entries);
			break;

		default:
			BUG("trying to write unknown chunk id %"PRIx32,
			    chunk_ids[i]);
		}

		display_progress(progress, i + 1);
	}
	stop_progress(&progress);

	if (written != chunk_offsets[num_chunks])
		BUG("incorrect final offset %"PRIu64" != %"PRIu64,
		    written,
		    chunk_offsets[num_chunks]);

	finalize_hashfile(f, NULL, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
	commit_lock_file(&lk);

cleanup:
	for (i = 0; i < packs.nr; i++) {
		if (packs.info[i].p) {
			close_pack(packs.info[i].p);
			free(packs.info[i].p);
		}
		free(packs.info[i].pack_name);
	}

	free(packs.info);
	free(entries);
	free(pack_perm);
	free(midx_name);
	return result;
}
int write_midx_file(const char *object_dir, unsigned flags)
{
	return write_midx_internal(object_dir, NULL, NULL, flags);
}
void clear_midx_file(struct repository *r)
{
	char *midx = get_midx_filename(r->objects->odb->path);

	if (r->objects && r->objects->multi_pack_index) {
		close_midx(r->objects->multi_pack_index);
		r->objects->multi_pack_index = NULL;
	}

	if (remove_path(midx))
		die(_("failed to clear multi-pack-index at %s"), midx);

	free(midx);
}
static int verify_midx_error;

static void midx_report(const char *fmt, ...)
{
	va_list ap;
	verify_midx_error = 1;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	fprintf(stderr, "\n");
	va_end(ap);
}
struct pair_pos_vs_id {
	uint32_t pos;
	uint32_t pack_int_id;
};

static int compare_pair_pos_vs_id(const void *_a, const void *_b)
{
	struct pair_pos_vs_id *a = (struct pair_pos_vs_id *)_a;
	struct pair_pos_vs_id *b = (struct pair_pos_vs_id *)_b;

	return b->pack_int_id - a->pack_int_id;
}
/*
 * Limit calls to display_progress() for performance reasons.
 * The interval here was arbitrarily chosen.
 */
#define SPARSE_PROGRESS_INTERVAL (1 << 12)
#define midx_display_sparse_progress(progress, n) \
	do { \
		uint64_t _n = (n); \
		if ((_n & (SPARSE_PROGRESS_INTERVAL - 1)) == 0) \
			display_progress(progress, _n); \
	} while (0)
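
/*
 * Check a multi-pack-index for consistency: every referenced pack must
 * load, the fanout and OID lookup tables must be sorted, and each stored
 * offset must match the offset recorded in the pack's own index.
 */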
int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags)
{
	struct pair_pos_vs_id *pairs = NULL;
	uint32_t i;
	struct progress *progress;
	struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
	verify_midx_error = 0;

	if (!m)
		return 0;

	progress = start_progress(_("Looking for referenced packfiles"),
				  m->num_packs);
	for (i = 0; i < m->num_packs; i++) {
		if (prepare_midx_pack(r, m, i))
			midx_report("failed to load pack in position %d", i);

		display_progress(progress, i + 1);
	}
	stop_progress(&progress);

	for (i = 0; i < 255; i++) {
		uint32_t oid_fanout1 = ntohl(m->chunk_oid_fanout[i]);
		uint32_t oid_fanout2 = ntohl(m->chunk_oid_fanout[i + 1]);

		if (oid_fanout1 > oid_fanout2)
			midx_report(_("oid fanout out of order: fanout[%d] = %"PRIx32" > %"PRIx32" = fanout[%d]"),
				    i, oid_fanout1, oid_fanout2, i + 1);
	}

	progress = start_sparse_progress(_("Verifying OID order in MIDX"),
					 m->num_objects - 1);
	for (i = 0; i < m->num_objects - 1; i++) {
		struct object_id oid1, oid2;

		nth_midxed_object_oid(&oid1, m, i);
		nth_midxed_object_oid(&oid2, m, i + 1);

		if (oidcmp(&oid1, &oid2) >= 0)
			midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"),
				    i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1);

		midx_display_sparse_progress(progress, i + 1);
	}
	stop_progress(&progress);

	/*
	 * Create an array mapping each object to its packfile id. Sort it
	 * to group the objects by packfile. Use this permutation to visit
	 * each of the objects and only require 1 packfile to be open at a
	 * time.
	 */
	ALLOC_ARRAY(pairs, m->num_objects);
	for (i = 0; i < m->num_objects; i++) {
		pairs[i].pos = i;
		pairs[i].pack_int_id = nth_midxed_pack_int_id(m, i);
	}

	progress = start_sparse_progress(_("Sorting objects by packfile"),
					 m->num_objects);
	display_progress(progress, 0); /* TODO: Measure QSORT() progress */
	QSORT(pairs, m->num_objects, compare_pair_pos_vs_id);
	stop_progress(&progress);

	progress = start_sparse_progress(_("Verifying object offsets"), m->num_objects);
	for (i = 0; i < m->num_objects; i++) {
		struct object_id oid;
		struct pack_entry e;
		off_t m_offset, p_offset;

		if (i > 0 && pairs[i-1].pack_int_id != pairs[i].pack_int_id &&
		    m->packs[pairs[i-1].pack_int_id]) {
			close_pack_fd(m->packs[pairs[i-1].pack_int_id]);
			close_pack_index(m->packs[pairs[i-1].pack_int_id]);
		}

		nth_midxed_object_oid(&oid, m, pairs[i].pos);

		if (!fill_midx_entry(r, &oid, &e, m)) {
			midx_report(_("failed to load pack entry for oid[%d] = %s"),
				    pairs[i].pos, oid_to_hex(&oid));
			continue;
		}

		if (open_pack_index(e.p)) {
			midx_report(_("failed to load pack-index for packfile %s"),
				    e.p->pack_name);
			continue;
		}

		m_offset = e.offset;
		p_offset = find_pack_entry_one(oid.hash, e.p);

		if (m_offset != p_offset)
			midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64),
				    pairs[i].pos, oid_to_hex(&oid), m_offset, p_offset);

		midx_display_sparse_progress(progress, i + 1);
	}
	stop_progress(&progress);

	free(pairs);

	return verify_midx_error;
}
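
/*
 * Delete pack-files that no longer have any objects referenced by the
 * multi-pack-index (skipping .keep packs), then rewrite the MIDX without
 * them.
 */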
int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
{
	uint32_t i, *count, result = 0;
	struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
	struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
	struct progress *progress = NULL;

	if (!m)
		return 0;

	count = xcalloc(m->num_packs, sizeof(uint32_t));

	if (flags & MIDX_PROGRESS)
		progress = start_progress(_("Counting referenced objects"),
					  m->num_objects);
	for (i = 0; i < m->num_objects; i++) {
		int pack_int_id = nth_midxed_pack_int_id(m, i);
		count[pack_int_id]++;
		display_progress(progress, i + 1);
	}
	stop_progress(&progress);

	if (flags & MIDX_PROGRESS)
		progress = start_progress(_("Finding and deleting unreferenced packfiles"),
					  m->num_packs);
	for (i = 0; i < m->num_packs; i++) {
		char *pack_name;
		display_progress(progress, i + 1);

		if (count[i])
			continue;

		if (prepare_midx_pack(r, m, i))
			continue;

		if (m->packs[i]->pack_keep)
			continue;

		pack_name = xstrdup(m->packs[i]->pack_name);
		close_pack(m->packs[i]);

		string_list_insert(&packs_to_drop, m->pack_names[i]);
		unlink_pack_path(pack_name, 0);
		free(pack_name);
	}
	stop_progress(&progress);

	free(count);

	if (packs_to_drop.nr)
		result = write_midx_internal(object_dir, m, &packs_to_drop, flags);

	string_list_clear(&packs_to_drop, 0);
	return result;
}
struct repack_info {
	time_t mtime;
	uint32_t referenced_objects;
	uint32_t pack_int_id;
};

static int compare_by_mtime(const void *a_, const void *b_)
{
	const struct repack_info *a, *b;

	a = (const struct repack_info *)a_;
	b = (const struct repack_info *)b_;

	if (a->mtime < b->mtime)
		return -1;
	if (a->mtime > b->mtime)
		return 1;
	return 0;
}
static int fill_included_packs_all(struct multi_pack_index *m,
				   unsigned char *include_pack)
{
	uint32_t i;

	for (i = 0; i < m->num_packs; i++)
		include_pack[i] = 1;

	return m->num_packs < 2;
}
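
/*
 * Select the oldest packs whose estimated live size (pack size scaled by
 * the share of objects still referenced) fits within batch_size. Returns
 * non-zero when there is nothing worth repacking.
 */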
static int fill_included_packs_batch(struct repository *r,
				     struct multi_pack_index *m,
				     unsigned char *include_pack,
				     size_t batch_size)
{
	uint32_t i, packs_to_repack;
	size_t total_size;
	struct repack_info *pack_info = xcalloc(m->num_packs, sizeof(struct repack_info));

	for (i = 0; i < m->num_packs; i++) {
		pack_info[i].pack_int_id = i;

		if (prepare_midx_pack(r, m, i))
			continue;

		pack_info[i].mtime = m->packs[i]->mtime;
	}

	for (i = 0; batch_size && i < m->num_objects; i++) {
		uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
		pack_info[pack_int_id].referenced_objects++;
	}

	QSORT(pack_info, m->num_packs, compare_by_mtime);

	total_size = 0;
	packs_to_repack = 0;
	for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
		int pack_int_id = pack_info[i].pack_int_id;
		struct packed_git *p = m->packs[pack_int_id];
		size_t expected_size;

		if (!p)
			continue;
		if (open_pack_index(p) || !p->num_objects)
			continue;

		expected_size = (size_t)(p->pack_size
					 * pack_info[i].referenced_objects);
		expected_size /= p->num_objects;

		if (expected_size >= batch_size)
			continue;

		packs_to_repack++;
		total_size += expected_size;
		include_pack[pack_int_id] = 1;
	}

	free(pack_info);

	if (total_size < batch_size || packs_to_repack < 2)
		return 1;

	return 0;
}
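
/*
 * Create one new pack from the selected packs by feeding their object ids
 * to 'git pack-objects', then rewrite the multi-pack-index to include it.
 */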
int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
{
	int result = 0;
	uint32_t i;
	unsigned char *include_pack;
	struct child_process cmd = CHILD_PROCESS_INIT;
	struct strbuf base_name = STRBUF_INIT;
	struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);

	if (!m)
		return 0;

	include_pack = xcalloc(m->num_packs, sizeof(unsigned char));

	if (batch_size) {
		if (fill_included_packs_batch(r, m, include_pack, batch_size))
			goto cleanup;
	} else if (fill_included_packs_all(m, include_pack))
		goto cleanup;

	argv_array_push(&cmd.args, "pack-objects");

	strbuf_addstr(&base_name, object_dir);
	strbuf_addstr(&base_name, "/pack/pack");
	argv_array_push(&cmd.args, base_name.buf);
	strbuf_release(&base_name);

	cmd.git_cmd = 1;
	cmd.in = cmd.out = -1;

	if (start_command(&cmd)) {
		error(_("could not start pack-objects"));
		result = 1;
		goto cleanup;
	}

	for (i = 0; i < m->num_objects; i++) {
		struct object_id oid;
		uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);

		if (!include_pack[pack_int_id])
			continue;

		nth_midxed_object_oid(&oid, m, i);
		xwrite(cmd.in, oid_to_hex(&oid), the_hash_algo->hexsz);
		xwrite(cmd.in, "\n", 1);
	}
	close(cmd.in);

	if (finish_command(&cmd)) {
		error(_("could not finish pack-objects"));
		result = 1;
		goto cleanup;
	}

	result = write_midx_internal(object_dir, m, NULL, flags);
	m = NULL;

cleanup:
	if (m)
		close_midx(m);
	free(include_pack);
	return result;
}