static const char pack_usage[] = "git-pack-objects [-q] [--no-reuse-delta] [--non-empty] [--local] [--incremental] [--window=N] [--depth=N] {--stdout | base-name} < object-list";
struct object_entry {
    unsigned char sha1[20];
    unsigned long size;     /* uncompressed size */
    unsigned long offset;   /* offset into the final pack file;
                             * nonzero if already written.
                             */
    unsigned int depth;     /* delta depth */
    unsigned int delta_limit;       /* base adjustment for in-pack delta */
    unsigned int hash;      /* name hint hash */
    enum object_type type;
    enum object_type in_pack_type;  /* could be delta */
    unsigned long delta_size;       /* delta data size (uncompressed) */
    struct object_entry *delta;     /* delta base object */
    struct packed_git *in_pack;     /* already in pack */
    unsigned int in_pack_offset;
    struct object_entry *delta_child;       /* deltified objects that use me as their base */
    struct object_entry *delta_sibling;     /* other deltified objects that
                                             * use the same base as me
                                             */
    int preferred_base;     /* we do not pack this, but it is encouraged to
                             * be used as the base object to delta huge
                             * objects against.
                             */
    int based_on_preferred; /* the current delta candidate is a preferred
                             * one, or a delta against a preferred one.
                             */
};
/*
 * Objects we are going to pack are collected in the objects array
 * (dynamically expanded).  nr_objects & nr_alloc control this array.
 * They are stored in the order we see them -- typically the
 * rev-list --objects order, which gives us a nice "minimum seek" order.
 *
 * sorted_by_sha and sorted_by_type are arrays of pointers that point at
 * elements in the objects array.  The former is used to build the pack
 * index (it lists object names in ascending order to help offset lookup),
 * and the latter is used by try_delta() to group similar things together.
 */
static unsigned char object_list_sha1[20];
static int non_empty = 0;
static int no_reuse_delta = 0;
static int local = 0;
static int incremental = 0;
static struct object_entry **sorted_by_sha, **sorted_by_type;
static struct object_entry *objects = NULL;
static int nr_objects = 0, nr_alloc = 0, nr_result = 0;
static const char *base_name;
static unsigned char pack_file_sha1[20];
static int progress = 1;
static volatile int progress_update = 0;
/*
 * The object names in the objects array are hashed with this hashtable,
 * to help look up an entry by object name.  Binary search from
 * sorted_by_sha is also possible, but this was easier to code and faster.
 * This hashtable is built after all the objects are seen.
 */
static int *object_ix = NULL;
static int object_ix_hashsz = 0;
/*
 * The pack index for an existing pack gives us easy access to the offsets
 * into the corresponding pack file where each object's data starts, but
 * the entries do not store the size of the compressed representation
 * (the uncompressed size is easily available by examining the pack entry
 * header).  We build a hashtable of existing packs (pack_revindex), and
 * keep a reverse index here -- the pack index file is sorted by object
 * name mapping to offset; this pack_revindex[].revindex array is an
 * ordered list of offsets, so if you know the offset of an object, the
 * next offset is where its packed representation ends.
 */
struct pack_revindex {
    struct packed_git *p;
    unsigned long *revindex;
} *pack_revindex = NULL;
static int pack_revindex_hashsz = 0;
static int written = 0;
static int written_delta = 0;
static int reused = 0;
static int reused_delta = 0;
static int pack_revindex_ix(struct packed_git *p)
{
    unsigned long ui = (unsigned long)(long)p;
    int i;

    ui = ui ^ (ui >> 16); /* defeat structure alignment */
    i = (int)(ui % pack_revindex_hashsz);
    while (pack_revindex[i].p) {
        if (pack_revindex[i].p == p)
            return i;
        if (++i == pack_revindex_hashsz)
            i = 0;
    }
    return -1 - i;
}
static void prepare_pack_ix(void)
{
    int num;
    struct packed_git *p;

    for (num = 0, p = packed_git; p; p = p->next)
        num++;
    if (!num)
        return;
    pack_revindex_hashsz = num * 11;
    pack_revindex = xcalloc(sizeof(*pack_revindex), pack_revindex_hashsz);
    for (p = packed_git; p; p = p->next) {
        num = pack_revindex_ix(p);
        if (num < 0)
            num = -1 - num;
        pack_revindex[num].p = p;
    }
    /* revindex elements are lazily initialized */
}
static int cmp_offset(const void *a_, const void *b_)
{
    unsigned long a = *(unsigned long *) a_;
    unsigned long b = *(unsigned long *) b_;
    if (a < b)
        return -1;
    return (a > b);
}
/*
 * Ordered list of offsets of objects in the pack.
 */
static void prepare_pack_revindex(struct pack_revindex *rix)
{
    struct packed_git *p = rix->p;
    int num_ent = num_packed_objects(p);
    int i;
    void *index = p->index_base + 256;

    rix->revindex = xmalloc(sizeof(unsigned long) * (num_ent + 1));
    for (i = 0; i < num_ent; i++) {
        long hl = *((long *)(index + 24 * i));
        rix->revindex[i] = ntohl(hl);
    }
    /* This knows the pack format -- the 20-byte trailer
     * follows immediately after the last object data.
     */
    rix->revindex[num_ent] = p->pack_size - 20;
    qsort(rix->revindex, num_ent, sizeof(unsigned long), cmp_offset);
}
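
/*
 * Note: revindex[] holds num_ent + 1 entries; the extra, largest entry is
 * the offset of the pack trailer, so find_packed_object_size() below can
 * compute the size of the last object as "next offset minus this offset"
 * just like it does for every other entry.
 */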
static unsigned long find_packed_object_size(struct packed_git *p,
                                             unsigned long ofs)
{
    int num;
    int lo, hi;
    struct pack_revindex *rix;
    unsigned long *revindex;

    num = pack_revindex_ix(p);
    if (num < 0)
        die("internal error: pack revindex uninitialized");
    rix = &pack_revindex[num];
    if (!rix->revindex)
        prepare_pack_revindex(rix);
    revindex = rix->revindex;
    lo = 0;
    hi = num_packed_objects(p) + 1;
    while (lo < hi) {
        int mi = (lo + hi) / 2;
        if (revindex[mi] == ofs) {
            return revindex[mi+1] - ofs;
        }
        else if (ofs < revindex[mi])
            hi = mi;
        else
            lo = mi + 1;
    }
    die("internal error: pack revindex corrupt");
}
static void *delta_against(void *buf, unsigned long size, struct object_entry *entry)
{
    unsigned long othersize, delta_size;
    char type[10];
    void *otherbuf = read_sha1_file(entry->delta->sha1, type, &othersize);
    void *delta_buf;

    if (!otherbuf)
        die("unable to read %s", sha1_to_hex(entry->delta->sha1));
    delta_buf = diff_delta(otherbuf, othersize,
                           buf, size, &delta_size, 0);
    if (!delta_buf || delta_size != entry->delta_size)
        die("delta size changed");
    free(buf);
    free(otherbuf);
    return delta_buf;
}
/*
 * The per-object header is a pretty dense thing, which is
 *  - first byte: low four bits are "size", then three bits of "type",
 *    and the high bit is "size continues".
 *  - each byte afterwards: low seven bits are size continuation,
 *    with the high bit being "size continues"
 */
static int encode_header(enum object_type type, unsigned long size, unsigned char *hdr)
{
    int n = 1;
    unsigned char c;

    if (type < OBJ_COMMIT || type > OBJ_DELTA)
        die("bad type %d", type);

    c = (type << 4) | (size & 15);
    size >>= 4;
    while (size) {
        *hdr++ = c | 0x80;
        c = size & 0x7f;
        size >>= 7;
        n++;
    }
    *hdr = c;
    return n;
}
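
/*
 * Worked example of the encoding above: a blob (OBJ_BLOB, numeric value 3)
 * of size 100 (binary 110 0100) comes out as two bytes:
 *
 *   byte 0: 1 011 0100   -- continuation bit, type 3, low four size bits
 *   byte 1: 0 0000110    -- no continuation, next seven size bits
 *
 * i.e. 0xb4 0x06.  Sizes up to 15 fit entirely in the first byte.
 */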
static unsigned long write_object(struct sha1file *f,
                                  struct object_entry *entry)
{
    unsigned long size;
    char type[10];
    void *buf;
    unsigned char header[10];
    unsigned hdrlen, datalen;
    enum object_type obj_type;
    int to_reuse = 0;

    if (entry->preferred_base)
        return 0;

    obj_type = entry->type;
    if (! entry->in_pack)
        to_reuse = 0;   /* can't reuse what we don't have */
    else if (obj_type == OBJ_DELTA)
        to_reuse = 1;   /* check_object() decided it for us */
    else if (obj_type != entry->in_pack_type)
        to_reuse = 0;   /* pack has delta which is unusable */
    else if (entry->delta)
        to_reuse = 0;   /* we want to pack afresh */
    else
        to_reuse = 1;   /* we have it in-pack undeltified,
                         * and we do not need to deltify it.
                         */

    if (!to_reuse) {
        buf = read_sha1_file(entry->sha1, type, &size);
        if (!buf)
            die("unable to read %s", sha1_to_hex(entry->sha1));
        if (size != entry->size)
            die("object %s size inconsistency (%lu vs %lu)",
                sha1_to_hex(entry->sha1), size, entry->size);
        if (entry->delta) {
            buf = delta_against(buf, size, entry);
            size = entry->delta_size;
            obj_type = OBJ_DELTA;
        }
        /*
         * The object header is a byte of 'type' followed by zero or
         * more bytes of length.  For deltas, the 20 bytes of delta
         * sha1 follows that.
         */
        hdrlen = encode_header(obj_type, size, header);
        sha1write(f, header, hdrlen);

        if (entry->delta) {
            sha1write(f, entry->delta, 20);
            hdrlen += 20;
        }
        datalen = sha1write_compressed(f, buf, size);
        free(buf);
    }
    else {
        struct packed_git *p = entry->in_pack;

        datalen = find_packed_object_size(p, entry->in_pack_offset);
        buf = p->pack_base + entry->in_pack_offset;
        sha1write(f, buf, datalen);
        hdrlen = 0; /* not really */
        if (obj_type == OBJ_DELTA)
            reused_delta++;
        reused++;
    }
    if (obj_type == OBJ_DELTA)
        written_delta++;
    written++;
    return hdrlen + datalen;
}
static unsigned long write_one(struct sha1file *f,
                               struct object_entry *e,
                               unsigned long offset)
{
    if (e->offset)
        /* offset starts from header size and cannot be zero
         * if it is written already.
         */
        return offset;
    e->offset = offset;
    offset += write_object(f, e);
    /* if we are deltified, write out its base object. */
    if (e->delta)
        offset = write_one(f, e->delta, offset);
    return offset;
}
static void write_pack_file(void)
{
    int i;
    struct sha1file *f;
    unsigned long offset;
    struct pack_header hdr;
    unsigned last_percent = 999;
    int do_progress = 0;

    if (!base_name)
        f = sha1fd(1, "<stdout>");
    else {
        f = sha1create("%s-%s.%s", base_name,
                       sha1_to_hex(object_list_sha1), "pack");
        do_progress = progress;
    }
    if (do_progress)
        fprintf(stderr, "Writing %d objects.\n", nr_result);

    hdr.hdr_signature = htonl(PACK_SIGNATURE);
    hdr.hdr_version = htonl(PACK_VERSION);
    hdr.hdr_entries = htonl(nr_result);
    sha1write(f, &hdr, sizeof(hdr));
    offset = sizeof(hdr);

    for (i = 0; i < nr_objects; i++) {
        offset = write_one(f, objects + i, offset);
        if (do_progress) {
            unsigned percent = written * 100 / nr_result;
            if (progress_update || percent != last_percent) {
                fprintf(stderr, "%4u%% (%u/%u) done\r",
                        percent, written, nr_result);
                progress_update = 0;
                last_percent = percent;
            }
        }
    }

    sha1close(f, pack_file_sha1, 1);
}
static void write_index_file(void)
{
    int i;
    struct sha1file *f = sha1create("%s-%s.%s", base_name,
                                    sha1_to_hex(object_list_sha1), "idx");
    struct object_entry **list = sorted_by_sha;
    struct object_entry **last = list + nr_result;
    unsigned int array[256];

    /*
     * Write the first-level table (the list is sorted,
     * but we use a 256-entry lookup to be able to avoid
     * having to do eight extra binary search iterations).
     */
    for (i = 0; i < 256; i++) {
        struct object_entry **next = list;
        while (next < last) {
            struct object_entry *entry = *next;
            if (entry->sha1[0] != i)
                break;
            next++;
        }
        array[i] = htonl(next - sorted_by_sha);
        list = next;
    }
    sha1write(f, array, 256 * sizeof(int));
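
    /*
     * Note: after the loop above, array[i] holds the cumulative number of
     * objects whose first SHA1 byte is <= i, so a reader can binary search
     * only within the slice of entries that share the first byte.
     */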
    /*
     * Write the actual SHA1 entries..
     */
    list = sorted_by_sha;
    for (i = 0; i < nr_result; i++) {
        struct object_entry *entry = *list++;
        unsigned int offset = htonl(entry->offset);
        sha1write(f, &offset, 4);
        sha1write(f, entry->sha1, 20);
    }
    sha1write(f, pack_file_sha1, 20);
    sha1close(f, NULL, 1);
}
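
/*
 * Hashtable convention used below: object_ix[] slots store "index into
 * objects[] plus one", so zero means an empty slot.  On a miss the hash
 * lookup reports the empty slot it stopped at as "-1 - slot", which
 * add_object_entry() undoes when it inserts a new entry.
 */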
static int locate_object_entry_hash(const unsigned char *sha1)
{
    int i;
    unsigned int ui;

    memcpy(&ui, sha1, sizeof(unsigned int));
    i = ui % object_ix_hashsz;
    while (0 < object_ix[i]) {
        if (!memcmp(sha1, objects[object_ix[i]-1].sha1, 20))
            return i;
        if (++i == object_ix_hashsz)
            i = 0;
    }
    return -1 - i;
}

static struct object_entry *locate_object_entry(const unsigned char *sha1)
{
    int i;

    if (!object_ix_hashsz)
        return NULL;
    i = locate_object_entry_hash(sha1);
    if (0 <= i)
        return &objects[object_ix[i]-1];
    return NULL;
}
static void rehash_objects(void)
{
    int i;
    struct object_entry *oe;

    object_ix_hashsz = nr_objects * 3;
    if (object_ix_hashsz < 1024)
        object_ix_hashsz = 1024;
    object_ix = xrealloc(object_ix, sizeof(int) * object_ix_hashsz);
    object_ix = memset(object_ix, 0, sizeof(int) * object_ix_hashsz);
    for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
        int ix = locate_object_entry_hash(oe->sha1);
        if (0 <= ix)
            continue;
        ix = -1 - ix;
        object_ix[ix] = i + 1;
    }
}
struct name_path {
    struct name_path *up;
    const char *elem;
    int len;
};
static unsigned name_hash(struct name_path *path, const char *name)
{
    struct name_path *p = path;
    const char *n = name + strlen(name);
    unsigned hash = 0, name_hash = 0, name_done = 0;

    if (n != name && n[-1] == '\n')
        n--;
    while (name <= --n) {
        unsigned char c = *n;
        if (c == '/' && !name_done) {
            name_hash = hash;
            name_done = 1;
            hash = 0;
        }
        hash = hash * 11 + c;
    }
    if (!name_done) {
        name_hash = hash;
        hash = 0;
    }
    for (p = path; p; p = p->up) {
        hash = hash * 11 + '/';
        n = p->elem + p->len;
        while (p->elem <= --n) {
            unsigned char c = *n;
            hash = hash * 11 + c;
        }
    }
    /*
     * Make sure "Makefile" and "t/Makefile" are hashed separately
     * but close enough.
     */
    hash = (name_hash<<DIRBITS) | (hash & ((1U<<DIRBITS)-1));
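
    /*
     * In other words: the hash of the final path component ends up in the
     * high bits, and the leading directories contribute only the low
     * DIRBITS bits, so files with the same basename in different
     * directories get hash values that differ, but only slightly.
     */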
    if (0) { /* debug */
        n = name + strlen(name);
        if (n != name && n[-1] == '\n')
            n--;
        while (name <= --n)
            fputc(*n, stderr);
        for (p = path; p; p = p->up) {
            fputc('/', stderr);
            n = p->elem + p->len;
            while (p->elem <= --n)
                fputc(*n, stderr);
        }
        fprintf(stderr, "\t%08x\n", hash);
    }
    return hash;
}
static int add_object_entry(const unsigned char *sha1, unsigned hash, int exclude)
{
    unsigned int idx = nr_objects;
    struct object_entry *entry;
    struct packed_git *p;
    unsigned int found_offset = 0;
    struct packed_git *found_pack = NULL;
    int ix, status = 0;

    for (p = packed_git; p; p = p->next) {
        struct pack_entry e;
        if (find_pack_entry_one(sha1, &e, p)) {
            if (incremental)
                return 0;
            if (local && !p->pack_local)
                return 0;
            if (!found_pack) {
                found_offset = e.offset;
                found_pack = p;
            }
        }
    }

    if ((entry = locate_object_entry(sha1)) != NULL)
        goto already_added;

    if (idx >= nr_alloc) {
        unsigned int needed = (idx + 1024) * 3 / 2;
        objects = xrealloc(objects, needed * sizeof(*entry));
        nr_alloc = needed;
    }
    entry = objects + idx;
    nr_objects = idx + 1;
    memset(entry, 0, sizeof(*entry));
    memcpy(entry->sha1, sha1, 20);
    entry->hash = hash;
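
    /*
     * Grow and rehash once the table would be more than 3/4 full;
     * rehash_objects() resizes it to three times the object count.
     */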
    if (object_ix_hashsz * 3 <= nr_objects * 4)
        rehash_objects();
    else {
        ix = locate_object_entry_hash(entry->sha1);
        if (0 <= ix)
            die("internal error in object hashing.");
        object_ix[-1 - ix] = idx + 1;
    }
    status = 1;

 already_added:
    if (progress_update) {
        fprintf(stderr, "Counting objects...%d\r", nr_objects);
        progress_update = 0;
    }
    if (exclude)
        entry->preferred_base = 1;
    else if (found_pack) {
        entry->in_pack = found_pack;
        entry->in_pack_offset = found_offset;
    }
    return status;
}
static void add_pbase_tree(struct tree_desc *tree, struct name_path *up)
{
    while (tree->size) {
        const char *name;
        unsigned mode, hash;
        unsigned long size;
        char type[20];
        const unsigned char *sha1;

        sha1 = tree_entry_extract(tree, &name, &mode);
        update_tree_entry(tree);
        if (!has_sha1_file(sha1))
            continue;
        if (sha1_object_info(sha1, type, &size))
            continue;

        hash = name_hash(up, name);
        if (!add_object_entry(sha1, hash, 1))
            continue;

        if (!strcmp(type, "tree")) {
            struct tree_desc sub;
            struct name_path me;
            void *elem;

            elem = read_sha1_file(sha1, type, &sub.size);
            sub.buf = elem;
            me.up = up;
            me.elem = name;
            me.len = strlen(name);
            add_pbase_tree(&sub, &me);
            free(elem);
        }
    }
}
static void add_preferred_base(unsigned char *sha1)
{
    struct tree_desc tree;
    void *elem;

    elem = read_object_with_reference(sha1, "tree", &tree.size, NULL);
    tree.buf = elem;
    if (!elem)
        return;
    if (add_object_entry(sha1, name_hash(NULL, ""), 1))
        add_pbase_tree(&tree, NULL);
    free(elem);
}
static void check_object(struct object_entry *entry)
{
    char type[20];

    if (entry->in_pack && !entry->preferred_base) {
        unsigned char base[20];
        unsigned long size;
        struct object_entry *base_entry;

        /* We want in_pack_type even if we do not reuse delta.
         * There is no point not reusing non-delta representations.
         */
        check_reuse_pack_delta(entry->in_pack,
                               entry->in_pack_offset,
                               base, &size,
                               &entry->in_pack_type);

        /* Check if it is delta, and the base is also an object
         * we are going to pack.  If so we will reuse the existing
         * delta.
         */
        if (!no_reuse_delta &&
            entry->in_pack_type == OBJ_DELTA &&
            (base_entry = locate_object_entry(base)) &&
            (!base_entry->preferred_base)) {

            /* Depth value does not matter - find_deltas()
             * will never consider reused delta as the
             * base object to deltify other objects
             * against, in order to avoid circular deltas.
             */

            /* uncompressed size of the delta data */
            entry->size = entry->delta_size = size;
            entry->delta = base_entry;
            entry->type = OBJ_DELTA;

            entry->delta_sibling = base_entry->delta_child;
            base_entry->delta_child = entry;

            return;
        }
        /* Otherwise we would do the usual */
    }

    if (sha1_object_info(entry->sha1, type, &entry->size))
        die("unable to get type of object %s",
            sha1_to_hex(entry->sha1));

    if (!strcmp(type, "commit")) {
        entry->type = OBJ_COMMIT;
    } else if (!strcmp(type, "tree")) {
        entry->type = OBJ_TREE;
    } else if (!strcmp(type, "blob")) {
        entry->type = OBJ_BLOB;
    } else if (!strcmp(type, "tag")) {
        entry->type = OBJ_TAG;
    } else
        die("unable to pack object %s of type %s",
            sha1_to_hex(entry->sha1), type);
}
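
/*
 * check_delta_limit() recursively walks the delta_child/delta_sibling
 * links and returns the depth of the deepest chain of deltified objects
 * that (directly or indirectly) use "me" as their base; get_object_details()
 * records that in delta_limit, and try_delta() subtracts it from the
 * allowed depth so that reused in-pack deltas cannot push chains past
 * the --depth limit.
 */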
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
    struct object_entry *child = me->delta_child;
    unsigned int m = n;

    while (child) {
        unsigned int c = check_delta_limit(child, n + 1);
        if (m < c)
            m = c;
        child = child->delta_sibling;
    }
    return m;
}
static void get_object_details(void)
{
    int i;
    struct object_entry *entry;

    prepare_pack_ix();
    for (i = 0, entry = objects; i < nr_objects; i++, entry++)
        check_object(entry);

    if (nr_objects == nr_result) {
        /*
         * Depth of objects that depend on the entry -- this
         * is subtracted from depth-max to break too deep
         * delta chain because of delta data reusing.
         * However, we loosen this restriction when we know we
         * are creating a thin pack -- it will have to be
         * expanded on the other end anyway, so do not
         * artificially cut the delta chain and let it go as
         * deep as possible.
         */
        for (i = 0, entry = objects; i < nr_objects; i++, entry++)
            if (!entry->delta && entry->delta_child)
                entry->delta_limit =
                    check_delta_limit(entry, 1);
    }
}
typedef int (*entry_sort_t)(const struct object_entry *, const struct object_entry *);

static entry_sort_t current_sort;

static int sort_comparator(const void *_a, const void *_b)
{
    struct object_entry *a = *(struct object_entry **)_a;
    struct object_entry *b = *(struct object_entry **)_b;
    return current_sort(a,b);
}

static struct object_entry **create_sorted_list(entry_sort_t sort)
{
    struct object_entry **list = xmalloc(nr_objects * sizeof(struct object_entry *));
    int i;

    for (i = 0; i < nr_objects; i++)
        list[i] = objects + i;
    current_sort = sort;
    qsort(list, nr_objects, sizeof(struct object_entry *), sort_comparator);
    return list;
}
static int sha1_sort(const struct object_entry *a, const struct object_entry *b)
{
    return memcmp(a->sha1, b->sha1, 20);
}
static struct object_entry **create_final_object_list(void)
{
    struct object_entry **list;
    int i, j;

    for (i = nr_result = 0; i < nr_objects; i++)
        if (!objects[i].preferred_base)
            nr_result++;
    list = xmalloc(nr_result * sizeof(struct object_entry *));
    for (i = j = 0; i < nr_objects; i++) {
        if (!objects[i].preferred_base)
            list[j++] = objects + i;
    }
    current_sort = sha1_sort;
    qsort(list, nr_result, sizeof(struct object_entry *), sort_comparator);
    return list;
}
static int type_size_sort(const struct object_entry *a, const struct object_entry *b)
{
    if (a->type < b->type)
        return -1;
    if (a->type > b->type)
        return 1;
    if (a->hash < b->hash)
        return -1;
    if (a->hash > b->hash)
        return 1;
    if (a->preferred_base < b->preferred_base)
        return -1;
    if (a->preferred_base > b->preferred_base)
        return 1;
    if (a->size < b->size)
        return -1;
    if (a->size > b->size)
        return 1;
    return a < b ? -1 : (a > b);
}
struct unpacked {
    struct object_entry *entry;
    void *data;
};

/*
 * We search for deltas _backwards_ in a list sorted by type and
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller - deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent one.
 */
static int try_delta(struct unpacked *cur, struct unpacked *old, unsigned max_depth)
{
    struct object_entry *cur_entry = cur->entry;
    struct object_entry *old_entry = old->entry;
    int old_preferred = (old_entry->preferred_base ||
                         old_entry->based_on_preferred);
    unsigned long size, oldsize, delta_size, sizediff;
    unsigned long max_size;
    void *delta_buf;

    /* Don't bother doing diffs between different types */
    if (cur_entry->type != old_entry->type)
        return -1;

    /* We do not compute delta to *create* objects we are not
     * going to pack.
     */
    if (cur_entry->preferred_base)
        return -1;

    /* If the current object is at the pack edge, take the depth of the
     * objects that depend on the current object into account --
     * otherwise they would become too deep.
     */
    if (cur_entry->delta_child) {
        if (max_depth <= cur_entry->delta_limit)
            return 0;
        max_depth -= cur_entry->delta_limit;
    }

    size = cur_entry->size;
    oldsize = old_entry->size;
    sizediff = oldsize > size ? oldsize - size : size - oldsize;

    if (old_entry->depth >= max_depth)
        return 0;

    /*
     * We always delta from the bigger to the smaller, since that's
     * more space-efficient (deletes don't have to say _what_ they
     * delete).
     */
    max_size = size / 2 - 20;
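
    /*
     * A delta is only worth keeping if it is smaller than half of the
     * undeltified result; the extra 20 here presumably accounts for the
     * 20-byte base object name an OBJ_DELTA entry has to carry in the
     * pack (see write_object() above).
     */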
    if (cur_entry->delta) {
        if (cur_entry->based_on_preferred) {
            if (old_preferred)
                max_size = cur_entry->delta_size-1;
            else
                /* trying with non-preferred one when we
                 * already have a delta based on preferred
                 * one is pointless.
                 */
                return 0;
        }
        else if (!old_preferred)
            max_size = cur_entry->delta_size-1;
        else
            /* otherwise... even if delta with a
             * preferred one produces a bigger result than
             * what we currently have, which is based on a
             * non-preferred one, it is OK.
             */
            ;
    }
    if (sizediff >= max_size)
        return 0;
    delta_buf = diff_delta(old->data, oldsize,
                           cur->data, size, &delta_size, max_size);
    if (!delta_buf)
        return 0;
    cur_entry->delta = old_entry;
    cur_entry->delta_size = delta_size;
    cur_entry->depth = old_entry->depth + 1;
    cur_entry->based_on_preferred = old_preferred;
    free(delta_buf);
    return 0;
}
static void progress_interval(int signum)
{
    signal(SIGALRM, progress_interval);
    progress_update = 1;
}
static void find_deltas(struct object_entry **list, int window, int depth)
{
    int i, idx = 0;
    unsigned int array_size = window * sizeof(struct unpacked);
    struct unpacked *array = xmalloc(array_size);
    unsigned processed = 0;
    unsigned last_percent = 999;

    memset(array, 0, array_size);

    if (progress)
        fprintf(stderr, "Deltifying %d objects.\n", nr_result);

    for (i = nr_objects - 1; 0 <= i; i--) {
        struct object_entry *entry = list[i];
        struct unpacked *n = array + idx;
        unsigned long size;
        char type[10];
        int j;

        if (!entry->preferred_base)
            processed++;

        if (progress) {
            unsigned percent = processed * 100 / nr_result;
            if (percent != last_percent || progress_update) {
                fprintf(stderr, "%4u%% (%u/%u) done\r",
                        percent, processed, nr_result);
                progress_update = 0;
                last_percent = percent;
            }
        }

        if (entry->delta)
            /* This happens if we decided to reuse existing
             * delta from a pack.  "!no_reuse_delta &&" is implied.
             */
            continue;

        free(n->data);
        n->entry = entry;
        n->data = read_sha1_file(entry->sha1, type, &size);
        if (size != entry->size)
            die("object %s inconsistent object length (%lu vs %lu)", sha1_to_hex(entry->sha1), size, entry->size);

        for (j = 1; j < window; j++) {
            unsigned int other_idx = idx + j;
            struct unpacked *m;
            if (other_idx >= window)
                other_idx -= window;
            m = array + other_idx;
            if (!m->entry)
                break;
            if (try_delta(n, m, depth) < 0)
                break;
        }
        idx++;
        if (idx >= window)
            idx = 0;
    }

    for (i = 0; i < window; ++i)
        free(array[i].data);
    free(array);
}
static void prepare_pack(int window, int depth)
{
    get_object_details();
    sorted_by_type = create_sorted_list(type_size_sort);
    if (window && depth)
        find_deltas(sorted_by_type, window+1, depth);
}
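
/*
 * Note that find_deltas() is handed window+1: its array needs one slot
 * for the object currently being considered in addition to the "window"
 * previously seen objects it may try to delta against.
 */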
static int reuse_cached_pack(unsigned char *sha1, int pack_to_stdout)
{
    static const char cache[] = "pack-cache/pack-%s.%s";
    char *cached_pack, *cached_idx;
    int ifd, ofd, ifd_ix = -1;

    cached_pack = git_path(cache, sha1_to_hex(sha1), "pack");
    ifd = open(cached_pack, O_RDONLY);
    if (ifd < 0)
        return 0;

    if (!pack_to_stdout) {
        cached_idx = git_path(cache, sha1_to_hex(sha1), "idx");
        ifd_ix = open(cached_idx, O_RDONLY);
        if (ifd_ix < 0) {
            close(ifd);
            return 0;
        }
    }

    if (progress)
        fprintf(stderr, "Reusing %d objects pack %s\n", nr_objects,
                cached_pack);

    if (pack_to_stdout) {
        if (copy_fd(ifd, 1))
            exit(1);
    }
    else {
        char name[PATH_MAX];
        snprintf(name, sizeof(name),
                 "%s-%s.%s", base_name, sha1_to_hex(sha1), "pack");
        ofd = open(name, O_CREAT | O_EXCL | O_WRONLY, 0666);
        if (ofd < 0)
            die("unable to open %s (%s)", name, strerror(errno));
        if (copy_fd(ifd, ofd))
            exit(1);

        snprintf(name, sizeof(name),
                 "%s-%s.%s", base_name, sha1_to_hex(sha1), "idx");
        ofd = open(name, O_CREAT | O_EXCL | O_WRONLY, 0666);
        if (ofd < 0)
            die("unable to open %s (%s)", name, strerror(errno));
        if (copy_fd(ifd_ix, ofd))
            exit(1);
        puts(sha1_to_hex(sha1));
    }
    return 1;
}
int main(int argc, char **argv)
{
    SHA_CTX ctx;
    char line[PATH_MAX + 20];
    int window = 10, depth = 10, pack_to_stdout = 0;
    struct object_entry **list;
    int i;

    setup_git_directory();

    for (i = 1; i < argc; i++) {
        const char *arg = argv[i];

        if (*arg == '-') {
            if (!strcmp("--non-empty", arg)) {
                non_empty = 1;
                continue;
            }
            if (!strcmp("--local", arg)) {
                local = 1;
                continue;
            }
            if (!strcmp("--incremental", arg)) {
                incremental = 1;
                continue;
            }
            if (!strncmp("--window=", arg, 9)) {
                char *end;
                window = strtoul(arg+9, &end, 0);
                if (!arg[9] || *end)
                    usage(pack_usage);
                continue;
            }
            if (!strncmp("--depth=", arg, 8)) {
                char *end;
                depth = strtoul(arg+8, &end, 0);
                if (!arg[8] || *end)
                    usage(pack_usage);
                continue;
            }
            if (!strcmp("-q", arg)) {
                progress = 0;
                continue;
            }
            if (!strcmp("--no-reuse-delta", arg)) {
                no_reuse_delta = 1;
                continue;
            }
            if (!strcmp("--stdout", arg)) {
                pack_to_stdout = 1;
                continue;
            }
            usage(pack_usage);
        }
        if (base_name)
            usage(pack_usage);
        base_name = arg;
    }

    if (pack_to_stdout != !base_name)
        usage(pack_usage);

    prepare_packed_git();

    if (progress) {
        struct itimerval v;
        v.it_interval.tv_sec = 1;
        v.it_interval.tv_usec = 0;
        v.it_value = v.it_interval;
        signal(SIGALRM, progress_interval);
        setitimer(ITIMER_REAL, &v, NULL);
        fprintf(stderr, "Generating pack...\n");
    }

    while (fgets(line, sizeof(line), stdin) != NULL) {
        unsigned char sha1[20];

        if (line[0] == '-') {
            if (get_sha1_hex(line+1, sha1))
                die("expected edge sha1, got garbage:\n %s",
                    line);
            add_preferred_base(sha1);
            continue;
        }
        if (get_sha1_hex(line, sha1))
            die("expected sha1, got garbage:\n %s", line);
        add_object_entry(sha1, name_hash(NULL, line+41), 0);
    }
    if (progress)
        fprintf(stderr, "Done counting %d objects.\n", nr_objects);
    sorted_by_sha = create_final_object_list();
    if (non_empty && !nr_result)
        return 0;

    SHA1_Init(&ctx);
    list = sorted_by_sha;
    for (i = 0; i < nr_result; i++) {
        struct object_entry *entry = *list++;
        SHA1_Update(&ctx, entry->sha1, 20);
    }
    SHA1_Final(object_list_sha1, &ctx);
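
    /*
     * object_list_sha1 -- the SHA1 over the sorted list of object names --
     * is what names the resulting files (base_name-<sha1>.pack/.idx), and
     * it is also the key handed to reuse_cached_pack() below.
     */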
    if (progress && (nr_objects != nr_result))
        fprintf(stderr, "Result has %d objects.\n", nr_result);

    if (reuse_cached_pack(object_list_sha1, pack_to_stdout))
        ;
    else {
        prepare_pack(window, depth);
        if (progress && pack_to_stdout) {
            /* the other end usually displays progress itself */
            struct itimerval v = {{0,},};
            setitimer(ITIMER_REAL, &v, NULL);
            signal(SIGALRM, SIG_IGN);
            progress_update = 0;
        }
        write_pack_file();
        if (!pack_to_stdout) {
            write_index_file();
            puts(sha1_to_hex(object_list_sha1));
        }
    }
    if (progress)
        fprintf(stderr, "Total %d, written %d (delta %d), reused %d (delta %d)\n",
                nr_result, written, written_delta, reused, reused_delta);
    return 0;
}