#include "cache-tree.h"
#include "commit-slab.h"

define_commit_slab(blame_suspects, struct blame_origin *);
static struct blame_suspects blame_suspects;

struct blame_origin *get_blame_suspects(struct commit *commit)
{
	struct blame_origin **result;

	result = blame_suspects_peek(&blame_suspects, commit);

	return result ? *result : NULL;
}

static void set_blame_suspects(struct commit *commit, struct blame_origin *origin)
{
	*blame_suspects_at(&blame_suspects, commit) = origin;
}

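/*
 * Drop one reference to an origin.  When the refcount reaches zero,
 * unlink the origin from its commit's suspect chain and free it.
 */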
void blame_origin_decref(struct blame_origin *o)
{
	if (o && --o->refcnt <= 0) {
		struct blame_origin *p, *l = NULL;

		blame_origin_decref(o->previous);
		free(o->file.ptr);

		/* Should be present exactly once in commit chain */
		for (p = get_blame_suspects(o->commit); p; l = p, p = p->next) {
			if (p == o) {
				if (l)
					l->next = p->next;
				else
					set_blame_suspects(o->commit, p->next);
				free(o);
				return;
			}
		}
		die("internal error in blame_origin_decref");
	}
}

/*
 * Given a commit and a path in it, create a new origin structure.
 * The callers that add blame to the scoreboard should use
 * get_origin() to obtain shared, refcounted copy instead of calling
 * this function directly.
 */
static struct blame_origin *make_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o;
	FLEX_ALLOC_STR(o, path, path);
	o->commit = commit;
	o->refcnt = 1;
	o->next = get_blame_suspects(commit);
	set_blame_suspects(commit, o);
	return o;
}

/*
 * Locate an existing origin or create a new one.
 * This moves the origin to front position in the commit util list.
 */
static struct blame_origin *get_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o, *l;

	for (o = get_blame_suspects(commit), l = NULL; o; l = o, o = o->next) {
		if (!strcmp(o->path, path)) {
			/* bump to front of the list */
			if (l) {
				l->next = o->next;
				o->next = get_blame_suspects(commit);
				set_blame_suspects(commit, o);
			}
			return blame_origin_incref(o);
		}
	}
	return make_origin(commit, path);
}

static void verify_working_tree_path(struct commit *work_tree, const char *path)
{
	struct commit_list *parents;
	int pos;

	for (parents = work_tree->parents; parents; parents = parents->next) {
		const struct object_id *commit_oid = &parents->item->object.oid;
		struct object_id blob_oid;
		unsigned mode;

		if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
		    oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB)
			return;
	}

	pos = cache_name_pos(path, strlen(path));
	if (pos >= 0)
		; /* path is in the index */
	else if (-1 - pos < active_nr &&
		 !strcmp(active_cache[-1 - pos]->name, path))
		; /* path is in the index, unmerged */
	else
		die("no such path '%s' in HEAD", path);
}

static struct commit_list **append_parent(struct commit_list **tail, const struct object_id *oid)
{
	struct commit *parent;

	parent = lookup_commit_reference(oid);
	if (!parent)
		die("no such commit %s", oid_to_hex(oid));
	return &commit_list_insert(parent, tail)->next;
}

static void append_merge_parents(struct commit_list **tail)
{
	int merge_head;
	struct strbuf line = STRBUF_INIT;

	merge_head = open(git_path_merge_head(), O_RDONLY);
	if (merge_head < 0) {
		if (errno == ENOENT)
			return;
		die("cannot open '%s' for reading", git_path_merge_head());
	}

	while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
		struct object_id oid;
		if (line.len < GIT_SHA1_HEXSZ || get_oid_hex(line.buf, &oid))
			die("unknown line in '%s': %s", git_path_merge_head(), line.buf);
		tail = append_parent(tail, &oid);
	}
	close(merge_head);
	strbuf_release(&line);
}

/*
 * This isn't as simple as passing sb->buf and sb->len, because we
 * want to transfer ownership of the buffer to the commit (so we
 * must use detach).
 */
static void set_commit_buffer_from_strbuf(struct commit *c, struct strbuf *sb)
{
	size_t len;
	void *buf = strbuf_detach(sb, &len);
	set_commit_buffer(c, buf, len);
}

/*
 * Prepare a dummy commit that represents the work tree (or staged) item.
 * Note that annotating work tree item never works in the reverse.
 */
static struct commit *fake_working_tree_commit(struct diff_options *opt,
					       const char *path,
					       const char *contents_from)
{
	struct commit *commit;
	struct blame_origin *origin;
	struct commit_list **parent_tail, *parent;
	struct object_id head_oid;
	struct strbuf buf = STRBUF_INIT;
	const char *ident;
	time_t now;
	int size, len;
	struct cache_entry *ce;
	unsigned mode;
	struct strbuf msg = STRBUF_INIT;

	read_cache();
	time(&now);
	commit = alloc_commit_node(the_repository);
	commit->object.parsed = 1;
	commit->date = now;
	parent_tail = &commit->parents;

	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		die("no such ref: HEAD");

	parent_tail = append_parent(parent_tail, &head_oid);
	append_merge_parents(parent_tail);
	verify_working_tree_path(commit, path);

	origin = make_origin(commit, path);

	ident = fmt_ident("Not Committed Yet", "not.committed.yet", NULL, 0);
	strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
	for (parent = commit->parents; parent; parent = parent->next)
		strbuf_addf(&msg, "parent %s\n",
			    oid_to_hex(&parent->item->object.oid));
	strbuf_addf(&msg,
		    "author %s\n"
		    "committer %s\n\n"
		    "Version of %s from %s\n",
		    ident, ident, path,
		    (!contents_from ? path :
		     (!strcmp(contents_from, "-") ? "standard input" : contents_from)));
	set_commit_buffer_from_strbuf(commit, &msg);

	if (!contents_from || strcmp("-", contents_from)) {
		struct stat st;
		const char *read_from;
		char *buf_ptr;
		unsigned long buf_len;

		if (contents_from) {
			if (stat(contents_from, &st) < 0)
				die_errno("Cannot stat '%s'", contents_from);
			read_from = contents_from;
		}
		else {
			if (lstat(path, &st) < 0)
				die_errno("Cannot lstat '%s'", path);
			read_from = path;
		}
		mode = canon_mode(st.st_mode);

		switch (st.st_mode & S_IFMT) {
		case S_IFREG:
			if (opt->flags.allow_textconv &&
			    textconv_object(read_from, mode, &null_oid, 0, &buf_ptr, &buf_len))
				strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1);
			else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size)
				die_errno("cannot open or read '%s'", read_from);
			break;
		case S_IFLNK:
			if (strbuf_readlink(&buf, read_from, st.st_size) < 0)
				die_errno("cannot readlink '%s'", read_from);
			break;
		default:
			die("unsupported file type %s", read_from);
		}
	}
	else {
		/* Reading from stdin */
		mode = 0;
		if (strbuf_read(&buf, 0, 0) < 0)
			die_errno("failed to read from stdin");
	}
	convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0);
	origin->file.ptr = buf.buf;
	origin->file.size = buf.len;
	pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);

	/*
	 * Read the current index, replace the path entry with
	 * origin->blob_sha1 without mucking with its mode or type
	 * bits; we are not going to write this index out -- we just
	 * want to run "diff-index --cached".
	 */
	discard_cache();
	read_cache();

	len = strlen(path);
	if (!mode) {
		int pos = cache_name_pos(path, len);
		if (0 <= pos)
			mode = active_cache[pos]->ce_mode;
		else
			/* Let's not bother reading from HEAD tree */
			mode = S_IFREG | 0644;
	}
	size = cache_entry_size(len);
	ce = xcalloc(1, size);
	oidcpy(&ce->oid, &origin->blob_oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(0);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);

	cache_tree_invalidate_path(&the_index, path);

	return commit;
}

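/*
 * Thin wrapper that sets up the xdiff parameters and reports each hunk
 * of the diff between file_a and file_b to hunk_func.
 */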
static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b,
		      xdl_emit_hunk_consume_func_t hunk_func, void *cb_data, int xdl_opts)
{
	xpparam_t xpp = {0};
	xdemitconf_t xecfg = {0};
	xdemitcb_t ecb = {NULL};

	xpp.flags = xdl_opts;
	xecfg.hunk_func = hunk_func;
	ecb.priv = cb_data;
	return xdi_diff(file_a, file_b, &xpp, &xecfg, &ecb);
}

/*
 * Given an origin, prepare mmfile_t structure to be used by the
 * diff machinery.
 */
static void fill_origin_blob(struct diff_options *opt,
			     struct blame_origin *o, mmfile_t *file, int *num_read_blob)
{
	if (!o->file.ptr) {
		enum object_type type;
		unsigned long file_size;

		(*num_read_blob)++;
		if (opt->flags.allow_textconv &&
		    textconv_object(o->path, o->mode, &o->blob_oid, 1, &file->ptr, &file_size))
			;
		else
			file->ptr = read_object_file(&o->blob_oid, &type,
						     &file_size);
		file->size = file_size;

		if (!file->ptr)
			die("Cannot read blob %s for path %s",
			    oid_to_hex(&o->blob_oid),
			    o->path);
		o->file = *file;
	}
	else
		*file = o->file;
}

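/*
 * Release the cached blob contents of an origin once they are no
 * longer needed, to keep memory usage down.
 */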
static void drop_origin_blob(struct blame_origin *o)
{
	if (o->file.ptr)
		FREE_AND_NULL(o->file.ptr);
}

/*
 * Any merge of blames happens on lists of blames that arrived via
 * different parents in a single suspect. In this case, we want to
 * sort according to the suspect line numbers as opposed to the final
 * image line numbers. The function body is somewhat longish because
 * it avoids unnecessary writes.
 */
static struct blame_entry *blame_merge(struct blame_entry *list1,
				       struct blame_entry *list2)
{
	struct blame_entry *p1 = list1, *p2 = list2,
		**tail = &list1;

	if (!p2)
		return p1;
	if (!p1)
		return p2;

	if (p1->s_lno <= p2->s_lno) {
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
	for (;;) {
		*tail = p2;
		do {
			tail = &p2->next;
			if ((p2 = *tail) == NULL) {
				*tail = p1;
				return list1;
			}
		} while (p1->s_lno > p2->s_lno);
		*tail = p1;
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
}

static void *get_next_blame(const void *p)
{
	return ((struct blame_entry *)p)->next;
}

static void set_next_blame(void *p1, void *p2)
{
	((struct blame_entry *)p1)->next = p2;
}

/*
 * Final image line numbers are all different, so we don't need a
 * three-way comparison here.
 */
static int compare_blame_final(const void *p1, const void *p2)
{
	return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno
		? 1 : -1;
}

static int compare_blame_suspect(const void *p1, const void *p2)
{
	const struct blame_entry *s1 = p1, *s2 = p2;
	/*
	 * to allow for collating suspects, we sort according to the
	 * respective pointer value as the primary sorting criterion.
	 * The actual relation is pretty unimportant as long as it
	 * establishes a total order.  Comparing as integers gives us
	 * that.
	 */
	if (s1->suspect != s2->suspect)
		return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1;
	if (s1->s_lno == s2->s_lno)
		return 0;
	return s1->s_lno > s2->s_lno ? 1 : -1;
}

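/*
 * Sort the scoreboard's blame entries into final image line order.
 */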
void blame_sort_final(struct blame_scoreboard *sb)
{
	sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame,
				  compare_blame_final);
}

static int compare_commits_by_reverse_commit_date(const void *a,
						  const void *b,
						  void *c)
{
	return -compare_commits_by_commit_date(a, b, c);
}

/*
 * For debugging -- origin is refcounted, and this asserts that
 * we do not underflow.
 */
static void sanity_check_refcnt(struct blame_scoreboard *sb)
{
	int baa = 0;
	struct blame_entry *ent;

	for (ent = sb->ent; ent; ent = ent->next) {
		/* Nobody should have zero or negative refcnt */
		if (ent->suspect->refcnt <= 0) {
			fprintf(stderr, "%s in %s has negative refcnt %d\n",
				ent->suspect->path,
				oid_to_hex(&ent->suspect->commit->object.oid),
				ent->suspect->refcnt);
			baa = 1;
		}
	}
	if (baa)
		sb->on_sanity_fail(sb, baa);
}

/*
 * If two blame entries that are next to each other came from
 * contiguous lines in the same origin (i.e. <commit, path> pair),
 * merge them together.
 */
void blame_coalesce(struct blame_scoreboard *sb)
{
	struct blame_entry *ent, *next;

	for (ent = sb->ent; ent && (next = ent->next); ent = next) {
		if (ent->suspect == next->suspect &&
		    ent->s_lno + ent->num_lines == next->s_lno) {
			ent->num_lines += next->num_lines;
			ent->next = next->next;
			blame_origin_decref(next->suspect);
			free(next);
			next = ent; /* again */
		}
	}

	if (sb->debug) /* sanity */
		sanity_check_refcnt(sb);
}

/*
 * Merge the given sorted list of blames into a preexisting origin.
 * If there were no previous blames to that commit, it is entered into
 * the commit priority queue of the score board.
 */
static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porigin,
			 struct blame_entry *sorted)
{
	if (porigin->suspects)
		porigin->suspects = blame_merge(porigin->suspects, sorted);
	else {
		struct blame_origin *o;
		for (o = get_blame_suspects(porigin->commit); o; o = o->next) {
			if (o->suspects) {
				porigin->suspects = sorted;
				return;
			}
		}
		porigin->suspects = sorted;
		prio_queue_put(&sb->commits, porigin->commit);
	}
}

/*
 * Fill the blob_sha1 field of an origin if it hasn't, so that later
 * call to fill_origin_blob() can use it to locate the data.  blob_sha1
 * for an origin is also used to pass the blame for the entire file to
 * the parent to detect the case where a child's blob is identical to
 * that of its parent's.
 *
 * This also fills origin->mode for corresponding tree path.
 */
static int fill_blob_sha1_and_mode(struct blame_origin *origin)
{
	if (!is_null_oid(&origin->blob_oid))
		return 0;
	if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
		goto error_out;
	if (oid_object_info(the_repository, &origin->blob_oid, NULL) != OBJ_BLOB)
		goto error_out;
	return 0;
 error_out:
	oidclr(&origin->blob_oid);
	origin->mode = S_IFINVALID;
	return -1;
}

/*
 * We have an origin -- check if the same path exists in the
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_origin(struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin;
	struct diff_options diff_opts;
	const char *paths[2];

	/* First check any existing origins */
	for (porigin = get_blame_suspects(parent); porigin; porigin = porigin->next)
		if (!strcmp(porigin->path, origin->path)) {
			/*
			 * The same path between origin and its parent
			 * without renaming -- the most common case.
			 */
			return blame_origin_incref (porigin);
		}

	/* See if the origin->path is different between parent
	 * and origin first.  Most of the time they are the
	 * same and diff-tree is fairly efficient about this.
	 */
	diff_setup(&diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = 0;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	paths[0] = origin->path;
	paths[1] = NULL;

	parse_pathspec(&diff_opts.pathspec,
		       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
		       PATHSPEC_LITERAL_PATH, "", paths);
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	if (!diff_queued_diff.nr) {
		/* The path is the same as parent */
		porigin = get_origin(parent, origin->path);
		oidcpy(&porigin->blob_oid, &origin->blob_oid);
		porigin->mode = origin->mode;
	} else {
		/*
		 * Since origin->path is a pathspec, if the parent
		 * commit had it as a directory, we will see a whole
		 * bunch of deletion of files in the directory that we
		 * do not care about.
		 */
		int i;
		struct diff_filepair *p = NULL;
		for (i = 0; i < diff_queued_diff.nr; i++) {
			const char *name;
			p = diff_queued_diff.queue[i];
			name = p->one->path ? p->one->path : p->two->path;
			if (!strcmp(name, origin->path))
				break;
		}
		if (!p)
			die("internal error in blame::find_origin");
		switch (p->status) {
		default:
			die("internal error in blame::find_origin (%c)",
			    p->status);
		case 'M':
			porigin = get_origin(parent, origin->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		case 'A':
		case 'T':
			/* Did not exist in parent, or type changed */
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}

/*
 * We have an origin -- find the path that corresponds to it in its
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_rename(struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin = NULL;
	struct diff_options diff_opts;
	int i;

	diff_setup(&diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = DIFF_DETECT_RENAME;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = origin->path;
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	for (i = 0; i < diff_queued_diff.nr; i++) {
		struct diff_filepair *p = diff_queued_diff.queue[i];
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, origin->path)) {
			porigin = get_origin(parent, p->one->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}

/*
 * Append a new blame entry to a given output queue.
 */
static void add_blame_entry(struct blame_entry ***queue,
			    const struct blame_entry *src)
{
	struct blame_entry *e = xmalloc(sizeof(*e));
	memcpy(e, src, sizeof(*e));
	blame_origin_incref(e->suspect);

	e->next = **queue;
	**queue = e;
	*queue = &e->next;
}

/*
 * src typically is on-stack; we want to copy the information in it to
 * a malloced blame_entry that gets added to the given queue.  The
 * origin of dst loses a refcnt.
 */
static void dup_entry(struct blame_entry ***queue,
		      struct blame_entry *dst, struct blame_entry *src)
{
	blame_origin_incref(src->suspect);
	blame_origin_decref(dst->suspect);
	memcpy(dst, src, sizeof(*src));

	dst->next = **queue;
	**queue = dst;
	*queue = &dst->next;
}

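/*
 * Return a pointer to the start of line number lno in the final image.
 */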
const char *blame_nth_line(struct blame_scoreboard *sb, long lno)
{
	return sb->final_buf + sb->lineno[lno];
}

/*
 * It is known that lines between tlno to same came from parent, and e
 * has an overlap with that range.  it also is known that parent's
 * line plno corresponds to e's line tlno.
 *
 *                <---- e ----->
 *                   <------>
 *                   <------------>
 *                   <------------+----->
 *                   <------------+------------>
 *                <------------------>
 *
 * Split e into potentially three parts; before this chunk, the chunk
 * to be blamed for the parent, and after that portion.
 */
static void split_overlap(struct blame_entry *split,
			  struct blame_entry *e,
			  int tlno, int plno, int same,
			  struct blame_origin *parent)
{
	int chunk_end_lno;
	memset(split, 0, sizeof(struct blame_entry [3]));

	if (e->s_lno < tlno) {
		/* there is a pre-chunk part not blamed on parent */
		split[0].suspect = blame_origin_incref(e->suspect);
		split[0].lno = e->lno;
		split[0].s_lno = e->s_lno;
		split[0].num_lines = tlno - e->s_lno;
		split[1].lno = e->lno + tlno - e->s_lno;
		split[1].s_lno = plno;
	}
	else {
		split[1].lno = e->lno;
		split[1].s_lno = plno + (e->s_lno - tlno);
	}

	if (same < e->s_lno + e->num_lines) {
		/* there is a post-chunk part not blamed on parent */
		split[2].suspect = blame_origin_incref(e->suspect);
		split[2].lno = e->lno + (same - e->s_lno);
		split[2].s_lno = e->s_lno + (same - e->s_lno);
		split[2].num_lines = e->s_lno + e->num_lines - same;
		chunk_end_lno = split[2].lno;
	}
	else
		chunk_end_lno = e->lno + e->num_lines;
	split[1].num_lines = chunk_end_lno - split[1].lno;

	/*
	 * if it turns out there is nothing to blame the parent for,
	 * forget about the splitting.  !split[1].suspect signals this.
	 */
	if (split[1].num_lines < 1)
		return;
	split[1].suspect = blame_origin_incref(parent);
}

/*
 * split_overlap() divided an existing blame e into up to three parts
 * in split.  Any assigned blame is moved to queue to
 * reflect the split.
 */
static void split_blame(struct blame_entry ***blamed,
			struct blame_entry ***unblamed,
			struct blame_entry *split,
			struct blame_entry *e)
{
	if (split[0].suspect && split[2].suspect) {
		/* The first part (reuse storage for the existing entry e) */
		dup_entry(unblamed, e, &split[0]);

		/* The last part -- me */
		add_blame_entry(unblamed, &split[2]);

		/* ... and the middle part -- parent */
		add_blame_entry(blamed, &split[1]);
	}
	else if (!split[0].suspect && !split[2].suspect)
		/*
		 * The parent covers the entire area; reuse storage for
		 * e and replace it with the parent.
		 */
		dup_entry(blamed, e, &split[1]);
	else if (split[0].suspect) {
		/* me and then parent */
		dup_entry(unblamed, e, &split[0]);
		add_blame_entry(blamed, &split[1]);
	}
	else {
		/* parent and then me */
		dup_entry(blamed, e, &split[1]);
		add_blame_entry(unblamed, &split[2]);
	}
}

/*
 * After splitting the blame, the origins used by the
 * on-stack blame_entry should lose one refcnt each.
 */
static void decref_split(struct blame_entry *split)
{
	int i;

	for (i = 0; i < 3; i++)
		blame_origin_decref(split[i].suspect);
}

/*
 * reverse_blame reverses the list given in head, appending tail.
 * That allows us to build lists in reverse order, then reverse them
 * afterwards.  This can be faster than building the list in proper
 * order right away.  The reason is that building in proper order
 * requires writing a link in the _previous_ element, while building
 * in reverse order just requires placing the list head into the
 * _current_ element.
 */
static struct blame_entry *reverse_blame(struct blame_entry *head,
					 struct blame_entry *tail)
{
	while (head) {
		struct blame_entry *next = head->next;
		head->next = tail;
		tail = head;
		head = next;
	}
	return tail;
}

/*
 * Process one hunk from the patch between the current suspect for
 * blame_entry e and its parent.  This first blames any unfinished
 * entries before the chunk (which is where target and parent start
 * differing) on the parent, and then splits blame entries at the
 * start and at the end of the difference region.  Since use of -M and
 * -C options may lead to overlapping/duplicate source line number
 * ranges, all we can rely on from sorting/merging is the order of the
 * first suspect line number.
 */
static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
			int tlno, int offset, int same,
			struct blame_origin *parent)
{
	struct blame_entry *e = **srcq;
	struct blame_entry *samep = NULL, *diffp = NULL;

	while (e && e->s_lno < tlno) {
		struct blame_entry *next = e->next;
		/*
		 * current record starts before differing portion.  If
		 * it reaches into it, we need to split it up and
		 * examine the second part separately.
		 */
		if (e->s_lno + e->num_lines > tlno) {
			/* Move second half to a new record */
			int len = tlno - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry));
			n->suspect = e->suspect;
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			/* Push new record to diffp */
			n->next = diffp;
			diffp = n;
		} else
			blame_origin_decref(e->suspect);

		/* Pass blame for everything before the differing
		 * chunk to the parent */
		e->suspect = blame_origin_incref(parent);
		e->s_lno += offset;
		e->next = samep;
		samep = e;
		e = next;
	}
	/*
	 * As we don't know how much of a common stretch after this
	 * diff will occur, the currently blamed parts are all that we
	 * can assign to the parent for now.
	 */
	if (samep) {
		**dstq = reverse_blame(samep, **dstq);
		*dstq = &samep->next;
	}
	/*
	 * Prepend the split off portions: everything after e starts
	 * after the blameable portion.
	 */
	e = reverse_blame(diffp, e);

	/*
	 * Now retain records on the target while parts are different
	 * from the parent.
	 */
	samep = NULL;
	diffp = NULL;
	while (e && e->s_lno < same) {
		struct blame_entry *next = e->next;

		/*
		 * If current record extends into sameness, need to split.
		 */
		if (e->s_lno + e->num_lines > same) {
			/*
			 * Move second half to a new record to be
			 * processed by later chunks
			 */
			int len = same - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry));
			n->suspect = blame_origin_incref(e->suspect);
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			/* Push new record to samep */
			n->next = samep;
			samep = n;
		}
		e->next = diffp;
		diffp = e;
		e = next;
	}
	**srcq = reverse_blame(diffp, reverse_blame(samep, e));
	/* Move across elements that are in the unblamable portion */
	if (diffp)
		*srcq = &diffp->next;
}

struct blame_chunk_cb_data {
	struct blame_origin *parent;
	long offset;
	struct blame_entry **dstq;
	struct blame_entry **srcq;
};

/* diff chunks are from parent to target */
static int blame_chunk_cb(long start_a, long count_a,
			  long start_b, long count_b, void *data)
{
	struct blame_chunk_cb_data *d = data;
	if (start_a - start_b != d->offset)
		die("internal error in blame::blame_chunk_cb");
	blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b,
		    start_b + count_b, d->parent);
	d->offset = start_a + count_a - (start_b + count_b);
	return 0;
}

/*
 * We are looking at the origin 'target' and aiming to pass blame
 * for the lines it is suspected to its parent.  Run diff to find
 * which lines came from parent and pass blame for them.
 */
static void pass_blame_to_parent(struct blame_scoreboard *sb,
				 struct blame_origin *target,
				 struct blame_origin *parent)
{
	mmfile_t file_p, file_o;
	struct blame_chunk_cb_data d;
	struct blame_entry *newdest = NULL;

	if (!target->suspects)
		return; /* nothing remains for this target */

	d.parent = parent;
	d.offset = 0;
	d.dstq = &newdest; d.srcq = &target->suspects;

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	fill_origin_blob(&sb->revs->diffopt, target, &file_o, &sb->num_read_blob);
	sb->num_get_patch++;

	if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s -> %s)",
		    oid_to_hex(&parent->commit->object.oid),
		    oid_to_hex(&target->commit->object.oid));
	/* The rest are the same as the parent */
	blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, parent);
	*d.dstq = NULL;
	queue_blames(sb, parent, newdest);
}

/*
 * The lines in blame_entry after splitting blames many times can become
 * very small and trivial, and at some point it becomes pointless to
 * blame the parents.  E.g. "\t\t}\n\t}\n\n" appears everywhere in any
 * ordinary C program, and it is not worth to say it was copied from
 * totally unrelated file in the parent.
 *
 * Compute how trivial the lines in the blame_entry are.
 */
unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e)
{
	unsigned score;
	const char *cp, *ep;

	if (e->score)
		return e->score;

	score = 1;
	cp = blame_nth_line(sb, e->lno);
	ep = blame_nth_line(sb, e->lno + e->num_lines);
	while (cp < ep) {
		unsigned ch = *((unsigned char *)cp);
		if (isalnum(ch))
			score++;
		cp++;
	}
	e->score = score;
	return score;
}

/*
 * best_so_far[] and potential[] are both a split of an existing blame_entry
 * that passes blame to the parent.  Maintain best_so_far the best split so
 * far, by comparing potential and best_so_far and copying potential into
 * best_so_far as needed.
 */
static void copy_split_if_better(struct blame_scoreboard *sb,
				 struct blame_entry *best_so_far,
				 struct blame_entry *potential)
{
	int i;

	if (!potential[1].suspect)
		return;
	if (best_so_far[1].suspect) {
		if (blame_entry_score(sb, &potential[1]) <
		    blame_entry_score(sb, &best_so_far[1]))
			return;
	}

	for (i = 0; i < 3; i++)
		blame_origin_incref(potential[i].suspect);
	decref_split(best_so_far);
	memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
}

/*
 * We are looking at a part of the final image represented by
 * ent (tlno and same are offset by ent->s_lno).
 * tlno is where we are looking at in the final image.
 * up to (but not including) same match preimage.
 * plno is where we are looking at in the preimage.
 *
 * <-------------- final image ---------------------->
 *       <------ent------>
 *         ^tlno ^same
 *    <---------preimage----->
 *         ^plno
 *
 * All line numbers are 0-based.
 */
static void handle_split(struct blame_scoreboard *sb,
			 struct blame_entry *ent,
			 int tlno, int plno, int same,
			 struct blame_origin *parent,
			 struct blame_entry *split)
{
	if (ent->num_lines <= tlno)
		return;
	if (tlno < same) {
		struct blame_entry potential[3];
		tlno += ent->s_lno;
		same += ent->s_lno;
		split_overlap(potential, ent, tlno, plno, same, parent);
		copy_split_if_better(sb, split, potential);
		decref_split(potential);
	}
}

struct handle_split_cb_data {
	struct blame_scoreboard *sb;
	struct blame_entry *ent;
	struct blame_origin *parent;
	struct blame_entry *split;
	long plno;
	long tlno;
};

static int handle_split_cb(long start_a, long count_a,
			   long start_b, long count_b, void *data)
{
	struct handle_split_cb_data *d = data;
	handle_split(d->sb, d->ent, d->tlno, d->plno, start_b, d->parent,
		     d->split);
	d->plno = start_a + count_a;
	d->tlno = start_b + count_b;
	return 0;
}

/*
 * Find the lines from parent that are the same as ent so that
 * we can pass blames to it.  file_p has the blob contents for
 * the parent.
 */
static void find_copy_in_blob(struct blame_scoreboard *sb,
			      struct blame_entry *ent,
			      struct blame_origin *parent,
			      struct blame_entry *split,
			      mmfile_t *file_p)
{
	const char *cp;
	mmfile_t file_o;
	struct handle_split_cb_data d;

	memset(&d, 0, sizeof(d));
	d.sb = sb; d.ent = ent; d.parent = parent; d.split = split;
	/*
	 * Prepare mmfile that contains only the lines in ent.
	 */
	cp = blame_nth_line(sb, ent->lno);
	file_o.ptr = (char *) cp;
	file_o.size = blame_nth_line(sb, ent->lno + ent->num_lines) - cp;

	/*
	 * file_o is a part of final image we are annotating.
	 * file_p partially may match that image.
	 */
	memset(split, 0, sizeof(struct blame_entry [3]));
	if (diff_hunks(file_p, &file_o, handle_split_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s)",
		    oid_to_hex(&parent->commit->object.oid));
	/* remainder, if any, all match the preimage */
	handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split);
}

/* Move all blame entries from list *source that have a score smaller
 * than score_min to the front of list *small.
 * Returns a pointer to the link pointing to the old head of the small list.
 */
static struct blame_entry **filter_small(struct blame_scoreboard *sb,
					 struct blame_entry **small,
					 struct blame_entry **source,
					 unsigned score_min)
{
	struct blame_entry *p = *source;
	struct blame_entry *oldsmall = *small;
	while (p) {
		if (blame_entry_score(sb, p) <= score_min) {
			*small = p;
			small = &p->next;
			p = *small;
		} else {
			*source = p;
			source = &p->next;
			p = *source;
		}
	}
	*small = oldsmall;
	*source = NULL;
	return small;
}

/*
 * See if lines currently target is suspected for can be attributed to
 * parent.
 */
static void find_move_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct blame_origin *parent)
{
	struct blame_entry *e, split[3];
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;
	mmfile_t file_p;

	if (!unblamed)
		return; /* nothing remains for this target */

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	if (!file_p.ptr)
		return;

	/* At each iteration, unblamed has a NULL-terminated list of
	 * entries that have not yet been tested for blame.  leftover
	 * contains the reversed list of entries that have been tested
	 * without being assignable to the parent.
	 */
	do {
		struct blame_entry **unblamedtail = &unblamed;
		struct blame_entry *next;
		for (e = unblamed; e; e = next) {
			next = e->next;
			find_copy_in_blob(sb, e, parent, split, &file_p);
			if (split[1].suspect &&
			    sb->move_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split, e);
			} else {
				e->next = leftover;
				leftover = e;
			}
			decref_split(split);
		}
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->move_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
}

struct blame_list {
	struct blame_entry *ent;
	struct blame_entry split[3];
};

/*
 * Count the number of entries the target is suspected for,
 * and prepare a list of entry and the best split.
 */
static struct blame_list *setup_blame_list(struct blame_entry *unblamed,
					   int *num_ents_p)
{
	struct blame_entry *e;
	int num_ents, i;
	struct blame_list *blame_list = NULL;

	for (e = unblamed, num_ents = 0; e; e = e->next)
		num_ents++;
	if (num_ents) {
		blame_list = xcalloc(num_ents, sizeof(struct blame_list));
		for (e = unblamed, i = 0; e; e = e->next)
			blame_list[i++].ent = e;
	}
	*num_ents_p = num_ents;
	return blame_list;
}

/*
 * For lines target is suspected for, see if we can find code movement
 * across file boundary from the parent commit.  porigin is the path
 * in the parent we already tried.
 */
static void find_copy_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct commit *parent,
				struct blame_origin *porigin,
				int opt)
{
	struct diff_options diff_opts;
	int i, j;
	struct blame_list *blame_list;
	int num_ents;
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;

	if (!unblamed)
		return; /* nothing remains for this target */

	diff_setup(&diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;

	diff_setup_done(&diff_opts);

	/* Try "find copies harder" on new path if requested;
	 * we do not want to use diffcore_rename() actually to
	 * match things up; find_copies_harder is set only to
	 * force diff_tree_oid() to feed all filepairs to diff_queue,
	 * and this code needs to be after diff_setup_done(), which
	 * usually makes find-copies-harder imply copy detection.
	 */
	if ((opt & PICKAXE_BLAME_COPY_HARDEST)
	    || ((opt & PICKAXE_BLAME_COPY_HARDER)
		&& (!porigin || strcmp(target->path, porigin->path))))
		diff_opts.flags.find_copies_harder = 1;

	if (is_null_oid(&target->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(target->commit),
			      "", &diff_opts);

	if (!diff_opts.flags.find_copies_harder)
		diffcore_std(&diff_opts);

	do {
		struct blame_entry **unblamedtail = &unblamed;
		blame_list = setup_blame_list(unblamed, &num_ents);

		for (i = 0; i < diff_queued_diff.nr; i++) {
			struct diff_filepair *p = diff_queued_diff.queue[i];
			struct blame_origin *norigin;
			mmfile_t file_p;
			struct blame_entry potential[3];

			if (!DIFF_FILE_VALID(p->one))
				continue; /* does not exist in parent */
			if (S_ISGITLINK(p->one->mode))
				continue; /* ignore git links */
			if (porigin && !strcmp(p->one->path, porigin->path))
				/* find_move already dealt with this path */
				continue;

			norigin = get_origin(parent, p->one->path);
			oidcpy(&norigin->blob_oid, &p->one->oid);
			norigin->mode = p->one->mode;
			fill_origin_blob(&sb->revs->diffopt, norigin, &file_p, &sb->num_read_blob);
			if (!file_p.ptr)
				continue;

			for (j = 0; j < num_ents; j++) {
				find_copy_in_blob(sb, blame_list[j].ent,
						  norigin, potential, &file_p);
				copy_split_if_better(sb, blame_list[j].split,
						     potential);
				decref_split(potential);
			}
			blame_origin_decref(norigin);
		}

		for (j = 0; j < num_ents; j++) {
			struct blame_entry *split = blame_list[j].split;
			if (split[1].suspect &&
			    sb->copy_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split,
					    blame_list[j].ent);
			} else {
				blame_list[j].ent->next = leftover;
				leftover = blame_list[j].ent;
			}
			decref_split(split);
		}
		free(blame_list);
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->copy_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
}

/*
 * The blobs of origin and porigin exactly match, so everything
 * origin is suspected for can be blamed on the parent.
 */
static void pass_whole_blame(struct blame_scoreboard *sb,
			     struct blame_origin *origin, struct blame_origin *porigin)
{
	struct blame_entry *e, *suspects;

	if (!porigin->file.ptr && origin->file.ptr) {
		/* Steal its file */
		porigin->file = origin->file;
		origin->file.ptr = NULL;
	}
	suspects = origin->suspects;
	origin->suspects = NULL;
	for (e = suspects; e; e = e->next) {
		blame_origin_incref(porigin);
		blame_origin_decref(e->suspect);
		e->suspect = porigin;
	}
	queue_blames(sb, porigin, suspects);
}

/*
 * We pass blame from the current commit to its parents.  We keep saying
 * "parent" (and "porigin"), but what we mean is to find scapegoat to
 * exonerate ourselves.
 */
static struct commit_list *first_scapegoat(struct rev_info *revs, struct commit *commit,
					   int reverse)
{
	if (!reverse) {
		if (revs->first_parent_only &&
		    commit->parents &&
		    commit->parents->next) {
			free_commit_list(commit->parents->next);
			commit->parents->next = NULL;
		}
		return commit->parents;
	}
	return lookup_decoration(&revs->children, &commit->object);
}

static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reverse)
{
	struct commit_list *l = first_scapegoat(revs, commit, reverse);
	return commit_list_count(l);
}

/* Distribute collected unsorted blames to the respected sorted lists
 * in the various origins.
 */
static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed)
{
	blamed = llist_mergesort(blamed, get_next_blame, set_next_blame,
				 compare_blame_suspect);
	while (blamed)
	{
		struct blame_origin *porigin = blamed->suspect;
		struct blame_entry *suspects = NULL;
		do {
			struct blame_entry *next = blamed->next;
			blamed->next = suspects;
			suspects = blamed;
			blamed = next;
		} while (blamed && blamed->suspect == porigin);
		suspects = reverse_blame(suspects, NULL);
		queue_blames(sb, porigin, suspects);
	}
}

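/*
 * Attempt to pass the blame for the lines origin is suspected for to
 * each of its scapegoats (parents, or children when blaming in
 * reverse), optionally looking for line movement and copies when the
 * -M/-C options are in effect.
 */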
#define MAXSG 16

static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt)
{
	struct rev_info *revs = sb->revs;
	int i, pass, num_sg;
	struct commit *commit = origin->commit;
	struct commit_list *sg;
	struct blame_origin *sg_buf[MAXSG];
	struct blame_origin *porigin, **sg_origin = sg_buf;
	struct blame_entry *toosmall = NULL;
	struct blame_entry *blames, **blametail = &blames;

	num_sg = num_scapegoats(revs, commit, sb->reverse);
	if (!num_sg)
		goto finish;
	else if (num_sg < ARRAY_SIZE(sg_buf))
		memset(sg_buf, 0, sizeof(sg_buf));
	else
		sg_origin = xcalloc(num_sg, sizeof(*sg_origin));

	/*
	 * The first pass looks for unrenamed path to optimize for
	 * common cases, then we look for renames in the second pass.
	 */
	for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) {
		struct blame_origin *(*find)(struct commit *, struct blame_origin *);
		find = pass ? find_rename : find_origin;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct commit *p = sg->item;
			int j, same;

			if (sg_origin[i])
				continue;
			if (parse_commit(p))
				continue;
			porigin = find(p, origin);
			if (!porigin)
				continue;
			if (!oidcmp(&porigin->blob_oid, &origin->blob_oid)) {
				pass_whole_blame(sb, origin, porigin);
				blame_origin_decref(porigin);
				goto finish;
			}
			for (j = same = 0; j < i; j++)
				if (sg_origin[j] &&
				    !oidcmp(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
					same = 1;
					break;
				}
			if (!same)
				sg_origin[i] = porigin;
			else
				blame_origin_decref(porigin);
		}
	}

	sb->num_commits++;
	for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
	     i < num_sg && sg;
	     sg = sg->next, i++) {
		struct blame_origin *porigin = sg_origin[i];
		if (!porigin)
			continue;
		if (!origin->previous) {
			blame_origin_incref(porigin);
			origin->previous = porigin;
		}
		pass_blame_to_parent(sb, origin, porigin);
		if (!origin->suspects)
			goto finish;
	}

	/*
	 * Optionally find moves in parents' files.
	 */
	if (opt & PICKAXE_BLAME_MOVE) {
		filter_small(sb, &toosmall, &origin->suspects, sb->move_score);
		if (origin->suspects) {
			for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
			     i < num_sg && sg;
			     sg = sg->next, i++) {
				struct blame_origin *porigin = sg_origin[i];
				if (!porigin)
					continue;
				find_move_in_parent(sb, &blametail, &toosmall, origin, porigin);
				if (!origin->suspects)
					break;
			}
		}
	}

	/*
	 * Optionally find copies from parents' files.
	 */
	if (opt & PICKAXE_BLAME_COPY) {
		if (sb->copy_score > sb->move_score)
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		else if (sb->copy_score < sb->move_score) {
			origin->suspects = blame_merge(origin->suspects, toosmall);
			toosmall = NULL;
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		}
		if (!origin->suspects)
			goto finish;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct blame_origin *porigin = sg_origin[i];
			find_copy_in_parent(sb, &blametail, &toosmall,
					    origin, sg->item, porigin, opt);
			if (!origin->suspects)
				goto finish;
		}
	}

finish:
	*blametail = NULL;
	distribute_blame(sb, blames);
	/*
	 * prepend toosmall to origin->suspects
	 *
	 * There is no point in sorting: this ends up on a big
	 * unsorted list in the caller anyway.
	 */
	if (toosmall) {
		struct blame_entry **tail = &toosmall;
		while (*tail)
			tail = &(*tail)->next;
		*tail = origin->suspects;
		origin->suspects = toosmall;
	}
	for (i = 0; i < num_sg; i++) {
		if (sg_origin[i]) {
			drop_origin_blob(sg_origin[i]);
			blame_origin_decref(sg_origin[i]);
		}
	}
	drop_origin_blob(origin);
	if (sg_buf != sg_origin)
		free(sg_origin);
}

/*
 * The main loop -- while we have blobs with lines whose true origin
 * is still unknown, pick one blob, and allow its lines to pass blames
 * to its parents. */
void assign_blame(struct blame_scoreboard *sb, int opt)
{
	struct rev_info *revs = sb->revs;
	struct commit *commit = prio_queue_get(&sb->commits);

	while (commit) {
		struct blame_entry *ent;
		struct blame_origin *suspect = get_blame_suspects(commit);

		/* find one suspect to break down */
		while (suspect && !suspect->suspects)
			suspect = suspect->next;

		if (!suspect) {
			commit = prio_queue_get(&sb->commits);
			continue;
		}

		assert(commit == suspect->commit);

		/*
		 * We will use this suspect later in the loop,
		 * so hold onto it in the meantime.
		 */
		blame_origin_incref(suspect);
		parse_commit(commit);
		if (sb->reverse ||
		    (!(commit->object.flags & UNINTERESTING) &&
		     !(revs->max_age != -1 && commit->date < revs->max_age)))
			pass_blame(sb, suspect, opt);
		else {
			commit->object.flags |= UNINTERESTING;
			if (commit->object.parsed)
				mark_parents_uninteresting(commit);
		}
		/* treat root commit as boundary */
		if (!commit->parents && !sb->show_root)
			commit->object.flags |= UNINTERESTING;

		/* Take responsibility for the remaining entries */
		ent = suspect->suspects;
		if (ent) {
			suspect->guilty = 1;
			for (;;) {
				struct blame_entry *next = ent->next;
				if (sb->found_guilty_entry)
					sb->found_guilty_entry(ent, sb->found_guilty_entry_data);
				if (next) {
					ent = next;
					continue;
				}
				ent->next = sb->ent;
				sb->ent = suspect->suspects;
				suspect->suspects = NULL;
				break;
			}
		}
		blame_origin_decref(suspect);

		if (sb->debug) /* sanity */
			sanity_check_refcnt(sb);
	}
}

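/*
 * Return the start of the line following the one that begins at
 * 'start', or 'end' when there is no newline before it.
 */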
static const char *get_next_line(const char *start, const char *end)
{
	const char *nl = memchr(start, '\n', end - start);

	return nl ? nl + 1 : end;
}

/*
 * To allow quick access to the contents of nth line in the
 * final image, prepare an index in the scoreboard.
 */
static int prepare_lines(struct blame_scoreboard *sb)
{
	const char *buf = sb->final_buf;
	unsigned long len = sb->final_buf_size;
	const char *end = buf + len;
	const char *p;
	int *lineno;
	int num = 0;

	for (p = buf; p < end; p = get_next_line(p, end))
		num++;

	ALLOC_ARRAY(sb->lineno, num + 1);
	lineno = sb->lineno;

	for (p = buf; p < end; p = get_next_line(p, end))
		*lineno++ = p - buf;

	*lineno = len;

	sb->num_lines = num;
	return sb->num_lines;
}

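/*
 * Find the single positive (interesting) commit among the pending
 * objects; it becomes sb->final, the commit whose image we annotate.
 */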
static struct commit *find_single_final(struct rev_info *revs,
					const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (obj->flags & UNINTERESTING)
			continue;
		obj = deref_tag(obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig from %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *)obj;
		name = revs->pending.objects[i].name;
	}
	if (name_p)
		*name_p = xstrdup_or_null(name);
	return found;
}

static struct commit *dwim_reverse_initial(struct rev_info *revs,
					   const char **name_p)
{
	/*
	 * DWIM "git blame --reverse ONE -- PATH" as
	 * "git blame --reverse ONE..HEAD -- PATH" but only do so
	 * when it makes sense.
	 */
	struct object *obj;
	struct commit *head_commit;
	struct object_id head_oid;

	if (revs->pending.nr != 1)
		return NULL;

	/* Is that sole rev a committish? */
	obj = revs->pending.objects[0].item;
	obj = deref_tag(obj, NULL, 0);
	if (obj->type != OBJ_COMMIT)
		return NULL;

	/* Do we have HEAD? */
	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		return NULL;
	head_commit = lookup_commit_reference_gently(&head_oid, 1);
	if (!head_commit)
		return NULL;

	/* Turn "ONE" into "ONE..HEAD" then */
	obj->flags |= UNINTERESTING;
	add_pending_object(revs, &head_commit->object, "HEAD");

	if (name_p)
		*name_p = revs->pending.objects[0].name;
	return (struct commit *)obj;
}

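/*
 * Find the single negative (uninteresting) commit among the pending
 * objects; with --reverse it is the commit blame digs up from.
 */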
static struct commit *find_single_initial(struct rev_info *revs,
					  const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	/*
	 * There must be one and only one negative commit, and it must be
	 * the boundary.
	 */
	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (!(obj->flags & UNINTERESTING))
			continue;
		obj = deref_tag(obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig up from, %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *) obj;
		name = revs->pending.objects[i].name;
	}

	if (!name)
		found = dwim_reverse_initial(revs, &name);
	if (!name)
		die("No commit to dig up from?");

	if (name_p)
		*name_p = xstrdup(name);
	return found;
}

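/*
 * Zero-initialize a scoreboard and set the default move/copy score
 * thresholds.
 */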
void init_scoreboard(struct blame_scoreboard *sb)
{
	memset(sb, 0, sizeof(struct blame_scoreboard));
	sb->move_score = BLAME_DEFAULT_MOVE_SCORE;
	sb->copy_score = BLAME_DEFAULT_COPY_SCORE;
}

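/*
 * Resolve the final (and, in reverse mode, initial) commits, fake a
 * working tree commit when no positive commit was given, set up the
 * revision walk, and load the final image that will be annotated.
 */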
void setup_scoreboard(struct blame_scoreboard *sb, const char *path, struct blame_origin **orig)
{
	const char *final_commit_name = NULL;
	struct blame_origin *o;
	struct commit *final_commit = NULL;
	enum object_type type;

	init_blame_suspects(&blame_suspects);

	if (sb->reverse && sb->contents_from)
		die(_("--contents and --reverse do not blend well."));

	if (!sb->reverse) {
		sb->final = find_single_final(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_commit_date;
	} else {
		sb->final = find_single_initial(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_reverse_commit_date;
	}

	if (sb->final && sb->contents_from)
		die(_("cannot use --contents with final commit object name"));

	if (sb->reverse && sb->revs->first_parent_only)
		sb->revs->children.name = NULL;

	if (!sb->final) {
		/*
		 * "--not A B -- path" without anything positive;
		 * do not default to HEAD, but use the working tree
		 * or "--contents".
		 */
		setup_work_tree();
		sb->final = fake_working_tree_commit(&sb->revs->diffopt,
						     path, sb->contents_from);
		add_pending_object(sb->revs, &(sb->final->object), ":");
	}

	if (sb->reverse && sb->revs->first_parent_only) {
		final_commit = find_single_final(sb->revs, NULL);
		if (!final_commit)
			die(_("--reverse and --first-parent together require specified latest commit"));
	}

	/*
	 * If we have bottom, this will mark the ancestors of the
	 * bottom commits we would reach while traversing as
	 * uninteresting.
	 */
	if (prepare_revision_walk(sb->revs))
		die(_("revision walk setup failed"));

	if (sb->reverse && sb->revs->first_parent_only) {
		struct commit *c = final_commit;

		sb->revs->children.name = "children";
		while (c->parents &&
		       oidcmp(&c->object.oid, &sb->final->object.oid)) {
			struct commit_list *l = xcalloc(1, sizeof(*l));

			l->item = c;
			if (add_decoration(&sb->revs->children,
					   &c->parents->item->object, l))
				BUG("not unique item in first-parent chain");
			c = c->parents->item;
		}

		if (oidcmp(&c->object.oid, &sb->final->object.oid))
			die(_("--reverse --first-parent together require range along first-parent chain"));
	}

	if (is_null_oid(&sb->final->object.oid)) {
		o = get_blame_suspects(sb->final);
		sb->final_buf = xmemdupz(o->file.ptr, o->file.size);
		sb->final_buf_size = o->file.size;
	}
	else {
		o = get_origin(sb->final, path);
		if (fill_blob_sha1_and_mode(o))
			die(_("no such path %s in %s"), path, final_commit_name);

		if (sb->revs->diffopt.flags.allow_textconv &&
		    textconv_object(path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf,
				    &sb->final_buf_size))
			;
		else
			sb->final_buf = read_object_file(&o->blob_oid, &type,
							 &sb->final_buf_size);

		if (!sb->final_buf)
			die(_("cannot read blob %s for path %s"),
			    oid_to_hex(&o->blob_oid),
			    path);
	}
	sb->num_read_blob++;
	prepare_lines(sb);

	if (orig)
		*orig = o;

	free((char *)final_commit_name);
}

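/*
 * Allocate a new blame entry covering lines [start, end) of the final
 * image, suspect it on origin o, and prepend it to the given list.
 */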
struct blame_entry *blame_entry_prepend(struct blame_entry *head,
					long start, long end,
					struct blame_origin *o)
{
	struct blame_entry *new_head = xcalloc(1, sizeof(struct blame_entry));
	new_head->lno = start;
	new_head->num_lines = end - start;
	new_head->suspect = o;
	new_head->s_lno = start;
	new_head->next = head;
	blame_origin_incref(o);
	return new_head;
}

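/*
 * Rough usage sketch, based on the API above (an assumption about the
 * caller, e.g. builtin/blame.c, and not code taken from this file):
 *
 *	struct blame_scoreboard sb;
 *	struct blame_origin *o;
 *
 *	init_scoreboard(&sb);
 *	sb.revs = &revs;	(a rev_info prepared by the caller)
 *	setup_scoreboard(&sb, path, &o);
 *	o->suspects = blame_entry_prepend(NULL, 0, sb.num_lines, o);
 *	prio_queue_put(&sb.commits, o->commit);
 *	blame_origin_decref(o);
 *	assign_blame(&sb, opt);
 *	blame_sort_final(&sb);
 *	blame_coalesce(&sb);
 */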