/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache-tree.h"
#include "object-store.h"
#include "resolve-undo.h"
#include "split-index.h"
#include "fsmonitor.h"
#include "thread-utils.h"

/* Mask for the name length in ce_flags in the on-disk index */
#define CE_NAMEMASK  (0x0fff)
/*
 * The first letter should be 'A'..'Z' for extensions that are not
 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
 * order to correctly interpret the index file, pick a character that
 * is outside the range, to cause the reader to abort.
 */
#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
#define CACHE_EXT_LINK 0x6c696e6b	  /* "link" */
#define CACHE_EXT_UNTRACKED 0x554E5452	  /* "UNTR" */
#define CACHE_EXT_FSMONITOR 0x46534D4E	  /* "FSMN" */
#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945	/* "EOIE" */
#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
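/*
 * For illustration only (this merely restates the defines above): the
 * extension signature is the four letters packed big-endian into one
 * 32-bit value, so for example
 *
 *	CACHE_EXT("TREE") == ('T' << 24) | ('R' << 16) | ('E' << 8) | 'E'
 *			  == 0x54524545
 *	CACHE_EXT("EOIE") == ('E' << 24) | ('O' << 16) | ('I' << 8) | 'E'
 *			  == 0x454F4945
 */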
/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
		 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
		 SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
/*
 * This is an estimate of the pathname length in the index.  We use
 * this for V4 index files to guess the un-deltafied size of the index
 * in memory because of pathname deltafication.  This is not required
 * for V2/V3 index formats because their pathnames are not compressed.
 * If the initial amount of memory set aside is not sufficient, the
 * mem pool will allocate extra memory.
 */
#define CACHE_ENTRY_PATH_LENGTH 80
static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;

	ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}
static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;

	ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}
static struct mem_pool *find_mem_pool(struct index_state *istate)
{
	struct mem_pool **pool_ptr;

	if (istate->split_index && istate->split_index->base)
		pool_ptr = &istate->split_index->base->ce_mem_pool;
	else
		pool_ptr = &istate->ce_mem_pool;

	if (!*pool_ptr)
		mem_pool_init(pool_ptr, 0);

	return *pool_ptr;
}
struct index_state the_index;
static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}
static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	struct cache_entry *old = istate->cache[nr];

	replace_index_entry_in_base(istate, old, ce);
	remove_name_hash(istate, old);
	discard_cache_entry(old);
	ce->ce_flags &= ~CE_HASHED;
	set_index_entry(istate, nr, ce);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;
}
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
	struct cache_entry *old_entry = istate->cache[nr], *new_entry;
	int namelen = strlen(new_name);

	new_entry = make_empty_cache_entry(istate, namelen);
	copy_cache_entry(new_entry, old_entry);
	new_entry->ce_flags &= ~CE_HASHED;
	new_entry->ce_namelen = namelen;
	new_entry->index = 0;
	memcpy(new_entry->name, new_name, namelen + 1);

	cache_tree_invalidate_path(istate, old_entry->name);
	untracked_cache_remove_from_index(istate, old_entry->name);
	remove_index_entry_at(istate, nr);
	add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
void fill_stat_data(struct stat_data *sd, struct stat *st)
{
	sd->sd_ctime.sec = (unsigned int)st->st_ctime;
	sd->sd_mtime.sec = (unsigned int)st->st_mtime;
	sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
	sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
	sd->sd_dev = st->st_dev;
	sd->sd_ino = st->st_ino;
	sd->sd_uid = st->st_uid;
	sd->sd_gid = st->st_gid;
	sd->sd_size = st->st_size;
}
int match_stat_data(const struct stat_data *sd, struct stat *st)
{
	int changed = 0;

	if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
		changed |= MTIME_CHANGED;
	if (trust_ctime && check_stat &&
	    sd->sd_ctime.sec != (unsigned int)st->st_ctime)
		changed |= CTIME_CHANGED;

#ifdef USE_NSEC
	if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
		changed |= MTIME_CHANGED;
	if (trust_ctime && check_stat &&
	    sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
		changed |= CTIME_CHANGED;
#endif

	if (check_stat) {
		if (sd->sd_uid != (unsigned int) st->st_uid ||
		    sd->sd_gid != (unsigned int) st->st_gid)
			changed |= OWNER_CHANGED;
		if (sd->sd_ino != (unsigned int) st->st_ino)
			changed |= INODE_CHANGED;
	}

#ifdef USE_STDEV
	/*
	 * st_dev breaks on network filesystems where different
	 * clients will have different views of what "device"
	 * the filesystem is on
	 */
	if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
		changed |= INODE_CHANGED;
#endif

	if (sd->sd_size != (unsigned int) st->st_size)
		changed |= DATA_CHANGED;

	return changed;
}
/*
 * This only updates the "non-critical" parts of the directory
 * cache, ie the parts that aren't tracked by GIT, and only used
 * to validate the cache.
 */
void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
{
	fill_stat_data(&ce->ce_stat_data, st);

	if (assume_unchanged)
		ce->ce_flags |= CE_VALID;

	if (S_ISREG(st->st_mode)) {
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(ce);
	}
}
static int ce_compare_data(struct index_state *istate,
			   const struct cache_entry *ce,
			   struct stat *st)
{
	int match = -1;
	int fd = git_open_cloexec(ce->name, O_RDONLY);

	if (fd >= 0) {
		struct object_id oid;
		if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
			match = !oideq(&oid, &ce->oid);
		/* index_fd() closed the file descriptor already */
	}
	return match;
}
static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
{
	int match = -1;
	void *buffer;
	unsigned long size;
	enum object_type type;
	struct strbuf sb = STRBUF_INIT;

	if (strbuf_readlink(&sb, ce->name, expected_size))
		return -1;

	buffer = read_object_file(&ce->oid, &type, &size);
	if (buffer) {
		if (size == sb.len)
			match = memcmp(buffer, sb.buf, size);
		free(buffer);
	}
	strbuf_release(&sb);
	return match;
}
static int ce_compare_gitlink(const struct cache_entry *ce)
{
	struct object_id oid;

	/*
	 * We don't actually require that the .git directory
	 * under GITLINK directory be a valid git directory. It
	 * might even be missing (in case nobody populated that
	 * sub-project).
	 *
	 * If so, we consider it always to match.
	 */
	if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
		return 0;
	return !oideq(&oid, &ce->oid);
}
static int ce_modified_check_fs(struct index_state *istate,
				const struct cache_entry *ce,
				struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(istate, ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, xsize_t(st->st_size)))
			return DATA_CHANGED;
		break;
	case S_IFDIR:
		if (S_ISGITLINK(ce->ce_mode))
			return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
		/* else fallthrough */
	default:
		return TYPE_CHANGED;
	}
	return 0;
}
static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	if (ce->ce_flags & CE_REMOVE)
		return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ce->ce_mode ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		if (!S_ISLNK(st->st_mode) &&
		    (has_symlinks || !S_ISREG(st->st_mode)))
			changed |= TYPE_CHANGED;
		break;
	case S_IFGITLINK:
		/* We ignore most of the st_xxx fields for gitlinks */
		if (!S_ISDIR(st->st_mode))
			changed |= TYPE_CHANGED;
		else if (ce_compare_gitlink(ce))
			changed |= DATA_CHANGED;
		return changed;
	default:
		die("internal error: ce_mode is %o", ce->ce_mode);
	}

	changed |= match_stat_data(&ce->ce_stat_data, st);

	/* Racily smudged entry? */
	if (!ce->ce_stat_data.sd_size) {
		if (!is_empty_blob_sha1(ce->oid.hash))
			changed |= DATA_CHANGED;
	}

	return changed;
}
static int is_racy_stat(const struct index_state *istate,
			const struct stat_data *sd)
{
	return (istate->timestamp.sec &&
#ifdef USE_NSEC
		 /* nanosecond timestamped files can also be racy! */
		(istate->timestamp.sec < sd->sd_mtime.sec ||
		 (istate->timestamp.sec == sd->sd_mtime.sec &&
		  istate->timestamp.nsec <= sd->sd_mtime.nsec))
#else
		istate->timestamp.sec <= sd->sd_mtime.sec
#endif
		);
}
int is_racy_timestamp(const struct index_state *istate,
		      const struct cache_entry *ce)
{
	return (!S_ISGITLINK(ce->ce_mode) &&
		is_racy_stat(istate, &ce->ce_stat_data));
}
int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st)
{
	if (is_racy_stat(istate, sd))
		return MTIME_CHANGED;
	return match_stat_data(sd, st);
}
int ie_match_stat(struct index_state *istate,
		  const struct cache_entry *ce, struct stat *st,
		  unsigned int options)
{
	unsigned int changed;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 *
	 * skip-worktree has the same effect with higher precedence
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce))
		return 0;
	if (!ignore_valid && (ce->ce_flags & CE_VALID))
		return 0;
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
		return 0;

	/*
	 * Intent-to-add entries have not been added, so the index entry
	 * by definition never matches what is in the work tree until it
	 * actually gets added.
	 */
	if (ce_intent_to_add(ce))
		return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 * 	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 * 	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime are the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed && is_racy_timestamp(istate, ce)) {
		if (assume_racy_is_modified)
			changed |= DATA_CHANGED;
		else
			changed |= ce_modified_check_fs(istate, ce, st);
	}

	return changed;
}
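/*
 * A concrete illustration of the racy-clean case described above (an
 * informal sketch, not additional logic): suppose all of the following
 * happens within one second on a filesystem with one-second mtime
 * granularity:
 *
 *	echo xyzzy >file
 *	git update-index --add file	(index and file end up with mtime T)
 *	echo frotz >file		(same size, mtime still T)
 *
 * The entry's stat data still matches, so only the extra content check
 * triggered via is_racy_timestamp() can notice the modification.
 */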
int ie_modified(struct index_state *istate,
		const struct cache_entry *ce,
		struct stat *st, unsigned int options)
{
	int changed, changed_fs;

	changed = ie_match_stat(istate, ce, st, options);
	if (!changed)
		return 0;
	/*
	 * If the mode or type has changed, there's no point in trying
	 * to refresh the entry - it's not going to match
	 */
	if (changed & (MODE_CHANGED | TYPE_CHANGED))
		return changed;

	/*
	 * Immediately after read-tree or update-index --cacheinfo,
	 * the length field is zero, as we have never even read the
	 * lstat(2) information once, and we cannot trust DATA_CHANGED
	 * returned by ie_match_stat() which in turn was returned by
	 * ce_match_stat_basic() to signal that the filesize of the
	 * blob changed.  We have to actually go to the filesystem to
	 * see if the contents match, and if so, should answer "unchanged".
	 *
	 * The logic does not apply to gitlinks, as ce_match_stat_basic()
	 * already has checked the actual HEAD from the filesystem in the
	 * subproject.  If ie_match_stat() already said it is different,
	 * then we know it is.
	 */
	if ((changed & DATA_CHANGED) &&
	    (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
		return changed;

	changed_fs = ce_modified_check_fs(istate, ce, st);
	if (changed_fs)
		return changed | changed_fs;
	return 0;
}
int base_name_compare(const char *name1, int len1, int mode1,
		      const char *name2, int len2, int mode2)
{
	unsigned char c1, c2;
	int len = len1 < len2 ? len1 : len2;
	int cmp;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
}
/*
 * df_name_compare() is identical to base_name_compare(), except it
 * compares conflicting directory/file entries as equal. Note that
 * while a directory name compares as equal to a regular file, they
 * then individually compare _differently_ to a filename that has
 * a dot after the basename (because '\0' < '.' < '/').
 *
 * This is used by routines that want to traverse the git namespace
 * but then handle conflicting entries together when possible.
 */
int df_name_compare(const char *name1, int len1, int mode1,
		    const char *name2, int len2, int mode2)
{
	int len = len1 < len2 ? len1 : len2, cmp;
	unsigned char c1, c2;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	/* Directories and files compare equal (same length, same name) */
	if (len1 == len2)
		return 0;
	c1 = name1[len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	c2 = name2[len];
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	if (c1 == '/' && !c2)
		return 0;
	if (c2 == '/' && !c1)
		return 0;
	return c1 - c2;
}
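/*
 * An illustrative ordering (a sketch of the rule described above): with
 * base_name_compare(), a directory "foo" sorts as if it were "foo/", so
 * against a regular file "foo.c":
 *
 *	"foo" (file)		sorts before "foo.c"	because '\0' < '.'
 *	"foo" (directory)	sorts after  "foo.c"	because '/'  > '.'
 *
 * whereas df_name_compare() treats a file "foo" and a directory "foo" as
 * equal, so callers can notice directory/file conflicts.
 */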
int name_compare(const char *name1, size_t len1, const char *name2, size_t len2)
{
	size_t min_len = (len1 < len2) ? len1 : len2;
	int cmp = memcmp(name1, name2, min_len);
	if (cmp)
		return cmp;
	if (len1 < len2)
		return -1;
	if (len1 > len2)
		return 1;
	return 0;
}
531 int cache_name_stage_compare(const char *name1
, int len1
, int stage1
, const char *name2
, int len2
, int stage2
)
535 cmp
= name_compare(name1
, len1
, name2
, len2
);
546 static int index_name_stage_pos(const struct index_state
*istate
, const char *name
, int namelen
, int stage
)
551 last
= istate
->cache_nr
;
552 while (last
> first
) {
553 int next
= (last
+ first
) >> 1;
554 struct cache_entry
*ce
= istate
->cache
[next
];
555 int cmp
= cache_name_stage_compare(name
, namelen
, stage
, ce
->name
, ce_namelen(ce
), ce_stage(ce
));
567 int index_name_pos(const struct index_state
*istate
, const char *name
, int namelen
)
569 return index_name_stage_pos(istate
, name
, namelen
, 0);
572 int remove_index_entry_at(struct index_state
*istate
, int pos
)
574 struct cache_entry
*ce
= istate
->cache
[pos
];
576 record_resolve_undo(istate
, ce
);
577 remove_name_hash(istate
, ce
);
578 save_or_free_index_entry(istate
, ce
);
579 istate
->cache_changed
|= CE_ENTRY_REMOVED
;
581 if (pos
>= istate
->cache_nr
)
583 MOVE_ARRAY(istate
->cache
+ pos
, istate
->cache
+ pos
+ 1,
584 istate
->cache_nr
- pos
);
589 * Remove all cache entries marked for removal, that is where
590 * CE_REMOVE is set in ce_flags. This is much more effective than
591 * calling remove_index_entry_at() for each entry to be removed.
593 void remove_marked_cache_entries(struct index_state
*istate
)
595 struct cache_entry
**ce_array
= istate
->cache
;
598 for (i
= j
= 0; i
< istate
->cache_nr
; i
++) {
599 if (ce_array
[i
]->ce_flags
& CE_REMOVE
) {
600 remove_name_hash(istate
, ce_array
[i
]);
601 save_or_free_index_entry(istate
, ce_array
[i
]);
604 ce_array
[j
++] = ce_array
[i
];
606 if (j
== istate
->cache_nr
)
608 istate
->cache_changed
|= CE_ENTRY_REMOVED
;
609 istate
->cache_nr
= j
;
612 int remove_file_from_index(struct index_state
*istate
, const char *path
)
614 int pos
= index_name_pos(istate
, path
, strlen(path
));
617 cache_tree_invalidate_path(istate
, path
);
618 untracked_cache_remove_from_index(istate
, path
);
619 while (pos
< istate
->cache_nr
&& !strcmp(istate
->cache
[pos
]->name
, path
))
620 remove_index_entry_at(istate
, pos
);
624 static int compare_name(struct cache_entry
*ce
, const char *path
, int namelen
)
626 return namelen
!= ce_namelen(ce
) || memcmp(path
, ce
->name
, namelen
);
629 static int index_name_pos_also_unmerged(struct index_state
*istate
,
630 const char *path
, int namelen
)
632 int pos
= index_name_pos(istate
, path
, namelen
);
633 struct cache_entry
*ce
;
638 /* maybe unmerged? */
640 if (pos
>= istate
->cache_nr
||
641 compare_name((ce
= istate
->cache
[pos
]), path
, namelen
))
644 /* order of preference: stage 2, 1, 3 */
645 if (ce_stage(ce
) == 1 && pos
+ 1 < istate
->cache_nr
&&
646 ce_stage((ce
= istate
->cache
[pos
+ 1])) == 2 &&
647 !compare_name(ce
, path
, namelen
))
652 static int different_name(struct cache_entry
*ce
, struct cache_entry
*alias
)
654 int len
= ce_namelen(ce
);
655 return ce_namelen(alias
) != len
|| memcmp(ce
->name
, alias
->name
, len
);
/*
 * If we add a filename that aliases in the cache, we will use the
 * name that we already have - but we don't want to update the same
 * alias twice, because that implies that there were actually two
 * different files with aliasing names!
 *
 * So we use the CE_ADDED flag to verify that the alias was an old
 * one before we accept it as a replacement.
 */
667 static struct cache_entry
*create_alias_ce(struct index_state
*istate
,
668 struct cache_entry
*ce
,
669 struct cache_entry
*alias
)
672 struct cache_entry
*new_entry
;
674 if (alias
->ce_flags
& CE_ADDED
)
675 die("Will not add file alias '%s' ('%s' already exists in index)", ce
->name
, alias
->name
);
677 /* Ok, create the new entry using the name of the existing alias */
678 len
= ce_namelen(alias
);
679 new_entry
= make_empty_cache_entry(istate
, len
);
680 memcpy(new_entry
->name
, alias
->name
, len
);
681 copy_cache_entry(new_entry
, ce
);
682 save_or_free_index_entry(istate
, ce
);
686 void set_object_name_for_intent_to_add_entry(struct cache_entry
*ce
)
688 struct object_id oid
;
689 if (write_object_file("", 0, blob_type
, &oid
))
690 die("cannot create an empty blob in the object database");
691 oidcpy(&ce
->oid
, &oid
);
694 int add_to_index(struct index_state
*istate
, const char *path
, struct stat
*st
, int flags
)
696 int namelen
, was_same
;
697 mode_t st_mode
= st
->st_mode
;
698 struct cache_entry
*ce
, *alias
= NULL
;
699 unsigned ce_option
= CE_MATCH_IGNORE_VALID
|CE_MATCH_IGNORE_SKIP_WORKTREE
|CE_MATCH_RACY_IS_DIRTY
;
700 int verbose
= flags
& (ADD_CACHE_VERBOSE
| ADD_CACHE_PRETEND
);
701 int pretend
= flags
& ADD_CACHE_PRETEND
;
702 int intent_only
= flags
& ADD_CACHE_INTENT
;
703 int add_option
= (ADD_CACHE_OK_TO_ADD
|ADD_CACHE_OK_TO_REPLACE
|
704 (intent_only
? ADD_CACHE_NEW_ONLY
: 0));
705 int newflags
= HASH_WRITE_OBJECT
;
707 if (flags
& HASH_RENORMALIZE
)
708 newflags
|= HASH_RENORMALIZE
;
710 if (!S_ISREG(st_mode
) && !S_ISLNK(st_mode
) && !S_ISDIR(st_mode
))
711 return error("%s: can only add regular files, symbolic links or git-directories", path
);
713 namelen
= strlen(path
);
714 if (S_ISDIR(st_mode
)) {
715 while (namelen
&& path
[namelen
-1] == '/')
718 ce
= make_empty_cache_entry(istate
, namelen
);
719 memcpy(ce
->name
, path
, namelen
);
720 ce
->ce_namelen
= namelen
;
722 fill_stat_cache_info(ce
, st
);
724 ce
->ce_flags
|= CE_INTENT_TO_ADD
;
727 if (trust_executable_bit
&& has_symlinks
) {
728 ce
->ce_mode
= create_ce_mode(st_mode
);
730 /* If there is an existing entry, pick the mode bits and type
731 * from it, otherwise assume unexecutable regular file.
733 struct cache_entry
*ent
;
734 int pos
= index_name_pos_also_unmerged(istate
, path
, namelen
);
736 ent
= (0 <= pos
) ? istate
->cache
[pos
] : NULL
;
737 ce
->ce_mode
= ce_mode_from_stat(ent
, st_mode
);
740 /* When core.ignorecase=true, determine if a directory of the same name but differing
741 * case already exists within the Git repository. If it does, ensure the directory
742 * case of the file being added to the repository matches (is folded into) the existing
743 * entry's directory case.
746 adjust_dirname_case(istate
, ce
->name
);
748 if (!(flags
& HASH_RENORMALIZE
)) {
749 alias
= index_file_exists(istate
, ce
->name
,
750 ce_namelen(ce
), ignore_case
);
753 !ie_match_stat(istate
, alias
, st
, ce_option
)) {
754 /* Nothing changed, really */
755 if (!S_ISGITLINK(alias
->ce_mode
))
756 ce_mark_uptodate(alias
);
757 alias
->ce_flags
|= CE_ADDED
;
759 discard_cache_entry(ce
);
764 if (index_path(istate
, &ce
->oid
, path
, st
, newflags
)) {
765 discard_cache_entry(ce
);
766 return error("unable to index file %s", path
);
769 set_object_name_for_intent_to_add_entry(ce
);
771 if (ignore_case
&& alias
&& different_name(ce
, alias
))
772 ce
= create_alias_ce(istate
, ce
, alias
);
773 ce
->ce_flags
|= CE_ADDED
;
775 /* It was suspected to be racily clean, but it turns out to be Ok */
778 oideq(&alias
->oid
, &ce
->oid
) &&
779 ce
->ce_mode
== alias
->ce_mode
);
782 discard_cache_entry(ce
);
783 else if (add_index_entry(istate
, ce
, add_option
)) {
784 discard_cache_entry(ce
);
785 return error("unable to add %s to index", path
);
787 if (verbose
&& !was_same
)
788 printf("add '%s'\n", path
);
792 int add_file_to_index(struct index_state
*istate
, const char *path
, int flags
)
795 if (lstat(path
, &st
))
796 die_errno("unable to stat '%s'", path
);
797 return add_to_index(istate
, path
, &st
, flags
);
800 struct cache_entry
*make_empty_cache_entry(struct index_state
*istate
, size_t len
)
802 return mem_pool__ce_calloc(find_mem_pool(istate
), len
);
805 struct cache_entry
*make_empty_transient_cache_entry(size_t len
)
807 return xcalloc(1, cache_entry_size(len
));
810 struct cache_entry
*make_cache_entry(struct index_state
*istate
,
812 const struct object_id
*oid
,
815 unsigned int refresh_options
)
817 struct cache_entry
*ce
, *ret
;
820 if (!verify_path(path
, mode
)) {
821 error("Invalid path '%s'", path
);
826 ce
= make_empty_cache_entry(istate
, len
);
828 oidcpy(&ce
->oid
, oid
);
829 memcpy(ce
->name
, path
, len
);
830 ce
->ce_flags
= create_ce_flags(stage
);
831 ce
->ce_namelen
= len
;
832 ce
->ce_mode
= create_ce_mode(mode
);
834 ret
= refresh_cache_entry(istate
, ce
, refresh_options
);
836 discard_cache_entry(ce
);
840 struct cache_entry
*make_transient_cache_entry(unsigned int mode
, const struct object_id
*oid
,
841 const char *path
, int stage
)
843 struct cache_entry
*ce
;
846 if (!verify_path(path
, mode
)) {
847 error("Invalid path '%s'", path
);
852 ce
= make_empty_transient_cache_entry(len
);
854 oidcpy(&ce
->oid
, oid
);
855 memcpy(ce
->name
, path
, len
);
856 ce
->ce_flags
= create_ce_flags(stage
);
857 ce
->ce_namelen
= len
;
858 ce
->ce_mode
= create_ce_mode(mode
);
864 * Chmod an index entry with either +x or -x.
866 * Returns -1 if the chmod for the particular cache entry failed (if it's
867 * not a regular file), -2 if an invalid flip argument is passed in, 0
870 int chmod_index_entry(struct index_state
*istate
, struct cache_entry
*ce
,
873 if (!S_ISREG(ce
->ce_mode
))
880 ce
->ce_mode
&= ~0111;
885 cache_tree_invalidate_path(istate
, ce
->name
);
886 ce
->ce_flags
|= CE_UPDATE_IN_BASE
;
887 mark_fsmonitor_invalid(istate
, ce
);
888 istate
->cache_changed
|= CE_ENTRY_CHANGED
;
893 int ce_same_name(const struct cache_entry
*a
, const struct cache_entry
*b
)
895 int len
= ce_namelen(a
);
896 return ce_namelen(b
) == len
&& !memcmp(a
->name
, b
->name
, len
);
/*
 * We fundamentally don't like some paths: we don't want
 * dot or dot-dot anywhere, and for obvious reasons don't
 * want to recurse into ".git" either.
 *
 * Also, we don't want double slashes or slashes at the
 * end that can make pathnames ambiguous.
 */
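/*
 * A few illustrative verdicts (informal examples of the rules spelled out
 * above and enforced below; not an exhaustive list):
 *
 *	"foo/bar.c"		- ok
 *	".", "..", ".git"	- rejected (dot, dot-dot and ".git" components)
 *	".GIT/config"		- rejected (".git" is matched case-insensitively)
 *	"foo//bar"		- rejected (double slash)
 *	"foo/"			- rejected (trailing slash)
 */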
907 static int verify_dotfile(const char *rest
, unsigned mode
)
910 * The first character was '.', but that
911 * has already been discarded, we now test
915 /* "." is not allowed */
916 if (*rest
== '\0' || is_dir_sep(*rest
))
921 * ".git" followed by NUL or slash is bad. Note that we match
922 * case-insensitively here, even if ignore_case is not set.
923 * This outlaws ".GIT" everywhere out of an abundance of caution,
924 * since there's really no good reason to allow it.
926 * Once we've seen ".git", we can also find ".gitmodules", etc (also
927 * case-insensitively).
931 if (rest
[1] != 'i' && rest
[1] != 'I')
933 if (rest
[2] != 't' && rest
[2] != 'T')
935 if (rest
[3] == '\0' || is_dir_sep(rest
[3]))
939 if (skip_iprefix(rest
, "modules", &rest
) &&
940 (*rest
== '\0' || is_dir_sep(*rest
)))
945 if (rest
[1] == '\0' || is_dir_sep(rest
[1]))
951 int verify_path(const char *path
, unsigned mode
)
955 if (has_dos_drive_prefix(path
))
958 if (!is_valid_path(path
))
968 if (is_hfs_dotgit(path
))
971 if (is_hfs_dotgitmodules(path
))
976 if (is_ntfs_dotgit(path
))
979 if (is_ntfs_dotgitmodules(path
))
985 if ((c
== '.' && !verify_dotfile(path
, mode
)) ||
986 is_dir_sep(c
) || c
== '\0')
988 } else if (c
== '\\' && protect_ntfs
) {
989 if (is_ntfs_dotgit(path
))
992 if (is_ntfs_dotgitmodules(path
))
1002 * Do we have another file that has the beginning components being a
1003 * proper superset of the name we're trying to add?
1005 static int has_file_name(struct index_state
*istate
,
1006 const struct cache_entry
*ce
, int pos
, int ok_to_replace
)
1009 int len
= ce_namelen(ce
);
1010 int stage
= ce_stage(ce
);
1011 const char *name
= ce
->name
;
1013 while (pos
< istate
->cache_nr
) {
1014 struct cache_entry
*p
= istate
->cache
[pos
++];
1016 if (len
>= ce_namelen(p
))
1018 if (memcmp(name
, p
->name
, len
))
1020 if (ce_stage(p
) != stage
)
1022 if (p
->name
[len
] != '/')
1024 if (p
->ce_flags
& CE_REMOVE
)
1029 remove_index_entry_at(istate
, --pos
);
/*
 * Like strcmp(), but also return the offset of the first change.
 * If strings are equal, return the length.
 */
int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
{
	int k;

	if (!first_change)
		return strcmp(s1, s2);

	for (k = 0; s1[k] == s2[k]; k++)
		if (!s1[k])
			break;
	*first_change = k;
	return (unsigned char)s1[k] - (unsigned char)s2[k];
}
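/*
 * For example (illustrative only):
 *
 *	size_t off;
 *	strcmp_offset("abc", "abz", &off);	- returns a negative value, off == 2
 *	strcmp_offset("abc", "abc", &off);	- returns 0, off == 3 (the length)
 */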
1055 * Do we have another file with a pathname that is a proper
1056 * subset of the name we're trying to add?
1058 * That is, is there another file in the index with a path
1059 * that matches a sub-directory in the given entry?
1061 static int has_dir_name(struct index_state
*istate
,
1062 const struct cache_entry
*ce
, int pos
, int ok_to_replace
)
1065 int stage
= ce_stage(ce
);
1066 const char *name
= ce
->name
;
1067 const char *slash
= name
+ ce_namelen(ce
);
1072 * We are frequently called during an iteration on a sorted
1073 * list of pathnames and while building a new index. Therefore,
1074 * there is a high probability that this entry will eventually
1075 * be appended to the index, rather than inserted in the middle.
1076 * If we can confirm that, we can avoid binary searches on the
1077 * components of the pathname.
1079 * Compare the entry's full path with the last path in the index.
1081 if (istate
->cache_nr
> 0) {
1082 cmp_last
= strcmp_offset(name
,
1083 istate
->cache
[istate
->cache_nr
- 1]->name
,
1086 if (len_eq_last
== 0) {
1088 * The entry sorts AFTER the last one in the
1089 * index and their paths have no common prefix,
1090 * so there cannot be a F/D conflict.
				 * The entry sorts AFTER the last one in the
				 * index, but has a common prefix. Fall through
				 * to the loop below to dissect the entry's path
				 * and see where the difference is.
1101 } else if (cmp_last
== 0) {
1103 * The entry exactly matches the last one in the
1104 * index, but because of multiple stage and CE_REMOVE
1105 * items, we fall through and let the regular search
1115 if (*--slash
== '/')
1117 if (slash
<= ce
->name
)
1124 * (len + 1) is a directory boundary (including
1125 * the trailing slash). And since the loop is
1126 * decrementing "slash", the first iteration is
1127 * the longest directory prefix; subsequent
1128 * iterations consider parent directories.
1131 if (len
+ 1 <= len_eq_last
) {
1133 * The directory prefix (including the trailing
1134 * slash) also appears as a prefix in the last
1135 * entry, so the remainder cannot collide (because
1136 * strcmp said the whole path was greater).
1141 * LT: last: xxx/file_A
1147 if (len
> len_eq_last
) {
1149 * This part of the directory prefix (excluding
1150 * the trailing slash) is longer than the known
1151 * equal portions, so this sub-directory cannot
1152 * collide with a file.
1160 if (istate
->cache_nr
> 0 &&
1161 ce_namelen(istate
->cache
[istate
->cache_nr
- 1]) > len
) {
1163 * The directory prefix lines up with part of
1164 * a longer file or directory name, but sorts
1165 * after it, so this sub-directory cannot
1166 * collide with a file.
1168 * last: xxx/yy-file (because '-' sorts before '/')
1175 * This is a possible collision. Fall through and
1176 * let the regular search code handle it.
1183 pos
= index_name_stage_pos(istate
, name
, len
, stage
);
1186 * Found one, but not so fast. This could
1187 * be a marker that says "I was here, but
1188 * I am being removed". Such an entry is
1189 * not a part of the resulting tree, and
1190 * it is Ok to have a directory at the same
1193 if (!(istate
->cache
[pos
]->ce_flags
& CE_REMOVE
)) {
1197 remove_index_entry_at(istate
, pos
);
1205 * Trivial optimization: if we find an entry that
1206 * already matches the sub-directory, then we know
1207 * we're ok, and we can exit.
1209 while (pos
< istate
->cache_nr
) {
1210 struct cache_entry
*p
= istate
->cache
[pos
];
1211 if ((ce_namelen(p
) <= len
) ||
1212 (p
->name
[len
] != '/') ||
1213 memcmp(p
->name
, name
, len
))
1214 break; /* not our subdirectory */
1215 if (ce_stage(p
) == stage
&& !(p
->ce_flags
& CE_REMOVE
))
1217 * p is at the same stage as our entry, and
1218 * is a subdirectory of what we are looking
1219 * at, so we cannot have conflicts at our
1220 * level or anything shorter.
1229 /* We may be in a situation where we already have path/file and path
1230 * is being added, or we already have path and path/file is being
1231 * added. Either one would result in a nonsense tree that has path
1232 * twice when git-write-tree tries to write it out. Prevent it.
1234 * If ok-to-replace is specified, we remove the conflicting entries
1235 * from the cache so the caller should recompute the insert position.
1236 * When this happens, we return non-zero.
1238 static int check_file_directory_conflict(struct index_state
*istate
,
1239 const struct cache_entry
*ce
,
1240 int pos
, int ok_to_replace
)
1245 * When ce is an "I am going away" entry, we allow it to be added
1247 if (ce
->ce_flags
& CE_REMOVE
)
1251 * We check if the path is a sub-path of a subsequent pathname
1252 * first, since removing those will not change the position
1255 retval
= has_file_name(istate
, ce
, pos
, ok_to_replace
);
1258 * Then check if the path might have a clashing sub-directory
1261 return retval
+ has_dir_name(istate
, ce
, pos
, ok_to_replace
);
1264 static int add_index_entry_with_check(struct index_state
*istate
, struct cache_entry
*ce
, int option
)
1267 int ok_to_add
= option
& ADD_CACHE_OK_TO_ADD
;
1268 int ok_to_replace
= option
& ADD_CACHE_OK_TO_REPLACE
;
1269 int skip_df_check
= option
& ADD_CACHE_SKIP_DFCHECK
;
1270 int new_only
= option
& ADD_CACHE_NEW_ONLY
;
1272 if (!(option
& ADD_CACHE_KEEP_CACHE_TREE
))
1273 cache_tree_invalidate_path(istate
, ce
->name
);
1276 * If this entry's path sorts after the last entry in the index,
1277 * we can avoid searching for it.
1279 if (istate
->cache_nr
> 0 &&
1280 strcmp(ce
->name
, istate
->cache
[istate
->cache_nr
- 1]->name
) > 0)
1281 pos
= -istate
->cache_nr
- 1;
1283 pos
= index_name_stage_pos(istate
, ce
->name
, ce_namelen(ce
), ce_stage(ce
));
1285 /* existing match? Just replace it. */
1288 replace_index_entry(istate
, pos
, ce
);
1293 if (!(option
& ADD_CACHE_KEEP_CACHE_TREE
))
1294 untracked_cache_add_to_index(istate
, ce
->name
);
1297 * Inserting a merged entry ("stage 0") into the index
1298 * will always replace all non-merged entries..
1300 if (pos
< istate
->cache_nr
&& ce_stage(ce
) == 0) {
1301 while (ce_same_name(istate
->cache
[pos
], ce
)) {
1303 if (!remove_index_entry_at(istate
, pos
))
1310 if (!verify_path(ce
->name
, ce
->ce_mode
))
1311 return error("Invalid path '%s'", ce
->name
);
1313 if (!skip_df_check
&&
1314 check_file_directory_conflict(istate
, ce
, pos
, ok_to_replace
)) {
1316 return error("'%s' appears as both a file and as a directory",
1318 pos
= index_name_stage_pos(istate
, ce
->name
, ce_namelen(ce
), ce_stage(ce
));
1324 int add_index_entry(struct index_state
*istate
, struct cache_entry
*ce
, int option
)
1328 if (option
& ADD_CACHE_JUST_APPEND
)
1329 pos
= istate
->cache_nr
;
1332 ret
= add_index_entry_with_check(istate
, ce
, option
);
1338 /* Make sure the array is big enough .. */
1339 ALLOC_GROW(istate
->cache
, istate
->cache_nr
+ 1, istate
->cache_alloc
);
1343 if (istate
->cache_nr
> pos
+ 1)
1344 MOVE_ARRAY(istate
->cache
+ pos
+ 1, istate
->cache
+ pos
,
1345 istate
->cache_nr
- pos
- 1);
1346 set_index_entry(istate
, pos
, ce
);
1347 istate
->cache_changed
|= CE_ENTRY_ADDED
;
1352 * "refresh" does not calculate a new sha1 file or bring the
1353 * cache up-to-date for mode/content changes. But what it
1354 * _does_ do is to "re-match" the stat information of a file
1355 * with the cache, so that you can refresh the cache for a
1356 * file that hasn't been changed but where the stat entry is
1359 * For example, you'd want to do this after doing a "git-read-tree",
1360 * to link up the stat cache details with the proper files.
1362 static struct cache_entry
*refresh_cache_ent(struct index_state
*istate
,
1363 struct cache_entry
*ce
,
1364 unsigned int options
, int *err
,
1368 struct cache_entry
*updated
;
1370 int refresh
= options
& CE_MATCH_REFRESH
;
1371 int ignore_valid
= options
& CE_MATCH_IGNORE_VALID
;
1372 int ignore_skip_worktree
= options
& CE_MATCH_IGNORE_SKIP_WORKTREE
;
1373 int ignore_missing
= options
& CE_MATCH_IGNORE_MISSING
;
1374 int ignore_fsmonitor
= options
& CE_MATCH_IGNORE_FSMONITOR
;
1376 if (!refresh
|| ce_uptodate(ce
))
1379 if (!ignore_fsmonitor
)
1380 refresh_fsmonitor(istate
);
1382 * CE_VALID or CE_SKIP_WORKTREE means the user promised us
1383 * that the change to the work tree does not matter and told
1386 if (!ignore_skip_worktree
&& ce_skip_worktree(ce
)) {
1387 ce_mark_uptodate(ce
);
1390 if (!ignore_valid
&& (ce
->ce_flags
& CE_VALID
)) {
1391 ce_mark_uptodate(ce
);
1394 if (!ignore_fsmonitor
&& (ce
->ce_flags
& CE_FSMONITOR_VALID
)) {
1395 ce_mark_uptodate(ce
);
1399 if (has_symlink_leading_path(ce
->name
, ce_namelen(ce
))) {
1407 if (lstat(ce
->name
, &st
) < 0) {
1408 if (ignore_missing
&& errno
== ENOENT
)
1415 changed
= ie_match_stat(istate
, ce
, &st
, options
);
1417 *changed_ret
= changed
;
1420 * The path is unchanged. If we were told to ignore
1421 * valid bit, then we did the actual stat check and
1422 * found that the entry is unmodified. If the entry
1423 * is not marked VALID, this is the place to mark it
1424 * valid again, under "assume unchanged" mode.
1426 if (ignore_valid
&& assume_unchanged
&&
1427 !(ce
->ce_flags
& CE_VALID
))
1428 ; /* mark this one VALID again */
1431 * We do not mark the index itself "modified"
1432 * because CE_UPTODATE flag is in-core only;
1433 * we are not going to write this change out.
1435 if (!S_ISGITLINK(ce
->ce_mode
)) {
1436 ce_mark_uptodate(ce
);
1437 mark_fsmonitor_valid(ce
);
1443 if (ie_modified(istate
, ce
, &st
, options
)) {
1449 updated
= make_empty_cache_entry(istate
, ce_namelen(ce
));
1450 copy_cache_entry(updated
, ce
);
1451 memcpy(updated
->name
, ce
->name
, ce
->ce_namelen
+ 1);
1452 fill_stat_cache_info(updated
, &st
);
1454 * If ignore_valid is not set, we should leave CE_VALID bit
1455 * alone. Otherwise, paths marked with --no-assume-unchanged
1456 * (i.e. things to be edited) will reacquire CE_VALID bit
1457 * automatically, which is not really what we want.
1459 if (!ignore_valid
&& assume_unchanged
&&
1460 !(ce
->ce_flags
& CE_VALID
))
1461 updated
->ce_flags
&= ~CE_VALID
;
1463 /* istate->cache_changed is updated in the caller */
1467 static void show_file(const char * fmt
, const char * name
, int in_porcelain
,
1468 int * first
, const char *header_msg
)
1470 if (in_porcelain
&& *first
&& header_msg
) {
1471 printf("%s\n", header_msg
);
1477 int refresh_index(struct index_state
*istate
, unsigned int flags
,
1478 const struct pathspec
*pathspec
,
1479 char *seen
, const char *header_msg
)
1483 int really
= (flags
& REFRESH_REALLY
) != 0;
1484 int allow_unmerged
= (flags
& REFRESH_UNMERGED
) != 0;
1485 int quiet
= (flags
& REFRESH_QUIET
) != 0;
1486 int not_new
= (flags
& REFRESH_IGNORE_MISSING
) != 0;
1487 int ignore_submodules
= (flags
& REFRESH_IGNORE_SUBMODULES
) != 0;
1489 int in_porcelain
= (flags
& REFRESH_IN_PORCELAIN
);
1490 unsigned int options
= (CE_MATCH_REFRESH
|
1491 (really
? CE_MATCH_IGNORE_VALID
: 0) |
1492 (not_new
? CE_MATCH_IGNORE_MISSING
: 0));
1493 const char *modified_fmt
;
1494 const char *deleted_fmt
;
1495 const char *typechange_fmt
;
1496 const char *added_fmt
;
1497 const char *unmerged_fmt
;
1498 struct progress
*progress
= NULL
;
1500 if (flags
& REFRESH_PROGRESS
&& isatty(2))
1501 progress
= start_delayed_progress(_("Refresh index"),
1504 trace_performance_enter();
1505 modified_fmt
= (in_porcelain
? "M\t%s\n" : "%s: needs update\n");
1506 deleted_fmt
= (in_porcelain
? "D\t%s\n" : "%s: needs update\n");
1507 typechange_fmt
= (in_porcelain
? "T\t%s\n" : "%s needs update\n");
1508 added_fmt
= (in_porcelain
? "A\t%s\n" : "%s needs update\n");
1509 unmerged_fmt
= (in_porcelain
? "U\t%s\n" : "%s: needs merge\n");
1511 * Use the multi-threaded preload_index() to refresh most of the
1512 * cache entries quickly then in the single threaded loop below,
1513 * we only have to do the special cases that are left.
1515 preload_index(istate
, pathspec
, 0);
1516 for (i
= 0; i
< istate
->cache_nr
; i
++) {
1517 struct cache_entry
*ce
, *new_entry
;
1518 int cache_errno
= 0;
1522 ce
= istate
->cache
[i
];
1523 if (ignore_submodules
&& S_ISGITLINK(ce
->ce_mode
))
1526 if (pathspec
&& !ce_path_match(istate
, ce
, pathspec
, seen
))
1530 while ((i
< istate
->cache_nr
) &&
1531 ! strcmp(istate
->cache
[i
]->name
, ce
->name
))
1537 show_file(unmerged_fmt
, ce
->name
, in_porcelain
,
1538 &first
, header_msg
);
1546 new_entry
= refresh_cache_ent(istate
, ce
, options
, &cache_errno
, &changed
);
1547 if (new_entry
== ce
)
1550 display_progress(progress
, i
);
1554 if (really
&& cache_errno
== EINVAL
) {
1555 /* If we are doing --really-refresh that
1556 * means the index is not valid anymore.
1558 ce
->ce_flags
&= ~CE_VALID
;
1559 ce
->ce_flags
|= CE_UPDATE_IN_BASE
;
1560 mark_fsmonitor_invalid(istate
, ce
);
1561 istate
->cache_changed
|= CE_ENTRY_CHANGED
;
1566 if (cache_errno
== ENOENT
)
1568 else if (ce_intent_to_add(ce
))
1569 fmt
= added_fmt
; /* must be before other checks */
1570 else if (changed
& TYPE_CHANGED
)
1571 fmt
= typechange_fmt
;
1575 ce
->name
, in_porcelain
, &first
, header_msg
);
1580 replace_index_entry(istate
, i
, new_entry
);
1583 display_progress(progress
, istate
->cache_nr
);
1584 stop_progress(&progress
);
1586 trace_performance_leave("refresh index");
1590 struct cache_entry
*refresh_cache_entry(struct index_state
*istate
,
1591 struct cache_entry
*ce
,
1592 unsigned int options
)
1594 return refresh_cache_ent(istate
, ce
, options
, NULL
, NULL
);
1598 /*****************************************************************
1600 *****************************************************************/
1602 #define INDEX_FORMAT_DEFAULT 3
1604 static unsigned int get_index_format_default(void)
1606 char *envversion
= getenv("GIT_INDEX_VERSION");
1609 unsigned int version
= INDEX_FORMAT_DEFAULT
;
1612 if (!git_config_get_int("index.version", &value
))
1614 if (version
< INDEX_FORMAT_LB
|| INDEX_FORMAT_UB
< version
) {
1615 warning(_("index.version set, but the value is invalid.\n"
1616 "Using version %i"), INDEX_FORMAT_DEFAULT
);
1617 return INDEX_FORMAT_DEFAULT
;
1622 version
= strtoul(envversion
, &endp
, 10);
1624 version
< INDEX_FORMAT_LB
|| INDEX_FORMAT_UB
< version
) {
1625 warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
1626 "Using version %i"), INDEX_FORMAT_DEFAULT
);
1627 version
= INDEX_FORMAT_DEFAULT
;
1633 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
1634 * Again - this is just a (very strong in practice) heuristic that
1635 * the inode hasn't changed.
1637 * We save the fields in big-endian order to allow using the
1638 * index file over NFS transparently.
1640 struct ondisk_cache_entry
{
1641 struct cache_time ctime
;
1642 struct cache_time mtime
;
1649 unsigned char sha1
[20];
1651 char name
[FLEX_ARRAY
]; /* more */
1655 * This struct is used when CE_EXTENDED bit is 1
1656 * The struct must match ondisk_cache_entry exactly from
1659 struct ondisk_cache_entry_extended
{
1660 struct cache_time ctime
;
1661 struct cache_time mtime
;
1668 unsigned char sha1
[20];
1671 char name
[FLEX_ARRAY
]; /* more */
1674 /* These are only used for v3 or lower */
1675 #define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
1676 #define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
1677 #define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
1678 #define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len)
1679 #define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? \
1680 ondisk_cache_entry_extended_size(ce_namelen(ce)) : \
1681 ondisk_cache_entry_size(ce_namelen(ce)))
1683 /* Allow fsck to force verification of the index checksum. */
1684 int verify_index_checksum
;
1686 /* Allow fsck to force verification of the cache entry order. */
1687 int verify_ce_order
;
1689 static int verify_hdr(const struct cache_header
*hdr
, unsigned long size
)
1692 unsigned char hash
[GIT_MAX_RAWSZ
];
1695 if (hdr
->hdr_signature
!= htonl(CACHE_SIGNATURE
))
1696 return error("bad signature");
1697 hdr_version
= ntohl(hdr
->hdr_version
);
1698 if (hdr_version
< INDEX_FORMAT_LB
|| INDEX_FORMAT_UB
< hdr_version
)
1699 return error("bad index version %d", hdr_version
);
1701 if (!verify_index_checksum
)
1704 the_hash_algo
->init_fn(&c
);
1705 the_hash_algo
->update_fn(&c
, hdr
, size
- the_hash_algo
->rawsz
);
1706 the_hash_algo
->final_fn(hash
, &c
);
1707 if (!hasheq(hash
, (unsigned char *)hdr
+ size
- the_hash_algo
->rawsz
))
1708 return error("bad index file sha1 signature");
1712 static int read_index_extension(struct index_state
*istate
,
1713 const char *ext
, const char *data
, unsigned long sz
)
1715 switch (CACHE_EXT(ext
)) {
1716 case CACHE_EXT_TREE
:
1717 istate
->cache_tree
= cache_tree_read(data
, sz
);
1719 case CACHE_EXT_RESOLVE_UNDO
:
1720 istate
->resolve_undo
= resolve_undo_read(data
, sz
);
1722 case CACHE_EXT_LINK
:
1723 if (read_link_extension(istate
, data
, sz
))
1726 case CACHE_EXT_UNTRACKED
:
1727 istate
->untracked
= read_untracked_extension(data
, sz
);
1729 case CACHE_EXT_FSMONITOR
:
1730 read_fsmonitor_extension(istate
, data
, sz
);
1732 case CACHE_EXT_ENDOFINDEXENTRIES
:
1733 case CACHE_EXT_INDEXENTRYOFFSETTABLE
:
1734 /* already handled in do_read_index() */
1737 if (*ext
< 'A' || 'Z' < *ext
)
1738 return error("index uses %.4s extension, which we do not understand",
1740 fprintf(stderr
, "ignoring %.4s extension\n", ext
);
1746 int hold_locked_index(struct lock_file
*lk
, int lock_flags
)
1748 return hold_lock_file_for_update(lk
, get_index_file(), lock_flags
);
1751 int read_index(struct index_state
*istate
)
1753 return read_index_from(istate
, get_index_file(), get_git_dir());
1756 static struct cache_entry
*create_from_disk(struct mem_pool
*ce_mem_pool
,
1757 unsigned int version
,
1758 struct ondisk_cache_entry
*ondisk
,
1759 unsigned long *ent_size
,
1760 const struct cache_entry
*previous_ce
)
1762 struct cache_entry
*ce
;
1766 size_t copy_len
= 0;
	/*
	 * Adjacent cache entries tend to share the leading paths, so it makes
	 * sense to only store the differences in later entries.  In the v4
	 * on-disk format of the index, each on-disk cache entry stores the
	 * number of bytes to be stripped from the end of the previous name,
	 * and the bytes to append to the result, to come up with its name.
	 */
	int expand_name_field = version == 4;
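	/*
	 * A worked example of that encoding (illustrative values only): if the
	 * previous entry's name is "Documentation/git.txt" (21 bytes) and this
	 * entry is "Documentation/githooks.txt", the on-disk data is the
	 * varint 4 (strip ".txt" from the end of the previous name) followed
	 * by the NUL-terminated string "hooks.txt"; below, copy_len becomes
	 * 21 - 4 = 17 and the two pieces are stitched back together.
	 */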
1776 /* On-disk flags are just 16 bits */
1777 flags
= get_be16(&ondisk
->flags
);
1778 len
= flags
& CE_NAMEMASK
;
1780 if (flags
& CE_EXTENDED
) {
1781 struct ondisk_cache_entry_extended
*ondisk2
;
1783 ondisk2
= (struct ondisk_cache_entry_extended
*)ondisk
;
1784 extended_flags
= get_be16(&ondisk2
->flags2
) << 16;
1785 /* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
1786 if (extended_flags
& ~CE_EXTENDED_FLAGS
)
1787 die("Unknown index entry format %08x", extended_flags
);
1788 flags
|= extended_flags
;
1789 name
= ondisk2
->name
;
1792 name
= ondisk
->name
;
1794 if (expand_name_field
) {
1795 const unsigned char *cp
= (const unsigned char *)name
;
1796 size_t strip_len
, previous_len
;
		/* If we're at the beginning of a block, ignore the previous name */
1799 strip_len
= decode_varint(&cp
);
1801 previous_len
= previous_ce
->ce_namelen
;
1802 if (previous_len
< strip_len
)
1803 die(_("malformed name field in the index, near path '%s'"),
1805 copy_len
= previous_len
- strip_len
;
1807 name
= (const char *)cp
;
1810 if (len
== CE_NAMEMASK
) {
1812 if (expand_name_field
)
1816 ce
= mem_pool__ce_alloc(ce_mem_pool
, len
);
1818 ce
->ce_stat_data
.sd_ctime
.sec
= get_be32(&ondisk
->ctime
.sec
);
1819 ce
->ce_stat_data
.sd_mtime
.sec
= get_be32(&ondisk
->mtime
.sec
);
1820 ce
->ce_stat_data
.sd_ctime
.nsec
= get_be32(&ondisk
->ctime
.nsec
);
1821 ce
->ce_stat_data
.sd_mtime
.nsec
= get_be32(&ondisk
->mtime
.nsec
);
1822 ce
->ce_stat_data
.sd_dev
= get_be32(&ondisk
->dev
);
1823 ce
->ce_stat_data
.sd_ino
= get_be32(&ondisk
->ino
);
1824 ce
->ce_mode
= get_be32(&ondisk
->mode
);
1825 ce
->ce_stat_data
.sd_uid
= get_be32(&ondisk
->uid
);
1826 ce
->ce_stat_data
.sd_gid
= get_be32(&ondisk
->gid
);
1827 ce
->ce_stat_data
.sd_size
= get_be32(&ondisk
->size
);
1828 ce
->ce_flags
= flags
& ~CE_NAMEMASK
;
1829 ce
->ce_namelen
= len
;
1831 hashcpy(ce
->oid
.hash
, ondisk
->sha1
);
1833 if (expand_name_field
) {
1835 memcpy(ce
->name
, previous_ce
->name
, copy_len
);
1836 memcpy(ce
->name
+ copy_len
, name
, len
+ 1 - copy_len
);
1837 *ent_size
= (name
- ((char *)ondisk
)) + len
+ 1 - copy_len
;
1839 memcpy(ce
->name
, name
, len
+ 1);
1840 *ent_size
= ondisk_ce_size(ce
);
1845 static void check_ce_order(struct index_state
*istate
)
1849 if (!verify_ce_order
)
1852 for (i
= 1; i
< istate
->cache_nr
; i
++) {
1853 struct cache_entry
*ce
= istate
->cache
[i
- 1];
1854 struct cache_entry
*next_ce
= istate
->cache
[i
];
1855 int name_compare
= strcmp(ce
->name
, next_ce
->name
);
1857 if (0 < name_compare
)
1858 die("unordered stage entries in index");
1859 if (!name_compare
) {
1861 die("multiple stage entries for merged file '%s'",
1863 if (ce_stage(ce
) > ce_stage(next_ce
))
1864 die("unordered stage entries for '%s'",
1870 static void tweak_untracked_cache(struct index_state
*istate
)
1872 switch (git_config_get_untracked_cache()) {
1873 case -1: /* keep: do nothing */
1876 remove_untracked_cache(istate
);
1879 add_untracked_cache(istate
);
1881 default: /* unknown value: do nothing */
1886 static void tweak_split_index(struct index_state
*istate
)
1888 switch (git_config_get_split_index()) {
1889 case -1: /* unset: do nothing */
1892 remove_split_index(istate
);
1895 add_split_index(istate
);
1897 default: /* unknown value: do nothing */
1902 static void post_read_index_from(struct index_state
*istate
)
1904 check_ce_order(istate
);
1905 tweak_untracked_cache(istate
);
1906 tweak_split_index(istate
);
1907 tweak_fsmonitor(istate
);
1910 static size_t estimate_cache_size_from_compressed(unsigned int entries
)
1912 return entries
* (sizeof(struct cache_entry
) + CACHE_ENTRY_PATH_LENGTH
);
1915 static size_t estimate_cache_size(size_t ondisk_size
, unsigned int entries
)
1917 long per_entry
= sizeof(struct cache_entry
) - sizeof(struct ondisk_cache_entry
);
1920 * Account for potential alignment differences.
1922 per_entry
+= align_padding_size(sizeof(struct cache_entry
), -sizeof(struct ondisk_cache_entry
));
1923 return ondisk_size
+ entries
* per_entry
;
1926 struct index_entry_offset
1928 /* starting byte offset into index file, count of index entries in this block */
1932 struct index_entry_offset_table
1935 struct index_entry_offset entries
[FLEX_ARRAY
];
1938 static struct index_entry_offset_table
*read_ieot_extension(const char *mmap
, size_t mmap_size
, size_t offset
);
1939 static void write_ieot_extension(struct strbuf
*sb
, struct index_entry_offset_table
*ieot
);
1941 static size_t read_eoie_extension(const char *mmap
, size_t mmap_size
);
1942 static void write_eoie_extension(struct strbuf
*sb
, git_hash_ctx
*eoie_context
, size_t offset
);
1944 struct load_index_extensions
1947 struct index_state
*istate
;
1950 unsigned long src_offset
;
1953 static void *load_index_extensions(void *_data
)
1955 struct load_index_extensions
*p
= _data
;
1956 unsigned long src_offset
= p
->src_offset
;
1958 while (src_offset
<= p
->mmap_size
- the_hash_algo
->rawsz
- 8) {
		/* After an array of active_nr index entries,
		 * there can be an arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
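		/*
		 * So a "TREE" extension carrying 100 bytes of cache-tree data
		 * would be laid out like this (an illustrative sketch of the
		 * format described above):
		 *
		 *	offset + 0:  'T' 'R' 'E' 'E'	4-byte extension name
		 *	offset + 4:  00 00 00 64	length in network byte order
		 *	offset + 8:  ...		100 bytes of extension data
		 */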
1965 uint32_t extsize
= get_be32(p
->mmap
+ src_offset
+ 4);
1966 if (read_index_extension(p
->istate
,
1967 p
->mmap
+ src_offset
,
1968 p
->mmap
+ src_offset
+ 8,
1970 munmap((void *)p
->mmap
, p
->mmap_size
);
1971 die(_("index file corrupt"));
1974 src_offset
+= extsize
;
1981 * A helper function that will load the specified range of cache entries
1982 * from the memory mapped file and add them to the given index.
1984 static unsigned long load_cache_entry_block(struct index_state
*istate
,
1985 struct mem_pool
*ce_mem_pool
, int offset
, int nr
, const char *mmap
,
1986 unsigned long start_offset
, const struct cache_entry
*previous_ce
)
1989 unsigned long src_offset
= start_offset
;
1991 for (i
= offset
; i
< offset
+ nr
; i
++) {
1992 struct ondisk_cache_entry
*disk_ce
;
1993 struct cache_entry
*ce
;
1994 unsigned long consumed
;
1996 disk_ce
= (struct ondisk_cache_entry
*)(mmap
+ src_offset
);
1997 ce
= create_from_disk(ce_mem_pool
, istate
->version
, disk_ce
, &consumed
, previous_ce
);
1998 set_index_entry(istate
, i
, ce
);
2000 src_offset
+= consumed
;
2003 return src_offset
- start_offset
;
2006 static unsigned long load_all_cache_entries(struct index_state
*istate
,
2007 const char *mmap
, size_t mmap_size
, unsigned long src_offset
)
2009 unsigned long consumed
;
2011 if (istate
->version
== 4) {
2012 mem_pool_init(&istate
->ce_mem_pool
,
2013 estimate_cache_size_from_compressed(istate
->cache_nr
));
2015 mem_pool_init(&istate
->ce_mem_pool
,
2016 estimate_cache_size(mmap_size
, istate
->cache_nr
));
2019 consumed
= load_cache_entry_block(istate
, istate
->ce_mem_pool
,
2020 0, istate
->cache_nr
, mmap
, src_offset
, NULL
);
/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to online_cpus() threads, and we want
 * to have at least 10000 cache entries per thread for it to
 * be worth starting a thread.
 */
#define THREAD_COST		(10000)
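/*
 * A minimal sketch of how THREAD_COST is meant to be applied (this mirrors
 * the heuristic used further down in do_read_index(); shown here only for
 * illustration):
 *
 *	nr_threads = istate->cache_nr / THREAD_COST;
 *	cpus = online_cpus();
 *	if (nr_threads > cpus)
 *		nr_threads = cpus;
 */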
2033 struct load_cache_entries_thread_data
2036 struct index_state
*istate
;
2037 struct mem_pool
*ce_mem_pool
;
2040 struct index_entry_offset_table
*ieot
;
2041 int ieot_start
; /* starting index into the ieot array */
2042 int ieot_blocks
; /* count of ieot entries to process */
2043 unsigned long consumed
; /* return # of bytes in index file processed */
2047 * A thread proc to run the load_cache_entries() computation
2048 * across multiple background threads.
2050 static void *load_cache_entries_thread(void *_data
)
2052 struct load_cache_entries_thread_data
*p
= _data
;
2055 /* iterate across all ieot blocks assigned to this thread */
2056 for (i
= p
->ieot_start
; i
< p
->ieot_start
+ p
->ieot_blocks
; i
++) {
2057 p
->consumed
+= load_cache_entry_block(p
->istate
, p
->ce_mem_pool
,
2058 p
->offset
, p
->ieot
->entries
[i
].nr
, p
->mmap
, p
->ieot
->entries
[i
].offset
, NULL
);
2059 p
->offset
+= p
->ieot
->entries
[i
].nr
;
2064 static unsigned long load_cache_entries_threaded(struct index_state
*istate
, const char *mmap
, size_t mmap_size
,
2065 unsigned long src_offset
, int nr_threads
, struct index_entry_offset_table
*ieot
)
2067 int i
, offset
, ieot_blocks
, ieot_start
, err
;
2068 struct load_cache_entries_thread_data
*data
;
2069 unsigned long consumed
= 0;
2071 /* a little sanity checking */
2072 if (istate
->name_hash_initialized
)
2073 BUG("the name hash isn't thread safe");
2075 mem_pool_init(&istate
->ce_mem_pool
, 0);
2077 /* ensure we have no more threads than we have blocks to process */
2078 if (nr_threads
> ieot
->nr
)
2079 nr_threads
= ieot
->nr
;
2080 data
= xcalloc(nr_threads
, sizeof(*data
));
2082 offset
= ieot_start
= 0;
2083 ieot_blocks
= DIV_ROUND_UP(ieot
->nr
, nr_threads
);
2084 for (i
= 0; i
< nr_threads
; i
++) {
2085 struct load_cache_entries_thread_data
*p
= &data
[i
];
2088 if (ieot_start
+ ieot_blocks
> ieot
->nr
)
2089 ieot_blocks
= ieot
->nr
- ieot_start
;
2095 p
->ieot_start
= ieot_start
;
2096 p
->ieot_blocks
= ieot_blocks
;
2098 /* create a mem_pool for each thread */
2100 for (j
= p
->ieot_start
; j
< p
->ieot_start
+ p
->ieot_blocks
; j
++)
2101 nr
+= p
->ieot
->entries
[j
].nr
;
2102 if (istate
->version
== 4) {
2103 mem_pool_init(&p
->ce_mem_pool
,
2104 estimate_cache_size_from_compressed(nr
));
2106 mem_pool_init(&p
->ce_mem_pool
,
2107 estimate_cache_size(mmap_size
, nr
));
2110 err
= pthread_create(&p
->pthread
, NULL
, load_cache_entries_thread
, p
);
2112 die(_("unable to create load_cache_entries thread: %s"), strerror(err
));
2114 /* increment by the number of cache entries in the ieot block being processed */
2115 for (j
= 0; j
< ieot_blocks
; j
++)
2116 offset
+= ieot
->entries
[ieot_start
+ j
].nr
;
2117 ieot_start
+= ieot_blocks
;
2120 for (i
= 0; i
< nr_threads
; i
++) {
2121 struct load_cache_entries_thread_data
*p
= &data
[i
];
2123 err
= pthread_join(p
->pthread
, NULL
);
2125 die(_("unable to join load_cache_entries thread: %s"), strerror(err
));
2126 mem_pool_combine(istate
->ce_mem_pool
, p
->ce_mem_pool
);
2127 consumed
+= p
->consumed
;
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
	int fd;
	struct stat st;
	unsigned long src_offset;
	const struct cache_header *hdr;
	const char *mmap;
	size_t mmap_size;
	struct load_index_extensions p;
	size_t extension_offset = 0;
	int nr_threads, cpus;
	struct index_entry_offset_table *ieot = NULL;

	if (istate->initialized)
		return istate->cache_nr;

	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		if (!must_exist && errno == ENOENT)
			return 0;
		die_errno("%s: index file open failed", path);
	}

	if (fstat(fd, &st))
		die_errno("cannot stat the open index");

	mmap_size = xsize_t(st.st_size);
	if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		die("index file smaller than expected");

	mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mmap == MAP_FAILED)
		die_errno("unable to map index file");
	close(fd);

	hdr = (const struct cache_header *)mmap;
	if (verify_hdr(hdr, mmap_size) < 0)
		goto unmap;

	hashcpy(istate->oid.hash, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
	istate->version = ntohl(hdr->hdr_version);
	istate->cache_nr = ntohl(hdr->hdr_entries);
	istate->cache_alloc = alloc_nr(istate->cache_nr);
	istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
	istate->initialized = 1;

	p.istate = istate;
	p.mmap = mmap;
	p.mmap_size = mmap_size;

	src_offset = sizeof(*hdr);

	if (git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	/* TODO: does creating more threads than cores help? */
	if (!nr_threads) {
		nr_threads = istate->cache_nr / THREAD_COST;
		cpus = online_cpus();
		if (nr_threads > cpus)
			nr_threads = cpus;
	}

	if (!HAVE_THREADS)
		nr_threads = 1;

	if (nr_threads > 1) {
		extension_offset = read_eoie_extension(mmap, mmap_size);
		if (extension_offset) {
			int err;

			p.src_offset = extension_offset;
			err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
			if (err)
				die(_("unable to create load_index_extensions thread: %s"), strerror(err));

			nr_threads--;
		}
	}

	/*
	 * Locate and read the index entry offset table so that we can use it
	 * to multi-thread the reading of the cache entries.
	 */
	if (extension_offset && nr_threads > 1)
		ieot = read_ieot_extension(mmap, mmap_size, extension_offset);

	if (ieot) {
		src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, src_offset, nr_threads, ieot);
		free(ieot);
	} else {
		src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
	}

	istate->timestamp.sec = st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);

	/* if we created a thread, join it otherwise load the extensions on the primary thread */
	if (extension_offset) {
		int ret = pthread_join(p.pthread, NULL);
		if (ret)
			die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
	} else {
		p.src_offset = src_offset;
		load_index_extensions(&p);
	}

	munmap((void *)mmap, mmap_size);
	return istate->cache_nr;

unmap:
	munmap((void *)mmap, mmap_size);
	die("index file corrupt");
}

/*
 * Signal that the shared index is used by updating its mtime.
 *
 * This way, shared index can be removed if they have not been used
 * for some time.
 */
static void freshen_shared_index(const char *shared_index, int warn)
{
	if (!check_and_freshen_file(shared_index, 1) && warn)
		warning("could not freshen shared index '%s'", shared_index);
}

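/*
 * Read the index at `path` into `istate`.  If it turns out to be a
 * split index, also read the shared "sharedindex.<oid>" file from
 * `gitdir` into split_index->base and merge the two.
 */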
int read_index_from(struct index_state *istate, const char *path,
		    const char *gitdir)
{
	struct split_index *split_index;
	int ret;
	const char *base_oid_hex;
	char *base_path;

	/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
	if (istate->initialized)
		return istate->cache_nr;

	trace_performance_enter();
	ret = do_read_index(istate, path, 0);
	trace_performance_leave("read cache %s", path);

	split_index = istate->split_index;
	if (!split_index || is_null_oid(&split_index->base_oid)) {
		post_read_index_from(istate);
		return ret;
	}

	trace_performance_enter();
	if (split_index->base)
		discard_index(split_index->base);
	else
		split_index->base = xcalloc(1, sizeof(*split_index->base));

	base_oid_hex = oid_to_hex(&split_index->base_oid);
	base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
	ret = do_read_index(split_index->base, base_path, 1);
	if (!oideq(&split_index->base_oid, &split_index->base->oid))
		die("broken index, expect %s in %s, got %s",
		    base_oid_hex, base_path,
		    oid_to_hex(&split_index->base->oid));

	freshen_shared_index(base_path, 0);
	merge_base_index(istate);
	post_read_index_from(istate);
	trace_performance_leave("read cache %s", base_path);
	free(base_path);
	return ret;
}

int is_index_unborn(struct index_state *istate)
{
	return (!istate->cache_nr && !istate->timestamp.sec);
}

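/*
 * Drop all in-memory state of this index: entries, name hash, cache
 * tree, split index, untracked cache and the cache-entry mem_pool.
 */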
int discard_index(struct index_state *istate)
{
	/*
	 * Cache entries in istate->cache[] should have been allocated
	 * from the memory pool associated with this index, or from an
	 * associated split_index. There is no need to free individual
	 * cache entries. validate_cache_entries can detect when this
	 * assertion does not hold.
	 */
	validate_cache_entries(istate);

	resolve_undo_clear_index(istate);
	istate->cache_nr = 0;
	istate->cache_changed = 0;
	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	free_name_hash(istate);
	cache_tree_free(&(istate->cache_tree));
	istate->initialized = 0;
	FREE_AND_NULL(istate->cache);
	istate->cache_alloc = 0;
	discard_split_index(istate);
	free_untracked_cache(istate->untracked);
	istate->untracked = NULL;

	if (istate->ce_mem_pool) {
		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
		istate->ce_mem_pool = NULL;
	}

	return 0;
}

/*
 * Validate the cache entries of this index.
 * All cache entries associated with this index
 * should have been allocated by the memory pool
 * associated with this index, or by a referenced
 * split index.
 */
void validate_cache_entries(const struct index_state *istate)
{
	int i;

	if (!should_validate_cache_entries() || !istate || !istate->initialized)
		return;

	for (i = 0; i < istate->cache_nr; i++) {
		if (!istate) {
			die("internal error: cache entry is not allocated from expected memory pool");
		} else if (!istate->ce_mem_pool ||
			!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
			if (!istate->split_index ||
				!istate->split_index->base ||
				!istate->split_index->base->ce_mem_pool ||
				!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
				die("internal error: cache entry is not allocated from expected memory pool");
			}
		}
	}

	if (istate->split_index)
		validate_cache_entries(istate->split_index->base);
}

int unmerged_index(const struct index_state *istate)
{
	int i;
	for (i = 0; i < istate->cache_nr; i++) {
		if (ce_stage(istate->cache[i]))
			return 1;
	}
	return 0;
}

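/*
 * Report whether the index differs from the given tree (or from HEAD
 * when tree is NULL); when `sb` is non-NULL, append the names of the
 * changed paths to it, separated by spaces.
 */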
int index_has_changes(struct index_state *istate,
		      struct tree *tree,
		      struct strbuf *sb)
{
	struct object_id cmp;
	int i;

	if (istate != &the_index) {
		BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first.");
	}
	if (tree)
		cmp = tree->object.oid;
	if (tree || !get_oid_tree("HEAD", &cmp)) {
		struct diff_options opt;

		repo_diff_setup(the_repository, &opt);
		opt.flags.exit_with_status = 1;
		if (!sb)
			opt.flags.quick = 1;
		do_diff_cache(&cmp, &opt);
		diffcore_std(&opt);
		for (i = 0; sb && i < diff_queued_diff.nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
		}
		diff_flush(&opt);
		return opt.flags.has_changes != 0;
	} else {
		for (i = 0; sb && i < istate->cache_nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, istate->cache[i]->name);
		}
		return !!istate->cache_nr;
	}
}

#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;

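/*
 * The helpers below buffer the index being written in write_buffer
 * and feed the same bytes to the running hash, so the trailing
 * checksum always covers exactly what was written to the file.
 */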
static int ce_write_flush(git_hash_ctx *context, int fd)
{
	unsigned int buffered = write_buffer_len;
	if (buffered) {
		the_hash_algo->update_fn(context, write_buffer, buffered);
		if (write_in_full(fd, write_buffer, buffered) < 0)
			return -1;
		write_buffer_len = 0;
	}
	return 0;
}

static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
{
	while (len) {
		unsigned int buffered = write_buffer_len;
		unsigned int partial = WRITE_BUFFER_SIZE - buffered;
		if (partial > len)
			partial = len;
		memcpy(write_buffer + buffered, data, partial);
		buffered += partial;
		if (buffered == WRITE_BUFFER_SIZE) {
			write_buffer_len = buffered;
			if (ce_write_flush(context, fd))
				return -1;
			buffered = 0;
		}
		write_buffer_len = buffered;
		len -= partial;
		data = (char *) data + partial;
	}
	return 0;
}

static int write_index_ext_header(git_hash_ctx *context, git_hash_ctx *eoie_context,
				  int fd, unsigned int ext, unsigned int sz)
{
	ext = htonl(ext);
	sz = htonl(sz);
	if (eoie_context) {
		the_hash_algo->update_fn(eoie_context, &ext, 4);
		the_hash_algo->update_fn(eoie_context, &sz, 4);
	}
	return ((ce_write(context, fd, &ext, 4) < 0) ||
		(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}

static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
{
	unsigned int left = write_buffer_len;

	if (left) {
		write_buffer_len = 0;
		the_hash_algo->update_fn(context, write_buffer, left);
	}

	/* Flush first if not enough space for hash signature */
	if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
		if (write_in_full(fd, write_buffer, left) < 0)
			return -1;
		left = 0;
	}

	/* Append the hash signature at the end */
	the_hash_algo->final_fn(write_buffer + left, context);
	hashcpy(hash, write_buffer + left);
	left += the_hash_algo->rawsz;
	return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}

static void ce_smudge_racily_clean_entry(struct index_state *istate,
					 struct cache_entry *ce)
{
	/*
	 * The only thing we care about in this function is to smudge the
	 * falsely clean entry due to touch-update-touch race, so we leave
	 * everything else as they are.  We are called for entries whose
	 * ce_stat_data.sd_mtime match the index file mtime.
	 *
	 * Note that this actually does not do much for gitlinks, for
	 * which ce_match_stat_basic() always goes to the actual
	 * contents.  The caller checks with is_racy_timestamp() which
	 * always says "no" for gitlinks, so we are not called for them ;-)
	 */
	struct stat st;

	if (lstat(ce->name, &st) < 0)
		return;
	if (ce_match_stat_basic(ce, &st))
		return;
	if (ce_modified_check_fs(istate, ce, &st)) {
		/* This is "racily clean"; smudge it.  Note that this
		 * is a tricky code.  At first glance, it may appear
		 * that it can break with this sequence:
		 *
		 * $ echo xyzzy >frotz
		 * $ git-update-index --add frotz
		 * $ : >frotz
		 * $ sleep 3
		 * $ echo filfre >nitfol
		 * $ git-update-index --add nitfol
		 *
		 * but it does not.  When the second update-index runs,
		 * it notices that the entry "frotz" has the same timestamp
		 * as index, and if we were to smudge it by resetting its
		 * size to zero here, then the object name recorded
		 * in index is the 6-byte file but the cached stat information
		 * becomes zero --- which would then match what we would
		 * obtain from the filesystem next time we stat("frotz").
		 *
		 * However, the second update-index, before calling
		 * this function, notices that the cached size is 6
		 * bytes and what is on the filesystem is an empty
		 * file, and never calls us, so the cached size information
		 * for "frotz" stays 6 which does not match the filesystem.
		 */
		ce->ce_stat_data.sd_size = 0;
	}
}

/* Copy miscellaneous fields but not the name */
static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
				       struct cache_entry *ce)
{
	short flags;

	ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
	ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
	ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
	ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
	ondisk->dev  = htonl(ce->ce_stat_data.sd_dev);
	ondisk->ino  = htonl(ce->ce_stat_data.sd_ino);
	ondisk->mode = htonl(ce->ce_mode);
	ondisk->uid  = htonl(ce->ce_stat_data.sd_uid);
	ondisk->gid  = htonl(ce->ce_stat_data.sd_gid);
	ondisk->size = htonl(ce->ce_stat_data.sd_size);
	hashcpy(ondisk->sha1, ce->oid.hash);

	flags = ce->ce_flags & ~CE_NAMEMASK;
	flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
	ondisk->flags = htons(flags);
	if (ce->ce_flags & CE_EXTENDED) {
		struct ondisk_cache_entry_extended *ondisk2;
		ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
		ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
	}
}

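/*
 * Write one on-disk entry.  With a version 4 index (previous_name is
 * non-NULL) the path is stored as a varint "strip this many bytes
 * from the previous name" followed by the new suffix, instead of the
 * full NUL-padded name used by version 2/3.
 */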
static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
			  struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
	int size;
	int result;
	unsigned int saved_namelen;
	int stripped_name = 0;
	static unsigned char padding[8] = { 0x00 };

	if (ce->ce_flags & CE_STRIP_NAME) {
		saved_namelen = ce_namelen(ce);
		ce->ce_namelen = 0;
		stripped_name = 1;
	}

	if (ce->ce_flags & CE_EXTENDED)
		size = offsetof(struct ondisk_cache_entry_extended, name);
	else
		size = offsetof(struct ondisk_cache_entry, name);

	if (!previous_name) {
		int len = ce_namelen(ce);
		copy_cache_entry_to_ondisk(ondisk, ce);
		result = ce_write(c, fd, ondisk, size);
		if (!result)
			result = ce_write(c, fd, ce->name, len);
		if (!result)
			result = ce_write(c, fd, padding, align_padding_size(size, len));
	} else {
		int common, to_remove, prefix_size;
		unsigned char to_remove_vi[16];
		for (common = 0;
		     (ce->name[common] &&
		      common < previous_name->len &&
		      ce->name[common] == previous_name->buf[common]);
		     common++)
			; /* still matching */
		to_remove = previous_name->len - common;
		prefix_size = encode_varint(to_remove, to_remove_vi);

		copy_cache_entry_to_ondisk(ondisk, ce);
		result = ce_write(c, fd, ondisk, size);
		if (!result)
			result = ce_write(c, fd, to_remove_vi, prefix_size);
		if (!result)
			result = ce_write(c, fd, ce->name + common, ce_namelen(ce) - common);
		if (!result)
			result = ce_write(c, fd, padding, 1);

		strbuf_splice(previous_name, common, to_remove,
			      ce->name + common, ce_namelen(ce) - common);
	}
	if (stripped_name) {
		ce->ce_namelen = saved_namelen;
		ce->ce_flags &= ~CE_STRIP_NAME;
	}

	return result;
}

/*
 * This function verifies if index_state has the correct sha1 of the
 * index file.  Don't die if we have any other failure, just return 0.
 */
static int verify_index_from(const struct index_state *istate, const char *path)
{
	int fd;
	ssize_t n;
	struct stat st;
	unsigned char hash[GIT_MAX_RAWSZ];

	if (!istate->initialized)
		return 0;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;

	if (fstat(fd, &st))
		goto out;

	if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		goto out;

	n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
	if (n != the_hash_algo->rawsz)
		goto out;

	if (!hasheq(istate->oid.hash, hash))
		goto out;

	close(fd);
	return 1;

out:
	close(fd);
	return 0;
}

static int verify_index(const struct index_state *istate)
{
	return verify_index_from(istate, get_index_file());
}

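/*
 * Return 1 if any entry's cached mtime is not older than the index's
 * own timestamp; such entries are "racily clean" and their stat data
 * alone cannot prove the working tree file is unchanged.
 */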
static int has_racy_timestamp(struct index_state *istate)
{
	int entries = istate->cache_nr;
	int i;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (is_racy_timestamp(istate, ce))
			return 1;
	}
	return 0;
}

void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
{
	if ((istate->cache_changed || has_racy_timestamp(istate)) &&
	    verify_index(istate))
		write_locked_index(istate, lockfile, COMMIT_LOCK);
	else
		rollback_lock_file(lockfile);
}

static int record_eoie(void)
{
	int val;

	if (!git_config_get_bool("index.recordendofindexentries", &val))
		return val;

	/*
	 * As a convenience, the end of index entries extension
	 * used for threading is written by default if the user
	 * explicitly requested threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}

static int record_ieot(void)
{
	int val;

	if (!git_config_get_bool("index.recordoffsettable", &val))
		return val;

	/*
	 * As a convenience, the offset table used for threading is
	 * written by default if the user explicitly requested
	 * threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}

/*
 * On success, `tempfile` is closed. If it is the temporary file
 * of a `struct lock_file`, we will therefore effectively perform
 * a 'close_lock_file_gently()`. Since that is an implementation
 * detail of lockfiles, callers of `do_write_index()` should not
 * rely on it.
 */
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
			  int strip_extensions)
{
	uint64_t start = getnanotime();
	int newfd = tempfile->fd;
	git_hash_ctx c, eoie_c;
	struct cache_header hdr;
	int i, err = 0, removed, extended, hdr_version;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	struct stat st;
	struct ondisk_cache_entry_extended ondisk;
	struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
	int drop_cache_tree = istate->drop_cache_tree;
	off_t offset;
	int ieot_entries = 1;
	struct index_entry_offset_table *ieot = NULL;
	int nr, nr_threads;

	for (i = removed = extended = 0; i < entries; i++) {
		if (cache[i]->ce_flags & CE_REMOVE)
			removed++;

		/* reduce extended entries if possible */
		cache[i]->ce_flags &= ~CE_EXTENDED;
		if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
			extended++;
			cache[i]->ce_flags |= CE_EXTENDED;
		}
	}

	if (!istate->version) {
		istate->version = get_index_format_default();
		if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
			init_split_index(istate);
	}

	/* demote version 3 to version 2 when the latter suffices */
	if (istate->version == 3 || istate->version == 2)
		istate->version = extended ? 3 : 2;

	hdr_version = istate->version;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(hdr_version);
	hdr.hdr_entries = htonl(entries - removed);

	the_hash_algo->init_fn(&c);
	if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
		return -1;

	if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	if (nr_threads != 1 && record_ieot()) {
		int ieot_blocks, cpus;

		/*
		 * ensure default number of ieot blocks maps evenly to the
		 * default number of threads that will process them leaving
		 * room for the thread to load the index extensions.
		 */
		if (!nr_threads) {
			ieot_blocks = istate->cache_nr / THREAD_COST;
			cpus = online_cpus();
			if (ieot_blocks > cpus - 1)
				ieot_blocks = cpus - 1;
		} else {
			ieot_blocks = nr_threads;
			if (ieot_blocks > istate->cache_nr)
				ieot_blocks = istate->cache_nr;
		}

		/*
		 * no reason to write out the IEOT extension if we don't
		 * have enough blocks to utilize multi-threading
		 */
		if (ieot_blocks > 1) {
			ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
				+ (ieot_blocks * sizeof(struct index_entry_offset)));
			ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
		}
	}

	offset = lseek(newfd, 0, SEEK_CUR);
	if (offset < 0) {
		free(ieot);
		return -1;
	}
	offset += write_buffer_len;
	nr = 0;
	previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (ce->ce_flags & CE_REMOVE)
			continue;
		if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
			ce_smudge_racily_clean_entry(istate, ce);
		if (is_null_oid(&ce->oid)) {
			static const char msg[] = "cache entry has null sha1: %s";
			static int allow = -1;

			if (allow < 0)
				allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
			if (allow)
				warning(msg, ce->name);
			else
				err = error(msg, ce->name);

			drop_cache_tree = 1;
		}
		if (ieot && i && (i % ieot_entries == 0)) {
			ieot->entries[ieot->nr].nr = nr;
			ieot->entries[ieot->nr].offset = offset;
			ieot->nr++;
			/*
			 * If we have a V4 index, set the first byte to an invalid
			 * character to ensure there is nothing common with the previous
			 * entry
			 */
			if (previous_name)
				previous_name->buf[0] = 0;
			nr = 0;
			offset = lseek(newfd, 0, SEEK_CUR);
			if (offset < 0) {
				free(ieot);
				return -1;
			}
			offset += write_buffer_len;
		}
		if (ce_write_entry(&c, newfd, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
			err = -1;

		if (err)
			break;
		nr++;
	}
	if (ieot && nr) {
		ieot->entries[ieot->nr].nr = nr;
		ieot->entries[ieot->nr].offset = offset;
		ieot->nr++;
	}
	strbuf_release(&previous_name_buf);

	if (err) {
		free(ieot);
		return err;
	}

	/* Write extension data here */
	offset = lseek(newfd, 0, SEEK_CUR);
	if (offset < 0) {
		free(ieot);
		return -1;
	}
	offset += write_buffer_len;
	the_hash_algo->init_fn(&eoie_c);

	/*
	 * Lets write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
	 * can minimize the number of extensions we have to scan through to
	 * find it during load.  Write it out regardless of the
	 * strip_extensions parameter as we need it when loading the shared
	 * index.
	 */
	if (ieot) {
		struct strbuf sb = STRBUF_INIT;

		write_ieot_extension(&sb, ieot);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		free(ieot);
		if (err)
			return -1;
	}

	if (!strip_extensions && istate->split_index) {
		struct strbuf sb = STRBUF_INIT;

		err = write_link_extension(&sb, istate) < 0 ||
			write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_LINK,
					       sb.len) < 0 ||
			ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && !drop_cache_tree && istate->cache_tree) {
		struct strbuf sb = STRBUF_INIT;

		cache_tree_write(&sb, istate->cache_tree);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_TREE, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->resolve_undo) {
		struct strbuf sb = STRBUF_INIT;

		resolve_undo_write(&sb, istate->resolve_undo);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_RESOLVE_UNDO,
					     sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->untracked) {
		struct strbuf sb = STRBUF_INIT;

		write_untracked_extension(&sb, istate->untracked);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_UNTRACKED,
					     sb.len) < 0 ||
			ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->fsmonitor_last_update) {
		struct strbuf sb = STRBUF_INIT;

		write_fsmonitor_extension(&sb, istate);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}

	/*
	 * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
	 * so that it can be found and processed before all the index entries are
	 * read.  Write it out regardless of the strip_extensions parameter as we need it
	 * when loading the shared index.
	 */
	if (offset && record_eoie()) {
		struct strbuf sb = STRBUF_INIT;

		write_eoie_extension(&sb, &eoie_c, offset);
		err = write_index_ext_header(&c, NULL, newfd, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}

	if (ce_flush(&c, newfd, istate->oid.hash))
		return -1;
	if (close_tempfile_gently(tempfile)) {
		error(_("could not close '%s'"), tempfile->filename.buf);
		return -1;
	}
	if (stat(tempfile->filename.buf, &st))
		return -1;
	istate->timestamp.sec = (unsigned int)st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);
	trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
	return 0;
}

void set_alternate_index_output(const char *name)
{
	alternate_index_output = name;
}

static int commit_locked_index(struct lock_file *lk)
{
	if (alternate_index_output)
		return commit_lock_file_to(lk, alternate_index_output);
	else
		return commit_lock_file(lk);
}

static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
				 unsigned flags)
{
	int ret = do_write_index(istate, lock->tempfile, 0);
	if (ret)
		return ret;
	if (flags & COMMIT_LOCK)
		return commit_locked_index(lock);
	return close_lock_file_gently(lock);
}

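/*
 * Write the split index: the prepare/finish calls let the
 * split-index code mark which entries are replaced or added relative
 * to the shared base before the link extension is written.
 */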
static int write_split_index(struct index_state *istate,
			     struct lock_file *lock,
			     unsigned flags)
{
	int ret;
	prepare_to_write_split_index(istate);
	ret = do_write_locked_index(istate, lock, flags);
	finish_writing_split_index(istate);
	return ret;
}

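/*
 * Shared index files that have not been used for longer than
 * splitindex.sharedIndexExpire (default "2.weeks.ago") are considered
 * expired and may be removed by clean_shared_index_files().
 */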
static const char *shared_index_expire = "2.weeks.ago";

static unsigned long get_shared_index_expire_date(void)
{
	static unsigned long shared_index_expire_date;
	static int shared_index_expire_date_prepared;

	if (!shared_index_expire_date_prepared) {
		git_config_get_expiry("splitindex.sharedindexexpire",
				      &shared_index_expire);
		shared_index_expire_date = approxidate(shared_index_expire);
		shared_index_expire_date_prepared = 1;
	}

	return shared_index_expire_date;
}

static int should_delete_shared_index(const char *shared_index_path)
{
	struct stat st;
	unsigned long expiration;

	/* Check timestamp */
	expiration = get_shared_index_expire_date();
	if (!expiration)
		return 0;
	if (stat(shared_index_path, &st))
		return error_errno(_("could not stat '%s'"), shared_index_path);
	if (st.st_mtime > expiration)
		return 0;

	return 1;
}

static int clean_shared_index_files(const char *current_hex)
{
	struct dirent *de;
	DIR *dir = opendir(get_git_dir());

	if (!dir)
		return error_errno(_("unable to open git dir: %s"), get_git_dir());

	while ((de = readdir(dir)) != NULL) {
		const char *sha1_hex;
		const char *shared_index_path;
		if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))
			continue;
		if (!strcmp(sha1_hex, current_hex))
			continue;
		shared_index_path = git_path("%s", de->d_name);
		if (should_delete_shared_index(shared_index_path) > 0 &&
		    unlink(shared_index_path))
			warning_errno(_("unable to unlink: %s"), shared_index_path);
	}
	closedir(dir);

	return 0;
}

static int write_shared_index(struct index_state *istate,
			      struct tempfile **temp)
{
	struct split_index *si = istate->split_index;
	int ret;

	move_cache_to_base_index(istate);
	ret = do_write_index(si->base, *temp, 1);
	if (ret)
		return ret;
	ret = adjust_shared_perm(get_tempfile_path(*temp));
	if (ret) {
		error("cannot fix permission bits on %s", get_tempfile_path(*temp));
		return ret;
	}
	ret = rename_tempfile(temp,
			      git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
	if (!ret) {
		oidcpy(&si->base_oid, &si->base->oid);
		clean_shared_index_files(oid_to_hex(&si->base->oid));
	}

	return ret;
}

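/*
 * Decide whether too many entries live outside the shared index.
 * Controlled by splitindex.maxPercentChange (read via
 * git_config_get_max_percent_split_change()); when the percentage of
 * not-shared entries exceeds it, a new shared index is written.
 */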
static const int default_max_percent_split_change = 20;

static int too_many_not_shared_entries(struct index_state *istate)
{
	int i, not_shared = 0;
	int max_split = git_config_get_max_percent_split_change();

	switch (max_split) {
	case -1:
		/* not or badly configured: use the default value */
		max_split = default_max_percent_split_change;
		break;
	case 0:
		return 1; /* 0% means always write a new shared index */
	case 100:
		return 0; /* 100% means never write a new shared index */
	default:
		break; /* just use the configured value */
	}

	/* Count not shared entries */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (!ce->index)
			not_shared++;
	}

	return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;
}

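/*
 * Main entry point for writing the index under a lock file.  Without
 * split index (or when a non-extension change forces it) the whole
 * index is written in place; otherwise a new shared index may be
 * written first and the split index on top of it.
 */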
int write_locked_index(struct index_state *istate, struct lock_file *lock,
		       unsigned flags)
{
	int new_shared_index, ret;
	struct split_index *si = istate->split_index;

	if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
		cache_tree_verify(istate);

	if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
		if (flags & COMMIT_LOCK)
			rollback_lock_file(lock);
		return 0;
	}

	if (istate->fsmonitor_last_update)
		fill_fsmonitor_bitmap(istate);

	if (!si || alternate_index_output ||
	    (istate->cache_changed & ~EXTMASK)) {
		if (si)
			oidclr(&si->base_oid);
		ret = do_write_locked_index(istate, lock, flags);
		goto out;
	}

	if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
		int v = si->base_oid.hash[0];
		if ((v & 15) < 6)
			istate->cache_changed |= SPLIT_INDEX_ORDERED;
	}
	if (too_many_not_shared_entries(istate))
		istate->cache_changed |= SPLIT_INDEX_ORDERED;

	new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;

	if (new_shared_index) {
		struct tempfile *temp;
		int saved_errno;

		/* Same initial permissions as the main .git/index file */
		temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
		if (!temp) {
			oidclr(&si->base_oid);
			ret = do_write_locked_index(istate, lock, flags);
			goto out;
		}
		ret = write_shared_index(istate, &temp);

		saved_errno = errno;
		if (is_tempfile_active(temp))
			delete_tempfile(&temp);
		errno = saved_errno;

		if (ret)
			goto out;
	}

	ret = write_split_index(istate, lock, flags);

	/* Freshen the shared index only if the split-index was written */
	if (!ret && !new_shared_index) {
		const char *shared_index = git_path("sharedindex.%s",
						    oid_to_hex(&si->base_oid));
		freshen_shared_index(shared_index, 1);
	}

out:
	if (flags & COMMIT_LOCK)
		rollback_lock_file(lock);
	return ret;
}

/*
 * Read the index file that is potentially unmerged into given
 * index_state, dropping any unmerged entries to stage #0 (potentially
 * resulting in a path appearing as both a file and a directory in the
 * index; the caller is responsible to clear out the extra entries
 * before writing the index to a tree).  Returns true if the index is
 * unmerged.  Callers who want to refuse to work from an unmerged
 * state can call this and check its return value, instead of calling
 * read_cache().
 */
int read_index_unmerged(struct index_state *istate)
{
	int i;
	int unmerged = 0;

	read_index(istate);
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct cache_entry *new_ce;
		int len;

		if (!ce_stage(ce))
			continue;
		unmerged = 1;
		len = ce_namelen(ce);
		new_ce = make_empty_cache_entry(istate, len);
		memcpy(new_ce->name, ce->name, len);
		new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
		new_ce->ce_namelen = len;
		new_ce->ce_mode = ce->ce_mode;
		if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
			return error("%s: cannot drop to stage #0",
				     new_ce->name);
	}
	return unmerged;
}

/*
 * Returns 1 if the path is an "other" path with respect to
 * the index; that is, the path is not mentioned in the index at all,
 * either as a file, a directory with some files in the index,
 * or as an unmerged entry.
 *
 * We helpfully remove a trailing "/" from directories so that
 * the output of read_directory can be used as-is.
 */
int index_name_is_other(const struct index_state *istate, const char *name,
			int namelen)
{
	int pos;
	if (namelen && name[namelen - 1] == '/')
		namelen--;
	pos = index_name_pos(istate, name, namelen);
	if (0 <= pos)
		return 0;	/* exact match */
	pos = -pos-1;
	if (pos < istate->cache_nr) {
		struct cache_entry *ce = istate->cache[pos];
		if (ce_namelen(ce) == namelen &&
		    !memcmp(ce->name, name, namelen))
			return 0; /* Yup, this one exists unmerged */
	}
	return 1;
}

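/*
 * Look up `path` in the index (preferring stage #2 during a merge)
 * and return the contents of its blob; *size receives the length.
 * Returns NULL if the path is missing or does not name a blob.
 */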
void *read_blob_data_from_index(const struct index_state *istate,
				const char *path, unsigned long *size)
{
	int pos, len;
	unsigned long sz;
	enum object_type type;
	void *data;

	len = strlen(path);
	pos = index_name_pos(istate, path, len);
	if (pos < 0) {
		/*
		 * We might be in the middle of a merge, in which
		 * case we would read stage #2 (ours).
		 */
		int i;
		for (i = -pos - 1;
		     (pos < 0 && i < istate->cache_nr &&
		      !strcmp(istate->cache[i]->name, path));
		     i++)
			if (ce_stage(istate->cache[i]) == 2)
				pos = i;
	}
	if (pos < 0)
		return NULL;
	data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
	if (!data || type != OBJ_BLOB) {
		free(data);
		return NULL;
	}
	if (size)
		*size = sz;
	return data;
}

void stat_validity_clear(struct stat_validity *sv)
{
	FREE_AND_NULL(sv->sd);
}

int stat_validity_check(struct stat_validity *sv, const char *path)
{
	struct stat st;

	if (stat(path, &st) < 0)
		return sv->sd == NULL;
	if (!sv->sd)
		return 0;
	return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
}

void stat_validity_update(struct stat_validity *sv, int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
		stat_validity_clear(sv);
	else {
		if (!sv->sd)
			sv->sd = xcalloc(1, sizeof(struct stat_data));
		fill_stat_data(sv->sd, &st);
	}
}

void move_index_extensions(struct index_state *dst, struct index_state *src)
{
	dst->untracked = src->untracked;
	src->untracked = NULL;
	dst->cache_tree = src->cache_tree;
	src->cache_tree = NULL;
}

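/*
 * Duplicate a cache entry into storage owned by `istate`, preserving
 * the new copy's own mem_pool_allocated flag so it is freed (or not)
 * correctly later.
 */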
struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
				    struct index_state *istate)
{
	unsigned int size = ce_size(ce);
	int mem_pool_allocated;
	struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
	mem_pool_allocated = new_entry->mem_pool_allocated;

	memcpy(new_entry, ce, size);
	new_entry->mem_pool_allocated = mem_pool_allocated;
	return new_entry;
}

void discard_cache_entry(struct cache_entry *ce)
{
	if (ce && should_validate_cache_entries())
		memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));

	if (ce && ce->mem_pool_allocated)
		return;

	free(ce);
}

int should_validate_cache_entries(void)
{
	static int validate_index_cache_entries = -1;

	if (validate_index_cache_entries < 0) {
		if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
			validate_index_cache_entries = 1;
		else
			validate_index_cache_entries = 0;
	}

	return validate_index_cache_entries;
}

#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */

static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
{
	/*
	 * The end of index entries (EOIE) extension is guaranteed to be last
	 * so that it can be found by scanning backwards from the EOF.
	 *
	 * "EOIE"
	 * <4-byte length>
	 * <4-byte offset>
	 * <hash>
	 */
	const char *index, *eoie;
	uint32_t extsize;
	size_t offset, src_offset;
	unsigned char hash[GIT_MAX_RAWSZ];
	git_hash_ctx c;

	/* ensure we have an index big enough to contain an EOIE extension */
	if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
		return 0;

	/* validate the extension signature */
	index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
	if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
		return 0;
	index += sizeof(uint32_t);

	/* validate the extension size */
	extsize = get_be32(index);
	if (extsize != EOIE_SIZE)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * Validate the offset we're going to look for the first extension
	 * signature is after the index header and before the eoie extension.
	 */
	offset = get_be32(index);
	if (mmap + offset < mmap + sizeof(struct cache_header))
		return 0;
	if (mmap + offset >= eoie)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * The hash is computed over extension types and their sizes (but not
	 * their contents).  E.g. if we have "TREE" extension that is N-bytes
	 * long, "REUC" extension that is M-bytes long, followed by "EOIE",
	 * then the hash would be:
	 *
	 * SHA-1("TREE" + <binary representation of N> +
	 *	 "REUC" + <binary representation of M>)
	 */
	src_offset = offset;
	the_hash_algo->init_fn(&c);
	while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
		/* After an array of active_nr index entries,
		 * there can be arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
		uint32_t extsize;
		memcpy(&extsize, mmap + src_offset + 4, 4);
		extsize = ntohl(extsize);

		/* verify the extension size isn't so large it will wrap around */
		if (src_offset + 8 + extsize < src_offset)
			return 0;

		the_hash_algo->update_fn(&c, mmap + src_offset, 8);

		src_offset += 8;
		src_offset += extsize;
	}
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, (const unsigned char *)index))
		return 0;

	/* Validate that the extension offsets returned us back to the eoie extension. */
	if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
		return 0;

	return offset;
}

static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
{
	uint32_t buffer;
	unsigned char hash[GIT_MAX_RAWSZ];

	/* offset */
	put_be32(&buffer, offset);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* hash */
	the_hash_algo->final_fn(hash, eoie_context);
	strbuf_add(sb, hash, the_hash_algo->rawsz);
}

#define IEOT_VERSION	(1)

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
	const char *index = NULL;
	uint32_t extsize, ext_version;
	struct index_entry_offset_table *ieot;
	int i, nr;

	/* find the IEOT extension */
	if (!offset)
		return NULL;
	while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
		extsize = get_be32(mmap + offset + 4);
		if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
			index = mmap + offset + 4 + 4;
			break;
		}
		offset += 8;
		offset += extsize;
	}
	if (!index)
		return NULL;

	/* validate the version is IEOT_VERSION */
	ext_version = get_be32(index);
	if (ext_version != IEOT_VERSION) {
		error("invalid IEOT version %d", ext_version);
		return NULL;
	}
	index += sizeof(uint32_t);

	/* extension size - version bytes / bytes per entry */
	nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
	if (!nr) {
		error("invalid number of IEOT entries %d", nr);
		return NULL;
	}
	ieot = xmalloc(sizeof(struct index_entry_offset_table)
		       + (nr * sizeof(struct index_entry_offset)));
	ieot->nr = nr;
	for (i = 0; i < nr; i++) {
		ieot->entries[i].offset = get_be32(index);
		index += sizeof(uint32_t);
		ieot->entries[i].nr = get_be32(index);
		index += sizeof(uint32_t);
	}

	return ieot;
}

static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
	uint32_t buffer;
	int i;

	/* version */
	put_be32(&buffer, IEOT_VERSION);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* ieot */
	for (i = 0; i < ieot->nr; i++) {

		/* offset */
		put_be32(&buffer, ieot->entries[i].offset);
		strbuf_add(sb, &buffer, sizeof(uint32_t));

		/* count */
		put_be32(&buffer, ieot->entries[i].nr);
		strbuf_add(sb, &buffer, sizeof(uint32_t));