/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
/* The in-core index: sorted array of pointers into the mmap'ed index file. */
struct cache_entry **active_cache = NULL;

/*
 * mtime of the index file at the time it was read; entries whose ce_mtime
 * is not older than this are "racily clean" suspects (see ce_match_stat).
 */
static time_t index_file_timestamp;

/* Number of entries in use, allocated slots, and a dirty flag for writeback. */
unsigned int active_nr = 0, active_alloc = 0, active_cache_changed = 0;
13 * This only updates the "non-critical" parts of the directory
14 * cache, ie the parts that aren't tracked by GIT, and only used
15 * to validate the cache.
17 void fill_stat_cache_info(struct cache_entry
*ce
, struct stat
*st
)
19 ce
->ce_ctime
.sec
= htonl(st
->st_ctime
);
20 ce
->ce_mtime
.sec
= htonl(st
->st_mtime
);
22 ce
->ce_ctime
.nsec
= htonl(st
->st_ctim
.tv_nsec
);
23 ce
->ce_mtime
.nsec
= htonl(st
->st_mtim
.tv_nsec
);
25 ce
->ce_dev
= htonl(st
->st_dev
);
26 ce
->ce_ino
= htonl(st
->st_ino
);
27 ce
->ce_uid
= htonl(st
->st_uid
);
28 ce
->ce_gid
= htonl(st
->st_gid
);
29 ce
->ce_size
= htonl(st
->st_size
);
32 static int ce_compare_data(struct cache_entry
*ce
, struct stat
*st
)
35 int fd
= open(ce
->name
, O_RDONLY
);
38 unsigned char sha1
[20];
39 if (!index_fd(sha1
, fd
, st
, 0, NULL
))
40 match
= memcmp(sha1
, ce
->sha1
, 20);
46 static int ce_compare_link(struct cache_entry
*ce
, unsigned long expected_size
)
55 target
= xmalloc(expected_size
);
56 len
= readlink(ce
->name
, target
, expected_size
);
57 if (len
!= expected_size
) {
61 buffer
= read_sha1_file(ce
->sha1
, type
, &size
);
66 if (size
== expected_size
)
67 match
= memcmp(buffer
, target
, size
);
73 static int ce_modified_check_fs(struct cache_entry
*ce
, struct stat
*st
)
75 switch (st
->st_mode
& S_IFMT
) {
77 if (ce_compare_data(ce
, st
))
81 if (ce_compare_link(ce
, st
->st_size
))
90 static int ce_match_stat_basic(struct cache_entry
*ce
, struct stat
*st
)
92 unsigned int changed
= 0;
94 switch (ntohl(ce
->ce_mode
) & S_IFMT
) {
96 changed
|= !S_ISREG(st
->st_mode
) ? TYPE_CHANGED
: 0;
97 /* We consider only the owner x bit to be relevant for
100 if (trust_executable_bit
&&
101 (0100 & (ntohl(ce
->ce_mode
) ^ st
->st_mode
)))
102 changed
|= MODE_CHANGED
;
105 changed
|= !S_ISLNK(st
->st_mode
) ? TYPE_CHANGED
: 0;
108 die("internal error: ce_mode is %o", ntohl(ce
->ce_mode
));
110 if (ce
->ce_mtime
.sec
!= htonl(st
->st_mtime
))
111 changed
|= MTIME_CHANGED
;
112 if (ce
->ce_ctime
.sec
!= htonl(st
->st_ctime
))
113 changed
|= CTIME_CHANGED
;
117 * nsec seems unreliable - not all filesystems support it, so
118 * as long as it is in the inode cache you get right nsec
119 * but after it gets flushed, you get zero nsec.
121 if (ce
->ce_mtime
.nsec
!= htonl(st
->st_mtim
.tv_nsec
))
122 changed
|= MTIME_CHANGED
;
123 if (ce
->ce_ctime
.nsec
!= htonl(st
->st_ctim
.tv_nsec
))
124 changed
|= CTIME_CHANGED
;
127 if (ce
->ce_uid
!= htonl(st
->st_uid
) ||
128 ce
->ce_gid
!= htonl(st
->st_gid
))
129 changed
|= OWNER_CHANGED
;
130 if (ce
->ce_ino
!= htonl(st
->st_ino
))
131 changed
|= INODE_CHANGED
;
135 * st_dev breaks on network filesystems where different
136 * clients will have different views of what "device"
137 * the filesystem is on
139 if (ce
->ce_dev
!= htonl(st
->st_dev
))
140 changed
|= INODE_CHANGED
;
143 if (ce
->ce_size
!= htonl(st
->st_size
))
144 changed
|= DATA_CHANGED
;
149 int ce_match_stat(struct cache_entry
*ce
, struct stat
*st
)
151 unsigned int changed
= ce_match_stat_basic(ce
, st
);
154 * Within 1 second of this sequence:
155 * echo xyzzy >file && git-update-index --add file
156 * running this command:
158 * would give a falsely clean cache entry. The mtime and
159 * length match the cache, and other stat fields do not change.
161 * We could detect this at update-index time (the cache entry
162 * being registered/updated records the same time as "now")
163 * and delay the return from git-update-index, but that would
164 * effectively mean we can make at most one commit per second,
165 * which is not acceptable. Instead, we check cache entries
166 * whose mtime are the same as the index file timestamp more
167 * careful than others.
170 index_file_timestamp
&&
171 index_file_timestamp
<= ntohl(ce
->ce_mtime
.sec
))
172 changed
|= ce_modified_check_fs(ce
, st
);
177 int ce_modified(struct cache_entry
*ce
, struct stat
*st
)
179 int changed
, changed_fs
;
180 changed
= ce_match_stat(ce
, st
);
184 * If the mode or type has changed, there's no point in trying
185 * to refresh the entry - it's not going to match
187 if (changed
& (MODE_CHANGED
| TYPE_CHANGED
))
190 /* Immediately after read-tree or update-index --cacheinfo,
191 * the length field is zero. For other cases the ce_size
192 * should match the SHA1 recorded in the index entry.
194 if ((changed
& DATA_CHANGED
) && ce
->ce_size
!= htonl(0))
197 changed_fs
= ce_modified_check_fs(ce
, st
);
199 return changed
| changed_fs
;
203 int base_name_compare(const char *name1
, int len1
, int mode1
,
204 const char *name2
, int len2
, int mode2
)
206 unsigned char c1
, c2
;
207 int len
= len1
< len2
? len1
: len2
;
210 cmp
= memcmp(name1
, name2
, len
);
215 if (!c1
&& S_ISDIR(mode1
))
217 if (!c2
&& S_ISDIR(mode2
))
219 return (c1
< c2
) ? -1 : (c1
> c2
) ? 1 : 0;
222 int cache_name_compare(const char *name1
, int flags1
, const char *name2
, int flags2
)
224 int len1
= flags1
& CE_NAMEMASK
;
225 int len2
= flags2
& CE_NAMEMASK
;
226 int len
= len1
< len2
? len1
: len2
;
229 cmp
= memcmp(name1
, name2
, len
);
243 int cache_name_pos(const char *name
, int namelen
)
249 while (last
> first
) {
250 int next
= (last
+ first
) >> 1;
251 struct cache_entry
*ce
= active_cache
[next
];
252 int cmp
= cache_name_compare(name
, namelen
, ce
->name
, ntohs(ce
->ce_flags
));
264 /* Remove entry, return true if there are more entries to go.. */
265 int remove_cache_entry_at(int pos
)
267 active_cache_changed
= 1;
269 if (pos
>= active_nr
)
271 memmove(active_cache
+ pos
, active_cache
+ pos
+ 1, (active_nr
- pos
) * sizeof(struct cache_entry
*));
275 int remove_file_from_cache(const char *path
)
277 int pos
= cache_name_pos(path
, strlen(path
));
280 while (pos
< active_nr
&& !strcmp(active_cache
[pos
]->name
, path
))
281 remove_cache_entry_at(pos
);
285 int ce_same_name(struct cache_entry
*a
, struct cache_entry
*b
)
287 int len
= ce_namelen(a
);
288 return ce_namelen(b
) == len
&& !memcmp(a
->name
, b
->name
, len
);
291 int ce_path_match(const struct cache_entry
*ce
, const char **pathspec
)
293 const char *match
, *name
;
299 len
= ce_namelen(ce
);
301 while ((match
= *pathspec
++) != NULL
) {
302 int matchlen
= strlen(match
);
305 if (memcmp(name
, match
, matchlen
))
307 if (matchlen
&& name
[matchlen
-1] == '/')
309 if (name
[matchlen
] == '/' || !name
[matchlen
])
318 * Do we have another file that has the beginning components being a
319 * proper superset of the name we're trying to add?
321 static int has_file_name(const struct cache_entry
*ce
, int pos
, int ok_to_replace
)
324 int len
= ce_namelen(ce
);
325 int stage
= ce_stage(ce
);
326 const char *name
= ce
->name
;
328 while (pos
< active_nr
) {
329 struct cache_entry
*p
= active_cache
[pos
++];
331 if (len
>= ce_namelen(p
))
333 if (memcmp(name
, p
->name
, len
))
335 if (ce_stage(p
) != stage
)
337 if (p
->name
[len
] != '/')
342 remove_cache_entry_at(--pos
);
348 * Do we have another file with a pathname that is a proper
349 * subset of the name we're trying to add?
351 static int has_dir_name(const struct cache_entry
*ce
, int pos
, int ok_to_replace
)
354 int stage
= ce_stage(ce
);
355 const char *name
= ce
->name
;
356 const char *slash
= name
+ ce_namelen(ce
);
364 if (slash
<= ce
->name
)
369 pos
= cache_name_pos(name
, ntohs(create_ce_flags(len
, stage
)));
374 remove_cache_entry_at(pos
);
379 * Trivial optimization: if we find an entry that
380 * already matches the sub-directory, then we know
381 * we're ok, and we can exit.
384 while (pos
< active_nr
) {
385 struct cache_entry
*p
= active_cache
[pos
];
386 if ((ce_namelen(p
) <= len
) ||
387 (p
->name
[len
] != '/') ||
388 memcmp(p
->name
, name
, len
))
389 break; /* not our subdirectory */
390 if (ce_stage(p
) == stage
)
391 /* p is at the same stage as our entry, and
392 * is a subdirectory of what we are looking
393 * at, so we cannot have conflicts at our
394 * level or anything shorter.
/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added. Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out. Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(const struct cache_entry *ce, int pos, int ok_to_replace)
{
	/*
	 * We check if the path is a sub-path of a subsequent pathname
	 * first, since removing those will not change the position
	 * we are looking at ourselves.
	 */
	int retval = has_file_name(ce, pos, ok_to_replace);
	/*
	 * Then check if the path might have a clashing sub-directory
	 * before it.
	 */
	return retval + has_dir_name(ce, pos, ok_to_replace);
}
427 int add_cache_entry(struct cache_entry
*ce
, int option
)
430 int ok_to_add
= option
& ADD_CACHE_OK_TO_ADD
;
431 int ok_to_replace
= option
& ADD_CACHE_OK_TO_REPLACE
;
432 int skip_df_check
= option
& ADD_CACHE_SKIP_DFCHECK
;
433 pos
= cache_name_pos(ce
->name
, ntohs(ce
->ce_flags
));
435 /* existing match? Just replace it. */
437 active_cache_changed
= 1;
438 active_cache
[pos
] = ce
;
444 * Inserting a merged entry ("stage 0") into the index
445 * will always replace all non-merged entries..
447 if (pos
< active_nr
&& ce_stage(ce
) == 0) {
448 while (ce_same_name(active_cache
[pos
], ce
)) {
450 if (!remove_cache_entry_at(pos
))
458 if (!skip_df_check
&&
459 check_file_directory_conflict(ce
, pos
, ok_to_replace
)) {
462 pos
= cache_name_pos(ce
->name
, ntohs(ce
->ce_flags
));
466 /* Make sure the array is big enough .. */
467 if (active_nr
== active_alloc
) {
468 active_alloc
= alloc_nr(active_alloc
);
469 active_cache
= xrealloc(active_cache
, active_alloc
* sizeof(struct cache_entry
*));
475 memmove(active_cache
+ pos
+ 1, active_cache
+ pos
, (active_nr
- pos
- 1) * sizeof(ce
));
476 active_cache
[pos
] = ce
;
477 active_cache_changed
= 1;
481 static int verify_hdr(struct cache_header
*hdr
, unsigned long size
)
484 unsigned char sha1
[20];
486 if (hdr
->hdr_signature
!= htonl(CACHE_SIGNATURE
))
487 return error("bad signature");
488 if (hdr
->hdr_version
!= htonl(2))
489 return error("bad index version");
491 SHA1_Update(&c
, hdr
, size
- 20);
492 SHA1_Final(sha1
, &c
);
493 if (memcmp(sha1
, (void *)hdr
+ size
- 20, 20))
494 return error("bad index file sha1 signature");
502 unsigned long size
, offset
;
504 struct cache_header
*hdr
;
511 index_file_timestamp
= 0;
512 fd
= open(get_index_file(), O_RDONLY
);
516 die("index file open failed (%s)", strerror(errno
));
519 size
= 0; // avoid gcc warning
521 if (!fstat(fd
, &st
)) {
524 if (size
>= sizeof(struct cache_header
) + 20)
525 map
= mmap(NULL
, size
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
528 if (map
== MAP_FAILED
)
529 die("index file mmap failed (%s)", strerror(errno
));
532 if (verify_hdr(hdr
, size
) < 0)
535 active_nr
= ntohl(hdr
->hdr_entries
);
536 active_alloc
= alloc_nr(active_nr
);
537 active_cache
= calloc(active_alloc
, sizeof(struct cache_entry
*));
539 offset
= sizeof(*hdr
);
540 for (i
= 0; i
< active_nr
; i
++) {
541 struct cache_entry
*ce
= map
+ offset
;
542 offset
= offset
+ ce_size(ce
);
543 active_cache
[i
] = ce
;
545 index_file_timestamp
= st
.st_mtime
;
551 die("index file corrupt");
/* Buffered output for index writing: batches small writes and feeds the
 * running SHA1 over everything written. */
#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
558 static int ce_write(SHA_CTX
*context
, int fd
, void *data
, unsigned int len
)
561 unsigned int buffered
= write_buffer_len
;
562 unsigned int partial
= WRITE_BUFFER_SIZE
- buffered
;
565 memcpy(write_buffer
+ buffered
, data
, partial
);
567 if (buffered
== WRITE_BUFFER_SIZE
) {
568 SHA1_Update(context
, write_buffer
, WRITE_BUFFER_SIZE
);
569 if (write(fd
, write_buffer
, WRITE_BUFFER_SIZE
) != WRITE_BUFFER_SIZE
)
573 write_buffer_len
= buffered
;
580 static int ce_flush(SHA_CTX
*context
, int fd
)
582 unsigned int left
= write_buffer_len
;
585 write_buffer_len
= 0;
586 SHA1_Update(context
, write_buffer
, left
);
589 /* Flush first if not enough space for SHA1 signature */
590 if (left
+ 20 > WRITE_BUFFER_SIZE
) {
591 if (write(fd
, write_buffer
, left
) != left
)
596 /* Append the SHA1 signature at the end */
597 SHA1_Final(write_buffer
+ left
, context
);
599 if (write(fd
, write_buffer
, left
) != left
)
604 static void ce_smudge_racily_clean_entry(struct cache_entry
*ce
)
607 * The only thing we care about in this function is to smudge the
608 * falsely clean entry due to touch-update-touch race, so we leave
609 * everything else as they are. We are called for entries whose
610 * ce_mtime match the index file mtime.
614 if (lstat(ce
->name
, &st
) < 0)
616 if (ce_match_stat_basic(ce
, &st
))
618 if (ce_modified_check_fs(ce
, &st
)) {
619 /* This is "racily clean"; smudge it */
620 ce
->ce_size
= htonl(0);
624 int write_cache(int newfd
, struct cache_entry
**cache
, int entries
)
627 struct cache_header hdr
;
630 for (i
= removed
= 0; i
< entries
; i
++)
631 if (!cache
[i
]->ce_mode
)
634 hdr
.hdr_signature
= htonl(CACHE_SIGNATURE
);
635 hdr
.hdr_version
= htonl(2);
636 hdr
.hdr_entries
= htonl(entries
- removed
);
639 if (ce_write(&c
, newfd
, &hdr
, sizeof(hdr
)) < 0)
642 for (i
= 0; i
< entries
; i
++) {
643 struct cache_entry
*ce
= cache
[i
];
646 if (index_file_timestamp
&&
647 index_file_timestamp
<= ntohl(ce
->ce_mtime
.sec
))
648 ce_smudge_racily_clean_entry(ce
);
649 if (ce_write(&c
, newfd
, ce
, ce_size(ce
)) < 0)
652 return ce_flush(&c
, newfd
);