treewide: reduce includes of cache.h in other headers
[git.git] / cache.h
blob16b5fc9fa31fd405f4df190cfe10d2e1f77388ba
1 #ifndef CACHE_H
2 #define CACHE_H
4 #include "git-compat-util.h"
5 #include "strbuf.h"
6 #include "hashmap.h"
7 #include "gettext.h"
8 #include "string-list.h"
9 #include "pathspec.h"
10 #include "object.h"
11 #include "statinfo.h"
13 #if defined(DT_UNKNOWN) && !defined(NO_D_TYPE_IN_DIRENT)
14 #define DTYPE(de) ((de)->d_type)
15 #else
16 #undef DT_UNKNOWN
17 #undef DT_DIR
18 #undef DT_REG
19 #undef DT_LNK
20 #define DT_UNKNOWN 0
21 #define DT_DIR 1
22 #define DT_REG 2
23 #define DT_LNK 3
24 #define DTYPE(de) DT_UNKNOWN
25 #endif
/*
 * Some mode bits are also used internally for computations.
 *
 * They *must* not overlap with any valid modes, and they *must* not be emitted
 * to outside world - i.e. appear on disk or network. In other words, it's just
 * temporary fields, which we internally use, but they have to stay in-house.
 *
 * ( such approach is valid, as standard S_IF* fits into 16 bits, and in Git
 * codebase mode is `unsigned int` which is assumed to be at least 32 bits )
 */
38 /* used internally in tree-diff */
39 #define S_DIFFTREE_IFXMIN_NEQ 0x80000000
43 * Intensive research over the course of many years has shown that
44 * port 9418 is totally unused by anything else. Or
46 * Your search - "port 9418" - did not match any documents.
48 * as www.google.com puts it.
50 * This port has been properly assigned for git use by IANA:
51 * git (Assigned-9418) [I06-050728-0001].
53 * git 9418/tcp git pack transfer service
54 * git 9418/udp git pack transfer service
56 * with Linus Torvalds <torvalds@osdl.org> as the point of
57 * contact. September 2005.
59 * See http://www.iana.org/assignments/port-numbers
61 #define DEFAULT_GIT_PORT 9418
64 * Basic data structures for the directory cache
67 #define CACHE_SIGNATURE 0x44495243 /* "DIRC" */
/* Header at the start of the on-disk index file. */
struct cache_header {
	uint32_t hdr_signature;	/* must be CACHE_SIGNATURE ("DIRC") */
	uint32_t hdr_version;	/* index format version; see INDEX_FORMAT_LB/UB below */
	uint32_t hdr_entries;	/* number of index entries that follow */
74 #define INDEX_FORMAT_LB 2
75 #define INDEX_FORMAT_UB 4
/*
 * An in-memory index entry: one tracked path plus the cached stat
 * data used to decide whether the working tree file changed.
 */
struct cache_entry {
	struct hashmap_entry ent;	/* hash chain; deliberately not copied by copy_cache_entry() */
	struct stat_data ce_stat_data;	/* cached stat(2) data for change detection */
	unsigned int ce_mode;	/* object mode (file type + permission bits) */
	unsigned int ce_flags;	/* CE_* flag bits; stage lives in the CE_STAGEMASK bits */
	unsigned int mem_pool_allocated;	/* nonzero if allocated from a mem_pool, not malloc */
	unsigned int ce_namelen;	/* length of name[] */
	unsigned int index; /* for link extension */
	struct object_id oid;	/* object name of the cached content */
	char name[FLEX_ARRAY]; /* more */
89 #define CE_STAGEMASK (0x3000)
90 #define CE_EXTENDED (0x4000)
91 #define CE_VALID (0x8000)
92 #define CE_STAGESHIFT 12
/*
 * Range 0xFFFF0FFF in ce_flags is divided into
 * two parts: in-memory flags and on-disk ones.
 * Flags in CE_EXTENDED_FLAGS will get saved on-disk;
 * if you want to save a new flag, add it in
 * CE_EXTENDED_FLAGS.
 *
 * In-memory only flags:
 */
103 #define CE_UPDATE (1 << 16)
104 #define CE_REMOVE (1 << 17)
105 #define CE_UPTODATE (1 << 18)
106 #define CE_ADDED (1 << 19)
108 #define CE_HASHED (1 << 20)
109 #define CE_FSMONITOR_VALID (1 << 21)
110 #define CE_WT_REMOVE (1 << 22) /* remove in work directory */
111 #define CE_CONFLICTED (1 << 23)
113 #define CE_UNPACKED (1 << 24)
114 #define CE_NEW_SKIP_WORKTREE (1 << 25)
116 /* used to temporarily mark paths matched by pathspecs */
117 #define CE_MATCHED (1 << 26)
119 #define CE_UPDATE_IN_BASE (1 << 27)
120 #define CE_STRIP_NAME (1 << 28)
123 * Extended on-disk flags
125 #define CE_INTENT_TO_ADD (1 << 29)
126 #define CE_SKIP_WORKTREE (1 << 30)
127 /* CE_EXTENDED2 is for future extension */
128 #define CE_EXTENDED2 (1U << 31)
130 #define CE_EXTENDED_FLAGS (CE_INTENT_TO_ADD | CE_SKIP_WORKTREE)
/*
 * Safeguard to avoid saving wrong flags:
 *  - CE_EXTENDED2 won't get saved until its semantic is known
 *  - Bits in 0x0000FFFF have been saved in ce_flags already
 *  - Bits in 0x003F0000 are currently in-memory flags
 */
138 #if CE_EXTENDED_FLAGS & 0x803FFFFF
139 #error "CE_EXTENDED_FLAGS out of range"
140 #endif
142 /* Forward structure decls */
143 struct pathspec;
144 struct tree;
147 * Copy the sha1 and stat state of a cache entry from one to
148 * another. But we never change the name, or the hash state!
150 static inline void copy_cache_entry(struct cache_entry *dst,
151 const struct cache_entry *src)
153 unsigned int state = dst->ce_flags & CE_HASHED;
154 int mem_pool_allocated = dst->mem_pool_allocated;
156 /* Don't copy hash chain and name */
157 memcpy(&dst->ce_stat_data, &src->ce_stat_data,
158 offsetof(struct cache_entry, name) -
159 offsetof(struct cache_entry, ce_stat_data));
161 /* Restore the hash state */
162 dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state;
164 /* Restore the mem_pool_allocated flag */
165 dst->mem_pool_allocated = mem_pool_allocated;
168 static inline unsigned create_ce_flags(unsigned stage)
170 return (stage << CE_STAGESHIFT);
173 #define ce_namelen(ce) ((ce)->ce_namelen)
174 #define ce_size(ce) cache_entry_size(ce_namelen(ce))
175 #define ce_stage(ce) ((CE_STAGEMASK & (ce)->ce_flags) >> CE_STAGESHIFT)
176 #define ce_uptodate(ce) ((ce)->ce_flags & CE_UPTODATE)
177 #define ce_skip_worktree(ce) ((ce)->ce_flags & CE_SKIP_WORKTREE)
178 #define ce_mark_uptodate(ce) ((ce)->ce_flags |= CE_UPTODATE)
179 #define ce_intent_to_add(ce) ((ce)->ce_flags & CE_INTENT_TO_ADD)
181 static inline unsigned int ce_mode_from_stat(const struct cache_entry *ce,
182 unsigned int mode)
184 extern int trust_executable_bit, has_symlinks;
185 if (!has_symlinks && S_ISREG(mode) &&
186 ce && S_ISLNK(ce->ce_mode))
187 return ce->ce_mode;
188 if (!trust_executable_bit && S_ISREG(mode)) {
189 if (ce && S_ISREG(ce->ce_mode))
190 return ce->ce_mode;
191 return create_ce_mode(0666);
193 return create_ce_mode(mode);
195 static inline int ce_to_dtype(const struct cache_entry *ce)
197 unsigned ce_mode = ntohl(ce->ce_mode);
198 if (S_ISREG(ce_mode))
199 return DT_REG;
200 else if (S_ISDIR(ce_mode) || S_ISGITLINK(ce_mode))
201 return DT_DIR;
202 else if (S_ISLNK(ce_mode))
203 return DT_LNK;
204 else
205 return DT_UNKNOWN;
208 static inline int ce_path_match(struct index_state *istate,
209 const struct cache_entry *ce,
210 const struct pathspec *pathspec,
211 char *seen)
213 return match_pathspec(istate, pathspec, ce->name, ce_namelen(ce), 0, seen,
214 S_ISDIR(ce->ce_mode) || S_ISGITLINK(ce->ce_mode));
217 #define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1)
219 #define SOMETHING_CHANGED (1 << 0) /* unclassified changes go here */
220 #define CE_ENTRY_CHANGED (1 << 1)
221 #define CE_ENTRY_REMOVED (1 << 2)
222 #define CE_ENTRY_ADDED (1 << 3)
223 #define RESOLVE_UNDO_CHANGED (1 << 4)
224 #define CACHE_TREE_CHANGED (1 << 5)
225 #define SPLIT_INDEX_ORDERED (1 << 6)
226 #define UNTRACKED_CHANGED (1 << 7)
227 #define FSMONITOR_CHANGED (1 << 8)
229 struct split_index;
230 struct untracked_cache;
231 struct progress;
232 struct pattern_list;
enum sparse_index_mode {
	/*
	 * There are no sparse directories in the index at all.
	 *
	 * Repositories that don't use cone-mode sparse-checkout will
	 * always have their indexes in this mode.
	 */
	INDEX_EXPANDED = 0,

	/*
	 * The index has already been collapsed to sparse directories
	 * wherever possible.
	 */
	INDEX_COLLAPSED,

	/*
	 * The sparse directories that exist are outside the
	 * sparse-checkout boundary, but it is possible that some file
	 * entries could collapse to sparse directory entries.
	 */
	INDEX_PARTIALLY_SPARSE,
/* The in-memory representation of a (possibly split/sparse) index file. */
struct index_state {
	struct cache_entry **cache;	/* the entries; see index_name_pos() for ordering */
	unsigned int version;	/* on-disk index format version */
	unsigned int cache_nr, cache_alloc, cache_changed;	/* cache_changed: mask of *_CHANGED bits above */
	struct string_list *resolve_undo;
	struct cache_tree *cache_tree;
	struct split_index *split_index;
	struct cache_time timestamp;	/* presumably the index file's mtime when read — confirm in read-cache.c */
	unsigned name_hash_initialized : 1,
		 initialized : 1,
		 drop_cache_tree : 1,
		 updated_workdir : 1,
		 updated_skipworktree : 1,
		 fsmonitor_has_run_once : 1;
	enum sparse_index_mode sparse_index;
	struct hashmap name_hash;	/* maintained by add_name_hash()/remove_name_hash() */
	struct hashmap dir_hash;	/* see index_dir_exists() */
	struct object_id oid;
	struct untracked_cache *untracked;
	char *fsmonitor_last_update;
	struct ewah_bitmap *fsmonitor_dirty;
	struct mem_pool *ce_mem_pool;	/* pool backing entries with mem_pool_allocated set */
	struct progress *progress;
	struct repository *repo;
	struct pattern_list *sparse_checkout_patterns;
/*
 * A "struct index_state istate" must be initialized with
 * INDEX_STATE_INIT or the corresponding index_state_init().
 *
 * If the variable won't be used again, use release_index() to free()
 * its resources. If it needs to be used again use discard_index(),
 * which does the same thing, but will use index_state_init() at
 * the end. The discard_index() will use its own "istate->repo" as the
 * "r" argument to index_state_init() in that case.
 */
294 #define INDEX_STATE_INIT(r) { \
295 .repo = (r), \
297 void index_state_init(struct index_state *istate, struct repository *r);
298 void release_index(struct index_state *istate);
300 /* Name hashing */
301 int test_lazy_init_name_hash(struct index_state *istate, int try_threaded);
302 void add_name_hash(struct index_state *istate, struct cache_entry *ce);
303 void remove_name_hash(struct index_state *istate, struct cache_entry *ce);
304 void free_name_hash(struct index_state *istate);
306 /* Cache entry creation and cleanup */
309 * Create cache_entry intended for use in the specified index. Caller
310 * is responsible for discarding the cache_entry with
311 * `discard_cache_entry`.
313 struct cache_entry *make_cache_entry(struct index_state *istate,
314 unsigned int mode,
315 const struct object_id *oid,
316 const char *path,
317 int stage,
318 unsigned int refresh_options);
320 struct cache_entry *make_empty_cache_entry(struct index_state *istate,
321 size_t name_len);
324 * Create a cache_entry that is not intended to be added to an index. If
325 * `ce_mem_pool` is not NULL, the entry is allocated within the given memory
326 * pool. Caller is responsible for discarding "loose" entries with
327 * `discard_cache_entry()` and the memory pool with
328 * `mem_pool_discard(ce_mem_pool, should_validate_cache_entries())`.
330 struct cache_entry *make_transient_cache_entry(unsigned int mode,
331 const struct object_id *oid,
332 const char *path,
333 int stage,
334 struct mem_pool *ce_mem_pool);
336 struct cache_entry *make_empty_transient_cache_entry(size_t len,
337 struct mem_pool *ce_mem_pool);
340 * Discard cache entry.
342 void discard_cache_entry(struct cache_entry *ce);
345 * Check configuration if we should perform extra validation on cache
346 * entries.
348 int should_validate_cache_entries(void);
351 * Duplicate a cache_entry. Allocate memory for the new entry from a
352 * memory_pool. Takes into account cache_entry fields that are meant
353 * for managing the underlying memory allocation of the cache_entry.
355 struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_state *istate);
358 * Validate the cache entries in the index. This is an internal
359 * consistency check that the cache_entry structs are allocated from
360 * the expected memory pool.
362 void validate_cache_entries(const struct index_state *istate);
365 * Bulk prefetch all missing cache entries that are not GITLINKs and that match
366 * the given predicate. This function should only be called if
367 * repo_has_promisor_remote() returns true.
369 typedef int (*must_prefetch_predicate)(const struct cache_entry *);
370 void prefetch_cache_entries(const struct index_state *istate,
371 must_prefetch_predicate must_prefetch);
373 #ifdef USE_THE_INDEX_VARIABLE
374 extern struct index_state the_index;
375 #endif
377 #define INIT_DB_QUIET 0x0001
378 #define INIT_DB_EXIST_OK 0x0002
380 int init_db(const char *git_dir, const char *real_git_dir,
381 const char *template_dir, int hash_algo,
382 const char *initial_branch, unsigned int flags);
383 void initialize_repository_version(int hash_algo, int reinit);
385 /* Initialize and use the cache information */
386 struct lock_file;
387 void preload_index(struct index_state *index,
388 const struct pathspec *pathspec,
389 unsigned int refresh_flags);
int do_read_index(struct index_state *istate, const char *path,
		  int must_exist); /* for testing only! */
392 int read_index_from(struct index_state *, const char *path,
393 const char *gitdir);
394 int is_index_unborn(struct index_state *);
396 void ensure_full_index(struct index_state *istate);
398 /* For use with `write_locked_index()`. */
399 #define COMMIT_LOCK (1 << 0)
400 #define SKIP_IF_UNCHANGED (1 << 1)
403 * Write the index while holding an already-taken lock. Close the lock,
404 * and if `COMMIT_LOCK` is given, commit it.
406 * Unless a split index is in use, write the index into the lockfile.
408 * With a split index, write the shared index to a temporary file,
409 * adjust its permissions and rename it into place, then write the
410 * split index to the lockfile. If the temporary file for the shared
411 * index cannot be created, fall back to the behavior described in
412 * the previous paragraph.
414 * With `COMMIT_LOCK`, the lock is always committed or rolled back.
415 * Without it, the lock is closed, but neither committed nor rolled
416 * back.
418 * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
419 * is written (and the lock is rolled back if `COMMIT_LOCK` is given).
421 int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
423 void discard_index(struct index_state *);
424 void move_index_extensions(struct index_state *dst, struct index_state *src);
425 int unmerged_index(const struct index_state *);
428 * Returns 1 if istate differs from tree, 0 otherwise. If tree is NULL,
429 * compares istate to HEAD. If tree is NULL and on an unborn branch,
430 * returns 1 if there are entries in istate, 0 otherwise. If an strbuf is
431 * provided, the space-separated list of files that differ will be appended
432 * to it.
434 int repo_index_has_changes(struct repository *repo,
435 struct tree *tree,
436 struct strbuf *sb);
438 int verify_path(const char *path, unsigned mode);
439 int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
440 int index_dir_exists(struct index_state *istate, const char *name, int namelen);
441 void adjust_dirname_case(struct index_state *istate, char *name);
442 struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int igncase);
445 * Searches for an entry defined by name and namelen in the given index.
446 * If the return value is positive (including 0) it is the position of an
447 * exact match. If the return value is negative, the negated value minus 1
448 * is the position where the entry would be inserted.
449 * Example: The current index consists of these files and its stages:
451 * b#0, d#0, f#1, f#3
453 * index_name_pos(&index, "a", 1) -> -1
454 * index_name_pos(&index, "b", 1) -> 0
455 * index_name_pos(&index, "c", 1) -> -2
456 * index_name_pos(&index, "d", 1) -> 1
457 * index_name_pos(&index, "e", 1) -> -3
458 * index_name_pos(&index, "f", 1) -> -3
459 * index_name_pos(&index, "g", 1) -> -5
461 int index_name_pos(struct index_state *, const char *name, int namelen);
464 * Like index_name_pos, returns the position of an entry of the given name in
465 * the index if one exists, otherwise returns a negative value where the negated
466 * value minus 1 is the position where the index entry would be inserted. Unlike
467 * index_name_pos, however, a sparse index is not expanded to find an entry
468 * inside a sparse directory.
470 int index_name_pos_sparse(struct index_state *, const char *name, int namelen);
473 * Determines whether an entry with the given name exists within the
474 * given index. The return value is 1 if an exact match is found, otherwise
475 * it is 0. Note that, unlike index_name_pos, this function does not expand
476 * the index if it is sparse. If an item exists within the full index but it
477 * is contained within a sparse directory (and not in the sparse index), 0 is
478 * returned.
480 int index_entry_exists(struct index_state *, const char *name, int namelen);
/*
 * Some functions return the negative complement of an insert position when a
 * precise match was not found but a position was found where the entry would
 * need to be inserted. This helper protects that logic from any integer
 * underflow.
 */
static inline int index_pos_to_insert_pos(uintmax_t pos)
{
	/* -1 - pos would underflow int for pos > INT_MAX; refuse loudly. */
	if (pos > INT_MAX)
		die("overflow: -1 - %"PRIuMAX, pos);
	return -(int)pos - 1;
}
495 #define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
496 #define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
497 #define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
498 #define ADD_CACHE_JUST_APPEND 8 /* Append only */
499 #define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */
500 #define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */
501 #define ADD_CACHE_RENORMALIZE 64 /* Pass along HASH_RENORMALIZE */
502 int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
503 void rename_index_entry_at(struct index_state *, int pos, const char *new_name);
505 /* Remove entry, return true if there are more entries to go. */
506 int remove_index_entry_at(struct index_state *, int pos);
508 void remove_marked_cache_entries(struct index_state *istate, int invalidate);
509 int remove_file_from_index(struct index_state *, const char *path);
510 #define ADD_CACHE_VERBOSE 1
511 #define ADD_CACHE_PRETEND 2
512 #define ADD_CACHE_IGNORE_ERRORS 4
513 #define ADD_CACHE_IGNORE_REMOVAL 8
514 #define ADD_CACHE_INTENT 16
516 * These two are used to add the contents of the file at path
517 * to the index, marking the working tree up-to-date by storing
518 * the cached stat info in the resulting cache entry. A caller
519 * that has already run lstat(2) on the path can call
520 * add_to_index(), and all others can call add_file_to_index();
521 * the latter will do necessary lstat(2) internally before
522 * calling the former.
524 int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
525 int add_file_to_index(struct index_state *, const char *path, int flags);
527 int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
528 int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
529 void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
530 int index_name_is_other(struct index_state *, const char *, int);
531 void *read_blob_data_from_index(struct index_state *, const char *, unsigned long *);
533 /* do stat comparison even if CE_VALID is true */
534 #define CE_MATCH_IGNORE_VALID 01
535 /* do not check the contents but report dirty on racily-clean entries */
536 #define CE_MATCH_RACY_IS_DIRTY 02
537 /* do stat comparison even if CE_SKIP_WORKTREE is true */
538 #define CE_MATCH_IGNORE_SKIP_WORKTREE 04
539 /* ignore non-existent files during stat update */
540 #define CE_MATCH_IGNORE_MISSING 0x08
541 /* enable stat refresh */
542 #define CE_MATCH_REFRESH 0x10
543 /* don't refresh_fsmonitor state or do stat comparison even if CE_FSMONITOR_VALID is true */
544 #define CE_MATCH_IGNORE_FSMONITOR 0X20
545 int is_racy_timestamp(const struct index_state *istate,
546 const struct cache_entry *ce);
547 int has_racy_timestamp(struct index_state *istate);
548 int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
549 int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
552 * Record to sd the data from st that we use to check whether a file
553 * might have changed.
555 void fill_stat_data(struct stat_data *sd, struct stat *st);
558 * Return 0 if st is consistent with a file not having been changed
559 * since sd was filled. If there are differences, return a
560 * combination of MTIME_CHANGED, CTIME_CHANGED, OWNER_CHANGED,
561 * INODE_CHANGED, and DATA_CHANGED.
563 int match_stat_data(const struct stat_data *sd, struct stat *st);
564 int match_stat_data_racy(const struct index_state *istate,
565 const struct stat_data *sd, struct stat *st);
567 void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st);
569 #define REFRESH_REALLY (1 << 0) /* ignore_valid */
570 #define REFRESH_UNMERGED (1 << 1) /* allow unmerged */
571 #define REFRESH_QUIET (1 << 2) /* be quiet about it */
572 #define REFRESH_IGNORE_MISSING (1 << 3) /* ignore non-existent */
573 #define REFRESH_IGNORE_SUBMODULES (1 << 4) /* ignore submodules */
574 #define REFRESH_IN_PORCELAIN (1 << 5) /* user friendly output, not "needs update" */
575 #define REFRESH_PROGRESS (1 << 6) /* show progress bar if stderr is tty */
576 #define REFRESH_IGNORE_SKIP_WORKTREE (1 << 7) /* ignore skip_worktree entries */
577 int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
579 * Refresh the index and write it to disk.
581 * 'refresh_flags' is passed directly to 'refresh_index()', while
582 * 'COMMIT_LOCK | write_flags' is passed to 'write_locked_index()', so
583 * the lockfile is always either committed or rolled back.
585 * If 'gentle' is passed, errors locking the index are ignored.
587 * Return 1 if refreshing the index returns an error, -1 if writing
588 * the index to disk fails, 0 on success.
590 * Note that if refreshing the index returns an error, we still write
591 * out the index (unless locking fails).
593 int repo_refresh_and_write_index(struct repository*, unsigned int refresh_flags, unsigned int write_flags, int gentle, const struct pathspec *, char *seen, const char *header_msg);
595 struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
597 void set_alternate_index_output(const char *);
599 extern int verify_index_checksum;
600 extern int verify_ce_order;
602 extern int quote_path_fully;
604 #define MTIME_CHANGED 0x0001
605 #define CTIME_CHANGED 0x0002
606 #define OWNER_CHANGED 0x0004
607 #define MODE_CHANGED 0x0008
608 #define INODE_CHANGED 0x0010
609 #define DATA_CHANGED 0x0020
610 #define TYPE_CHANGED 0x0040
612 int base_name_compare(const char *name1, size_t len1, int mode1,
613 const char *name2, size_t len2, int mode2);
614 int df_name_compare(const char *name1, size_t len1, int mode1,
615 const char *name2, size_t len2, int mode2);
616 int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
617 int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
/*
 * NOTE(review): appears to be per-thread state for the lstat/symlink
 * cache used by (threaded_)has_symlink_leading_path() below — confirm
 * against symlinks.c.
 */
struct cache_def {
	struct strbuf path;	/* path last examined; released by cache_def_clear() */
	int flags;
	int track_flags;
	int prefix_len_stat_func;
625 #define CACHE_DEF_INIT { \
626 .path = STRBUF_INIT, \
628 static inline void cache_def_clear(struct cache_def *cache)
630 strbuf_release(&cache->path);
633 int has_symlink_leading_path(const char *name, int len);
634 int threaded_has_symlink_leading_path(struct cache_def *, const char *, int);
635 int check_leading_path(const char *name, int len, int warn_on_lstat_err);
636 int has_dirs_only_path(const char *name, int len, int prefix_len);
637 void invalidate_lstat_cache(void);
638 void schedule_dir_for_removal(const char *name, int len);
639 void remove_scheduled_dirs(void);
/*
 * A window of data from a packfile (presumably a memory mapping —
 * confirm in packfile.c).
 */
struct pack_window {
	struct pack_window *next;	/* next window for the same pack */
	unsigned char *base;	/* start of the window's data */
	off_t offset;	/* pack offset that base corresponds to */
	size_t len;	/* number of bytes available at base */
	unsigned int last_used;
	unsigned int inuse_cnt;	/* count of active users keeping the window alive */
/* Location of an object: which pack holds it, and where inside it. */
struct pack_entry {
	off_t offset;	/* offset of the object within the pack */
	struct packed_git *p;	/* the pack containing the object */
655 /* Dumb servers support */
656 int update_server_info(int);
658 extern const char *git_mailmap_file;
659 extern const char *git_mailmap_blob;
661 #define COPY_READ_ERROR (-2)
662 #define COPY_WRITE_ERROR (-3)
663 int copy_fd(int ifd, int ofd);
664 int copy_file(const char *dst, const char *src, int mode);
665 int copy_file_with_time(const char *dst, const char *src, int mode);
667 /* base85 */
668 int decode_85(char *dst, const char *line, int linelen);
669 void encode_85(char *buf, const unsigned char *data, int bytes);
671 /* pkt-line.c */
672 void packet_trace_identity(const char *prog);
674 /* add */
676 * return 0 if success, 1 - if addition of a file failed and
677 * ADD_FILES_IGNORE_ERRORS was specified in flags
679 int add_files_to_cache(const char *prefix, const struct pathspec *pathspec, int flags);
681 /* diff.c */
682 extern int diff_auto_refresh_index;
684 /* match-trees.c */
685 void shift_tree(struct repository *, const struct object_id *, const struct object_id *, struct object_id *, int);
686 void shift_tree_by(struct repository *, const struct object_id *, const struct object_id *, struct object_id *, const char *);
689 * whitespace rules.
690 * used by both diff and apply
691 * last two digits are tab width
693 #define WS_BLANK_AT_EOL 0100
694 #define WS_SPACE_BEFORE_TAB 0200
695 #define WS_INDENT_WITH_NON_TAB 0400
696 #define WS_CR_AT_EOL 01000
697 #define WS_BLANK_AT_EOF 02000
698 #define WS_TAB_IN_INDENT 04000
699 #define WS_TRAILING_SPACE (WS_BLANK_AT_EOL|WS_BLANK_AT_EOF)
700 #define WS_DEFAULT_RULE (WS_TRAILING_SPACE|WS_SPACE_BEFORE_TAB|8)
701 #define WS_TAB_WIDTH_MASK 077
702 /* All WS_* -- when extended, adapt diff.c emit_symbol */
703 #define WS_RULE_MASK 07777
704 extern unsigned whitespace_rule_cfg;
705 unsigned whitespace_rule(struct index_state *, const char *);
706 unsigned parse_whitespace_rule(const char *);
707 unsigned ws_check(const char *line, int len, unsigned ws_rule);
708 void ws_check_emit(const char *line, int len, unsigned ws_rule, FILE *stream, const char *set, const char *reset, const char *ws);
709 char *whitespace_error_string(unsigned ws);
710 void ws_fix_copy(struct strbuf *, const char *, int, unsigned, int *);
711 int ws_blank_line(const char *line, int len);
712 #define ws_tab_width(rule) ((rule) & WS_TAB_WIDTH_MASK)
714 /* ls-files */
715 void overlay_tree_on_index(struct index_state *istate,
716 const char *tree_name, const char *prefix);
718 /* merge.c */
719 struct commit_list;
720 int try_merge_command(struct repository *r,
721 const char *strategy, size_t xopts_nr,
722 const char **xopts, struct commit_list *common,
723 const char *head_arg, struct commit_list *remotes);
724 int checkout_fast_forward(struct repository *r,
725 const struct object_id *from,
726 const struct object_id *to,
727 int overwrite_ignore);
730 int sane_execvp(const char *file, char *const argv[]);
/*
 * A struct to encapsulate the concept of whether a file has changed
 * since we last checked it. This uses criteria similar to those used
 * for the index.
 */
struct stat_validity {
	struct stat_data *sd;	/* saved stat data; see stat_validity_check()/update() below */
741 void stat_validity_clear(struct stat_validity *sv);
744 * Returns 1 if the path is a regular file (or a symlink to a regular
745 * file) and matches the saved stat_validity, 0 otherwise. A missing
746 * or inaccessible file is considered a match if the struct was just
747 * initialized, or if the previous update found an inaccessible file.
749 int stat_validity_check(struct stat_validity *sv, const char *path);
752 * Update the stat_validity from a file opened at descriptor fd. If
753 * the file is missing, inaccessible, or not a regular file, then
754 * future calls to stat_validity_check will match iff one of those
755 * conditions continues to be true.
757 void stat_validity_update(struct stat_validity *sv, int fd);
759 int versioncmp(const char *s1, const char *s2);
761 #endif /* CACHE_H */