/*
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "pathspec.h"
#include "dir.h"
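
/*
 * Builds without pthread support get a no-op preload_index(), so
 * read_index_preload() below behaves exactly like read_index().
 */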
#ifdef NO_PTHREADS
static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	; /* nothing */
}
#else

#include <pthread.h>
/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to 20 threads, and we want
 * to have at least 500 lstat's per thread for it to
 * be worth starting a thread.
 */
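/*
 * For example, a 3000-entry index gets 3000/500 = 6 threads, while
 * anything below 2 * THREAD_COST entries stays single-threaded.
 */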
#define MAX_PARALLEL (20)
#define THREAD_COST (500)
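
/*
 * Per-thread work description: each worker sees the whole index but
 * only touches the 'nr' cache entries starting at 'offset'.
 */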
struct thread_data {
	pthread_t pthread;
	struct index_state *index;
	struct pathspec pathspec;
	int offset, nr;
};
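
/*
 * Worker body: lstat() every plausible candidate in this thread's slice
 * and mark the entry up-to-date when the on-disk stat data still matches
 * the index, so a later refresh does not have to repeat the lstat().
 */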
static void *preload_thread(void *_data)
{
	int nr;
	struct thread_data *p = _data;
	struct index_state *index = p->index;
	struct cache_entry **cep = index->cache + p->offset;
	struct cache_def cache = CACHE_DEF_INIT;
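
	/* Clamp the slice so the last thread stops at the end of the index. */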
	nr = p->nr;
	if (nr + p->offset > index->cache_nr)
		nr = index->cache_nr - p->offset;
	do {
		struct cache_entry *ce = *cep++;
		struct stat st;
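
		/*
		 * Skip entries the preload cannot usefully refresh: unmerged
		 * stages, gitlinks, entries already up-to-date or marked
		 * skip-worktree, paths outside the pathspec, and paths behind
		 * a symlinked leading directory.
		 */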
		if (ce_stage(ce))
			continue;
		if (S_ISGITLINK(ce->ce_mode))
			continue;
		if (ce_uptodate(ce))
			continue;
		if (ce_skip_worktree(ce))
			continue;
		if (!ce_path_match(ce, &p->pathspec, NULL))
			continue;
		if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
			continue;
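
		/*
		 * The path is worth checking: if it still stats clean
		 * (treating racily clean entries as dirty), cache that
		 * fact in the in-core index entry.
		 */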
		if (lstat(ce->name, &st))
			continue;
		if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY))
			continue;
		ce_mark_uptodate(ce);
	} while (--nr > 0);
	cache_def_clear(&cache);
	return NULL;
}
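
/*
 * Split the index into roughly equal slices, run preload_thread() on
 * each slice in parallel, and wait for all workers to finish.
 */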
static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	int threads, i, work, offset;
	struct thread_data data[MAX_PARALLEL];

	if (!core_preload_index)
		return;

	threads = index->cache_nr / THREAD_COST;
	if (threads < 2)
		return;
	if (threads > MAX_PARALLEL)
		threads = MAX_PARALLEL;
	offset = 0;
	work = DIV_ROUND_UP(index->cache_nr, threads);
	memset(&data, 0, sizeof(data));
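	/*
	 * Start the workers: each gets its own copy of the pathspec and
	 * its slice bounds; failing to create a thread is fatal.
	 */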
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		p->index = index;
		if (pathspec)
			copy_pathspec(&p->pathspec, pathspec);
		p->offset = offset;
		p->nr = work;
		offset += work;
		if (pthread_create(&p->pthread, NULL, preload_thread, p))
			die("unable to create threaded lstat");
	}
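	/* Wait for all the workers to finish before returning. */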
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		if (pthread_join(p->pthread, NULL))
			die("unable to join threaded lstat");
	}
}
#endif
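
/*
 * Read the index from disk, then warm up the cached stat information
 * for its entries; the return value is whatever read_index() reported.
 */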
int read_index_preload(struct index_state *index,
		       const struct pathspec *pathspec)
{
	int retval = read_index(index);

	preload_index(index, pathspec);
	return retval;
}