/* Copyright (C) 2008 Linus Torvalds */
11 #include "thread-utils.h"
12 #include "repository.h"
/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to 20 threads, and we want
 * to have at least 500 lstat's per thread for it to
 * be worth starting a thread.
 */
#define MAX_PARALLEL (20)
#define THREAD_COST (500)
/*
 * Progress state shared by all preload threads: a single counter and
 * display handle, serialized by one mutex.
 */
struct progress_data {
	unsigned long n;		/* entries processed so far (see pd->n updates in preload_thread) */
	struct progress *progress;	/* display handle; NULL when progress reporting is off */
	pthread_mutex_t mutex;		/* guards n and progress across worker threads */
};
31 struct index_state
*index
;
32 struct pathspec pathspec
;
33 struct progress_data
*progress
;
38 static void *preload_thread(void *_data
)
41 struct thread_data
*p
= _data
;
42 struct index_state
*index
= p
->index
;
43 struct cache_entry
**cep
= index
->cache
+ p
->offset
;
44 struct cache_def cache
= CACHE_DEF_INIT
;
47 if (nr
+ p
->offset
> index
->cache_nr
)
48 nr
= index
->cache_nr
- p
->offset
;
52 struct cache_entry
*ce
= *cep
++;
57 if (S_ISGITLINK(ce
->ce_mode
))
61 if (ce_skip_worktree(ce
))
63 if (ce
->ce_flags
& CE_FSMONITOR_VALID
)
65 if (p
->progress
&& !(nr
& 31)) {
66 struct progress_data
*pd
= p
->progress
;
68 pthread_mutex_lock(&pd
->mutex
);
69 pd
->n
+= last_nr
- nr
;
70 display_progress(pd
->progress
, pd
->n
);
71 pthread_mutex_unlock(&pd
->mutex
);
74 if (!ce_path_match(index
, ce
, &p
->pathspec
, NULL
))
76 if (threaded_has_symlink_leading_path(&cache
, ce
->name
, ce_namelen(ce
)))
79 if (lstat(ce
->name
, &st
))
81 if (ie_match_stat(index
, ce
, &st
, CE_MATCH_RACY_IS_DIRTY
|CE_MATCH_IGNORE_FSMONITOR
))
84 mark_fsmonitor_valid(index
, ce
);
87 struct progress_data
*pd
= p
->progress
;
89 pthread_mutex_lock(&pd
->mutex
);
90 display_progress(pd
->progress
, pd
->n
+ last_nr
);
91 pthread_mutex_unlock(&pd
->mutex
);
93 cache_def_clear(&cache
);
97 void preload_index(struct index_state
*index
,
98 const struct pathspec
*pathspec
,
99 unsigned int refresh_flags
)
101 int threads
, i
, work
, offset
;
102 struct thread_data data
[MAX_PARALLEL
];
103 struct progress_data pd
;
104 int t2_sum_lstat
= 0;
106 if (!HAVE_THREADS
|| !core_preload_index
)
109 threads
= index
->cache_nr
/ THREAD_COST
;
110 if ((index
->cache_nr
> 1) && (threads
< 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0))
115 trace2_region_enter("index", "preload", NULL
);
117 trace_performance_enter();
118 if (threads
> MAX_PARALLEL
)
119 threads
= MAX_PARALLEL
;
121 work
= DIV_ROUND_UP(index
->cache_nr
, threads
);
122 memset(&data
, 0, sizeof(data
));
124 memset(&pd
, 0, sizeof(pd
));
125 if (refresh_flags
& REFRESH_PROGRESS
&& isatty(2)) {
126 pd
.progress
= start_delayed_progress(_("Refreshing index"), index
->cache_nr
);
127 pthread_mutex_init(&pd
.mutex
, NULL
);
130 for (i
= 0; i
< threads
; i
++) {
131 struct thread_data
*p
= data
+i
;
136 copy_pathspec(&p
->pathspec
, pathspec
);
142 err
= pthread_create(&p
->pthread
, NULL
, preload_thread
, p
);
145 die(_("unable to create threaded lstat: %s"), strerror(err
));
147 for (i
= 0; i
< threads
; i
++) {
148 struct thread_data
*p
= data
+i
;
149 if (pthread_join(p
->pthread
, NULL
))
150 die("unable to join threaded lstat");
151 t2_sum_lstat
+= p
->t2_nr_lstat
;
153 stop_progress(&pd
.progress
);
156 /* earlier we made deep copies for each thread to work with */
157 for (i
= 0; i
< threads
; i
++)
158 clear_pathspec(&data
[i
].pathspec
);
161 trace_performance_leave("preload index");
163 trace2_data_intmax("index", NULL
, "preload/sum_lstat", t2_sum_lstat
);
164 trace2_region_leave("index", "preload", NULL
);
167 int repo_read_index_preload(struct repository
*repo
,
168 const struct pathspec
*pathspec
,
169 unsigned int refresh_flags
)
171 int retval
= repo_read_index(repo
);
173 preload_index(repo
->index
, pathspec
, refresh_flags
);