/*
 * Copyright (C) 2008 Linus Torvalds
 */
#include "thread-utils.h"
#include "repository.h"

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to 20 threads, and we want
 * to have at least 500 lstat's per thread for it to
 * be worth starting a thread.
 */
#define MAX_PARALLEL (20)
#define THREAD_COST (500)
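
/*
 * With these values, an index of 10,000 entries is split across
 * 10000 / 500 = 20 threads, 3,000 entries across 6, and anything
 * below 2 * 500 entries is not threaded at all (unless
 * GIT_TEST_PRELOAD_INDEX forces it).
 */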

struct progress_data {
	unsigned long n;
	struct progress *progress;
	pthread_mutex_t mutex;
};
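
/*
 * Per-thread state: each worker gets its own slice of the index
 * (offset/nr), a private copy of the pathspec, and a pointer to the
 * shared progress state.
 */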

struct thread_data {
	pthread_t pthread;
	struct index_state *index;
	struct pathspec pathspec;
	struct progress_data *progress;
	int offset, nr;
	int t2_nr_lstat;
};
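
/*
 * Worker body: walk one slice of the index and lstat() each entry,
 * marking entries whose stat data still matches as up to date.
 */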

static void *preload_thread(void *_data)
{
	int nr, last_nr;
	struct thread_data *p = _data;
	struct index_state *index = p->index;
	struct cache_entry **cep = index->cache + p->offset;
	struct cache_def cache = CACHE_DEF_INIT;
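
	/*
	 * The last slice handed out may reach past the end of the
	 * index; clamp this thread's share accordingly.
	 */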
	nr = p->nr;
	if (nr + p->offset > index->cache_nr)
		nr = index->cache_nr - p->offset;
	last_nr = nr;
	do {
		struct cache_entry *ce = *cep++;
		struct stat st;

		if (S_ISGITLINK(ce->ce_mode))
			continue;
		if (ce_skip_worktree(ce))
			continue;
		if (ce->ce_flags & CE_FSMONITOR_VALID)
			continue;
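		/*
		 * Take the shared mutex only once every 32 entries to
		 * keep progress reporting from serializing the workers.
		 */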
		if (p->progress && !(nr & 31)) {
			struct progress_data *pd = p->progress;

			pthread_mutex_lock(&pd->mutex);
			pd->n += last_nr - nr;
			display_progress(pd->progress, pd->n);
			pthread_mutex_unlock(&pd->mutex);
			last_nr = nr;
		}
		if (!ce_path_match(index, ce, &p->pathspec, NULL))
			continue;
		if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
			continue;
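		/*
		 * This is the expensive part: lstat() the path and, if the
		 * result still matches the cached stat data, mark the entry
		 * (and its fsmonitor state) as valid.
		 */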
		p->t2_nr_lstat++;
		if (lstat(ce->name, &st))
			continue;
		if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
			continue;
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(index, ce);
	} while (--nr > 0);
	if (p->progress) {
		struct progress_data *pd = p->progress;

		pthread_mutex_lock(&pd->mutex);
		display_progress(pd->progress, pd->n + last_nr);
		pthread_mutex_unlock(&pd->mutex);
	}
	cache_def_clear(&cache);
	return NULL;
}
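
/*
 * Warm up the stat data for the whole index by spreading the lstat()
 * calls over up to MAX_PARALLEL worker threads.
 */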

void preload_index(struct index_state *index,
		   const struct pathspec *pathspec,
		   unsigned int refresh_flags)
{
	int threads, i, work, offset;
	struct thread_data data[MAX_PARALLEL];
	struct progress_data pd;
	int t2_sum_lstat = 0;
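
	/*
	 * Bail out when threads are unavailable, preloading is disabled,
	 * or the index is too small for threading to pay off.
	 */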
	if (!HAVE_THREADS || !core_preload_index)
		return;

	threads = index->cache_nr / THREAD_COST;
	if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0))
		threads = 2;
	if (threads < 2)
		return;

	trace2_region_enter("index", "preload", NULL);

	trace_performance_enter();
	if (threads > MAX_PARALLEL)
		threads = MAX_PARALLEL;
	offset = 0;
	work = DIV_ROUND_UP(index->cache_nr, threads);
	memset(&data, 0, sizeof(data));

	memset(&pd, 0, sizeof(pd));
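	/* Only report progress when asked to and when stderr is a terminal. */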
	if (refresh_flags & REFRESH_PROGRESS && isatty(2)) {
		pd.progress = start_delayed_progress(_("Refreshing index"), index->cache_nr);
		pthread_mutex_init(&pd.mutex, NULL);
	}
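
	/*
	 * Hand each worker a contiguous slice of roughly `work` entries,
	 * starting at its own offset into the index.
	 */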
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		int err;

		p->index = index;
		if (pathspec)
			copy_pathspec(&p->pathspec, pathspec);
		p->offset = offset;
		p->nr = work;
		if (pd.progress)
			p->progress = &pd;
		offset += work;
		err = pthread_create(&p->pthread, NULL, preload_thread, p);
		if (err)
			die(_("unable to create threaded lstat: %s"), strerror(err));
	}
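	/* Wait for every worker and collect their lstat counts for trace2. */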
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		if (pthread_join(p->pthread, NULL))
			die("unable to join threaded lstat");
		t2_sum_lstat += p->t2_nr_lstat;
	}
	stop_progress(&pd.progress);

	if (pathspec) {
		/* earlier we made deep copies for each thread to work with */
		for (i = 0; i < threads; i++)
			clear_pathspec(&data[i].pathspec);
	}

	trace_performance_leave("preload index");

	trace2_data_intmax("index", NULL, "preload/sum_lstat", t2_sum_lstat);
	trace2_region_leave("index", "preload", NULL);
}
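
/*
 * Convenience wrapper: read the index and then preload its stat data.
 */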

int repo_read_index_preload(struct repository *repo,
			    const struct pathspec *pathspec,
			    unsigned int refresh_flags)
{
	int retval = repo_read_index(repo);

	preload_index(repo->index, pathspec, refresh_flags);
	return retval;
}