/*
 * name-hash.c
 *
 * Hashing names in the index state
 *
 * Copyright (C) 2008 Linus Torvalds
 */
#include "git-compat-util.h"
#include "environment.h"
#include "gettext.h"
#include "name-hash.h"
#include "object.h"
#include "read-cache-ll.h"
#include "thread-utils.h"
#include "trace.h"
#include "trace2.h"
#include "sparse-index.h"

struct dir_entry {
	struct hashmap_entry ent;
	struct dir_entry *parent;
	int nr;
	unsigned int namelen;
	char name[FLEX_ARRAY];
};

static int dir_entry_cmp(const void *cmp_data UNUSED,
			 const struct hashmap_entry *eptr,
			 const struct hashmap_entry *entry_or_key,
			 const void *keydata)
{
	const struct dir_entry *e1, *e2;
	const char *name = keydata;

	e1 = container_of(eptr, const struct dir_entry, ent);
	e2 = container_of(entry_or_key, const struct dir_entry, ent);
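
	/*
	 * Hashmap comparison callbacks return 0 on a match, so this reads
	 * "lengths differ, or the names differ ignoring case"; keydata is
	 * non-NULL for key lookups and NULL for entry-vs-entry comparisons.
	 */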
	return e1->namelen != e2->namelen || strncasecmp(e1->name,
			name ? name : e2->name, e1->namelen);
}

static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
		const char *name, unsigned int namelen, unsigned int hash)
{
	struct dir_entry key;
	hashmap_entry_init(&key.ent, hash);
	key.namelen = namelen;
	return hashmap_get_entry(&istate->dir_hash, &key, ent, name);
}

static struct dir_entry *find_dir_entry(struct index_state *istate,
		const char *name, unsigned int namelen)
{
	return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen));
}

static struct dir_entry *hash_dir_entry(struct index_state *istate,
		struct cache_entry *ce, int namelen)
{
	/*
	 * Throw each directory component in the hash for quick lookup
	 * during a git status. Directory components are stored without their
	 * closing slash.  Despite submodules being a directory, they never
	 * reach this point, because they are stored
	 * in index_state.name_hash (as ordinary cache_entries).
	 */
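
	/*
	 * Illustrative example: hashing the entry "a/b/file.c" adds (or
	 * finds) a dir_entry for "a/b", which in turn recursively adds one
	 * for "a"; the entries are chained via ->parent and hashed
	 * case-insensitively with memihash().
	 */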
	struct dir_entry *dir;

	/* get length of parent directory */
	while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1]))
		namelen--;
	if (namelen <= 0)
		return NULL;
	namelen--;

	/* lookup existing entry for that directory */
	dir = find_dir_entry(istate, ce->name, namelen);
	if (!dir) {
		/* not found, create it and add to hash table */
		FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
		hashmap_entry_init(&dir->ent, memihash(ce->name, namelen));
		dir->namelen = namelen;
		hashmap_add(&istate->dir_hash, &dir->ent);

		/* recursively add missing parent directories */
		dir->parent = hash_dir_entry(istate, ce, namelen);
	}
	return dir;
}

static void add_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/* Add reference to the directory entry (and parents if 0). */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(dir->nr++))
		dir = dir->parent;
}

static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/*
	 * Release reference to the directory entry. If 0, remove and continue
	 * with parent directory.
	 */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(--dir->nr)) {
		struct dir_entry *parent = dir->parent;
		hashmap_remove(&istate->dir_hash, &dir->ent, NULL);
		free(dir);
		dir = parent;
	}
}
static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->ce_flags & CE_HASHED)
		return;
	ce->ce_flags |= CE_HASHED;

	if (!S_ISSPARSEDIR(ce->ce_mode)) {
		hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce)));
		hashmap_add(&istate->name_hash, &ce->ent);
	}

	if (ignore_case)
		add_dir_entry(istate, ce);
}

static int cache_entry_cmp(const void *cmp_data UNUSED,
			   const struct hashmap_entry *eptr,
			   const struct hashmap_entry *entry_or_key,
			   const void *remove)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = container_of(eptr, const struct cache_entry, ent);
	ce2 = container_of(entry_or_key, const struct cache_entry, ent);

	/*
	 * For remove_name_hash, find the exact entry (pointer equality); for
	 * index_file_exists, find all entries with matching hash code and
	 * decide whether the entry matches in same_name.
	 */
	return remove ? !(ce1 == ce2) : 0;
}
static int lazy_try_threaded = 1;
static int lazy_nr_dir_threads;

/*
 * Set a minimum number of cache_entries that we will handle per
 * thread and use that to decide how many threads to run (up to
 * the number on the system).
 *
 * For guidance setting the lower per-thread bound, see:
 *     t/helper/test-lazy-init-name-hash --analyze
 */
#define LAZY_THREAD_COST (2000)

/*
 * We use n mutexes to guard n partitions of the "istate->dir_hash"
 * hashtable.  Since "find" and "insert" operations will hash to a
 * particular bucket and modify/search a single chain, we can say
 * that "all chains mod n" are guarded by the same mutex -- rather
 * than having a single mutex to guard the entire table.  (This does
 * require that we disable "rehashing" on the hashtable.)
 *
 * So, a larger value here decreases the probability of a collision
 * and the time that each thread must wait for the mutex.
 */
#define LAZY_MAX_MUTEX (32)

static pthread_mutex_t *lazy_dir_mutex_array;

/*
 * An array of lazy_entry items is used by the n threads in
 * the directory parse (first) phase to (lock-free) store the
 * intermediate results.  These values are then referenced by
 * the 2 threads in the second phase.
 */
struct lazy_entry {
	struct dir_entry *dir;
	unsigned int hash_dir;
	unsigned int hash_name;
};
/*
 * Decide if we want to use threads (if available) to load
 * the hash tables.  We set "lazy_nr_dir_threads" to zero when
 * it is not worth it.
 */
static int lookup_lazy_params(struct index_state *istate)
{
	int nr_cpus;

	lazy_nr_dir_threads = 0;

	if (!lazy_try_threaded)
		return 0;

	/*
	 * If we are respecting case, just use the original
	 * code to build the "istate->name_hash".  We don't
	 * need the complexity here.
	 */
	if (!ignore_case)
		return 0;

	nr_cpus = online_cpus();
	if (nr_cpus < 2)
		return 0;

	if (istate->cache_nr < 2 * LAZY_THREAD_COST)
		return 0;
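
	/*
	 * Illustrative example: with 8 CPUs and 5,000 cache entries,
	 * 5,000 < 8 * LAZY_THREAD_COST, so we scale down to
	 * 5,000 / 2,000 = 2 "dir" threads.
	 */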
	if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST)
		nr_cpus = istate->cache_nr / LAZY_THREAD_COST;
	lazy_nr_dir_threads = nr_cpus;
	return lazy_nr_dir_threads;
}

/*
 * Initialize n mutexes for use when searching and inserting
 * into "istate->dir_hash".  All "dir" threads are trying
 * to insert partial pathnames into the hash as they iterate
 * over their portions of the index, so lock contention is
 * high.
 *
 * However, the hashmap is going to put items into bucket
 * chains based on their hash values.  Use that to create n
 * mutexes and lock on mutex[bucket(hash) % n].  This will
 * decrease the collision rate by (hopefully) a factor of n.
 */
static void init_dir_mutex(void)
{
	int j;

	CALLOC_ARRAY(lazy_dir_mutex_array, LAZY_MAX_MUTEX);

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		init_recursive_mutex(&lazy_dir_mutex_array[j]);
}

static void cleanup_dir_mutex(void)
{
	int j;

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		pthread_mutex_destroy(&lazy_dir_mutex_array[j]);

	free(lazy_dir_mutex_array);
}

static void lock_dir_mutex(int j)
{
	pthread_mutex_lock(&lazy_dir_mutex_array[j]);
}

static void unlock_dir_mutex(int j)
{
	pthread_mutex_unlock(&lazy_dir_mutex_array[j]);
}
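
/*
 * Map a hash value to the mutex guarding its bucket chain: two inserts
 * that land in the same chain (same hashmap_bucket() % LAZY_MAX_MUTEX
 * value) serialize on one mutex, while inserts into other chains can
 * proceed in parallel.
 */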
static inline int compute_dir_lock_nr(
	const struct hashmap *map,
	unsigned int hash)
{
	return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX;
}

static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
	struct index_state *istate,
	struct dir_entry *parent,
	struct strbuf *prefix)
{
	struct dir_entry *dir;
	unsigned int hash;
	int lock_nr;

	/*
	 * Either we have a parent directory and path with slash(es)
	 * or the directory is an immediate child of the root directory.
	 */
	assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL));
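
	/*
	 * Compute the case-insensitive hash of the whole prefix.  With a
	 * parent dir_entry we can continue from its already-computed hash
	 * and fold in only the trailing "/component" bytes (memihash_cont)
	 * instead of rehashing the full path.
	 */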
	if (parent)
		hash = memihash_cont(parent->ent.hash,
			prefix->buf + parent->namelen,
			prefix->len - parent->namelen);
	else
		hash = memihash(prefix->buf, prefix->len);

	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
	lock_dir_mutex(lock_nr);

	dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
	if (!dir) {
		FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
		hashmap_entry_init(&dir->ent, hash);
		dir->namelen = prefix->len;
		dir->parent = parent;
		hashmap_add(&istate->dir_hash, &dir->ent);

		if (parent) {
			unlock_dir_mutex(lock_nr);

			/* All I really need here is an InterlockedIncrement(&(parent->nr)) */
			lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash);
			lock_dir_mutex(lock_nr);
			parent->nr++;
		}
	}

	unlock_dir_mutex(lock_nr);

	return dir;
}

/*
 * handle_range_1() and handle_range_dir() are derived from
 * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c
 * and handle the iteration over the entire array of index entries.
 * They use recursion for adjacent entries in the same parent
 * directory.
 */
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries);

static int handle_range_dir(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries,
	struct dir_entry **dir_new_out)
{
	int rc, k;
	int input_prefix_len = prefix->len;
	struct dir_entry *dir_new;

	dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix);

	strbuf_addch(prefix, '/');

	/*
	 * Scan forward in the index array for index entries having the same
	 * path prefix (that are also in this directory).
	 */
	if (k_start + 1 >= k_end)
		k = k_end;
	else if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0)
		k = k_start + 1;
	else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0)
		k = k_end;
	else {
		int begin = k_start;
		int end = k_end;
		assert(begin >= 0);
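		/*
		 * Binary search invariant: entries in [k_start, begin) share
		 * the prefix and entries in [end, k_end) do not; since the
		 * index is sorted by name, the loop converges on the first
		 * entry past this directory's contiguous run.
		 */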
		while (begin < end) {
			int mid = begin + ((end - begin) >> 1);
			int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len);
			if (cmp == 0) /* mid has same prefix; look in second part */
				begin = mid + 1;
			else if (cmp > 0) /* mid is past group; look in first part */
				end = mid;
			else
				die("cache entry out of order");
		}
		k = begin;
	}

	/*
	 * Recurse and process what we can of this subset [k_start, k).
	 */
	rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries);

	strbuf_setlen(prefix, input_prefix_len);

	*dir_new_out = dir_new;
	return rc;
}

static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries)
{
	int input_prefix_len = prefix->len;
	int k = k_start;

	while (k < k_end) {
		struct cache_entry *ce_k = istate->cache[k];
		const char *name, *slash;

		if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len))
			break;

		name = ce_k->name + prefix->len;
		slash = strchr(name, '/');

		if (slash) {
			int len = slash - name;
			int processed;
			struct dir_entry *dir_new;

			strbuf_add(prefix, name, len);
			processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new);
			if (processed) {
				k += processed;
				strbuf_setlen(prefix, input_prefix_len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries);
			k += processed;
			strbuf_setlen(prefix, input_prefix_len);
			continue;
		}

		/*
		 * It is too expensive to take a lock to insert "ce_k"
		 * into "istate->name_hash" and increment the ref-count
		 * on the "parent" dir.  So we defer actually updating
		 * permanent data structures until phase 2 (where we
		 * can change the locking requirements) and simply
		 * accumulate our current results into the lazy_entries
		 * data array.
		 *
		 * We do not need to lock the lazy_entries array because
		 * we have exclusive access to the cells in the range
		 * [k_start,k_end) that this thread was given.
		 */
		lazy_entries[k].dir = parent;
		if (parent) {
			lazy_entries[k].hash_name = memihash_cont(
					parent->ent.hash,
					ce_k->name + parent->namelen,
					ce_namelen(ce_k) - parent->namelen);
			lazy_entries[k].hash_dir = parent->ent.hash;
		} else {
			lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k));
		}

		k++;
	}
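
	/*
	 * Report how many entries in [k_start, k_end) were consumed so the
	 * caller can advance its own index by the same amount.
	 */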
	return k - k_start;
}

struct lazy_dir_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
	int k_start;
	int k_end;
};

static void *lazy_dir_thread_proc(void *_data)
{
	struct lazy_dir_thread_data *d = _data;
	struct strbuf prefix = STRBUF_INIT;
	handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries);
	strbuf_release(&prefix);
	return NULL;
}

struct lazy_name_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
};

static void *lazy_name_thread_proc(void *_data)
{
	struct lazy_name_thread_data *d = _data;
	int k;

	for (k = 0; k < d->istate->cache_nr; k++) {
		struct cache_entry *ce_k = d->istate->cache[k];
		ce_k->ce_flags |= CE_HASHED;
		hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name);
		hashmap_add(&d->istate->name_hash, &ce_k->ent);
	}

	return NULL;
}

static inline void lazy_update_dir_ref_counts(
	struct index_state *istate,
	struct lazy_entry *lazy_entries)
{
	int k;

	for (k = 0; k < istate->cache_nr; k++) {
		if (lazy_entries[k].dir)
			lazy_entries[k].dir->nr++;
	}
}

static void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
	int err;
	int nr_each;
	int k_start;
	int t;
	struct lazy_entry *lazy_entries;
	struct lazy_dir_thread_data *td_dir;
	struct lazy_name_thread_data *td_name;

	if (!HAVE_THREADS)
		return;

	k_start = 0;
	nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);
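
	/*
	 * Illustrative example: 10,000 cache entries split across 4 "dir"
	 * threads gives nr_each = 2,500; the k_start/k_end ranges below are
	 * clamped so the last thread never reads past cache_nr.
	 */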
	CALLOC_ARRAY(lazy_entries, istate->cache_nr);
	CALLOC_ARRAY(td_dir, lazy_nr_dir_threads);
	CALLOC_ARRAY(td_name, 1);

	init_dir_mutex();

	/*
	 * Phase 1:
	 * Build "istate->dir_hash" using n "dir" threads (and a read-only index).
	 */
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		td_dir_t->istate = istate;
		td_dir_t->lazy_entries = lazy_entries;
		td_dir_t->k_start = k_start;
		k_start += nr_each;
		if (k_start > istate->cache_nr)
			k_start = istate->cache_nr;
		td_dir_t->k_end = k_start;
		err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t);
		if (err)
			die(_("unable to create lazy_dir thread: %s"), strerror(err));
	}
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		if (pthread_join(td_dir_t->pthread, NULL))
			die("unable to join lazy_dir_thread");
	}

	/*
	 * Phase 2:
	 * Iterate over all index entries and add them to the "istate->name_hash"
	 * using a single "name" background thread.
	 * (Testing showed it wasn't worth running more than 1 thread for this.)
	 *
	 * Meanwhile, finish updating the parent directory ref-counts for each
	 * index entry using the current thread.  (This step is very fast and
	 * doesn't need threading.)
	 */
	td_name->istate = istate;
	td_name->lazy_entries = lazy_entries;
	err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name);
	if (err)
		die(_("unable to create lazy_name thread: %s"), strerror(err));

	lazy_update_dir_ref_counts(istate, lazy_entries);

	err = pthread_join(td_name->pthread, NULL);
	if (err)
		die(_("unable to join lazy_name thread: %s"), strerror(err));

	cleanup_dir_mutex();

	free(td_name);
	free(td_dir);
	free(lazy_entries);
}

static void lazy_init_name_hash(struct index_state *istate)
{
	if (istate->name_hash_initialized)
		return;
	trace_performance_enter();
	trace2_region_enter("index", "name-hash-init", istate->repo);
	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
	hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);

	if (lookup_lazy_params(istate)) {
		/*
		 * Disable item counting and automatic rehashing because
		 * we do per-chain (mod n) locking rather than whole hashmap
		 * locking and we need to prevent the table-size from changing
		 * and bucket items from being redistributed.
		 */
		hashmap_disable_item_counting(&istate->dir_hash);
		threaded_lazy_init_name_hash(istate);
		hashmap_enable_item_counting(&istate->dir_hash);
	} else {
		int nr;
		for (nr = 0; nr < istate->cache_nr; nr++)
			hash_index_entry(istate, istate->cache[nr]);
	}

	istate->name_hash_initialized = 1;
	trace2_region_leave("index", "name-hash-init", istate->repo);
	trace_performance_leave("initialize name hash");
}

/*
 * A test routine for t/helper/ sources.
 *
 * Returns the number of threads used or 0 when
 * the non-threaded code path was used.
 *
 * Requesting threading WILL NOT override guards
 * in lookup_lazy_params().
 */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded)
{
	lazy_nr_dir_threads = 0;
	lazy_try_threaded = try_threaded;

	lazy_init_name_hash(istate);

	return lazy_nr_dir_threads;
}

void add_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (istate->name_hash_initialized)
		hash_index_entry(istate, ce);
}

void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
		return;
	ce->ce_flags &= ~CE_HASHED;
	hashmap_remove(&istate->name_hash, &ce->ent, ce);

	if (ignore_case)
		remove_dir_entry(istate, ce);
}

static int slow_same_name(const char *name1, int len1, const char *name2, int len2)
{
	if (len1 != len2)
		return 0;

	while (len1) {
		unsigned char c1 = *name1++;
		unsigned char c2 = *name2++;
		len1--;
		if (c1 != c2) {
			c1 = toupper(c1);
			c2 = toupper(c2);
			if (c1 != c2)
				return 0;
		}
	}
	return 1;
}

static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase)
{
	int len = ce_namelen(ce);

	/*
	 * Always do exact compare, even if we want a case-ignoring comparison;
	 * we do the quick exact one first, because it will be the common case.
	 */
	if (len == namelen && !memcmp(name, ce->name, len))
		return 1;

	if (!icase)
		return 0;

	return slow_same_name(name, namelen, ce->name, len);
}

int index_dir_find(struct index_state *istate, const char *name, int namelen,
		   struct strbuf *canonical_path)
{
	struct dir_entry *dir;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, namelen, 0);
	dir = find_dir_entry(istate, name, namelen);

	if (canonical_path && dir && dir->nr) {
		strbuf_reset(canonical_path);
		strbuf_add(canonical_path, dir->name, dir->namelen);
	}

	return dir && dir->nr;
}
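
/*
 * Illustrative example for adjust_dirname_case() below: given an index
 * entry "dir/file" and an incoming path spelled "DIR/file", each
 * directory component of "name" is rewritten in place to the case
 * recorded in the index ("dir/"); the final path component is left
 * untouched.
 */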
void adjust_dirname_case(struct index_state *istate, char *name)
{
	const char *startPtr = name;
	const char *ptr = startPtr;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, strlen(name), 0);
	while (*ptr) {
		while (*ptr && *ptr != '/')
			ptr++;

		if (*ptr == '/') {
			struct dir_entry *dir;

			dir = find_dir_entry(istate, name, ptr - name);
			if (dir) {
				memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
				startPtr = ptr + 1;
			}
			ptr++;
		}
	}
}

struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
{
	struct cache_entry *ce;
	unsigned int hash = memihash(name, namelen);

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, namelen, icase);

	ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL,
					 struct cache_entry, ent);
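	/*
	 * Walk the remaining entries that collide on this hash code;
	 * cache_entry_cmp() deliberately treats all same-hash entries as
	 * lookup candidates (see above), so same_name() decides the real
	 * match.
	 */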
	hashmap_for_each_entry_from(&istate->name_hash, ce, ent) {
		if (same_name(ce, name, namelen, icase))
			return ce;
	}
	return NULL;
}

void free_name_hash(struct index_state *istate)
{
	if (!istate->name_hash_initialized)
		return;
	istate->name_hash_initialized = 0;

	hashmap_clear(&istate->name_hash);
	hashmap_clear_and_free(&istate->dir_hash, struct dir_entry, ent);
}