[git.git] / bloom.c
blob e529f7605ca14d3fcb779412fc22e1e20d45ac20
#include "git-compat-util.h"
#include "bloom.h"
#include "diff.h"
#include "diffcore.h"
#include "hashmap.h"
#include "commit-graph.h"
#include "commit.h"
#include "commit-slab.h"
define_commit_slab(bloom_filter_slab, struct bloom_filter);

static struct bloom_filter_slab bloom_filters;
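
/*
 * A hashmap entry holding one changed path (or leading directory) as a
 * flex-array member; used to de-duplicate paths while computing a filter.
 */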
struct pathmap_hash_entry {
	struct hashmap_entry entry;
	const char path[FLEX_ARRAY];
};
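
/* Branch-free rotation of a 32-bit value left by 'count' bits (used by murmur3). */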
static uint32_t rotate_left(uint32_t value, int32_t count)
{
	uint32_t mask = 8 * sizeof(uint32_t) - 1;
	count &= mask;
	return ((value << count) | (value >> ((-count) & mask)));
}
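
/*
 * Return the byte mask selecting bit 'pos' within its BITS_PER_WORD-sized
 * word of Bloom filter data.
 */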
static inline unsigned char get_bitmask(uint32_t pos)
{
	return ((unsigned char)1) << (pos & (BITS_PER_WORD - 1));
}
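
/*
 * Sanity-check a filter offset read from the BIDX chunk: it must not point
 * past the data portion of the BDAT chunk.
 */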
static int check_bloom_offset(struct commit_graph *g, uint32_t pos,
			      uint32_t offset)
{
	/*
	 * Note that we allow offsets equal to the data size, which would set
	 * our pointers at one past the end of the chunk memory. This is
	 * necessary because the on-disk index points to the end of the
	 * entries (so we can compute size by comparing adjacent ones). And
	 * naturally the final entry's end is one-past-the-end of the chunk.
	 */
	if (offset <= g->chunk_bloom_data_size - BLOOMDATA_CHUNK_HEADER_SIZE)
		return 0;

	warning("ignoring out-of-range offset (%"PRIuMAX") for changed-path"
		" filter at pos %"PRIuMAX" of %s (chunk size: %"PRIuMAX")",
		(uintmax_t)offset, (uintmax_t)pos,
		g->filename, (uintmax_t)g->chunk_bloom_data_size);
	return -1;
}
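
/*
 * Locate the changed-path Bloom filter for the commit at 'graph_pos' in the
 * (possibly layered) commit-graph 'g'. The BIDX chunk stores one cumulative
 * end offset per commit, so a filter's bounds come from two adjacent index
 * entries; 'filter' is pointed directly at the mapped BDAT chunk data.
 * Returns 1 on success, 0 if no usable filter is stored.
 */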
static int load_bloom_filter_from_graph(struct commit_graph *g,
					struct bloom_filter *filter,
					uint32_t graph_pos)
{
	uint32_t lex_pos, start_index, end_index;

	while (graph_pos < g->num_commits_in_base)
		g = g->base_graph;

	/* The commit-graph layer this commit lives in doesn't carry Bloom filters. */
	if (!g->chunk_bloom_indexes)
		return 0;

	lex_pos = graph_pos - g->num_commits_in_base;

	end_index = get_be32(g->chunk_bloom_indexes + 4 * lex_pos);

	if (lex_pos > 0)
		start_index = get_be32(g->chunk_bloom_indexes + 4 * (lex_pos - 1));
	else
		start_index = 0;

	if (check_bloom_offset(g, lex_pos, end_index) < 0 ||
	    check_bloom_offset(g, lex_pos - 1, start_index) < 0)
		return 0;

	if (end_index < start_index) {
		warning("ignoring decreasing changed-path index offsets"
			" (%"PRIuMAX" > %"PRIuMAX") for positions"
			" %"PRIuMAX" and %"PRIuMAX" of %s",
			(uintmax_t)start_index, (uintmax_t)end_index,
			(uintmax_t)(lex_pos - 1), (uintmax_t)lex_pos,
			g->filename);
		return 0;
	}

	filter->len = end_index - start_index;
	filter->data = (unsigned char *)(g->chunk_bloom_data +
					 sizeof(unsigned char) * start_index +
					 BLOOMDATA_CHUNK_HEADER_SIZE);

	return 1;
}
/*
 * Calculate the murmur3 32-bit hash value for the given data
 * using the given seed.
 * Produces a uniformly distributed hash value.
 * Not considered to be cryptographically secure.
 * Implemented as described in https://en.wikipedia.org/wiki/MurmurHash#Algorithm
 */
uint32_t murmur3_seeded(uint32_t seed, const char *data, size_t len)
{
	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;
	const uint32_t r1 = 15;
	const uint32_t r2 = 13;
	const uint32_t m = 5;
	const uint32_t n = 0xe6546b64;
	int i;
	uint32_t k1 = 0;
	const char *tail;

	int len4 = len / sizeof(uint32_t);

	uint32_t k;
	for (i = 0; i < len4; i++) {
		uint32_t byte1 = (uint32_t)data[4*i];
		uint32_t byte2 = ((uint32_t)data[4*i + 1]) << 8;
		uint32_t byte3 = ((uint32_t)data[4*i + 2]) << 16;
		uint32_t byte4 = ((uint32_t)data[4*i + 3]) << 24;
		k = byte1 | byte2 | byte3 | byte4;
		k *= c1;
		k = rotate_left(k, r1);
		k *= c2;

		seed ^= k;
		seed = rotate_left(seed, r2) * m + n;
	}

	tail = (data + len4 * sizeof(uint32_t));

	/* Mix in the remaining one to three bytes, if any. */
	switch (len & (sizeof(uint32_t) - 1)) {
	case 3:
		k1 ^= ((uint32_t)tail[2]) << 16;
		/*-fallthrough*/
	case 2:
		k1 ^= ((uint32_t)tail[1]) << 8;
		/*-fallthrough*/
	case 1:
		k1 ^= ((uint32_t)tail[0]) << 0;
		k1 *= c1;
		k1 = rotate_left(k1, r1);
		k1 *= c2;
		seed ^= k1;
		break;
	}

	/* Finalization mix: force all bits of the hash to avalanche. */
	seed ^= (uint32_t)len;
	seed ^= (seed >> 16);
	seed *= 0x85ebca6b;
	seed ^= (seed >> 13);
	seed *= 0xc2b2ae35;
	seed ^= (seed >> 16);

	return seed;
}
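
/*
 * Derive the 'num_hashes' Bloom hashes for one path using double hashing
 * (Kirsch-Mitzenmacher): hash_i = hash0 + i * hash1, so only two murmur3
 * evaluations are needed regardless of the number of hash functions.
 */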
void fill_bloom_key(const char *data,
		    size_t len,
		    struct bloom_key *key,
		    const struct bloom_filter_settings *settings)
{
	int i;
	const uint32_t seed0 = 0x293ae76f;
	const uint32_t seed1 = 0x7e646e2c;
	const uint32_t hash0 = murmur3_seeded(seed0, data, len);
	const uint32_t hash1 = murmur3_seeded(seed1, data, len);

	key->hashes = (uint32_t *)xcalloc(settings->num_hashes, sizeof(uint32_t));
	for (i = 0; i < settings->num_hashes; i++)
		key->hashes[i] = hash0 + i * hash1;
}
void clear_bloom_key(struct bloom_key *key)
{
	FREE_AND_NULL(key->hashes);
}
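
/* Set, for each hash of 'key', the corresponding bit of 'filter'. */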
void add_key_to_filter(const struct bloom_key *key,
		       struct bloom_filter *filter,
		       const struct bloom_filter_settings *settings)
{
	int i;
	uint64_t mod = filter->len * BITS_PER_WORD;

	for (i = 0; i < settings->num_hashes; i++) {
		uint64_t hash_mod = key->hashes[i] % mod;
		uint64_t block_pos = hash_mod / BITS_PER_WORD;

		filter->data[block_pos] |= get_bitmask(hash_mod);
	}
}
void init_bloom_filters(void)
{
	init_bloom_filter_slab(&bloom_filters);
}
static int pathmap_cmp(const void *hashmap_cmp_fn_data UNUSED,
		       const struct hashmap_entry *eptr,
		       const struct hashmap_entry *entry_or_key,
		       const void *keydata UNUSED)
{
	const struct pathmap_hash_entry *e1, *e2;

	e1 = container_of(eptr, const struct pathmap_hash_entry, entry);
	e2 = container_of(entry_or_key, const struct pathmap_hash_entry, entry);

	return strcmp(e1->path, e2->path);
}
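
/*
 * A "too large" filter is stored as a single 0xFF byte: with every bit set,
 * bloom_filter_contains() always answers "maybe", so callers simply fall
 * back to computing the real diff.
 */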
static void init_truncated_large_filter(struct bloom_filter *filter)
{
	filter->data = xmalloc(1);
	filter->data[0] = 0xFF;
	filter->len = 1;
}
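
/*
 * Return the changed-path Bloom filter for commit 'c': reuse one already in
 * the slab, load one from the commit-graph if present, or (when
 * 'compute_if_not_present' is set) compute one from the diff against the
 * commit's first parent (or an empty tree for a parentless commit).
 * 'computed' reports whether and how the filter was computed, including
 * truncation for overly large or empty diffs.
 */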
struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
						 struct commit *c,
						 int compute_if_not_present,
						 const struct bloom_filter_settings *settings,
						 enum bloom_filter_computed *computed)
{
	struct bloom_filter *filter;
	int i;
	struct diff_options diffopt;

	if (computed)
		*computed = BLOOM_NOT_COMPUTED;

	if (!bloom_filters.slab_size)
		return NULL;

	filter = bloom_filter_slab_at(&bloom_filters, c);

	if (!filter->data) {
		uint32_t graph_pos;
		if (repo_find_commit_pos_in_graph(r, c, &graph_pos))
			load_bloom_filter_from_graph(r->objects->commit_graph,
						     filter, graph_pos);
	}

	if (filter->data && filter->len)
		return filter;
	if (!compute_if_not_present)
		return NULL;

	repo_diff_setup(r, &diffopt);
	diffopt.flags.recursive = 1;
	diffopt.detect_rename = 0;
	diffopt.max_changes = settings->max_changed_paths;
	diff_setup_done(&diffopt);

	/* ensure commit is parsed so we have parent information */
	repo_parse_commit(r, c);

	if (c->parents)
		diff_tree_oid(&c->parents->item->object.oid, &c->object.oid, "", &diffopt);
	else
		diff_tree_oid(NULL, &c->object.oid, "", &diffopt);
	diffcore_std(&diffopt);

	if (diff_queued_diff.nr <= settings->max_changed_paths) {
		struct hashmap pathmap = HASHMAP_INIT(pathmap_cmp, NULL);
		struct pathmap_hash_entry *e;
		struct hashmap_iter iter;

		for (i = 0; i < diff_queued_diff.nr; i++) {
			const char *path = diff_queued_diff.queue[i]->two->path;

			/*
			 * Add each leading directory of the changed file, i.e. for
			 * 'dir/subdir/file' add 'dir' and 'dir/subdir' as well, so
			 * the Bloom filter could be used to speed up commands like
			 * 'git log dir/subdir', too.
			 *
			 * Note that directories are added without the trailing '/'.
			 */
			do {
				char *last_slash = strrchr(path, '/');

				FLEX_ALLOC_STR(e, path, path);
				hashmap_entry_init(&e->entry, strhash(path));

				if (!hashmap_get(&pathmap, &e->entry, NULL))
					hashmap_add(&pathmap, &e->entry);
				else
					free(e);

				if (!last_slash)
					last_slash = (char *)path;
				*last_slash = '\0';

			} while (*path);

			diff_free_filepair(diff_queued_diff.queue[i]);
		}

		if (hashmap_get_size(&pathmap) > settings->max_changed_paths) {
			init_truncated_large_filter(filter);
			if (computed)
				*computed |= BLOOM_TRUNC_LARGE;
			goto cleanup;
		}

		filter->len = (hashmap_get_size(&pathmap) * settings->bits_per_entry + BITS_PER_WORD - 1) / BITS_PER_WORD;
		if (!filter->len) {
			if (computed)
				*computed |= BLOOM_TRUNC_EMPTY;
			filter->len = 1;
		}
		CALLOC_ARRAY(filter->data, filter->len);

		hashmap_for_each_entry(&pathmap, &iter, e, entry) {
			struct bloom_key key;
			fill_bloom_key(e->path, strlen(e->path), &key, settings);
			add_key_to_filter(&key, filter, settings);
			clear_bloom_key(&key);
		}

	cleanup:
		hashmap_clear_and_free(&pathmap, struct pathmap_hash_entry, entry);
	} else {
		for (i = 0; i < diff_queued_diff.nr; i++)
			diff_free_filepair(diff_queued_diff.queue[i]);
		init_truncated_large_filter(filter);

		if (computed)
			*computed |= BLOOM_TRUNC_LARGE;
	}

	if (computed)
		*computed |= BLOOM_COMPUTED;

	free(diff_queued_diff.queue);
	DIFF_QUEUE_CLEAR(&diff_queued_diff);

	return filter;
}
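
/*
 * Query 'filter' for 'key': returns 0 if the path definitely did not change,
 * 1 if it may have changed, and -1 if the filter holds no data (so the
 * caller cannot tell either way).
 */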
int bloom_filter_contains(const struct bloom_filter *filter,
			  const struct bloom_key *key,
			  const struct bloom_filter_settings *settings)
{
	int i;
	uint64_t mod = filter->len * BITS_PER_WORD;

	if (!mod)
		return -1;

	for (i = 0; i < settings->num_hashes; i++) {
		uint64_t hash_mod = key->hashes[i] % mod;
		uint64_t block_pos = hash_mod / BITS_PER_WORD;
		if (!(filter->data[block_pos] & get_bitmask(hash_mod)))
			return 0;
	}

	return 1;
}