split-index: do not invalidate cache-tree at read time
[git/mjg.git] / split-index.c (blob 33c0c4b6983f29bf29a4bcca9706180bd32b59ff)
#include "cache.h"
#include "split-index.h"
#include "ewah/ewok.h"
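
/*
 * Overview (not in the original file): a split index keeps most cache
 * entries in a shared base index and stores in the regular index only
 * what differs from that base.  The "link" extension handled below
 * names the base index by its SHA-1 and carries two EWAH bitmaps
 * describing which base entries were deleted or replaced.
 */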
struct split_index *init_split_index(struct index_state *istate)
{
	if (!istate->split_index) {
		istate->split_index = xcalloc(1, sizeof(*istate->split_index));
		istate->split_index->refcount = 1;
	}
	return istate->split_index;
}
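
/*
 * Parse the "link" extension payload: a 20-byte SHA-1 naming the base
 * index, optionally followed by an EWAH-compressed delete bitmap and
 * an EWAH-compressed replace bitmap covering the base entries.  An
 * extension that stops after the SHA-1 carries no bitmaps at all.
 */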
int read_link_extension(struct index_state *istate,
			const void *data_, unsigned long sz)
{
	const unsigned char *data = data_;
	struct split_index *si;
	int ret;

	if (sz < 20)
		return error("corrupt link extension (too short)");
	si = init_split_index(istate);
	hashcpy(si->base_sha1, data);
	data += 20;
	sz -= 20;
	if (!sz)
		return 0;
	si->delete_bitmap = ewah_new();
	ret = ewah_read_mmap(si->delete_bitmap, data, sz);
	if (ret < 0)
		return error("corrupt delete bitmap in link extension");
	data += ret;
	sz -= ret;
	si->replace_bitmap = ewah_new();
	ret = ewah_read_mmap(si->replace_bitmap, data, sz);
	if (ret < 0)
		return error("corrupt replace bitmap in link extension");
	if (ret != sz)
		return error("garbage at the end of link extension");
	return 0;
}
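
/* ewah_serialize_to() callback: append the serialized bitmap bytes to a strbuf */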
static int write_strbuf(void *user_data, const void *data, size_t len)
{
	struct strbuf *sb = user_data;
	strbuf_add(sb, data, len);
	return len;
}
int write_link_extension(struct strbuf *sb,
			 struct index_state *istate)
{
	struct split_index *si = istate->split_index;
	strbuf_add(sb, si->base_sha1, 20);
	if (!si->delete_bitmap && !si->replace_bitmap)
		return 0;
	ewah_serialize_to(si->delete_bitmap, write_strbuf, sb);
	ewah_serialize_to(si->replace_bitmap, write_strbuf, sb);
	return 0;
}
static void mark_base_index_entries(struct index_state *base)
{
	int i;

	/*
	 * To keep track of the shared entries between
	 * istate->base->cache[] and istate->cache[], base entry
	 * position is stored in each base entry. All positions start
	 * from 1 instead of 0, which is reserved to say "this is a new
	 * entry".
	 */
	for (i = 0; i < base->cache_nr; i++)
		base->cache[i]->index = i + 1;
}
static void mark_entry_for_delete(size_t pos, void *data)
{
	struct index_state *istate = data;
	if (pos >= istate->cache_nr)
		die("position for delete %d exceeds base index size %d",
		    (int)pos, istate->cache_nr);
	istate->cache[pos]->ce_flags |= CE_REMOVE;
	istate->split_index->nr_deletions = 1;
}
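
/*
 * ewah_each_bit() callback for the replace bitmap: base entry "pos" is
 * superseded by the next not-yet-consumed entry from the split index
 * (saved_cache[]); install that entry in its place and record its base
 * position in ->index so later writes can find it again.
 */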
static void replace_entry(size_t pos, void *data)
{
	struct index_state *istate = data;
	struct split_index *si = istate->split_index;
	struct cache_entry *dst, *src;

	if (pos >= istate->cache_nr)
		die("position for replacement %d exceeds base index size %d",
		    (int)pos, istate->cache_nr);
	if (si->nr_replacements >= si->saved_cache_nr)
		die("too many replacements (%d vs %d)",
		    si->nr_replacements, si->saved_cache_nr);
	dst = istate->cache[pos];
	if (dst->ce_flags & CE_REMOVE)
		die("entry %d is marked as both replaced and deleted",
		    (int)pos);
	src = si->saved_cache[si->nr_replacements];
	src->index = pos + 1;
	src->ce_flags |= CE_UPDATE_IN_BASE;
	free(dst);
	/* install the replacement in the merged array, not just in a local */
	istate->cache[pos] = src;
	si->nr_replacements++;
}
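
/*
 * Combine the freshly read split index with its base: the base entries
 * become the new cache[], the replace and delete bitmaps are applied
 * to them, and the remaining split entries (those after the
 * replacements) are added on top.  ADD_CACHE_KEEP_CACHE_TREE keeps the
 * cache-tree loaded from the split index from being invalidated while
 * those entries are replayed (see the commit subject).
 */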
void merge_base_index(struct index_state *istate)
{
	struct split_index *si = istate->split_index;
	unsigned int i;

	mark_base_index_entries(si->base);

	si->saved_cache = istate->cache;
	si->saved_cache_nr = istate->cache_nr;
	istate->cache_nr = si->base->cache_nr;
	istate->cache = NULL;
	istate->cache_alloc = 0;
	ALLOC_GROW(istate->cache, istate->cache_nr, istate->cache_alloc);
	memcpy(istate->cache, si->base->cache,
	       sizeof(*istate->cache) * istate->cache_nr);

	si->nr_deletions = 0;
	si->nr_replacements = 0;
	ewah_each_bit(si->replace_bitmap, replace_entry, istate);
	ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
	if (si->nr_deletions)
		remove_marked_cache_entries(istate);

	for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
		add_index_entry(istate, si->saved_cache[i],
				ADD_CACHE_OK_TO_ADD |
				ADD_CACHE_KEEP_CACHE_TREE |
				/*
				 * we may have to replay what
				 * merge-recursive.c:update_stages()
				 * does, which has this flag on
				 */
				ADD_CACHE_SKIP_DFCHECK);
		si->saved_cache[i] = NULL;
	}

	ewah_free(si->delete_bitmap);
	ewah_free(si->replace_bitmap);
	free(si->saved_cache);
	si->delete_bitmap = NULL;
	si->replace_bitmap = NULL;
	si->saved_cache = NULL;
	si->saved_cache_nr = 0;
}
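
/*
 * Build the delete and replace bitmaps by comparing istate->cache[]
 * against the base, then temporarily swap cache[] for the trimmed
 * entries[] list so the regular index writer emits only the entries
 * that are new or updated relative to the base.
 */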
void prepare_to_write_split_index(struct index_state *istate)
{
	struct split_index *si = init_split_index(istate);
	struct cache_entry **entries = NULL, *ce;
	int i, nr_entries = 0, nr_alloc = 0;

	si->delete_bitmap = ewah_new();
	si->replace_bitmap = ewah_new();

	if (si->base) {
		/* Go through istate->cache[] and mark CE_MATCHED to
		 * entry with positive index. We'll go through
		 * base->cache[] later to delete all entries in base
		 * that are not marked with either CE_MATCHED or
		 * CE_UPDATE_IN_BASE. If istate->cache[i] is a
		 * duplicate, deduplicate it.
		 */
		for (i = 0; i < istate->cache_nr; i++) {
			struct cache_entry *base;
			/* namelen is checked separately */
			const unsigned int ondisk_flags =
				CE_STAGEMASK | CE_VALID | CE_EXTENDED_FLAGS;
			unsigned int ce_flags, base_flags, ret;
			ce = istate->cache[i];
			if (!ce->index)
				continue;
			if (ce->index > si->base->cache_nr) {
				ce->index = 0;
				continue;
			}
			ce->ce_flags |= CE_MATCHED; /* or "shared" */
			base = si->base->cache[ce->index - 1];
			if (ce == base)
				continue;
			if (ce->ce_namelen != base->ce_namelen ||
			    strcmp(ce->name, base->name)) {
				ce->index = 0;
				continue;
			}
			ce_flags = ce->ce_flags;
			base_flags = base->ce_flags;
			/* only on-disk flags matter */
			ce->ce_flags &= ondisk_flags;
			base->ce_flags &= ondisk_flags;
			ret = memcmp(&ce->ce_stat_data, &base->ce_stat_data,
				     offsetof(struct cache_entry, name) -
				     offsetof(struct cache_entry, ce_stat_data));
			ce->ce_flags = ce_flags;
			base->ce_flags = base_flags;
			if (ret)
				ce->ce_flags |= CE_UPDATE_IN_BASE;
			free(base);
			si->base->cache[ce->index - 1] = ce;
		}
		for (i = 0; i < si->base->cache_nr; i++) {
			ce = si->base->cache[i];
			if ((ce->ce_flags & CE_REMOVE) ||
			    !(ce->ce_flags & CE_MATCHED))
				ewah_set(si->delete_bitmap, i);
			else if (ce->ce_flags & CE_UPDATE_IN_BASE) {
				ewah_set(si->replace_bitmap, i);
				ALLOC_GROW(entries, nr_entries+1, nr_alloc);
				entries[nr_entries++] = ce;
			}
		}
	}

	for (i = 0; i < istate->cache_nr; i++) {
		ce = istate->cache[i];
		if ((!si->base || !ce->index) && !(ce->ce_flags & CE_REMOVE)) {
			ALLOC_GROW(entries, nr_entries+1, nr_alloc);
			entries[nr_entries++] = ce;
		}
		ce->ce_flags &= ~CE_MATCHED;
	}

	/*
	 * take cache[] out temporarily, put entries[] in its place
	 * for writing
	 */
	si->saved_cache = istate->cache;
	si->saved_cache_nr = istate->cache_nr;
	istate->cache = entries;
	istate->cache_nr = nr_entries;
}
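
/* Undo the cache[] swap done by prepare_to_write_split_index() and drop the temporary bitmaps. */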
void finish_writing_split_index(struct index_state *istate)
{
	struct split_index *si = init_split_index(istate);

	ewah_free(si->delete_bitmap);
	ewah_free(si->replace_bitmap);
	si->delete_bitmap = NULL;
	si->replace_bitmap = NULL;
	free(istate->cache);
	istate->cache = si->saved_cache;
	istate->cache_nr = si->saved_cache_nr;
}
void discard_split_index(struct index_state *istate)
{
	struct split_index *si = istate->split_index;
	if (!si)
		return;
	istate->split_index = NULL;
	si->refcount--;
	if (si->refcount)
		return;
	if (si->base) {
		discard_index(si->base);
		free(si->base);
	}
	free(si);
}
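
/*
 * Dispose of an entry that is being dropped from istate->cache[]: if it
 * is still referenced from the base index, do not free it; mark it
 * CE_REMOVE instead so the next split-index write records it in the
 * delete bitmap.  Otherwise it is ours alone and can be freed.
 */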
void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->index &&
	    istate->split_index &&
	    istate->split_index->base &&
	    ce->index <= istate->split_index->base->cache_nr &&
	    ce == istate->split_index->base->cache[ce->index - 1])
		ce->ce_flags |= CE_REMOVE;
	else
		free(ce);
}
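
/*
 * When an entry that also lives in the base index is replaced in
 * istate->cache[], mirror the replacement into base->cache[] so the
 * base never keeps a stale (or freed) pointer.
 */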
void replace_index_entry_in_base(struct index_state *istate,
				 struct cache_entry *old,
				 struct cache_entry *new)
{
	if (old->index &&
	    istate->split_index &&
	    istate->split_index->base &&
	    old->index <= istate->split_index->base->cache_nr) {
		new->index = old->index;
		if (old != istate->split_index->base->cache[new->index - 1])
			free(istate->split_index->base->cache[new->index - 1]);
		istate->split_index->base->cache[new->index - 1] = new;
	}
}