Teach fsck-cache to accept non-commits for reachability analysis.
[git/fastimport.git] / read-cache.c
blob 53f1da815bc2c23b25894663fa2cac17b08e64b2
/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include <stdarg.h>
#include "cache.h"

struct cache_entry **active_cache = NULL;
unsigned int active_nr = 0, active_alloc = 0;

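/*
 * Compare the stat data cached in a cache entry with the current
 * stat(2) result for the working tree file, and return a bitmask of
 * *_CHANGED flags for every field that differs.
 */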
int cache_match_stat(struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	if (ce->ce_mtime.sec != htonl(st->st_mtime))
		changed |= MTIME_CHANGED;
	if (ce->ce_ctime.sec != htonl(st->st_ctime))
		changed |= CTIME_CHANGED;

#ifdef NSEC
	/*
	 * nsec seems unreliable - not all filesystems support it, so
	 * as long as it is in the inode cache you get right nsec
	 * but after it gets flushed, you get zero nsec.
	 */
	if (ce->ce_mtime.nsec != htonl(st->st_mtim.tv_nsec))
		changed |= MTIME_CHANGED;
	if (ce->ce_ctime.nsec != htonl(st->st_ctim.tv_nsec))
		changed |= CTIME_CHANGED;
#endif

	if (ce->ce_uid != htonl(st->st_uid) ||
	    ce->ce_gid != htonl(st->st_gid))
		changed |= OWNER_CHANGED;
	/* We consider only the owner x bit to be relevant for "mode changes" */
	if (0100 & (ntohl(ce->ce_mode) ^ st->st_mode))
		changed |= MODE_CHANGED;
	if (ce->ce_dev != htonl(st->st_dev) ||
	    ce->ce_ino != htonl(st->st_ino))
		changed |= INODE_CHANGED;
	if (ce->ce_size != htonl(st->st_size))
		changed |= DATA_CHANGED;
	return changed;
}

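/*
 * Order two index entry names: first bytewise over the common length,
 * then shorter-before-longer, and finally by the flags word (which
 * carries the name length and stage bits).
 */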
int cache_name_compare(const char *name1, int flags1, const char *name2, int flags2)
{
	int len1 = flags1 & CE_NAMEMASK;
	int len2 = flags2 & CE_NAMEMASK;
	int len = len1 < len2 ? len1 : len2;
	int cmp;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	if (len1 < len2)
		return -1;
	if (len1 > len2)
		return 1;
	if (flags1 < flags2)
		return -1;
	if (flags1 > flags2)
		return 1;
	return 0;
}

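/*
 * Binary-search the active cache for a name.  Returns the position of
 * an exact match, or -pos-1 where pos is the place the entry would be
 * inserted to keep the cache sorted.
 */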
int cache_name_pos(const char *name, int namelen)
{
	int first, last;

	first = 0;
	last = active_nr;
	while (last > first) {
		int next = (last + first) >> 1;
		struct cache_entry *ce = active_cache[next];
		int cmp = cache_name_compare(name, namelen, ce->name, htons(ce->ce_flags));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

/* Remove entry, return true if there are more entries to go.. */
int remove_entry_at(int pos)
{
	active_nr--;
	if (pos >= active_nr)
		return 0;
	memmove(active_cache + pos, active_cache + pos + 1, (active_nr - pos) * sizeof(struct cache_entry *));
	return 1;
}

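/*
 * Drop every stage of the given path from the active cache, if present.
 */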
int remove_file_from_cache(char *path)
{
	int pos = cache_name_pos(path, strlen(path));
	if (pos < 0)
		pos = -pos-1;
	while (pos < active_nr && !strcmp(active_cache[pos]->name, path))
		remove_entry_at(pos);
	return 0;
}

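/* Do two cache entries refer to the same path name? */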
int same_name(struct cache_entry *a, struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}

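/*
 * Insert a cache entry at its sorted position.  An entry with the same
 * name and stage is simply replaced; a stage-0 (merged) entry evicts
 * any unmerged entries for the same path.  Otherwise a brand new entry
 * is only added when the caller passes ok_to_add.
 */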
int add_cache_entry(struct cache_entry *ce, int ok_to_add)
{
	int pos;

	pos = cache_name_pos(ce->name, htons(ce->ce_flags));

	/* existing match? Just replace it */
	if (pos >= 0) {
		active_cache[pos] = ce;
		return 0;
	}
	pos = -pos-1;

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries..
	 */
	if (pos < active_nr && ce_stage(ce) == 0) {
		while (same_name(active_cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_entry_at(pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;

	/* Make sure the array is big enough .. */
	if (active_nr == active_alloc) {
		active_alloc = alloc_nr(active_alloc);
		active_cache = xrealloc(active_cache, active_alloc * sizeof(struct cache_entry *));
	}

	/* Add it in.. */
	active_nr++;
	if (active_nr > pos)
		memmove(active_cache + pos + 1, active_cache + pos, (active_nr - pos - 1) * sizeof(ce));
	active_cache[pos] = ce;
	return 0;
}

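/*
 * Check the index header: signature, version, and the trailing SHA1
 * computed over everything but the final 20 bytes.
 */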
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
	SHA_CTX c;
	unsigned char sha1[20];

	if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
		return error("bad signature");
	if (hdr->hdr_version != htonl(2))
		return error("bad index version");
	SHA1_Init(&c);
	SHA1_Update(&c, hdr, size - 20);
	SHA1_Final(sha1, &c);
	if (memcmp(sha1, (void *)hdr + size - 20, 20))
		return error("bad index file sha1 signature");
	return 0;
}

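/*
 * Map the index file into memory and fill active_cache with pointers
 * into the mapping.  Returns the number of entries read, 0 if there is
 * no index file, or a negative value on error.
 */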
int read_cache(void)
{
	int fd, i;
	struct stat st;
	unsigned long size, offset;
	void *map;
	struct cache_header *hdr;

	errno = EBUSY;
	if (active_cache)
		return error("more than one cachefile");
	errno = ENOENT;
	sha1_file_directory = getenv(DB_ENVIRONMENT);
	if (!sha1_file_directory)
		sha1_file_directory = DEFAULT_DB_ENVIRONMENT;
	if (access(sha1_file_directory, X_OK) < 0)
		return error("no access to SHA1 file directory");
	fd = open(get_index_file(), O_RDONLY);
	if (fd < 0)
		return (errno == ENOENT) ? 0 : error("open failed");

	size = 0; // avoid gcc warning
	map = (void *)-1;
	if (!fstat(fd, &st)) {
		size = st.st_size;
		errno = EINVAL;
		if (size >= sizeof(struct cache_header) + 20)
			map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	}
	close(fd);
	if (-1 == (int)(long)map)
		return error("mmap failed");

	hdr = map;
	if (verify_hdr(hdr, size) < 0)
		goto unmap;

	active_nr = ntohl(hdr->hdr_entries);
	active_alloc = alloc_nr(active_nr);
	active_cache = calloc(active_alloc, sizeof(struct cache_entry *));

	offset = sizeof(*hdr);
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = map + offset;
		offset = offset + ce_size(ce);
		active_cache[i] = ce;
	}
	return active_nr;

unmap:
	munmap(map, size);
	errno = EINVAL;
	return error("verify header failed");
}

#define WRITE_BUFFER_SIZE 8192
static char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;

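/*
 * Buffered write: collect data in write_buffer, and whenever the buffer
 * fills up, feed it to the SHA1 context and flush it to the file.
 */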
static int ce_write(SHA_CTX *context, int fd, void *data, unsigned int len)
{
	while (len) {
		unsigned int buffered = write_buffer_len;
		unsigned int partial = WRITE_BUFFER_SIZE - buffered;
		if (partial > len)
			partial = len;
		memcpy(write_buffer + buffered, data, partial);
		buffered += partial;
		if (buffered == WRITE_BUFFER_SIZE) {
			SHA1_Update(context, write_buffer, WRITE_BUFFER_SIZE);
			if (write(fd, write_buffer, WRITE_BUFFER_SIZE) != WRITE_BUFFER_SIZE)
				return -1;
			buffered = 0;
		}
		write_buffer_len = buffered;
		len -= partial;
		data += partial;
	}
	return 0;
}

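/*
 * Flush whatever is still buffered, then append the final SHA1 of the
 * whole stream as the 20-byte index trailer.
 */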
static int ce_flush(SHA_CTX *context, int fd)
{
	unsigned int left = write_buffer_len;

	if (left) {
		write_buffer_len = 0;
		SHA1_Update(context, write_buffer, left);
	}

	/* Append the SHA1 signature at the end */
	SHA1_Final(write_buffer + left, context);
	left += 20;
	if (write(fd, write_buffer, left) != left)
		return -1;
	return 0;
}

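/*
 * Write out a complete index: header, all cache entries, and the
 * trailing SHA1 checksum, through the buffered ce_write()/ce_flush()
 * helpers.
 */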
int write_cache(int newfd, struct cache_entry **cache, int entries)
{
	SHA_CTX c;
	struct cache_header hdr;
	int i;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(2);
	hdr.hdr_entries = htonl(entries);

	SHA1_Init(&c);
	if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
		return -1;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (ce_write(&c, newfd, ce, ce_size(ce)) < 0)
			return -1;
	}
	return ce_flush(&c, newfd);
}
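
/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller reads the index, edits the in-memory array, and writes the
 * result back through a new descriptor.  How that descriptor is
 * obtained (e.g. a temporary or lock file for the index) is an
 * assumption here and is not shown above.
 *
 *	if (read_cache() < 0)
 *		return error("unable to read index");
 *	... call add_cache_entry() / remove_file_from_cache() as needed ...
 *	if (write_cache(newfd, active_cache, active_nr) < 0)
 *		return error("unable to write index");
 */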