#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree-walk.h"

const char *tree_type = "tree";
static int read_one_entry_opt(const unsigned char *sha1, const char *base,
			      int baselen, const char *pathname,
			      unsigned mode, int stage, int opt)
{
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
	ce = xcalloc(1, size);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(baselen + len, stage);
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
	hashcpy(ce->sha1, sha1);
	return add_cache_entry(ce, opt);
}
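
/*
 * Callback for the slow path: each entry is inserted at its sorted
 * position via add_cache_entry(), with only the directory/file
 * conflict check skipped (ADD_CACHE_SKIP_DFCHECK).
 */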
static int read_one_entry(const unsigned char *sha1, const char *base,
			  int baselen, const char *pathname,
			  unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}
/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const unsigned char *sha1, const char *base,
				int baselen, const char *pathname,
				unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_JUST_APPEND);
}
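
/*
 * Does the entry "path" under "base" match any of the pathspecs in
 * "paths"?  A NULL pathspec list matches everything; a pathspec that
 * is a prefix of the entry's path only matches on a directory
 * boundary.  Returns 1 on a match, 0 otherwise.
 */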
static int match_tree_entry(const char *base, int baselen, const char *path,
			    unsigned int mode, const char **paths)
{
	const char *match;
	int pathlen;

	if (!paths)
		return 1;
	pathlen = strlen(path);
	while ((match = *paths++) != NULL) {
		int matchlen = strlen(match);

		if (baselen >= matchlen) {
			/* If it doesn't match, move along... */
			if (strncmp(base, match, matchlen))
				continue;

			/* pathspecs match only at the directory boundaries */
			if (!matchlen ||
			    base[matchlen] == '/' ||
			    match[matchlen - 1] == '/')
				return 1;
			continue;
		}

		/* Does the base match? */
		if (strncmp(base, match, baselen))
			continue;

		match += baselen;
		matchlen -= baselen;

		if (pathlen > matchlen)
			continue;

		if (matchlen > pathlen) {
			if (match[pathlen] != '/')
				continue;
			if (!S_ISDIR(mode))
				continue;
		}

		if (strncmp(path, match, pathlen))
			continue;

		return 1;
	}
	return 0;
}
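
/*
 * Walk a tree, invoking "fn" on every entry that matches the
 * pathspecs.  When the callback returns READ_TREE_RECURSIVE for a
 * directory, the walk descends into it with an extended "base".
 *
 * A minimal sketch of a caller (illustrative only; show_entry is a
 * hypothetical callback, not part of this file):
 *
 *	static int show_entry(const unsigned char *sha1, const char *base,
 *			      int baselen, const char *pathname,
 *			      unsigned mode, int stage, void *context)
 *	{
 *		printf("%.*s%s\n", baselen, base, pathname);
 *		return S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0;
 *	}
 *
 *	...
 *	read_tree_recursive(tree, "", 0, 0, NULL, show_entry, NULL);
 */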
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
			int stage, const char **match,
			read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (!match_tree_entry(base, baselen, entry.path, entry.mode, match))
			continue;

		switch (fn(entry.sha1, base, baselen, entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}
		if (S_ISDIR(entry.mode)) {
			int retval;
			char *newbase;
			unsigned int pathlen = tree_entry_len(entry.path, entry.sha1);

			newbase = xmalloc(baselen + 1 + pathlen);
			memcpy(newbase, base, baselen);
			memcpy(newbase + baselen, entry.path, pathlen);
			newbase[baselen + pathlen] = '/';
			retval = read_tree_recursive(lookup_tree(entry.sha1),
						     newbase,
						     baselen + pathlen + 1,
						     stage, match, fn, context);
			free(newbase);
			if (retval)
				return -1;
			continue;
		}
	}
	return 0;
}
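
/*
 * qsort() comparator for the index: active_cache is an array of
 * cache_entry pointers, so each argument points at a pointer.
 */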
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_compare(ce1->name, ce1->ce_flags,
				  ce2->name, ce2->ce_flags);
}
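
/*
 * Populate the index from "tree" at the given stage.  If the index
 * already has entries at that stage we take the slow, checked path;
 * otherwise entries are blindly appended and the index is sorted
 * once at the end.  A typical call (per the comment below, existing
 * callers use stage 1) would be read_tree(tree, 1, NULL).
 */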
int read_tree(struct tree *tree, int stage, const char **match)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way, otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn, NULL);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&active_cache_tree);
	qsort(active_cache, active_nr, sizeof(active_cache[0]),
	      cmp_cache_name_compare);
	return 0;
}
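
/*
 * Find or create the in-core object for "sha1" as a tree.  The
 * returned object may not be parsed yet; on a type mismatch an
 * error is printed and NULL is returned.
 */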
struct tree *lookup_tree(const unsigned char *sha1)
{
	struct object *obj = lookup_object(sha1);
	if (!obj)
		return create_object(sha1, OBJ_TREE, alloc_tree_node());
	if (!obj->type)
		obj->type = OBJ_TREE;
	if (obj->type != OBJ_TREE) {
		error("Object %s is a %s, not a tree",
		      sha1_to_hex(sha1), typename(obj->type));
		return NULL;
	}
	return (struct tree *) obj;
}
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;
	return 0;
}
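
/*
 * Read the tree object from the object database and hang its buffer
 * off the in-core tree, so that tree-walking code can use it.
 */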
int parse_tree(struct tree *item)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_sha1_file(item->object.sha1, &type, &size);
	if (!buffer)
		return error("Could not read %s",
			     sha1_to_hex(item->object.sha1));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     sha1_to_hex(item->object.sha1));
	}
	return parse_tree_buffer(item, buffer, size);
}
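
/*
 * Peel "sha1" down to a tree: commits resolve to their tree, tags to
 * whatever they point at, repeatedly, until we arrive at a tree (or
 * at something that cannot be peeled, in which case NULL is
 * returned).
 *
 * An illustrative use, resolving a ref-ish name to its tree (error
 * handling elided):
 *
 *	unsigned char sha1[20];
 *	if (!get_sha1("HEAD", sha1)) {
 *		struct tree *tree = parse_tree_indirect(sha1);
 *		...
 *	}
 */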
struct tree *parse_tree_indirect(const unsigned char *sha1)
{
	struct object *obj = parse_object(sha1);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(((struct commit *) obj)->tree->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(obj->sha1);
	} while (1);
}