#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "object-store.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "alloc.h"
#include "tree-walk.h"
#include "repository.h"

const char *tree_type = "tree";
static int read_one_entry_opt(struct index_state *istate,
			      const struct object_id *oid,
			      const char *base, int baselen,
			      const char *pathname,
			      unsigned mode, int stage, int opt)
{
	int len;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	ce = make_empty_cache_entry(istate, baselen + len);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
	oidcpy(&ce->oid, oid);
	return add_index_entry(istate, ce, opt);
}
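
/*
 * read_tree_fn_t callback that adds the entry through the regular
 * add_index_entry() checks (ADD_CACHE_OK_TO_ADD), skipping only the
 * directory/file conflict check.
 */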
static int read_one_entry(const struct object_id *oid, struct strbuf *base,
			  const char *pathname, unsigned mode, int stage,
			  void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
				const char *pathname, unsigned mode, int stage,
				void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_JUST_APPEND);
}
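
/*
 * Walk one tree level: call fn for every entry that matches the
 * pathspec, and recurse into subtrees (and submodule commits) when fn
 * returns READ_TREE_RECURSIVE, extending "base" with the entry path.
 */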
static int read_tree_1(struct repository *r,
		       struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	struct object_id oid;
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(r->index, &entry,
							base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(&entry.oid, base,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			oidcpy(&oid, &entry.oid);
		else if (S_ISGITLINK(entry.mode)) {
			struct commit *commit;

			commit = lookup_commit(r, &entry.oid);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    oid_to_hex(&entry.oid),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    oid_to_hex(&entry.oid),
				    base->buf, entry.path);

			oidcpy(&oid, get_commit_tree_oid(commit));
		}
		else
			continue;

		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(r, lookup_tree(r, &oid),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}
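
/*
 * Walk "tree" recursively, prefixing every path with base[0..baselen)
 * and invoking fn on each entry that matches the pathspec.
 *
 * A hypothetical caller (illustrative sketch only; show_entry and ps
 * are not part of this file) could look like:
 *
 *	static int show_entry(const struct object_id *oid, struct strbuf *base,
 *			      const char *pathname, unsigned mode, int stage,
 *			      void *context)
 *	{
 *		printf("%06o %s %s%s\n", mode, oid_to_hex(oid),
 *		       base->buf, pathname);
 *		return S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0;
 *	}
 *
 *	read_tree_recursive(the_repository, tree, "", 0, 0, &ps,
 *			    show_entry, NULL);
 */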
int read_tree_recursive(struct repository *r,
			struct tree *tree,
			const char *base, int baselen,
			int stage, const struct pathspec *pathspec,
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	strbuf_add(&sb, base, baselen);
	ret = read_tree_1(r, tree, &sb, stage, pathspec, fn, context);
	strbuf_release(&sb);
	return ret;
}
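
/*
 * QSORT comparator: order cache entries the way the index expects,
 * by name and then by stage.
 */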
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}
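
/*
 * Populate the index from "tree" at the given stage.  If nothing is
 * in the index at that stage yet, entries are blindly appended and
 * the cache is sorted once at the end; otherwise each entry goes
 * through the slower checked insertion path.
 */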
int read_tree(struct repository *r, struct tree *tree, int stage,
	      struct pathspec *match, struct index_state *istate)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way, otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(r, tree, "", 0, stage, match, fn, istate);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&istate->cache_tree);
	QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
	return 0;
}
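
/*
 * Look up the in-core tree object for this oid, creating an
 * unparsed placeholder if it is not known yet.
 */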
struct tree *lookup_tree(struct repository *r, const struct object_id *oid)
{
	struct object *obj = lookup_object(r, oid->hash);
	if (!obj)
		return create_object(r, oid->hash,
				     alloc_tree_node(r));
	return object_as_type(r, obj, OBJ_TREE, 0);
}

int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;

	return 0;
}
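
/*
 * Read the tree object from the object store and parse it; with
 * quiet_on_missing, a missing object just returns -1 instead of
 * printing an error.
 */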
int parse_tree_gently(struct tree *item, int quiet_on_missing)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_object_file(&item->object.oid, &type, &size);
	if (!buffer)
		return quiet_on_missing ? -1 :
			error("Could not read %s",
			      oid_to_hex(&item->object.oid));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     oid_to_hex(&item->object.oid));
	}
	return parse_tree_buffer(item, buffer, size);
}

void free_tree_buffer(struct tree *tree)
{
	FREE_AND_NULL(tree->buffer);
	tree->size = 0;
	tree->object.parsed = 0;
}
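
/*
 * Resolve an object to a tree, peeling commits and tags until a tree
 * is reached; returns NULL if the chain does not end in a tree.
 */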
struct tree *parse_tree_indirect(const struct object_id *oid)
{
	struct object *obj = parse_object(the_repository, oid);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(get_commit_tree(((struct commit *)obj))->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(the_repository, &obj->oid);
	} while (1);
}