[git/jnareb-git.git] / builtin-read-tree.c
blob 41f81102380bf0ec86e223d393d0ab61b3f854ed
/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include "cache.h"
#include "object.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "dir.h"
#include "builtin.h"

static struct object_list *trees;

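/*
 * Resolve the given object name to a tree (peeling commits and tags as
 * needed) and append it to the list of trees to be read.
 */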
static int list_tree(unsigned char *sha1)
{
	struct tree *tree = parse_tree_indirect(sha1);
	if (!tree)
		return -1;
	object_list_append(&tree->object, &trees);
	return 0;
}

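/*
 * Read the index, keeping only one entry per unmerged path and turning
 * it into a mode-0, stage-0 placeholder; the cache-tree is invalidated
 * for such paths.  Returns true if any unmerged entries were seen.
 */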
static int read_cache_unmerged(void)
{
	int i;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;

	read_cache();
	dst = active_cache;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			if (last && !strcmp(ce->name, last->name))
				continue;
			cache_tree_invalidate_path(active_cache_tree, ce->name);
			last = ce;
			ce->ce_mode = 0;
			ce->ce_flags &= ~htons(CE_STAGEMASK);
		}
		*dst++ = ce;
	}
	active_nr = dst - active_cache;
	return !!last;
}

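/*
 * Recursively prime a cache_tree from a tree object: record each
 * tree's sha1 and the number of blob entries it covers, including
 * those in its subtrees.
 */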
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	hashcpy(it->sha1, tree->object.sha1);
	init_tree_desc(&desc, tree->buffer, tree->size);
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(entry.sha1);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}

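/*
 * Replace the cache-tree of the index with one freshly built from the
 * first tree that was read.
 */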
static void prime_cache_tree(void)
{
	struct tree *tree = (struct tree *)trees->item;
	if (!tree)
		return;
	active_cache_tree = cache_tree();
	prime_cache_tree_rec(active_cache_tree, tree);
}

static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] [--exclude-per-directory=<gitignore>] [--index-output=<file>] <sha1> [<sha2> [<sha3>]])";

static struct lock_file lock_file;

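/*
 * Entry point for "git read-tree": parse the options, collect the
 * trees named on the command line, unpack or merge them into the
 * index, and write the updated index out under its lock.
 */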
int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
{
	int i, newfd, stage = 0;
	unsigned char sha1[20];
	struct unpack_trees_options opts;

	memset(&opts, 0, sizeof(opts));
	opts.head_idx = -1;

	setup_git_directory();
	git_config(git_default_config);

	newfd = hold_locked_index(&lock_file, 1);

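	/*
	 * Parse the command line; every non-option argument names a
	 * tree-ish to be read.
	 */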
	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		/* "-u" means "update", meaning that a merge will update
		 * the working tree.
		 */
		if (!strcmp(arg, "-u")) {
			opts.update = 1;
			continue;
		}

		if (!strcmp(arg, "-v")) {
			opts.verbose_update = 1;
			continue;
		}

		/* "-i" means "index only", meaning that a merge will
		 * not even look at the working tree.
		 */
		if (!strcmp(arg, "-i")) {
			opts.index_only = 1;
			continue;
		}

		if (!prefixcmp(arg, "--index-output=")) {
			set_alternate_index_output(arg + 15);
			continue;
		}

		/* "--prefix=<subdirectory>/" means keep the current index
		 * entries and put the entries from the tree under the
		 * given subdirectory.
		 */
		if (!prefixcmp(arg, "--prefix=")) {
			if (stage || opts.merge || opts.prefix)
				usage(read_tree_usage);
			opts.prefix = arg + 9;
			opts.merge = 1;
			stage = 1;
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			continue;
		}

		/* This differs from "-m" in that we'll silently ignore
		 * unmerged entries and overwrite working tree files that
		 * correspond to them.
		 */
		if (!strcmp(arg, "--reset")) {
			if (stage || opts.merge || opts.prefix)
				usage(read_tree_usage);
			opts.reset = 1;
			opts.merge = 1;
			stage = 1;
			read_cache_unmerged();
			continue;
		}

		if (!strcmp(arg, "--trivial")) {
			opts.trivial_merges_only = 1;
			continue;
		}

		if (!strcmp(arg, "--aggressive")) {
			opts.aggressive = 1;
			continue;
		}

		/* "-m" stands for "merge", meaning we start in stage 1 */
		if (!strcmp(arg, "-m")) {
			if (stage || opts.merge || opts.prefix)
				usage(read_tree_usage);
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			stage = 1;
			opts.merge = 1;
			continue;
		}

		if (!prefixcmp(arg, "--exclude-per-directory=")) {
			struct dir_struct *dir;

			if (opts.dir)
				die("more than one --exclude-per-directory are given.");

			dir = xcalloc(1, sizeof(*opts.dir));
			dir->show_ignored = 1;
			dir->exclude_per_dir = arg + 24;
			opts.dir = dir;
			/* We do not need to nor want to do read-directory
			 * here; we are merely interested in reusing the
			 * per directory ignore stack mechanism.
			 */
			continue;
		}

		/* using -u and -i at the same time makes no sense */
		if (1 < opts.index_only + opts.update)
			usage(read_tree_usage);

		if (get_sha1(arg, sha1))
			die("Not a valid object name %s", arg);
		if (list_tree(sha1) < 0)
			die("failed to unpack tree object %s", arg);
		stage++;
	}
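	/*
	 * Checks that need the full option set: -u and -i only make
	 * sense for a merge, and --exclude-per-directory only when the
	 * working tree is being updated.
	 */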
	if ((opts.update||opts.index_only) && !opts.merge)
		usage(read_tree_usage);
	if ((opts.dir && !opts.update))
		die("--exclude-per-directory is meaningless unless -u");

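	/*
	 * With --prefix, the tree is read into the index under the
	 * given subdirectory; refuse to proceed if the index already
	 * has entries occupying that name, either as a subdirectory or
	 * as a file.
	 */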
	if (opts.prefix) {
		int pfxlen = strlen(opts.prefix);
		int pos;
		if (opts.prefix[pfxlen-1] != '/')
			die("prefix must end with /");
		if (stage != 2)
			die("binding merge takes only one tree");
		pos = cache_name_pos(opts.prefix, pfxlen);
		if (0 <= pos)
			die("corrupt index file");
		pos = -pos-1;
		if (pos < active_nr &&
		    !strncmp(active_cache[pos]->name, opts.prefix, pfxlen))
			die("subdirectory '%s' already exists.", opts.prefix);
		pos = cache_name_pos(opts.prefix, pfxlen-1);
		if (0 <= pos)
			die("file '%.*s' already exists.",
			    pfxlen-1, opts.prefix);
		opts.pos = -1 - pos;
	}

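	/*
	 * Choose the merge function by the number of trees given: one
	 * tree is a one-way merge (or a bind merge under --prefix), two
	 * trees a two-way merge, and three or more a three-way merge.
	 */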
	if (opts.merge) {
		if (stage < 2)
			die("just how do you expect me to merge %d trees?", stage-1);
		switch (stage - 1) {
		case 1:
			opts.fn = opts.prefix ? bind_merge : oneway_merge;
			break;
		case 2:
			opts.fn = twoway_merge;
			break;
		case 3:
		default:
			opts.fn = threeway_merge;
			cache_tree_free(&active_cache_tree);
			break;
		}

		if (stage - 1 >= 3)
			opts.head_idx = stage - 2;
		else
			opts.head_idx = 1;
	}

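	/* Unpack (and, with -m/--reset, merge) the collected trees into the index. */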
	unpack_trees(trees, &opts);

	/*
	 * When reading only one tree (either the most basic form,
	 * "-m ent" or "--reset ent" form), we can obtain a fully
	 * valid cache-tree because the index must match exactly
	 * what came from the tree.
	 */
	if (trees && trees->item && !opts.prefix && (!opts.merge || (stage == 2))) {
		cache_tree_free(&active_cache_tree);
		prime_cache_tree();
	}

	if (write_cache(newfd, active_cache, active_nr) ||
	    close(newfd) || commit_locked_index(&lock_file))
		die("unable to write new index file");
	return 0;
}