tree.c 6.43 KB
Newer Older
1
#define NO_THE_INDEX_COMPATIBILITY_MACROS
2
#include "cache.h"
3
#include "cache-tree.h"
4 5
#include "tree.h"
#include "blob.h"
6 7
#include "commit.h"
#include "tag.h"
8
#include "tree-walk.h"
9 10 11

const char *tree_type = "tree";

12 13 14 15 16
static int read_one_entry_opt(struct index_state *istate,
			      const unsigned char *sha1,
			      const char *base, int baselen,
			      const char *pathname,
			      unsigned mode, int stage, int opt)
17
{
18 19 20 21 22 23 24 25 26
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
27
	ce = xcalloc(1, size);
28 29

	ce->ce_mode = create_ce_mode(mode);
30 31
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
32 33
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
34
	hashcpy(ce->oid.hash, sha1);
35
	return add_index_entry(istate, ce, opt);
36 37
}

38 39 40
static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
			  const char *pathname, unsigned mode, int stage,
			  void *context)
41
{
42 43
	struct index_state *istate = context;
	return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
44
				  mode, stage,
45 46 47 48 49 50 51
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there is no existing entries at
 * the stage that will conflict with the entry being added.
 */
52 53 54
static int read_one_entry_quick(const unsigned char *sha1, struct strbuf *base,
				const char *pathname, unsigned mode, int stage,
				void *context)
55
{
56 57
	struct index_state *istate = context;
	return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
58
				  mode, stage,
59
				  ADD_CACHE_JUST_APPEND);
60 61
}

/*
 * Recursive worker for read_tree_recursive(): walk every entry of
 * "tree", calling "fn" on each entry that matches "pathspec", and
 * descend into subtrees (and submodule commits) when "fn" asks for it
 * by returning READ_TREE_RECURSIVE.
 *
 * "base" holds the path prefix of the current tree; it is extended
 * with "<entry>/" before recursing and truncated back afterwards.
 *
 * Returns 0 on success, -1 on any error (unparseable tree, or a
 * negative return from "fn").
 */
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	struct object_id oid;
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		/*
		 * Once tree_entry_interesting() says everything from
		 * here on matches, we can stop re-checking each entry;
		 * tree entries are sorted, so "nothing further can
		 * match" is also final for this tree.
		 */
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(entry.oid->hash, base,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;	/* entry consumed, no recursion */
		case READ_TREE_RECURSIVE:
			break;		/* fall through to recurse below */
		default:
			return -1;	/* callback signalled an error */
		}

		/* Work out which tree object to recurse into. */
		if (S_ISDIR(entry.mode))
			oidcpy(&oid, entry.oid);
		else if (S_ISGITLINK(entry.mode)) {
			/*
			 * A submodule: recurse into the tree of the
			 * commit the gitlink points at.
			 */
			struct commit *commit;

			commit = lookup_commit(entry.oid);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			/* NOTE(review): assumes parse_commit() filled in
			 * commit->tree — confirm it cannot be NULL here. */
			oidcpy(&oid, &commit->tree->object.oid);
		}
		else
			continue;	/* blob that asked to recurse: ignore */

		/* Push "<entry>/" onto the prefix, recurse, then pop. */
		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(lookup_tree(&oid),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}

130 131
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
132
			int stage, const struct pathspec *pathspec,
133 134 135
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
136
	int ret;
137 138

	strbuf_add(&sb, base, baselen);
139
	ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
140 141 142 143
	strbuf_release(&sb);
	return ret;
}

144 145 146 147 148 149
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
150 151
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
				  ce2->name, ce2->ce_namelen, ce_stage(ce2));
152 153
}

154 155
int read_tree(struct tree *tree, int stage, struct pathspec *match,
	      struct index_state *istate)
156
{
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have cache entry at the stage.  If so,
	 * do it the original slow way, otherwise, append and then
	 * sort at the end.
	 */
175 176
	for (i = 0; !fn && i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
177 178 179 180 181 182
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
183
	err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
184 185 186 187 188 189
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entry -- we need to nuke the cache tree, though.
	 */
190 191
	cache_tree_free(&istate->cache_tree);
	QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
192
	return 0;
193 194
}

195
struct tree *lookup_tree(const struct object_id *oid)
196
{
197
	struct object *obj = lookup_object(oid->hash);
198
	if (!obj)
199
		return create_object(oid->hash, alloc_tree_node());
200
	return object_as_type(obj, OBJ_TREE, 0);
201 202
}

203 204
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
205 206 207
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
208 209 210
	item->buffer = buffer;
	item->size = size;

211 212 213
	return 0;
}

214
int parse_tree_gently(struct tree *item, int quiet_on_missing)
215
{
216
	 enum object_type type;
217 218 219 220 221
	 void *buffer;
	 unsigned long size;

	if (item->object.parsed)
		return 0;
brian m. carlson's avatar
brian m. carlson committed
222
	buffer = read_sha1_file(item->object.oid.hash, &type, &size);
223
	if (!buffer)
224 225
		return quiet_on_missing ? -1 :
			error("Could not read %s",
226
			     oid_to_hex(&item->object.oid));
227
	if (type != OBJ_TREE) {
228 229
		free(buffer);
		return error("Object %s not a tree",
230
			     oid_to_hex(&item->object.oid));
231
	}
232
	return parse_tree_buffer(item, buffer, size);
233
}
/*
 * Drop the raw tree payload and return "tree" to the unparsed state,
 * so a later parse_tree() re-reads it from the object store.
 */
void free_tree_buffer(struct tree *tree)
{
	tree->size = 0;
	tree->object.parsed = 0;
	FREE_AND_NULL(tree->buffer);
}

242
struct tree *parse_tree_indirect(const struct object_id *oid)
243
{
244
	struct object *obj = parse_object(oid);
245 246 247
	do {
		if (!obj)
			return NULL;
248
		if (obj->type == OBJ_TREE)
249
			return (struct tree *) obj;
250
		else if (obj->type == OBJ_COMMIT)
251
			obj = &(((struct commit *) obj)->tree->object);
252
		else if (obj->type == OBJ_TAG)
253 254 255 256
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
257
			parse_object(&obj->oid);
258 259
	} while (1);
}