/* Copyright (c) 2016-2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file parsecommon.c
 * \brief Common code to parse and validate various types of descriptors.
 **/

#include "parsecommon.h"
#include "torlog.h"
#include "util_format.h"

#define MIN_ANNOTATION A_PURPOSE
#define MAX_ANNOTATION A_UNKNOWN_

#define ALLOC_ZERO(sz) memarea_alloc_zero(area,sz)
#define ALLOC(sz) memarea_alloc(area,sz)
#define STRDUP(str) memarea_strdup(area,str)
#define STRNDUP(str,n) memarea_strndup(area,(str),(n))

#define RET_ERR(msg)                                      \
  STMT_BEGIN                                              \
    if (tok) token_clear(tok);                            \
    tok = ALLOC_ZERO(sizeof(directory_token_t));          \
    tok->tp = ERR_;                                       \
    tok->error = STRDUP(msg);                             \
    goto done_tokenizing;                                 \
  STMT_END
/** Free all resources allocated for <b>tok</b> */
void
token_clear(directory_token_t *tok)
{
  if (tok->key)
    crypto_pk_free(tok->key);
}
/** Read all tokens from a string between <b>start</b> and <b>end</b>, and add
 * them to <b>out</b>.  Parse according to the token rules in <b>table</b>.
 * Caller must free tokens in <b>out</b>.  If <b>end</b> is NULL, use the
 * entire string.
 */
int
tokenize_string(memarea_t *area,
                const char *start, const char *end, smartlist_t *out,
                token_rule_t *table, int flags)
{
  const char **s;
  directory_token_t *tok = NULL;
  int counts[NIL_];
  int i;
  int first_nonannotation;
  int prev_len = smartlist_len(out);
  tor_assert(area);

  s = &start;
  if (!end) {
    end = start+strlen(start);
  } else {
    /* it's only meaningful to check for nuls if we got an end-of-string ptr */
    if (memchr(start, '\0', end-start)) {
      log_warn(LD_DIR, "parse error: internal NUL character.");
      return -1;
    }
  }
  for (i = 0; i < NIL_; ++i)
    counts[i] = 0;

  SMARTLIST_FOREACH(out, const directory_token_t *, t, ++counts[t->tp]);

  while (*s < end && (!tok || tok->tp != EOF_)) {
    tok = get_next_token(area, s, end, table);
    if (tok->tp == ERR_) {
      log_warn(LD_DIR, "parse error: %s", tok->error);
      token_clear(tok);
      return -1;
    }
    ++counts[tok->tp];
    smartlist_add(out, tok);
    *s = eat_whitespace_eos(*s, end);
  }

  if (flags & TS_NOCHECK)
    return 0;

  if ((flags & TS_ANNOTATIONS_OK)) {
    first_nonannotation = -1;
    for (i = 0; i < smartlist_len(out); ++i) {
      tok = smartlist_get(out, i);
      if (tok->tp < MIN_ANNOTATION || tok->tp > MAX_ANNOTATION) {
        first_nonannotation = i;
        break;
      }
    }
    if (first_nonannotation < 0) {
      log_warn(LD_DIR, "parse error: item contains only annotations");
      return -1;
    }
    for (i = first_nonannotation; i < smartlist_len(out); ++i) {
      tok = smartlist_get(out, i);
      if (tok->tp >= MIN_ANNOTATION && tok->tp <= MAX_ANNOTATION) {
        log_warn(LD_DIR, "parse error: Annotations mixed with keywords");
        return -1;
      }
    }
    if ((flags & TS_NO_NEW_ANNOTATIONS)) {
      if (first_nonannotation != prev_len) {
        log_warn(LD_DIR, "parse error: Unexpected annotations.");
        return -1;
      }
    }
  } else {
    for (i = 0; i < smartlist_len(out); ++i) {
      tok = smartlist_get(out, i);
      if (tok->tp >= MIN_ANNOTATION && tok->tp <= MAX_ANNOTATION) {
        log_warn(LD_DIR, "parse error: no annotations allowed.");
        return -1;
      }
    }
    first_nonannotation = 0;
  }

  /* Enforce the per-keyword count and position constraints from the table. */
  for (i = 0; table[i].t; ++i) {
    if (counts[table[i].v] < table[i].min_cnt) {
      log_warn(LD_DIR, "Parse error: missing %s element.", table[i].t);
      return -1;
    }
    if (counts[table[i].v] > table[i].max_cnt) {
      log_warn(LD_DIR, "Parse error: too many %s elements.", table[i].t);
      return -1;
    }
    if (table[i].pos & AT_START) {
      if (smartlist_len(out) < 1 ||
          (tok = smartlist_get(out, first_nonannotation))->tp != table[i].v) {
        log_warn(LD_DIR, "Parse error: first item is not %s.", table[i].t);
        return -1;
      }
    }
    if (table[i].pos & AT_END) {
      if (smartlist_len(out) < 1 ||
          (tok = smartlist_get(out, smartlist_len(out)-1))->tp != table[i].v) {
        log_warn(LD_DIR, "Parse error: last item is not %s.", table[i].t);
        return -1;
      }
    }
  }
  return 0;
}
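
/* Illustrative usage sketch (not part of the original file): callers pair
 * tokenize_string() with a memarea for token storage and a token_rule_t
 * table describing the allowed keywords.  The keyword constant K_EXAMPLE
 * and the table-construction macros below are assumptions made for this
 * example; real tables live in the descriptor parsers that use this module.
 *
 *   static token_rule_t example_table[] = {
 *     T1_START("example", K_EXAMPLE, GE(1), NO_OBJ),
 *     END_OF_TABLE
 *   };
 *
 *   memarea_t *area = memarea_new();
 *   smartlist_t *tokens = smartlist_new();
 *   if (tokenize_string(area, str, NULL, tokens, example_table, 0) < 0)
 *     log_warn(LD_DIR, "parsing failed");
 *   SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
 *   smartlist_free(tokens);
 *   memarea_drop_all(area);
 */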
/** Helper: parse space-separated arguments from the string <b>s</b> ending at
 * <b>eol</b>, and store them in the args field of <b>tok</b>.  Store the
 * number of parsed elements into the n_args field of <b>tok</b>.  Allocate
 * all storage in <b>area</b>.  Return the number of arguments parsed, or
 * return -1 if there was an insanely high number of arguments. */
static inline int
get_token_arguments(memarea_t *area, directory_token_t *tok,
                    const char *s, const char *eol)
{
/** Largest number of arguments we'll accept to any token, ever. */
#define MAX_ARGS 512
  char *mem = memarea_strndup(area, s, eol-s);
  char *cp = mem;
  int j = 0;
  char *args[MAX_ARGS];

  while (*cp) {
    if (j == MAX_ARGS)
      return -1;
    args[j++] = cp;
    cp = (char*)find_whitespace(cp);
    if (!cp || !*cp)
      break; /* End of the line. */
    *cp++ = '\0';
    cp = (char*)eat_whitespace(cp);
  }
  tok->n_args = j;
  tok->args = memarea_memdup(area, args, j*sizeof(char*));
  return j;
#undef MAX_ARGS
}
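
/* Worked example (not from the original file): for a keyword line
 * "accept 80 443", get_next_token() hands this helper <b>s</b> pointing at
 * "80 443", so the loop above yields tok->n_args == 2 and
 * tok->args == {"80", "443"}, all duplicated into <b>area</b>.  More than
 * MAX_ARGS arguments make it return -1. */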
/** Helper: make sure that the token <b>tok</b> with keyword <b>kwd</b> obeys
 * the object syntax of <b>o_syn</b>.  Allocate all storage in <b>area</b>.
 * Return <b>tok</b> on success, or a new ERR_ token if the token didn't
 * conform to the syntax we wanted.
 **/
static inline directory_token_t *
token_check_object(memarea_t *area, const char *kwd,
                   directory_token_t *tok, obj_syntax o_syn)
{
  char ebuf[128];
  switch (o_syn) {
    case NO_OBJ:
      /* No object is allowed for this token. */
      if (tok->object_body) {
        tor_snprintf(ebuf, sizeof(ebuf), "Unexpected object for %s", kwd);
        RET_ERR(ebuf);
      }
      if (tok->key) {
        tor_snprintf(ebuf, sizeof(ebuf), "Unexpected public key for %s", kwd);
        RET_ERR(ebuf);
      }
      break;
    case NEED_OBJ:
      /* There must be a (non-key) object. */
      if (!tok->object_body) {
        tor_snprintf(ebuf, sizeof(ebuf), "Missing object for %s", kwd);
        RET_ERR(ebuf);
      }
      break;
    case NEED_KEY_1024: /* There must be a 1024-bit public key. */
    case NEED_SKEY_1024: /* There must be a 1024-bit private key. */
      if (tok->key && crypto_pk_num_bits(tok->key) != PK_BYTES*8) {
        tor_snprintf(ebuf, sizeof(ebuf), "Wrong size on key for %s: %d bits",
                     kwd, crypto_pk_num_bits(tok->key));
        RET_ERR(ebuf);
      }
      /* fall through */
    case NEED_KEY: /* There must be some kind of key. */
      if (!tok->key) {
        tor_snprintf(ebuf, sizeof(ebuf), "Missing public key for %s", kwd);
        RET_ERR(ebuf);
      }
      if (o_syn != NEED_SKEY_1024) {
        if (crypto_pk_key_is_private(tok->key)) {
          tor_snprintf(ebuf, sizeof(ebuf),
             "Private key given for %s, which wants a public key", kwd);
          RET_ERR(ebuf);
        }
      } else { /* o_syn == NEED_SKEY_1024 */
        if (!crypto_pk_key_is_private(tok->key)) {
          tor_snprintf(ebuf, sizeof(ebuf),
             "Public key given for %s, which wants a private key", kwd);
          RET_ERR(ebuf);
        }
      }
      break;
    case OBJ_OK:
      /* Anything goes with this token. */
      break;
  }

 done_tokenizing:
  return tok;
}
/** Helper function: read the next token from *s, advance *s to the end of the
 * token, and return the parsed token.  Parse *<b>s</b> according to the list
 * of tokens in <b>table</b>.
 */
directory_token_t *
get_next_token(memarea_t *area,
               const char **s, const char *eos, token_rule_t *table)
{
  /** Reject any object at least this big; it is probably an overflow, an
   * attack, a bug, or some other nonsense. */
#define MAX_UNPARSED_OBJECT_SIZE (128*1024)
  /** Reject any line at least this big; it is probably an overflow, an
   * attack, a bug, or some other nonsense. */
#define MAX_LINE_LENGTH (128*1024)

  const char *next, *eol, *obstart;
  size_t obname_len;
  int i;
  directory_token_t *tok;
  obj_syntax o_syn = NO_OBJ;
  char ebuf[128];
  const char *kwd = "";

  tor_assert(area);
  tok = ALLOC_ZERO(sizeof(directory_token_t));
  tok->tp = ERR_;

  /* Set *s to first token, eol to end-of-line, next to after first token */
  *s = eat_whitespace_eos(*s, eos); /* eat multi-line whitespace */
  tor_assert(eos >= *s);
  eol = memchr(*s, '\n', eos-*s);
  if (!eol)
    eol = eos;
  if (eol - *s > MAX_LINE_LENGTH) {
    RET_ERR("Line far too long");
  }

  next = find_whitespace_eos(*s, eol);

  if (!strcmp_len(*s, "opt", next-*s)) {
    /* Skip past an "opt" at the start of the line. */
    *s = eat_whitespace_eos_no_nl(next, eol);
    next = find_whitespace_eos(*s, eol);
  } else if (*s == eos) {  /* If no "opt", and end-of-line, line is invalid */
    RET_ERR("Unexpected EOF");
  }

  /* Search the table for the appropriate entry.  (I tried a binary search
   * instead, but it wasn't any faster.) */
  for (i = 0; table[i].t ; ++i) {
    if (!strcmp_len(*s, table[i].t, next-*s)) {
      /* We've found the keyword. */
      kwd = table[i].t;
      tok->tp = table[i].v;
      o_syn = table[i].os;
      *s = eat_whitespace_eos_no_nl(next, eol);
      /* We go ahead whether there are arguments or not, so that tok->args is
       * always set if we want arguments. */
      if (table[i].concat_args) {
        /* The keyword takes the line as a single argument */
        tok->args = ALLOC(sizeof(char*));
        tok->args[0] = STRNDUP(*s,eol-*s); /* Grab everything on line */
        tok->n_args = 1;
      } else {
        /* This keyword takes multiple arguments. */
        if (get_token_arguments(area, tok, *s, eol)<0) {
          tor_snprintf(ebuf, sizeof(ebuf),"Far too many arguments to %s", kwd);
          RET_ERR(ebuf);
        }
        *s = eol;
      }
      if (tok->n_args < table[i].min_args) {
        tor_snprintf(ebuf, sizeof(ebuf), "Too few arguments to %s", kwd);
        RET_ERR(ebuf);
      } else if (tok->n_args > table[i].max_args) {
        tor_snprintf(ebuf, sizeof(ebuf), "Too many arguments to %s", kwd);
        RET_ERR(ebuf);
      }
      break;
    }
  }

  if (tok->tp == ERR_) {
    /* No keyword matched; call it a "K_opt" or "A_unrecognized" */
    if (*s < eol && **s == '@')
      tok->tp = A_UNKNOWN_;
    else
      tok->tp = K_OPT;
    tok->args = ALLOC(sizeof(char*));
    tok->args[0] = STRNDUP(*s, eol-*s);
    tok->n_args = 1;
    o_syn = OBJ_OK;
  }

  /* Check whether there's an object present */
  *s = eat_whitespace_eos(eol, eos);  /* Scan from end of first line */
  tor_assert(eos >= *s);
  eol = memchr(*s, '\n', eos-*s);
  if (!eol || eol-*s<11 || strcmpstart(*s, "-----BEGIN ")) /* No object. */
    goto check_object;

  obstart = *s;           /* Set obstart to start of object spec */
  if (*s+16 >= eol || memchr(*s+11,'\0',eol-*s-16) || /* no short lines, */
      strcmp_len(eol-5, "-----", 5) ||           /* nuls or invalid endings */
      (eol-*s) > MAX_UNPARSED_OBJECT_SIZE) {     /* name too long */
    RET_ERR("Malformed object: bad begin line");
  }
  tok->object_type = STRNDUP(*s+11, eol-*s-16);
  obname_len = eol-*s-16; /* store objname length here to avoid a strlen() */
  *s = eol+1;    /* Set *s to possible start of object data (could be eos) */

  /* Go to the end of the object */
  next = tor_memstr(*s, eos-*s, "-----END ");
  if (!next) {
    RET_ERR("Malformed object: missing object end line");
  }
  tor_assert(eos >= next);
  eol = memchr(next, '\n', eos-next);
  if (!eol)  /* end-of-line marker, or eos if there's no '\n' */
    eol = eos;
  /* Validate the ending tag, which should be 9 + NAME + 5 + eol */
  if ((size_t)(eol-next) != 9+obname_len+5 ||
      strcmp_len(next+9, tok->object_type, obname_len) ||
      strcmp_len(eol-5, "-----", 5)) {
    tor_snprintf(ebuf, sizeof(ebuf), "Malformed object: mismatched end tag %s",
                 tok->object_type);
    ebuf[sizeof(ebuf)-1] = '\0';
    RET_ERR(ebuf);
  }
  if (next - *s > MAX_UNPARSED_OBJECT_SIZE)
    RET_ERR("Couldn't parse object: missing footer or object much too big.");

  if (!strcmp(tok->object_type, "RSA PUBLIC KEY")) { /* If it's a public key */
    tok->key = crypto_pk_new();
    if (crypto_pk_read_public_key_from_string(tok->key, obstart, eol-obstart))
      RET_ERR("Couldn't parse public key.");
  } else if (!strcmp(tok->object_type, "RSA PRIVATE KEY")) { /* private key */
    tok->key = crypto_pk_new();
    if (crypto_pk_read_private_key_from_string(tok->key, obstart, eol-obstart))
      RET_ERR("Couldn't parse private key.");
  } else { /* If it's something else, try to base64-decode it */
    int r;
    tok->object_body = ALLOC(next-*s); /* really, this is too much RAM. */
    r = base64_decode(tok->object_body, next-*s, *s, next-*s);
    if (r<0)
      RET_ERR("Malformed object: bad base64-encoded data");
    tok->object_size = r;
  }
  *s = eol;

 check_object:
  tok = token_check_object(area, kwd, tok, o_syn);

 done_tokenizing:
  return tok;
}

#undef RET_ERR
#undef ALLOC
#undef ALLOC_ZERO
#undef STRDUP
#undef STRNDUP
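
/* Illustrative note (not part of the original file): the object syntax
 * accepted by get_next_token() is the PEM-style framing used in directory
 * documents.  A keyword line followed by
 *
 *   -----BEGIN RSA PUBLIC KEY-----
 *   <base64 data>
 *   -----END RSA PUBLIC KEY-----
 *
 * sets tok->object_type to "RSA PUBLIC KEY" and, because that type is
 * special-cased above, parses the key into tok->key; any other object type
 * is base64-decoded into tok->object_body with its length stored in
 * tok->object_size. */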
/** Find the first token in <b>s</b> whose keyword is <b>keyword</b>; fail
 * with an assert if no such keyword is found.
 */
directory_token_t *
find_by_keyword_(smartlist_t *s, directory_keyword keyword,
                 const char *keyword_as_string)
{
  directory_token_t *tok = find_opt_by_keyword(s, keyword);
  if (PREDICT_UNLIKELY(!tok)) {
    log_err(LD_BUG, "Missing %s [%d] in directory object that should have "
            "been validated. Internal error.", keyword_as_string, (int)keyword);
    tor_assert(tok);
  }
  return tok;
}
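
/* Usage sketch (assumption: parsecommon.h provides a find_by_keyword()
 * convenience macro that passes the stringified keyword name to this
 * function; K_EXAMPLE is a hypothetical keyword constant):
 *
 *   directory_token_t *tok = find_by_keyword(tokens, K_EXAMPLE);
 *
 * tok is never NULL here; a missing keyword triggers the assertion above. */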
/** Find the first token in <b>s</b> whose keyword is <b>keyword</b>; return
 * NULL if no such keyword is found.
 */
directory_token_t *
find_opt_by_keyword(smartlist_t *s, directory_keyword keyword)
{
  SMARTLIST_FOREACH(s, directory_token_t *, t, if (t->tp == keyword) return t);
  return NULL;
}
/** If there are any directory_token_t entries in <b>s</b> whose keyword is
 * <b>k</b>, return a newly allocated smartlist_t containing all such entries,
 * in the same order in which they occur in <b>s</b>.  Otherwise return
 * NULL. */
smartlist_t *
find_all_by_keyword(const smartlist_t *s, directory_keyword k)
{
  smartlist_t *out = NULL;
  SMARTLIST_FOREACH(s, directory_token_t *, t,
                    if (t->tp == k) {
                      if (!out)
                        out = smartlist_new();
                      smartlist_add(out, t);
                    });
  return out;
}
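
/* Usage sketch (not from the original file; K_EXAMPLE is hypothetical): the
 * returned list aliases tokens still owned by <b>s</b>, so the caller frees
 * only the list itself:
 *
 *   smartlist_t *all = find_all_by_keyword(tokens, K_EXAMPLE);
 *   if (all) {
 *     SMARTLIST_FOREACH(all, directory_token_t *, t, handle_token(t));
 *     smartlist_free(all);
 *   }
 *
 * where handle_token() stands in for whatever the caller does with each
 * matching token. */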