lib: Convert [up|low]case.dat to C
[Samba.git] / buildtools / wafadmin / Tools / preproc.py
blob6c49326ec3e5945137972c5fab97226291e1118c
1 #!/usr/bin/env python
2 # encoding: utf-8
3 # Thomas Nagy, 2006-2009 (ita)
5 """
6 C/C++ preprocessor for finding dependencies
8 Reasons for using the Waf preprocessor by default
9 1. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files)
10 2. Not all compilers provide .d files for obtaining the dependencies (portability)
11 3. A naive file scanner will not catch the constructs such as "#include foo()"
12 4. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything)
14 Regarding the speed concerns:
15 a. the preprocessing is performed only when files must be compiled
16 b. the macros are evaluated only for #if/#elif/#include
17 c. the time penalty is about 10%
18 d. system headers are not scanned
20 Now if you do not want the Waf preprocessor, the tool "gccdeps" uses the .d files produced
21 during the compilation to track the dependencies (useful when used with the boost libraries).
22 It only works with gcc though, and it cannot be used with Qt builds. A dumb
23 file scanner will be added in the future, so we will have most behaviours.
24 """
25 # TODO: more varargs, pragma once
26 # TODO: dumb file scanner tracking all includes
28 import re, sys, os, string
29 import Logs, Build, Utils
30 from Logs import debug, error
31 import traceback
class PreprocError(Utils.WafError):
	"""Error raised when preprocessing a c/c++ file fails (parse or evaluation error)."""
	pass
# marker pushed onto the line stack to signal the end of an included file
# (c_parser.start pops the include stack when it sees this token)
POPFILE = '-'

recursion_limit = 5000
"do not loop too much on header inclusion"

go_absolute = 0
"set to 1 to track headers on files in /usr/include - else absolute paths are ignored"

# default system include path; not meaningful on windows
standard_includes = ['/usr/include']
if sys.platform == "win32":
	standard_includes = []

use_trigraphs = 0
'apply the trigraph rules first'

strict_quotes = 0
"Keep <> for system includes (do not search for those includes)"
# c++ alternative operator spellings mapped to their symbolic form
# (the closing brace of this dict was missing - restored)
g_optrans = {
	'not':'!',
	'and':'&&',
	'bitand':'&',
	'and_eq':'&=',
	'or':'||',
	'bitor':'|',
	'or_eq':'|=',
	'xor':'^',
	'xor_eq':'^=',
	'compl':'~',
	}
"these ops are for c++, to reset, set an empty dict"
# ignore #warning and #error
# match one preprocessor directive per line: groups are (hash, keyword, rest)
re_lines = re.compile(\
	'^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$',
	re.IGNORECASE | re.MULTILINE)

# a macro name at the start of a string
re_mac = re.compile("^[a-zA-Z_]\w*")
# a function-like macro: identifier immediately followed by '('
re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
# argument of "#pragma once"
re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE)
# backslash-newline line continuations
re_nl = re.compile('\\\\\r*\n', re.MULTILINE)
# comments (group 1) vs string/char literals and other code (group 2),
# used with the repl() callback below to strip comments safely
re_cpp = re.compile(
	r"""(/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)|//[^\n]*|("(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|.[^/"'\\]*)""",
	re.MULTILINE)
# trigraph translation table: '??=' -> '#', '??-' -> '~', etc
trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')]

# single-letter C escape sequences mapped to their ascii codes
# bugfix: \f (form feed) is 12 and \v (vertical tab) is 11 in C/ascii;
# the two values were swapped
chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':12, 'v':11, 'r':13, '\\':92, "'":39}
# token type tags, used both as identifiers and as named groups in re_clexer
NUM = 'i'
OP = 'O'
IDENT = 'T'
STR = 's'
CHAR = 'c'

tok_types = [NUM, STR, IDENT, OP]
# one regex alternative per token type: numbers (hex/char/float/octal/decimal),
# string literals, identifiers, and multi/single-character operators
# (the closing bracket of this list was missing - restored)
exp_types = [
	r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""",
	r'L?"([^"\\]|\\.)*"',
	r'[a-zA-Z_]\w*',
	r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]',
]
# combined lexer: each alternative is wrapped in a named group matching its tag
re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M)
# states for the #if/#elif/#else stack kept in c_parser.state
accepted = 'a'   # condition evaluated to true: process the branch
ignored = 'i'    # condition false so far: an #elif/#else may still activate it
undefined = 'u'  # #if just entered, condition not evaluated yet
skipped = 's'    # a previous branch was taken: skip everything until #endif
def repl(m):
	"""re_cpp substitution callback: comments become a space, literals are kept."""
	if m.group(1):
		# a /* */ or // comment matched: collapse it to a single space
		return ' '
	literal = m.group(2)
	return literal if literal is not None else ''
def filter_comments(filename):
	"""
	Read a c/c++ file and return its preprocessor directives as a list of
	tuples (directive keyword, rest of the line).
	"""
	code = Utils.readf(filename)
	if use_trigraphs:
		for (a, b) in trig_def:
			# bugfix: the original "code.split(a).join(b)" raised
			# AttributeError (str.split returns a list, and lists have no
			# join method); substitute each trigraph instead
			code = code.replace(a, b)
	code = re_nl.sub('', code)    # join backslash-continued lines
	code = re_cpp.sub(repl, code) # strip comments, keep literals
	return [(m.group(2), m.group(3)) for m in re.finditer(re_lines, code)]
prec = {}
# op -> precedence rank, needed for such expressions: #if 1 && 2 != 0
# (a lower rank means a tighter binding)
ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ',']
for rank, group in enumerate(ops):
	for symbol in group.split():
		prec[symbol] = rank
def reduce_nums(val_1, val_2, val_op):
	"""
	Apply the binary operator val_op to the two operands and try to return
	an integer result (used when evaluating #if/#elif expressions).

	The operands may arrive as ints or as the strings produced by the
	tokenizer, so both are coerced to numbers first.
	"""
	# make certain a and b are numeric before computing
	try: a = 0 + val_1
	except TypeError: a = int(val_1)
	try: b = 0 + val_2
	except TypeError: b = int(val_2)

	d = val_op
	if d == '%': c = a % b
	elif d == '+': c = a + b
	elif d == '-': c = a - b
	elif d == '*': c = a * b
	elif d == '/': c = a / b
	elif d == '^': c = a ^ b
	elif d == '|': c = a | b
	elif d == '||': c = int(a or b)
	elif d == '&': c = a & b
	elif d == '&&': c = int(a and b)
	elif d == '==': c = int(a == b)
	elif d == '!=': c = int(a != b)
	elif d == '<=': c = int(a <= b)
	elif d == '<': c = int(a < b)
	elif d == '>': c = int(a > b)
	elif d == '>=': c = int(a >= b)
	# a second, unreachable "elif d == '^'" branch was removed here (the
	# first '^' test above always matched first)
	elif d == '<<': c = a << b
	elif d == '>>': c = a >> b
	# unrecognized operators evaluate to 0
	else: c = 0
	return c
def get_num(lst):
	"""
	Parse the first operand (number, parenthesized expression or unary
	expression) from a token list.
	Returns a tuple (value, remaining tokens).
	Raises PreprocError on malformed input.
	"""
	if not lst: raise PreprocError("empty list for get_num")
	(p, v) = lst[0]
	if p == OP:
		if v == '(':
			# find the matching closing parenthesis, then evaluate the inside
			count_par = 1
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
						if count_par == 0:
							break
					elif v == '(':
						count_par += 1
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)

			(num, _) = get_term(lst[1:i])
			return (num, lst[i+1:])

		elif v == '+':
			# unary plus: no-op
			return get_num(lst[1:])
		elif v == '-':
			num, lst = get_num(lst[1:])
			return (reduce_nums('-1', num, '*'), lst)
		elif v == '!':
			num, lst = get_num(lst[1:])
			return (int(not int(num)), lst)
		elif v == '~':
			# bugfix: 'num' was referenced before assignment here (NameError);
			# evaluate the operand first, exactly like the other unary operators
			num, lst = get_num(lst[1:])
			return (~ int(num), lst)
		else:
			raise PreprocError("invalid op token %r for get_num" % lst)
	elif p == NUM:
		return v, lst[1:]
	elif p == IDENT:
		# all macros should have been replaced, remaining identifiers eval to 0
		return 0, lst[1:]
	else:
		raise PreprocError("invalid token %r for get_num" % lst)
def get_term(lst):
	"""
	Evaluate a token list down to a single value, reducing one binary
	operator at a time while honouring operator precedence (see 'prec').
	Returns a tuple (value, remaining tokens).
	Raises PreprocError on malformed input.
	"""
	if not lst: raise PreprocError("empty list for get_term")
	num, lst = get_num(lst)
	if not lst:
		return (num, [])
	(p, v) = lst[0]
	if p == OP:
		if v == '&&' and not num:
			# short-circuit: the conjunction is already false
			return (num, [])
		elif v == '||' and num:
			# short-circuit: the disjunction is already true
			return (num, [])
		elif v == ',':
			# skip
			return get_term(lst[1:])
		elif v == '?':
			# ternary operator: locate the ':' matching this '?'
			# (parentheses tracked so a nested ':' is not picked up)
			count_par = 0
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
					elif v == '(':
						count_par += 1
					elif v == ':':
						if count_par == 0:
							break
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)

			# evaluate only the selected branch of the ternary
			if int(num):
				return get_term(lst[1:i])
			else:
				return get_term(lst[i+1:])

		else:
			# binary operator: read the next operand
			num2, lst = get_num(lst[1:])

			if not lst:
				# no more tokens to process
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)

			# operator precedence
			p2, v2 = lst[0]
			if p2 != OP:
				raise PreprocError("op expected %r" % lst)

			if prec[v2] >= prec[v]:
				# v binds at least as tightly as the next operator (lower
				# rank = tighter binding), so reduce "num v num2" now
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)
			else:
				# the next operator binds more tightly: reduce
				# "num2 v2 num3" first, then retry
				num3, lst = get_num(lst[1:])
				num3 = reduce_nums(num2, num3, v2)
				return get_term([(NUM, num), (p, v), (NUM, num3)] + lst)

	raise PreprocError("cannot reduce %r" % lst)
def reduce_eval(lst):
	"""Evaluate a token list (an #if/#elif condition) down to a single NUM token."""
	value, _ = get_term(lst)
	return (NUM, value)
def stringize(lst):
	"""Concatenate the values of a token list into a single string."""
	return "".join(str(value) for (_, value) in lst)
def paste_tokens(t1, t2):
	"""
	Token pasting for the '##' operator, for example:
		a ## b  ->  ab
		> ## =  ->  >=
		a ## 2  ->  a2
	"""
	kind1, kind2 = t1[0], t2[0]
	if kind1 == OP and kind2 == OP:
		ptype = OP
	elif kind1 == IDENT and kind2 in (IDENT, NUM):
		ptype = IDENT
	elif kind1 == NUM and kind2 == NUM:
		ptype = NUM
	else:
		raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2))
	return (ptype, t1[1] + t2[1])
def reduce_tokens(lst, defs, ban=[]):
	"""
	Replace the tokens in lst in place, using the macros provided in defs,
	and a list of macro names (ban) that cannot be re-applied.

	Handles 'defined(X)' tests, object-like macros, function-like macros
	(argument collection, '#' stringizing, '##' token pasting and a partial
	__VA_ARGS__ implementation).

	NOTE: defs may be mutated - string entries are lazily compiled into
	(params, tokens) macro definitions via extract_macro.
	NOTE: the 'ban' default list is never mutated here (only ban+[v] copies
	are passed down), so the mutable default is harmless.
	"""
	i = 0
	while i < len(lst):
		(p, v) = lst[i]

		if p == IDENT and v == "defined":
			# evaluate defined X / defined(X) to 1 or 0
			del lst[i]
			if i < len(lst):
				(p2, v2) = lst[i]
				if p2 == IDENT:
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				elif p2 == OP and v2 == '(':
					del lst[i]
					(p2, v2) = lst[i]
					del lst[i] # remove the ident, and change the ) for the value
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				else:
					raise PreprocError("invalid define expression %r" % lst)

		elif p == IDENT and v in defs:

			# lazily turn a raw "#define ..." line into a macro definition
			if isinstance(defs[v], str):
				a, b = extract_macro(defs[v])
				defs[v] = b
			macro_def = defs[v]
			to_add = macro_def[1]

			# macro_def[0] is a list for object-like macros and a dict
			# (param name -> index) for function-like macros (see extract_macro)
			if isinstance(macro_def[0], list):
				# macro without arguments
				del lst[i]
				for x in xrange(len(to_add)):
					lst.insert(i, to_add[x])
					i += 1
			else:
				# collect the arguments for the funcall
				args = []
				del lst[i]

				if i >= len(lst):
					raise PreprocError("expected '(' after %r (got nothing)" % v)

				(p2, v2) = lst[i]
				if p2 != OP or v2 != '(':
					raise PreprocError("expected '(' after %r" % v)

				del lst[i]

				# split the tokens up to the matching ')' into one
				# token list per argument (top-level commas separate them)
				one_param = []
				count_paren = 0
				while i < len(lst):
					p2, v2 = lst[i]

					del lst[i]
					if p2 == OP and count_paren == 0:
						if v2 == '(':
							one_param.append((p2, v2))
							count_paren += 1
						elif v2 == ')':
							if one_param: args.append(one_param)
							break
						elif v2 == ',':
							if not one_param: raise PreprocError("empty param in funcall %s" % p)
							args.append(one_param)
							one_param = []
						else:
							one_param.append((p2, v2))
					else:
						one_param.append((p2, v2))
						if v2 == '(': count_paren += 1
						elif v2 == ')': count_paren -= 1
				else:
					raise PreprocError('malformed macro')

				# substitute the arguments within the define expression
				accu = []
				arg_table = macro_def[0]
				j = 0
				while j < len(to_add):
					(p2, v2) = to_add[j]

					if p2 == OP and v2 == '#':
						# stringize is for arguments only
						if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
							toks = args[arg_table[to_add[j+1][1]]]
							accu.append((STR, stringize(toks)))
							j += 1
						else:
							accu.append((p2, v2))
					elif p2 == OP and v2 == '##':
						# token pasting, how can man invent such a complicated system?
						if accu and j+1 < len(to_add):
							# we have at least two tokens
							t1 = accu[-1]

							if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
								# paste with a macro argument
								toks = args[arg_table[to_add[j+1][1]]]

								if toks:
									accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1])
									accu.extend(toks[1:])
								else:
									# error, case "a##"
									accu.append((p2, v2))
									accu.extend(toks)
							elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__':
								# TODO not sure
								# first collect the tokens
								va_toks = []
								st = len(macro_def[0])
								pt = len(args)
								for x in args[pt-st+1:]:
									va_toks.extend(x)
									va_toks.append((OP, ','))
								if va_toks: va_toks.pop() # extra comma
								if len(accu)>1:
									(p3, v3) = accu[-1]
									(p4, v4) = accu[-2]
									if v3 == '##':
										# remove the token paste
										accu.pop()
										if v4 == ',' and pt < st:
											# remove the comma
											accu.pop()
								accu += va_toks
							else:
								# paste with a literal token
								accu[-1] = paste_tokens(t1, to_add[j+1])

							j += 1
						else:
							# invalid paste, case "##a" or "b##"
							accu.append((p2, v2))

					elif p2 == IDENT and v2 in arg_table:
						# plain argument substitution: expand the argument
						# tokens first, with this macro banned from re-expansion
						toks = args[arg_table[v2]]
						reduce_tokens(toks, defs, ban+[v])
						accu.extend(toks)
					else:
						accu.append((p2, v2))

					j += 1

				# rescan the substituted body, then splice it back into lst
				reduce_tokens(accu, defs, ban+[v])

				for x in xrange(len(accu)-1, -1, -1):
					lst.insert(i, accu[x])

		i += 1
def eval_macro(lst, adefs):
	"""Expand the macros in the token list and evaluate it to a boolean."""
	reduce_tokens(lst, adefs, [])
	if not lst:
		raise PreprocError("missing tokens to evaluate")
	(_, value) = reduce_eval(lst)
	return int(value) != 0
def extract_macro(txt):
	"""
	process a macro definition from "#define f(x, y) x * y" into a function
	or a simple macro without arguments

	Returns (name, [params, body_tokens]) where params is a dict mapping
	each parameter name to its position for function-like macros, and an
	empty list for object-like macros (reduce_tokens relies on this
	list-vs-dict distinction).
	"""
	t = tokenize(txt)
	if re_fun.search(txt):
		# function-like macro: parse the parameter list with a small
		# state machine driven by the previously seen token ('prev')
		p, name = t[0]

		p, v = t[1]
		if p != OP: raise PreprocError("expected open parenthesis")

		i = 1
		pindex = 0
		params = {}
		prev = '('

		while 1:
			i += 1
			p, v = t[i]

			if prev == '(':
				# expecting the first parameter or an empty list
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("unexpected token (3)")
			elif prev == IDENT:
				# after a parameter name: expecting ',' or ')'
				if p == OP and v == ',':
					prev = v
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("comma or ... expected")
			elif prev == ',':
				# after a comma: expecting the next parameter name
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == '...':
					# varargs parameters are not supported yet
					raise PreprocError("not implemented (1)")
				else:
					raise PreprocError("comma or ... expected (2)")
			elif prev == '...':
				raise PreprocError("not implemented (2)")
			else:
				raise PreprocError("unexpected else")

		#~ print (name, [params, t[i+1:]])
		return (name, [params, t[i+1:]])
	else:
		# object-like macro: the body is everything after the name
		(p, v) = t[0]
		return (v, [[], t[1:]])
# fast path for plain '#include <x>' or '#include "x"' arguments
re_include = re.compile('^\s*(<(?P<a>.*)>|"(?P<b>.*)")')
def extract_include(txt, defs):
	"""
	Process a line in the form "#include foo" and return a tuple
	(kind, path) where kind is '<' for system includes and '"' for
	local includes.
	Raises PreprocError when the line cannot be resolved to an include.
	"""
	m = re_include.search(txt)
	if m:
		if m.group('a'): return '<', m.group('a')
		if m.group('b'): return '"', m.group('b')

	# perform preprocessing and look at the result, it must match an include
	toks = tokenize(txt)
	reduce_tokens(toks, defs, ['waf_include'])

	if not toks:
		raise PreprocError("could not parse include %s" % txt)

	if len(toks) == 1:
		if toks[0][0] == STR:
			return '"', toks[0][1]
	else:
		if toks[0][1] == '<' and toks[-1][1] == '>':
			# bugfix: return a (kind, path) pair like every other path;
			# the bare string returned here previously broke callers that
			# unpack "(kind, inc) = extract_include(...)"
			return '<', stringize(toks).lstrip('<').rstrip('>')

	raise PreprocError("could not parse include %s." % txt)
def parse_char(txt):
	"""
	Return the numeric value of a c char literal body (the text between the
	quotes): 'A' -> 65, '\\n' -> 10, '\\x41' -> 65.
	Raises PreprocError on invalid input.
	"""
	if not txt: raise PreprocError("attempted to parse a null char")
	if txt[0] != '\\':
		# plain character
		return ord(txt)
	c = txt[1]
	if c == 'x':
		# hexadecimal escape \xNN... (the original had a redundant
		# special case for len(txt) == 4 returning the same value;
		# that dead branch was removed)
		return int(txt[2:], 16)
	elif c.isdigit():
		# octal escape: \0 alone, or \N, \NN, \NNN
		if c == '0' and len(txt) == 2: return 0
		for i in 3, 2, 1:
			if len(txt) > i and txt[1:1+i].isdigit():
				# NOTE(review): this branch returns a (length, value) tuple
				# while every other branch returns a plain int - kept as-is
				# to preserve behavior, but it looks suspicious; verify how
				# tokenize_private consumes the result
				return (1+i, int(txt[1:1+i], 8))
	else:
		# single-letter escapes (\n, \t, ...)
		try: return chr_esc[c]
		except KeyError: raise PreprocError("could not parse char literal '%s'" % txt)
@Utils.run_once
def tokenize_private(s):
	"""
	Lex a c/c++ expression into a list of (token type, value) tuples.
	Decorated with Utils.run_once, so the returned list is cached per input
	string (tokenize() hands out copies - see below).
	"""
	ret = []
	for match in re_clexer.finditer(s):
		m = match.group
		for name in tok_types:
			v = m(name)
			if v:
				if name == IDENT:
					# translate c++ alternative operator spellings (and, or, ...)
					try: v = g_optrans[v]; name = OP
					except KeyError:
						# c++ specific
						if v.lower() == "true":
							v = 1
							name = NUM
						elif v.lower() == "false":
							v = 0
							name = NUM
				elif name == NUM:
					# normalize numeric literals; note that plain decimal
					# integers ('n0') are kept as strings and coerced later
					# by reduce_nums
					if m('oct'): v = int(v, 8)
					elif m('hex'): v = int(m('hex'), 16)
					elif m('n0'): v = m('n0')
					else:
						v = m('char')
						if v: v = parse_char(v)
						else: v = m('n2') or m('n4')
				elif name == OP:
					# digraphs
					if v == '%:': v = '#'
					elif v == '%:%:': v = '##'
				elif name == STR:
					# remove the quotes around the string
					v = v[1:-1]
				ret.append((name, v))
				break
	return ret
def tokenize(s):
	"""convert a string into a list of tokens (shlex.split does not apply to c/c++/d)"""
	# hand out a shallow copy: the cached list held by tokenize_private
	# would otherwise be mutated in place by callers such as reduce_tokens
	return list(tokenize_private(s))
@Utils.run_once
def define_name(line):
	"""
	Return the macro name from a "#define" argument ("VAR" from "VAR 2").
	Presumably cached per line by the Utils.run_once decorator - verify
	against the Utils module.
	"""
	return re_mac.match(line).group(0)
class c_parser(object):
	"""
	Preprocessor-level scanner: follows #include directives while evaluating
	#if/#ifdef/#define lines, collecting the dependency nodes and the
	unresolved names.
	"""
	def __init__(self, nodepaths=None, defines=None):
		# pending (directive, line) tuples, consumed from the end (stack)
		self.lines = []

		if defines is None:
			self.defs = {}
		else:
			self.defs = dict(defines) # make a copy
		# stack of #if states (accepted/ignored/undefined/skipped)
		self.state = []

		self.env = None # needed for the variant when searching for files

		# guard against runaway recursive inclusion (see recursion_limit)
		self.count_files = 0
		self.currentnode_stack = []

		self.nodepaths = nodepaths or []

		# results: resolved dependency nodes and unresolved include names
		self.nodes = []
		self.names = []

		# file added
		self.curfile = ''
		self.ban_includes = set([])

	def cached_find_resource(self, node, filename):
		"""Find filename relative to node, memoizing results on node.bld."""
		try:
			nd = node.bld.cache_nd
		except:
			nd = node.bld.cache_nd = {}

		tup = (node.id, filename)
		try:
			return nd[tup]
		except KeyError:
			ret = node.find_resource(filename)
			nd[tup] = ret
			return ret

	def tryfind(self, filename):
		"""
		Resolve an include name against the current file's directory and
		the configured include paths; record it as a node or as a name.
		"""
		self.curfile = filename

		# for msvc it should be a for loop on the whole stack
		found = self.cached_find_resource(self.currentnode_stack[-1], filename)

		for n in self.nodepaths:
			if found:
				break
			found = self.cached_find_resource(n, filename)

		if found:
			self.nodes.append(found)
			# .moc files are recorded as dependencies but not parsed
			if filename[-4:] != '.moc':
				self.addlines(found)
		else:
			if not filename in self.names:
				self.names.append(filename)
		return found

	def addlines(self, node):
		"""
		Push the preprocessor directives of a file onto self.lines
		(in reverse, with a POPFILE marker), using the build-level cache.
		"""
		self.currentnode_stack.append(node.parent)
		filepath = node.abspath(self.env)

		self.count_files += 1
		if self.count_files > recursion_limit: raise PreprocError("recursion limit exceeded")
		pc = self.parse_cache
		debug('preproc: reading file %r', filepath)
		try:
			lns = pc[filepath]
		except KeyError:
			pass
		else:
			# cache hit: reuse the filtered lines
			self.lines.extend(lns)
			return

		try:
			lines = filter_comments(filepath)
			lines.append((POPFILE, ''))
			lines.reverse()
			pc[filepath] = lines # cache the lines filtered
			self.lines.extend(lines)
		except IOError:
			raise PreprocError("could not read the file %s" % filepath)
		except Exception:
			# best effort: a parse failure in one header must not stop the scan
			if Logs.verbose > 0:
				error("parsing %s failed" % filepath)
				traceback.print_exc()

	def start(self, node, env):
		"""Scan a file and all its includes; results end up in self.nodes/self.names."""
		debug('preproc: scanning %s (in %s)', node.name, node.parent.name)

		self.env = env
		variant = node.variant(env) # NOTE(review): unused local, kept as-is
		bld = node.__class__.bld
		try:
			self.parse_cache = bld.parse_cache
		except AttributeError:
			bld.parse_cache = {}
			self.parse_cache = bld.parse_cache

		self.addlines(node)
		# macros defined on the command line / configuration
		if env['DEFLINES']:
			lst = [('define', x) for x in env['DEFLINES']]
			lst.reverse()
			self.lines.extend(lst)

		while self.lines:
			(kind, line) = self.lines.pop()
			if kind == POPFILE:
				# end of an included file: go back to the parent directory
				self.currentnode_stack.pop()
				continue
			try:
				self.process_line(kind, line)
			except Exception, e:
				# tolerate broken lines - log and keep scanning
				if Logs.verbose:
					debug('preproc: line parsing failed (%s): %s %s', e, line, Utils.ex_stack())

	def process_line(self, token, line):
		"""
		Process one preprocessor directive (token is the keyword, line the rest).
		WARNING: a new state must be added for if* because the endif
		"""
		ve = Logs.verbose
		if ve: debug('preproc: line is %s - %s state is %s', token, line, self.state)
		state = self.state

		# make certain we define the state if we are about to enter in an if block
		if token in ['ifdef', 'ifndef', 'if']:
			state.append(undefined)
		elif token == 'endif':
			state.pop()

		# skip lines when in a dead 'if' branch, wait for the endif
		if not token in ['else', 'elif', 'endif']:
			if skipped in self.state or ignored in self.state:
				return

		if token == 'if':
			ret = eval_macro(tokenize(line), self.defs)
			if ret: state[-1] = accepted
			else: state[-1] = ignored
		elif token == 'ifdef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs: state[-1] = accepted
			else: state[-1] = ignored
		elif token == 'ifndef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs: state[-1] = ignored
			else: state[-1] = accepted
		elif token == 'include' or token == 'import':
			(kind, inc) = extract_include(line, self.defs)
			if inc in self.ban_includes: return
			# '#import' and '#pragma once' files are included only once
			if token == 'import': self.ban_includes.add(inc)
			if ve: debug('preproc: include found %s (%s) ', inc, kind)
			if kind == '"' or not strict_quotes:
				self.tryfind(inc)
		elif token == 'elif':
			if state[-1] == accepted:
				state[-1] = skipped
			elif state[-1] == ignored:
				if eval_macro(tokenize(line), self.defs):
					state[-1] = accepted
		elif token == 'else':
			if state[-1] == accepted: state[-1] = skipped
			elif state[-1] == ignored: state[-1] = accepted
		elif token == 'define':
			try:
				# store the raw line; extract_macro compiles it lazily
				self.defs[define_name(line)] = line
			except:
				raise PreprocError("invalid define line %s" % line)
		elif token == 'undef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs:
				self.defs.__delitem__(m.group(0))
				#print "undef %s" % name
		elif token == 'pragma':
			if re_pragma_once.match(line.lower()):
				self.ban_includes.add(self.curfile)
def get_deps(node, env, nodepaths=[]):
	"""
	Get the dependencies using a c/c++ preprocessor, this is required for
	finding dependencies of the kind
	#include some_macro()
	"""
	parser = c_parser(nodepaths)
	parser.start(node, env)
	return (parser.nodes, parser.names)
#################### dumb dependency scanner

# like re_lines above, but restricted to #include directives only
re_inc = re.compile(\
	'^[ \t]*(#|%:)[ \t]*(include)[ \t]*(.*)\r*$',
	re.IGNORECASE | re.MULTILINE)
def lines_includes(filename):
	"""Return the #include directives of a file as (keyword, rest-of-line) tuples."""
	code = Utils.readf(filename)
	if use_trigraphs:
		for (a, b) in trig_def:
			# bugfix: the original "code.split(a).join(b)" raised
			# AttributeError (str.split returns a list, and lists have no
			# join method); substitute each trigraph instead
			code = code.replace(a, b)
	code = re_nl.sub('', code)    # join backslash-continued lines
	code = re_cpp.sub(repl, code) # strip comments, keep literals
	return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)]
def get_deps_simple(node, env, nodepaths=None, defines=None):
	"""
	Get the dependencies by just looking recursively at the #include statements
	(no conditional evaluation).
	Returns (nodes, names): resolved nodes and unresolved include names.
	"""
	# bugfix: avoid mutable default arguments - extract_include passes the
	# defines dict to reduce_tokens, which writes back compiled macros, so a
	# shared {} default would leak state across calls
	if nodepaths is None: nodepaths = []
	if defines is None: defines = {}

	nodes = []
	names = []

	def find_deps(node):
		lst = lines_includes(node.abspath(env))

		for (_, line) in lst:
			(t, filename) = extract_include(line, defines)
			if filename in names:
				continue

			# .moc files are recorded by name only
			# NOTE(review): unlike c_parser.tryfind, there is no early skip
			# here, so a found .moc is still resolved below - kept as-is
			if filename.endswith('.moc'):
				names.append(filename)

			found = None
			for n in nodepaths:
				if found:
					break
				found = n.find_resource(filename)

			if not found:
				if not filename in names:
					names.append(filename)
			elif not found in nodes:
				nodes.append(found)
				# bugfix: recurse into the header that was just found;
				# recursing on the current node again never descended into
				# the included file, contradicting the documented behavior
				find_deps(found)

	find_deps(node)
	return (nodes, names)