tokenizer.py
def split_tokens(x):
    """Split a line of C-like source into tokens, keeping string
    literals (quotes included) as single tokens."""
    b = []
    i = 0
    start = 0       # beginning of the token currently being scanned
    in_str = False  # inside a double-quoted string literal?
    while i < len(x):
        if not in_str:
            # whitespace ends the current token but is not itself emitted
            if x[i] in " \t\n":
                if i > start:
                    b.append(x[start:i])
                start = i + 1
            # punctuation ends the current token and is a token of its own
            if x[i] in "(),={}*":
                token = x[i]
                if i > start:
                    b.append(x[start:i])
                start = i + 1
                b.append(token)
        # a double quote toggles string mode unless escaped
        # (note: a quote after an escaped backslash, as in \\", is
        # still treated as escaped by this simple one-character check)
        if x[i] == '"':
            if i > 0 and x[i-1] == '\\':
                pass
            else:
                in_str = not in_str
        i += 1
    # flush the trailing token, if any
    if i > start:
        b.append(x[start:i])
    return b
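
# Minimal usage sketch: the sample inputs below are illustrative, not
# taken from the original file, but the expected outputs shown in the
# comments match what split_tokens() actually produces.
if __name__ == "__main__":
    # declaration-style input: each punctuation character becomes its own token
    print(split_tokens("print_node(struct node *n, int depth)"))
    # -> ['print_node', '(', 'struct', 'node', '*', 'n', ',', 'int', 'depth', ')']

    # a quoted string survives as one token, quotes included
    print(split_tokens('msg = "hello world"'))
    # -> ['msg', '=', '"hello world"']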