1 #!/usr/bin/env python
3 # This is the API builder: it parses the C sources and builds the
4 # formal API description in XML.
6 # See Copyright for the status of this software.
8 # daniel@veillard.com
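# Typical use (a sketch; the exact invocation is driven by the build
# system): run from the docs/ directory so the script can scan the public
# headers and selected sources listed below and emit the formal API
# description in XML.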
11 from __future__ import print_function
13 import os
14 import sys
15 import glob
16 import re
18 quiet = True
19 warnings = 0
20 debug = False
21 debugsym = None
24 # C parser analysis code
26 included_files = {
27 "libvirt-common.h": "header with general libvirt API definitions",
28 "libvirt-domain.h": "header with general libvirt API definitions",
29 "libvirt-domain-checkpoint.h": "header with general libvirt API definitions",
30 "libvirt-domain-snapshot.h": "header with general libvirt API definitions",
31 "libvirt-event.h": "header with general libvirt API definitions",
32 "libvirt-host.h": "header with general libvirt API definitions",
33 "libvirt-interface.h": "header with general libvirt API definitions",
34 "libvirt-network.h": "header with general libvirt API definitions",
35 "libvirt-nodedev.h": "header with general libvirt API definitions",
36 "libvirt-nwfilter.h": "header with general libvirt API definitions",
37 "libvirt-secret.h": "header with general libvirt API definitions",
38 "libvirt-storage.h": "header with general libvirt API definitions",
39 "libvirt-stream.h": "header with general libvirt API definitions",
40 "virterror.h": "header with error specific API definitions",
41 "libvirt.c": "Main interfaces for the libvirt library",
42 "libvirt-domain.c": "Domain interfaces for the libvirt library",
43 "libvirt-domain-checkpoint.c": "Domain checkpoint interfaces for the libvirt library",
44 "libvirt-domain-snapshot.c": "Domain snapshot interfaces for the libvirt library",
45 "libvirt-host.c": "Host interfaces for the libvirt library",
46 "libvirt-interface.c": "Interface interfaces for the libvirt library",
47 "libvirt-network.c": "Network interfaces for the libvirt library",
48 "libvirt-nodedev.c": "Node device interfaces for the libvirt library",
49 "libvirt-nwfilter.c": "NWFilter interfaces for the libvirt library",
50 "libvirt-secret.c": "Secret interfaces for the libvirt library",
51 "libvirt-storage.c": "Storage interfaces for the libvirt library",
52 "libvirt-stream.c": "Stream interfaces for the libvirt library",
53 "virerror.c": "implements error handling and reporting code for libvirt",
54 "virevent.c": "event loop for monitoring file handles",
55 "virtypedparam.c": "virTypedParameters APIs",
58 qemu_included_files = {
59 "libvirt-qemu.h": "header with QEMU specific API definitions",
60 "libvirt-qemu.c": "Implementations for the QEMU specific APIs",
63 lxc_included_files = {
64 "libvirt-lxc.h": "header with LXC specific API definitions",
65 "libvirt-lxc.c": "Implementations for the LXC specific APIs",
68 admin_included_files = {
69 "libvirt-admin.h": "header with admin specific API definitions",
70 "libvirt-admin.c": "Implementations for the admin specific APIs",
73 ignored_words = {
74 "ATTRIBUTE_UNUSED": (0, "macro keyword"),
75 "ATTRIBUTE_SENTINEL": (0, "macro keyword"),
76 "VIR_DEPRECATED": (0, "macro keyword"),
77 "VIR_EXPORT_VAR": (0, "macro keyword"),
78 "WINAPI": (0, "Windows keyword"),
79 "__declspec": (3, "Windows keyword"),
80 "__stdcall": (0, "Windows keyword"),
83 ignored_functions = {
84 "virConnectSupportsFeature": "private function for remote access",
85 "virDomainMigrateFinish": "private function for migration",
86 "virDomainMigrateFinish2": "private function for migration",
87 "virDomainMigratePerform": "private function for migration",
88 "virDomainMigratePrepare": "private function for migration",
89 "virDomainMigratePrepare2": "private function for migration",
90 "virDomainMigratePrepareTunnel": "private function for tunnelled migration",
91 "virDomainMigrateBegin3": "private function for migration",
92 "virDomainMigrateFinish3": "private function for migration",
93 "virDomainMigratePerform3": "private function for migration",
94 "virDomainMigratePrepare3": "private function for migration",
95 "virDomainMigrateConfirm3": "private function for migration",
96 "virDomainMigratePrepareTunnel3": "private function for tunnelled migration",
97 "DllMain": "specific function for Win32",
98 "virTypedParamsValidate": "internal function in virtypedparam.c",
99 "virTypedParameterValidateSet": "internal function in virtypedparam.c",
100 "virTypedParameterAssign": "internal function in virtypedparam.c",
101 "virTypedParameterAssignFromStr": "internal function in virtypedparam.c",
102 "virTypedParameterToString": "internal function in virtypedparam.c",
103 "virTypedParamsCheck": "internal function in virtypedparam.c",
104 "virTypedParamsCopy": "internal function in virtypedparam.c",
105 "virDomainMigrateBegin3Params": "private function for migration",
106 "virDomainMigrateFinish3Params": "private function for migration",
107 "virDomainMigratePerform3Params": "private function for migration",
108 "virDomainMigratePrepare3Params": "private function for migration",
109 "virDomainMigrateConfirm3Params": "private function for migration",
110 "virDomainMigratePrepareTunnel3Params": "private function for tunnelled migration",
111 "virErrorCopyNew": "private",
114 ignored_macros = {
115 "_virSchedParameter": "backward compatibility macro for virTypedParameter",
116 "_virBlkioParameter": "backward compatibility macro for virTypedParameter",
117 "_virMemoryParameter": "backward compatibility macro for virTypedParameter",
120 # macros that should be completely skipped
121 hidden_macros = {
122 "VIR_DEPRECATED": "internal macro to mark deprecated apis",
123 "VIR_EXPORT_VAR": "internal macro to mark exported vars",
126 def escape(raw):
127 raw = raw.replace('&', '&amp;')
128 raw = raw.replace('<', '&lt;')
129 raw = raw.replace('>', '&gt;')
130 raw = raw.replace("'", '&apos;')
131 raw = raw.replace('"', '&quot;')
132 return raw
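# Illustrative example (note the '&' replacement must run first so the
# entities added below are not themselves escaped):
#   escape('a < b & "c"')  ->  'a &lt; b &amp; &quot;c&quot;'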
134 def uniq(items):
135 return sorted(set(items))
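# Example: uniq(["b", "a", "b"]) -> ["a", "b"]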
137 class identifier:
138 def __init__(self, name, header=None, module=None, type=None, lineno=0,
139 info=None, extra=None, conditionals=None):
140 self.name = name
141 self.header = header
142 self.module = module
143 self.type = type
144 self.info = info
145 self.extra = extra
146 self.lineno = lineno
147 self.static = 0
148 if conditionals is None or len(conditionals) == 0:
149 self.conditionals = None
150 else:
151 self.conditionals = conditionals[:]
152 if self.name == debugsym and not quiet:
153 print("=> define %s : %s" % (debugsym, (module, type, info,
154 extra, conditionals)))
156 def __repr__(self):
157 r = "%s %s:" % (self.type, self.name)
158 if self.static:
159 r = r + " static"
160 if self.module is not None:
161 r = r + " from %s" % self.module
162 if self.info is not None:
163 r = r + " " + repr(self.info)
164 if self.extra is not None:
165 r = r + " " + repr(self.extra)
166 if self.conditionals is not None:
167 r = r + " " + repr(self.conditionals)
168 return r
171 def set_header(self, header):
172 self.header = header
173 def set_module(self, module):
174 self.module = module
175 def set_type(self, type):
176 self.type = type
177 def set_info(self, info):
178 self.info = info
179 def set_extra(self, extra):
180 self.extra = extra
181 def set_lineno(self, lineno):
182 self.lineno = lineno
183 def set_static(self, static):
184 self.static = static
185 def set_conditionals(self, conditionals):
186 if conditionals is None or len(conditionals) == 0:
187 self.conditionals = None
188 else:
189 self.conditionals = conditionals[:]
191 def get_name(self):
192 return self.name
193 def get_header(self):
194 return self.header
195 def get_module(self):
196 return self.module
197 def get_type(self):
198 return self.type
199 def get_info(self):
200 return self.info
201 def get_lineno(self):
202 return self.lineno
203 def get_extra(self):
204 return self.extra
205 def get_static(self):
206 return self.static
207 def get_conditionals(self):
208 return self.conditionals
210 def update(self, header, module, type=None, info=None, extra=None,
211 conditionals=None):
212 if self.name == debugsym and not quiet:
213 print("=> update %s : %s" % (debugsym, (module, type, info,
214 extra, conditionals)))
215 if header is not None and self.header is None:
216 self.set_header(module)
217 if module is not None and (self.module is None or self.header == self.module):
218 self.set_module(module)
219 if type is not None and self.type is None:
220 self.set_type(type)
221 if info is not None:
222 self.set_info(info)
223 if extra is not None:
224 self.set_extra(extra)
225 if conditionals is not None:
226 self.set_conditionals(conditionals)
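# update() only fills in the header and type when they are still unset,
# updates the module when it is unset (or still equal to the header), and
# overwrites info/extra/conditionals whenever new values are supplied, so
# repeated sightings of a symbol refine a single identifier record.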
228 class index:
229 def __init__(self, name="noname"):
230 self.name = name
231 self.identifiers = {}
232 self.functions = {}
233 self.variables = {}
234 self.includes = {}
235 self.structs = {}
236 self.unions = {}
237 self.enums = {}
238 self.typedefs = {}
239 self.macros = {}
240 self.references = {}
241 self.info = {}
243 def warning(self, msg):
244 global warnings
245 warnings = warnings + 1
246 print(msg)
248 def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals=None):
249 if name[0:2] == '__':
250 return None
251 d = None
252 try:
253 d = self.identifiers[name]
254 d.update(header, module, type, info, extra, conditionals)
255 except:
256 d = identifier(name, header, module, type, lineno, info, extra,
257 conditionals)
258 self.identifiers[name] = d
260 if d is not None and static == 1:
261 d.set_static(1)
263 if d is not None and name is not None and type is not None:
264 self.references[name] = d
266 if name == debugsym and not quiet:
267 print("New ref: %s" % (d))
269 return d
271 def add(self, name, header, module, static, type, lineno, info=None,
272 extra=None, conditionals=None):
273 if name[0:2] == '__':
274 return None
275 d = None
276 try:
277 d = self.identifiers[name]
278 d.update(header, module, type, info, extra, conditionals)
279 except:
280 d = identifier(name, header, module, type, lineno, info, extra,
281 conditionals)
282 self.identifiers[name] = d
284 if d is not None and static == 1:
285 d.set_static(1)
287 if d is not None and name is not None and type is not None:
288 type_map = {
289 "function": self.functions,
290 "functype": self.functions,
291 "variable": self.variables,
292 "include": self.includes,
293 "struct": self.structs,
294 "union": self.unions,
295 "enum": self.enums,
296 "typedef": self.typedefs,
297 "macro": self.macros
299 if type in type_map:
300 type_map[type][name] = d
301 else:
302 self.warning("Unable to register type ", type)
304 if name == debugsym and not quiet:
305 print("New symbol: %s" % (d))
307 return d
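# add() registers the identifier both in self.identifiers and in the
# per-kind dictionary chosen through type_map above, whereas add_ref()
# records the symbol in self.references without classifying it.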
309 def merge(self, idx):
310 for id in idx.functions.keys():
312 # macro might be used to override functions or variables
313 # definitions
315 if id in self.macros:
316 del self.macros[id]
317 if id in self.functions:
318 self.warning("function %s from %s redeclared in %s" % (
319 id, self.functions[id].header, idx.functions[id].header))
320 else:
321 self.functions[id] = idx.functions[id]
322 self.identifiers[id] = idx.functions[id]
323 for id in idx.variables.keys():
325 # macro might be used to override functions or variables
326 # definitions
328 if id in self.macros:
329 del self.macros[id]
330 if id in self.variables:
331 self.warning("variable %s from %s redeclared in %s" % (
332 id, self.variables[id].header, idx.variables[id].header))
333 else:
334 self.variables[id] = idx.variables[id]
335 self.identifiers[id] = idx.variables[id]
336 for id in idx.structs.keys():
337 if id in self.structs:
338 self.warning("struct %s from %s redeclared in %s" % (
339 id, self.structs[id].header, idx.structs[id].header))
340 else:
341 self.structs[id] = idx.structs[id]
342 self.identifiers[id] = idx.structs[id]
343 for id in idx.unions.keys():
344 if id in self.unions:
345 print("union %s from %s redeclared in %s" % (
346 id, self.unions[id].header, idx.unions[id].header))
347 else:
348 self.unions[id] = idx.unions[id]
349 self.identifiers[id] = idx.unions[id]
350 for id in idx.typedefs.keys():
351 if id in self.typedefs:
352 self.warning("typedef %s from %s redeclared in %s" % (
353 id, self.typedefs[id].header, idx.typedefs[id].header))
354 else:
355 self.typedefs[id] = idx.typedefs[id]
356 self.identifiers[id] = idx.typedefs[id]
357 for id in idx.macros.keys():
359 # macro might be used to override functions or variables
360 # definitions
362 if id in self.variables:
363 continue
364 if id in self.functions:
365 continue
366 if id in self.enums:
367 continue
368 if id in self.macros:
369 self.warning("macro %s from %s redeclared in %s" % (
370 id, self.macros[id].header, idx.macros[id].header))
371 else:
372 self.macros[id] = idx.macros[id]
373 self.identifiers[id] = idx.macros[id]
374 for id in idx.enums.keys():
375 if id in self.enums:
376 self.warning("enum %s from %s redeclared in %s" % (
377 id, self.enums[id].header, idx.enums[id].header))
378 else:
379 self.enums[id] = idx.enums[id]
380 self.identifiers[id] = idx.enums[id]
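# merge() folds another index into this one: real function, variable and
# enum definitions take precedence over macros of the same name, and
# redeclarations across files are reported as warnings.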
382 def merge_public(self, idx):
383 for id in idx.functions.keys():
384 if id in self.functions:
385 up = idx.functions[id]
386 # check that function condition agrees with header
387 if up.conditionals != self.functions[id].conditionals:
388 self.warning("Header condition differs from Function"
389 " for %s:" % id)
390 self.warning(" H: %s" % self.functions[id].conditionals)
391 self.warning(" C: %s" % up.conditionals)
392 self.functions[id].update(None, up.module, up.type, up.info,
393 up.extra)
394 # else:
395 # print("Function %s from %s is not declared in headers" % (
396 # id, idx.functions[id].module))
397 # TODO: do the same for variables.
399 def analyze_dict(self, type, dict):
400 count = 0
401 public = 0
402 for name in dict.keys():
403 id = dict[name]
404 count = count + 1
405 if id.static == 0:
406 public = public + 1
407 if count != public:
408 print(" %d %s , %d public" % (count, type, public))
409 elif count != 0:
410 print(" %d public %s" % (count, type))
413 def analyze(self):
414 if not quiet:
415 self.analyze_dict("functions", self.functions)
416 self.analyze_dict("variables", self.variables)
417 self.analyze_dict("structs", self.structs)
418 self.analyze_dict("unions", self.unions)
419 self.analyze_dict("typedefs", self.typedefs)
420 self.analyze_dict("macros", self.macros)
422 class CLexer:
423 """A lexer for the C language, tokenize the input by reading and
424 analyzing it line by line"""
425 def __init__(self, input):
426 self.input = input
427 self.tokens = []
428 self.line = ""
429 self.lineno = 0
431 def getline(self):
432 line = ''
433 while line == '':
434 line = self.input.readline()
435 if not line:
436 return None
437 self.lineno += 1
438 line = line.strip()
439 if line == '':
440 continue
441 while line[-1] == '\\':
442 line = line[:-1]
443 n = self.input.readline().strip()
444 self.lineno += 1
445 if not n:
446 break
447 line += n
448 return line
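# getline() returns the next non-blank source line with backslash
# continuations already joined into one logical line, or None at EOF.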
450 def getlineno(self):
451 return self.lineno
453 def push(self, token):
454 self.tokens.insert(0, token)
456 def debug(self):
457 print("Last token: ", self.last)
458 print("Token queue: ", self.tokens)
459 print("Line %d end: " % self.lineno, self.line)
461 def token(self):
462 while self.tokens == []:
463 if self.line == "":
464 line = self.getline()
465 else:
466 line = self.line
467 self.line = ""
468 if line is None:
469 return None
471 if line[0] == '#':
472 self.tokens = [('preproc', word) for word in line.split()]
474 # We might have whitespace between the '#' and preproc
475 # macro name, so instead of having a single token element
476 # of '#define' we might end up with '#' and 'define'. This
477 # merges them back together
478 if self.tokens[0][1] == "#":
479 self.tokens[0] = ('preproc', "#" + self.tokens[1][1])
480 del self.tokens[1]
481 break
482 l = len(line)
483 if line[0] == '"' or line[0] == "'":
484 quote = line[0]
485 i = 1
486 while quote not in line[i:]:
487 i = len(line)
488 nextline = self.getline()
489 if nextline is None:
490 return None
491 line += nextline
493 tok, self.line = line[1:].split(quote, 1)
494 self.last = ('string', tok)
495 return self.last
497 if line.startswith("/*"):
498 line = line[2:]
499 found = 0
500 tok = ""
501 while found == 0:
502 i = 0
503 l = len(line)
504 while i < l:
505 if line[i] == '*' and i+1 < l and line[i+1] == '/':
506 self.line = line[i+2:]
507 line = line[:i-1]
508 l = i
509 found = 1
510 break
511 i = i + 1
512 if tok != "":
513 tok = tok + "\n"
514 tok = tok + line
515 if found == 0:
516 line = self.getline()
517 if line is None:
518 return None
519 self.last = ('comment', tok)
520 return self.last
521 if line.startswith("//"):
522 line = line[2:]
523 self.last = ('comment', line)
524 return self.last
525 i = 0
526 while i < l:
527 if line[i] == '/' and i+1 < l and line[i+1] == '/':
528 self.line = line[i:]
529 line = line[:i]
530 break
531 if line[i] == '/' and i+1 < l and line[i+1] == '*':
532 self.line = line[i:]
533 line = line[:i]
534 break
535 if line[i] == '"' or line[i] == "'":
536 self.line = line[i:]
537 line = line[:i]
538 break
539 i = i + 1
540 l = len(line)
541 i = 0
542 while i < l:
543 if line[i] == ' ' or line[i] == '\t':
544 i = i + 1
545 continue
546 if line[i].isalnum():
547 s = i
548 while i < l:
549 if line[i] not in " \t(){}:;,+-*/%&!|[]=><":
550 i = i + 1
551 else:
552 break
553 self.tokens.append(('name', line[s:i]))
554 continue
555 if line[i] in "(){}:;,[]":
556 # if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
557 # line[i] == '}' or line[i] == ':' or line[i] == ';' or \
558 # line[i] == ',' or line[i] == '[' or line[i] == ']':
559 self.tokens.append(('sep', line[i]))
560 i = i + 1
561 continue
562 if line[i] in "+-*><=/%&!|.":
563 # if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
564 # line[i] == '>' or line[i] == '<' or line[i] == '=' or \
565 # line[i] == '/' or line[i] == '%' or line[i] == '&' or \
566 # line[i] == '!' or line[i] == '|' or line[i] == '.':
567 if line[i] == '.' and i + 2 < l and \
568 line[i+1] == '.' and line[i+2] == '.':
569 self.tokens.append(('name', '...'))
570 i = i + 3
571 continue
573 j = i + 1
574 if j < l and line[j] in "+-*><=/%&!|":
575 # line[j] == '+' or line[j] == '-' or line[j] == '*' or \
576 # line[j] == '>' or line[j] == '<' or line[j] == '=' or \
577 # line[j] == '/' or line[j] == '%' or line[j] == '&' or \
578 # line[j] == '!' or line[j] == '|'):
579 self.tokens.append(('op', line[i:j+1]))
580 i = j + 1
581 else:
582 self.tokens.append(('op', line[i]))
583 i = i + 1
584 continue
585 s = i
586 while i < l:
587 if line[i] not in " \t(){}:;,+-*/%&!|[]=><":
588 # line[i] != ' ' and line[i] != '\t' and
589 # line[i] != '(' and line[i] != ')' and
590 # line[i] != '{' and line[i] != '}' and
591 # line[i] != ':' and line[i] != ';' and
592 # line[i] != ',' and line[i] != '+' and
593 # line[i] != '-' and line[i] != '*' and
594 # line[i] != '/' and line[i] != '%' and
595 # line[i] != '&' and line[i] != '!' and
596 # line[i] != '|' and line[i] != '[' and
597 # line[i] != ']' and line[i] != '=' and
598 # line[i] != '*' and line[i] != '>' and
599 # line[i] != '<'):
600 i = i + 1
601 else:
602 break
603 self.tokens.append(('name', line[s:i]))
605 tok = self.tokens[0]
606 self.tokens = self.tokens[1:]
607 self.last = tok
608 return tok
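# Illustrative tokenization; each call to token() yields one tuple:
#   "int a = 1;"  ->  ('name', 'int'), ('name', 'a'), ('op', '='),
#                     ('name', '1'), ('sep', ';')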
610 class CParser:
611 """The C module parser"""
612 def __init__(self, filename, idx=None):
613 self.filename = filename
614 if len(filename) > 2 and filename[-2:] == '.h':
615 self.is_header = 1
616 else:
617 self.is_header = 0
618 self.input = open(filename)
619 self.lexer = CLexer(self.input)
620 if idx is None:
621 self.index = index()
622 else:
623 self.index = idx
624 self.top_comment = ""
625 self.last_comment = ""
626 self.comment = None
627 self.collect_ref = 0
628 self.no_error = 0
629 self.conditionals = []
630 self.defines = []
632 def collect_references(self):
633 self.collect_ref = 1
635 def stop_error(self):
636 self.no_error = 1
638 def start_error(self):
639 self.no_error = 0
641 def lineno(self):
642 return self.lexer.getlineno()
644 def index_add(self, name, module, static, type, info=None, extra=None):
645 if self.is_header == 1:
646 self.index.add(name, module, module, static, type, self.lineno(),
647 info, extra, self.conditionals)
648 else:
649 self.index.add(name, None, module, static, type, self.lineno(),
650 info, extra, self.conditionals)
652 def index_add_ref(self, name, module, static, type, info=None,
653 extra=None):
654 if self.is_header == 1:
655 self.index.add_ref(name, module, module, static, type,
656 self.lineno(), info, extra, self.conditionals)
657 else:
658 self.index.add_ref(name, None, module, static, type, self.lineno(),
659 info, extra, self.conditionals)
661 def warning(self, msg):
662 global warnings
663 warnings = warnings + 1
664 if self.no_error:
665 return
666 print(msg)
668 def error(self, msg, token=-1):
669 if self.no_error:
670 return
672 print("Parse Error: " + msg)
673 if token != -1:
674 print("Got token ", token)
675 self.lexer.debug()
676 sys.exit(1)
678 def debug(self, msg, token=-1):
679 print("Debug: " + msg)
680 if token != -1:
681 print("Got token ", token)
682 self.lexer.debug()
684 def parseTopComment(self, comment):
685 res = {}
686 lines = comment.split("\n")
687 item = None
688 for line in lines:
689 line = line.lstrip().lstrip('*').lstrip()
691 m = re.match('([_.a-zA-Z0-9]+):(.*)', line)
692 if m:
693 item = m.group(1)
694 line = m.group(2).lstrip()
696 if item:
697 if item in res:
698 res[item] = res[item] + " " + line
699 else:
700 res[item] = line
701 self.index.info = res
703 def strip_lead_star(self, line):
704 if line.lstrip().startswith('*'):
705 line = line.replace('*', '', 1)
706 return line
708 def cleanupComment(self):
709 if not isinstance(self.comment, str):
710 return
711 # remove the leading * on multi-line comments
712 lines = self.comment.splitlines(True)
713 com = ""
714 for line in lines:
715 com = com + self.strip_lead_star(line)
716 self.comment = com.strip()
718 def parseComment(self, token):
719 com = token[1]
720 if self.top_comment == "":
721 self.top_comment = com
722 if self.comment is None or com[0] == '*':
723 self.comment = com
724 else:
725 self.comment = self.comment + com
726 token = self.lexer.token()
728 if self.comment.find("DOC_DISABLE") != -1:
729 self.stop_error()
731 if self.comment.find("DOC_ENABLE") != -1:
732 self.start_error()
734 return token
737 # Parse a comment block associated with a typedef
739 def parseTypeComment(self, name, quiet=False):
740 if name[0:2] == '__':
741 quiet = True
743 if self.comment is None:
744 if not quiet:
745 self.warning("Missing comment for type %s" % name)
746 return None
747 if not self.comment.startswith('*'):
748 if not quiet:
749 self.warning("Missing * in type comment for %s" % name)
750 return None
752 lines = self.comment.split('\n')
753 # Remove lines that contain only a single asterisk
754 lines[:] = [line for line in lines if line.strip() != '*']
756 if lines[0] != "* %s:" % name:
757 if not quiet:
758 self.warning("Misformatted type comment for %s" % name)
759 self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
760 return None
761 del lines[0]
763 # Concatenate all remaining lines, stripping leading asterisks
764 desc = " ".join([line.lstrip("*").strip() for line in lines]).strip()
766 if not (quiet or desc):
767 self.warning("Type comment for %s lack description of the macro"
768 % name)
770 return desc
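# Shape of the C type comment expected above (the name is hypothetical):
#   /**
#    * virFooPtr:
#    *
#    * description of the type
#    */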
772 # Parse a comment block associated with a macro
774 def parseMacroComment(self, name, quiet=0):
775 global ignored_macros
777 if name[0:2] == '__':
778 quiet = 1
779 if name in ignored_macros:
780 quiet = 1
782 args = []
783 desc = ""
785 if self.comment is None:
786 if not quiet:
787 self.warning("Missing comment for macro %s" % name)
788 return args, desc
789 if self.comment[0] != '*':
790 if not quiet:
791 self.warning("Missing * in macro comment for %s" % name)
792 return args, desc
793 lines = self.comment.split('\n')
794 if lines[0] == '*':
795 del lines[0]
796 if lines[0] != "* %s:" % name:
797 if not quiet:
798 self.warning("Misformatted macro comment for %s" % name)
799 self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
800 return args, desc
801 del lines[0]
802 while lines[0] == '*':
803 del lines[0]
804 while len(lines) > 0 and lines[0][0:3] == '* @':
805 l = lines[0][3:]
806 try:
807 arg, desc = l.split(':', 1)
808 desc = desc.strip()
809 arg = arg.strip()
810 except:
811 if not quiet:
812 self.warning("Misformatted macro comment for %s" % name)
813 self.warning(" problem with '%s'" % lines[0])
814 del lines[0]
815 continue
816 del lines[0]
817 l = lines[0].strip()
818 while len(l) > 2 and l[0:3] != '* @':
819 while l[0] == '*':
820 l = l[1:]
821 desc = desc + ' ' + l.strip()
822 del lines[0]
823 if len(lines) == 0:
824 break
825 l = lines[0]
826 args.append((arg, desc))
827 while len(lines) > 0 and lines[0] == '*':
828 del lines[0]
829 desc = ""
830 while len(lines) > 0:
831 l = lines[0]
832 while len(l) > 0 and l[0] == '*':
833 l = l[1:]
834 l = l.strip()
835 desc = desc + " " + l
836 del lines[0]
838 desc = desc.strip()
840 if quiet == 0:
841 if desc == "":
842 self.warning("Macro comment for %s lack description of the macro" % name)
844 return args, desc
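# Shape of the macro comment expected above (names are hypothetical):
#   /**
#    * VIR_FOO_BUFLEN:
#    * @arg: description of the argument, if the macro takes one
#    *
#    * free-form description of the macro
#    */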
847 # Parse a comment block and merge the information found in the
848 # parameter descriptions; finally returns a block as complete
849 # as possible
851 def mergeFunctionComment(self, name, description, quiet=0):
852 global ignored_functions
854 if name == 'main':
855 quiet = 1
856 if name[0:2] == '__':
857 quiet = 1
858 if name in ignored_functions:
859 quiet = 1
861 ret, args = description
862 desc = ""
863 retdesc = ""
865 if self.comment is None:
866 if not quiet:
867 self.warning("Missing comment for function %s" % name)
868 return (ret[0], retdesc), args, desc
869 if self.comment[0] != '*':
870 if not quiet:
871 self.warning("Missing * in function comment for %s" % name)
872 return (ret[0], retdesc), args, desc
873 lines = self.comment.split('\n')
874 if lines[0] == '*':
875 del lines[0]
876 if lines[0] != "* %s:" % name:
877 if not quiet:
878 self.warning("Misformatted function comment for %s" % name)
879 self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
880 return (ret[0], retdesc), args, desc
881 del lines[0]
882 while lines[0] == '*':
883 del lines[0]
884 nbargs = len(args)
885 while len(lines) > 0 and lines[0][0:3] == '* @':
886 l = lines[0][3:]
887 try:
888 arg, desc = l.split(':', 1)
889 desc = desc.strip()
890 arg = arg.strip()
891 except:
892 if not quiet:
893 self.warning("Misformatted function comment for %s" % name)
894 self.warning(" problem with '%s'" % lines[0])
895 del lines[0]
896 continue
897 del lines[0]
898 l = lines[0].strip()
899 while len(l) > 2 and l[0:3] != '* @':
900 while l[0] == '*':
901 l = l[1:]
902 desc = desc + ' ' + l.strip()
903 del lines[0]
904 if len(lines) == 0:
905 break
906 l = lines[0]
907 i = 0
908 while i < nbargs:
909 if args[i][1] == arg:
910 args[i] = (args[i][0], arg, desc)
911 break
912 i = i + 1
913 if i >= nbargs:
914 if not quiet:
915 self.warning("Unable to find arg %s from function comment for %s" % (
916 arg, name))
917 while len(lines) > 0 and lines[0] == '*':
918 del lines[0]
919 desc = None
920 while len(lines) > 0:
921 l = lines[0]
922 i = 0
923 # Remove all leading '*', followed by at most one ' ' character
924 # since we need to preserve correct indentation of code examples
925 while i < len(l) and l[i] == '*':
926 i = i + 1
927 if i > 0:
928 if i < len(l) and l[i] == ' ':
929 i = i + 1
930 l = l[i:]
931 if len(l) >= 6 and l[0:7] == "Returns":
932 try:
933 l = l.split(' ', 1)[1]
934 except:
935 l = ""
936 retdesc = l.strip()
937 del lines[0]
938 while len(lines) > 0:
939 l = lines[0]
940 while len(l) > 0 and l[0] == '*':
941 l = l[1:]
942 l = l.strip()
943 retdesc = retdesc + " " + l
944 del lines[0]
945 else:
946 if desc is not None:
947 desc = desc + "\n" + l
948 else:
949 desc = l
950 del lines[0]
952 if desc is None:
953 desc = ""
954 retdesc = retdesc.strip()
955 desc = desc.strip()
957 if quiet == 0:
959 # report missing comments
961 i = 0
962 while i < nbargs:
963 if args[i][2] is None and args[i][0] != "void" and args[i][1] is not None:
964 self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
965 i = i + 1
966 if retdesc == "" and ret[0] != "void":
967 self.warning("Function comment for %s lacks description of return value" % name)
968 if desc == "":
969 self.warning("Function comment for %s lacks description of the function" % name)
972 return (ret[0], retdesc), args, desc
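# Shape of the function comment merged above (names are hypothetical);
# each @name must match a declared parameter:
#   /**
#    * virFooBar:
#    * @conn: description of the first parameter
#    *
#    * description of what the function does
#    *
#    * Returns description of the return value
#    */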
974 def parsePreproc(self, token):
975 if debug:
976 print("=> preproc ", token, self.lexer.tokens)
977 name = token[1]
978 if name == "#include":
979 token = self.lexer.token()
980 if token is None:
981 return None
982 if token[0] == 'preproc':
983 self.index_add(token[1], self.filename, not self.is_header,
984 "include")
985 return self.lexer.token()
986 return token
987 if name == "#define":
988 token = self.lexer.token()
989 if token is None:
990 return None
991 if token[0] == 'preproc':
992 # TODO macros with arguments
993 name = token[1]
994 lst = []
995 token = self.lexer.token()
996 while token is not None and token[0] == 'preproc' and \
997 token[1][0] != '#':
998 lst.append(token[1])
999 token = self.lexer.token()
1000 try:
1001 name = name.split('(') [0]
1002 except:
1003 pass
1005 # skip hidden macros
1006 if name in hidden_macros:
1007 return token
1008 if name[-2:] == "_H" or name[-8:] == "_H_ALLOW":
1009 return token
1011 strValue = None
1012 if len(lst) == 1 and lst[0][0] == '"' and lst[0][-1] == '"':
1013 strValue = lst[0][1:-1]
1014 (args, desc) = self.parseMacroComment(name, not self.is_header)
1015 self.index_add(name, self.filename, not self.is_header,
1016 "macro", (args, desc, strValue))
1017 return token
1020 # Processing of conditionals modified by Bill 1/1/05
1022 # We process conditionals (i.e. tokens from #ifdef, #ifndef,
1023 # #if, #else and #endif) for headers and mainline code,
1024 # store the ones from the header in libxml2-api.xml, and later
1025 # (in the routine merge_public) verify that the two (header and
1026 # mainline code) agree.
1028 # There is a small problem with processing the headers. Some of
1029 # the variables are not concerned with enabling / disabling of
1030 # library functions (e.g. '__XML_PARSER_H__'), and we don't want
1031 # them to be included in libxml2-api.xml, or involved in
1032 # the check between the header and the mainline code. To
1033 # accomplish this, we ignore any conditional which doesn't include
1034 # the string 'ENABLED'
1036 if name == "#ifdef":
1037 apstr = self.lexer.tokens[0][1]
1038 try:
1039 self.defines.append(apstr)
1040 if apstr.find('ENABLED') != -1:
1041 self.conditionals.append("defined(%s)" % apstr)
1042 except:
1043 pass
1044 elif name == "#ifndef":
1045 apstr = self.lexer.tokens[0][1]
1046 try:
1047 self.defines.append(apstr)
1048 if apstr.find('ENABLED') != -1:
1049 self.conditionals.append("!defined(%s)" % apstr)
1050 except:
1051 pass
1052 elif name == "#if":
1053 apstr = ""
1054 for tok in self.lexer.tokens:
1055 if apstr != "":
1056 apstr = apstr + " "
1057 apstr = apstr + tok[1]
1058 try:
1059 self.defines.append(apstr)
1060 if apstr.find('ENABLED') != -1:
1061 self.conditionals.append(apstr)
1062 except:
1063 pass
1064 elif name == "#else":
1065 if self.conditionals != [] and \
1066 self.defines[-1].find('ENABLED') != -1:
1067 self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
1068 elif name == "#endif":
1069 if self.conditionals != [] and \
1070 self.defines[-1].find('ENABLED') != -1:
1071 self.conditionals = self.conditionals[:-1]
1072 self.defines = self.defines[:-1]
1073 token = self.lexer.token()
1074 while token is not None and token[0] == 'preproc' and \
1075 token[1][0] != '#':
1076 token = self.lexer.token()
1077 return token
1080 # Token acquisition on top of the lexer; it handles preprocessor
1081 # directives and comments internally since they are logically not
1082 # part of the program structure.
1084 def push(self, tok):
1085 self.lexer.push(tok)
1087 def token(self):
1088 global ignored_words
1090 token = self.lexer.token()
1091 while token is not None:
1092 if token[0] == 'comment':
1093 token = self.parseComment(token)
1094 continue
1095 elif token[0] == 'preproc':
1096 token = self.parsePreproc(token)
1097 continue
1098 elif token[0] == "name" and token[1] == "__const":
1099 token = ("name", "const")
1100 return token
1101 elif token[0] == "name" and token[1] == "__attribute":
1102 token = self.lexer.token()
1103 while token is not None and token[1] != ";":
1104 token = self.lexer.token()
1105 return token
1106 elif token[0] == "name" and token[1] in ignored_words:
1107 (n, info) = ignored_words[token[1]]
1108 i = 0
1109 while i < n:
1110 token = self.lexer.token()
1111 i = i + 1
1112 token = self.lexer.token()
1113 continue
1114 else:
1115 if debug:
1116 print("=> ", token)
1117 return token
1118 return None
1121 # Parse a typedef; it records the type and its name.
1123 def parseTypedef(self, token):
1124 if token is None:
1125 return None
1126 token = self.parseType(token)
1127 if token is None:
1128 self.error("parsing typedef")
1129 return None
1130 base_type = self.type
1131 type = base_type
1132 # self.debug("end typedef type", token)
1133 while token is not None:
1134 if token[0] == "name":
1135 name = token[1]
1136 signature = self.signature
1137 if signature is not None:
1138 type = type.split('(')[0]
1139 d = self.mergeFunctionComment(name,
1140 ((type, None), signature), 1)
1141 self.index_add(name, self.filename, not self.is_header,
1142 "functype", d)
1143 else:
1144 if base_type == "struct":
1145 self.index_add(name, self.filename, not self.is_header,
1146 "struct", type)
1147 base_type = "struct " + name
1148 else:
1149 # TODO report missing or misformatted comments
1150 info = self.parseTypeComment(name, 1)
1151 self.index_add(name, self.filename, not self.is_header,
1152 "typedef", type, info)
1153 token = self.token()
1154 else:
1155 self.error("parsing typedef: expecting a name")
1156 return token
1157 # self.debug("end typedef", token)
1158 if token is not None and token[0] == 'sep' and token[1] == ',':
1159 type = base_type
1160 token = self.token()
1161 while token is not None and token[0] == "op":
1162 type = type + token[1]
1163 token = self.token()
1164 elif token is not None and token[0] == 'sep' and token[1] == ';':
1165 break
1166 elif token is not None and token[0] == 'name':
1167 type = base_type
1168 continue
1169 else:
1170 self.error("parsing typedef: expecting ';'", token)
1171 return token
1172 token = self.token()
1173 return token
1176 # Parse a C code block, used for functions; it parses up to
1177 # and including the balancing }
1179 def parseBlock(self, token):
1180 while token is not None:
1181 if token[0] == "sep" and token[1] == "{":
1182 token = self.token()
1183 token = self.parseBlock(token)
1184 elif token[0] == "sep" and token[1] == "}":
1185 self.comment = None
1186 token = self.token()
1187 return token
1188 else:
1189 if self.collect_ref == 1:
1190 oldtok = token
1191 token = self.token()
1192 if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
1193 if token[0] == "sep" and token[1] == "(":
1194 self.index_add_ref(oldtok[1], self.filename,
1195 0, "function")
1196 token = self.token()
1197 elif token[0] == "name":
1198 token = self.token()
1199 if token[0] == "sep" and (token[1] == ";" or
1200 token[1] == "," or token[1] == "="):
1201 self.index_add_ref(oldtok[1], self.filename,
1202 0, "type")
1203 elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
1204 self.index_add_ref(oldtok[1], self.filename,
1205 0, "typedef")
1206 elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
1207 self.index_add_ref(oldtok[1], self.filename,
1208 0, "typedef")
1210 else:
1211 token = self.token()
1212 return token
1215 # Parse a C struct definition till the balancing }
1217 def parseStruct(self, token):
1218 fields = []
1219 # self.debug("start parseStruct", token)
1220 while token is not None:
1221 if token[0] == "sep" and token[1] == "{":
1222 token = self.token()
1223 token = self.parseTypeBlock(token)
1224 elif token[0] == "sep" and token[1] == "}":
1225 self.struct_fields = fields
1226 # self.debug("end parseStruct", token)
1227 # print(fields)
1228 token = self.token()
1229 return token
1230 else:
1231 base_type = self.type
1232 # self.debug("before parseType", token)
1233 token = self.parseType(token)
1234 # self.debug("after parseType", token)
1235 if token is not None and token[0] == "name":
1236 fname = token[1]
1237 token = self.token()
1238 if token[0] == "sep" and token[1] == ";":
1239 self.comment = None
1240 token = self.token()
1241 self.cleanupComment()
1242 if self.type == "union":
1243 fields.append((self.type, fname, self.comment,
1244 self.union_fields))
1245 self.union_fields = []
1246 else:
1247 fields.append((self.type, fname, self.comment))
1248 self.comment = None
1249 else:
1250 self.error("parseStruct: expecting ;", token)
1251 elif token is not None and token[0] == "sep" and token[1] == "{":
1252 token = self.token()
1253 token = self.parseTypeBlock(token)
1254 if token is not None and token[0] == "name":
1255 token = self.token()
1256 if token is not None and token[0] == "sep" and token[1] == ";":
1257 token = self.token()
1258 else:
1259 self.error("parseStruct: expecting ;", token)
1260 else:
1261 self.error("parseStruct: name", token)
1262 token = self.token()
1263 self.type = base_type
1264 self.struct_fields = fields
1265 # self.debug("end parseStruct", token)
1266 # print(fields)
1267 return token
1270 # Parse a C union definition till the balancing }
1272 def parseUnion(self, token):
1273 fields = []
1274 # self.debug("start parseUnion", token)
1275 while token is not None:
1276 if token[0] == "sep" and token[1] == "{":
1277 token = self.token()
1278 token = self.parseTypeBlock(token)
1279 elif token[0] == "sep" and token[1] == "}":
1280 self.union_fields = fields
1281 # self.debug("end parseUnion", token)
1282 # print(fields)
1283 token = self.token()
1284 return token
1285 else:
1286 base_type = self.type
1287 # self.debug("before parseType", token)
1288 token = self.parseType(token)
1289 # self.debug("after parseType", token)
1290 if token is not None and token[0] == "name":
1291 fname = token[1]
1292 token = self.token()
1293 if token[0] == "sep" and token[1] == ";":
1294 self.comment = None
1295 token = self.token()
1296 self.cleanupComment()
1297 fields.append((self.type, fname, self.comment))
1298 self.comment = None
1299 else:
1300 self.error("parseUnion: expecting ;", token)
1301 elif token is not None and token[0] == "sep" and token[1] == "{":
1302 token = self.token()
1303 token = self.parseTypeBlock(token)
1304 if token is not None and token[0] == "name":
1305 token = self.token()
1306 if token is not None and token[0] == "sep" and token[1] == ";":
1307 token = self.token()
1308 else:
1309 self.error("parseUnion: expecting ;", token)
1310 else:
1311 self.error("parseUnion: name", token)
1312 token = self.token()
1313 self.type = base_type
1314 self.union_fields = fields
1315 # self.debug("end parseUnion", token)
1316 # print(fields)
1317 return token
1320 # Parse a C enum block, parse till the balancing }
1322 def parseEnumBlock(self, token):
1323 self.enums = []
1324 name = None
1325 comment = ""
1326 value = "-1"
1327 commentsBeforeVal = self.comment is not None
1328 while token is not None:
1329 if token[0] == "sep" and token[1] == "{":
1330 token = self.token()
1331 token = self.parseTypeBlock(token)
1332 elif token[0] == "sep" and token[1] == "}":
1333 if name is not None:
1334 self.cleanupComment()
1335 if self.comment is not None:
1336 comment = self.comment
1337 self.comment = None
1338 self.enums.append((name, value, comment))
1339 token = self.token()
1340 return token
1341 elif token[0] == "name":
1342 self.cleanupComment()
1343 if name is not None:
1344 if self.comment is not None:
1345 comment = self.comment.strip()
1346 self.comment = None
1347 self.enums.append((name, value, comment))
1348 name = token[1]
1349 comment = ""
1350 token = self.token()
1351 if token[0] == "op" and token[1][0] == "=":
1352 value = ""
1353 if len(token[1]) > 1:
1354 value = token[1][1:]
1355 token = self.token()
1356 while token[0] != "sep" or (token[1] != ',' and
1357 token[1] != '}'):
1358 # We might be dealing with '1U << 12' here
1359 value = value + re.sub(r"^(\d+)U$", r"\1", token[1])
1360 token = self.token()
1361 else:
1362 try:
1363 value = "%d" % (int(value) + 1)
1364 except:
1365 self.warning("Failed to compute value of enum %s" % name)
1366 value = ""
1367 if token[0] == "sep" and token[1] == ",":
1368 if commentsBeforeVal:
1369 self.cleanupComment()
1370 self.enums.append((name, value, self.comment))
1371 name = comment = self.comment = None
1372 token = self.token()
1373 else:
1374 token = self.token()
1375 return token
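# Enumerators without an explicit value get the previous numeric value
# plus one; an explicit '= expr' is captured token by token, with a
# trailing 'U' on plain integers stripped (e.g. '1U << 12' is stored
# as '1<<12').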
1377 def parseVirEnumDecl(self, token):
1378 if token[0] != "name":
1379 self.error("parsing VIR_ENUM_DECL: expecting name", token)
1381 token = self.token()
1383 if token[0] != "sep":
1384 self.error("parsing VIR_ENUM_DECL: expecting ')'", token)
1386 if token[1] != ')':
1387 self.error("parsing VIR_ENUM_DECL: expecting ')'", token)
1389 token = self.token()
1390 if token[0] == "sep" and token[1] == ';':
1391 token = self.token()
1393 return token
1395 def parseVirEnumImpl(self, token):
1396 # First the type name
1397 if token[0] != "name":
1398 self.error("parsing VIR_ENUM_IMPL: expecting name", token)
1400 token = self.token()
1402 if token[0] != "sep":
1403 self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
1405 if token[1] != ',':
1406 self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
1407 token = self.token()
1409 # Now the sentinel name
1410 if token[0] != "name":
1411 self.error("parsing VIR_ENUM_IMPL: expecting name", token)
1413 token = self.token()
1415 if token[0] != "sep":
1416 self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
1418 if token[1] != ',':
1419 self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
1421 token = self.token()
1423 # Now a list of strings (optional comments)
1424 while token is not None:
1425 isGettext = False
1426 # First a string, optionally with N_(...)
1427 if token[0] == 'name':
1428 if token[1] != 'N_':
1429 self.error("parsing VIR_ENUM_IMPL: expecting 'N_'", token)
1430 token = self.token()
1431 if token[0] != "sep" or token[1] != '(':
1432 self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
1433 token = self.token()
1434 isGettext = True
1436 if token[0] != "string":
1437 self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
1438 token = self.token()
1439 elif token[0] == "string":
1440 token = self.token()
1441 else:
1442 self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
1444 # Then a separator
1445 if token[0] == "sep":
1446 if isGettext and token[1] == ')':
1447 token = self.token()
1449 if token[1] == ',':
1450 token = self.token()
1452 if token[1] == ')':
1453 token = self.token()
1454 break
1456 # Then an optional comment
1457 if token[0] == "comment":
1458 token = self.token()
1461 if token[0] == "sep" and token[1] == ';':
1462 token = self.token()
1464 return token
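# Shape of the VIR_ENUM_IMPL invocation accepted here (identifiers are
# illustrative):
#   VIR_ENUM_IMPL(virFoo, VIR_FOO_LAST, "one", N_("two"), ...);
# i.e. a type name, a sentinel name, then comma-separated strings, each
# optionally wrapped in N_().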
1466 def parseVirLogInit(self, token):
1467 if token[0] != "string":
1468 self.error("parsing VIR_LOG_INIT: expecting string", token)
1470 token = self.token()
1472 if token[0] != "sep":
1473 self.error("parsing VIR_LOG_INIT: expecting ')'", token)
1475 if token[1] != ')':
1476 self.error("parsing VIR_LOG_INIT: expecting ')'", token)
1478 token = self.token()
1479 if token[0] == "sep" and token[1] == ';':
1480 token = self.token()
1482 return token
1485 # Parse a C definition block, used for structs or unions; it parses
1486 # up to the balancing }
1488 def parseTypeBlock(self, token):
1489 while token is not None:
1490 if token[0] == "sep" and token[1] == "{":
1491 token = self.token()
1492 token = self.parseTypeBlock(token)
1493 elif token[0] == "sep" and token[1] == "}":
1494 token = self.token()
1495 return token
1496 else:
1497 token = self.token()
1498 return token
1501 # Parse a type: the fact that the type name can occur either after
1502 # the definition or within it makes this a little harder;
1503 # if inside, the name token is pushed back before returning
1505 def parseType(self, token):
1506 self.type = ""
1507 self.struct_fields = []
1508 self.union_fields = []
1509 self.signature = None
1510 if token is None:
1511 return token
1513 while (token[0] == "name" and
1514 token[1] in ["const", "unsigned", "signed"]):
1515 if self.type == "":
1516 self.type = token[1]
1517 else:
1518 self.type = self.type + " " + token[1]
1519 token = self.token()
1521 if token[0] == "name" and token[1] == "long":
1522 if self.type == "":
1523 self.type = token[1]
1524 else:
1525 self.type = self.type + " " + token[1]
1527 # some read ahead for long long
1528 oldtmp = token
1529 token = self.token()
1530 if token[0] == "name" and token[1] == "long":
1531 self.type = self.type + " " + token[1]
1532 else:
1533 self.push(token)
1534 token = oldtmp
1536 oldtmp = token
1537 token = self.token()
1538 if token[0] == "name" and token[1] == "int":
1539 self.type = self.type + " " + token[1]
1540 else:
1541 self.push(token)
1542 token = oldtmp
1544 elif token[0] == "name" and token[1] == "short":
1545 if self.type == "":
1546 self.type = token[1]
1547 else:
1548 self.type = self.type + " " + token[1]
1550 elif token[0] == "name" and token[1] == "struct":
1551 if self.type == "":
1552 self.type = token[1]
1553 else:
1554 self.type = self.type + " " + token[1]
1555 token = self.token()
1556 nametok = None
1557 if token[0] == "name":
1558 nametok = token
1559 token = self.token()
1560 if token is not None and token[0] == "sep" and token[1] == "{":
1561 token = self.token()
1562 token = self.parseStruct(token)
1563 elif token is not None and token[0] == "op" and token[1] == "*":
1564 self.type = self.type + " " + nametok[1] + " *"
1565 token = self.token()
1566 while token is not None and token[0] == "op" and token[1] == "*":
1567 self.type = self.type + " *"
1568 token = self.token()
1569 if token[0] == "name":
1570 nametok = token
1571 token = self.token()
1572 else:
1573 self.error("struct : expecting name", token)
1574 return token
1575 elif token is not None and token[0] == "name" and nametok is not None:
1576 self.type = self.type + " " + nametok[1]
1577 return token
1579 if nametok is not None:
1580 self.lexer.push(token)
1581 token = nametok
1582 return token
1584 elif token[0] == "name" and token[1] == "union":
1585 if self.type == "":
1586 self.type = token[1]
1587 else:
1588 self.type = self.type + " " + token[1]
1589 token = self.token()
1590 nametok = None
1591 if token[0] == "name":
1592 nametok = token
1593 token = self.token()
1594 if token is not None and token[0] == "sep" and token[1] == "{":
1595 token = self.token()
1596 token = self.parseUnion(token)
1597 elif token is not None and token[0] == "name" and nametok is not None:
1598 self.type = self.type + " " + nametok[1]
1599 return token
1601 if nametok is not None:
1602 self.lexer.push(token)
1603 token = nametok
1604 return token
1606 elif token[0] == "name" and token[1] == "enum":
1607 if self.type == "":
1608 self.type = token[1]
1609 else:
1610 self.type = self.type + " " + token[1]
1611 self.enums = []
1612 token = self.token()
1613 if token is not None and token[0] == "sep" and token[1] == "{":
1614 # drop comments before the enum block
1615 self.comment = None
1616 token = self.token()
1617 token = self.parseEnumBlock(token)
1618 else:
1619 self.error("parsing enum: expecting '{'", token)
1620 enum_type = None
1621 if token is not None and token[0] != "name":
1622 self.lexer.push(token)
1623 token = ("name", "enum")
1624 else:
1625 enum_type = token[1]
1626 for enum in self.enums:
1627 self.index_add(enum[0], self.filename,
1628 not self.is_header, "enum",
1629 (enum[1], enum[2], enum_type))
1630 return token
1631 elif token[0] == "name" and token[1] == "VIR_ENUM_DECL":
1632 token = self.token()
1633 if token is not None and token[0] == "sep" and token[1] == "(":
1634 token = self.token()
1635 token = self.parseVirEnumDecl(token)
1636 else:
1637 self.error("parsing VIR_ENUM_DECL: expecting '('", token)
1638 if token is not None:
1639 self.lexer.push(token)
1640 token = ("name", "virenumdecl")
1641 return token
1643 elif token[0] == "name" and token[1] == "VIR_ENUM_IMPL":
1644 token = self.token()
1645 if token is not None and token[0] == "sep" and token[1] == "(":
1646 token = self.token()
1647 token = self.parseVirEnumImpl(token)
1648 else:
1649 self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
1650 if token is not None:
1651 self.lexer.push(token)
1652 token = ("name", "virenumimpl")
1653 return token
1655 elif token[0] == "name" and token[1] == "VIR_LOG_INIT":
1656 token = self.token()
1657 if token is not None and token[0] == "sep" and token[1] == "(":
1658 token = self.token()
1659 token = self.parseVirLogInit(token)
1660 else:
1661 self.error("parsing VIR_LOG_INIT: expecting '('", token)
1662 if token is not None:
1663 self.lexer.push(token)
1664 token = ("name", "virloginit")
1665 return token
1667 elif token[0] == "name":
1668 if self.type == "":
1669 self.type = token[1]
1670 else:
1671 self.type = self.type + " " + token[1]
1672 else:
1673 self.error("parsing type %s: expecting a name" % (self.type),
1674 token)
1675 return token
1676 token = self.token()
1677 while token is not None and (token[0] == "op" or
1678 token[0] == "name" and token[1] == "const"):
1679 self.type = self.type + " " + token[1]
1680 token = self.token()
1683 # if there is a parenthesis here, this means a function type
1685 if token is not None and token[0] == "sep" and token[1] == '(':
1686 self.type = self.type + token[1]
1687 token = self.token()
1688 while token is not None and token[0] == "op" and token[1] == '*':
1689 self.type = self.type + token[1]
1690 token = self.token()
1691 if token is None or token[0] != "name":
1692 self.error("parsing function type, name expected", token)
1693 return token
1694 self.type = self.type + token[1]
1695 nametok = token
1696 token = self.token()
1697 if token is not None and token[0] == "sep" and token[1] == ')':
1698 self.type = self.type + token[1]
1699 token = self.token()
1700 if token is not None and token[0] == "sep" and token[1] == '(':
1701 token = self.token()
1702 type = self.type
1703 token = self.parseSignature(token)
1704 self.type = type
1705 else:
1706 self.error("parsing function type, '(' expected", token)
1707 return token
1708 else:
1709 self.error("parsing function type, ')' expected", token)
1710 return token
1711 self.lexer.push(token)
1712 token = nametok
1713 return token
1716 # do some lookahead for arrays
1718 if token is not None and token[0] == "name":
1719 nametok = token
1720 token = self.token()
1721 if token is not None and token[0] == "sep" and token[1] == '[':
1722 self.type = self.type + " " + nametok[1]
1723 while token is not None and token[0] == "sep" and token[1] == '[':
1724 self.type = self.type + token[1]
1725 token = self.token()
1726 while token is not None and token[0] != 'sep' and \
1727 token[1] != ']' and token[1] != ';':
1728 self.type = self.type + token[1]
1729 token = self.token()
1730 if token is not None and token[0] == 'sep' and token[1] == ']':
1731 self.type = self.type + token[1]
1732 token = self.token()
1733 else:
1734 self.error("parsing array type, ']' expected", token)
1735 return token
1736 elif token is not None and token[0] == "sep" and token[1] == ':':
1737 # remove :12 in case it's a limited int size
1738 token = self.token()
1739 token = self.token()
1740 self.lexer.push(token)
1741 token = nametok
1743 return token
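# parseType() accumulates the parsed type string in self.type (plus
# self.signature for function pointer types and self.struct_fields /
# self.union_fields for aggregates) and normally returns with the
# declarator name as the current token so the caller can register it.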
1746 # Parse a signature: '(' has been parsed and we scan the type definitions
1747 # up to and including the ')'
1748 def parseSignature(self, token):
1749 signature = []
1750 if token is not None and token[0] == "sep" and token[1] == ')':
1751 self.signature = []
1752 token = self.token()
1753 return token
1754 while token is not None:
1755 token = self.parseType(token)
1756 if token is not None and token[0] == "name":
1757 signature.append((self.type, token[1], None))
1758 token = self.token()
1759 elif token is not None and token[0] == "sep" and token[1] == ',':
1760 token = self.token()
1761 continue
1762 elif token is not None and token[0] == "sep" and token[1] == ')':
1763 # only the type was provided
1764 if self.type == "...":
1765 signature.append((self.type, "...", None))
1766 else:
1767 signature.append((self.type, None, None))
1768 if token is not None and token[0] == "sep":
1769 if token[1] == ',':
1770 token = self.token()
1771 continue
1772 elif token[1] == ')':
1773 token = self.token()
1774 break
1775 self.signature = signature
1776 return token
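# parseSignature() stores the parameter list in self.signature as a list
# of (type, name, None) triples; a parameter given only by its type gets
# None as the name, and a variadic '...' is kept as both type and name.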
1778 # this dict contains the functions that are allowed to use [unsigned]
1779 # long for legacy reasons in their signature and return type. this list is
1780 # fixed. new procedures and public APIs have to use [unsigned] long long
1781 long_legacy_functions = {
1782 "virGetVersion": (False, ("libVer", "typeVer")),
1783 "virConnectGetLibVersion": (False, ("libVer")),
1784 "virConnectGetVersion": (False, ("hvVer")),
1785 "virDomainGetMaxMemory": (True, ()),
1786 "virDomainMigrate": (False, ("flags", "bandwidth")),
1787 "virDomainMigrate2": (False, ("flags", "bandwidth")),
1788 "virDomainMigrateBegin3": (False, ("flags", "bandwidth")),
1789 "virDomainMigrateConfirm3": (False, ("flags", "bandwidth")),
1790 "virDomainMigrateDirect": (False, ("flags", "bandwidth")),
1791 "virDomainMigrateFinish": (False, ("flags")),
1792 "virDomainMigrateFinish2": (False, ("flags")),
1793 "virDomainMigrateFinish3": (False, ("flags")),
1794 "virDomainMigratePeer2Peer": (False, ("flags", "bandwidth")),
1795 "virDomainMigratePerform": (False, ("flags", "bandwidth")),
1796 "virDomainMigratePerform3": (False, ("flags", "bandwidth")),
1797 "virDomainMigratePrepare": (False, ("flags", "bandwidth")),
1798 "virDomainMigratePrepare2": (False, ("flags", "bandwidth")),
1799 "virDomainMigratePrepare3": (False, ("flags", "bandwidth")),
1800 "virDomainMigratePrepareTunnel": (False, ("flags", "bandwidth")),
1801 "virDomainMigratePrepareTunnel3": (False, ("flags", "bandwidth")),
1802 "virDomainMigrateToURI": (False, ("flags", "bandwidth")),
1803 "virDomainMigrateToURI2": (False, ("flags", "bandwidth")),
1804 "virDomainMigrateVersion1": (False, ("flags", "bandwidth")),
1805 "virDomainMigrateVersion2": (False, ("flags", "bandwidth")),
1806 "virDomainMigrateVersion3": (False, ("flags", "bandwidth")),
1807 "virDomainMigrateSetMaxSpeed": (False, ("bandwidth")),
1808 "virDomainSetMaxMemory": (False, ("memory")),
1809 "virDomainSetMemory": (False, ("memory")),
1810 "virDomainSetMemoryFlags": (False, ("memory")),
1811 "virDomainBlockCommit": (False, ("bandwidth")),
1812 "virDomainBlockJobSetSpeed": (False, ("bandwidth")),
1813 "virDomainBlockPull": (False, ("bandwidth")),
1814 "virDomainBlockRebase": (False, ("bandwidth")),
1815 "virDomainMigrateGetMaxSpeed": (False, ("bandwidth"))
1818 def checkLongLegacyFunction(self, name, return_type, signature):
1819 if "long" in return_type and "long long" not in return_type:
1820 try:
1821 if not CParser.long_legacy_functions[name][0]:
1822 raise Exception()
1823 except:
1824 self.error(("function '%s' is not allowed to return long, "
1825 "use long long instead") % name)
1827 for param in signature:
1828 if "long" in param[0] and "long long" not in param[0]:
1829 try:
1830 if param[1] not in CParser.long_legacy_functions[name][1]:
1831 raise Exception()
1832 except:
1833 self.error(("function '%s' is not allowed to take long "
1834 "parameter '%s', use long long instead")
1835 % (name, param[1]))
1837 # this dict contains the structs that are allowed to use [unsigned]
1838 # long for legacy reasons. this list is fixed. new structs have to use
1839 # [unsigned] long long
1840 long_legacy_struct_fields = {
1841 "_virDomainInfo": ("maxMem", "memory"),
1842 "_virNodeInfo": ("memory"),
1843 "_virDomainBlockJobInfo": ("bandwidth")
1846 def checkLongLegacyStruct(self, name, fields):
1847 for field in fields:
1848 if "long" in field[0] and "long long" not in field[0]:
1849 try:
1850 if field[1] not in CParser.long_legacy_struct_fields[name]:
1851 raise Exception()
1852 except:
1853 self.error(("struct '%s' is not allowed to contain long "
1854 "field '%s', use long long instead")
1855 % (name, field[1]))
1858 # Parse a global definition, be it a type, variable or function.
1859 # The extern "C" blocks are a bit nasty and require it to recurse.
1861 def parseGlobal(self, token):
1862 static = 0
1863 if token[1] == 'extern':
1864 token = self.token()
1865 if token is None:
1866 return token
1867 if token[0] == 'string':
1868 if token[1] == 'C':
1869 token = self.token()
1870 if token is None:
1871 return token
1872 if token[0] == 'sep' and token[1] == "{":
1873 token = self.token()
1874 # print('Entering extern "C" line', self.lineno())
1875 while token is not None and (token[0] != 'sep' or
1876 token[1] != "}"):
1877 if token[0] == 'name':
1878 token = self.parseGlobal(token)
1879 else:
1880 self.error(
1881 "token %s %s unexpected at the top level" % (
1882 token[0], token[1]))
1883 token = self.parseGlobal(token)
1884 # print('Exiting extern "C" line', self.lineno())
1885 token = self.token()
1886 return token
1887 else:
1888 return token
1889 elif token[1] == 'static':
1890 static = 1
1891 token = self.token()
1892 if token is None or token[0] != 'name':
1893 return token
1895 if token[1] == 'typedef':
1896 token = self.token()
1897 return self.parseTypedef(token)
1898 else:
1899 token = self.parseType(token)
1900 type_orig = self.type
1901 if token is None or token[0] != "name":
1902 return token
1903 type = type_orig
1904 self.name = token[1]
1905 token = self.token()
1906 while token is not None and (token[0] == "sep" or token[0] == "op"):
1907 if token[0] == "sep":
1908 if token[1] == "[":
1909 type = type + token[1]
1910 token = self.token()
1911 while token is not None and (token[0] != "sep" or
1912 token[1] != ";"):
1913 type = type + token[1]
1914 token = self.token()
1916 if token is not None and token[0] == "op" and token[1] == "=":
1918 # Skip the initialization of the variable
1920 token = self.token()
1921 if token[0] == 'sep' and token[1] == '{':
1922 token = self.token()
1923 token = self.parseBlock(token)
1924 else:
1925 self.comment = None
1926 while token is not None and (token[0] != "sep" or
1927 token[1] not in ',;'):
1928 token = self.token()
1929 self.comment = None
1930 if token is None or token[0] != "sep" or (token[1] != ';' and
1931 token[1] != ','):
1932 self.error("missing ';' or ',' after value")
1934 if token is not None and token[0] == "sep":
1935 if token[1] == ";":
1936 self.comment = None
1937 token = self.token()
1938 if type == "struct":
1939 self.checkLongLegacyStruct(self.name, self.struct_fields)
1940 self.index_add(self.name, self.filename,
1941 not self.is_header, "struct", self.struct_fields)
1942 else:
1943 self.index_add(self.name, self.filename,
1944 not self.is_header, "variable", type)
1945 break
1946 elif token[1] == "(":
1947 token = self.token()
1948 token = self.parseSignature(token)
1949 if token is None:
1950 return None
1951 if token[0] == "sep" and token[1] == ";":
1952 self.checkLongLegacyFunction(self.name, type, self.signature)
1953 d = self.mergeFunctionComment(self.name,
1954 ((type, None), self.signature), 1)
1955 self.index_add(self.name, self.filename, static,
1956 "function", d)
1957 token = self.token()
1958 elif token[0] == "sep" and token[1] == "{":
1959 self.checkLongLegacyFunction(self.name, type, self.signature)
1960 d = self.mergeFunctionComment(self.name,
1961 ((type, None), self.signature), static)
1962 self.index_add(self.name, self.filename, static,
1963 "function", d)
1964 token = self.token()
1965 token = self.parseBlock(token)
1966 elif token[1] == ',':
1967 self.comment = None
1968 self.index_add(self.name, self.filename, static,
1969 "variable", type)
1970 type = type_orig
1971 token = self.token()
1972 while token is not None and token[0] == "sep":
1973 type = type + token[1]
1974 token = self.token()
1975 if token is not None and token[0] == "name":
1976 self.name = token[1]
1977 token = self.token()
1978 else:
1979 break
1981 return token
1983 def parse(self):
1984 if not quiet:
1985 print("Parsing %s" % (self.filename))
1986 token = self.token()
1987 while token is not None:
1988 if token[0] == 'name':
1989 token = self.parseGlobal(token)
1990 else:
1991 self.error("token %s %s unexpected at the top level" % (
1992 token[0], token[1]))
1993 token = self.parseGlobal(token)
1994 return
1995 self.parseTopComment(self.top_comment)
1996 return self.index
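# docBuilder drives the whole generation pipeline. Typical use, as done
# by app.rebuild() further below (arguments are illustrative):
#
#   builder = docBuilder("libvirt", srcdir, dirs, [])
#   builder.scan()        # collect the *.c / *.h files to parse
#   builder.analyze()     # analyze the merged symbol index
#   builder.serialize()   # write <name>-api.xml and <name>-refs.xml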
1999 class docBuilder:
2000 """A documentation builder"""
2001 def __init__(self, name, path='.', directories=['.'], includes=[]):
2002 self.name = name
2003 self.path = path
2004 self.directories = directories
2005 if name == "libvirt":
2006 self.includes = includes + list(included_files.keys())
2007 elif name == "libvirt-qemu":
2008 self.includes = includes + list(qemu_included_files.keys())
2009 elif name == "libvirt-lxc":
2010 self.includes = includes + list(lxc_included_files.keys())
2011 elif name == "libvirt-admin":
2012 self.includes = includes + list(admin_included_files.keys())
2013 self.modules = {}
2014 self.headers = {}
2015 self.idx = index()
2016 self.xref = {}
2017 self.index = {}
2018 self.basename = name
2019 self.errors = 0
2021 def warning(self, msg):
2022 global warnings
2023 warnings = warnings + 1
2024 print(msg)
2026 def error(self, msg):
2027 self.errors += 1
2028 print("Error:", msg, file=sys.stderr)
2030 def indexString(self, id, str):
2031 if str is None:
2032 return
2033 str = str.replace("'", ' ')
2034 str = str.replace('"', ' ')
2035 str = str.replace("/", ' ')
2036 str = str.replace('*', ' ')
2037 str = str.replace("[", ' ')
2038 str = str.replace("]", ' ')
2039 str = str.replace("(", ' ')
2040 str = str.replace(")", ' ')
2041 str = str.replace("<", ' ')
2042 str = str.replace('>', ' ')
2043 str = str.replace("&", ' ')
2044 str = str.replace('#', ' ')
2045 str = str.replace(",", ' ')
2046 str = str.replace('.', ' ')
2047 str = str.replace(';', ' ')
2048 tokens = str.split()
2049 for token in tokens:
2050 c = token[0]
2051 if not re.match(r"[a-zA-Z]", c):
2052 pass
2053 elif len(token) < 3:
2054 pass
2055 else:
2056 lower = token.lower()
2057 # TODO: generalize this a bit
2058 if lower == 'and' or lower == 'the':
2059 pass
2060 elif token in self.xref:
2061 self.xref[token].append(id)
2062 else:
2063 self.xref[token] = [id]
2065 def analyze(self):
2066 if not quiet:
2067 print("Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys())))
2068 self.idx.analyze()
2070 def scanHeaders(self):
2071 for header in self.headers.keys():
2072 parser = CParser(header)
2073 idx = parser.parse()
2074 self.headers[header] = idx
2075 self.idx.merge(idx)
2077 def scanModules(self):
2078 for module in self.modules.keys():
2079 parser = CParser(module)
2080 idx = parser.parse()
2081 # idx.analyze()
2082 self.modules[module] = idx
2083 self.idx.merge_public(idx)
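# scan() walks self.directories and keeps only the files whose path
# contains one of the names in self.includes; e.g. "src/libvirt-domain.c"
# matches the "libvirt-domain.c" entry, while an unrelated driver file
# such as "src/qemu/qemu_driver.c" is skipped.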
2085 def scan(self):
2086 for directory in self.directories:
2087 files = glob.glob(directory + "/*.c")
2088 for file in files:
2089 skip = 1
2090 for incl in self.includes:
2091 if file.find(incl) != -1:
2092 skip = 0
2093 break
2094 if skip == 0:
2095 self.modules[file] = None
2096 files = glob.glob(directory + "/*.h")
2097 for file in files:
2098 skip = 1
2099 for incl in self.includes:
2100 if file.find(incl) != -1:
2101 skip = 0
2102 break
2103 if skip == 0:
2104 self.headers[file] = None
2105 self.scanHeaders()
2106 self.scanModules()
2108 def modulename_file(self, file):
2109 module = os.path.basename(file)
2110 if module[-2:] == '.h':
2111 module = module[:-2]
2112 elif module[-2:] == '.c':
2113 module = module[:-2]
2114 return module
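# serialize_enum() emits one self-closing <enum/> element per value,
# roughly like this (illustrative output):
#
#   <enum name='VIR_DOMAIN_RUNNING' file='libvirt-domain' value='1'
#         value_hex='0x1' type='virDomainState' info='the domain is running'/>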
2116 def serialize_enum(self, output, name):
2117 id = self.idx.enums[name]
2118 output.write(" <enum name='%s' file='%s'" % (name,
2119 self.modulename_file(id.header)))
2120 if id.info is not None:
2121 info = id.info
2122 valhex = ""
2123 if info[0] is not None and info[0] != '':
2124 try:
2125 val = eval(info[0])
2126 valhex = hex(val)
2127 except:
2128 val = info[0]
2129 output.write(" value='%s'" % (val))
2131 if valhex != "":
2132 output.write(" value_hex='%s'" % (valhex))
2134 m = re.match(r"\(?1<<(\d+)\)?", info[0])
2135 if m:
2136 output.write(" value_bitshift='%s'" % (m.group(1)))
2138 if info[2] is not None and info[2] != '':
2139 output.write(" type='%s'" % info[2])
2140 if info[1] is not None and info[1] != '':
2141 output.write(" info='%s'" % escape(info[1]))
2142 output.write("/>\n")
2144 def serialize_macro(self, output, name):
2145 id = self.idx.macros[name]
2146 output.write(" <macro name='%s' file='%s'" % (name,
2147 self.modulename_file(id.header)))
2148 if id.info is None:
2149 args = []
2150 desc = None
2151 strValue = None
2152 else:
2153 (args, desc, strValue) = id.info
2155 if strValue is not None:
2156 output.write(" string='%s'" % strValue)
2157 output.write(">\n")
2159 if desc is not None and desc != "":
2160 output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
2161 self.indexString(name, desc)
2162 for arg in args:
2163 (name, desc) = arg
2164 if desc is not None and desc != "":
2165 output.write(" <arg name='%s' info='%s'/>\n" % (
2166 name, escape(desc)))
2167 self.indexString(name, desc)
2168 else:
2169 output.write(" <arg name='%s'/>\n" % name)
2170 output.write(" </macro>\n")
2172 def serialize_union(self, output, field, desc):
2173 output.write(" <field name='%s' type='union' info='%s'>\n" % (field[1], desc))
2174 output.write(" <union>\n")
2175 for f in field[3]:
2176 desc = f[2]
2177 if desc is None:
2178 desc = ''
2179 else:
2180 desc = escape(desc)
2181 output.write(" <field name='%s' type='%s' info='%s'/>\n" % (f[1], f[0], desc))
2183 output.write(" </union>\n")
2184 output.write(" </field>\n")
2186 def serialize_typedef(self, output, name):
2187 id = self.idx.typedefs[name]
2188 if id.info[0:7] == 'struct ':
2189 output.write(" <struct name='%s' file='%s' type='%s'" % (
2190 name, self.modulename_file(id.header), id.info))
2191 name = id.info[7:]
2192 if (name in self.idx.structs and
2193 isinstance(self.idx.structs[name].info, (list, tuple))):
2194 output.write(">\n")
2195 try:
2196 for field in self.idx.structs[name].info:
2197 desc = field[2]
2198 self.indexString(name, desc)
2199 if desc is None:
2200 desc = ''
2201 else:
2202 desc = escape(desc)
2203 if field[0] == "union":
2204 self.serialize_union(output, field, desc)
2205 else:
2206 output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1], field[0], desc))
2207 except:
2208 self.warning("Failed to serialize struct %s" % name)
2209 output.write(" </struct>\n")
2210 else:
2211 output.write("/>\n")
2212 else:
2213 output.write(" <typedef name='%s' file='%s' type='%s'" % (
2214 name, self.modulename_file(id.header), id.info))
2215 try:
2216 desc = id.extra
2217 if desc is not None and desc != "":
2218 output.write(">\n <info><![CDATA[%s]]></info>\n" % (desc))
2219 output.write(" </typedef>\n")
2220 else:
2221 output.write("/>\n")
2222 except:
2223 output.write("/>\n")
2225 def serialize_variable(self, output, name):
2226 id = self.idx.variables[name]
2227 if id.info is not None:
2228 output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
2229 name, self.modulename_file(id.header), id.info))
2230 else:
2231 output.write(" <variable name='%s' file='%s'/>\n" % (
2232 name, self.modulename_file(id.header)))
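# serialize_function() writes the per-API record consumed by the rest of
# the documentation tooling; abbreviated, illustrative output:
#
#   <function name='virDomainGetID' file='libvirt-domain'
#             module='libvirt-domain'>
#     <info><![CDATA[Get the hypervisor ID number for the domain]]></info>
#     <return type='unsigned int' info='the domain ID number or ...'/>
#     <arg name='domain' type='virDomainPtr' info='a domain object'/>
#   </function>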
2234 def serialize_function(self, output, name):
2235 id = self.idx.functions[name]
2236 if name == debugsym and not quiet:
2237 print("=>", id)
2239 # NB: this is consumed by a regex in 'getAPIFilenames' in hvsupport.pl
2240 output.write(" <%s name='%s' file='%s' module='%s'>\n" % (id.type,
2241 name, self.modulename_file(id.header),
2242 self.modulename_file(id.module)))
2244 # Emit the preprocessor conditionals this symbol depends on, if any
2246 if id.conditionals is not None:
2247 apstr = ""
2248 for cond in id.conditionals:
2249 if apstr != "":
2250 apstr = apstr + " &amp;&amp; "
2251 apstr = apstr + cond
2252 output.write(" <cond>%s</cond>\n" % (apstr))
2253 try:
2254 (ret, params, desc) = id.info
2255 output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
2256 self.indexString(name, desc)
2257 if ret[0] is not None:
2258 if ret[0] == "void":
2259 output.write(" <return type='void'/>\n")
2260 elif (ret[1] is None or ret[1] == '') and name not in ignored_functions:
2261 self.error("Missing documentation for return of function `%s'" % name)
2262 else:
2263 output.write(" <return type='%s' info='%s'/>\n" % (
2264 ret[0], escape(ret[1])))
2265 self.indexString(name, ret[1])
2266 for param in params:
2267 if param[0] == 'void':
2268 continue
2269 if (param[2] is None or param[2] == ''):
2270 if name in ignored_functions:
2271 output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
2272 else:
2273 self.error("Missing documentation for arg `%s' of function `%s'" % (param[1], name))
2274 else:
2275 output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
2276 self.indexString(name, param[2])
2277 except:
2278 print("Exception:", sys.exc_info()[1], file=sys.stderr)
2279 self.warning("Failed to save function %s info: %s" % (name, repr(id.info)))
2280 output.write(" </%s>\n" % (id.type))
2282 def serialize_exports(self, output, file):
2283 module = self.modulename_file(file)
2284 output.write(" <file name='%s'>\n" % (module))
2285 dict = self.headers[file]
2286 if dict.info is not None:
2287 for data in ('Summary', 'Description'):
2288 try:
2289 output.write(" <%s>%s</%s>\n" % (
2290 data.lower(),
2291 escape(dict.info[data]),
2292 data.lower()))
2293 except KeyError:
2294 self.warning("Header %s lacks a %s description" % (module, data))
2295 if 'Description' in dict.info:
2296 desc = dict.info['Description']
2297 if desc.find("DEPRECATED") != -1:
2298 output.write(" <deprecated/>\n")
2300 for id in uniq(dict.macros.keys()):
2301 # Macros are sometimes used to masquerade as other types.
2302 if id in dict.functions:
2303 continue
2304 if id in dict.variables:
2305 continue
2306 if id in dict.typedefs:
2307 continue
2308 if id in dict.structs:
2309 continue
2310 if id in dict.unions:
2311 continue
2312 if id in dict.enums:
2313 continue
2314 output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
2315 for id in uniq(dict.enums.keys()):
2316 output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
2317 for id in uniq(dict.typedefs.keys()):
2318 output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
2319 for id in uniq(dict.structs.keys()):
2320 output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
2321 for id in uniq(dict.variables.keys()):
2322 output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
2323 for id in uniq(dict.functions.keys()):
2324 output.write(" <exports symbol='%s' type='function'/>\n" % (id))
2325 output.write(" </file>\n")
2327 def serialize_xrefs_files(self, output):
2328 headers = sorted(self.headers.keys())
2329 for file in headers:
2330 module = self.modulename_file(file)
2331 output.write(" <file name='%s'>\n" % (module))
2332 dict = self.headers[file]
2333 ids = uniq(list(dict.functions.keys()) +
2334 list(dict.variables.keys()) +
2335 list(dict.macros.keys()) +
2336 list(dict.typedefs.keys()) +
2337 list(dict.structs.keys()) +
2338 list(dict.enums.keys()))
2339 for id in ids:
2340 output.write(" <ref name='%s'/>\n" % (id))
2341 output.write(" </file>\n")
2342 pass
2344 def serialize_xrefs_functions(self, output):
2345 funcs = {}
2346 for name in self.idx.functions.keys():
2347 id = self.idx.functions[name]
2348 try:
2349 (ret, params, desc) = id.info
2350 for param in params:
2351 if param[0] == 'void':
2352 continue
2353 if param[0] in funcs:
2354 funcs[param[0]].append(name)
2355 else:
2356 funcs[param[0]] = [name]
2357 except:
2358 pass
2359 typ = sorted(funcs.keys())
2360 for type in typ:
2361 if type in ['', "void", "int", "char *", "const char *"]:
2362 continue
2363 output.write(" <type name='%s'>\n" % (type))
2364 ids = funcs[type]
2365 ids.sort()
2366 pid = '' # not sure why we have dups, but get rid of them!
2367 for id in ids:
2368 if id != pid:
2369 output.write(" <ref name='%s'/>\n" % (id))
2370 pid = id
2371 output.write(" </type>\n")
2373 def serialize_xrefs_constructors(self, output):
2374 funcs = {}
2375 for name in self.idx.functions.keys():
2376 id = self.idx.functions[name]
2377 try:
2378 (ret, params, desc) = id.info
2379 if ret[0] == "void":
2380 continue
2381 if ret[0] in funcs:
2382 funcs[ret[0]].append(name)
2383 else:
2384 funcs[ret[0]] = [name]
2385 except:
2386 pass
2387 typ = sorted(funcs.keys())
2388 for type in typ:
2389 if type in ['', "void", "int", "char *", "const char *"]:
2390 continue
2391 output.write(" <type name='%s'>\n" % (type))
2392 ids = sorted(funcs[type])
2393 for id in ids:
2394 output.write(" <ref name='%s'/>\n" % (id))
2395 output.write(" </type>\n")
2397 def serialize_xrefs_alpha(self, output):
2398 letter = None
2399 ids = sorted(self.idx.identifiers.keys())
2400 for id in ids:
2401 if id[0] != letter:
2402 if letter is not None:
2403 output.write(" </letter>\n")
2404 letter = id[0]
2405 output.write(" <letter name='%s'>\n" % (letter))
2406 output.write(" <ref name='%s'/>\n" % (id))
2407 if letter is not None:
2408 output.write(" </letter>\n")
2410 def serialize_xrefs_references(self, output):
2411 typ = sorted(self.idx.identifiers.keys())
2412 for id in typ:
2413 idf = self.idx.identifiers[id]
2414 module = idf.header
2415 output.write(" <reference name='%s' href='%s'/>\n" % (id,
2416 'html/' + self.basename + '-' +
2417 self.modulename_file(module) + '.html#' +
2418 id))
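# serialize_xrefs_index() splits the word index into <chunk> elements of
# roughly 200 references each, only starting a new chunk on a letter
# boundary; words that occur in more than 30 symbols are skipped,
# presumably as too common to be useful.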
2420 def serialize_xrefs_index(self, output):
2421 index = self.xref
2422 typ = sorted(index.keys())
2423 letter = None
2424 count = 0
2425 chunk = 0
2426 chunks = []
2427 for id in typ:
2428 if len(index[id]) > 30:
2429 continue
2430 if id[0] != letter:
2431 if letter is None or count > 200:
2432 if letter is not None:
2433 output.write(" </letter>\n")
2434 output.write(" </chunk>\n")
2435 count = 0
2436 chunks.append(["chunk%s" % (chunk - 1), first_letter, letter])
2437 output.write(" <chunk name='chunk%s'>\n" % (chunk))
2438 first_letter = id[0]
2439 chunk = chunk + 1
2440 elif letter is not None:
2441 output.write(" </letter>\n")
2442 letter = id[0]
2443 output.write(" <letter name='%s'>\n" % (letter))
2444 output.write(" <word name='%s'>\n" % (id))
2445 tokens = index[id]
2446 tokens.sort()
2447 tok = None
2448 for token in tokens:
2449 if tok == token:
2450 continue
2451 tok = token
2452 output.write(" <ref name='%s'/>\n" % (token))
2453 count = count + 1
2454 output.write(" </word>\n")
2455 if letter is not None:
2456 output.write(" </letter>\n")
2457 output.write(" </chunk>\n")
2458 if count != 0:
2459 chunks.append(["chunk%s" % (chunk - 1), first_letter, letter])
2460 output.write(" <chunks>\n")
2461 for ch in chunks:
2462 output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
2463 ch[0], ch[1], ch[2]))
2464 output.write(" </chunks>\n")
2466 def serialize_xrefs(self, output):
2467 output.write(" <references>\n")
2468 self.serialize_xrefs_references(output)
2469 output.write(" </references>\n")
2470 output.write(" <alpha>\n")
2471 self.serialize_xrefs_alpha(output)
2472 output.write(" </alpha>\n")
2473 output.write(" <constructors>\n")
2474 self.serialize_xrefs_constructors(output)
2475 output.write(" </constructors>\n")
2476 output.write(" <functions>\n")
2477 self.serialize_xrefs_functions(output)
2478 output.write(" </functions>\n")
2479 output.write(" <files>\n")
2480 self.serialize_xrefs_files(output)
2481 output.write(" </files>\n")
2482 output.write(" <index>\n")
2483 self.serialize_xrefs_index(output)
2484 output.write(" </index>\n")
2486 def serialize(self):
2487 filename = "%s/%s-api.xml" % (self.path, self.name)
2488 if not quiet:
2489 print("Saving XML description %s" % (filename))
2490 output = open(filename, "w")
2491 output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
2492 output.write("<api name='%s'>\n" % self.name)
2493 output.write(" <files>\n")
2494 headers = sorted(self.headers.keys())
2495 for file in headers:
2496 self.serialize_exports(output, file)
2497 output.write(" </files>\n")
2498 output.write(" <symbols>\n")
2499 macros = sorted(self.idx.macros.keys())
2500 for macro in macros:
2501 self.serialize_macro(output, macro)
2502 enums = sorted(self.idx.enums.keys())
2503 for enum in enums:
2504 self.serialize_enum(output, enum)
2505 typedefs = sorted(self.idx.typedefs.keys())
2506 for typedef in typedefs:
2507 self.serialize_typedef(output, typedef)
2508 variables = sorted(self.idx.variables.keys())
2509 for variable in variables:
2510 self.serialize_variable(output, variable)
2511 functions = sorted(self.idx.functions.keys())
2512 for function in functions:
2513 self.serialize_function(output, function)
2514 output.write(" </symbols>\n")
2515 output.write("</api>\n")
2516 output.close()
2518 if self.errors > 0:
2519 print("apibuild.py: %d error(s) encountered during generation" % self.errors, file=sys.stderr)
2520 sys.exit(3)
2522 filename = "%s/%s-refs.xml" % (self.path, self.name)
2523 if not quiet:
2524 print("Saving XML Cross References %s" % (filename))
2525 output = open(filename, "w")
2526 output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
2527 output.write("<apirefs name='%s'>\n" % self.name)
2528 self.serialize_xrefs(output)
2529 output.write("</apirefs>\n")
2530 output.close()
2533 class app:
2534 def warning(self, msg):
2535 global warnings
2536 warnings = warnings + 1
2537 print(msg)
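# rebuild() relies on the 'srcdir' and 'builddir' environment variables
# being exported by the build system and raises KeyError if they are
# missing; the generated <name>-api.xml ends up in srcdir (it is passed
# to docBuilder() as the output path).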
2539 def rebuild(self, name):
2540 if name not in ["libvirt", "libvirt-qemu", "libvirt-lxc", "libvirt-admin"]:
2541 self.warning("rebuild() failed, unknown module %s" % name)
2542 return None
2543 builder = None
2544 srcdir = os.path.abspath((os.environ["srcdir"]))
2545 builddir = os.path.abspath((os.environ["builddir"]))
2546 if srcdir == builddir:
2547 builddir = None
2548 if glob.glob(srcdir + "/../src/libvirt.c") != []:
2549 if not quiet:
2550 print("Rebuilding API description for %s" % name)
2551 dirs = [srcdir + "/../src",
2552 srcdir + "/../src/util",
2553 srcdir + "/../include/libvirt"]
2554 if (builddir and
2555 not os.path.exists(srcdir + "/../include/libvirt/libvirt-common.h")):
2556 dirs.append(builddir + "/../include/libvirt")
2557 builder = docBuilder(name, srcdir, dirs, [])
2558 elif glob.glob("src/libvirt.c") != []:
2559 if not quiet:
2560 print("Rebuilding API description for %s" % name)
2561 builder = docBuilder(name, srcdir,
2562 ["src", "src/util", "include/libvirt"],
2564 else:
2565 self.warning("rebuild() failed, unable to guess the module")
2566 return None
2567 builder.scan()
2568 builder.analyze()
2569 builder.serialize()
2570 return builder
2573 # for debugging the parser
2575 def parse(self, filename):
2576 parser = CParser(filename)
2577 idx = parser.parse()
2578 return idx
2581 if __name__ == "__main__":
2582 app = app()
2583 if len(sys.argv) > 1:
2584 debug = 1
2585 app.parse(sys.argv[1])
2586 else:
2587 app.rebuild("libvirt")
2588 app.rebuild("libvirt-qemu")
2589 app.rebuild("libvirt-lxc")
2590 app.rebuild("libvirt-admin")
2591 if warnings > 0:
2592 sys.exit(2)
2593 else:
2594 sys.exit(0)
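# Typical invocations (illustrative; srcdir/builddir must point at the
# docs source and build directories):
#
#   srcdir=docs builddir=build/docs python apibuild.py    # rebuild all APIs
#   python apibuild.py ../src/libvirt-domain.c            # debug-parse one file
#
# Exit status: 0 on success, 2 if warnings were printed, 3 if an error
# made docBuilder.serialize() bail out.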