# client/common_lib/logging_manager.py
# Extracted from autotest-zwu.git
# (blob 96f718a844d5fd988c9a63d1e41a6b28e6dc60ab; gitweb page title:
# "KVM test: Add helpers to control the TAP/bridge")
1 import logging, os, signal, sys, warnings
3 # primary public APIs
def configure_logging(logging_config, **kwargs):
    """
    Set up the logging module from a configuration object.

    @param logging_config: an instance of logging_config.LoggingConfig
            (usually of a subclass).  It is remembered on LoggingManager for
            later use and then asked to perform the actual configuration.
    @param kwargs: passed verbatim to logging_config.configure_logging().

    Every entry point should call this method at application startup.
    """
    # Record the config object first so LoggingManager instances built later
    # can consult it (see LoggingManager.__init__).
    LoggingManager.logging_config_object = logging_config
    logging_config.configure_logging(**kwargs)
def get_logging_manager(manage_stdout_and_stderr=False, redirect_fds=False):
    """
    Build and return a LoggingManager, optionally managing sys.stdout and
    sys.stderr.

    Every entry point that wants to capture stdout/stderr and/or use
    LoggingManager to manage a stack of destinations should call this method
    at application startup.

    @param manage_stdout_and_stderr: if True, the returned manager takes over
            sys.stdout and sys.stderr.
    @param redirect_fds: if True, an FdRedirectionLoggingManager is built so
            the underlying file descriptors are redirected as well.
    """
    manager_class = (FdRedirectionLoggingManager if redirect_fds
                     else LoggingManager)
    manager = manager_class()
    if manage_stdout_and_stderr:
        manager.manage_stdout()
        manager.manage_stderr()
    return manager
36 # implementation follows
38 logger = logging.getLogger()
41 def _current_handlers():
42 return set(logger.handlers)
45 _caller_code_to_skip_in_logging_stack = set()
48 def do_not_report_as_logging_caller(func):
49 """Decorator to annotate functions we will tell logging not to log."""
50 # These are not the droids you are looking for.
51 # You may go about your business.
52 _caller_code_to_skip_in_logging_stack.add(func.func_code)
53 return func
# Copied from Python 2.4 logging/__init__.py Logger.findCaller and enhanced.
# The logging code remains the same and compatible with this monkey patching
# through at least Python version 2.6.2.
def _logging_manager_aware_logger__find_caller(unused):
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.

    NOTE: installed in place of logging.Logger.findCaller (see the monkey
    patching below), so the sys._getframe(2) depth is significant: it must be
    invoked at exactly the same stack depth as the original findCaller.
    """
    f = sys._getframe(2).f_back
    rv = "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        # Skip frames that belong to the logging module itself.
        if filename == logging._srcfile:
            f = f.f_back
            continue
        ### START additional code.
        # Also skip frames whose code objects were registered via
        # @do_not_report_as_logging_caller (e.g. LoggingFile methods).
        if co in _caller_code_to_skip_in_logging_stack:
            f = f.f_back
            continue
        ### END additional code.
        rv = (filename, f.f_lineno, co.co_name)
        break
    return rv
# This module pokes at logging-module internals (logging._srcfile, findCaller
# stack depth), so warn when running on a Python newer than those it has been
# reviewed against.
if sys.version_info[:2] > (2, 7):
    warnings.warn('This module has not been reviewed for Python %s' %
                  sys.version)


# Monkey patch our way around logging's design...
# Keep a reference to the original findCaller so it could be restored; then
# install our manager-aware replacement on the Logger class.
_original_logger__find_caller = logging.Logger.findCaller
logging.Logger.findCaller = _logging_manager_aware_logger__find_caller
class LoggingFile(object):
    """
    File-like object that will receive messages and pass them to the logging
    infrastructure in an appropriate way.
    """
    def __init__(self, prefix='', level=logging.DEBUG):
        """
        @param prefix - The prefix for each line logged by this object.
        @param level - The logging level at which lines will be logged.
        """
        self._prefix = prefix
        self._level = level
        # Holds the incomplete tail of the most recent write() until a
        # terminating newline arrives.
        self._buffer = []


    @do_not_report_as_logging_caller
    def write(self, data):
        """
        Writes data only if it constitutes a whole line. If it's not the case,
        store it in a buffer and wait until we have a complete line.

        @param data - Raw data (a string) that will be processed.
        """
        # splitlines() discards a trailing blank line, so use split() instead
        data_lines = data.split('\n')
        if len(data_lines) > 1:
            # First fragment completes whatever was buffered.
            self._buffer.append(data_lines[0])
            self._flush_buffer()
        # Middle fragments are complete lines of their own.
        for line in data_lines[1:-1]:
            self._log_line(line)
        # Last fragment (if non-empty) had no trailing newline yet; buffer it.
        if data_lines[-1]:
            self._buffer.append(data_lines[-1])


    @do_not_report_as_logging_caller
    def _log_line(self, line):
        """
        Passes lines of output to the logging module.
        """
        logging.log(self._level, self._prefix + line)


    @do_not_report_as_logging_caller
    def _flush_buffer(self):
        # Emit the buffered fragments as a single line and reset the buffer.
        if self._buffer:
            self._log_line(''.join(self._buffer))
            self._buffer = []


    @do_not_report_as_logging_caller
    def flush(self):
        """Flush any incomplete buffered line to the logging module."""
        self._flush_buffer()
144 class _StreamManager(object):
146 Redirects all output for some output stream (normally stdout or stderr) to
147 the logging module by replacing the file objects with a new LoggingFile
148 that calls logging.log().
150 def __init__(self, stream, level, stream_setter):
152 @param stream: stream object to manage
153 @param level: level at which data written to the stream will be logged
154 @param stream_setter: function accepting a stream object that will
155 replace the given stream in its original location.
157 self._stream = stream
158 self._level = level
159 self._stream_setter = stream_setter
160 self._logging_stream = None
163 def _replace_with_logger(self):
164 self._logging_stream = LoggingFile(level=self._level)
165 self._stream_setter(self._logging_stream)
168 def _restore_stream(self):
169 self._stream_setter(self._stream)
172 def flush(self):
173 self._logging_stream.flush()
176 def start_logging(self):
177 """Start directing the stream to the logging module."""
178 self._replace_with_logger()
181 def stop_logging(self):
182 """Restore the stream to its original settings."""
183 self._restore_stream()
186 def on_push_context(self, context):
188 Called when the logging manager is about to push a new context onto the
189 stack and has changed logging settings. The StreamHandler can modify
190 the context to be saved before returning.
192 pass
195 def on_restore_context(self, context):
197 Called when the logging manager is restoring a previous context.
199 pass
class LoggingManager(object):
    """
    Manages a stack of logging configurations, allowing clients to conveniently
    add and remove logging destinations.  Also keeps a list of StreamManagers
    to easily direct streams into the logging module.
    """

    # Subclasses may override to change how managed streams are wrapped.
    STREAM_MANAGER_CLASS = _StreamManager

    # Set by the module-level configure_logging(); required before __init__.
    logging_config_object = None

    def __init__(self):
        """
        This class should not ordinarily be constructed directly (other than in
        tests).  Use the module-global factory method get_logging_manager()
        instead.
        """
        if self.logging_config_object is None:
            raise RuntimeError('You must call configure_logging() before this')

        # _context_stack holds a stack of context dicts.  Each context dict
        # contains:
        # * old_handlers: list of registered logging Handlers
        # contexts may also be extended by _StreamHandlers
        self._context_stack = []
        self._streams = []
        self._started = False


    def manage_stream(self, stream, level, stream_setter):
        """
        Tells this manager to manage the given stream.  All data written to the
        stream will be directed to the logging module instead.  Must be called
        before start_logging().

        @param stream: stream to manage
        @param level: level to log data written to this stream
        @param stream_setter: function to set the stream to a new object
        """
        if self._started:
            raise RuntimeError('You must call this before start_logging()')
        self._streams.append(self.STREAM_MANAGER_CLASS(stream, level,
                                                       stream_setter))


    def _sys_stream_setter(self, stream_name):
        # Returns a closure that installs a file object as sys.stdout or
        # sys.stderr.
        assert stream_name in ('stdout', 'stderr'), stream_name
        def set_stream(file_object):
            setattr(sys, stream_name, file_object)
        return set_stream


    def manage_stdout(self):
        """Direct sys.stdout into the logging module at INFO level."""
        self.manage_stream(sys.stdout, logging.INFO,
                           self._sys_stream_setter('stdout'))


    def manage_stderr(self):
        """Direct sys.stderr into the logging module at the configured level."""
        self.manage_stream(sys.stderr, self.logging_config_object.stderr_level,
                           self._sys_stream_setter('stderr'))


    def start_logging(self):
        """
        Begin capturing output to the logging module.
        """
        for stream_manager in self._streams:
            stream_manager.start_logging()
        self._started = True


    def stop_logging(self):
        """
        Restore output to its original state.
        """
        while self._context_stack:
            self._pop_context()

        for stream_manager in self._streams:
            stream_manager.stop_logging()

        self._started = False


    def _clear_all_handlers(self):
        for handler in _current_handlers():
            logger.removeHandler(handler)


    def _get_context(self):
        return {'old_handlers': _current_handlers()}


    def _push_context(self, context):
        for stream_manager in self._streams:
            stream_manager.on_push_context(context)
        self._context_stack.append(context)


    def _flush_all_streams(self):
        for stream_manager in self._streams:
            stream_manager.flush()


    def _add_log_handlers(self, add_handlers_fn):
        """
        Modify the logging module's registered handlers and push a new context
        onto the stack.
        @param add_handlers_fn: function to modify the registered logging
        handlers. Accepts a context dictionary which may be modified.
        """
        self._flush_all_streams()
        context = self._get_context()

        add_handlers_fn(context)

        self._push_context(context)


    class _TaggingFormatter(logging.Formatter):
        """
        Delegates to a given formatter, but prefixes each line of output with a
        tag.
        """
        def __init__(self, base_formatter, tag):
            self.base_formatter = base_formatter
            prefix = tag + ' : '
            # Splice the prefix into the base formatter's format string.
            self._fmt = base_formatter._fmt.replace('%(message)s',
                                                    prefix + '%(message)s')
            self.datefmt = base_formatter.datefmt


    def _add_tagging_formatter(self, tag):
        for handler in _current_handlers():
            tagging_formatter = self._TaggingFormatter(handler.formatter, tag)
            handler.setFormatter(tagging_formatter)


    def _do_redirect(self, stream=None, filename=None, level=None,
                     clear_other_handlers=False):
        """
        @param clear_other_handlers - if true, clear out all other logging
        handlers.
        """
        assert bool(stream) != bool(filename) # xor
        if not self._started:
            raise RuntimeError('You must call start_logging() before this')

        def add_handler(context):
            if clear_other_handlers:
                self._clear_all_handlers()

            if stream:
                handler = self.logging_config_object.add_stream_handler(stream)
            else:
                handler = self.logging_config_object.add_file_handler(filename)

            if level:
                handler.setLevel(level)

        self._add_log_handlers(add_handler)


    def redirect(self, filename):
        """Redirect output to the specified file"""
        self._do_redirect(filename=filename, clear_other_handlers=True)


    def redirect_to_stream(self, stream):
        """Redirect output to the given stream"""
        self._do_redirect(stream=stream, clear_other_handlers=True)


    def tee_redirect(self, filename, level=None):
        """Tee output to the specified file"""
        self._do_redirect(filename=filename, level=level)


    def tee_redirect_to_stream(self, stream):
        """Tee output to the given stream"""
        self._do_redirect(stream=stream)


    def tee_redirect_debug_dir(self, debug_dir, log_name=None, tag=None):
        """
        Tee output to a full new set of debug logs in the given directory.
        """
        def add_handlers(context):
            if tag:
                self._add_tagging_formatter(tag)
                context['tag_added'] = True
            self.logging_config_object.add_debug_file_handlers(
                    debug_dir, log_name=log_name)
        self._add_log_handlers(add_handlers)


    def _restore_context(self, context):
        for stream_handler in self._streams:
            stream_handler.on_restore_context(context)

        # restore logging handlers
        old_handlers = context['old_handlers']
        for handler in _current_handlers() - old_handlers:
            handler.close()
        self._clear_all_handlers()
        for handler in old_handlers:
            logger.addHandler(handler)

        # undo the tagging formatter if one was installed for this context
        if 'tag_added' in context:
            for handler in _current_handlers():
                tagging_formatter = handler.formatter
                handler.setFormatter(tagging_formatter.base_formatter)


    def _pop_context(self):
        self._flush_all_streams()
        context = self._context_stack.pop()
        self._restore_context(context)


    def undo_redirect(self):
        """
        Undo the last redirection (that hasn't yet been undone).

        If any subprocesses have been launched since the redirection was
        performed, they must have ended by the time this is called.  Otherwise,
        this will hang waiting for the logging subprocess to end.
        """
        if not self._context_stack:
            raise RuntimeError('No redirects to undo')
        self._pop_context()


    def restore(self):
        """
        Same as undo_redirect().  For backwards compatibility with
        fd_stack.
        """
        self.undo_redirect()
class _FdRedirectionStreamManager(_StreamManager):
    """
    Like StreamManager, but also captures output from subprocesses by modifying
    the underlying file descriptors.

    For the underlying file descriptors, we spawn a subprocess that writes all
    input to the logging module, and we point the FD to that subprocess.  As a
    result, every time we redirect output we need to spawn a new subprocess to
    pick up the new logging settings (without disturbing any existing processes
    using the old logging subprocess).

    If, one day, we could get all code using utils.run() and friends to launch
    subprocesses, we'd no longer need to handle raw FD output, and we could
    get rid of all this business with subprocesses.  Another option would be
    to capture all stray output to a single, separate destination.
    """
    def __init__(self, stream, level, stream_setter):
        if not hasattr(stream, 'fileno'):
            # with fake, in-process file objects, subprocess output won't be
            # captured. this should never happen in normal use, since the
            # factory methods will only pass sys.stdout and sys.stderr.
            raise ValueError("FdRedirectionLoggingManager won't work with "
                             "streams that aren't backed by file "
                             "descriptors")

        super(_FdRedirectionStreamManager, self).__init__(stream, level,
                                                          stream_setter)
        self._fd = stream.fileno()
        self._fd_copy_stream = None


    def _point_stream_handlers_to_copy(self):
        """
        point logging StreamHandlers that point to this stream to a safe
        copy of the underlying FD. otherwise, StreamHandler output will go
        to the logging subprocess, effectively getting doubly logged.
        """
        fd_copy = os.dup(self._fd)
        self._fd_copy_stream = os.fdopen(fd_copy, 'w')
        self._redirect_logging_stream_handlers(self._stream,
                                               self._fd_copy_stream)


    def _restore_stream_handlers(self):
        """ point logging StreamHandlers back to the original FD """
        self._redirect_logging_stream_handlers(self._fd_copy_stream,
                                               self._stream)
        self._fd_copy_stream.close()


    def _redirect_logging_stream_handlers(self, old_stream, new_stream):
        """
        Redirect all configured logging StreamHandlers pointing to
        old_stream to point to new_stream instead.
        """
        for handler in _current_handlers():
            points_to_stream = (isinstance(handler, logging.StreamHandler) and
                                hasattr(handler.stream, 'fileno') and
                                handler.stream.fileno() == old_stream.fileno())
            if points_to_stream:
                logger.removeHandler(handler)
                handler.close() # doesn't close the stream, just the handler

                # Rebuild an equivalent handler on the new stream, preserving
                # level, formatter and filters.
                new_handler = logging.StreamHandler(new_stream)
                new_handler.setLevel(handler.level)
                new_handler.setFormatter(handler.formatter)
                for log_filter in handler.filters:
                    new_handler.addFilter(log_filter)
                logger.addHandler(new_handler)


    def start_logging(self):
        super(_FdRedirectionStreamManager, self).start_logging()
        self._point_stream_handlers_to_copy()


    def stop_logging(self):
        super(_FdRedirectionStreamManager, self).stop_logging()
        self._restore_stream_handlers()


    def _spawn_logging_subprocess(self):
        """
        Spawn a subprocess to log all input to the logging module with the
        current settings, and direct output to it.

        @return the PID of the spawned subprocess (in the parent).
        """
        read_end, write_end = os.pipe()
        pid = os.fork()
        if pid: # parent
            os.close(read_end)
            os.dup2(write_end, self._fd) # point FD to the subprocess
            os.close(write_end)
            return pid
        else: # child
            try:
                os.close(write_end)
                # ensure this subprocess doesn't hold any pipes to others
                os.close(1)
                os.close(2)
                self._run_logging_subprocess(read_end) # never returns
            except:
                # don't let exceptions in the child escape
                try:
                    logging.exception('Logging subprocess died:')
                finally:
                    os._exit(1)


    def _run_logging_subprocess(self, read_fd):
        """
        Always run from a subprocess.  Read from read_fd and write to the
        logging module until EOF.
        """
        signal.signal(signal.SIGTERM, signal.SIG_DFL) # clear handler
        input_file = os.fdopen(read_fd, 'r')
        for line in iter(input_file.readline, ''):
            logging.log(self._level, line.rstrip('\n'))
        logging.debug('Logging subprocess finished')
        os._exit(0)


    def _context_id(self):
        # Per-instance key used to store this stream's data in a shared
        # context dict.
        return '%s_context' % id(self)


    def on_push_context(self, context):
        # adds a context dict for this stream, $id_context, with the following:
        # * old_fd: FD holding a copy of the managed FD before launching a new
        #   subprocess.
        # * child_pid: PID of the logging subprocess launched
        fd_copy = os.dup(self._fd)
        child_pid = self._spawn_logging_subprocess()
        my_context = {'old_fd': fd_copy, 'child_pid': child_pid}
        context[self._context_id()] = my_context


    def on_restore_context(self, context):
        my_context = context[self._context_id()]

        # shut down subprocess
        child_pid = my_context['child_pid']
        try:
            os.close(self._fd)
            os.waitpid(child_pid, 0)
        except OSError:
            logging.exception('Failed to cleanly shutdown logging subprocess:')

        # restore previous FD
        old_fd = my_context['old_fd']
        os.dup2(old_fd, self._fd)
        os.close(old_fd)
class FdRedirectionLoggingManager(LoggingManager):
    """
    A simple extension of LoggingManager to use FdRedirectionStreamManagers,
    so that managed streams have their underlying FDs redirected.
    """

    STREAM_MANAGER_CLASS = _FdRedirectionStreamManager

    def start_logging(self):
        super(FdRedirectionLoggingManager, self).start_logging()
        # spawn the initial logging subprocess
        self._push_context(self._get_context())


    def undo_redirect(self):
        # len == 1 would mean only start_logging() had been called (but no
        # redirects had occurred)
        if len(self._context_stack) < 2:
            raise RuntimeError('No redirects to undo')
        super(FdRedirectionLoggingManager, self).undo_redirect()