# Lib/logging/handlers.py
# Copyright 2001-2009 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.

Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""

import logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO

try:
    import codecs
except ImportError:
    codecs = None
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False

# Some constants...

DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        if maxBytes > 0:
            mode = 'a'  # doesn't make sense otherwise!
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    #print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(self.baseFilename, dfn)
            #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:          # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:            # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
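
# A minimal usage sketch (illustrative, not part of the module): size-based
# rotation keeping five backups. The file and logger names are hypothetical.
#
#     import logging
#     import logging.handlers
#
#     handler = logging.handlers.RotatingFileHandler(
#         'app.log', maxBytes=1024 * 1024, backupCount=5)
#     handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
#     logging.getLogger('myapp').addHandler(handler)
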

class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1  # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60  # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60  # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24  # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7  # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval  # multiply by units requested
        self.rolloverAt = self.computeRollover(int(time.time()))

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                             currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6]  # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result
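
    # A concrete instance of the day arithmetic above (illustrative only):
    # with when='W6' (roll over on Sunday) and the current day a Wednesday
    # (day == 2), Case 2 counts 6 - 2 - 1 = 3 whole days; adding the extra day
    # for the midnight already reached gives daysToWait = 6 - 2 = 4, exactly
    # what the branch above computes, so the rollover lands on the midnight
    # that ends Sunday.
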

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same.
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        Do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        # If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
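
# A minimal usage sketch (illustrative, not part of the module): rotate at
# midnight and keep a week of backups. The file and logger names are
# hypothetical.
#
#     import logging
#     import logging.handlers
#
#     handler = logging.handlers.TimedRotatingFileHandler(
#         'app.log', when='midnight', backupCount=7)
#     logging.getLogger('myapp').addHandler(handler)
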

class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        if not os.path.exists(self.baseFilename):
            self.dev, self.ino = -1, -1
        else:
            stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        if not os.path.exists(self.baseFilename):
            stat = None
            changed = 1
        else:
            stat = os.stat(self.baseFilename)
            changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
        if changed and self.stream is not None:
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if stat is None:
                stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
        logging.FileHandler.emit(self, record)
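
# A minimal usage sketch (illustrative, not part of the module): log to a file
# that an external tool such as logrotate may rename; the handler notices the
# device/inode change and reopens it. The path below is hypothetical.
#
#     import logging
#     import logging.handlers
#
#     handler = logging.handlers.WatchedFileHandler('/var/log/myapp.log')
#     logging.getLogger('myapp').addHandler(handler)
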

class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' is initialized to 0; if set to 1, a
        socket error causes the socket to be silently closed and then
        reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None  # next time, no delay before trying
            except socket.error:
                # Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock can be None either because we haven't reached the retry
        # time yet, or because we have reached the retry time and retried,
        # but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            dummy = self.format(record)  # just to get traceback text into record.exc_text
            record.exc_info = None       # to avoid Unpickleable error
        s = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei  # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)
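
# A minimal receiving-end sketch (illustrative, not part of the module). As
# makePickle() shows, each record goes over the wire as a 4-byte big-endian
# length followed by the pickled LogRecord __dict__, so a listener can rebuild
# and re-handle records via logging.makeLogRecord. Error handling and short
# reads are omitted; the host below is hypothetical.
#
#     import cPickle, logging, socket, struct
#
#     listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     listener.bind(('localhost', DEFAULT_TCP_LOGGING_PORT))
#     listener.listen(1)
#     conn, _ = listener.accept()
#     slen = struct.unpack(">L", conn.recv(4))[0]
#     record = logging.makeLogRecord(cPickle.loads(conn.recv(slen)))
#     logging.getLogger(record.name).handle(record)
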

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket. The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
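
# A minimal usage sketch (illustrative, not part of the module): ship records
# over UDP to a collector; delivery is best-effort. The host and logger names
# are hypothetical.
#
#     import logging
#     import logging.handlers
#
#     handler = logging.handlers.DatagramHandler(
#         'logs.example.com', logging.handlers.DEFAULT_UDP_LOGGING_PORT)
#     logging.getLogger('myapp').addHandler(handler)
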

class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG = 0       # system is unusable
    LOG_ALERT = 1       # action must be taken immediately
    LOG_CRIT = 2        # critical conditions
    LOG_ERR = 3         # error conditions
    LOG_WARNING = 4     # warning conditions
    LOG_NOTICE = 5      # normal but significant condition
    LOG_INFO = 6        # informational
    LOG_DEBUG = 7       # debug-level messages

    # facility codes
    LOG_KERN = 0        # kernel messages
    LOG_USER = 1        # random user-level messages
    LOG_MAIL = 2        # mail system
    LOG_DAEMON = 3      # system daemons
    LOG_AUTH = 4        # security/authorization messages
    LOG_SYSLOG = 5      # messages generated internally by syslogd
    LOG_LPR = 6         # line printer subsystem
    LOG_NEWS = 7        # network news subsystem
    LOG_UUCP = 8        # UUCP subsystem
    LOG_CRON = 9        # clock daemon
    LOG_AUTHPRIV = 10   # security/authorization messages (private)

    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16     # reserved for local use
    LOG_LOCAL1 = 17     # reserved for local use
    LOG_LOCAL2 = 18     # reserved for local use
    LOG_LOCAL3 = 19     # reserved for local use
    LOG_LOCAL4 = 20     # reserved for local use
    LOG_LOCAL5 = 21     # reserved for local use
    LOG_LOCAL6 = 22     # reserved for local use
    LOG_LOCAL7 = 23     # reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        # DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      # DEPRECATED
        "warn":     LOG_WARNING,    # DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       # DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    # The map below appears to be trivially lowercasing the key. However,
    # there's more to it than meets the eye - in some locales, lowercasing
    # gives unexpected results. See SF #1524081: in the Turkish locale,
    # "INFO".lower() != "info"
    priority_map = {
        "DEBUG": "debug",
        "INFO": "info",
        "WARNING": "warning",
        "ERROR": "error",
        "CRITICAL": "critical",
        }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=socket.SOCK_DGRAM):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
        self.formatter = None

    def _connect_unixsocket(self, address):
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required. this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        return (facility << 3) | priority
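
    # A worked example of the encoding above (illustrative only):
    # encodePriority(LOG_USER, LOG_INFO) == (1 << 3) | 6 == 14, so a
    # user-level informational message is sent with the syslog PRI value <14>.
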

    def close(self):
        """
        Closes the socket.
        """
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record)
        # We need to convert record level to lowercase; maybe this will
        # change in the future.
        msg = self.log_format_string % (
            self.encodePriority(self.facility,
                                self.mapPriority(record.levelname)),
            msg)
        # Treat unicode messages as required by RFC 5424
        if _unicode and type(msg) is unicode:
            msg = msg.encode('utf-8')
            if codecs:
                msg = codecs.BOM_UTF8 + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
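
# A minimal usage sketch (illustrative, not part of the module): log to the
# local syslog daemon over the Unix domain socket, as the __init__ docstring
# describes. The logger name is hypothetical.
#
#     import logging
#     import logging.handlers
#
#     handler = logging.handlers.SysLogHandler(
#         address='/dev/log',
#         facility=logging.handlers.SysLogHandler.LOG_DAEMON)
#     logging.getLogger('myapp').addHandler(handler)
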

class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    def date_time(self):
        """
        Return the current date and time formatted for a MIME header.
        Needed for Python 1.5.2 (no email package available)
        """
        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
            self.weekdayname[wd],
            day, self.monthname[month], year,
            hh, mm, ss)
        return s

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            try:
                from email.utils import formatdate
            except ImportError:
                formatdate = self.date_time
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                self.fromaddr,
                ",".join(self.toaddrs),
                self.getSubject(record),
                formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
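
# A minimal usage sketch (illustrative, not part of the module): mail CRITICAL
# records to an operator. The host names and addresses are hypothetical.
#
#     import logging
#     import logging.handlers
#
#     handler = logging.handlers.SMTPHandler(
#         ('smtp.example.com', 25), 'app@example.com', ['ops@example.com'],
#         'Application failure')
#     handler.setLevel(logging.CRITICAL)
#     logging.getLogger('myapp').addHandler(handler)
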

class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG:    win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO:     win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING:  win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR:    win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
                }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)

class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a URL-encoded dictionary.
        """
        try:
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            h.endheaders(data if self.method == "POST" else None)
            h.getreply()  # can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
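
# A minimal usage sketch (illustrative, not part of the module): POST each
# record's attribute dictionary to a web endpoint. The host and path are
# hypothetical.
#
#     import logging
#     import logging.handlers
#
#     handler = logging.handlers.HTTPHandler(
#         'logs.example.com:8080', '/log', method='POST')
#     logging.getLogger('myapp').addHandler(handler)
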

class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return (len(self.buffer) >= self.capacity)

    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to process
        the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.buffer = []

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        self.flush()
        logging.Handler.close(self)

class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        return (len(self.buffer) >= self.capacity) or \
               (record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.
        """
        if self.target:
            for record in self.buffer:
                self.target.handle(record)
            self.buffer = []

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)
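
# A minimal usage sketch (illustrative, not part of the module): buffer up to
# 100 records in memory and write them to a file only once an ERROR or worse
# arrives. The file and logger names are hypothetical.
#
#     import logging
#     import logging.handlers
#
#     target = logging.FileHandler('errors.log')
#     handler = logging.handlers.MemoryHandler(
#         100, flushLevel=logging.ERROR, target=target)
#     logging.getLogger('myapp').addHandler(handler)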