#!/usr/bin/env python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software;  you can redistribute it and/or modify it
# under the  terms of the  GNU General Public License  as published by
# the Free Software Foundation in version 2.  check_mk is  distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
# tails. You should have  received  a copy of the  GNU  General Public
# License along with GNU Make; see the file  COPYING.  If  not,  write
# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
# Boston, MA 02110-1301 USA.

import glob
import logging
import os
import re
import shutil
import sys
import time

from collections import namedtuple  # available from Python 2.6 / 3.1 onwards

# logical: LogfilesConfig([file, ...], (level, compiled, [continuation pattern, ...], [rewrite pattern, ...]))
# types:   LogfilesConfig[List[str], Tuple[str, sre.SRE_Pattern, List[str], List[str]]]
LogfilesConfig = namedtuple('LogfilesConfig', 'files, patterns')

# logical: ClusterConfig(name, ips)
# types:   ClusterConfig[str, List[str]]
ClusterConfig = namedtuple('ClusterConfig', 'name, ips')
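
# Illustrative sketch (not part of the original file): a logwatch.cfg fragment
# like the following (parsed by read_config() further below)
#
#     /var/log/messages
#      C panic
#      W error
#
#     CLUSTER my-cluster
#      192.168.1.1
#      192.168.1.2
#
# yields the configuration list
#
#     [LogfilesConfig(files=['/var/log/messages'],
#                     patterns=[('C', <compiled 'panic'>, [], []),
#                               ('W', <compiled 'error'>, [], [])]),
#      ClusterConfig(name='my-cluster', ips=['192.168.1.1', '192.168.1.2'])]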

LOGGER = logging.getLogger(__name__)

def parse_arguments(argv=None):
    """
    Custom argument parsing.
    (We can neither use optparse, which only exists in Python 2.3 to 2.7,
    nor argparse, which is only available from Python 2.7 onwards.)
    """
    args = {}
    if argv is None:
        argv = sys.argv[1:]

    if "-h" in argv:
        sys.stderr.write("""
This is a Check_MK agent plugin. If configured, it will be called by the
agent without arguments.

Options:
    -d    Debug mode: colored output, no saving of status.
    -h    Show help.
    -v    Verbose output for debugging purposes (no debug mode).

You should find an example configuration file at
'../cfg_examples/logwatch.cfg' relative to this file.
""")
        sys.exit(0)

    if "-v" in argv:
        logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
    elif "-vv" in argv:
        logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(lineno)s: %(message)s")
    else:
        LOGGER.propagate = False

    return args
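
# Invocation sketch (illustrative): the agent calls this plugin without
# arguments; for manual debugging one may run e.g.
#
#     ./mk_logwatch -d -v
#
# which enables colored output without saving the status (-d) plus INFO-level
# logging (-v).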

#   .--MEI-Cleanup---------------------------------------------------------.
#   |     __  __ _____ ___      ____ _                                     |
#   |    |  \/  | ____|_ _|    / ___| | ___  __ _ _ __  _   _ _ __         |
#   |    | |\/| |  _|  | |_____| |   | |/ _ \/ _` | '_ \| | | | '_ \       |
#   |    | |  | | |___ | |_____| |___| |  __/ (_| | | | | |_| | |_) |      |
#   |    |_|  |_|_____|___|     \____|_|\___|\__,_|_| |_|\__,_| .__/       |
#   |                                                         |_|          |
#   +----------------------------------------------------------------------+
# In case the program crashes or is killed in a hard way, the frozen binary .exe
# may leave temporary directories named "_MEI..." in the temporary path. Clean them
# up to prevent eating disk space over time.

########################################################################
############## DUPLICATE CODE WARNING ##################################
### This code is also used in the cmk-update-agent frozen binary #######
### Any changes to this class should also be made in cmk-update-agent ##
### In the bright future we will move this code into a library #########
########################################################################

class MEIFolderCleaner(object):
    def pid_running(self, pid):
        import ctypes
        kernel32 = ctypes.windll.kernel32
        SYNCHRONIZE = 0x100000

        process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)

        if process != 0:
            kernel32.CloseHandle(process)
            return True
        return False

    def find_and_remove_leftover_folders(self, hint_filenames):
        if not hasattr(sys, "frozen"):
            return

        import win32file  # pylint: disable=import-error
        import tempfile
        base_path = tempfile.gettempdir()
        for f in os.listdir(base_path):
            try:
                path = os.path.join(base_path, f)

                if not os.path.isdir(path):
                    continue

                # Only care about directories related to our program
                invalid_dir = False
                for hint_filename in hint_filenames:
                    if not os.path.exists(os.path.join(path, hint_filename)):
                        invalid_dir = True
                        break
                if invalid_dir:
                    continue

                pyinstaller_tmp_path = win32file.GetLongPathName(sys._MEIPASS).lower()  # pylint: disable=no-member
                if pyinstaller_tmp_path == path.lower():
                    continue  # Skip our own directory

                # Extract the process id from the directory name and check
                # whether or not it is still running. Don't delete directories
                # of running processes! The temporary directories are named
                # "_MEI<PID><NR>". We try to extract the PID by stripping off
                # a single digit from the right, hoping that NR is a single
                # digit in all relevant cases.
                pid = int(f[4:-1])
                if self.pid_running(pid):
                    continue

                shutil.rmtree(path)
            except Exception as e:
                LOGGER.debug("Finding and removing leftover folders failed: %s", e)

def debug():
    return '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]

# The configuration file and status file are searched in the directory
# named by the environment variable LOGWATCH_DIR. If that is not set,
# MK_VARDIR (for the status file) or MK_CONFDIR (for the configuration)
# is used. If neither is set, the current directory is used.
def mk_vardir():
    return os.getenv("LOGWATCH_DIR") or os.getenv("MK_VARDIR") or os.getenv("MK_STATEDIR") or "."

def mk_confdir():
    """Note: When debugging this plugin directly on a host, this function will
    always return "." because running /usr/lib/check_mk_agent/plugins/mk_logwatch
    with the required sudo rights does not pass the environment variables through."""
    return os.getenv("LOGWATCH_DIR") or os.getenv("MK_CONFDIR") or "."
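
# Example (illustrative values): with MK_CONFDIR=/etc/check_mk and LOGWATCH_DIR
# unset, the configuration is read from /etc/check_mk/logwatch.cfg plus
# /etc/check_mk/logwatch.d/*.cfg (see get_config_files() below), while the
# status file lives below MK_VARDIR.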

def get_status_filename(config):
    """
    Side effects:
    - Depends on ENV vars.
    - In case the agent plugin is called with the debug option set, it also
      depends on the global LOGGER and stdout.

    Determine the name of the state file dependent on ENV variables and config:
    $REMOTE set, no cluster set or no ip match -> logwatch.state.<formatted-REMOTE>
    $REMOTE set, cluster set and ip match      -> logwatch.state.<cluster-name>
    $REMOTE not set and a tty                  -> logwatch.state.local
    $REMOTE not set and not a tty              -> logwatch.state

    $REMOTE is determined by the check_mk_agent and varies depending on how
    the check_mk_agent is accessed:
    - telnet ($REMOTE_HOST): $REMOTE is in IPv6 notation. IPv4 addresses are
      extended to IPv6 notation, e.g. ::ffff:127.0.0.1
    - ssh ($SSH_CLIENT): $REMOTE is in IPv4 or IPv6 notation, depending on the
      IP family of the remote host.

    <formatted-REMOTE> is $REMOTE with all colons (:) replaced by underscores
    (_). This applies to IPv6 addresses as well as IPv4 addresses that were
    extended to IPv6 notation; if $REMOTE matches neither an IPv4 nor an IPv6
    address, it is used verbatim.
    """
    remote = os.getenv("REMOTE", "")
    ipv4_regex = r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$"
    ipv6_regex = r"^(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}$"
    ipv4_match = re.match(ipv4_regex, remote)
    ipv6_match = re.match(ipv6_regex, remote)
    remote_hostname = remote.replace(":", "_")
    if ipv4_match:
        remote_ip = ipv4_match.group()
    elif ipv6_match:
        remote_ip = ipv6_match.group()
        # In case of IPv4 extended to IPv6, strip the prefix for the ip match lookup
        if remote_ip.startswith("::ffff:"):
            remote_ip = remote_ip.replace("::ffff:", "")
    else:
        remote_ip = None
        LOGGER.debug("REMOTE is neither an IPv4 nor an IPv6 address.")

    # If clusters are configured, map the remote ip to the cluster name.
    # The key "name" is mandatory and unique for cluster dicts.
    status_filename = None
    cluster_name = None
    cluster_configs = [nt for nt in config if isinstance(nt, ClusterConfig)]
    LOGGER.debug("Cluster configurations:")
    LOGGER.debug(cluster_configs)
    for nt in cluster_configs:
        for ip in nt.ips:
            if remote_ip == ip:
                # A cluster name may not contain whitespace (it is provided
                # from the WATO config as type ID or hostname).
                cluster_name = nt.name
                LOGGER.info("matching cluster ip %s", ip)
                LOGGER.info("matching cluster name %s", cluster_name)

    if cluster_name:
        status_filename = "%s/logwatch.state.%s" % (mk_vardir(), cluster_name)
    elif remote_hostname != "":
        status_filename = "%s/logwatch.state.%s" % (mk_vardir(), remote_hostname)
    elif remote_hostname == "" and sys.stdout.isatty():  # real terminal
        status_filename = "%s/logwatch.state.local" % mk_vardir()
    elif remote_hostname == "" and not sys.stdout.isatty():  # piped or redirected
        status_filename = "%s/logwatch.state" % mk_vardir()
    else:
        raise Exception("Status filename could not be determined.")
    LOGGER.info("Status filename: %s", status_filename)
    return status_filename
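
# Illustrative examples (hypothetical values) of the naming rules implemented
# above, assuming MK_VARDIR=/var/lib/check_mk_agent and no matching cluster:
#
#     REMOTE="::ffff:10.0.0.1"  -> /var/lib/check_mk_agent/logwatch.state.__ffff_10.0.0.1
#     REMOTE="10.0.0.1"         -> /var/lib/check_mk_agent/logwatch.state.10.0.0.1
#     REMOTE="" (on a tty)      -> /var/lib/check_mk_agent/logwatch.state.local
#     REMOTE="" (piped)         -> /var/lib/check_mk_agent/logwatch.state
#
# With a matching "CLUSTER my-cluster" block containing 10.0.0.1, the name
# would be /var/lib/check_mk_agent/logwatch.state.my-cluster instead.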

def os_type():
    try:
        import platform  # added in Python 2.3
        return platform.system().lower()
    except ImportError:
        return "linux"


def is_comment(line):
    return line.lstrip().startswith('#')


def is_empty(line):
    return line.strip() == ""


def is_pattern_or_ip(line):
    return line.startswith(" ")


def parse_filenames(line):
    return line.split()

def parse_pattern(level, pattern, line):
    if level not in ['C', 'W', 'I', 'O']:
        raise Exception("Invalid pattern line '%s'" % line)

    try:
        compiled = re.compile(pattern)
    except re.error:
        raise Exception("Invalid regular expression in line '%s'" % line)

    return (level, compiled)

def get_config_files(directory):
    config_file_paths = []
    config_file_paths.append(directory + "/logwatch.cfg")
    # Add config file paths from a logwatch.d folder
    for config_file in glob.glob(directory + "/logwatch.d/*.cfg"):
        config_file_paths.append(config_file)
    LOGGER.info("Configuration file paths:")
    LOGGER.info(config_file_paths)
    return config_file_paths

def read_config(files):
    """
    Read logwatch.cfg (patterns, cluster mapping, etc.).

    Side effect: Reads the filesystem files logwatch.cfg and logwatch.d/*.cfg

    Returns the configuration as a list. List elements are namedtuples.
    A namedtuple either describes logfile patterns and is a
    LogfilesConfig(files, patterns), or it describes an optional cluster
    mapping and is a ClusterConfig(name, ips) with ips as a list of strings.
    """
    LOGGER.debug("config files:")
    LOGGER.debug(files)

    config = []
    logfiles_configs = []
    cluster_configs = []
    config_lines = []
    for f in files:
        try:
            config_lines += [line.rstrip() for line in open(f).readlines() if not is_comment(line)]
        except IOError:
            if debug():
                raise

    patterns = None
    cont_list = []
    rewrite_list = []
    # The parser only needs to track which section it is currently in:
    section_context = "no_section"  # valid: "no_section", "logfiles", "cluster"
    cluster_ips = []
    # Parsing has to consider the following possible lines:
    # - comment lines (begin with #)
    # - logfiles lines (do not begin with #, are not empty, do not contain CLUSTER)
    # - cluster lines (begin with CLUSTER)
    # - logfile patterns (follow logfiles lines, begin with whitespace)
    # - cluster ips (follow cluster lines, begin with whitespace)
    # It also needs to detect the end of a block in order to append the
    # collected ips to their cluster.
    for line in config_lines:
        if is_comment(line):
            continue  # skip comments
        if is_empty(line):
            section_context = "no_section"
            continue  # go to next line

        if section_context == "no_section":
            if not line.startswith("CLUSTER") and not is_comment(line):
                section_context = "logfiles"
                patterns = []
                cont_list = []     # Clear list of continuation patterns from last file
                rewrite_list = []  # Same for rewrite patterns
                filenames = parse_filenames(line)
                logfiles_configs.append(LogfilesConfig(filenames, patterns))
                LOGGER.debug("filenames: %s", filenames)
            elif line.startswith("CLUSTER"):
                section_context = "cluster"
                cluster_ips = []
                cluster_name = line.replace("CLUSTER ", "").strip()
                cluster_configs.append(ClusterConfig(cluster_name, cluster_ips))
                LOGGER.debug("new cluster: %s", cluster_name)
            else:
                raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))

        elif section_context == "logfiles":
            if not is_pattern_or_ip(line):
                raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))
            LOGGER.debug("pattern line: %s", line)
            if patterns is None:
                raise Exception("Missing logfile names")
            level, pattern = line.split(None, 1)

            if level == 'A':
                cont_list.append(parse_cont_pattern(pattern))
            elif level == 'R':
                rewrite_list.append(pattern)
            else:
                level, compiled = parse_pattern(level, pattern, line)
                # New pattern for line matching => clear continuation and rewrite patterns
                cont_list = []
                rewrite_list = []
                pattern = (level, compiled, cont_list, rewrite_list)
                patterns.append(pattern)
                LOGGER.debug("pattern %s", pattern)

        elif section_context == "cluster":
            if not is_pattern_or_ip(line):
                raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))
            ip = line.strip()
            cluster_ips.append(ip)
            LOGGER.debug("cluster ip: %s", ip)

        else:
            raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))

    config.extend(logfiles_configs)
    config.extend(cluster_configs)

    LOGGER.info("Logfiles configurations:")
    LOGGER.info(logfiles_configs)
    LOGGER.info("Optional cluster configurations:")
    LOGGER.info(cluster_configs)
    return config

def parse_cont_pattern(pattern):
    try:
        return int(pattern)
    except ValueError:
        try:
            return re.compile(pattern)
        except re.error:
            if debug():
                raise
            raise Exception("Invalid regular expression in line '%s'" % pattern)

def read_status(file_name):
    """
    Support status files with the following structure:

    # LOGFILE         OFFSET  INODE
    /var/log/messages|7767698|32455445
    /var/test/x12134.log|12345|32444355

    Status file lines must not be empty; each line must contain |-separated
    status meta data.
    """
    LOGGER.debug("Status file:")
    LOGGER.debug(file_name)
    if debug():
        return {}

    status = {}
    for line in open(file_name):
        inode = -1
        try:
            parts = line.split('|')
            filename = parts[0]
            offset = parts[1]
            if len(parts) >= 3:
                inode = parts[2]
        except Exception as e:
            raise Exception("Parsing of status file %s line \"%s\" failed: %s"
                            % (file_name, line, e))
        try:
            status[filename] = int(offset), int(inode)
        except Exception as e:
            raise Exception("Parsing of status file %s line \"%s\" failed: %s"
                            % (file_name, line, e))

    LOGGER.info("read status:")
    LOGGER.info(status)
    return status

def save_status(status, file_name):
    LOGGER.debug("save status:")
    LOGGER.debug("status: %s", status)
    LOGGER.debug("filename: %s", file_name)
    with open(file_name, "w") as f:
        for filename, (offset, inode) in status.items():
            f.write("%s|%d|%d\n" % (filename, offset, inode))
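
# Round-trip sketch (illustrative): save_status({'/var/log/messages': (7767698, 32455445)},
# file_name) writes the line '/var/log/messages|7767698|32455445', which
# read_status(file_name) parses back into {'/var/log/messages': (7767698, 32455445)}.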

def next_line(file_handle, continuation_line):
    if continuation_line is not None:
        return continuation_line, None

    try:
        line = file_handle.next()
        # Avoid parsing of (yet) incomplete lines (when the writing
        # application is just in the process of appending).
        # Just check whether the line ends with a \n. This handles \n and \r\n.
        if not line.endswith("\n"):
            begin_of_line_offset = file_handle.tell() - len(line)
            os.lseek(file_handle.fileno(), begin_of_line_offset, 0)
            return None, None
        return line, None
    except Exception:  # e.g. StopIteration at the end of the file
        return None, None

def is_inode_capable(path):
    if "linux" in os_type():
        return True
    elif "windows" in os_type():
        volume_name = "%s:\\\\" % path.split(":", 1)[0]
        import win32api  # pylint: disable=import-error
        volume_info = win32api.GetVolumeInformation(volume_name)
        volume_type = volume_info[-1]
        return "ntfs" in volume_type.lower()
    return False

def process_logfile(logfile, patterns, opt, status):
    """
    Returns a tuple of (logfile lines, warning and/or error indicator,
    warning and/or error lines), a list of logfile lines (the header only)
    in case the file has never been seen before, and None in case the
    logfile cannot be opened.
    """
    loglines = []

    if debug():
        tty_red = '\033[1;31m'
        tty_green = '\033[1;32m'
        tty_yellow = '\033[1;33m'
        tty_blue = '\033[1;34m'
        tty_normal = '\033[0m'
    else:
        tty_red = ''
        tty_green = ''
        tty_yellow = ''
        tty_blue = ''
        tty_normal = ''

    # Look at which file offset we have finished scanning the logfile last
    # time. If we have never seen this file before, we set the offset to -1.
    offset, prev_inode = status.get(logfile, (-1, -1))
    try:
        file_desc = os.open(logfile, os.O_RDONLY)
        if not is_inode_capable(logfile):
            inode = 1  # Create a dummy inode
        else:
            inode = os.fstat(file_desc)[1]  # 1 = st_ino
    except Exception:
        if debug():
            raise
        return None

    loglines.append("[[[%s]]]\n" % logfile)

    # Seek to the current end in order to determine the file size
    current_end = os.lseek(file_desc, 0, 2)  # os.SEEK_END not available in Python 2.4
    status[logfile] = current_end, inode

    # If we have never seen this file before, we just set the current pointer
    # to the file end. We do not want to make a fuss about ancient log
    # messages...
    if offset == -1:
        if not debug():
            return loglines
        else:
            offset = 0

    # If the inode of the logfile has changed, it has apparently been replaced
    # by a new file (logfile rotation). At least we must assume that. In some
    # rare cases (restore of a backup, etc.) we are wrong and resend old log
    # messages.
    if prev_inode >= 0 and inode != prev_inode:
        offset = 0

    # Our previously stored offset is the current end ->
    # no new lines in this file
    if offset == current_end:
        return (loglines, False, [])  # loglines contain the logfile name only

    # If our offset is beyond the current end, the logfile has been truncated
    # or wrapped while keeping the same inode. We assume that it contains all
    # new data in that case and restart from offset 0.
    if offset > current_end:
        offset = 0

    # Now seek to the offset where the interesting data begins
    os.lseek(file_desc, offset, 0)  # os.SEEK_SET not available in Python 2.4
    if "windows" in os_type():
        import io  # available since Python 2.6
        # Some Windows files are encoded in utf_16.
        # Peek at the first two bytes to determine the encoding (BOM)...
        peek_handle = os.fdopen(file_desc, "rb")
        first_two_bytes = peek_handle.read(2)
        use_encoding = None
        if first_two_bytes == "\xFF\xFE":
            use_encoding = "utf_16"
        elif first_two_bytes == "\xFE\xFF":
            use_encoding = "utf_16_be"

        os.lseek(file_desc, offset, 0)  # os.SEEK_SET not available in Python 2.4
        file_handle = io.open(file_desc, encoding=use_encoding)
    else:
        file_handle = os.fdopen(file_desc)

    worst = -1
    warnings = False
    warnings_and_errors = []
    lines_parsed = 0
    start_time = time.time()
    pushed_back_line = None

    while True:
        line, pushed_back_line = next_line(file_handle, pushed_back_line)
        if line is None:
            break  # End of file

        # Handle option maxlinesize
        if opt.maxlinesize is not None and len(line) > opt.maxlinesize:
            line = line[:opt.maxlinesize] + "[TRUNCATED]\n"

        lines_parsed += 1
        # Check if the maximum number of new log messages is exceeded
        if opt.maxlines is not None and lines_parsed > opt.maxlines:
            warnings_and_errors.append("%s Maximum number (%d) of new log messages exceeded.\n" % (
                opt.overflow,
                opt.maxlines,
            ))
            worst = max(worst, opt.overflow_level)
            os.lseek(file_desc, 0, os.SEEK_END)  # skip all other messages
            break

        # Check if the maximum processing time (per file) is exceeded. Check
        # only every 100th line in order to save system calls.
        if opt.maxtime is not None and lines_parsed % 100 == 10 \
                and time.time() - start_time > opt.maxtime:
            warnings_and_errors.append(
                "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
                    opt.overflow,
                    opt.maxtime,
                ))
            worst = max(worst, opt.overflow_level)
            os.lseek(file_desc, 0, os.SEEK_END)  # skip all other messages
            break

        level = "."
        for lev, pattern, cont_patterns, replacements in patterns:
            matches = pattern.search(line[:-1])
            if matches:
                level = lev
                levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
                worst = max(levelint, worst)

                # Check for continuation lines
                for cont_pattern in cont_patterns:
                    if isinstance(cont_pattern, int):  # add that many lines
                        for _unused_x in range(cont_pattern):
                            cont_line, pushed_back_line = next_line(file_handle, pushed_back_line)
                            if cont_line is None:  # end of file
                                break
                            line = line[:-1] + "\1" + cont_line

                    else:  # pattern is a regex
                        while True:
                            cont_line, pushed_back_line = next_line(file_handle, pushed_back_line)
                            if cont_line is None:  # end of file
                                break
                            elif cont_pattern.search(cont_line[:-1]):
                                line = line[:-1] + "\1" + cont_line
                            else:
                                pushed_back_line = cont_line  # sorry for stealing this line
                                break

                # Replacement
                for replace in replacements:
                    line = replace.replace('\\0', line.rstrip()) + "\n"
                    for nr, group in enumerate(matches.groups()):
                        line = line.replace('\\%d' % (nr + 1), group)

                break  # matching rule found and executed

        color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
        if debug():
            line = line.replace("\1", "\nCONT:")
        if level == "I":
            level = "."
        if opt.nocontext and level == '.':
            continue
        warnings_and_errors.append("%s%s %s%s\n" % (color, level, line[:-1], tty_normal))

    new_offset = os.lseek(file_desc, 0, 1)  # os.SEEK_CUR not available in Python 2.4
    status[logfile] = new_offset, inode

    # Output all lines if at least one warning, error or ok has been found
    if worst > -1:
        warnings = True

    # Handle option maxfilesize, regardless of warnings or errors that have occurred
    if opt.maxfilesize is not None and (offset / opt.maxfilesize) < (new_offset / opt.maxfilesize):
        warnings_and_errors.append(
            "%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" %
            (tty_yellow, opt.maxfilesize, new_offset / opt.maxfilesize, tty_normal))

    return (loglines, warnings, warnings_and_errors)
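
# Illustrative section output (hypothetical log content): for a monitored file
# with the pattern block "C panic" / "W error" and two matching new lines, the
# agent section assembled in main() would look like
#
#     <<<logwatch>>>
#     [[[/var/log/messages]]]
#     C Oct 24 12:00:00 host kernel: Kernel panic - not syncing
#     W Oct 24 12:00:05 host app[42]: error opening database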

class Options(object):
    """Options w.r.t. logfile patterns (not w.r.t. cluster mapping)."""

    MAP_OVERFLOW = {'C': 2, 'W': 1, 'I': 0, 'O': 0}
    MAP_BOOL = {'true': True, 'false': False}

    def __init__(self):
        self.maxfilesize = None
        self.maxlines = None
        self.maxtime = None
        self.maxlinesize = None
        self.regex = None
        self._overflow = None
        self.nocontext = None

    @property
    def overflow(self):
        return 'C' if self._overflow is None else self._overflow

    @property
    def overflow_level(self):
        return self.MAP_OVERFLOW[self.overflow]

    def update(self, other):
        for attr in (
                'maxfilesize',
                'maxlines',
                'maxtime',
                'maxlinesize',
                'regex',
                '_overflow',
                'nocontext',
        ):
            new = getattr(other, attr)
            if new is not None:
                setattr(self, attr, new)

    def set_opt(self, opt_str):
        try:
            key, value = opt_str.split('=', 1)
            if key in ('maxlines', 'maxlinesize', 'maxfilesize'):
                setattr(self, key, int(value))
            elif key in ('maxtime',):
                setattr(self, key, float(value))
            elif key == 'overflow':
                if value not in self.MAP_OVERFLOW.keys():
                    raise ValueError("Invalid overflow: %r (choose from %r)" % (
                        value,
                        self.MAP_OVERFLOW.keys(),
                    ))
                self._overflow = value
            elif key in ('regex', 'iregex'):
                self.regex = re.compile(value, re.I if key.startswith('i') else 0)
            elif key in ('nocontext',):
                try:
                    setattr(self, key, self.MAP_BOOL[value.lower()])
                except KeyError:
                    raise ValueError("Invalid %s: %r (choose from %r)" % (
                        key,
                        value,
                        self.MAP_BOOL.keys(),
                    ))
            else:
                raise ValueError("Invalid option: %r" % opt_str)
        except ValueError as e:
            if debug():
                raise
            sys.stdout.write("INVALID CONFIGURATION: %s\n" % e)
            sys.exit(1)
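
# Usage sketch (illustrative, not executed anywhere in this plugin): options
# appear as 'key=value' items on the logfiles line of logwatch.cfg and are
# applied via set_opt(), e.g.
#
#     opt = Options()
#     opt.set_opt("maxlines=100")   # stored as int
#     opt.set_opt("overflow=W")     # must be one of C/W/I/O
#     opt.set_opt("iregex=app.*")   # compiled with re.IGNORECASE
#     assert opt.overflow_level == Options.MAP_OVERFLOW['W']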

def parse_sections(config):
    """
    Returns the items of a dict with the logfile name as key and either a
    tuple of (patterns, options) or None (in case the file cannot be found)
    as value.
    """
    logfile_patterns = {}

    logfiles_configs = [c for c in config if isinstance(c, LogfilesConfig)]
    for filenames, patterns in logfiles_configs:

        # First read all the options like 'maxlines=100' or 'maxtime=10'
        opt = Options()
        for item in filenames:
            if '=' in item:
                opt.set_opt(item)

        # Then handle the file patterns
        for glob_pattern in (f for f in filenames if '=' not in f):
            logfiles = glob.glob(glob_pattern)  # TODO: discard dirs via filter
            if opt.regex is not None:
                logfiles = [f for f in logfiles if opt.regex.search(f)]
            if not logfiles:
                logfile_patterns[glob_pattern] = None
            for logfile in logfiles:
                present_patterns, present_options = logfile_patterns.get(logfile, ([], Options()))
                present_patterns.extend(patterns)
                present_options.update(opt)
                logfile_patterns[logfile] = (present_patterns, present_options)

    return logfile_patterns.items()
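
# Example (illustrative): a configuration line '/var/log/app*.log maxlines=100'
# produces one Options instance with maxlines=100 and maps every file matched
# by the glob, e.g. /var/log/app1.log and /var/log/app2.log, to the tuple
# (patterns, options); an unmatched glob is mapped to None and later reported
# as '[[[<glob>:missing]]]' by main().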

def main():

    parse_arguments()

    sys.stdout.write("<<<logwatch>>>\n")

    try:
        # This removes leftover folders which may be generated by crashing frozen binaries
        folder_cleaner = MEIFolderCleaner()
        folder_cleaner.find_and_remove_leftover_folders(hint_filenames=["mk_logwatch.exe.manifest"])
    except Exception as e:
        sys.stdout.write("ERROR WHILE DOING FOLDER: %s\n" % e)
        sys.exit(1)

    try:
        files = get_config_files(mk_confdir())
        config = read_config(files)
    except Exception as e:
        if debug():
            raise
        sys.stdout.write("CANNOT READ CONFIG FILE: %s\n" % e)
        sys.exit(1)

    status_filename = get_status_filename(config)
    # Copy the last known state from logwatch.state if there is no dedicated
    # status file for this remote yet.
    if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % mk_vardir()):
        shutil.copy("%s/logwatch.state" % mk_vardir(), status_filename)

    # Simply ignore errors in the status file. In case of a corrupted status
    # file we simply begin with an empty status. That keeps the monitoring up
    # and running, even if we might lose a message in the extreme case of a
    # corrupted status file.
    try:
        status = read_status(status_filename)
    except Exception:
        status = {}

    for logfile, meta_data in parse_sections(config):
        loglines = []
        at_least_warnings = False
        warnings_and_errors = []
        if isinstance(meta_data, tuple):
            (patterns, options) = meta_data
            # When debugging, use option -d to avoid misleading exceptions
            # caused by the side effects of process_logfile().
            process_result = process_logfile(logfile, patterns, options, status)
            if isinstance(process_result, tuple):
                loglines, at_least_warnings, warnings_and_errors = process_result
            elif isinstance(process_result, list):
                loglines = process_result
            elif process_result is None:
                sys.stdout.write('[[[%s:cannotopen]]]\n' % logfile)
            else:
                LOGGER.debug("Invalid logfile processing result %s", process_result)
        elif meta_data is None:
            sys.stdout.write('[[[%s:missing]]]\n' % logfile)
        else:
            LOGGER.debug("Invalid parse metadata %s", meta_data)

        LOGGER.debug("Loglines:")
        LOGGER.debug(loglines)
        for l in loglines:
            sys.stdout.write(l)
        if at_least_warnings:
            LOGGER.debug("Warnings and errors:")
            LOGGER.debug(warnings_and_errors)
            for we in warnings_and_errors:
                sys.stdout.write(we)

    if not debug():
        save_status(status, status_filename)


if __name__ == "__main__":
    main()