2 # -*- encoding: utf-8; py-indent-offset: 4 -*-
3 # +------------------------------------------------------------------+
4 # | ____ _ _ __ __ _ __ |
5 # | / ___| |__ ___ ___| | __ | \/ | |/ / |
6 # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
7 # | | |___| | | | __/ (__| < | | | | . \ |
8 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
10 # | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
11 # +------------------------------------------------------------------+
13 # This file is part of Check_MK.
14 # The official homepage is at http://mathias-kettner.de/check_mk.
16 # check_mk is free software; you can redistribute it and/or modify it
17 # under the terms of the GNU General Public License as published by
18 # the Free Software Foundation in version 2. check_mk is distributed
19 # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
20 # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
21 # PARTICULAR PURPOSE. See the GNU General Public License for more de-
22 # tails. You should have received a copy of the GNU General Public
23 # License along with GNU Make; see the file COPYING. If not, write
24 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
25 # Boston, MA 02110-1301 USA.
35 from collections
import namedtuple
# from Python2.6, Python3.1 onwards
# logical: LogfilesConfig([file, ...], (level, compiled, [continuation pattern, ...], [rewrite pattern, ...]))
# types: LogfilesConfig[List[str], Tuple[str, sre.SRE_Pattern object, List[str], List[str]]]
LogfilesConfig = namedtuple('LogfilesConfig', ['files', 'patterns'])

# logical: ClusterConfig(name, ips)
# types: ClusterConfig[str, List[str]]
ClusterConfig = namedtuple('ClusterConfig', ['name', 'ips'])

# Module-level logger; handlers/levels are set up by parse_arguments().
LOGGER = logging.getLogger(__name__)
def parse_arguments(argv=None):
    """Custom argument parsing.

    (Neither use optparse which is Python 2.3 to 2.7 only.
    Nor use argparse which is Python 2.7 onwards only.)

    This is the Check_MK Agent plugin. If configured it will be called by the
    agent without arguments.

    -d  Debug mode: Colored output, no saving of status.
    -v  Verbose output for debugging purposes (no debug mode).

    You should find an example configuration file at
    '../cfg_examples/logwatch.cfg' relative to this file.
    """
    # NOTE(review): the argv-inspection code (choosing between the two
    # basicConfig calls depending on -v / -d) appears to be missing from this
    # excerpt. As written both calls would run unconditionally and only the
    # first logging.basicConfig() would take effect -- confirm against the
    # full file.
    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
    logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(lineno)s: %(message)s")
    # Keep plugin log records out of any root-logger handlers.
    LOGGER.propagate = False
81 # .--MEI-Cleanup---------------------------------------------------------.
82 # | __ __ _____ ___ ____ _ |
83 # | | \/ | ____|_ _| / ___| | ___ __ _ _ __ _ _ _ __ |
84 # | | |\/| | _| | |_____| | | |/ _ \/ _` | '_ \| | | | '_ \ |
85 # | | | | | |___ | |_____| |___| | __/ (_| | | | | |_| | |_) | |
86 # | |_| |_|_____|___| \____|_|\___|\__,_|_| |_|\__,_| .__/ |
88 # +----------------------------------------------------------------------+
89 # In case the program crashes or is killed in a hard way, the frozen binary .exe
90 # may leave temporary directories named "_MEI..." in the temporary path. Clean them
91 # up to prevent eating disk space over time.
93 ########################################################################
94 ############## DUPLICATE CODE WARNING ##################################
95 ### This code is also used in the cmk-update-agent frozen binary #######
96 ### Any changes to this class should also be made in cmk-update-agent ##
97 ### In the bright future we will move this code into a library #########
98 ########################################################################
class MEIFolderCleaner(object):
    """Removes stale PyInstaller "_MEI*" temp directories on Windows.

    A crashed or hard-killed frozen binary can leave "_MEI<PID><NR>"
    directories in the temporary path; see the section comment above.
    """

    def pid_running(self, pid):
        # Ask the Win32 API whether a process with this pid can be opened.
        kernel32 = ctypes.windll.kernel32
        SYNCHRONIZE = 0x100000  # process access right required by OpenProcess

        process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
        # NOTE(review): the running/not-running decision and the return
        # statement appear to be missing from this excerpt.
        kernel32.CloseHandle(process)

    def find_and_remove_leftover_folders(self, hint_filenames):
        # Only relevant when running as a frozen (PyInstaller) binary.
        if not hasattr(sys, "frozen"):
            # NOTE(review): an early `return` is presumably missing here.

        import win32file  # pylint: disable=import-error

        base_path = tempfile.gettempdir()
        for f in os.listdir(base_path):
            path = os.path.join(base_path, f)

            if not os.path.isdir(path):
                # NOTE(review): a `continue` is presumably missing here.

            # Only care about directories related to our program
            for hint_filename in hint_filenames:
                if not os.path.exists(os.path.join(path, hint_filename)):
                    # NOTE(review): the body of this guard is missing from
                    # the excerpt.

            pyinstaller_tmp_path = win32file.GetLongPathName(sys._MEIPASS).lower()  # pylint: disable=no-member
            if pyinstaller_tmp_path == path.lower():
                continue  # Skip our own directory

            # Extract the process id from the directory and check whether or not it is still
            # running. Don't delete directories of running processes!
            # The name of the temporary directories is "_MEI<PID><NR>". We try to extract the PID
            # by stripping off a single digit from the right. In the hope the NR is a single digit
            # in all relevant cases.
            # NOTE(review): the pid-extraction code assigning `pid` is
            # missing from this excerpt.
            if self.pid_running(pid):
                # NOTE(review): the skip/removal code is missing from this
                # excerpt.

        # NOTE(review): the `try:` matching this handler is missing from the
        # excerpt.
        except Exception as e:
            LOGGER.debug("Finding and removing leftover folders failed: %s", e)
# NOTE(review): the enclosing `def debug():` header appears to be missing
# from this excerpt. This is the body of the debug-mode predicate:
# True when the plugin was invoked with -d/--debug on the command line.
return '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]
# The configuration file and status file are searched
# in the directory named by the environment variable
# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
# If that is not set either, the current directory is used.
# NOTE(review): the enclosing `def mk_vardir():` header appears to be
# missing from this excerpt. Resolution order for the state directory:
# LOGWATCH_DIR, then MK_VARDIR, then MK_STATEDIR, then ".".
return os.getenv("LOGWATCH_DIR") or os.getenv("MK_VARDIR") or os.getenv("MK_STATEDIR") or "."
169 """Note: When debugging this plugin on hosts this function will always return "." cause
170 required sudo rights for execution of /usr/lib/check_mk_agent/plugins/mk_logwatch
171 doesn't provide env vars."""
172 return os
.getenv("LOGWATCH_DIR") or os
.getenv("MK_CONFDIR") or "."
def get_status_filename(config):
    """
    Determine the name of the state file dependent on ENV variable and config:
    - In case agent plugin is called with debug option set -> depends on global

    $REMOTE set, no cluster set or no ip match -> logwatch.state.<formatted-REMOTE>
    $REMOTE set, cluster set and ip match -> logwatch.state.<cluster-name>
    $REMOTE not set and a tty -> logwatch.state.local
    $REMOTE not set and not a tty -> logwatch.state

    $REMOTE is determined by the check_mk_agent and varies dependent on how the
    check_mk_agent is accessed:
    - telnet ($REMOTE_HOST): $REMOTE is in IPv6 notation. IPv4 is extended to IPv6
      notation e.g. ::ffff:127.0.0.1
    - ssh ($SSH_CLIENT): $REMOTE is either in IPv4 or IPv6 notation dependent on the
      IP family of the remote host.

    <formatted-REMOTE> is REMOTE with colons (:) replaced with underscores (_) for
    IPv6 address, is to IPv6 notation extended address with colons (:) replaced with
    underscores (_) for IPv4 address or is plain $REMOTE in case it does not match
    an IPv4 or IPv6 address.
    """
    remote = os.getenv("REMOTE", "")
    ipv4_regex = r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$"
    ipv6_regex = r"^(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}$"
    ipv4_match = re.match(ipv4_regex, remote)
    ipv6_match = re.match(ipv6_regex, remote)
    remote_hostname = remote.replace(":", "_")
    # NOTE(review): the branch markers distinguishing the IPv4 / IPv6 /
    # no-match cases appear to be missing from this excerpt -- the three
    # statements below presumably belong to separate branches.
    remote_ip = ipv4_match.group()
    remote_ip = ipv6_match.group()
    # in case of IPv4 extended to IPv6 get rid of prefix for ip match lookup
    if remote_ip.startswith("::ffff:"):
        remote_ip = remote_ip.replace("::ffff:", "")
    # NOTE(review): an `else:` branch marker is presumably missing before
    # this debug message.
    LOGGER.debug("REMOTE neither IPv4 nor IPv6 address.")

    # In case cluster configured map ip to cluster name if configured.
    # key "name" is mandatory and unique for cluster dicts
    status_filename = None
    cluster_configs = [nt for nt in config if isinstance(nt, ClusterConfig)]
    LOGGER.debug("Cluster configurations:")
    LOGGER.debug(cluster_configs)
    for nt in cluster_configs:
        # NOTE(review): the ip-matching loop/condition over nt.ips is
        # missing from this excerpt.
        # cluster name may not contain whitespaces (must be provided from
        # the WATO config as type ID or hostname).
        cluster_name = nt.name
        LOGGER.info("matching cluster ip %s", ip)
        LOGGER.info("matching cluster name %s", cluster_name)

    # NOTE(review): the `if` head for the cluster-match case is missing;
    # the elif-chain below selects the state file name per the docstring.
    status_filename = "%s/logwatch.state.%s" % (mk_vardir(), cluster_name)
    elif remote_hostname != "":
        status_filename = "%s/logwatch.state.%s" % (mk_vardir(), remote_hostname)
    elif remote_hostname == "" and sys.stdout.isatty():  # real terminal
        status_filename = "%s/logwatch.state.local" % mk_vardir()
    elif remote_hostname == "" and not sys.stdout.isatty():  # piped or redirected
        status_filename = "%s/logwatch.state" % mk_vardir()
    # NOTE(review): a guard (presumably `if status_filename is None:`) is
    # missing before this raise.
    raise Exception("Status filename could not be determined.")
    LOGGER.info("Status filename: %s", status_filename)
    return status_filename
# NOTE(review): the enclosing `def os_type():` header appears to be missing
# from this excerpt. Returns the lowercase platform name, e.g. "linux" or
# "windows".
import platform  # added in Python 2.3
return platform.system().lower()
def is_comment(line):
    """Return True if the configuration line is a '#' comment (leading whitespace allowed)."""
    stripped = line.lstrip()
    return stripped.startswith('#')
# NOTE(review): the enclosing `def` header (presumably `def is_empty(line):`)
# is missing from this excerpt. True when the line is blank or whitespace-only.
return line.strip() == ""
def is_pattern_or_ip(line):
    """Return True for indented config lines (logfile patterns or cluster IPs).

    Pattern and IP continuation lines are recognized by a single leading
    space character.
    """
    return line[:1] == " "
def parse_filenames(line):
    # NOTE(review): the function body is missing from this excerpt;
    # presumably it splits the logfiles line into whitespace-separated
    # tokens (glob patterns and inline options) -- confirm against the
    # full file.
def parse_pattern(level, pattern, line):
    """Validate and compile one 'LEVEL REGEX' pattern line.

    level:   one of 'C' (critical), 'W' (warning), 'I' (ignore), 'O' (ok).
    pattern: the regular expression text to compile.
    line:    the full original config line, used only in error messages.

    Returns the tuple (level, compiled_regex).
    Raises Exception when the level is unknown or the regex does not compile.
    """
    if level not in ['C', 'W', 'I', 'O']:
        raise Exception("Invalid pattern line '%s'" % line)

    # The original excerpt lost the try/except framing here; without it the
    # "Invalid regular expression" raise was unreachable. Wrap the compile so
    # a malformed regex is reported with the offending config line.
    try:
        compiled = re.compile(pattern)
    except Exception:
        raise Exception("Invalid regular expression in line '%s'" % line)

    return (level, compiled)
def get_config_files(directory):
    """Return the candidate configuration file paths for *directory*.

    Always includes <directory>/logwatch.cfg, plus any drop-in
    <directory>/logwatch.d/*.cfg files.
    """
    paths = [directory + "/logwatch.cfg"]
    # Add config file paths from a logwatch.d folder
    paths.extend(glob.glob(directory + "/logwatch.d/*.cfg"))
    LOGGER.info("Configuration file paths:")
    LOGGER.info(paths)
    return paths
def read_config(files):
    """
    Read logwatch.cfg (patterns, cluster mapping, etc.).

    Side effect: Reads filesystem files logwatch.cfg and /logwatch.d/*.cfg

    Returns configuration as list. List elements are namedtuples.
    Namedtuple either describes logile patterns and is LogfilesConfig(files, patterns).
    Or tuple describes optional cluster mapping and is ClusterConfig(name, ips)
    with ips as list of strings.
    """
    LOGGER.debug("config files:")
    # NOTE(review): initialization of `config`, `cluster_configs` and
    # `config_lines`, and the loop over `files`, appear to be missing from
    # this excerpt.
    logfiles_configs = []
    config_lines += [line.rstrip() for line in open(f).readlines() if not is_comment(line)]

    # only cluster_mapping context requires parse context
    section_context = "no_section"  # valid: "no_section", "logfiles", "cluster"

    # parsing has to consider the following possible lines:
    # - comment lines (begin with #)
    # - logfiles line (begin not with #, are not empty and do not contain CLUSTER)
    # - cluster lines (begin with CLUSTER)
    # - logfiles patterns (follow logfiles lines, begin with whitespace)
    # - cluster ips (follow cluster lines, begin with whitespace)
    # Needs to consider end of lines to append ips to clusters as well.
    for line in config_lines:
        # NOTE(review): the guard condition for this `continue` is missing
        # (presumably `if is_comment(line):`).
        continue  # skip comments
        # NOTE(review): the guard for the next two statements is missing
        # (presumably `if is_empty(line):`).
        section_context = "no_section"
        continue  # go to next line
        if section_context == "no_section":
            if not line.startswith("CLUSTER") and not is_comment(line):
                section_context = "logfiles"
                # NOTE(review): initialization of `patterns` is missing here.
                cont_list = []  # Clear list of continuation patterns from last file
                rewrite_list = []  # Same for rewrite patterns
                filenames = parse_filenames(line)
                logfiles_configs.append(LogfilesConfig(filenames, patterns))
                LOGGER.debug("filenames: %s", filenames)
            elif line.startswith("CLUSTER"):
                section_context = "cluster"
                # NOTE(review): initialization of `cluster_ips` is missing here.
                cluster_name = line.replace("CLUSTER ", "").strip()
                cluster_configs.append(ClusterConfig(cluster_name, cluster_ips))
                LOGGER.debug("new cluster: %s", cluster_name)
            # NOTE(review): an `else:` marker is presumably missing before
            # this raise.
                raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))
        elif section_context == "logfiles":
            if not is_pattern_or_ip(line):
                raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))
            LOGGER.debug("pattern line: %s", line)
            # NOTE(review): the guard for this raise is missing (presumably
            # `if not logfiles_configs:`).
            raise Exception("Missing logfile names")
            level, pattern = line.split(None, 1)
            # NOTE(review): the branch markers for the special levels 'A'
            # (continuation pattern) and 'R' (rewrite pattern) appear to be
            # missing around the next two statements.
            cont_list.append(parse_cont_pattern(pattern))
            rewrite_list.append(pattern)
            level, compiled = parse_pattern(level, pattern, line)
            # New pattern for line matching => clear continuation and rewrite patterns
            # NOTE(review): the statements clearing cont_list/rewrite_list
            # are missing from this excerpt.
            pattern = (level, compiled, cont_list, rewrite_list)
            patterns.append(pattern)
            LOGGER.debug("pattern %s", pattern)
        elif section_context == "cluster":
            if not is_pattern_or_ip(line):
                raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))
            # NOTE(review): the assignment of `ip` (presumably line.strip())
            # is missing here.
            cluster_ips.append(ip)
            LOGGER.debug("cluster ip: %s", ip)
        # NOTE(review): an `else:` marker is presumably missing before this
        # raise.
            raise Exception("Parsing error. (section: %s, line: %s)" % (section_context, line))

    config.extend(logfiles_configs)
    config.extend(cluster_configs)

    LOGGER.info("Logfiles configurations:")
    LOGGER.info(logfiles_configs)
    LOGGER.info("Optional cluster configurations:")
    LOGGER.info(cluster_configs)
    # NOTE(review): `return config` appears to be missing from this excerpt.
def parse_cont_pattern(pattern):
    # NOTE(review): several original lines are missing from this excerpt;
    # presumably the function first tries to interpret `pattern` as an int
    # (a fixed number of continuation lines) before falling back to a
    # compiled regex -- confirm against the full file.
    return re.compile(pattern)
    # NOTE(review): this raise only makes sense inside an except handler
    # around the re.compile above; the try/except markers are missing here.
    raise Exception("Invalid regular expression in line '%s'" % pattern)
def read_status(file_name):
    """
    Support status files with the following structure:

    # LOGFILE OFFSET INODE
    /var/log/messages|7767698|32455445
    /var/test/x12134.log|12345|32444355

    Status file lines may not be empty but must contain | separated status meta data.
    """
    LOGGER.debug("Status file:")
    LOGGER.debug(file_name)
    # NOTE(review): initialization of `status` (presumably an empty dict)
    # is missing from this excerpt.
    for line in open(file_name):
        # NOTE(review): lines preparing the parse (presumably a try: and
        # line cleanup) are missing here.
        parts = line.split('|')
        # NOTE(review): the unpacking of `filename`, `offset`, `inode` from
        # `parts` is missing from this excerpt; the dangling except below
        # belongs to a missing try.
        except Exception as e:
            raise Exception("Parsing of status file %s line \"%s\" failed: %s" \
                % (file_name, line, e))
        status[filename] = int(offset), int(inode)
        except Exception as e:
            raise Exception("Parsing of status file %s line \"%s\" failed: %s" \
                % (file_name, line, e))
    LOGGER.info("read status:")
    # NOTE(review): `return status` appears to be missing from this excerpt.
def save_status(status, file_name):
    """Persist the {logfile: (offset, inode)} mapping to *file_name*.

    One '|'-separated line per logfile: "<name>|<offset>|<inode>\n".
    """
    LOGGER.debug("save status:")
    LOGGER.debug("status: %s", status)
    LOGGER.debug("filename: %s", file_name)
    status_lines = ["%s|%d|%d\n" % (logfile_name, offset, inode)
                    for logfile_name, (offset, inode) in status.items()]
    with open(file_name, "w") as status_file:
        status_file.writelines(status_lines)
def next_line(file_handle, continuation_line):
    # Returns a (line, pushed_back_line) pair; a previously pushed-back
    # continuation line takes precedence over reading from the file.
    if continuation_line is not None:
        return continuation_line, None
    # NOTE(review): a try: wrapper (presumably catching StopIteration at
    # end of file) appears to be missing around the read below.
    line = file_handle.next()
    # Avoid parsing of (yet) incomplete lines (when actual application
    # is just in the process of writing)
    # Just check if the line ends with a \n. This handles \n and \r\n
    if not line.endswith("\n"):
        # Rewind so the partial line is re-read on the next run.
        begin_of_line_offset = file_handle.tell() - len(line)
        os.lseek(file_handle.fileno(), begin_of_line_offset, 0)
        # NOTE(review): the return statements for the partial-line and
        # end-of-file cases are missing from this excerpt.
def is_inode_cabable(path):
    # NOTE: name "cabable" [sic] kept -- callers elsewhere use this spelling.
    # Decides whether the filesystem holding `path` has stable inodes that
    # can be used to detect logfile rotation.
    if "linux" in os_type():
        # NOTE(review): the body (presumably `return True`) is missing from
        # this excerpt.
    elif "windows" in os_type():
        # Derive the volume root (e.g. "C:\\") from the path's drive letter.
        volume_name = "%s:\\\\" % path.split(":", 1)[0]
        import win32api  # pylint: disable=import-error
        volume_info = win32api.GetVolumeInformation(volume_name)
        volume_type = volume_info[-1]  # last element is the filesystem name
        # Only NTFS volumes provide usable (inode-like) file indices here.
        return "ntfs" in volume_type.lower()
    # NOTE(review): the fallback branch (presumably `else: return False`) is
    # missing from this excerpt.
def process_logfile(logfile, patterns, opt, status):
    """
    Returns tuple of (logfile lines, warning and/or error indicator, warning and/or error lines, logfile lines (list) in case the file has never been seen before and
    None in case the logfile cannot be opened.
    """
    # NOTE(review): many original lines are missing from this excerpt
    # (marked below); the full control flow cannot be reconstructed here.

    # ANSI color codes for the interactive/debug output.
    tty_red = '\033[1;31m'
    tty_green = '\033[1;32m'
    tty_yellow = '\033[1;33m'
    tty_blue = '\033[1;34m'
    tty_normal = '\033[0m'

    # Look at which file offset we have finished scanning
    # the logfile last time. If we have never seen this file
    # before, we set the offset to -1
    offset, prev_inode = status.get(logfile, (-1, -1))

    # NOTE(review): the try/cannot-open handling around the open appears to
    # be missing from this excerpt.
    file_desc = os.open(logfile, os.O_RDONLY)
    if not is_inode_cabable(logfile):
        inode = 1  # Create a dummy inode
    # NOTE(review): an `else:` marker is presumably missing here.
        inode = os.fstat(file_desc)[1]  # 1 = st_ino

    # NOTE(review): initialization of `loglines` is missing from the excerpt.
    loglines.append("[[[%s]]]\n" % logfile)

    # Seek to the current end in order to determine file size
    current_end = os.lseek(file_desc, 0, 2)  # os.SEEK_END not available in Python 2.4
    status[logfile] = current_end, inode

    # If we have never seen this file before, we just set the
    # current pointer to the file end. We do not want to make
    # a fuss about ancient log messages...
    # NOTE(review): the corresponding first-seen handling is missing here.

    # If the inode of the logfile has changed it has apparently
    # been started from new (logfile rotation). At least we must
    # assume that. In some rare cases (restore of a backup, etc)
    # we are wrong and resend old log messages
    if prev_inode >= 0 and inode != prev_inode:
        # NOTE(review): the body (presumably resetting `offset`) is missing.

    # Our previously stored offset is the current end ->
    # no new lines in this file
    if offset == current_end:
        return (loglines, False, [])  # loglines contain logfile name only

    # If our offset is beyond the current end, the logfile has been
    # truncated or wrapped while keeping the same inode. We assume
    # that it contains all new data in that case and restart from
    if offset > current_end:
        # NOTE(review): the body (presumably resetting `offset`) is missing.

    # now seek to offset where interesting data begins
    os.lseek(file_desc, offset, 0)  # os.SEEK_SET not available in Python 2.4
    if "windows" in os_type():
        import io  # Available with python 2.6
        # Some windows files are encoded in utf_16
        # Peek the first two bytes to determine the encoding...
        peak_handle = os.fdopen(file_desc, "rb")
        first_two_bytes = peak_handle.read(2)
        # NOTE(review): the default-encoding initialization is missing here.
        if first_two_bytes == "\xFF\xFE":
            use_encoding = "utf_16"  # little-endian BOM
        elif first_two_bytes == "\xFE\xFF":
            use_encoding = "utf_16_be"  # big-endian BOM
        os.lseek(file_desc, offset, 0)  # os.SEEK_SET not available in Python 2.4
        file_handle = io.open(file_desc, encoding=use_encoding)
    # NOTE(review): an `else:` marker is presumably missing here.
        file_handle = os.fdopen(file_desc)

    # NOTE(review): initialization of `worst` and `lines_parsed` is missing
    # from this excerpt.
    warnings_and_errors = []
    start_time = time.time()
    pushed_back_line = None
    # NOTE(review): the enclosing read loop (presumably `while True:`) is
    # missing from this excerpt.
    line, pushed_back_line = next_line(file_handle, pushed_back_line)

    # Handle option maxlinesize
    if opt.maxlinesize is not None and len(line) > opt.maxlinesize:
        line = line[:opt.maxlinesize] + "[TRUNCATED]\n"

    # Check if maximum number of new log messages is exceeded
    if opt.maxlines is not None and lines_parsed > opt.maxlines:
        warnings_and_errors.append("%s Maximum number (%d) of new log messages exceeded.\n" % (
        # NOTE(review): the format arguments and closing paren are missing
        # from this excerpt.
        worst = max(worst, opt.overflow_level)
        os.lseek(file_desc, 0, os.SEEK_END)  # skip all other messages

    # Check if maximum processing time (per file) is exceeded. Check only
    # every 100'th line in order to save system calls
    if opt.maxtime is not None and lines_parsed % 100 == 10 \
            and time.time() - start_time > opt.maxtime:
        warnings_and_errors.append(
            "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
        # NOTE(review): the format arguments and closing paren are missing
        # from this excerpt.
        worst = max(worst, opt.overflow_level)
        os.lseek(file_desc, 0, os.SEEK_END)  # skip all other messages

    for lev, pattern, cont_patterns, replacements in patterns:
        matches = pattern.search(line[:-1])
        # NOTE(review): the `if matches:` guard and the assignment of
        # `level` appear to be missing here.
        levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
        worst = max(levelint, worst)

        # Check for continuation lines
        for cont_pattern in cont_patterns:
            if isinstance(cont_pattern, int):  # add that many lines
                for _unused_x in range(cont_pattern):
                    cont_line, pushed_back_line = next_line(file_handle, pushed_back_line)
                    if cont_line is None:  # end of file
                        # NOTE(review): the break/continue is missing here.
                    line = line[:-1] + "\1" + cont_line

            else:  # pattern is regex
                # NOTE(review): the enclosing loop for repeated continuation
                # matching is missing from this excerpt.
                cont_line, pushed_back_line = next_line(file_handle, pushed_back_line)
                if cont_line is None:  # end of file
                    # NOTE(review): the end-of-file handling is missing.
                elif cont_pattern.search(cont_line[:-1]):
                    line = line[:-1] + "\1" + cont_line
                # NOTE(review): an `else:` marker is presumably missing here.
                    pushed_back_line = cont_line  # sorry for stealing this line

        for replace in replacements:
            line = replace.replace('\\0', line.rstrip()) + "\n"
            for nr, group in enumerate(matches.groups()):
                line = line.replace('\\%d' % (nr + 1), group)

        break  # matching rule found and executed

    color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
    line = line.replace("\1", "\nCONT:")
    if opt.nocontext and level == '.':
        # NOTE(review): a `continue` is presumably missing here.
    warnings_and_errors.append("%s%s %s%s\n" % (color, level, line[:-1], tty_normal))

    new_offset = os.lseek(file_desc, 0, 1)  # os.SEEK_CUR not available in Python 2.4
    status[logfile] = new_offset, inode

    # output all lines if at least one warning, error or ok has been found
    # NOTE(review): that aggregation logic is missing from this excerpt.

    # Handle option maxfilesize, regardless of warning or errors that have happened
    if opt.maxfilesize is not None and (offset / opt.maxfilesize) < (new_offset / opt.maxfilesize):
        warnings_and_errors.append(
            "%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" %
            (tty_yellow, opt.maxfilesize, new_offset / opt.maxfilesize, tty_normal))

    # NOTE(review): `warnings` is not defined anywhere in the visible code --
    # possibly intended as the worst-level/at-least-warnings indicator;
    # confirm against the full file.
    return (loglines, warnings, warnings_and_errors)
class Options(object):
    """Options w.r.t. logfile patterns (not w.r.t. cluster mapping)."""

    # Maps an overflow level character to its severity rank.
    MAP_OVERFLOW = {'C': 2, 'W': 1, 'I': 0, 'O': 0}
    # Maps config-file boolean strings to Python booleans.
    MAP_BOOL = {'true': True, 'false': False}

    # NOTE(review): the `def __init__(self):` header (and possibly other
    # attribute defaults such as maxlines/maxtime/regex) appears to be
    # missing from this excerpt.
    self.maxfilesize = None
    self.maxlinesize = None
    self._overflow = None
    self.nocontext = None

    # NOTE(review): a `@property`/`def overflow(self):` header is presumably
    # missing before this default-to-'C' accessor.
    return 'C' if self._overflow is None else self._overflow

    # NOTE(review): a `@property` decorator is presumably missing here.
    def overflow_level(self):
        # Severity rank of the configured overflow level.
        return self.MAP_OVERFLOW[self.overflow]

    def update(self, other):
        # Merge option values set on `other` into self.
        # NOTE(review): the loop over the option attribute names is missing
        # from this excerpt.
        new = getattr(other, attr)
        # NOTE(review): a guard (presumably `if new is not None:`) is
        # missing here.
        setattr(self, attr, new)

    def set_opt(self, opt_str):
        # Parse a single "key=value" option token from the config file.
        # NOTE(review): the `try:` matching the except at the bottom is
        # missing from this excerpt.
        key, value = opt_str.split('=', 1)
        if key in ('maxlines', 'maxlinesize', 'maxfilesize'):
            setattr(self, key, int(value))
        elif key in ('maxtime',):
            setattr(self, key, float(value))
        elif key == 'overflow':
            if value not in self.MAP_OVERFLOW.keys():
                raise ValueError("Invalid overflow: %r (choose from %r)" % (
                    # NOTE(review): the first format argument (presumably
                    # `value`) is missing from this excerpt.
                    self.MAP_OVERFLOW.keys(),
            # NOTE(review): the closing of the raise call is missing here.
            self._overflow = value
        elif key in ('regex', 'iregex'):
            # 'iregex' compiles the same pattern case-insensitively.
            self.regex = re.compile(value, re.I if key.startswith('i') else 0)
        elif key in ('nocontext',):
            # NOTE(review): a value-validation guard is presumably missing
            # before the setattr below.
            setattr(self, key, self.MAP_BOOL[value.lower()])
            # NOTE(review): the else-branch marker for an invalid boolean is
            # missing before this raise.
            raise ValueError("Invalid %s: %r (choose from %r)" % (
                # NOTE(review): the first format arguments are missing from
                # this excerpt.
                self.MAP_BOOL.keys(),
        # NOTE(review): the final else branch marker is missing before this
        # raise.
            raise ValueError("Invalid option: %r" % opt_str)
        except ValueError as e:
            # NOTE(review): a debug()-mode re-raise presumably precedes this
            # in the full file.
            sys.stdout.write("INVALID CONFIGURATION: %s\n" % e)
def parse_sections(config):
    """
    Returns dict with logfile name as key and either tuple of (patterns, options)
    or None (in case the file cannot be found) as value.
    """
    logfile_patterns = {}
    logfiles_configs = [c for c in config if isinstance(c, LogfilesConfig)]
    for filenames, patterns in logfiles_configs:
        # NOTE(review): the creation of `opt` (presumably `opt = Options()`)
        # is missing from this excerpt.
        # First read all the options like 'maxlines=100' or 'maxtime=10'
        for item in filenames:
            # NOTE(review): the loop body (presumably feeding '='-tokens to
            # opt.set_opt) is missing from this excerpt.

        # Then handle the file patterns
        for glob_pattern in (f for f in filenames if '=' not in f):
            logfiles = glob.glob(glob_pattern)  # TODO: discard dirs via filter
            if opt.regex is not None:
                # Keep only the files whose path matches the regex option.
                logfiles = [f for f in logfiles if opt.regex.search(f)]
            # NOTE(review): the guard for the no-match case (presumably
            # `if not logfiles:`) is missing before the next line.
            logfile_patterns[glob_pattern] = None
            for logfile in logfiles:
                # Merge patterns/options when several sections match the
                # same logfile.
                present_patterns, present_options = logfile_patterns.get(logfile, ([], Options()))
                present_patterns.extend(patterns)
                present_options.update(opt)
                logfile_patterns[logfile] = (present_patterns, present_options)

    return logfile_patterns.items()
# NOTE(review): the enclosing `def main():` header appears to be missing
# from this excerpt; the following is the plugin's main body.
sys.stdout.write("<<<logwatch>>>\n")

# NOTE(review): a try: (and probably a Windows-only guard) is missing
# before the cleanup below.
# This removes leftover folders which may be generated by crashing frozen binaries
folder_cleaner = MEIFolderCleaner()
folder_cleaner.find_and_remove_leftover_folders(hint_filenames=["mk_logwatch.exe.manifest"])
except Exception as e:
    sys.stdout.write("ERROR WHILE DOING FOLDER: %s\n" % e)

# NOTE(review): a try: is missing before the config reading below.
files = get_config_files(mk_confdir())
config = read_config(files)
except Exception as e:
    # NOTE(review): a debug()-mode re-raise and an early exit presumably
    # follow in the full file.
    sys.stdout.write("CANNOT READ CONFIG FILE: %s\n" % e)

status_filename = get_status_filename(config)
# Copy the last known state from the logwatch.state when there is no status_filename yet.
if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % mk_vardir()):
    shutil.copy("%s/logwatch.state" % mk_vardir(), status_filename)

# Simply ignore errors in the status file. In case of a corrupted status file we simply begin
# with an empty status. That keeps the monitoring up and running - even if we might lose a
# message in the extreme case of a corrupted status file.
# NOTE(review): the try: matching the except below is missing.
status = read_status(status_filename)
except Exception as e:
    # NOTE(review): the handler body (presumably `status = {}`) is missing
    # from this excerpt.

at_least_warnings = False
warnings_and_errors = []
for logfile, meta_data in parse_sections(config):
    if isinstance(meta_data, tuple):
        (patterns, options) = meta_data
        # When debugging use option -d to prevent from misleading exceptions
        # due to side effects of process_logfiles().
        process_result = process_logfile(logfile, patterns, options, status)
        if isinstance(process_result, tuple):
            loglines, at_least_warnings, warnings_and_errors = process_result
        elif isinstance(process_result, list):
            loglines = process_result
        elif isinstance(process_result, None):
            # NOTE(review): BUG -- isinstance() requires a type as second
            # argument; isinstance(x, None) raises TypeError. The intended
            # check is `process_result is None` (or type(None)).
            sys.stdout.write('[[[%s:cannotopen]]]' % logfile)
        # NOTE(review): an `else:` marker is presumably missing here.
            LOGGER.debug("Invalid logfile processing result %s", process_result)
    elif isinstance(meta_data, None):
        # NOTE(review): same isinstance(..., None) bug as above.
        sys.stdout.write('[[[%s:missing]]]\n' % logfile)
    # NOTE(review): an `else:` marker is presumably missing here.
        LOGGER.debug("Invalid parse metadata %s", meta_data)

    LOGGER.debug("Loglines:")
    LOGGER.debug(loglines)
    # NOTE(review): the code printing loglines to stdout is missing from
    # this excerpt.
    if at_least_warnings:
        LOGGER.debug("Warnings and errors:")
        LOGGER.debug(warnings_and_errors)
        for we in warnings_and_errors:
            # NOTE(review): the loop body (presumably writing `we` to
            # stdout) is missing from this excerpt.

save_status(status, status_filename)

if __name__ == "__main__":
    # NOTE(review): the call (presumably `main()`) is missing from this
    # excerpt.