Refactoring: Changed all check parameters starting with an 'o' to the new rulespec...
[check_mk.git] / checks / db2_logsizes
blob6cfbcc004cbbabfaf2946d9d757941e900f1e3cb
1 #!/usr/bin/python
2 # -*- encoding: utf-8; py-indent-offset: 4 -*-
3 # ------------------------------------------------------------------+
4 # | ____ _ _ __ __ _ __ |
5 # | / ___| |__ ___ ___| | __ | \/ | |/ / |
6 # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
7 # | | |___| | | | __/ (__| < | | | | . \ |
8 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
9 # | |
10 # | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
11 # ------------------------------------------------------------------+
13 # This file is part of Check_MK.
14 # The official homepage is at http://mathias-kettner.de/check_mk.
16 # check_mk is free software; you can redistribute it and/or modify it
17 # under the terms of the GNU General Public License as published by
18 # the Free Software Foundation in version 2. check_mk is distributed
19 # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
20 # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
21 # PARTICULAR PURPOSE. See the GNU General Public License for more de-
22 # tails. You should have received a copy of the GNU General Public
23 # License along with GNU Make; see the file COPYING. If not, write
24 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
25 # Boston, MA 02110-1301 USA.
27 # <<<db2_logsizes>>>
28 # [[[db2taddm:CMDBS1]]]
29 # TIMESTAMP 1426495343
30 # usedspace 7250240
31 # logfilsiz 2048
32 # logprimary 6
33 # logsecond 100
# Default levels for the WATO rule. Negative values are interpreted by
# df_check_filesystem_single as *free* space percentages: warn below 20%
# free log space, crit below 10%.
factory_settings["db2_logsizes_default_levels"] = {
    "levels": (-20.0, -10.0)  # Interpreted as free space in df_check_filesystem_single
}
def parse_db2_logsizes(info):
    """Turn the generic db2 section data into one entry per logfile.

    Returns a dict mapping item name -> {field: [values...]}. For databases
    running in DPF mode (split over several nodes, each with its own
    logfile) one entry per node is created, named "<db> DPF <node spec>".
    """
    pre_parsed = parse_db2_dbs(info)
    global_timestamp, databases = pre_parsed[0], pre_parsed[1]

    parsed = {}
    for database, rows in databases.items():
        info_map = {}
        for row in rows:
            info_map.setdefault(row[0], []).append(" ".join(map(str, row[1:])))

        # Fall back to the section-wide timestamp if the instance did not
        # report its own one
        if "TIMESTAMP" not in info_map:
            info_map["TIMESTAMP"] = [global_timestamp]

        # Some databases run in DPF mode: the database is split over several
        # nodes and each node has its own logfile, so we create one service
        # per logfile
        if "node" in info_map:
            for node in info_map["node"]:
                parsed["%s DPF %s" % (database, node)] = info_map
        else:
            parsed[database] = info_map

    return parsed
def inventory_db2_logsizes(parsed):
    """Discover one service for every database that reported a logfile size."""
    for database, database_info in parsed.items():
        if "logfilsiz" in database_info:
            yield database, {}
def check_db2_logsizes(item, params, parsed):
    """Check the logfile usage of one (possibly DPF-partitioned) database.

    Total log space is logfilsiz * (logprimary + logsecond) pages of 4096
    bytes; usage is delegated to df_check_filesystem_single, so the usual
    filesystem levels apply (negative levels = free-space thresholds).

    Raises MKCounterWrapped when the database did not report any data
    (e.g. login failure), so the service goes stale instead of critical.
    """
    db = parsed.get(item)
    if not db:
        raise MKCounterWrapped("Login into database failed")

    # A DPF instance could look like
    # {'TIMESTAMP': ['1439976757'],
    #  u'logfilsiz': ['20480', '20480', '20480', '20480', '20480', '20480'],
    #  u'logprimary': ['13', '13', '13', '13', '13', '13'],
    #  u'logsecond': ['100', '100', '100', '100', '100', '100'],
    #  u'node': ['0 wasv091 0',
    #            '1 wasv091 1',
    #            '2 wasv091 2',
    #            '3 wasv091 3',
    #            '4 wasv091 4',
    #            '5 wasv091 5']}
    if "node" in db:
        # item looks like "<dbname> DPF <node spec>" - recover the node spec
        # to find the matching column in the value lists
        node_key = " ".join(item.split()[2:])
        # Default to the first column: previously an unknown/renamed node
        # left data_offset unassigned and crashed with a NameError
        data_offset = 0
        for idx, node in enumerate(db["node"]):
            if node == node_key:
                data_offset = idx
                break
    else:
        data_offset = 0

    timestamp = int(db["TIMESTAMP"][0])

    if "logfilsiz" not in db:
        return 3, "Invalid database info"

    total = int(db["logfilsiz"][data_offset]) * (int(db["logprimary"][data_offset]) + \
                int(db["logsecond"][data_offset])) * 4096
    free = total - int(db["usedspace"][data_offset])

    return df_check_filesystem_single(
        item, total / 1024**2, free / 1024**2, 0, None, None, params, this_time=timestamp)
check_info['db2_logsizes'] = {
    "parse_function": parse_db2_logsizes,
    "service_description": "DB2 Logsize %s",
    "check_function": check_db2_logsizes,
    "inventory_function": inventory_db2_logsizes,
    "group": "db2_logsize",
    "has_perfdata": True,
    # Must match the factory_settings key registered at the top of this
    # file; the former value "db2_logsize_default_levels" (missing the 's')
    # meant the factory defaults were never applied.
    "default_levels_variable": "db2_logsizes_default_levels",
    "includes": ["size_trend.include", "df.include", "db2.include"],
}