2 # -*- encoding: utf-8; py-indent-offset: 4 -*-
3 # +------------------------------------------------------------------+
4 # | ____ _ _ __ __ _ __ |
5 # | / ___| |__ ___ ___| | __ | \/ | |/ / |
6 # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
7 # | | |___| | | | __/ (__| < | | | | . \ |
8 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
10 # | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
11 # +------------------------------------------------------------------+
13 # This file is part of Check_MK.
14 # The official homepage is at http://mathias-kettner.de/check_mk.
16 # check_mk is free software; you can redistribute it and/or modify it
17 # under the terms of the GNU General Public License as published by
18 # the Free Software Foundation in version 2. check_mk is distributed
19 # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
20 # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
21 # PARTICULAR PURPOSE. See the GNU General Public License for more de-
22 # tails. You should have received a copy of the GNU General Public
23 # License along with GNU Make; see the file COPYING. If not, write
24 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
25 # Boston, MA 02110-1301 USA.
37 import cmk
.utils
.paths
38 import cmk
.utils
.debug
39 import cmk
.utils
.man_pages
as man_pages
40 from cmk
.utils
.exceptions
import MKGeneralException
43 import cmk_base
.config
as config
45 import cmk_base
.core_config
as core_config
46 import cmk_base
.snmp
as snmp
47 import cmk_base
.snmp_utils
as snmp_utils
48 import cmk_base
.discovery
as discovery
49 import cmk_base
.check_table
as check_table
50 from cmk_base
.automations
import automations
, Automation
, MKAutomationError
51 import cmk_base
.check_utils
52 import cmk_base
.autochecks
53 import cmk_base
.nagios_utils
54 from cmk_base
.core_factory
import create_core
55 import cmk_base
.check_api_utils
as check_api_utils
56 import cmk_base
.check_api
as check_api
57 import cmk_base
.parent_scan
58 import cmk_base
.notify
as notify
59 import cmk_base
.ip_lookup
as ip_lookup
60 import cmk_base
.data_sources
as data_sources
class DiscoveryAutomation(Automation):
    """Common base for automations that change the discovered services of a host."""

    def _trigger_discovery_check(self, hostname):
        """Schedule a discovery check for the host when the site configuration asks for it."""
        # Periodic discovery checks must be switched on at all ...
        if not (config.inventory_check_autotrigger and config.inventory_check_interval):
            return
        # ... and a cluster without any nodes gets no discovery check.
        if config.is_cluster(hostname) and not config.nodes_of(hostname):
            return
        discovery.schedule_discovery_check(hostname)
class AutomationDiscovery(DiscoveryAutomation):
    """Automation "inventory": run service discovery on a list of hosts.

    NOTE(review): several original source lines are elided in this excerpt
    (argument parsing, counters initialization); confirm against the full file.
    """

    cmd = "inventory"  # TODO: Rename!

    needs_checks = True  # TODO: Can we change this?

    # Does discovery for a list of hosts. Possible values for how:
    # "new" - find only new services (like -I)
    # "remove" - remove exceeding services
    # "fixall" - find new, remove exceeding
    # "refresh" - drop all services and reinventorize
    # Hosts on the list that are offline (unmonitored) will
    def execute(self, args):
        if args[0] == "@raiseerrors":
            # Send stderr (fd 2) to /dev/null while errors are raised to the caller
            os.dup2(os.open("/dev/null", os.O_WRONLY), 2)

        # perform full SNMP scan on SNMP devices?
        if args[0] == "@scan":

        # use cache files if present?
        # TODO: Why is this handling inconsistent with try-inventory?
        if args[0] == "@cache":

        # Raised when not enough positional arguments remain after the @-flags
        raise MKAutomationError("Need two arguments: new|remove|fixall|refresh HOSTNAME")

        for hostname in hostnames:
            # discover_on_host() returns per-host counters and an optional error text
            result, error = discovery.discover_on_host(how, hostname, do_snmp_scan, use_caches,
            result["self_new"], result["self_removed"], result["self_kept"],
            if error is not None:
                failed_hosts[hostname] = error
            # presumably only reached for successfully discovered hosts -- TODO confirm
            self._trigger_discovery_check(hostname)

        return counts, failed_hosts


automations.register(AutomationDiscovery())
# Python 3? use contextlib.redirect_stdout
@contextlib.contextmanager
def redirect_output(where):
    """Redirects stdout/stderr to the given file like object.

    Yields *where* so callers can use ``with redirect_output(buf) as out:``.
    The previous stdout/stderr are restored even if the body raises.
    """
    prev_stdout, prev_stderr = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = where
    # A @contextlib.contextmanager generator must yield exactly once; without
    # the try/finally an exception in the body would leave the streams
    # permanently redirected.
    try:
        yield where
    finally:
        sys.stdout, sys.stderr = prev_stdout, prev_stderr
class AutomationTryDiscovery(Automation):
    """Automation "try-inventory": compute a discovery preview without saving it.

    NOTE(review): several original source lines are elided in this excerpt
    (flag parsing, row unpacking); confirm against the full file.
    """

    cmd = "try-inventory"  # TODO: Rename!

    needs_checks = True  # TODO: Can we change this?

    def execute(self, args):
        # Capture all console output of the discovery so it can be returned
        # to the caller together with the preview table
        with redirect_output(cStringIO.StringIO()) as buf:
            cmk.utils.log.setup_console_logging()
            cmk.utils.log.set_verbosity(1)
            result = self._execute_discovery(args)
            return {"output": buf.getvalue(), "check_table": result}

    def _execute_discovery(self, args):
        if args[0] == '@noscan':
            # Allow even outdated cache files to be used, never contact the host
            data_sources.abstract.DataSource.set_use_outdated_cache_file()
            data_sources.tcp.TCPDataSource.use_only_cache()
        elif args[0] == '@scan':

        if args[0] == '@raiseerrors':

        data_sources.abstract.DataSource.set_may_use_cache_file(use_caches)
        table = discovery.get_check_preview(
            hostname, use_caches=use_caches, do_snmp_scan=do_snmp_scan, on_error=on_error)

        # check_source, check_plugin_name, checkgroup, item, paramstring, params, descr, exitcode, output, perfdata
        for idx, row in enumerate(table):
            # This isinstance check is also done within determine check_params,
            # but the explicit check here saves performance
            if isinstance(params, cmk_base.config.TimespecificParamList):
                new_params = cmk_base.checking.determine_check_params(params)
                # Since the row is a tuple, we cannot simply replace an entry..
                new_row[5] = {"tp_computed_params": {"params": new_params, "computed_at": now}}
                table[idx] = tuple(new_row)


automations.register(AutomationTryDiscovery())
class AutomationSetAutochecks(DiscoveryAutomation):
    """Automation "set-autochecks": replace the autochecks of a host."""

    cmd = "set-autochecks"

    needs_checks = True  # TODO: Can we change this?

    # Set the new list of autochecks. This list is specified by a
    # table of (checktype, item). No parameters are specified. Those
    # are either (1) kept from existing autochecks or (2) computed
    # from a new inventory. Note: we must never convert check parameters
    # from python source code to actual values.
    def execute(self, args):
        # NOTE(review): the assignment of `hostname` (presumably from args)
        # is elided in this excerpt -- confirm against the full file.
        # The new check table is passed as a Python literal on stdin
        new_items = ast.literal_eval(sys.stdin.read())
        discovery.set_autochecks_of(hostname, new_items)
        self._trigger_discovery_check(hostname)


automations.register(AutomationSetAutochecks())
# TODO: Is this automation still needed?
class AutomationGetAutochecks(Automation):
    """Automation "get-autochecks": return the autochecks of a host with
    resolved parameters.

    NOTE(review): the assignments of `hostname` and `result` are elided in
    this excerpt -- confirm against the full file.
    """

    cmd = "get-autochecks"

    needs_checks = True  # TODO: Can we change this?

    def execute(self, args):
        for ct, item, paramstring in discovery.parse_autochecks_file(hostname):
            # Keep the original parameter string and additionally resolve it
            result.append((ct, item, discovery.resolve_paramstring(ct, paramstring), paramstring))


automations.register(AutomationGetAutochecks())
class AutomationRenameHosts(Automation):
    """Automation "rename-hosts": rename hosts in all data files of the site.

    WATO calls this automation after hosts have been renamed in the
    configuration.  It renames autochecks, temporary data, RRDs, core
    history files etc. and reports a count per performed action.

    NOTE(review): many original source lines are elided in this excerpt
    (e.g. the ``cmd`` attribute, the ``def __init__`` header, several
    ``actions = []`` initializations and ``return`` statements); confirm
    against the full file before relying on details below.
    """

        super(AutomationRenameHosts, self).__init__()
        # Per (old, new) pair: history archive files already rewritten while
        # the core was still running
        self._finished_history_files = {}

    # WATO calls this automation when hosts have been renamed. We need to change
    # several file and directory names. This function has no argument but reads
    # Python pair-list from stdin:
    # [("old1", "new1"), ("old2", "new2")])
    def execute(self, args):
        renamings = ast.literal_eval(sys.stdin.read())

        # The history archive can be renamed with running core. We need to keep
        # the list of already handled history archive files, because a new history
        # file may be created by the core during this step. All unhandled files,
        # including the current history files will be handled later when the core
        for oldname, newname in renamings:
            self._finished_history_files[(oldname, newname)] = \
                self._rename_host_in_core_history_archive(oldname, newname)
            if self._finished_history_files[(oldname, newname)]:
                actions.append("history")

        # At this place WATO already has changed it's configuration. All further
        # data might be changed by the still running core. So we need to stop
        core_was_running = self._core_is_running()
        cmk_base.core.do_core_action("stop", quiet=True)

        for oldname, newname in renamings:
            # Autochecks: simply read and write out the file again. We do
            # not store a host name here anymore - but old versions did.
            # by rewriting we get rid of the host name.
            actions += self._rename_host_autochecks(oldname, newname)
            actions += self._rename_host_files(oldname, newname)

        # Start monitoring again
        # force config generation to succeed. The core *must* start.
        # TODO: Can't we drop this hack since we have config warnings now?
        core_config.ignore_ip_lookup_failures()
        # TODO: Clean this up!
        restart = AutomationRestart()
        restart._mode = lambda: "start"

        for hostname in core_config.failed_ip_lookups():
            actions.append("dnsfail-" + hostname)

        # Convert actions into a dictionary { "what" : count }
        for action in actions:
            action_counts.setdefault(action, 0)
            action_counts[action] += 1

    def _core_is_running(self):
        """Check whether the monitoring core is currently running.

        NOTE(review): the ``else:`` branch and final ``return`` are elided
        in this excerpt.
        """
        if config.monitoring_core == "nagios":
            command = cmk.utils.paths.nagios_startscript + " status >/dev/null 2>&1"
            command = "omd status cmc >/dev/null 2>&1"
        code = os.system(command)  # nosec

    def _rename_host_autochecks(self, oldname, newname):
        """Rewrite the autochecks file of *oldname* under the new host name."""
        acpath = cmk.utils.paths.autochecks_dir + "/" + oldname + ".mk"
        if os.path.exists(acpath):
            old_autochecks = discovery.parse_autochecks_file(oldname)
            out = file(cmk.utils.paths.autochecks_dir + "/" + newname + ".mk", "w")
            for ct, item, paramstring in old_autochecks:
                # Keep the parameter string verbatim (never evaluate it here)
                out.write(" (%r, %r, %s),\n" % (ct, item, paramstring))
            os.remove(acpath)  # Remove old file
            actions.append("autochecks")

    def _rename_host_files(self, oldname, newname):
        """Rename all per-host data files/directories (tmp, piggyback, logwatch,
        SNMP walks, inventory, baked agents, agent deployment, OMD data)."""
        # Rename temporary files of the host
        for d in ["cache", "counters"]:
            if self._rename_host_file(cmk.utils.paths.tmp_dir + "/" + d + "/", oldname, newname):

        if self._rename_host_dir(cmk.utils.paths.tmp_dir + "/piggyback/", oldname, newname):
            actions.append("piggyback-load")

        # Rename piggy files *created* by the host
        piggybase = cmk.utils.paths.tmp_dir + "/piggyback/"
        if os.path.exists(piggybase):
            for piggydir in os.listdir(piggybase):
                if self._rename_host_file(piggybase + piggydir, oldname, newname):
                    actions.append("piggyback-pig")

        if self._rename_host_dir(cmk.utils.paths.logwatch_dir, oldname, newname):
            actions.append("logwatch")

        if self._rename_host_file(cmk.utils.paths.snmpwalks_dir, oldname, newname):
            actions.append("snmpwalk")

        if self._rename_host_file(cmk.utils.paths.var_dir + "/inventory", oldname, newname):
            # Also rename the gzipped variant of the inventory file
            self._rename_host_file(cmk.utils.paths.var_dir + "/inventory", oldname + ".gz",
            actions.append("inv")

        if self._rename_host_dir(cmk.utils.paths.var_dir + "/inventory_archive", oldname, newname):
            actions.append("invarch")

        baked_agents_dir = cmk.utils.paths.var_dir + "/agents/"
        have_renamed_agent = False
        if os.path.exists(baked_agents_dir):
            for opsys in os.listdir(baked_agents_dir):
                if self._rename_host_file(baked_agents_dir + opsys, oldname, newname):
                    have_renamed_agent = True
        if have_renamed_agent:
            actions.append("agent")

        deployment_dir = cmk.utils.paths.var_dir + "/agent_deployment/"
        if self._rename_host_file(deployment_dir, oldname, newname):
            actions.append("agent_deployment")

        actions += self._omd_rename_host(oldname, newname)

    def _rename_host_dir(self, basedir, oldname, newname):
        """Move basedir/oldname to basedir/newname, replacing an existing target dir."""
        if os.path.exists(basedir + "/" + oldname):
            if os.path.exists(basedir + "/" + newname):
                shutil.rmtree(basedir + "/" + newname)
            os.rename(basedir + "/" + oldname, basedir + "/" + newname)

    def _rename_host_file(self, basedir, oldname, newname):
        """Move basedir/oldname to basedir/newname, replacing an existing target file."""
        if os.path.exists(basedir + "/" + oldname):
            if os.path.exists(basedir + "/" + newname):
                os.remove(basedir + "/" + newname)
            os.rename(basedir + "/" + oldname, basedir + "/" + newname)

    # This functions could be moved out of Check_MK.
    def _omd_rename_host(self, oldname, newname):
        """Rename the host in OMD level data: PNP perfdata, RRDs, rrdcached
        journal, core history, retention data and NagVis maps."""
        oldregex = self._escape_name_for_regex_matching(oldname)

        # Temporarily stop processing of performance data
        npcd_running = os.path.exists(cmk.utils.paths.omd_root + "/tmp/pnp4nagios/run/npcd.pid")
        os.system("omd stop npcd >/dev/null 2>&1 </dev/null")

        rrdcache_running = os.path.exists(cmk.utils.paths.omd_root + "/tmp/run/rrdcached.sock")
        os.system("omd stop rrdcached >/dev/null 2>&1 </dev/null")

        # Fix pathnames in XML files
        self.rename_host_in_files(
            os.path.join(cmk.utils.paths.omd_root, "var/pnp4nagios/perfdata", oldname, "*.xml"),
            "/perfdata/%s/" % oldregex, "/perfdata/%s/" % newname)

        if self._rename_host_dir(cmk.utils.paths.omd_root + "/var/pnp4nagios/perfdata", oldname,
            actions.append("rrd")

        if self._rename_host_dir(cmk.utils.paths.omd_root + "/var/check_mk/rrd", oldname,
            actions.append("rrd")

        # entries of rrdcached journal
        if self.rename_host_in_files(
                os.path.join(cmk.utils.paths.omd_root, "var/rrdcached/rrd.journal.*"),
                "/(perfdata|rrd)/%s/" % oldregex,
                "/\\1/%s/" % newname,
                extended_regex=True):
            actions.append("rrdcached")

        if self.rename_host_in_files("%s/var/pnp4nagios/perfdata.dump" % cmk.utils.paths.omd_root,
                                     "HOSTNAME::%s " % oldregex,
                                     "HOSTNAME::%s " % newname) or \
           self.rename_host_in_files("%s/var/pnp4nagios/spool/perfdata.*" % cmk.utils.paths.omd_root,
                                     "HOSTNAME::%s " % oldregex,
                                     "HOSTNAME::%s " % newname):
            actions.append("pnpspool")

        os.system("omd start rrdcached >/dev/null 2>&1 </dev/null")

        os.system("omd start npcd >/dev/null 2>&1 </dev/null")

        self._rename_host_in_remaining_core_history_files(oldname, newname)

        # State retention (important for Downtimes, Acknowledgements, etc.)
        if config.monitoring_core == "nagios":
            if self.rename_host_in_files(
                    "%s/var/nagios/retention.dat" % cmk.utils.paths.omd_root,
                    "^host_name=%s$" % oldregex,
                    "host_name=%s" % newname,
                    extended_regex=True):
                actions.append("retention")

            # Create a file "renamed_hosts" with the information about the
            # renaming of the hosts. The core will honor this file when it
            # reads the status file with the saved state.
            file(cmk.utils.paths.var_dir + "/core/renamed_hosts",
                 "w").write("%s\n%s\n" % (oldname, newname))
            actions.append("retention")

        if self.rename_host_in_files(
                "%s/etc/nagvis/maps/*.cfg" % cmk.utils.paths.omd_root,
                "^[[:space:]]*host_name=%s[[:space:]]*$" % oldregex,
                "host_name=%s" % newname,
                extended_regex=True):
            actions.append("nagvis")

    def _rename_host_in_remaining_core_history_files(self, oldname, newname):
        """Perform the rename operation in all history archive files that have not been handled yet"""
        finished_file_paths = self._finished_history_files[(oldname, newname)]
        all_file_paths = set(self._get_core_history_files(only_archive=False))
        todo_file_paths = list(all_file_paths.difference(finished_file_paths))
        return self._rename_host_in_core_history_files(todo_file_paths, oldname, newname)

    def _rename_host_in_core_history_archive(self, oldname, newname):
        """Perform the rename operation in all history archive files"""
        file_paths = self._get_core_history_files(only_archive=True)
        return self._rename_host_in_core_history_files(file_paths, oldname, newname)

    def _get_core_history_files(self, only_archive):
        """Collect history (archive) file paths of both the CMC and Nagios cores.

        NOTE(review): the list literals around the path patterns are elided
        in this excerpt.
        """
            "var/check_mk/core/archive/*",
            "var/nagios/archive/*",
            "var/check_mk/core/history",
            "var/nagios/nagios.log",
        for path_pattern in path_patterns:
            file_paths += glob.glob("%s/%s" % (cmk.utils.paths.omd_root, path_pattern))

    def _rename_host_in_core_history_files(self, file_paths, oldname, newname):
        """Rewrite the host name in the given core history files using sed."""
        oldregex = self._escape_name_for_regex_matching(oldname)

        # Logfiles and history files of CMC and Nagios. Problem
        # here: the exact place of the hostname varies between the
        # various log entry lines
        # NOTE(review): the following sed substitutions belong to a
        # multi-line string literal whose delimiters are elided here.
        s/(INITIAL|CURRENT) (HOST|SERVICE) STATE: %(old)s;/\1 \2 STATE: %(new)s;/
        s/(HOST|SERVICE) (DOWNTIME |FLAPPING |)ALERT: %(old)s;/\1 \2ALERT: %(new)s;/
        s/PASSIVE (HOST|SERVICE) CHECK: %(old)s;/PASSIVE \1 CHECK: %(new)s;/
        s/(HOST|SERVICE) NOTIFICATION: ([^;]+);%(old)s;/\1 NOTIFICATION: \2;%(new)s;/

        # The sed script itself is fed through stdin (/dev/fd/0)
        command = ["sed", "-ri", "--file=/dev/fd/0"]
        p = subprocess.Popen(
            command + file_paths,
            stdin=subprocess.PIPE,
            stdout=open(os.devnull, "w"),
            stderr=subprocess.STDOUT,
        p.communicate(sed_commands)
        # TODO: error handling?

        handled_files += file_paths

    # Returns True in case files were found, otherwise False
    def rename_host_in_files(self, path_pattern, old, new, extended_regex=False):
        paths = glob.glob(path_pattern)
        # -r enables extended regular expressions in sed
        extended = ["-r"] if extended_regex else []
            ["sed", "-i"] + extended + ["s@%s@%s@" % (old, new)] + paths,
            stderr=open(os.devnull, "w"))

    def _escape_name_for_regex_matching(self, name):
        """Quote literal dots so the host name matches itself in a regex."""
        return name.replace(".", "[.]")


automations.register(AutomationRenameHosts())
class AutomationAnalyseServices(Automation):
    """Automation "analyse-service": explain where a service's configuration
    comes from (static check, autocheck, classical or active check).

    NOTE(review): many original source lines are elided in this excerpt
    (dict literals of the returned service info, some branch keywords);
    confirm against the full file.
    """

    cmd = "analyse-service"

    needs_checks = True  # TODO: Can we change this?

    def execute(self, args):
        config_cache = config.get_config_cache()
        servicedesc = args[1].decode("utf-8")
        service_info = self._get_service_info(config_cache, hostname, servicedesc)
        # Enrich the result with the effective service labels
        service_info.update({
            "labels": config_cache.labels_of_service(hostname, servicedesc),
            "label_sources": config_cache.label_sources_of_service(hostname, servicedesc),

    # Determine the type of the check, and how the parameters are being
    # TODO: Refactor this huge function
    # TODO: Was ist mit Clustern???
    # TODO: Klappt das mit automatischen verschatten von SNMP-Checks (bei dual Monitoring)
    def _get_service_info(self, config_cache, hostname, servicedesc):
        check_api_utils.set_hostname(hostname)

        # We just consider types of checks that are managed via WATO.
        # We have the following possible types of services:
        # 1. manual checks (static_checks) (currently overriding inventorized checks)
        # 2. inventorized check
        # 3. classical checks
        # Compute effective check table, in order to remove SNMP duplicates
        table = check_table.get_check_table(hostname, remove_duplicates=True)

        for checkgroup_name in config.static_checks:
            for value in self.static_check_rules_of(checkgroup_name, hostname):
                # Parameters are optional
                checktype, item = value
                checktype, item, params = value
                descr = config.service_description(hostname, checktype, item)
                if descr == servicedesc:
                    "checkgroup": checkgroup_name,
                    "checktype": checktype,
                    "parameters": params,

        # TODO: There is a lot of duplicated logic with discovery.py/check_table.py. Clean this
        if config.is_cluster(hostname):
            # For clusters, collect the autochecks of all nodes that map to
            # this cluster for the given service description
            for node in config.nodes_of(hostname):
                for check_plugin_name, item, paramstring in cmk_base.autochecks.read_autochecks_of(
                    descr = config.service_description(node, check_plugin_name, item)
                    if hostname == config_cache.host_of_clustered_service(node, descr):
                        autochecks.append((check_plugin_name, item, paramstring))
        autochecks = cmk_base.autochecks.read_autochecks_of(hostname)

        # 2. Load all autochecks of the host in question and try to find
        for entry in autochecks:
            ct, item, params = entry  # new format without host name

            if (ct, item) not in table:
                continue  # this is a removed duplicate or clustered service

            descr = config.service_description(hostname, ct, item)
            if descr == servicedesc:
                dlv = config.check_info[ct].get("default_levels_variable")
                fs = config.factory_settings.get(dlv, None)

                check_parameters = config.compute_check_parameters(hostname, ct, item, params)
                if isinstance(check_parameters, cmk_base.config.TimespecificParamList):
                    check_parameters = cmk_base.checking.determine_check_params(check_parameters)
                    "tp_computed_params": {
                        "params": check_parameters,
                        "computed_at": time.time()
                "checkgroup": config.check_info[ct].get("group"),
                "inv_parameters": params,
                "factory_settings": fs,
                "parameters": check_parameters,

        # 3. Classical checks
        for nr, entry in enumerate(config.custom_checks):
            # Rules optionally carry an options dict as fourth element
            rule, tags, hosts, options = entry
            if options.get("disabled"):
            rule, tags, hosts = entry

            matching_hosts = config.all_matching_hosts(tags, hosts, with_foreign_hosts=True)
            if hostname in matching_hosts:
                desc = rule["service_description"]
                if desc == servicedesc:
                    if "command_line" in rule:  # Only active checks have a command line
                        result["command_line"] = rule["command_line"]

        for acttype, rules in config.active_checks.items():
            entries = config_cache.host_extra_conf(hostname, rules)
            for params in entries:
                description = config.active_check_service_description(hostname, acttype, params)
                if description == servicedesc:
                    "checktype": acttype,
                    "parameters": params,

        return {}  # not found

    def static_check_rules_of(self, checkgroup_name, hostname):
        """Return the static_checks rules of *checkgroup_name* that match *hostname*."""
        config_cache = config.get_config_cache()
        return config_cache.host_extra_conf(hostname, config.static_checks.get(checkgroup_name, []))


automations.register(AutomationAnalyseServices())
class AutomationAnalyseHost(Automation):
    """Automation "analyse-host": report the effective labels of a host.

    NOTE(review): the ``cmd`` attribute, the assignment of ``host_name`` and
    the enclosing return-dict literal are elided in this excerpt.
    """

    def execute(self, args):
        config_cache = config.get_config_cache()
            "labels": config_cache.get_host_config(host_name).labels,
            "label_sources": config_cache.get_host_config(host_name).label_sources,


automations.register(AutomationAnalyseHost())
class AutomationDeleteHosts(Automation):
    """Automation "delete-hosts": remove the data files of deleted hosts.

    NOTE(review): several original source lines are elided in this excerpt
    (the ``cmd`` attribute, list literals around the path collections and
    the deletion statements); confirm against the full file.
    """

    needs_checks = True  # TODO: Can we change this?

    def execute(self, args):
        # Each positional argument is a host name to clean up
        for hostname in args:
            self._delete_host_files(hostname)

    def _delete_host_files(self, hostname):
        # The inventory_archive as well as the performance data is kept
        # we do not want to loose any historic data for accidently deleted hosts.
        # These files are cleaned up by the disk space mechanism.
            "%s/%s" % (cmk.utils.paths.precompiled_hostchecks_dir, hostname),
            "%s/%s.py" % (cmk.utils.paths.precompiled_hostchecks_dir, hostname),
            "%s/%s.mk" % (cmk.utils.paths.autochecks_dir, hostname),
            "%s/%s" % (cmk.utils.paths.counters_dir, hostname),
            "%s/%s" % (cmk.utils.paths.tcp_cache_dir, hostname),
            "%s/persisted/%s" % (cmk.utils.paths.var_dir, hostname),
            "%s/inventory/%s" % (cmk.utils.paths.var_dir, hostname),
            "%s/inventory/%s.gz" % (cmk.utils.paths.var_dir, hostname),
            "%s/agent_deployment/%s" % (cmk.utils.paths.var_dir, hostname),
        if os.path.exists(path):

        # Each data source keeps its own cache subdirectory per host
        ds_directories = os.listdir(cmk.utils.paths.data_source_cache_dir)
        for data_source_name in ds_directories:
            filename = "%s/%s/%s" % (cmk.utils.paths.data_source_cache_dir, data_source_name,

        # softlinks for baked agents. obsolete packages are removed upon next bake action
        # TODO: Move to bakery code
        baked_agents_dir = cmk.utils.paths.var_dir + "/agents/"
        if os.path.exists(baked_agents_dir):
            for folder in os.listdir(baked_agents_dir):
                if os.path.exists("%s/%s" % (folder, hostname)):
                    os.unlink("%s/%s" % (folder, hostname))

        # logwatch and piggyback folders
            "%s/%s" % (cmk.utils.paths.logwatch_dir, hostname),
            "%s/piggyback/%s" % (cmk.utils.paths.tmp_dir, hostname),
        if os.path.exists(what_dir):
            shutil.rmtree(what_dir)


automations.register(AutomationDeleteHosts())
805 class AutomationRestart(Automation
):
808 needs_checks
= True # TODO: Can we change this?
811 if config
.monitoring_core
== "cmc" and not self
._check
_plugins
_have
_changed
():
812 return "reload" # force reload for cmc
815 # TODO: Cleanup duplicate code with cmk_base.core.do_restart()
816 def execute(self
, args
):
817 # make sure, Nagios does not inherit any open
818 # filedescriptors. This really happens, e.g. if
819 # check_mk is called by WATO via Apache. Nagios inherits
820 # the open file where Apache is listening for incoming
821 # HTTP connections. Really.
822 if config
.monitoring_core
== "nagios":
823 objects_file
= cmk
.utils
.paths
.nagios_objects_file
824 for fd
in range(3, 256):
830 objects_file
= cmk
.utils
.paths
.var_dir
+ "/core/config"
832 # os.closerange(3, 256) --> not available in older Python versions
834 class null_file(object):
835 def write(self
, stuff
):
841 # Deactivate stdout by introducing fake file without filedescriptor
842 old_stdout
= sys
.stdout
843 sys
.stdout
= null_file()
847 if cmk_base
.core
.try_get_activation_lock():
848 raise MKAutomationError("Cannot activate changes. "
849 "Another activation process is currently in progresss")
851 if os
.path
.exists(objects_file
):
852 backup_path
= objects_file
+ ".save"
853 os
.rename(objects_file
, backup_path
)
859 configuration_warnings
= core_config
.create_core_config(core
)
862 from cmk_base
.cee
.agent_bakery
import bake_on_restart
867 except Exception as e
:
869 os
.rename(backup_path
, objects_file
)
870 if cmk
.utils
.debug
.enabled():
872 raise MKAutomationError("Error creating configuration: %s" % e
)
874 if config
.monitoring_core
== "cmc" or cmk_base
.nagios_utils
.do_check_nagiosconfig():
876 os
.remove(backup_path
)
880 cmk_base
.core
.do_core_action(self
._mode
())
882 broken_config_path
= "%s/check_mk_objects.cfg.broken" % cmk
.utils
.paths
.tmp_dir
883 file(broken_config_path
, "w").write(
884 file(cmk
.utils
.paths
.nagios_objects_file
).read())
887 os
.rename(backup_path
, objects_file
)
889 os
.remove(objects_file
)
891 raise MKAutomationError(
892 "Configuration for monitoring core is invalid. Rolling back. "
893 "The broken file has been copied to \"%s\" for analysis." % broken_config_path
)
895 except Exception as e
:
896 if backup_path
and os
.path
.exists(backup_path
):
897 os
.remove(backup_path
)
898 if cmk
.utils
.debug
.enabled():
900 raise MKAutomationError(str(e
))
902 sys
.stdout
= old_stdout
903 return configuration_warnings
def _check_plugins_have_changed(self):
    """Tell whether a local check plugin changed after the last core start.

    Compares the newest mtime below the local checks directory against the
    mtime of the core's pid/lock file.
    """
    newest_plugin_mtime = self._last_modification_in_dir(cmk.utils.paths.local_checks_dir)
    last_core_start = self._time_of_last_core_restart()
    return newest_plugin_mtime > last_core_start
910 def _last_modification_in_dir(self
, dir_path
):
911 max_time
= os
.stat(dir_path
).st_mtime
912 for file_name
in os
.listdir(dir_path
):
913 max_time
= max(max_time
, os
.stat(dir_path
+ "/" + file_name
).st_mtime
)
def _time_of_last_core_restart(self):
    """Return the mtime of the monitoring core's pid/lock file.

    The Microcore (cmc) and Nagios use different run files. Falls back to 0
    when the file does not exist (core apparently never started), so that
    any plugin mtime compares as "newer".
    """
    if config.monitoring_core == "cmc":
        pidfile_path = cmk.utils.paths.omd_root + "/tmp/run/cmc.pid"
    else:
        pidfile_path = cmk.utils.paths.omd_root + "/tmp/lock/nagios.lock"

    if os.path.exists(pidfile_path):
        return os.stat(pidfile_path).st_mtime

    return 0
# Make the restart automation available through the global automation registry.
automations.register(AutomationRestart())
931 class AutomationReload(AutomationRestart
):
935 if self
._check
_plugins
_have
_changed
():
# Make the reload automation available through the global automation registry.
automations.register(AutomationReload())
943 class AutomationGetConfiguration(Automation
):
944 cmd
= "get-configuration"
946 # This needed the checks in the past. This was necessary to get the
947 # default values of check related global settings. This kind of
948 # global settings have been removed from the global settings page
949 # of WATO. We can now disable this (by default).
950 # We need to be careful here, because users may have added their own
951 # global settings related to checks. To deal with this, we check
952 # for requested but missing global variables and load the checks in
953 # case one is missing. When it's still missing then, we silently skip
954 # this option (like before).
def execute(self, args):
    """Return the values of the requested global configuration variables.

    The variable names are read from stdin as a Python literal list (see
    comment below). Only plain values are returned; callables are skipped
    since they cannot be transported as an automation result.
    """
    config.load(with_conf_d=False)

    # We read the list of variable names from stdin since
    # that could be too much for the command line
    variable_names = ast.literal_eval(sys.stdin.read())

    missing_variables = [v for v in variable_names if not hasattr(config, v)]

    if missing_variables:
        # Some requested variables are unknown: they may be check related
        # globals that only exist once the checks are loaded. Load the
        # checks and read the configuration again.
        config.load_all_checks(check_api.get_check_api_context)
        config.load(with_conf_d=False)

    result = {}
    for varname in variable_names:
        if hasattr(config, varname):
            value = getattr(config, varname)
            # Callables cannot be serialized as automation output
            if not hasattr(value, '__call__'):
                result[varname] = value
    return result
# Make the get-configuration automation available through the global registry.
automations.register(AutomationGetConfiguration())
982 class AutomationGetCheckInformation(Automation
):
983 cmd
= "get-check-information"
987 def execute(self
, args
):
988 manuals
= man_pages
.all_man_pages()
991 for check_plugin_name
, check
in config
.check_info
.items():
993 manfile
= manuals
.get(check_plugin_name
)
994 # TODO: Use cmk.utils.man_pages module standard functions to read the title
996 title
= file(manfile
).readline().strip().split(":", 1)[1].strip()
998 title
= check_plugin_name
999 check_infos
[check_plugin_name
] = {"title": title
.decode("utf-8")}
1001 check_infos
[check_plugin_name
]["group"] = check
["group"]
1002 check_infos
[check_plugin_name
]["service_description"] = check
.get(
1003 "service_description", "%s")
1004 check_infos
[check_plugin_name
]["snmp"] = cmk_base
.check_utils
.is_snmp_check(
1006 except Exception as e
:
1007 if cmk
.utils
.debug
.enabled():
1009 raise MKAutomationError(
1010 "Failed to parse man page '%s': %s" % (check_plugin_name
, e
))
# Make the get-check-information automation available through the global registry.
automations.register(AutomationGetCheckInformation())
1017 class AutomationGetRealTimeChecks(Automation
):
1018 cmd
= "get-real-time-checks"
1019 needs_config
= False
1022 def execute(self
, args
):
1023 manuals
= man_pages
.all_man_pages()
1026 for check_plugin_name
, check
in config
.check_info
.items():
1027 if check
["handle_real_time_checks"]:
1028 # TODO: Use cmk.utils.man_pages module standard functions to read the title
1029 title
= check_plugin_name
1031 manfile
= manuals
.get(check_plugin_name
)
1033 title
= file(manfile
).readline().strip().split(":", 1)[1].strip()
1035 if cmk
.utils
.debug
.enabled():
1038 rt_checks
.append((check_plugin_name
,
1039 "%s - %s" % (check_plugin_name
, title
.decode("utf-8"))))
# Make the get-real-time-checks automation available through the global registry.
automations.register(AutomationGetRealTimeChecks())
1047 class AutomationGetCheckManPage(Automation
):
1048 cmd
= "get-check-manpage"
1049 needs_config
= False
1052 def execute(self
, args
):
1054 raise MKAutomationError("Need exactly one argument.")
1056 check_plugin_name
= args
[0]
1057 manpage
= man_pages
.load_man_page(args
[0])
1059 # Add a few pieces of information from check_info. Note: active checks do not
1060 # have an entry in check_info
1061 if check_plugin_name
in config
.check_info
:
1062 manpage
["type"] = "check_mk"
1063 info
= config
.check_info
[check_plugin_name
]
1064 for key
in ["snmp_info", "has_perfdata", "service_description"]:
1066 manpage
[key
] = info
[key
]
1067 if "." in check_plugin_name
:
1068 section_name
= cmk_base
.check_utils
.section_name_of(check_plugin_name
)
1069 if section_name
in config
.check_info
and "snmp_info" in config
.check_info
[
1071 manpage
["snmp_info"] = config
.check_info
[section_name
]["snmp_info"]
1074 manpage
["group"] = info
["group"]
1076 # Assume active check
1077 elif check_plugin_name
.startswith("check_"):
1078 manpage
["type"] = "active"
1080 raise MKAutomationError("Could not detect type of manpage: %s. "
1081 "Maybe the check is missing." % check_plugin_name
)
# Make the get-check-manpage automation available through the global registry.
automations.register(AutomationGetCheckManPage())
1089 class AutomationScanParents(Automation
):
1090 cmd
= "scan-parents"
def execute(self, args):
    """Scan the network parents (gateways) of the given hosts.

    args: timeout, probes, max_ttl, ping_probes followed by the host names.
    Requires the traceroute binary; any scan failure is wrapped into an
    MKAutomationError so callers get a readable message.
    """
    settings = {
        "timeout": int(args[0]),
        "probes": int(args[1]),
        "max_ttl": int(args[2]),
        "ping_probes": int(args[3]),
    }
    hostnames = args[4:]
    if not cmk_base.parent_scan.traceroute_available():
        raise MKAutomationError("Cannot find binary <tt>traceroute</tt> in search path.")

    try:
        gateways = cmk_base.parent_scan.scan_parents_of(
            hostnames, silent=True, settings=settings)
        return gateways
    except Exception as e:
        raise MKAutomationError("%s" % e)
# Make the scan-parents automation available through the global registry.
automations.register(AutomationScanParents())
1116 class AutomationDiagHost(Automation
):
1121 def execute(self
, args
):
1122 hostname
, test
, ipaddress
, snmp_community
= args
[:4]
1123 agent_port
, snmp_timeout
, snmp_retries
= map(int, args
[4:7])
1125 # In 1.5 the tcp connect timeout has been added. The automation may
1126 # be called from a remote site with an older version. For this reason
1127 # we need to deal with the old args.
1129 tcp_connect_timeout
= None
1132 tcp_connect_timeout
= float(args
[7])
1136 snmpv3_auth_proto
= None
1137 snmpv3_security_name
= None
1138 snmpv3_security_password
= None
1139 snmpv3_privacy_proto
= None
1140 snmpv3_privacy_password
= None
1143 snmpv3_use
= args
[9]
1144 if snmpv3_use
in ["authNoPriv", "authPriv"]:
1145 snmpv3_auth_proto
, snmpv3_security_name
, snmpv3_security_password
= args
[10:13]
1147 snmpv3_security_name
= args
[11]
1148 if snmpv3_use
== "authPriv":
1149 snmpv3_privacy_proto
, snmpv3_privacy_password
= args
[13:15]
1153 ipaddress
= ip_lookup
.lookup_ip_address(hostname
)
1155 raise MKGeneralException("Cannot resolve hostname %s into IP address" % hostname
)
1157 ipv6_primary
= config
.is_ipv6_primary(hostname
)
1161 base_cmd
= "ping6" if ipv6_primary
else "ping"
1162 p
= subprocess
.Popen([base_cmd
, "-A", "-i", "0.2", "-c", "2", "-W", "5", ipaddress
],
1163 stdout
=subprocess
.PIPE
,
1164 stderr
=subprocess
.STDOUT
)
1165 response
= p
.stdout
.read()
1166 return (p
.wait(), response
)
1168 elif test
== 'agent':
1169 sources
= data_sources
.DataSources(hostname
, ipaddress
)
1170 sources
.set_max_cachefile_age(config
.check_max_cachefile_age
)
1173 for source
in sources
.get_data_sources():
1174 if isinstance(source
, data_sources
.DSProgramDataSource
) and cmd
:
1175 source
= data_sources
.DSProgramDataSource(hostname
, ipaddress
, cmd
)
1176 elif isinstance(source
, data_sources
.TCPDataSource
):
1177 source
.set_port(agent_port
)
1178 if tcp_connect_timeout
is not None:
1179 source
.set_timeout(tcp_connect_timeout
)
1181 output
+= source
.run_raw()
1185 elif test
== 'traceroute':
1186 family_flag
= "-6" if ipv6_primary
else "-4"
1188 p
= subprocess
.Popen(['traceroute', family_flag
, '-n', ipaddress
],
1189 stdout
=subprocess
.PIPE
,
1190 stderr
=subprocess
.STDOUT
)
1191 except OSError as e
:
1193 return 1, "Cannot find binary <tt>traceroute</tt>."
1196 response
= p
.stdout
.read()
1197 return (p
.wait(), response
)
1199 elif test
.startswith('snmp'):
1201 # ('noAuthNoPriv', "username")
1202 # ('authNoPriv', 'md5', '11111111', '22222222')
1203 # ('authPriv', 'md5', '11111111', '22222222', 'DES', '33333333')
1205 credentials
= config
.snmp_credentials_of(hostname
)
1207 # Insert preconfigured community
1208 if test
== "snmpv3":
1210 snmpv3_credentials
= [snmpv3_use
]
1211 if snmpv3_use
in ["authNoPriv", "authPriv"]:
1212 snmpv3_credentials
.extend(
1213 [snmpv3_auth_proto
, snmpv3_security_name
, snmpv3_security_password
])
1215 snmpv3_credentials
.extend([snmpv3_security_name
])
1216 if snmpv3_use
== "authPriv":
1217 snmpv3_credentials
.extend(
1218 [snmpv3_privacy_proto
, snmpv3_privacy_password
])
1219 credentials
= tuple(snmpv3_credentials
)
1220 elif snmp_community
:
1221 credentials
= snmp_community
1223 # Determine SNMPv2/v3 community
1224 if hostname
not in config
.explicit_snmp_communities
:
1225 communities
= config
.host_extra_conf(hostname
, config
.snmp_communities
)
1226 for entry
in communities
:
1227 if (test
== "snmpv3") and not isinstance(entry
, tuple):
1230 if (test
!= "snmpv3") and isinstance(entry
, tuple):
1237 if test
in ['snmpv2', 'snmpv3']:
1238 is_bulkwalk_host
= True
1239 is_snmpv2or3_without_bulkwalk_host
= False
1240 elif test
== 'snmpv2_nobulk':
1241 is_bulkwalk_host
= False
1242 is_snmpv2or3_without_bulkwalk_host
= True
1243 elif test
== 'snmpv1':
1244 is_bulkwalk_host
= False
1245 is_snmpv2or3_without_bulkwalk_host
= False
1248 return 1, "SNMP command not implemented"
1250 #TODO: What about SNMP management boards?
1251 host_config
= snmp_utils
.SNMPHostConfig(
1252 is_ipv6_primary
=ipv6_primary
,
1254 ipaddress
=ipaddress
,
1255 credentials
=credentials
,
1256 port
=config
.snmp_port_of(hostname
),
1257 is_bulkwalk_host
=is_bulkwalk_host
,
1258 is_snmpv2or3_without_bulkwalk_host
=is_snmpv2or3_without_bulkwalk_host
,
1259 bulk_walk_size_of
=config
.bulk_walk_size_of(hostname
),
1261 'timeout': snmp_timeout
,
1262 'retries': snmp_retries
,
1264 oid_range_limits
=config
.oid_range_limits_of(hostname
),
1266 data
= snmp
.get_snmp_table(
1268 None, ('.1.3.6.1.2.1.1', ['1.0', '4.0', '5.0', '6.0']),
1269 use_snmpwalk_cache
=True)
1272 return 0, 'sysDescr:\t%s\nsysContact:\t%s\nsysName:\t%s\nsysLocation:\t%s\n' % tuple(
1275 return 1, 'Got empty SNMP response'
1278 return 1, "Command not implemented"
1280 except Exception as e
:
1281 if cmk
.utils
.debug
.enabled():
# Make the host diagnose automation available through the global registry.
automations.register(AutomationDiagHost())
1289 class AutomationActiveCheck(Automation
):
1290 cmd
= "active-check"
1294 def execute(self
, args
):
1295 hostname
, plugin
, item
= args
1296 item
= item
.decode("utf-8")
1298 if plugin
== "custom":
1299 custchecks
= config
.host_extra_conf(hostname
, config
.custom_checks
)
1300 for entry
in custchecks
:
1301 if entry
["service_description"] == item
:
1302 command_line
= self
._replace
_core
_macros
(hostname
, entry
.get(
1303 "command_line", ""))
1305 command_line
= core_config
.autodetect_plugin(command_line
)
1306 return self
._execute
_check
_plugin
(command_line
)
1308 return -1, "Passive check - cannot be executed"
1310 rules
= config
.active_checks
.get(plugin
)
1312 entries
= config
.host_extra_conf(hostname
, rules
)
1314 act_info
= config
.active_check_info
[plugin
]
1315 for params
in entries
:
1316 description
= config
.active_check_service_description(
1317 hostname
, plugin
, params
)
1318 if description
== item
:
1319 args
= core_config
.active_check_arguments(
1320 hostname
, description
, act_info
["argument_function"](params
))
1321 command_line
= self
._replace
_core
_macros
(
1322 hostname
, act_info
["command_line"].replace("$ARG1$", args
))
1323 return self
._execute
_check
_plugin
(command_line
)
1325 def _load_resource_file(self
, macros
):
1327 for line
in file(cmk
.utils
.paths
.omd_root
+ "/etc/nagios/resource.cfg"):
1329 if not line
or line
[0] == '#':
1331 varname
, value
= line
.split('=', 1)
1332 macros
[varname
] = value
1334 if cmk
.utils
.debug
.enabled():
1337 # Simulate replacing some of the more important macros of hosts. We
1338 # cannot use dynamic macros, of course. Note: this will not work
1339 # without OMD, since we do not know the value of $USER1$ and $USER2$
1340 # here. We could read the Nagios resource.cfg file, but we do not
1341 # know for sure the place of that either.
def _replace_core_macros(self, hostname, commandline):
    """Substitute the host related core macros in commandline.

    Only statically computable macros can be handled here (see the comment
    above this method); the $USERn$ macros are merged in from the Nagios
    resource.cfg via _load_resource_file().
    """
    config_cache = config.get_config_cache()
    macros = core_config.get_host_macros_from_attributes(
        hostname, core_config.get_host_attributes(hostname, config_cache))
    self._load_resource_file(macros)
    for varname, value in macros.items():
        commandline = commandline.replace(varname, "%s" % value)
    return commandline
1351 def _execute_check_plugin(self
, commandline
):
1353 p
= os
.popen(commandline
+ " 2>&1") # nosec
1354 output
= p
.read().strip()
1363 if status
< 0 or status
> 3:
1365 output
= output
.split("|", 1)[0] # Drop performance data
1366 return status
, output
1368 except Exception as e
:
1369 if cmk
.utils
.debug
.enabled():
1371 return 3, "UNKNOWN - Cannot execute command: %s" % e
# Make the active-check automation available through the global registry.
automations.register(AutomationActiveCheck())
1377 class AutomationUpdateDNSCache(Automation
):
1378 cmd
= "update-dns-cache"
1380 needs_checks
= True # TODO: Can we change this?
def execute(self, args):
    """Renew the DNS lookup cache and return the result of the update."""
    return ip_lookup.update_dns_cache()
# Make the update-dns-cache automation available through the global registry.
automations.register(AutomationUpdateDNSCache())
1389 class AutomationGetAgentOutput(Automation
):
1390 cmd
= "get-agent-output"
1392 needs_checks
= True # TODO: Can we change this?
1394 def execute(self
, args
):
1403 data_sources
.abstract
.DataSource
.set_may_use_cache_file(
1404 not data_sources
.abstract
.DataSource
.is_agent_cache_disabled())
1406 ipaddress
= ip_lookup
.lookup_ip_address(hostname
)
1407 sources
= data_sources
.DataSources(hostname
, ipaddress
)
1408 sources
.set_max_cachefile_age(config
.check_max_cachefile_age
)
1411 for source
in sources
.get_data_sources():
1412 if isinstance(source
, data_sources
.abstract
.CheckMKAgentDataSource
):
1413 agent_output
+= source
.run(hostname
, ipaddress
, get_raw_data
=True)
1416 # Optionally show errors of problematic data sources
1417 for source
in sources
.get_data_sources():
1418 source_state
, source_output
, _source_perfdata
= source
.get_summary_result_for_checking(
1420 if source_state
!= 0:
1422 output
+= "[%s] %s\n" % (source
.id(), source_output
)
1424 host_config
= snmp
.create_snmp_host_config(hostname
)
1427 for walk_oid
in snmp
.oids_to_walk():
1429 for oid
, value
in snmp
.walk_for_export(host_config
, walk_oid
):
1430 lines
.append("%s %s\n" % (oid
, value
))
1431 except Exception as e
:
1432 if cmk
.utils
.debug
.enabled():
1435 output
+= "OID '%s': %s\n" % (oid
, e
)
1437 info
= "".join(lines
)
1438 except Exception as e
:
1440 output
= "Failed to fetch data from %s: %s\n" % (hostname
, e
)
1441 if cmk
.utils
.debug
.enabled():
1444 return success
, output
, info
# Make the get-agent-output automation available through the global registry.
automations.register(AutomationGetAgentOutput())
1450 class AutomationNotificationReplay(Automation
):
1451 cmd
= "notification-replay"
1453 needs_checks
= True # TODO: Can we change this?
def execute(self, args):
    """Replay the notification with the given backlog number (args[0])."""
    return notify.notification_replay_backlog(int(args[0]))
# Make the notification-replay automation available through the global registry.
automations.register(AutomationNotificationReplay())
1463 class AutomationNotificationAnalyse(Automation
):
1464 cmd
= "notification-analyse"
1466 needs_checks
= True # TODO: Can we change this?
def execute(self, args):
    """Analyse the notification with the given backlog number (args[0])."""
    return notify.notification_analyse_backlog(int(args[0]))
# Make the notification-analyse automation available through the global registry.
automations.register(AutomationNotificationAnalyse())
class AutomationGetBulks(Automation):
    """Automation returning the current notification bulks."""
    cmd = "notification-get-bulks"
    needs_config = False
    needs_checks = False

    def execute(self, args):
        # args[0] == "1" restricts the result to ripe bulks only.
        ripe_only = args[0] == "1"
        return notify.find_bulks(ripe_only)
# Make the notification-get-bulks automation available through the global registry.
automations.register(AutomationGetBulks())
1489 class AutomationGetServiceConfigurations(Automation
):
1490 cmd
= "get-service-configurations"
def execute(self, args):
    """Collect the service configuration of all active hosts.

    Returns a dict with the per-host check configuration under "hosts" and
    the plugin-name -> check group mapping under "checkgroup_of_checks".
    """
    result = {"hosts": {}}
    for hostname in config.all_active_hosts():
        result["hosts"][hostname] = self._get_config_for_host(hostname)

    result["checkgroup_of_checks"] = self._get_checkgroup_of_checks()
    return result
def _get_config_for_host(self, hostname):
    """Return the configured checks and active checks of a single host."""
    return {
        "checks": check_table.get_check_table(hostname, remove_duplicates=True),
        "active_checks": self._get_active_checks(hostname)
    }
def _get_active_checks(self, hostname):
    """Return (plugin_type, service_description, params) tuples for the
    active checks configured for hostname."""
    # legacy checks via active_checks
    actchecks = []
    config_cache = config.get_config_cache()
    for acttype, rules in config.active_checks.iteritems():
        entries = config_cache.host_extra_conf(hostname, rules)
        for params in entries:
            description = config.active_check_service_description(hostname, acttype, params)
            actchecks.append((acttype, description, params))
    return actchecks
def _get_checkgroup_of_checks(self):
    """Map each check plugin name to its check group (None if ungrouped)."""
    return {
        plugin_name: info.get("group")
        for plugin_name, info in config.check_info.items()
    }
# Make the get-service-configurations automation available through the global registry.
automations.register(AutomationGetServiceConfigurations())
1529 class AutomationGetLabelsOf(Automation
):
1530 cmd
= "get-labels-of"
1532 needs_checks
= False
def execute(self, args):
    """Return the labels and label sources of a host or a service.

    args: object type ("host" or "service"), the host name and - for
    services - the service description. Raises NotImplementedError for
    any other object type.
    """
    object_type, host_name = args[:2]

    config_cache = config.get_config_cache()

    if object_type == "host":
        # Look the host config up once instead of twice
        host_config = config_cache.get_host_config(host_name)
        return {
            "labels": host_config.labels,
            "label_sources": host_config.label_sources,
        }

    if object_type == "service":
        service_description = args[2].decode("utf-8")
        return {
            "labels": config_cache.labels_of_service(host_name, service_description),
            "label_sources": config_cache.label_sources_of_service(
                host_name, service_description),
        }

    raise NotImplementedError()
# Make the get-labels-of automation available through the global registry.
automations.register(AutomationGetLabelsOf())