2 # -*- encoding: utf-8; py-indent-offset: 4 -*-
3 # +------------------------------------------------------------------+
4 # | ____ _ _ __ __ _ __ |
5 # | / ___| |__ ___ ___| | __ | \/ | |/ / |
6 # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
7 # | | |___| | | | __/ (__| < | | | | . \ |
8 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
10 # | Copyright Mathias Kettner 2018 mk@mathias-kettner.de |
11 # +------------------------------------------------------------------+
13 # This file is part of Check_MK.
14 # The official homepage is at http://mathias-kettner.de/check_mk.
16 # check_mk is free software; you can redistribute it and/or modify it
17 # under the terms of the GNU General Public License as published by
18 # the Free Software Foundation in version 2. check_mk is distributed
19 # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
20 # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
21 # PARTICULAR PURPOSE. See the GNU General Public License for more de-
22 # tails. You should have received a copy of the GNU General Public
23 # License along with GNU Make; see the file COPYING. If not, write
24 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
25 # Boston, MA 02110-1301 USA.
27 Special agent for monitoring Amazon web services (AWS) with Check_MK.
38 from typing
import ( # pylint: disable=unused-import
39 Union
, NamedTuple
, Any
, List
,
41 from pathlib2
import Path
42 import boto3
# type: ignore
43 import botocore
# type: ignore
44 from cmk
.utils
.paths
import tmp_dir
45 import cmk
.utils
.store
as store
46 import cmk
.utils
.password_store
# Accepted string types for section/limit text fields (Py2: byte or unicode strings).
AWSStrings = Union[bytes, unicode]
51 # Rewrite API calls from low-level client to high-level resource:
52 # Boto3 has two distinct levels of APIs. Client (or "low-level") APIs provide
53 # one-to-one mappings to the underlying HTTP API operations. Resource APIs hide
54 # explicit network calls but instead provide resource objects and collections to
55 # access attributes and perform actions.
57 # Note that in this case you do not have to make a second API call to get the
58 # objects; they're available to you as a collection on the bucket. These
59 # collections of subresources are lazily-loaded.
63 # - per region (EC2, EBS, ELB, RDS)
65 # .--overview------------------------------------------------------------.
67 # | _____ _____ _ ____ _(_) _____ __ |
68 # | / _ \ \ / / _ \ '__\ \ / / |/ _ \ \ /\ / / |
69 # | | (_) \ V / __/ | \ V /| | __/\ V V / |
70 # | \___/ \_/ \___|_| \_/ |_|\___| \_/\_/ |
72 # +----------------------------------------------------------------------+
73 # | Overview of sections and dependencies |
74 # '----------------------------------------------------------------------'
84 # |-- EC2SecurityGroups
98 # '-- ELBSummaryGeneric
100 # |-- ELBLabelsGeneric
108 # '-- ELBSummaryGeneric
110 # |-- ELBLabelsGeneric
112 # |-- ELBv2TargetGroups
114 # '-- ELBv2Application, ELBv2Network
116 # EBSLimits,EC2Summary
128 # CloudwatchAlarmsLimits
130 # '-- CloudwatchAlarms
133 # .--for imports---------------------------------------------------------.
135 # | / _| ___ _ __ (_)_ __ ___ _ __ ___ _ __| |_ ___ |
136 # | | |_ / _ \| '__| | | '_ ` _ \| '_ \ / _ \| '__| __/ __| |
137 # | | _| (_) | | | | | | | | | |_) | (_) | | | |_\__ \ |
138 # | |_| \___/|_| |_|_| |_| |_| .__/ \___/|_| \__|___/ |
140 # '----------------------------------------------------------------------'
142 # .--regions--------------------------------------------------------------
145 ("ap-south-1", "Asia Pacific (Mumbai)"),
146 ("ap-northeast-3", "Asia Pacific (Osaka-Local)"),
147 ("ap-northeast-2", "Asia Pacific (Seoul)"),
148 ("ap-southeast-1", "Asia Pacific (Singapore)"),
149 ("ap-southeast-2", "Asia Pacific (Sydney)"),
150 ("ap-northeast-1", "Asia Pacific (Tokyo)"),
151 ("ca-central-1", "Canada (Central)"),
152 ("cn-north-1", "China (Beijing)"),
153 ("cn-northwest-1", "China (Ningxia)"),
154 ("eu-central-1", "EU (Frankfurt)"),
155 ("eu-west-1", "EU (Ireland)"),
156 ("eu-west-2", "EU (London)"),
157 ("eu-west-3", "EU (Paris)"),
158 ("eu-north-1", "EU (Stockholm)"),
159 ("sa-east-1", "South America (Sao Paulo)"),
160 ("us-east-2", "US East (Ohio)"),
161 ("us-east-1", "US East (N. Virginia)"),
162 ("us-west-1", "US West (N. California)"),
163 ("us-west-2", "US West (Oregon)"),
167 # .--EC2 instance types---------------------------------------------------
169 AWSEC2InstGeneralTypes
= [
219 AWSEC2InstPrevGeneralTypes
= [
227 AWSEC2InstMemoryTypes
= [
279 AWSEC2InstPrevMemoryTypes
= [
286 AWSEC2InstComputeTypes
= [
317 AWSEC2InstPrevComputeTypes
= [
324 AWSEC2InstAcceleratedComputeTypes
= [
335 AWSEC2InstStorageTypes
= [
353 # 'hi1.4xlarge' is no longer in the instance type listings,
354 # but some accounts might still have a limit for it
355 AWSEC2InstPrevStorageTypes
= [
360 AWSEC2InstDenseStorageTypes
= [
367 AWSEC2InstGPUTypes
= [
376 AWSEC2InstPrevGPUTypes
= [
380 # note, as of 2016-12-17, these are still in Developer Preview;
381 # there isn't a published instance limit yet, so we'll assume
382 # it's the default...
383 AWSEC2InstFPGATypes
= [
389 AWSEC2InstGeneralTypes
+ AWSEC2InstPrevGeneralTypes
+ AWSEC2InstMemoryTypes
+
390 AWSEC2InstPrevMemoryTypes
+ AWSEC2InstComputeTypes
+ AWSEC2InstPrevComputeTypes
+
391 AWSEC2InstAcceleratedComputeTypes
+ AWSEC2InstStorageTypes
+ AWSEC2InstPrevStorageTypes
+
392 AWSEC2InstDenseStorageTypes
+ AWSEC2InstGPUTypes
+ AWSEC2InstPrevGPUTypes
+ AWSEC2InstFPGATypes
)
# Default per-instance-type EC2 limits: (On-Demand, Reserved, Spot)
AWSEC2LimitsDefault = (20, 20, 5)
398 AWSEC2LimitsSpecial
= {
399 'c4.4xlarge': (10, 20, 5),
400 'c4.8xlarge': (5, 20, 5),
401 'c5.4xlarge': (10, 20, 5),
402 'c5.9xlarge': (5, 20, 5),
403 'c5.18xlarge': (5, 20, 5),
404 'cg1.4xlarge': (2, 20, 5),
405 'cr1.8xlarge': (2, 20, 5),
406 'd2.4xlarge': (10, 20, 5),
407 'd2.8xlarge': (5, 20, 5),
408 'g2.2xlarge': (5, 20, 5),
409 'g2.8xlarge': (2, 20, 5),
410 'g3.4xlarge': (1, 20, 5),
411 'g3.8xlarge': (1, 20, 5),
412 'g3.16xlarge': (1, 20, 5),
413 'h1.8xlarge': (10, 20, 5),
414 'h1.16xlarge': (5, 20, 5),
415 'hi1.4xlarge': (2, 20, 5),
416 'hs1.8xlarge': (2, 20, 0),
417 'i2.2xlarge': (8, 20, 0),
418 'i2.4xlarge': (4, 20, 0),
419 'i2.8xlarge': (2, 20, 0),
420 'i2.xlarge': (8, 20, 0),
421 'i3.2xlarge': (2, 20, 0),
422 'i3.4xlarge': (2, 20, 0),
423 'i3.8xlarge': (2, 20, 0),
424 'i3.16xlarge': (2, 20, 0),
425 'i3.large': (2, 20, 0),
426 'i3.xlarge': (2, 20, 0),
427 'm4.4xlarge': (10, 20, 5),
428 'm4.10xlarge': (5, 20, 5),
429 'm4.16xlarge': (5, 20, 5),
430 'm5.4xlarge': (10, 20, 5),
431 'm5.12xlarge': (5, 20, 5),
432 'm5.24xlarge': (5, 20, 5),
433 'p2.8xlarge': (1, 20, 5),
434 'p2.16xlarge': (1, 20, 5),
435 'p2.xlarge': (1, 20, 5),
436 'p3.2xlarge': (1, 20, 5),
437 'p3.8xlarge': (1, 20, 5),
438 'p3.16xlarge': (1, 20, 5),
439 'p3dn.24xlarge': (1, 20, 5),
440 'r3.4xlarge': (10, 20, 5),
441 'r3.8xlarge': (5, 20, 5),
442 'r4.4xlarge': (10, 20, 5),
443 'r4.8xlarge': (5, 20, 5),
444 'r4.16xlarge': (1, 20, 5),
450 # .--helpers-------------------------------------------------------------.
452 # | | |__ ___| |_ __ ___ _ __ ___ |
453 # | | '_ \ / _ \ | '_ \ / _ \ '__/ __| |
454 # | | | | | __/ | |_) | __/ | \__ \ |
455 # | |_| |_|\___|_| .__/ \___|_| |___/ |
457 # '----------------------------------------------------------------------'
460 def _datetime_converter(o
):
461 if isinstance(o
, datetime
.datetime
):
465 def _chunks(list_
, length
=100):
466 return [list_
[i
:i
+ length
] for i
in xrange(0, len(list_
), length
)]
469 def _get_ec2_piggyback_hostname(inst
, region
):
470 # PrivateIpAddress and InstanceId is available although the instance is stopped
471 return u
"%s-%s-%s" % (inst
['PrivateIpAddress'], region
, inst
['InstanceId'])
475 # .--section API---------------------------------------------------------.
477 # | ___ ___ ___| |_(_) ___ _ __ / \ | _ \_ _| |
478 # | / __|/ _ \/ __| __| |/ _ \| '_ \ / _ \ | |_) | | |
479 # | \__ \ __/ (__| |_| | (_) | | | | / ___ \| __/| | |
480 # | |___/\___|\___|\__|_|\___/|_| |_| /_/ \_\_| |___| |
482 # '----------------------------------------------------------------------'
484 # ---result distributor---------------------------------------------------
487 class ResultDistributor(object):
489 Mediator which distributes results from sections
490 in order to reduce queries to AWS account.
494 self
._colleagues
= []
    def add(self, colleague):
        # Register a section that wants to receive results from other sections.
        self._colleagues.append(colleague)
    def distribute(self, sender, result):
        # Forward *result* to every registered colleague except the sender
        # itself (sections are distinguished by their 'name' property).
        for colleague in self._colleagues:
            if colleague.name != sender.name:
                colleague.receive(sender, result)
505 # ---sections/colleagues--------------------------------------------------
507 AWSSectionResults
= NamedTuple("AWSSectionResults", [
509 ("cache_timestamp", float),
512 AWSSectionResult
= NamedTuple("AWSSectionResult", [
513 ("piggyback_hostname", AWSStrings
),
517 AWSLimit
= NamedTuple("AWSLimit", [
519 ("title", AWSStrings
),
524 AWSColleagueContents
= NamedTuple("AWSColleagueContents", [
526 ("cache_timestamp", float),
529 AWSRawContent
= NamedTuple("AWSRawContent", [
531 ("cache_timestamp", float),
534 AWSComputedContent
= NamedTuple("AWSComputedContent", [
536 ("cache_timestamp", float),
# Base directory for the per-region / per-host section cache files.
AWSCacheFilePath = Path(tmp_dir) / "agents" / "agent_aws"
542 class AWSSection(object):
543 __metaclass__
= abc
.ABCMeta
545 def __init__(self
, client
, region
, config
, distributor
=None):
546 self
._client
= client
547 self
._region
= region
548 self
._config
= config
549 self
._distributor
= ResultDistributor() if distributor
is None else distributor
550 self
._received
_results
= {}
551 self
._cache
_file
_dir
= AWSCacheFilePath
/ self
._region
/ self
._config
.hostname
552 self
._cache
_file
= AWSCacheFilePath
/ self
._region
/ self
._config
.hostname
/ self
.name
554 @abc.abstractproperty
558 @abc.abstractproperty
561 In general the default resolution of AWS metrics is 5 min (300 sec)
562 The default resolution of AWS S3 metrics is 1 day (86400 sec)
563 We use interval property for cached section.
569 return 2 * self
.interval
571 def _send(self
, content
):
572 self
._distributor
.distribute(self
, content
)
574 def receive(self
, sender
, content
):
575 self
._received
_results
.setdefault(sender
.name
, content
)
577 def run(self
, use_cache
=False):
578 colleague_contents
= self
._get
_colleague
_contents
()
580 colleague_contents
, AWSColleagueContents
581 ), "%s: Colleague contents must be of type 'AWSColleagueContents'" % self
.name
583 colleague_contents
.cache_timestamp
,
584 float), "%s: Cache timestamp of colleague contents must be of type 'float'" % self
.name
586 raw_content
= self
._get
_raw
_content
(colleague_contents
, use_cache
=use_cache
)
589 AWSRawContent
), "%s: Raw content must be of type 'AWSRawContent'" % self
.name
591 raw_content
.cache_timestamp
,
592 float), "%s: Cache timestamp of raw content must be of type 'float'" % self
.name
594 computed_content
= self
._compute
_content
(raw_content
, colleague_contents
)
595 assert isinstance(computed_content
, AWSComputedContent
596 ), "%s: Computed content must be of type 'AWSComputedContent'" % self
.name
598 computed_content
.cache_timestamp
,
599 float), "%s: Cache timestamp of computed content must be of type 'float'" % self
.name
601 self
._send
(computed_content
)
602 created_results
= self
._create
_results
(computed_content
)
603 assert isinstance(created_results
,
604 list), "%s: Created results must be fo type 'list'" % self
.name
607 for result
in created_results
:
610 AWSSectionResult
), "%s: Result must be of type 'AWSSectionResult'" % self
.name
612 if not result
.content
:
613 logging
.info("%s: Result is empty or None", self
.name
)
617 result
.piggyback_hostname
, (unicode, str)
618 ), "%s: Piggyback hostname of created result must be of type 'unicode' or 'str'" % self
.name
620 # In the related check plugin aws.include we parse these results and
621 # extend list of json-loaded results, except for labels sections.
622 self
._validate
_result
_content
(result
.content
)
624 final_results
.append(result
)
625 return AWSSectionResults(final_results
, computed_content
.cache_timestamp
)
627 def _get_raw_content(self
, colleague_contents
, use_cache
=False):
628 # Cache is only used if the age is lower than section interval AND
629 # the collected data from colleagues are not newer
630 self
._cache
_file
_dir
.mkdir(parents
=True, exist_ok
=True)
631 if use_cache
and self
._cache
_is
_recent
_enough
(colleague_contents
):
632 raw_content
, cache_timestamp
= self
._read
_from
_cache
()
634 raw_content
= self
._fetch
_raw
_content
(colleague_contents
)
635 # TODO: Write cache only when _compute_section_content succeeded?
637 self
._write
_to
_cache
(raw_content
)
638 cache_timestamp
= time
.time()
639 return AWSRawContent(raw_content
, cache_timestamp
)
641 def _cache_is_recent_enough(self
, colleague_contents
):
642 if not self
._cache
_file
.exists():
643 logging
.info("New cache file %s", self
._cache
_file
)
648 age
= now
- self
._cache
_file
.stat().st_mtime
651 logging
.info("No such file or directory %s (calculate age)", self
._cache
_file
)
654 logging
.info("Cannot calculate cache file age: %s", e
)
657 if age
>= self
.interval
:
658 logging
.info("Cache file %s is outdated", self
._cache
_file
)
661 if colleague_contents
.cache_timestamp
> now
:
662 logging
.info("Colleague data is newer than cache file %s", self
._cache
_file
)
666 def _read_from_cache(self
):
668 with self
._cache
_file
.open(encoding
="utf-8") as f
:
669 raw_content
= f
.read().strip()
671 if e
.errno
== errno
.ENOENT
:
672 logging
.info("No such file or directory %s (read from cache)", self
._cache
_file
)
675 logging
.info("Cannot read from cache file: %s", e
)
678 content
= json
.loads(raw_content
)
679 except ValueError as e
:
680 logging
.info("Cannot load raw content: %s", e
)
682 return content
, self
._cache
_file
.stat().st_mtime
    def _write_to_cache(self, raw_content):
        # Serialize raw content to JSON (datetime objects handled by
        # _datetime_converter) and persist it via Check_MK's store helper.
        json_dump = json.dumps(raw_content, default=_datetime_converter)
        store.save_file(str(self._cache_file), json_dump)
689 def _get_colleague_contents(self
):
690 # type: (Any) -> AWSColleagueContents
692 Receive section contents from colleagues. The results are stored in
693 self._receive_results: {<KEY>: AWSComputedContent}.
694 The relation between two sections must be declared in the related
695 distributor in advance to make this work.
696 Use max. cache_timestamp of all received results for
697 AWSColleagueContents.cache_timestamp
702 def _fetch_raw_content(self
, colleague_contents
):
704 Call API methods, eg. 'response = ec2_client.describe_instances()' and
705 extract content from raw content. Raw contents basically consist of
709 Return raw_result['<KEY>'].
714 def _compute_content(self
, raw_content
, colleague_contents
):
715 # type: (AWSRawContent, Any) -> AWSComputedContent
717 Compute the final content of this section based on the raw content of
718 this section and the content received from the optional colleague
724 def _create_results(self
, computed_content
):
725 # type: (Any) -> List[AWSSectionResult]
728 def _get_response_content(self
, response
, key
, dflt
=None):
734 logging
.info("%s: KeyError; Available keys are %s", self
.name
, response
.keys())
    def _validate_result_content(self, content):
        # Default validation: result content must be a list. Labels sections
        # override this to expect a dict instead.
        assert isinstance(content, list), "%s: Result content must be of type 'list'" % self.name
740 def _prepare_tags_for_api_response(self
, tags
):
742 We need to change the format, in order to filter out instances with specific
743 tags if and only if we already fetched instances, eg. by limits section.
745 [{'Key': KEY, 'Value': VALUE}, ...]
751 tag_name
= tag
['Name']
752 if tag_name
.startswith('tag:'):
753 tag_key
= tag_name
[4:]
756 prepared_tags
.extend([{'Key': tag_key
, 'Value': v
} for v
in tag
['Values']])
760 class AWSSectionLimits(AWSSection
):
761 __metaclass__
= abc
.ABCMeta
763 def __init__(self
, client
, region
, config
, distributor
=None):
764 super(AWSSectionLimits
, self
).__init
__(client
, region
, config
, distributor
=distributor
)
    def _add_limit(self, piggyback_hostname, limit):
        # Collect an AWSLimit under the given piggyback host; callers pass
        # "" for limits that belong to the local (account-level) host.
        assert isinstance(limit, AWSLimit), "%s: Limit must be of type 'AWSLimit'" % self.name
        self._limits.setdefault(piggyback_hostname, []).append(limit)
771 def _create_results(self
, computed_content
):
773 AWSSectionResult(piggyback_hostname
, limits
)
774 for piggyback_hostname
, limits
in self
._limits
.iteritems()
778 class AWSSectionLabels(AWSSection
):
779 __metaclass__
= abc
.ABCMeta
785 def _create_results(self
, computed_content
):
787 computed_content
.content
,
788 dict), "%s: Computed result of Labels section must be of type 'dict'" % self
.name
789 for pb
in computed_content
.content
.iterkeys():
790 assert bool(pb
), "%s: Piggyback hostname is not allowed to be empty" % self
.name
792 AWSSectionResult(piggyback_hostname
, rows
)
793 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
    def _validate_result_content(self, content):
        # Labels sections emit a mapping, not a list as other sections do.
        assert isinstance(content, dict), "%s: Result content must be of type 'dict'" % self.name
800 class AWSSectionGeneric(AWSSection
):
801 __metaclass__
= abc
.ABCMeta
804 class AWSSectionCloudwatch(AWSSection
):
805 __metaclass__
= abc
.ABCMeta
807 def _fetch_raw_content(self
, colleague_contents
):
808 end_time
= time
.time()
809 start_time
= end_time
- self
.period
810 metrics
= self
._get
_metrics
(colleague_contents
)
814 # A single GetMetricData call can include up to 100 MetricDataQuery structures
815 # There's no pagination for this operation:
816 # self._client.can_paginate('get_metric_data') = False
818 for chunk
in _chunks(metrics
):
821 response
= self
._client
.get_metric_data(
822 MetricDataQueries
=chunk
,
823 StartTime
=start_time
,
827 metrics
= self
._get
_response
_content
(response
, 'MetricDataResults')
830 raw_content
.extend(metrics
)
834 def _get_metrics(self
, colleague_contents
):
    def _create_id_for_metric_data_query(self, index, metric_name, *args):
        """Build a unique Id for one MetricDataQuery.

        ID field must be unique in a single call.
        The valid characters are letters, numbers, and underscore.
        The first character must be a lowercase letter.
        Regex: ^[a-z][a-zA-Z0-9_]*$
        """
        return "_".join(["id", str(index)] + list(args) + [metric_name])
848 # .--costs/usage---------------------------------------------------------.
850 # | ___ ___ ___| |_ ___ / / _ ___ __ _ __ _ ___ |
851 # | / __/ _ \/ __| __/ __| / / | | / __|/ _` |/ _` |/ _ \ |
852 # | | (_| (_) \__ \ |_\__ \/ /| |_| \__ \ (_| | (_| | __/ |
853 # | \___\___/|___/\__|___/_/ \__,_|___/\__,_|\__, |\___| |
855 # '----------------------------------------------------------------------'
857 # Interval between 'Start' and 'End' must be a DateInterval. 'End' is exclusive.
859 # 2017-01-01 - 2017-05-01; cost and usage data is retrieved from 2017-01-01 up
860 # to and including 2017-04-30 but not including 2017-05-01.
861 # The GetCostAndUsageRequest operation supports only DAILY and MONTHLY granularities.
864 class CostsAndUsage(AWSSectionGeneric
):
867 return "costs_and_usage"
    def _get_colleague_contents(self):
        # Costs/usage needs no input from other sections.
        return AWSColleagueContents(None, 0.0)
876 def _fetch_raw_content(self
, colleague_contents
):
879 response
= self
._client
.get_cost_and_usage(
881 'Start': time
.strftime(fmt
, time
.gmtime(now
- self
.interval
)),
882 'End': time
.strftime(fmt
, time
.gmtime(now
)),
885 Metrics
=['UnblendedCost'],
888 'Key': 'LINKED_ACCOUNT'
894 return self
._get
_response
_content
(response
, 'ResultsByTime')
    def _compute_content(self, raw_content, colleague_contents):
        # The raw 'ResultsByTime' entries are passed through unchanged.
        return AWSComputedContent(raw_content.content, raw_content.cache_timestamp)
    def _create_results(self, computed_content):
        # Single result on the local host (empty piggyback hostname).
        return [AWSSectionResult("", computed_content.content)]
904 # .--EC2-----------------------------------------------------------------.
905 # | _____ ____ ____ |
906 # | | ____/ ___|___ \ |
908 # | | |__| |___ / __/ |
909 # | |_____\____|_____| |
911 # '----------------------------------------------------------------------'
914 class EC2Limits(AWSSectionLimits
):
    def _get_colleague_contents(self):
        # EC2 limits are computed from API data only; no colleague input.
        return AWSColleagueContents(None, 0.0)
    def _fetch_raw_content(self, colleague_contents):
        # Gather everything needed to compute the EC2 service limits:
        # instances, reservations, elastic IPs, security groups, network
        # interfaces and spot (fleet) requests.
        response = self._client.describe_instances()
        reservations = self._get_response_content(response, 'Reservations')

        response = self._client.describe_reserved_instances()
        reserved_instances = self._get_response_content(response, 'ReservedInstances')

        response = self._client.describe_addresses()
        addresses = self._get_response_content(response, 'Addresses')

        response = self._client.describe_security_groups()
        security_groups = self._get_response_content(response, 'SecurityGroups')

        response = self._client.describe_network_interfaces()
        interfaces = self._get_response_content(response, 'NetworkInterfaces')

        response = self._client.describe_spot_instance_requests()
        spot_inst_requests = self._get_response_content(response, 'SpotInstanceRequests')

        response = self._client.describe_spot_fleet_requests()
        spot_fleet_requests = self._get_response_content(response, 'SpotFleetRequestConfigs')

        return reservations, reserved_instances, addresses, security_groups, interfaces, spot_inst_requests, spot_fleet_requests
    def _compute_content(self, raw_content, colleague_contents):
        reservations, reserved_instances, addresses, security_groups, interfaces, spot_inst_requests, spot_fleet_requests = raw_content.content
        # Index running/known instances and reserved instances by Id for the
        # per-category limit helpers below.
        instances = {inst['InstanceId']: inst for res in reservations for inst in res['Instances']}
        res_instances = {inst['ReservedInstancesId']: inst for inst in reserved_instances}

        self._add_instance_limits(instances, res_instances, spot_inst_requests)
        self._add_addresses_limits(addresses)
        self._add_security_group_limits(instances, security_groups)
        self._add_interface_limits(instances, interfaces)
        self._add_spot_inst_limits(spot_inst_requests)
        self._add_spot_fleet_limits(spot_fleet_requests)
        # Forward the reservations so colleague sections (e.g. ec2_summary)
        # can reuse them without another API call.
        return AWSComputedContent(reservations, raw_content.cache_timestamp)
963 def _add_instance_limits(self
, instances
, res_instances
, spot_inst_requests
):
964 inst_limits
= self
._get
_inst
_limits
(instances
, spot_inst_requests
)
965 res_limits
= self
._get
_res
_inst
_limits
(res_instances
)
970 # subtract reservations from instance usage
971 for inst_az
, inst_types
in inst_limits
.iteritems():
972 if inst_az
not in res_limits
:
973 for inst_type
, count
in inst_types
.iteritems():
974 ondemand_limits
[inst_type
] = ondemand_limits
.get(inst_type
, 0) + count
977 # else we have reservations for this AZ
978 for inst_type
, count
in inst_types
.iteritems():
979 if inst_type
not in res_limits
[inst_az
]:
980 # no reservations for this type
981 ondemand_limits
[inst_type
] = ondemand_limits
.get(inst_type
, 0) + count
984 amount_res_inst_type
= res_limits
[inst_az
][inst_type
]
985 ondemand
= count
- amount_res_inst_type
986 total_ris
+= amount_res_inst_type
987 if count
< amount_res_inst_type
:
990 running_ris
+= amount_res_inst_type
992 # we have unused reservations
994 ondemand_limits
[inst_type
] = ondemand_limits
.get(inst_type
, 0) + ondemand
996 dflt_ondemand_limit
, _reserved_limit
, _spot_limit
= AWSEC2LimitsDefault
998 for inst_type
, count
in ondemand_limits
.iteritems():
999 total_instances
+= count
1000 ondemand_limit
, _reserved_limit
, _spot_limit
= AWSEC2LimitsSpecial
.get(
1001 inst_type
, AWSEC2LimitsDefault
)
1004 AWSLimit("running_ondemand_instances_%s" % inst_type
,
1005 "Running On-Demand %s Instances" % inst_type
, ondemand_limit
, count
))
1008 AWSLimit("running_ondemand_instances_total", "Total Running On-Demand Instances",
1009 dflt_ondemand_limit
, total_instances
))
1011 def _get_inst_limits(self
, instances
, spot_inst_requests
):
1012 spot_instance_ids
= [inst
['InstanceId'] for inst
in spot_inst_requests
]
1014 for inst_id
, inst
in instances
.iteritems():
1015 if inst_id
in spot_instance_ids
:
1017 if inst
['State']['Name'] in ['stopped', 'terminated']:
1019 inst_type
= inst
['InstanceType']
1020 inst_az
= inst
['Placement']['AvailabilityZone']
1021 inst_limits
.setdefault(
1022 inst_az
, {})[inst_type
] = inst_limits
.get(inst_az
, {}).get(inst_type
, 0) + 1
1025 def _get_res_inst_limits(self
, res_instances
):
1027 for res_inst
in res_instances
.itervalues():
1028 if res_inst
['State'] != 'active':
1030 inst_type
= res_inst
['InstanceType']
1031 if inst_type
not in AWSEC2InstTypes
:
1032 logging
.info("%s: Unknown instance type '%s'", self
.name
, inst_type
)
1035 inst_az
= res_inst
['AvailabilityZone']
1036 res_limits
.setdefault(inst_az
, {})[inst_type
] = res_limits
.get(inst_az
, {}).get(
1037 inst_type
, 0) + res_inst
['InstanceCount']
1040 def _add_addresses_limits(self
, addresses
):
1044 for address
in addresses
:
1045 domain
= address
['Domain']
1048 elif domain
== "standard":
1051 "", AWSLimit("vpc_elastic_ip_addresses", "VPC Elastic IP Addresses", 5, vpc_addresses
))
1053 AWSLimit("elastic_ip_addresses", "Elastic IP Addresses", 5, std_addresses
))
1055 def _add_security_group_limits(self
, instances
, security_groups
):
1056 # Security groups for EC2-Classic per instance
1057 # Rules per security group for EC2-Classic
1059 for sec_group
in security_groups
:
1060 vpc_id
= sec_group
['VpcId']
1063 inst
= self
._get
_inst
_assignment
(instances
, 'VpcId', vpc_id
)
1066 inst_id
= _get_ec2_piggyback_hostname(inst
, self
._region
)
1067 key
= (inst_id
, vpc_id
)
1068 sgs_per_vpc
[key
] = sgs_per_vpc
.get(key
, 0) + 1
1071 AWSLimit("vpc_sec_group_rules",
1072 "Rules of VPC security group %s" % sec_group
['GroupName'], 50,
1073 len(sec_group
['IpPermissions'])))
1075 for (inst_id
, vpc_id
), count
in sgs_per_vpc
.iteritems():
1077 inst_id
, AWSLimit("vpc_sec_groups", "Security Groups of VPC %s" % vpc_id
, 500,
1080 def _get_inst_assignment(self
, instances
, key
, assignment
):
1081 for inst
in instances
.itervalues():
1082 if inst
.get(key
) == assignment
:
1085 def _add_interface_limits(self
, instances
, interfaces
):
1086 # These limits are per security groups and
1087 # security groups are per instance
1088 for iface
in interfaces
:
1089 inst
= self
._get
_inst
_assignment
(instances
, 'VpcId', iface
.get('VpcId'))
1093 _get_ec2_piggyback_hostname(inst
, self
._region
),
1095 "if_vpc_sec_group", "VPC security groups of elastic network interface %s" %
1096 iface
['NetworkInterfaceId'], 5, len(iface
['Groups'])))
1098 def _add_spot_inst_limits(self
, spot_inst_requests
):
1099 count_spot_inst_reqs
= 0
1100 for spot_inst_req
in spot_inst_requests
:
1101 if spot_inst_req
['State'] in ['open', 'active']:
1102 count_spot_inst_reqs
+= 1
1104 "", AWSLimit('spot_inst_requests', 'Spot Instance Requests', 20, count_spot_inst_reqs
))
1106 def _add_spot_fleet_limits(self
, spot_fleet_requests
):
1107 active_spot_fleet_requests
= 0
1108 total_target_cap
= 0
1109 for spot_fleet_req
in spot_fleet_requests
:
1110 if spot_fleet_req
['SpotFleetRequestState'] != 'active':
1113 active_spot_fleet_requests
+= 1
1114 total_target_cap
+= spot_fleet_req
['SpotFleetRequestConfig']['TargetCapacity']
1118 AWSLimit('active_spot_fleet_requests', 'Active Spot Fleet Requests', 1000,
1119 active_spot_fleet_requests
))
1122 AWSLimit('spot_fleet_total_target_capacity',
1123 'Spot Fleet Requests Total Target Capacity', 5000, total_target_cap
))
1126 class EC2Summary(AWSSectionGeneric
):
    def __init__(self, client, region, config, distributor=None):
        super(EC2Summary, self).__init__(client, region, config, distributor=distributor)
        # Optional user-configured filters: explicit instance names/IDs or tags.
        self._names = self._config.service_config['ec2_names']
        self._tags = self._config.service_config['ec2_tags']
1134 return "ec2_summary"
    def _get_colleague_contents(self):
        # Reuse reservations collected by the ec2_limits section when present,
        # so instances need not be fetched a second time.
        colleague = self._received_results.get('ec2_limits')
        if colleague and colleague.content:
            return AWSColleagueContents(colleague.content, colleague.cache_timestamp)
        return AWSColleagueContents([], 0.0)
    def _fetch_raw_content(self, colleague_contents):
        # Filter precedence: names apply only when no tags are configured;
        # tags win otherwise; with neither, all instances are fetched.
        if self._tags is None and self._names is not None:
            return self._fetch_instances_filtered_by_names(colleague_contents.content)
        if self._tags is not None:
            return self._fetch_instances_filtered_by_tags(colleague_contents.content)
        return self._fetch_instances_without_filter()
1153 def _fetch_instances_filtered_by_names(self
, col_reservations
):
1154 if col_reservations
:
1156 inst
for res
in col_reservations
for inst
in res
['Instances']
1157 if inst
['InstanceId'] in self
._names
1160 response
= self
._client
.describe_instances(InstanceIds
=self
._names
)
1162 inst
for res
in self
._get
_response
_content
(response
, 'Reservations')
1163 for inst
in res
['Instances']
1167 def _fetch_instances_filtered_by_tags(self
, col_reservations
):
1168 if col_reservations
:
1169 tags
= self
._prepare
_tags
_for
_api
_response
(self
._tags
)
1171 inst
for res
in col_reservations
1172 for inst
in res
['Instances'] for tag
in inst
['Tags'] if tag
in tags
1175 response
= self
._client
.describe_instances(Filters
=self
._tags
)
1177 inst
for res
in self
._get
_response
_content
(response
, 'Reservations')
1178 for inst
in res
['Instances']
1182 def _fetch_instances_without_filter(self
):
1183 response
= self
._client
.describe_instances()
1185 inst
for res
in self
._get
_response
_content
(response
, 'Reservations')
1186 for inst
in res
['Instances']
    def _compute_content(self, raw_content, colleague_contents):
        # Re-key the fetched instances by their piggyback host name.
        return AWSComputedContent(
            self._format_instances(raw_content.content), raw_content.cache_timestamp)
    def _format_instances(self, instances):
        # Map piggyback host name -> instance dict.
        return {_get_ec2_piggyback_hostname(inst, self._region): inst for inst in instances}
    def _create_results(self, computed_content):
        # Emit the raw instance dicts on the local host (no piggyback).
        return [AWSSectionResult("", computed_content.content.values())]
1200 class EC2Labels(AWSSectionLabels
):
    def _get_colleague_contents(self):
        # Labels are only fetched for instances known to the ec2_summary section.
        colleague = self._received_results.get('ec2_summary')
        if colleague and colleague.content:
            return AWSColleagueContents(colleague.content, colleague.cache_timestamp)
        return AWSColleagueContents({}, 0.0)
1211 def _fetch_raw_content(self
, colleague_contents
):
1212 response
= self
._client
.describe_tags(Filters
=[{
1213 'Name': 'resource-id',
1214 'Values': [inst
['InstanceId'] for inst
in colleague_contents
.content
.itervalues()],
1216 return self
._get
_response
_content
(response
, 'Tags')
1218 def _compute_content(self
, raw_content
, colleague_contents
):
1219 inst_id_to_ec2_piggyback_hostname_map
= {
1220 inst
['InstanceId']: ec2_instance_id
1221 for ec2_instance_id
, inst
in colleague_contents
.content
.iteritems()
1224 computed_content
= {}
1225 for tag
in raw_content
.content
:
1226 ec2_piggyback_hostname
= inst_id_to_ec2_piggyback_hostname_map
.get(tag
['ResourceId'])
1227 if not ec2_piggyback_hostname
:
1229 computed_content
.setdefault(ec2_piggyback_hostname
, {}).setdefault(
1230 tag
['Key'], tag
['Value'])
1231 return AWSComputedContent(computed_content
, raw_content
.cache_timestamp
)
1234 class EC2SecurityGroups(AWSSectionGeneric
):
    def __init__(self, client, region, config, distributor=None):
        super(EC2SecurityGroups, self).__init__(client, region, config, distributor=distributor)
        # Same optional name/tag filters as the EC2 summary section.
        self._names = self._config.service_config['ec2_names']
        self._tags = self._config.service_config['ec2_tags']
1242 return "ec2_security_groups"
    def _get_colleague_contents(self):
        # Security groups are assigned to the instances collected by ec2_summary.
        colleague = self._received_results.get('ec2_summary')
        if colleague and colleague.content:
            return AWSColleagueContents(colleague.content, colleague.cache_timestamp)
        return AWSColleagueContents({}, 0.0)
1254 def _fetch_raw_content(self
, colleague_contents
):
1255 response
= self
._describe
_security
_groups
()
1257 group
['GroupId']: group
1258 for group
in self
._get
_response
_content
(response
, 'SecurityGroups')
1261 def _describe_security_groups(self
):
1262 if self
._names
is not None:
1263 return self
._client
.describe_security_groups(InstanceIds
=self
._names
)
1264 elif self
._tags
is not None:
1265 return self
._client
.describe_security_groups(Filters
=self
._tags
)
1266 return self
._client
.describe_security_groups()
1268 def _compute_content(self
, raw_content
, colleague_contents
):
1269 content_by_piggyback_hosts
= {}
1270 for instance_name
, instance
in colleague_contents
.content
.iteritems():
1271 for security_group_from_instance
in instance
.get('SecurityGroups', []):
1272 security_group
= raw_content
.content
.get(security_group_from_instance
['GroupId'])
1273 if security_group
is None:
1275 content_by_piggyback_hosts
.setdefault(instance_name
, []).append(security_group
)
1276 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
1278 def _create_results(self
, computed_content
):
1280 AWSSectionResult(piggyback_hostname
, rows
)
1281 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
1285 class EC2(AWSSectionCloudwatch
):
1294 def _get_colleague_contents(self
):
1295 colleague
= self
._received
_results
.get('ec2_summary')
1296 if colleague
and colleague
.content
:
1297 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1298 return AWSColleagueContents({}, 0.0)
1300 def _get_metrics(self
, colleague_contents
):
1302 for idx
, (instance_name
, instance
) in enumerate(colleague_contents
.content
.iteritems()):
1303 instance_id
= instance
['InstanceId']
1304 for metric_name
, unit
in [
1305 ("CPUCreditUsage", "Count"),
1306 ("CPUCreditBalance", "Count"),
1307 ("CPUUtilization", "Percent"),
1308 ("DiskReadOps", "Count"),
1309 ("DiskWriteOps", "Count"),
1310 ("DiskReadBytes", "Bytes"),
1311 ("DiskWriteBytes", "Bytes"),
1312 ("NetworkIn", "Bytes"),
1313 ("NetworkOut", "Bytes"),
1314 ("StatusCheckFailed_Instance", "Count"),
1315 ("StatusCheckFailed_System", "Count"),
1318 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
),
1319 'Label': instance_name
,
1322 'Namespace': 'AWS/EC2',
1323 'MetricName': metric_name
,
1325 'Name': "InstanceId",
1326 'Value': instance_id
,
1329 'Period': self
.period
,
1336 def _compute_content(self
, raw_content
, colleague_contents
):
1337 content_by_piggyback_hosts
= {}
1338 for row
in raw_content
.content
:
1339 content_by_piggyback_hosts
.setdefault(row
['Label'], []).append(row
)
1340 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
1342 def _create_results(self
, computed_content
):
1344 AWSSectionResult(piggyback_hostname
, rows
)
1345 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
1350 # .--S3------------------------------------------------------------------.
1357 # '----------------------------------------------------------------------'
1360 class S3Limits(AWSSectionLimits
):
1369 def _get_colleague_contents(self
):
1370 return AWSColleagueContents(None, 0.0)
1372 def _fetch_raw_content(self
, colleague_contents
):
1374 There's no API method for getting account limits thus we have to
1377 response
= self
._client
.list_buckets()
1378 return self
._get
_response
_content
(response
, 'Buckets')
1380 def _compute_content(self
, raw_content
, colleague_contents
):
1381 self
._add
_limit
("", AWSLimit('buckets', 'Buckets', 100, len(raw_content
.content
)))
1382 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
1385 class S3Summary(AWSSectionGeneric
):
1386 def __init__(self
, client
, region
, config
, distributor
=None):
1387 super(S3Summary
, self
).__init
__(client
, region
, config
, distributor
=distributor
)
1388 self
._names
= self
._config
.service_config
['s3_names']
1389 self
._tags
= self
._prepare
_tags
_for
_api
_response
(self
._config
.service_config
['s3_tags'])
1399 def _get_colleague_contents(self
):
1400 colleague
= self
._received
_results
.get('s3_limits')
1401 if colleague
and colleague
.content
:
1402 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1403 return AWSColleagueContents([], 0.0)
1405 def _fetch_raw_content(self
, colleague_contents
):
1407 for bucket
in self
._list
_buckets
(colleague_contents
):
1408 bucket_name
= bucket
['Name']
1410 # We can request buckets globally but if a bucket is located in
1411 # another region we do not get any results
1412 response
= self
._client
.get_bucket_location(Bucket
=bucket_name
)
1413 location
= self
._get
_response
_content
(response
, 'LocationConstraint', dflt
="")
1414 if location
!= self
._region
:
1416 bucket
['LocationConstraint'] = location
1419 # Why do we get the following error while calling these methods:
1420 #_response = self._client.get_public_access_block(Bucket=bucket_name)
1421 #_response = self._client.get_bucket_policy_status(Bucket=bucket_name)
1422 # 'S3' object has no attribute 'get_bucket_policy_status'
1424 response
= self
._client
.get_bucket_tagging(Bucket
=bucket_name
)
1425 except botocore
.exceptions
.ClientError
as e
:
1426 # If there are no tags attached to a bucket we receive a 'ClientError'
1427 logging
.info("%s/%s: No tags set, %s", self
.name
, bucket_name
, e
)
1430 tagging
= self
._get
_response
_content
(response
, 'TagSet')
1431 if self
._matches
_tag
_conditions
(tagging
):
1432 bucket
['Tagging'] = tagging
1433 found_buckets
.append(bucket
)
1434 return found_buckets
1436 def _list_buckets(self
, colleague_contents
):
1437 if self
._tags
is None and self
._names
is not None:
1438 if colleague_contents
.content
:
1440 bucket
for bucket
in colleague_contents
.content
if bucket
['Name'] in self
._names
1442 return [{'Name': n
} for n
in self
._names
]
1444 response
= self
._client
.list_buckets()
1445 return self
._get
_response
_content
(response
, 'Buckets')
1447 def _matches_tag_conditions(self
, tagging
):
1448 if self
._names
is not None:
1450 if self
._tags
is None:
1453 if tag
in self
._tags
:
1457 def _compute_content(self
, raw_content
, colleague_contents
):
1458 return AWSComputedContent({bucket
['Name']: bucket
for bucket
in raw_content
.content
},
1459 raw_content
.cache_timestamp
)
1461 def _create_results(self
, computed_content
):
1462 return [AWSSectionResult("", None)]
1465 class S3(AWSSectionCloudwatch
):
1472 # BucketSizeBytes and NumberOfObjects are available per day
1473 # and must include 00:00h
1476 def _get_colleague_contents(self
):
1477 colleague
= self
._received
_results
.get('s3_summary')
1478 if colleague
and colleague
.content
:
1479 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1480 return AWSColleagueContents({}, 0.0)
1482 def _get_metrics(self
, colleague_contents
):
1484 for idx
, bucket_name
in enumerate(colleague_contents
.content
.iterkeys()):
1485 for metric_name
, unit
, storage_classes
in [
1486 ("BucketSizeBytes", "Bytes", [
1488 "StandardIAStorage",
1489 "ReducedRedundancyStorage",
1491 ("NumberOfObjects", "Count", ["AllStorageTypes"]),
1493 for storage_class
in storage_classes
:
1495 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
,
1497 'Label': bucket_name
,
1500 'Namespace': 'AWS/S3',
1501 'MetricName': metric_name
,
1503 'Name': "BucketName",
1504 'Value': bucket_name
,
1506 'Name': 'StorageType',
1507 'Value': storage_class
,
1510 'Period': self
.period
,
1517 def _compute_content(self
, raw_content
, colleague_contents
):
1518 for row
in raw_content
.content
:
1519 bucket
= colleague_contents
.content
.get(row
['Label'])
1522 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
1524 def _create_results(self
, computed_content
):
1525 return [AWSSectionResult("", computed_content
.content
)]
1528 class S3Requests(AWSSectionCloudwatch
):
1531 return "s3_requests"
1537 def _get_colleague_contents(self
):
1538 colleague
= self
._received
_results
.get('s3_summary')
1539 if colleague
and colleague
.content
:
1540 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1541 return AWSColleagueContents({}, 0.0)
1543 def _get_metrics(self
, colleague_contents
):
1545 for idx
, bucket_name
in enumerate(colleague_contents
.content
.iterkeys()):
1546 for metric_name
, unit
in [
1547 ("AllRequests", "Count"),
1548 ("GetRequests", "Count"),
1549 ("PutRequests", "Count"),
1550 ("DeleteRequests", "Count"),
1551 ("HeadRequests", "Count"),
1552 ("PostRequests", "Count"),
1553 ("SelectRequests", "Count"),
1554 ("SelectScannedBytes", "Bytes"),
1555 ("SelectReturnedBytes", "Bytes"),
1556 ("ListRequests", "Count"),
1557 ("BytesDownloaded", "Bytes"),
1558 ("BytesUploaded", "Bytes"),
1559 ("4xxErrors", "Count"),
1560 ("5xxErrors", "Count"),
1561 ("FirstByteLatency", "Milliseconds"),
1562 ("TotalRequestLatency", "Milliseconds"),
1565 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
),
1566 'Label': bucket_name
,
1569 'Namespace': 'AWS/S3',
1570 'MetricName': metric_name
,
1572 'Name': "BucketName",
1573 'Value': bucket_name
,
1576 'Period': self
.period
,
1577 'Stat': 'Sum', #reports per period
1583 def _compute_content(self
, raw_content
, colleague_contents
):
1584 for row
in raw_content
.content
:
1585 bucket
= colleague_contents
.content
.get(row
['Label'])
1588 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
1590 def _create_results(self
, computed_content
):
1591 return [AWSSectionResult("", computed_content
.content
)]
1595 # .--ELB-----------------------------------------------------------------.
1597 # | | ____| | | __ ) |
1598 # | | _| | | | _ \ |
1599 # | | |___| |___| |_) | |
1600 # | |_____|_____|____/ |
1602 # '----------------------------------------------------------------------'
1605 class ELBLimits(AWSSectionLimits
):
1614 def _get_colleague_contents(self
):
1615 return AWSColleagueContents(None, 0.0)
1617 def _fetch_raw_content(self
, colleague_contents
):
1619 The AWS/ELB API method 'describe_account_limits' provides limit values
1620 but no values about the usage per limit thus we have to gather the usage
1621 values from 'describe_load_balancers'.
1623 response
= self
._client
.describe_load_balancers()
1624 load_balancers
= self
._get
_response
_content
(response
, 'LoadBalancerDescriptions')
1626 response
= self
._client
.describe_account_limits()
1627 limits
= self
._get
_response
_content
(response
, 'Limits')
1628 return load_balancers
, limits
1630 def _compute_content(self
, raw_content
, colleague_contents
):
1631 load_balancers
, limits
= raw_content
.content
1632 limits
= {r
["Name"]: int(r
['Max']) for r
in limits
}
1636 AWSLimit("load_balancers", "Load balancers", limits
['classic-load-balancers'],
1637 len(load_balancers
)))
1639 for load_balancer
in load_balancers
:
1640 dns_name
= load_balancer
['DNSName']
1643 AWSLimit("load_balancer_listeners", "Listeners", limits
['classic-listeners'],
1644 len(load_balancer
['ListenerDescriptions'])))
1647 AWSLimit("load_balancer_registered_instances", "Registered instances",
1648 limits
['classic-registered-instances'], len(load_balancer
['Instances'])))
1649 return AWSComputedContent(load_balancers
, raw_content
.cache_timestamp
)
1652 class ELBSummaryGeneric(AWSSectionGeneric
):
1653 def __init__(self
, client
, region
, config
, distributor
=None, resource
=""):
1654 self
._resource
= resource
1655 super(ELBSummaryGeneric
, self
).__init
__(client
, region
, config
, distributor
=distributor
)
1656 self
._names
= self
._config
.service_config
['%s_names' % resource
]
1657 self
._tags
= self
._prepare
_tags
_for
_api
_response
(
1658 self
._config
.service_config
['%s_tags' % resource
])
1662 return "%s_summary" % self
._resource
1668 def _get_colleague_contents(self
):
1669 colleague
= self
._received
_results
.get('%s_limits' % self
._resource
)
1670 if colleague
and colleague
.content
:
1671 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1672 return AWSColleagueContents([], 0.0)
1674 def _fetch_raw_content(self
, colleague_contents
):
1675 found_load_balancers
= []
1676 for load_balancer
in self
._describe
_load
_balancers
(colleague_contents
):
1677 load_balancer_name
= load_balancer
['LoadBalancerName']
1679 response
= self
._client
.describe_tags(LoadBalancerNames
=[load_balancer_name
])
1680 except botocore
.exceptions
.ClientError
as e
:
1681 # If there are no tags attached to a bucket we receive a 'ClientError'
1682 logging
.info("%s/%s: No tags set, %s", self
.name
, load_balancer_name
, e
)
1686 tag
for tag_descr
in self
._get
_response
_content
(response
, 'TagDescriptions')
1687 for tag
in tag_descr
['Tags']
1689 if self
._matches
_tag
_conditions
(tagging
):
1690 load_balancer
['TagDescriptions'] = tagging
1691 found_load_balancers
.append(load_balancer
)
1692 return found_load_balancers
1694 def _describe_load_balancers(self
, colleague_contents
):
1695 if self
._tags
is None and self
._names
is not None:
1696 if colleague_contents
.content
:
1698 load_balancer
for load_balancer
in colleague_contents
.content
1699 if load_balancer
['LoadBalancerName'] in self
._names
1701 response
= self
._client
.describe_load_balancers(LoadBalancerNames
=self
._names
)
1703 response
= self
._client
.describe_load_balancers()
1704 return self
._get
_response
_content
(response
, 'LoadBalancerDescriptions')
1706 def _matches_tag_conditions(self
, tagging
):
1707 if self
._names
is not None:
1709 if self
._tags
is None:
1712 if tag
in self
._tags
:
1716 def _compute_content(self
, raw_content
, colleague_contents
):
1717 content_by_piggyback_hosts
= {}
1718 for load_balancer
in raw_content
.content
:
1719 content_by_piggyback_hosts
.setdefault(load_balancer
['DNSName'], load_balancer
)
1720 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
1722 def _create_results(self
, computed_content
):
1723 return [AWSSectionResult("", computed_content
.content
.values())]
1726 class ELBLabelsGeneric(AWSSectionLabels
):
1727 def __init__(self
, client
, region
, config
, distributor
=None, resource
=""):
1728 super(ELBLabelsGeneric
, self
).__init
__(client
, region
, config
, distributor
=distributor
)
1729 self
._resource
= resource
1735 def _get_colleague_contents(self
):
1736 colleague
= self
._received
_results
.get('%s_summary' % self
._resource
)
1737 if colleague
and colleague
.content
:
1738 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1739 return AWSColleagueContents({}, 0.0)
1741 def _fetch_raw_content(self
, colleague_contents
):
1742 return colleague_contents
.content
1744 def _compute_content(self
, raw_content
, colleague_contents
):
1745 computed_content
= {
1746 elb_instance_id
: {tag
['Key']: tag
['Value'] for tag
in data
.get('TagDescriptions', [])
1747 } for elb_instance_id
, data
in raw_content
.content
.iteritems()
1749 return AWSComputedContent(computed_content
, raw_content
.cache_timestamp
)
1752 class ELBHealth(AWSSectionGeneric
):
1761 def _get_colleague_contents(self
):
1762 colleague
= self
._received
_results
.get('elb_summary')
1763 if colleague
and colleague
.content
:
1764 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1765 return AWSColleagueContents({}, 0.0)
1767 def _fetch_raw_content(self
, colleague_contents
):
1769 for load_balancer_dns_name
, load_balancer
in colleague_contents
.content
.iteritems():
1770 load_balancer_name
= load_balancer
['LoadBalancerName']
1771 response
= self
._client
.describe_instance_health(LoadBalancerName
=load_balancer_name
)
1772 states
= self
._get
_response
_content
(response
, 'InstanceStates')
1774 load_balancers
.setdefault(load_balancer_dns_name
, states
)
1775 return load_balancers
1777 def _compute_content(self
, raw_content
, colleague_contents
):
1778 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
1780 def _create_results(self
, computed_content
):
1782 AWSSectionResult(piggyback_hostname
, content
)
1783 for piggyback_hostname
, content
in computed_content
.content
.iteritems()
1787 class ELB(AWSSectionCloudwatch
):
1796 def _get_colleague_contents(self
):
1797 colleague
= self
._received
_results
.get('elb_summary')
1798 if colleague
and colleague
.content
:
1799 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1800 return AWSColleagueContents({}, 0.0)
1802 def _get_metrics(self
, colleague_contents
):
1804 for idx
, (load_balancer_dns_name
,
1805 load_balancer
) in enumerate(colleague_contents
.content
.iteritems()):
1806 load_balancer_name
= load_balancer
['LoadBalancerName']
1807 for metric_name
, stat
in [
1808 ("RequestCount", "Sum"),
1809 ("SurgeQueueLength", "Maximum"),
1810 ("SpilloverCount", "Sum"),
1811 ("Latency", "Average"),
1812 ("HTTPCode_ELB_4XX", "Sum"),
1813 ("HTTPCode_ELB_5XX", "Sum"),
1814 ("HTTPCode_Backend_2XX", "Sum"),
1815 ("HTTPCode_Backend_3XX", "Sum"),
1816 ("HTTPCode_Backend_4XX", "Sum"),
1817 ("HTTPCode_Backend_5XX", "Sum"),
1818 ("HealthyHostCount", "Average"),
1819 ("UnHealthyHostCount", "Average"),
1820 ("BackendConnectionErrors", "Sum"),
1823 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
),
1824 'Label': load_balancer_dns_name
,
1827 'Namespace': 'AWS/ELB',
1828 'MetricName': metric_name
,
1830 'Name': "LoadBalancerName",
1831 'Value': load_balancer_name
,
1834 'Period': self
.period
,
1840 def _compute_content(self
, raw_content
, colleague_contents
):
1841 content_by_piggyback_hosts
= {}
1842 for row
in raw_content
.content
:
1843 content_by_piggyback_hosts
.setdefault(row
['Label'], []).append(row
)
1844 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
1846 def _create_results(self
, computed_content
):
1848 AWSSectionResult(piggyback_hostname
, rows
)
1849 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
1854 # .--ELBv2---------------------------------------------------------------.
1855 # | _____ _ ____ ____ |
1856 # | | ____| | | __ )_ _|___ \ |
1857 # | | _| | | | _ \ \ / / __) | |
1858 # | | |___| |___| |_) \ V / / __/ |
1859 # | |_____|_____|____/ \_/ |_____| |
1861 # '----------------------------------------------------------------------'
1864 class ELBv2Limits(AWSSectionLimits
):
1867 return "elbv2_limits"
1873 def _get_colleague_contents(self
):
1874 return AWSColleagueContents(None, 0.0)
1876 def _fetch_raw_content(self
, colleague_contents
):
1878 The AWS/ELBv2 API method 'describe_account_limits' provides limit values
1879 but no values about the usage per limit thus we have to gather the usage
1880 values from 'describe_load_balancers'.
1882 response
= self
._client
.describe_load_balancers()
1883 load_balancers
= self
._get
_response
_content
(response
, 'LoadBalancers')
1885 for load_balancer
in load_balancers
:
1886 lb_arn
= load_balancer
['LoadBalancerArn']
1888 response
= self
._client
.describe_target_groups(LoadBalancerArn
=lb_arn
)
1889 load_balancer
['TargetGroups'] = self
._get
_response
_content
(response
, 'TargetGroups')
1891 response
= self
._client
.describe_listeners(LoadBalancerArn
=lb_arn
)
1892 listeners
= self
._get
_response
_content
(response
, 'Listeners')
1893 load_balancer
['Listeners'] = listeners
1895 if load_balancer
['Type'] == "application":
1897 for listener
in listeners
:
1898 response
= self
._client
.describe_rules(ListenerArn
=listener
['ListenerArn'])
1899 rules
.append(self
._get
_response
_content
(response
, 'Rules'))
1901 # Limit 100 holds for rules which are not default, see AWS docs:
1902 # https://docs.aws.amazon.com/de_de/general/latest/gr/aws_service_limits.html
1903 # > Limits für Elastic Load Balancing
1904 load_balancer
['Rules'] = [rule
for rule
in rules
if not rule
['IsDefault']]
1906 response
= self
._client
.describe_target_groups(LoadBalancerArn
=lb_arn
)
1907 load_balancer
['TargetGroups'] = self
._get
_response
_content
(response
, 'TargetGroups')
1909 response
= self
._client
.describe_account_limits()
1910 limits
= self
._get
_response
_content
(response
, 'Limits')
1911 return load_balancers
, limits
1913 def _compute_content(self
, raw_content
, colleague_contents
):
1914 load_balancers
, limits
= raw_content
.content
1915 limits
= {r
["Name"]: int(r
['Max']) for r
in limits
}
1919 target_groups_count
= 0
1920 for load_balancer
in load_balancers
:
1921 lb_dns_name
= load_balancer
['DNSName']
1922 lb_type
= load_balancer
['Type']
1924 lb_listeners_count
= len(load_balancer
.get('Listeners', []))
1925 lb_target_groups_count
= len(load_balancer
.get('TargetGroups', []))
1926 target_groups_count
+= lb_target_groups_count
1928 if lb_type
== "application":
1931 title
= 'Application'
1934 AWSLimit("application_load_balancer_rules", "Application Load Balancer Rules",
1935 limits
['rules-per-application-load-balancer'],
1936 len(load_balancer
.get('Rules', []))))
1941 "application_load_balancer_certificates",
1942 "Application Load Balancer Certificates", 25,
1944 cert
for cert
in load_balancer
.get('Certificates', [])
1945 if not cert
['IsDefault']
1948 elif lb_type
== "network":
1958 AWSLimit("%s_load_balancer_listeners" % key
, "%s Load Balancer Listeners" % title
,
1959 limits
['listeners-per-%s-load-balancer' % key
], lb_listeners_count
))
1963 AWSLimit("%s_load_balancer_target_groups" % key
,
1964 "%s Load Balancer Target Groups" % title
,
1965 limits
['targets-per-%s-load-balancer' % key
], lb_target_groups_count
))
1969 AWSLimit("application_load_balancers", "Application Load balancers",
1970 limits
['application-load-balancers'], alb_count
))
1974 AWSLimit("network_load_balancers", "Network Load balancers",
1975 limits
['network-load-balancers'], nlb_count
))
1979 AWSLimit("load_balancer_target_groups", "Load balancers Target Groups",
1980 limits
['target-groups'], target_groups_count
))
1981 return AWSComputedContent(load_balancers
, raw_content
.cache_timestamp
)
1984 class ELBv2TargetGroups(AWSSectionGeneric
):
1987 return "elbv2_target_groups"
1993 def _get_colleague_contents(self
):
1994 colleague
= self
._received
_results
.get('elbv2_summary')
1995 if colleague
and colleague
.content
:
1996 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
1997 return AWSColleagueContents({}, 0.0)
1999 def _fetch_raw_content(self
, colleague_contents
):
2001 for load_balancer_dns_name
, load_balancer
in colleague_contents
.content
.iteritems():
2002 load_balancer_type
= load_balancer
.get('Type')
2003 if load_balancer_type
not in ['application', 'network']:
2004 # Just to be sure, that we do not describe target groups of other lbs
2007 load_balancer_arn
= load_balancer
['LoadBalancerArn']
2008 response
= self
._client
.describe_target_groups(LoadBalancerArn
=load_balancer_arn
)
2009 target_groups
= self
._get
_response
_content
(response
, 'TargetGroups')
2010 for target_group
in target_groups
:
2011 response
= self
._client
.describe_target_health(
2012 TargetGroupArn
=target_group
['TargetGroupArn'])
2013 target_group_health_descrs
= self
._get
_response
_content
(
2014 response
, 'TargetHealthDescriptions')
2015 target_group
['TargetHealth'] = target_group_health_descrs
.get('TargetHealth', {})
2017 load_balancers
.setdefault(load_balancer_dns_name
, []).append((load_balancer_type
,
2019 return load_balancers
2021 def _compute_content(self
, raw_content
, colleague_contents
):
2022 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
2024 def _create_results(self
, computed_content
):
2026 AWSSectionResult(piggyback_hostname
, content
)
2027 for piggyback_hostname
, content
in computed_content
.content
.iteritems()
2032 # .--Application ELB-----------------------------------------------------.
2034 # | / \ _ __ _ __ | (_) ___ __ _| |_(_) ___ _ __ |
2035 # | / _ \ | '_ \| '_ \| | |/ __/ _` | __| |/ _ \| '_ \ |
2036 # | / ___ \| |_) | |_) | | | (_| (_| | |_| | (_) | | | | |
2037 # | /_/ \_\ .__/| .__/|_|_|\___\__,_|\__|_|\___/|_| |_| |
2040 # | | ____| | | __ ) |
2041 # | | _| | | | _ \ |
2042 # | | |___| |___| |_) | |
2043 # | |_____|_____|____/ |
2045 # '----------------------------------------------------------------------'
2048 class ELBv2Application(AWSSectionCloudwatch
):
2051 return "elbv2_application"
2057 def _get_colleague_contents(self
):
2058 colleague
= self
._received
_results
.get('elb_summary')
2059 if colleague
and colleague
.content
:
2060 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
2061 return AWSColleagueContents({}, 0.0)
2063 def _get_metrics(self
, colleague_contents
):
2065 for idx
, (load_balancer_dns_name
,
2066 load_balancer
) in enumerate(colleague_contents
.content
.iteritems()):
2067 # arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id
2068 # We need: app/LOAD-BALANCER-NAME/LOAD-BALANCER-ID
2069 load_balancer_dim
= "/".join(load_balancer
['LoadBalancerArn'].split("/")[-3:])
2070 for metric_name
, stat
in [
2071 ('ActiveConnectionCount', 'Sum'),
2072 ('ClientTLSNegotiationErrorCount', 'Sum'),
2073 ('ConsumedLCUs', 'Sum'),
2074 ('HTTP_Fixed_Response_Count', 'Sum'),
2075 ('HTTP_Redirect_Count', 'Sum'),
2076 ('HTTP_Redirect_Url_Limit_Exceeded_Count', 'Sum'),
2077 ('HTTPCode_ELB_3XX_Count', 'Sum'),
2078 ('HTTPCode_ELB_4XX_Count', 'Sum'),
2079 ('HTTPCode_ELB_5XX_Count', 'Sum'),
2080 ('HTTPCode_ELB_500_Count', 'Sum'),
2081 ('HTTPCode_ELB_502_Count', 'Sum'),
2082 ('HTTPCode_ELB_503_Count', 'Sum'),
2083 ('HTTPCode_ELB_504_Count', 'Sum'),
2084 ('IPv6ProcessedBytes', 'Sum'),
2085 ('IPv6RequestCount', 'Sum'),
2086 ('NewConnectionCount', 'Sum'),
2087 ('ProcessedBytes', 'Sum'),
2088 ('RejectedConnectionCount', 'Sum'),
2089 ('RequestCount', 'Sum'),
2090 ('RuleEvaluations', 'Sum'),
2093 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
),
2094 'Label': load_balancer_dns_name
,
2097 'Namespace': 'AWS/ApplicationELB',
2098 'MetricName': metric_name
,
2100 'Name': "LoadBalancer",
2101 'Value': load_balancer_dim
,
2104 'Period': self
.period
,
2110 def _compute_content(self
, raw_content
, colleague_contents
):
2111 content_by_piggyback_hosts
= {}
2112 for row
in raw_content
.content
:
2113 content_by_piggyback_hosts
.setdefault(row
['Label'], []).append(row
)
2114 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
2116 def _create_results(self
, computed_content
):
2118 AWSSectionResult(piggyback_hostname
, rows
)
2119 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
2124 # .--Network ELB---------------------------------------------------------.
2125 # | _ _ _ _ _____ _ ____ |
2126 # | | \ | | ___| |___ _____ _ __| | __ | ____| | | __ ) |
2127 # | | \| |/ _ \ __\ \ /\ / / _ \| '__| |/ / | _| | | | _ \ |
2128 # | | |\ | __/ |_ \ V V / (_) | | | < | |___| |___| |_) | |
2129 # | |_| \_|\___|\__| \_/\_/ \___/|_| |_|\_\ |_____|_____|____/ |
2131 # '----------------------------------------------------------------------'
2134 class ELBv2Network(AWSSectionCloudwatch
):
2137 return "elbv2_network"
2143 def _get_colleague_contents(self
):
2144 colleague
= self
._received
_results
.get('elb_summary')
2145 if colleague
and colleague
.content
:
2146 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
2147 return AWSColleagueContents({}, 0.0)
2149 def _get_metrics(self
, colleague_contents
):
2151 for idx
, (load_balancer_dns_name
,
2152 load_balancer
) in enumerate(colleague_contents
.content
.iteritems()):
2153 # arn:aws:elasticloadbalancing:region:account-id:loadbalancer/net/load-balancer-name/load-balancer-id
2154 # We need: net/LOAD-BALANCER-NAME/LOAD-BALANCER-ID
2155 load_balancer_dim
= "/".join(load_balancer
['LoadBalancerArn'].split("/")[-3:])
2156 for metric_name
, stat
in [
2157 ('ActiveFlowCount', 'Average'),
2158 ('ActiveFlowCount_TLS', 'Average'),
2159 ('ClientTLSNegotiationErrorCount', 'Sum'),
2160 ('ConsumedLCUs', 'Sum'),
2161 ('HealthyHostCount', 'Maximum'),
2162 ('NewFlowCount', 'Sum'),
2163 ('NewFlowCount_TLS', 'Sum'),
2164 ('ProcessedBytes', 'Sum'),
2165 ('ProcessedBytes_TLS', 'Sum'),
2166 ('TargetTLSNegotiationErrorCount', 'Sum'),
2167 ('TCP_Client_Reset_Count', 'Sum'),
2168 ('TCP_ELB_Reset_Count', 'Sum'),
2169 ('TCP_Target_Reset_Count', 'Sum'),
2170 ('UnHealthyHostCount', 'Maximum'),
2173 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
),
2174 'Label': load_balancer_dns_name
,
2177 'Namespace': 'AWS/NetworkELB',
2178 'MetricName': metric_name
,
2180 'Name': "LoadBalancer",
2181 'Value': load_balancer_dim
,
2184 'Period': self
.period
,
2190 def _compute_content(self
, raw_content
, colleague_contents
):
2191 content_by_piggyback_hosts
= {}
2192 for row
in raw_content
.content
:
2193 content_by_piggyback_hosts
.setdefault(row
['Label'], []).append(row
)
2194 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
2196 def _create_results(self
, computed_content
):
2198 AWSSectionResult(piggyback_hostname
, rows
)
2199 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
2204 # .--EBS-----------------------------------------------------------------.
2205 # | _____ ____ ____ |
2206 # | | ____| __ ) ___| |
2207 # | | _| | _ \___ \ |
2208 # | | |___| |_) |__) | |
2209 # | |_____|____/____/ |
2211 # '----------------------------------------------------------------------'
2213 # EBS are attached to EC2 instances. Thus we put the content to related EC2
2214 # instance as piggyback host.
2217 class EBSLimits(AWSSectionLimits
):
2226 def _get_colleague_contents(self
):
2227 return AWSColleagueContents(None, 0.0)
2229 def _fetch_raw_content(self
, colleague_contents
):
2230 response
= self
._client
.describe_volumes()
2231 volumes
= self
._get
_response
_content
(response
, 'Volumes')
2233 response
= self
._client
.describe_snapshots()
2234 snapshots
= self
._get
_response
_content
(response
, 'Snapshots')
2235 return volumes
, snapshots
2237 def _compute_content(self
, raw_content
, colleague_contents
):
2238 volumes
, snapshots
= raw_content
.content
2240 vol_storage_standard
= 0
2246 for volume
in volumes
:
2247 vol_type
= volume
['VolumeType']
2248 vol_size
= volume
['Size']
2249 if vol_type
== 'standard':
2250 vol_storage_standard
+= vol_size
2251 elif vol_type
== 'io1':
2252 vol_storage_io1
+= vol_size
2253 vol_iops_io1
+= volume
['Iops']
2254 elif vol_type
== 'gp2':
2255 vol_storage_gp2
+= vol_size
2256 elif vol_type
== 'sc1':
2257 vol_storage_sc1
+= vol_size
2258 elif vol_type
== 'st1':
2259 vol_storage_st1
+= vol_size
2261 logging
.info("%s: Unhandled volume type: '%s'", self
.name
, vol_type
)
2263 # These are total limits and not instance specific
2264 # Space values are in TiB.
2266 "", AWSLimit('block_store_snapshots', 'Block store snapshots', 100000, len(snapshots
)))
2269 AWSLimit('block_store_space_standard', 'Magnetic volumes space', 300,
2270 vol_storage_standard
))
2272 "", AWSLimit('block_store_space_io1', 'Provisioned IOPS SSD space', 300,
2276 AWSLimit('block_store_iops_io1', 'Provisioned IOPS SSD IO operations per second',
2277 300000, vol_storage_io1
))
2279 "", AWSLimit('block_store_space_gp2', 'General Purpose SSD space', 300,
2281 self
._add
_limit
("", AWSLimit('block_store_space_sc1', 'Cold HDD space', 300,
2285 AWSLimit('block_store_space_st1', 'Throughput Optimized HDD space', 300,
2287 return AWSComputedContent(volumes
, raw_content
.cache_timestamp
)
2290 class EBSSummary(AWSSectionGeneric
):
2291 def __init__(self
, client
, region
, config
, distributor
=None):
2292 super(EBSSummary
, self
).__init
__(client
, region
, config
, distributor
=distributor
)
2293 self
._names
= self
._config
.service_config
['ebs_names']
2294 self
._tags
= self
._config
.service_config
['ebs_tags']
2298 return "ebs_summary"
2304 def _get_colleague_contents(self
):
2305 colleague
= self
._received
_results
.get('ebs_limits')
2307 max_cache_timestamp
= 0.0
2308 if colleague
and colleague
.content
:
2309 max_cache_timestamp
= max(max_cache_timestamp
, colleague
.cache_timestamp
)
2310 volumes
= colleague
.content
2312 colleague
= self
._received
_results
.get('ec2_summary')
2314 if colleague
and colleague
.content
:
2315 max_cache_timestamp
= max(max_cache_timestamp
, colleague
.cache_timestamp
)
2316 instances
= colleague
.content
2318 return AWSColleagueContents((volumes
, instances
), max_cache_timestamp
)
2320 def _fetch_raw_content(self
, colleague_contents
):
2321 col_volumes
, _col_instances
= colleague_contents
.content
2322 if self
._tags
is None and self
._names
is not None:
2323 return self
._fetch
_volumes
_filtered
_by
_names
(col_volumes
)
2324 if self
._tags
is not None:
2325 return self
._fetch
_volumes
_filtered
_by
_tags
(col_volumes
)
2326 return self
._fetch
_volumes
_without
_filter
()
2328 def _fetch_volumes_filtered_by_names(self
, col_volumes
):
2331 vol
['VolumeId']: vol
for vol
in col_volumes
if vol
['VolumeId'] in self
._names
2334 volumes
= self
._format
_volumes
(self
._client
.describe_volumes(VolumeIds
=self
._names
))
2336 self
._format
_volume
_states
(
2337 self
._client
.describe_volume_status(VolumeIds
=self
._names
)))
2339 def _fetch_volumes_filtered_by_tags(self
, col_volumes
):
2341 tags
= self
._prepare
_tags
_for
_api
_response
(self
._tags
)
2343 vol
['VolumeId']: vol
for vol
in col_volumes
for tag
in vol
['Tags'] if tag
in tags
2346 volumes
= self
._format
_volumes
(self
._client
.describe_volumes(Filters
=self
._tags
))
2348 self
._format
_volume
_states
(self
._client
.describe_volume_status(Filters
=self
._tags
)))
2350 def _fetch_volumes_without_filter(self
):
2351 return (self
._format
_volumes
(self
._client
.describe_volumes()),
2352 self
._format
_volume
_states
(self
._client
.describe_volume_status()))
2354 def _format_volumes(self
, response
):
2355 return {r
['VolumeId']: r
for r
in self
._get
_response
_content
(response
, 'Volumes')}
2357 def _format_volume_states(self
, response
):
2358 return {r
['VolumeId']: r
for r
in self
._get
_response
_content
(response
, 'VolumeStatuses')}
2360 def _compute_content(self
, raw_content
, colleague_contents
):
2361 _col_volumes
, col_instances
= colleague_contents
.content
2362 instance_name_mapping
= {v
['InstanceId']: k
for k
, v
in col_instances
.iteritems()}
2364 volumes
, volume_states
= raw_content
.content
2366 for volume_id
in set(volumes
.keys()).union(set(volume_states
.keys())):
2367 volume
= volumes
.get(volume_id
, {})
2368 volume
.update(volume_states
.get(volume_id
, {}))
2369 content
.append(volume
)
2371 content_by_piggyback_hosts
= {}
2373 for attachment
in row
['Attachments']:
2374 attachment_id
= attachment
['InstanceId']
2375 instance_name
= instance_name_mapping
.get(attachment_id
)
2376 if instance_name
is None:
2378 content_by_piggyback_hosts
.setdefault(instance_name
, []).append(row
)
2379 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
2381 def _create_results(self
, computed_content
):
2383 AWSSectionResult(piggyback_hostname
, rows
)
2384 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
2388 class EBS(AWSSectionCloudwatch
):
2397 def _get_colleague_contents(self
):
2398 colleague
= self
._received
_results
.get('ebs_summary')
2399 if colleague
and colleague
.content
:
2400 return AWSColleagueContents([(instance_name
, row
['VolumeId'], row
['VolumeType'])
2401 for instance_name
, rows
in colleague
.content
.iteritems()
2402 for row
in rows
], colleague
.cache_timestamp
)
2403 return AWSColleagueContents([], 0.0)
2405 def _get_metrics(self
, colleague_contents
):
2407 for idx
, (instance_name
, volume_name
, volume_type
) in enumerate(colleague_contents
.content
):
2408 for metric_name
, unit
, volume_types
in [
2409 ("VolumeReadOps", "Count", []),
2410 ("VolumeWriteOps", "Count", []),
2411 ("VolumeReadBytes", "Bytes", []),
2412 ("VolumeWriteBytes", "Bytes", []),
2413 ("VolumeQueueLength", "Count", []),
2414 ("BurstBalance", "Percent", ["gp2", "st1", "sc1"]),
2415 #("VolumeThroughputPercentage", "Percent", ["io1"]),
2416 #("VolumeConsumedReadWriteOps", "Count", ["io1"]),
2417 #("VolumeTotalReadTime", "Seconds", []),
2418 #("VolumeTotalWriteTime", "Seconds", []),
2419 #("VolumeIdleTime", "Seconds", []),
2420 #("VolumeStatus", None, []),
2421 #("IOPerformance", None, ["io1"]),
2423 if volume_types
and volume_type
not in volume_types
:
2426 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
),
2427 'Label': instance_name
,
2430 'Namespace': 'AWS/EBS',
2431 'MetricName': metric_name
,
2434 'Value': volume_name
,
2437 'Period': self
.period
,
2442 metric
['MetricStat']['Unit'] = unit
2443 metrics
.append(metric
)
2446 def _compute_content(self
, raw_content
, colleague_contents
):
2447 content_by_piggyback_hosts
= {}
2448 for row
in raw_content
.content
:
2449 content_by_piggyback_hosts
.setdefault(row
['Label'], []).append(row
)
2450 return AWSComputedContent(content_by_piggyback_hosts
, raw_content
.cache_timestamp
)
2452 def _create_results(self
, computed_content
):
2454 AWSSectionResult(piggyback_hostname
, rows
)
2455 for piggyback_hostname
, rows
in computed_content
.content
.iteritems()
2460 # .--RDS-----------------------------------------------------------------.
2461 # | ____ ____ ____ |
2462 # | | _ \| _ \/ ___| |
2463 # | | |_) | | | \___ \ |
2464 # | | _ <| |_| |___) | |
2465 # | |_| \_\____/|____/ |
2467 # '----------------------------------------------------------------------'
# Maps the quota names reported by the AWS/RDS API method
# 'describe_account_attributes' to (internal limit key, human readable title)
# pairs used when registering AWSLimit entries.
AWSRDSLimitNameMap = {
    "DBClusters": ("db_clusters", "DB clusters"),
    "DBClusterParameterGroups": ("db_cluster_parameter_groups", "DB cluster parameter groups"),
    "DBInstances": ("db_instances", "DB instances"),
    "EventSubscriptions": ("event_subscriptions", "Event subscriptions"),
    "ManualSnapshots": ("manual_snapshots", "Manual snapshots"),
    "OptionGroups": ("option_groups", "Option groups"),
    "DBParameterGroups": ("db_parameter_groups", "DB parameter groups"),
    "ReadReplicasPerMaster": ("read_replica_per_master", "Read replica per master"),
    "ReservedDBInstances": ("reserved_db_instances", "Reserved DB instances"),
    "DBSecurityGroups": ("db_security_groups", "DB security groups"),
    "DBSubnetGroups": ("db_subnet_groups", "DB subnet groups"),
    "SubnetsPerDBSubnetGroup": ("subnet_per_db_subnet_groups", "Subnet per DB subnet groups"),
    "AllocatedStorage": ("allocated_storage", "Allocated storage"),
    "AuthorizationsPerDBSecurityGroup": ("auths_per_db_security_groups",
                                         "Authorizations per DB security group"),
    "DBClusterRoles": ("db_cluster_roles", "DB cluster roles"),
}
2489 class RDSLimits(AWSSectionLimits
):
    def _get_colleague_contents(self):
        # RDS limits are computed from the API alone; no colleague input needed.
        return AWSColleagueContents(None, 0.0)
    def _fetch_raw_content(self, colleague_contents):
        """Fetch RDS account quotas.

        The AWS/RDS API method 'describe_account_attributes' already sends
        limit and usage values, so no additional usage query is needed.
        """
        response = self._client.describe_account_attributes()
        return self._get_response_content(response, 'AccountQuotas')
2509 def _compute_content(self
, raw_content
, colleague_contents
):
2510 for limit
in raw_content
.content
:
2511 quota_name
= limit
['AccountQuotaName']
2512 key
, title
= AWSRDSLimitNameMap
.get(quota_name
, (None, None))
2513 if key
is None or title
is None:
2514 logging
.info("%s: Unhandled account quota name: '%s'", self
.name
, quota_name
)
2516 self
._add
_limit
("", AWSLimit(key
, title
, int(limit
['Max']), int(limit
['Used'])))
2517 return AWSComputedContent(None, 0.0)
2520 class RDSSummary(AWSSectionGeneric
):
    def __init__(self, client, region, config, distributor=None):
        super(RDSSummary, self).__init__(client, region, config, distributor=distributor)
        # Optional command-line filters: restrict the summary either to
        # explicitly given DB instance names or to instances matching tags.
        self._names = self._config.service_config['rds_names']
        self._tags = self._config.service_config['rds_tags']
2528 return "rds_summary"
    def _get_colleague_contents(self):
        # The RDS summary is computed from the API alone; no colleague input.
        return AWSColleagueContents(None, 0.0)
2537 def _fetch_raw_content(self
, colleague_contents
):
2538 response
= self
._describe
_db
_instances
()
2539 return self
._get
_response
_content
(response
, 'DBInstances')
2541 def _describe_db_instances(self
):
2542 if self
._names
is not None:
2544 self
._client
.describe_db_instances(DBInstanceIdentifier
=name
)
2545 for name
in self
._names
2547 elif self
._tags
is not None:
2548 return [self
._client
.describe_db_instances(Filters
=self
._tags
) for name
in self
._names
]
2549 return self
._client
.describe_db_instances()
2551 def _compute_content(self
, raw_content
, colleague_contents
):
2552 return AWSComputedContent(
2553 {instance
['DBInstanceIdentifier']: instance
for instance
in raw_content
.content
},
2554 raw_content
.cache_timestamp
)
    def _create_results(self, computed_content):
        # A single unpiggybacked section row containing all DB instances.
        return [AWSSectionResult("", computed_content.content.values())]
2560 class RDS(AWSSectionCloudwatch
):
2569 def _get_colleague_contents(self
):
2570 colleague
= self
._received
_results
.get('rds_summary')
2571 if colleague
and colleague
.content
:
2572 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
2573 return AWSColleagueContents({}, 0.0)
2575 def _get_metrics(self
, colleague_contents
):
2577 for idx
, instance_id
in enumerate(colleague_contents
.content
.iterkeys()):
2578 for metric_name
, unit
in [
2579 ("BinLogDiskUsage", "Bytes"),
2580 ("BurstBalance", "Percent"),
2581 ("CPUUtilization", "Percent"),
2582 ("CPUCreditUsage", "Count"),
2583 ("CPUCreditBalance", "Count"),
2584 ("DatabaseConnections", "Count"),
2585 ("DiskQueueDepth", "Count"),
2586 ("FailedSQLServerAgentJobsCount", "Count/Second"),
2587 ("NetworkReceiveThroughput", "Bytes/Second"),
2588 ("NetworkTransmitThroughput", "Bytes/Second"),
2589 ("OldestReplicationSlotLag", "Megabytes"),
2590 ("ReadIOPS", "Count/Second"),
2591 ("ReadLatency", "Seconds"),
2592 ("ReadThroughput", "Bytes/Second"),
2593 ("ReplicaLag", "Seconds"),
2594 ("ReplicationSlotDiskUsage", "Megabytes"),
2595 ("TransactionLogsDiskUsage", "Megabytes"),
2596 ("TransactionLogsGeneration", "Megabytes/Second"),
2597 ("WriteIOPS", "Count/Second"),
2598 ("WriteLatency", "Seconds"),
2599 ("WriteThroughput", "Bytes/Second"),
2600 #("FreeableMemory", "Bytes"),
2601 #("SwapUsage", "Bytes"),
2602 #("FreeStorageSpace", "Bytes"),
2603 #("MaximumUsedTransactionIDs", "Count"),
2606 'Id': self
._create
_id
_for
_metric
_data
_query
(idx
, metric_name
),
2607 'Label': instance_id
,
2610 'Namespace': 'AWS/RDS',
2611 'MetricName': metric_name
,
2613 'Name': "DBInstanceIdentifier",
2614 'Value': instance_id
,
2617 'Period': self
.period
,
2622 metric
['MetricStat']['Unit'] = unit
2623 metrics
.append(metric
)
2626 def _compute_content(self
, raw_content
, colleague_contents
):
2627 for row
in raw_content
.content
:
2628 row
.update(colleague_contents
.content
.get(row
['Label'], {}))
2629 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
    def _create_results(self, computed_content):
        # All rows go into a single unpiggybacked section.
        return [AWSSectionResult("", computed_content.content)]
2636 # .--Cloudwatch----------------------------------------------------------.
2638 # | / ___| | ___ _ _ __| |_ ____ _| |_ ___| |__ |
2639 # | | | | |/ _ \| | | |/ _` \ \ /\ / / _` | __/ __| '_ \ |
2640 # | | |___| | (_) | |_| | (_| |\ V V / (_| | || (__| | | | |
2641 # | \____|_|\___/ \__,_|\__,_| \_/\_/ \__,_|\__\___|_| |_| |
2643 # '----------------------------------------------------------------------'
2646 class CloudwatchAlarmsLimits(AWSSectionLimits
):
2649 return "cloudwatch_alarms_limits"
    def _get_colleague_contents(self):
        # Alarm limits are computed from the API alone; no colleague input.
        return AWSColleagueContents(None, 0.0)
2658 def _fetch_raw_content(self
, colleague_contents
):
2659 response
= self
._client
.describe_alarms()
2660 return self
._get
_response
_content
(response
, 'MetricAlarms')
2662 def _compute_content(self
, raw_content
, colleague_contents
):
2664 "", AWSLimit('cloudwatch_alarms', 'Cloudwatch Alarms', 5000, len(raw_content
.content
)))
2665 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
2668 class CloudwatchAlarms(AWSSectionGeneric
):
    def __init__(self, client, region, config, distributor=None):
        super(CloudwatchAlarms, self).__init__(client, region, config, distributor=distributor)
        # Optional list of alarm names from the command line used to filter
        # the reported alarms.
        self._names = self._config.service_config['cloudwatch_alarms']
2675 return "cloudwatch_alarms"
2681 def _get_colleague_contents(self
):
2682 colleague
= self
._received
_results
.get('cloudwatch_alarms_limits')
2683 if colleague
and colleague
.content
:
2684 return AWSColleagueContents(colleague
.content
, colleague
.cache_timestamp
)
2685 return AWSColleagueContents([], 0.0)
2687 def _fetch_raw_content(self
, colleague_contents
):
2689 if colleague_contents
.content
:
2691 alarm
for alarm
in colleague_contents
.content
2692 if alarm
['AlarmName'] in self
._names
2694 response
= self
._client
.describe_alarms(AlarmNames
=self
._names
)
2696 response
= self
._client
.describe_alarms()
2697 return self
._get
_response
_content
(response
, 'MetricAlarms')
2699 def _compute_content(self
, raw_content
, colleague_contents
):
2700 if raw_content
.content
:
2701 return AWSComputedContent(raw_content
.content
, raw_content
.cache_timestamp
)
2702 dflt_alarms
= [{'AlarmName': 'Check_MK/Cloudwatch Alarms', 'StateValue': 'NO_ALARMS'}]
2703 return AWSComputedContent(dflt_alarms
, raw_content
.cache_timestamp
)
    def _create_results(self, computed_content):
        # All alarms go into a single unpiggybacked section.
        return [AWSSectionResult("", computed_content.content)]
2710 # .--sections------------------------------------------------------------.
2712 # | ___ ___ ___| |_(_) ___ _ __ ___ |
2713 # | / __|/ _ \/ __| __| |/ _ \| '_ \/ __| |
2714 # | \__ \ __/ (__| |_| | (_) | | | \__ \ |
2715 # | |___/\___|\___|\__|_|\___/|_| |_|___/ |
2717 # '----------------------------------------------------------------------'
2720 class AWSSections(object):
2721 __metaclass__
= abc
.ABCMeta
2723 def __init__(self
, hostname
, session
, debug
=False):
2724 self
._hostname
= hostname
2725 self
._session
= session
2730 def init_sections(self
, services
, region
, config
):
2733 def _init_client(self
, client_key
):
2735 return self
._session
.client(client_key
)
2736 except (ValueError, botocore
.exceptions
.ClientError
,
2737 botocore
.exceptions
.UnknownServiceError
) as e
:
2738 # If region name is not valid we get a ValueError
2739 # but not in all cases, eg.:
2740 # 1. 'eu-central-' raises a ValueError
2741 # 2. 'foobar' does not raise a ValueError
2742 # In the second case we get an exception raised by botocore
2743 # during we execute an operation, eg. cloudwatch.get_metrics(**kwargs):
2744 # - botocore.exceptions.EndpointConnectionError
2745 logging
.info("Invalid region name or client key %s: %s", client_key
, e
)
2748 def run(self
, use_cache
=True):
2751 for section
in self
._sections
:
2753 section_result
= section
.run(use_cache
=use_cache
)
2754 except AssertionError as e
:
2758 except Exception as e
:
2762 exceptions
.append(e
)
2764 results
.setdefault((section
.name
, section_result
.cache_timestamp
, section
.interval
),
2765 section_result
.results
)
2767 self
._write
_exceptions
(exceptions
)
2768 self
._write
_section
_results
(results
)
2770 def _write_exceptions(self
, exceptions
):
2771 sys
.stdout
.write("<<<aws_exceptions>>>\n")
2773 out
= "\n".join([e
.message
for e
in exceptions
])
2775 out
= "No exceptions"
2776 sys
.stdout
.write("%s: %s\n" % (self
.__class
__.__name
__, out
))
2778 def _write_section_results(self
, results
):
2780 logging
.info("%s: No results or cached data", self
.__class
__.__name
__)
2783 for (section_name
, cache_timestamp
, section_interval
), result
in results
.iteritems():
2785 logging
.info("%s: No results", section_name
)
2788 if not isinstance(result
, list):
2790 "%s: Section result must be of type 'list' containing 'AWSSectionResults'",
2795 if section_interval
> 60:
2796 cached_suffix
= ":cached(%s,%s)" % (int(cache_timestamp
), section_interval
+ 60)
2798 if any([r
.content
for r
in result
]):
2799 self
._write
_section
_result
(section_name
, cached_suffix
, result
)
2801 def _write_section_result(self
, section_name
, cached_suffix
, result
):
2802 if section_name
== "labels":
2803 section_header
= "<<<%s:sep(0)%s>>>\n" % (section_name
, cached_suffix
)
2805 section_header
= "<<<aws_%s%s>>>\n" % (section_name
, cached_suffix
)
2808 write_piggyback_header
= row
.piggyback_hostname\
2809 and row
.piggyback_hostname
!= self
._hostname
2810 if write_piggyback_header
:
2811 sys
.stdout
.write("<<<<%s>>>>\n" % row
.piggyback_hostname
)
2812 sys
.stdout
.write(section_header
)
2813 sys
.stdout
.write("%s\n" % json
.dumps(row
.content
, default
=_datetime_converter
))
2814 if write_piggyback_header
:
2815 sys
.stdout
.write("<<<<>>>>\n")
2818 class AWSSectionsUSEast(AWSSections
):
2820 Some clients like CostExplorer only work with US East region:
2821 https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/ce-api.html
    def init_sections(self, services, region, config):
        # Sets up the sections bound to the us-east-1-only endpoints
        # (currently only CostExplorer).
        #---clients---------------------------------------------------------
        ce_client = self._init_client('ce')

        #---distributors----------------------------------------------------

        #---sections with distributors--------------------------------------

        #---sections--------------------------------------------------------
        ce = CostsAndUsage(ce_client, region, config)

        #---register sections to distributors-------------------------------

        #---register sections for execution---------------------------------
        if 'ce' in services:
            self._sections.append(ce)
2842 class AWSSectionsGeneric(AWSSections
):
2843 def init_sections(self
, services
, region
, config
):
2844 #---clients---------------------------------------------------------
2845 ec2_client
= self
._init
_client
('ec2')
2846 elb_client
= self
._init
_client
('elb')
2847 elbv2_client
= self
._init
_client
('elbv2')
2848 s3_client
= self
._init
_client
('s3')
2849 rds_client
= self
._init
_client
('rds')
2850 cloudwatch_client
= self
._init
_client
('cloudwatch')
2852 #---distributors----------------------------------------------------
2853 ec2_limits_distributor
= ResultDistributor()
2854 ec2_summary_distributor
= ResultDistributor()
2856 elb_limits_distributor
= ResultDistributor()
2857 elb_summary_distributor
= ResultDistributor()
2859 elbv2_limits_distributor
= ResultDistributor()
2860 elbv2_summary_distributor
= ResultDistributor()
2862 ebs_limits_distributor
= ResultDistributor()
2863 ebs_summary_distributor
= ResultDistributor()
2865 s3_limits_distributor
= ResultDistributor()
2866 s3_summary_distributor
= ResultDistributor()
2868 rds_summary_distributor
= ResultDistributor()
2870 cloudwatch_alarms_limits_distributor
= ResultDistributor()
2872 #---sections with distributors--------------------------------------
2873 ec2_limits
= EC2Limits(ec2_client
, region
, config
, ec2_limits_distributor
)
2874 ec2_summary
= EC2Summary(ec2_client
, region
, config
, ec2_summary_distributor
)
2876 ebs_limits
= EBSLimits(ec2_client
, region
, config
, ebs_limits_distributor
)
2877 ebs_summary
= EBSSummary(ec2_client
, region
, config
, ebs_summary_distributor
)
2879 elb_limits
= ELBLimits(elb_client
, region
, config
, elb_limits_distributor
)
2880 elb_summary
= ELBSummaryGeneric(
2881 elb_client
, region
, config
, elb_summary_distributor
, resource
='elb')
2883 elbv2_limits
= ELBv2Limits(elbv2_client
, region
, config
, elbv2_limits_distributor
)
2884 elbv2_summary
= ELBSummaryGeneric(
2885 elbv2_client
, region
, config
, elbv2_summary_distributor
, resource
='elbv2')
2887 s3_limits
= S3Limits(s3_client
, region
, config
, s3_limits_distributor
)
2888 s3_summary
= S3Summary(s3_client
, region
, config
, s3_summary_distributor
)
2890 rds_summary
= RDSSummary(rds_client
, region
, config
, rds_summary_distributor
)
2892 cloudwatch_alarms_limits
= CloudwatchAlarmsLimits(cloudwatch_client
, region
, config
,
2893 cloudwatch_alarms_limits_distributor
)
2895 #---sections--------------------------------------------------------
2896 ec2_labels
= EC2Labels(ec2_client
, region
, config
)
2897 ec2_security_groups
= EC2SecurityGroups(ec2_client
, region
, config
)
2898 ec2
= EC2(cloudwatch_client
, region
, config
)
2900 ebs
= EBS(cloudwatch_client
, region
, config
)
2902 elb_labels
= ELBLabelsGeneric(elb_client
, region
, config
, resource
='elb')
2903 elb_health
= ELBHealth(elb_client
, region
, config
)
2904 elb
= ELB(cloudwatch_client
, region
, config
)
2906 elbv2_labels
= ELBLabelsGeneric(elb_client
, region
, config
, resource
='elbv2')
2907 elbv2_target_groups
= ELBv2TargetGroups(elb_client
, region
, config
)
2908 elbv2_application
= ELBv2Application(cloudwatch_client
, region
, config
)
2909 elbv2_network
= ELBv2Network(cloudwatch_client
, region
, config
)
2911 s3
= S3(cloudwatch_client
, region
, config
)
2912 s3_requests
= S3Requests(cloudwatch_client
, region
, config
)
2914 rds_limits
= RDSLimits(rds_client
, region
, config
)
2915 rds
= RDS(cloudwatch_client
, region
, config
)
2917 cloudwatch_alarms
= CloudwatchAlarms(cloudwatch_client
, region
, config
)
2919 #---register sections to distributors-------------------------------
2920 ec2_limits_distributor
.add(ec2_summary
)
2921 ec2_summary_distributor
.add(ec2_labels
)
2922 ec2_summary_distributor
.add(ec2_security_groups
)
2923 ec2_summary_distributor
.add(ec2
)
2924 ec2_summary_distributor
.add(ebs_summary
)
2925 ec2_summary_distributor
.add(ebs
)
2927 ebs_limits_distributor
.add(ebs_summary
)
2928 ebs_summary_distributor
.add(ebs
)
2930 elb_limits_distributor
.add(elb_summary
)
2931 elb_summary_distributor
.add(elb_labels
)
2932 elb_summary_distributor
.add(elb_health
)
2933 elb_summary_distributor
.add(elb
)
2935 elbv2_limits_distributor
.add(elbv2_summary
)
2936 elbv2_summary_distributor
.add(elbv2_labels
)
2937 elbv2_summary_distributor
.add(elbv2_target_groups
)
2938 elbv2_summary_distributor
.add(elbv2_application
)
2939 elbv2_summary_distributor
.add(elbv2_network
)
2941 s3_limits_distributor
.add(s3_summary
)
2942 s3_summary_distributor
.add(s3
)
2943 s3_summary_distributor
.add(s3_requests
)
2945 rds_summary_distributor
.add(rds
)
2947 cloudwatch_alarms_limits_distributor
.add(cloudwatch_alarms
)
2949 #---register sections for execution---------------------------------
2950 if 'ec2' in services
:
2951 if config
.service_config
.get('ec2_limits'):
2952 self
._sections
.append(ec2_limits
)
2953 self
._sections
.append(ec2_summary
)
2954 self
._sections
.append(ec2_labels
)
2955 self
._sections
.append(ec2_security_groups
)
2956 self
._sections
.append(ec2
)
2958 if 'ebs' in services
:
2959 if config
.service_config
.get('ebs_limits'):
2960 self
._sections
.append(ebs_limits
)
2961 self
._sections
.append(ebs_summary
)
2962 self
._sections
.append(ebs
)
2964 if 'elb' in services
:
2965 if config
.service_config
.get('elb_limits'):
2966 self
._sections
.append(elb_limits
)
2967 self
._sections
.append(elb_summary
)
2968 self
._sections
.append(elb_labels
)
2969 self
._sections
.append(elb_health
)
2970 self
._sections
.append(elb
)
2972 if 'elbv2' in services
:
2973 if config
.service_config
.get('elbv2_limits'):
2974 self
._sections
.append(elbv2_limits
)
2975 self
._sections
.append(elbv2_summary
)
2976 self
._sections
.append(elbv2_labels
)
2977 self
._sections
.append(elbv2_target_groups
)
2978 #TODO enable when checks are ready
2979 #self._sections.append(elbv2_application)
2980 #self._sections.append(elbv2_network)
2982 if 's3' in services
:
2983 if config
.service_config
.get('s3_limits'):
2984 self
._sections
.append(s3_limits
)
2985 self
._sections
.append(s3_summary
)
2986 self
._sections
.append(s3
)
2987 if config
.service_config
['s3_requests']:
2988 self
._sections
.append(s3_requests
)
2990 if 'rds' in services
:
2991 if config
.service_config
.get('rds_limits'):
2992 self
._sections
.append(rds_limits
)
2993 self
._sections
.append(rds_summary
)
2994 self
._sections
.append(rds
)
2996 if 'cloudwatch' in services
:
2997 if config
.service_config
.get('cloudwatch_alarms_limits'):
2998 self
._sections
.append(cloudwatch_alarms_limits
)
2999 if 'cloudwatch_alarms' in config
.service_config
:
3000 self
._sections
.append(cloudwatch_alarms
)
3004 # .--main----------------------------------------------------------------.
3006 # | _ __ ___ __ _(_)_ __ |
3007 # | | '_ ` _ \ / _` | | '_ \ |
3008 # | | | | | | | (_| | | | | | |
3009 # | |_| |_| |_|\__,_|_|_| |_| |
3011 # '----------------------------------------------------------------------'
3013 AWSServiceAttributes
= NamedTuple("AWSServiceAttributes", [
3016 ("global_service", bool),
3017 ("filter_by_names", bool),
3018 ("filter_by_tags", bool),
3023 AWSServiceAttributes(
3025 title
="Costs and usage",
3026 global_service
=True,
3027 filter_by_names
=False,
3028 filter_by_tags
=False,
3030 AWSServiceAttributes(
3032 title
="Elastic Compute Cloud (EC2)",
3033 global_service
=False,
3034 filter_by_names
=True,
3035 filter_by_tags
=True,
3037 AWSServiceAttributes(
3039 title
="Elastic Block Storage (EBS)",
3040 global_service
=False,
3041 filter_by_names
=True,
3042 filter_by_tags
=True,
3044 AWSServiceAttributes(
3046 title
="Simple Storage Service (S3)",
3047 global_service
=False,
3048 filter_by_names
=True,
3049 filter_by_tags
=True,
3051 AWSServiceAttributes(
3053 title
="Classic Load Balancing (ELB)",
3054 global_service
=False,
3055 filter_by_names
=True,
3056 filter_by_tags
=True,
3058 AWSServiceAttributes(
3060 title
="Application and Network Load Balancing (ELBv2)",
3061 global_service
=False,
3062 filter_by_names
=True,
3063 filter_by_tags
=True,
3065 AWSServiceAttributes(
3067 title
="Relational Database Service (RDS)",
3068 global_service
=False,
3069 filter_by_names
=True,
3070 filter_by_tags
=True,
3072 AWSServiceAttributes(
3075 global_service
=False,
3076 filter_by_names
=False,
3077 filter_by_tags
=False,
3082 def parse_arguments(argv
):
3083 parser
= argparse
.ArgumentParser(
3084 description
=__doc__
, formatter_class
=argparse
.RawTextHelpFormatter
)
3085 parser
.add_argument("--debug", action
="store_true", help="Raise Python exceptions.")
3086 parser
.add_argument(
3088 action
="store_true",
3089 help="Log messages from AWS library 'boto3' and 'botocore'.")
3090 parser
.add_argument(
3092 action
="store_true",
3093 help="Execute all sections, do not rely on cached data. Cached data will not be overwritten."
3096 parser
.add_argument(
3097 "--access-key-id", required
=True, help="The access key ID for your AWS account.")
3098 parser
.add_argument(
3099 "--secret-access-key", required
=True, help="The secret access key for your AWS account.")
3100 parser
.add_argument(
3103 help="Regions to use:\n%s" % "\n".join(["%-15s %s" % e
for e
in AWSRegions
]))
3105 parser
.add_argument(
3106 "--global-services",
3108 help="Global services to monitor:\n%s" % "\n".join(
3109 ["%-15s %s" % (e
.key
, e
.title
) for e
in AWSServices
if e
.global_service
]))
3111 parser
.add_argument(
3114 help="Services per region to monitor:\n%s" % "\n".join(
3115 ["%-15s %s" % (e
.key
, e
.title
) for e
in AWSServices
if not e
.global_service
]))
3117 for service
in AWSServices
:
3118 if service
.filter_by_names
:
3119 parser
.add_argument(
3120 '--%s-names' % service
.key
, nargs
='+', help="Names for %s" % service
.title
)
3121 if service
.filter_by_tags
:
3122 parser
.add_argument(
3123 '--%s-tag-key' % service
.key
,
3126 help="Tag key for %s" % service
.title
)
3127 parser
.add_argument(
3128 '--%s-tag-values' % service
.key
,
3131 help="Tag values for %s" % service
.title
)
3133 parser
.add_argument(
3134 '--%s-limits' % service
.key
,
3135 action
="store_true",
3136 help="Monitor limits for %s" % service
.title
)
3138 parser
.add_argument(
3140 action
="store_true",
3141 help="You have to enable requests metrics in AWS/S3 console. This is a paid feature.")
3143 parser
.add_argument("--cloudwatch-alarms", nargs
='*')
3145 parser
.add_argument('--overall-tag-key', nargs
=1, action
='append', help="Overall tag key")
3146 parser
.add_argument(
3147 '--overall-tag-values', nargs
='+', action
='append', help="Overall tag values")
3149 parser
.add_argument("--hostname", required
=True)
3150 return parser
.parse_args(argv
)
3153 def setup_logging(opt_debug
, opt_verbose
):
3154 logger
= logging
.getLogger()
3155 logger
.disabled
= True
3156 fmt
= '%(levelname)s: %(name)s: %(filename)s: %(lineno)s: %(message)s'
3159 logger
.disabled
= False
3162 logger
.disabled
= False
3163 logging
.basicConfig(level
=lvl
, format
=fmt
)
3166 def create_session(access_key_id
, secret_access_key
, region
):
3167 return boto3
.session
.Session(
3168 aws_access_key_id
=access_key_id
,
3169 aws_secret_access_key
=secret_access_key
,
3173 class AWSConfig(object):
    def __init__(self, hostname, overall_tags):
        # Host this agent reports for.
        self.hostname = hostname
        # Fallback tag filters, used for services without own tag filters.
        self._overall_tags = self._prepare_tags(overall_tags)
        # Per-service configuration, filled in via the add_* methods.
        self.service_config = {}
    def add_service_tags(self, tags_key, tags):
        """Convert commandline input

        ([['foo'], ['aaa'], ...], [['bar', 'baz'], ['bbb', 'ccc'], ...])

        into

        Filters=[{'Name': 'tag:foo', 'Values': ['bar', 'baz']},
                 {'Name': 'tag:aaa', 'Values': ['bbb', 'ccc']}, ...]

        as we need in API methods if and only if keys AND values are set.
        Falls back to the overall tags when no service tags are given.
        """
        self.service_config.setdefault(tags_key, None)
        if tags != (None, None):
            self.service_config[tags_key] = self._prepare_tags(tags)
        elif self._overall_tags:
            self.service_config[tags_key] = self._overall_tags
3194 def _prepare_tags(self
, tags
):
3198 'Name': 'tag:%s' % k
,
3200 } for k
, v
in zip([k
[0] for k
in keys
], values
)]
    def add_single_service_config(self, key, value):
        # First write wins: never overwrite an already registered value.
        self.service_config.setdefault(key, value)
3207 def main(args
=None):
3209 cmk
.utils
.password_store
.replace_passwords()
3212 args
= parse_arguments(args
)
3213 setup_logging(args
.debug
, args
.verbose
)
3214 hostname
= args
.hostname
3216 aws_config
= AWSConfig(hostname
, (args
.overall_tag_key
, args
.overall_tag_values
))
3217 for service_key
, service_names
, service_tags
, service_limits
in [
3218 ("ec2", args
.ec2_names
, (args
.ec2_tag_key
, args
.ec2_tag_values
), args
.ec2_limits
),
3219 ("ebs", args
.ebs_names
, (args
.ebs_tag_key
, args
.ebs_tag_values
), args
.ebs_limits
),
3220 ("s3", args
.s3_names
, (args
.s3_tag_key
, args
.s3_tag_values
), args
.s3_limits
),
3221 ("elb", args
.elb_names
, (args
.elb_tag_key
, args
.elb_tag_values
), args
.elb_limits
),
3222 ("elbv2", args
.elbv2_names
, (args
.elbv2_tag_key
, args
.elbv2_tag_values
), args
.elbv2_limits
),
3223 ("rds", args
.rds_names
, (args
.rds_tag_key
, args
.rds_tag_values
), args
.rds_limits
),
3225 aws_config
.add_single_service_config("%s_names" % service_key
, service_names
)
3226 aws_config
.add_service_tags("%s_tags" % service_key
, service_tags
)
3227 aws_config
.add_single_service_config("%s_limits" % service_key
, service_limits
)
3229 aws_config
.add_single_service_config("s3_requests", args
.s3_requests
)
3230 aws_config
.add_single_service_config("cloudwatch_alarms", args
.cloudwatch_alarms
)
3232 has_exceptions
= False
3233 for aws_services
, aws_regions
, aws_sections
in [
3234 (args
.global_services
, ["us-east-1"], AWSSectionsUSEast
),
3235 (args
.services
, args
.regions
, AWSSectionsGeneric
),
3237 if not aws_services
or not aws_regions
:
3239 for region
in aws_regions
:
3241 session
= create_session(args
.access_key_id
, args
.secret_access_key
, region
)
3242 sections
= aws_sections(hostname
, session
, debug
=args
.debug
)
3243 sections
.init_sections(aws_services
, region
, aws_config
)
3244 sections
.run(use_cache
=not args
.no_cache
)
3245 except AssertionError:
3248 except Exception as e
:
3250 has_exceptions
= True