From 0b1e85e46671b798d5d582f5694c7fc9def06733 Mon Sep 17 00:00:00 2001 From: Asato Wakisaka Date: Fri, 2 Aug 2019 18:44:56 +0900 Subject: [PATCH] publish markdowns --- content/api-docs/entry/alerts.md | 4 +- content/api-docs/entry/monitors.md | 108 +++++++++++++++++- content/docs/entry/custom-checks.md | 4 + .../faq/contracts/combined-integration.md | 4 +- .../howto/anomaly-detection-for-roles.md | 3 + content/docs/entry/howto/container-agent.md | 4 + .../entry/howto/install-agent/amazon-linux.md | 48 ++++++-- content/docs/entry/integrations/aws.md | 27 +++-- .../docs/entry/integrations/aws/firehose.md | 42 +++++++ content/ja/api-docs/entry/alerts.md | 4 +- content/ja/api-docs/entry/monitors.md | 98 ++++++++++++++++ content/ja/docs/entry/custom-checks.md | 4 + .../faq/contracts/combined-integration.md | 2 +- .../howto/anomaly-detection-for-roles.md | 3 + .../ja/docs/entry/howto/container-agent.md | 3 + .../entry/howto/install-agent/amazon-linux.md | 48 ++++++-- content/ja/docs/entry/integrations/aws.md | 28 +++-- .../docs/entry/integrations/aws/firehose.md | 42 +++++++ 18 files changed, 434 insertions(+), 42 deletions(-) create mode 100644 content/docs/entry/integrations/aws/firehose.md create mode 100644 content/ja/docs/entry/integrations/aws/firehose.md diff --git a/content/api-docs/entry/alerts.md b/content/api-docs/entry/alerts.md index bf5ea0f..debd076 100644 --- a/content/api-docs/entry/alerts.md +++ b/content/api-docs/entry/alerts.md @@ -56,8 +56,8 @@ Alerts will be in chronological order of when they were generated from newest to | `id` | *string* | alert's ID | | `status` | *string* | alert's current status: `"OK"`, `"CRITICAL"`, `"WARNING"`, or `"UNKNOWN"` | | `monitorId` | *string* | ID of the monitor that generated the alert | -| `type` | *string* | the type of monitor: connectivity (`"connectivity"`), host metric (`"host"`), service metric (`"service"`), external monitor (`"external"`), check monitor (`"check"`) or expression monitor (`"expression"`) | 
-| `hostId` | *string* | [optional] the associated host ID. only exists when the type of monitor is either `"connectivity"`, `"host"`, or `"check"` | +| `type` | *string* | the type of monitor: connectivity (`"connectivity"`), host metric (`"host"`), service metric (`"service"`), external monitor (`"external"`), check monitor (`"check"`), expression monitor (`"expression"`), or anomaly detection for roles (`"anomalyDetection"`) | +| `hostId` | *string* | [optional] the associated host ID. only exists when the type of monitor is either `"connectivity"`, `"host"`, `"check"`, or `"anomalyDetection"` | | `value` | *number* | [optional] value of the monitoring target. only exists when the type of monitor is `"host"`, or `"service"` or when there is a reponse time configuration in `"external"` | | `message` | *string* | [optional] monitoring target's message. only exists when the type of monitor is either `"check"` or `"external"` | | `reason` | *string* | [optional] reason that the alert was closed. does not exist if the alert is unresolved. | diff --git a/content/api-docs/entry/monitors.md b/content/api-docs/entry/monitors.md index d4828b3..c81f6fa 100644 --- a/content/api-docs/entry/monitors.md +++ b/content/api-docs/entry/monitors.md @@ -37,6 +37,7 @@ The input procedure varies depending on the monitoring target.
  • Service metric monitoring
  • External monitoring
  • Expression monitoring
  • +
  • Monitoring with Anomaly Detection for Roles
  • Host metric monitoring

    @@ -46,7 +47,7 @@ The input procedure varies depending on the monitoring target. | KEY | TYPE | DESCRIPTION | | ------------ | -------- | -------------------------------- | | `type` | *string* | constant string `"host"` | -| `name` | *string* | arbitrary name that can be seen in the list of monitors and elsewhere | +| `name` | *string* | arbitrary name that can be referenced from the monitors list, etc. | | `memo` | *string* | [optional] notes for the monitoring configuration | | `duration` | *number* | average value of the designated interval (in minutes) will be monitored. valid interval (1 to 10 min.) | | `metric` | *string* | name of the host metric targeted by monitoring. by designating a specific constant string, comparative monitoring is possible [*1](#comparative-monitoring) | @@ -195,7 +196,7 @@ This function disables notifications in monitoring. Alerts occur in response to | KEY | TYPE | DESCRIPTION | | ------------ | -------- | -------------------------------- | | `type` | *string* | constant string `"connectivity"` | -| `name` | *string* | [optional] arbitrary name that can be seen in the list of monitors and elsewhere. The default value is `connectivity`. | +| `name` | *string* | [optional] arbitrary name that can be referenced from the monitors list, etc. The default value is `connectivity`. | | `memo` | *string* | [optional] notes for the monitoring configuration | | `scopes` | *array[string]* | [optional] The service name or role details name of the monitoring target. [*2](#service-name) | | `excludeScopes` | *array[string]* | [optional] The service name or role details name of the monitoring exception. [*2](#service-name) | @@ -283,7 +284,7 @@ This function disables notifications in monitoring. Alerts occur in response to | KEY | TYPE | DESCRIPTION | | ------------ | -------- | -------------------------------- | | `type` | *string* | constant string `"service"` | -| `name` | *string* | arbitrary name that can refer to monitors list, etc. 
| +| `name` | *string* | arbitrary name that can be referenced from the monitors list, etc. | | `memo` | *string* | [optional] notes for the monitoring configuration | | `service` | *string* | name of the service targeted by monitoring | | `duration` | *number* | monitors the average value of the designated number of points. range: most recent 1~10 points | @@ -398,7 +399,7 @@ This function disables notifications in monitoring. Alerts occur in response to | KEY | TYPE | DESCRIPTION | | ---------------------- | ---------- | -------------------------------- | | `type` | *string* | constant string `"external"` | -| `name` | *string* | arbitrary name that can refer to monitors list, etc. | +| `name` | *string* | arbitrary name that can be referenced from the monitors list, etc. | | `memo` | *string* | [optional] notes for the monitoring configuration | | `url` | *string* | monitoring target URL | | `method` | *string* | [optional] request method, one of `GET`, `POST`, `PUT`, `DELETE`. If omitted, `GET` method is used. | @@ -518,7 +519,7 @@ In order to monitor response time, it's necessary to assign `responseTimeWarning | KEY | TYPE | DESCRIPTION | | ------------ | -------- | -------------------------------- | | `type` | *string* | constant string `"expression"` | -| `name` | *string* | arbitrary name that can be seen in the list of monitors and elsewhere | +| `name` | *string* | arbitrary name that can be referenced from the monitors list, etc. | | `memo` | *string* | [optional] notes for the monitoring configuration | | `expression` | *string* | Expression of the monitoring target. Only valid for graph sequences that become one line. | | `operator` | *string* | determines the conditions that state whether the designated variable is either big or small. 
the observed value is on the left of ”>”or ”<” and the designated value is on the right| @@ -685,6 +686,103 @@ In order to monitor response time, it's necessary to assign `responseTimeWarning - each field is the same as when the [monitor was created](#create) - list is ordered as monitor type -> name (same as the list of monitors on mackerel.io) +

    Monitoring with Anomaly Detection for Roles

    + +#### Input (when monitoring with Anomaly Detection for Roles) + +| KEY | TYPE | DESCRIPTION | +| ------------ | -------- | -------------------------------- | +| `type` | *string* | constant string `"anomalyDetection"` | +| `name` | *string* | arbitrary name that can be referenced from the monitors list, etc. | +| `memo` | *string* | [optional] notes for the monitoring configuration | +| `scopes` | *array[string]* | [optional] monitoring target’s service name and role details name [*2](#service-name) | +| `warningSensitivity` | *string* | the sensitivity (`insensitive`, `normal`, or `sensitive`) that generates warning alerts. | +| `criticalSensitivity` | *string* | the sensitivity (`insensitive`, `normal`, or `sensitive`) that generates critical alerts. | +| `maxCheckAttempts` | *number* | [optional] number of consecutive Warning/Critical instances before an alert is made. Default setting is 3 (1-10) | +| `trainingPeriodFrom` | *number* | [optional] Specified training period (Uses metric data starting from the specified time) | +| `notificationInterval` | *number* | [optional] the time interval (in minutes) for re-sending notifications. If this field is omitted, notifications will not be re-sent. 
| +| `isMute` | *boolean* | [optional] whether monitoring is muted or not | + +##### Example Input + +```json +{ + "type": "anomalyDetection", + "name": "anomaly detection", + "memo": "my anomaly detection for roles", + "scopes": [ + "myService: myRole" + ], + "warningSensitivity": "insensitive", + "maxCheckAttempts": 3 +} +``` + +#### Response (Monitoring with Anomaly Detection for Roles) + +##### Success + +```json +{ + "id" : "2cSZzK3XfmG", + "type": "anomalyDetection", + "name": "anomaly detection", + "memo": "my anomaly detection for roles", + "scopes": [ + "myService: myRole" + ], + "warningSensitivity": "insensitive", + "maxCheckAttempts": 3 +} +``` + +`id` will be given and returned + +##### Error + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    STATUS CODEDESCRIPTION
    400when the input is in a format that can’t be received
    400when the name is empty
    400when the memo exceeds 250 characters
    400when the specified service name or role details name is not registered in scope or excludeScopes
    400when the specified warningSensitivity or criticalSensitivity is not insensitive / normal / sensitive
    400when the notification re-sending time interval is not set at 10 minutes or more
    400when a future value is specified for trainingPeriodFrom
    403when the API doesn't have the required permissions
    + ----------------------------------------------

    Get monitor configurations

    diff --git a/content/docs/entry/custom-checks.md b/content/docs/entry/custom-checks.md index 5af3418..04fac52 100644 --- a/content/docs/entry/custom-checks.md +++ b/content/docs/entry/custom-checks.md @@ -22,6 +22,7 @@ In the [agent settings file](https://mackerel.io/docs/entry/spec/agent#config-fi ```config [plugin.checks.ssh] command = ["ruby", "/path/to/check-ssh.rb"] +custom_identifier = "SOME_IDENTIFIER" # optional notification_interval = 60 max_check_attempts = 1 check_interval = 5 @@ -34,6 +35,9 @@ memo = "This check monitor is ..." - Item name: With the key for the settings file, the item name must begin with "plugin.checks." and contain exactly two periods. Anything after the second dot will be used as the monitor settings name. - command: This command will have the agent temporarily execute, and use it’s exit status/standard output as the monitoring result. +- custom_identifier: Monitoring results are sent as a monitor of the host of the specified identifier, not the host on which the agent is running. + - If the check result is not OK, it will be notified as an alert for the host that is specified here. + - This can be useful for adding monitors to hosts integrated with AWS / Azure Integration. For more details, refer to the [AWS Integration Document](https://mackerel.io/docs/entry/integrations/aws#plugin-custom-identifier). - notification_interval: The notification re-sending interval will be designated in minutes. If the notification is abbreviated, it will not be re-sent. An interval of less than 10 minutes can not be designated. If an interval of less than 10 min is designated, the notification will be re-sent at 10 minutes. - max_check_attempts: An alert will be sent for any result other than “okay” in the designated number sequence. For example, if set at 3 and the latest monitoring result for all three is not ok, then a notification will be sent. 
**When used with `prevent_alert_auto_close`, the value of `max_check_attempts` will be treated as `1` regardless of the specified value.** - check_interval: Designate the check monitoring execution interval in minutes. The default value is 1 minute. The configurable range is 1 to 60 minutes. If a value of less than 1 minute is designated, monitoring will be run at 1 minute intervals. If a value of more than 60 minutes is designated, monitoring will be run at 60 minute intervals. diff --git a/content/docs/entry/faq/contracts/combined-integration.md b/content/docs/entry/faq/contracts/combined-integration.md index 8a47b7a..ea326d2 100644 --- a/content/docs/entry/faq/contracts/combined-integration.md +++ b/content/docs/entry/faq/contracts/combined-integration.md @@ -8,8 +8,8 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs.hatenablog.mackere When using mackerel-agent and AWS/Azure Integration together, the metrics obtained from the two functions are aggregated into one host with a billable rate of either a standard host or a micro host. -- When mackerel-agent is installed on an EC2/Azure VM instance while also using the Integration function: 1 Standard host -- When [using the custom_identifier to post metrics](https://mackerel.io/docs/entry/integrations/aws#plugin-custom-identifier) for Integration registered hosts in other services: 1 Micro host +- When mackerel-agent is installed on an EC2/Azure VM instance while also using the Integration function: 1 Standard host +- When [using the custom_identifier to aggregate monitors with the plugin](https://mackerel.io/docs/entry/integrations/aws#plugin-custom-identifier) for Integration registered hosts in other services: 1 Micro host However, please note that the upper limit for the number of metrics per host does not change. 
For other precautions regarding exceeding metrics etc., refer to [Handling of host conversion when plan limits are exceeded](https://mackerel.io/docs/entry/faq/contracts/limit-exceeded-conversion). diff --git a/content/docs/entry/howto/anomaly-detection-for-roles.md b/content/docs/entry/howto/anomaly-detection-for-roles.md index cde8ada..0815c3c 100644 --- a/content/docs/entry/howto/anomaly-detection-for-roles.md +++ b/content/docs/entry/howto/anomaly-detection-for-roles.md @@ -38,3 +38,6 @@ Go to the Monitors screen, located on the left side menu, and click the “Add M - Monitoring will not continue until the anomaly detection training has completed. - Multiple monitors with anomaly detection can not be specified for the same role - Only available for the Trial and Paid plans + - Every 5 hosts targeted for monitoring will count as 1 Standard host + - The same host will be counted multiple times if it is monitored through multiple roles + - A usage charge will incur starting with one monitored host diff --git a/content/docs/entry/howto/container-agent.md b/content/docs/entry/howto/container-agent.md index 1a06376..c4e3886 100644 --- a/content/docs/entry/howto/container-agent.md +++ b/content/docs/entry/howto/container-agent.md @@ -93,9 +93,13 @@ If not using the plugin, the agent can be used with just environment variable co | MACKEREL_APIBASE | Specifies the Mackerel API endpoint(Default: `https://api.mackerelio.com/`). | | MACKEREL_ROLES | Sets tasks, pod services, and roles. | | MACKEREL_AGENT_CONFIG | Sets the agent configuration file. Details for this will be described later. | +| MACKEREL_AGENT_CONFIG_POLLING_DURATION_MINUTES | Specifies time interval of retrieval in minutes for detecting changes in the agent configuration file.| | MACKEREL_IGNORE_CONTAINER | Sets the name of the container to be excluded from monitoring with regular expressions. 
| | MACKEREL_HOST_STATUS_ON_START | When set, the host status changes to the specified value upon startup of the agent. Valid values are "standby", "working", "maintenance", and "poweroff".| + +By default, the configuration file is read once at startup. With `MACKEREL_AGENT_CONFIG_POLLING_DURATION_MINUTES` configured, the configuration file is periodically retrieved to detect and apply changes. + ### Using the configuration file Set the file path of the agent configuration file with the environment variable `MACKEREL_AGENT_CONFIG`. diff --git a/content/docs/entry/howto/install-agent/amazon-linux.md b/content/docs/entry/howto/install-agent/amazon-linux.md index 9bd6607..a80ff09 100644 --- a/content/docs/entry/howto/install-agent/amazon-linux.md +++ b/content/docs/entry/howto/install-agent/amazon-linux.md @@ -7,7 +7,11 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs.hatenablog.mackere The content described on this page is also available from the [New Host registration screen in Mackerel](https://mackerel.io/my/instruction-agent). -

    When using Amazon Linux 2 LTS

    +

    Install the package

    +Amazon Linux and Amazon Linux 2 LTS run different commands for installation. Please check out each procedure. +Installation can also be done directly using the `rpm` command. + +

    When using Amazon Linux 2 LTS

    Execute the following command: @@ -17,7 +21,7 @@ curl -fsSL https://mackerel.io/file/script/amznlinux/setup-all-yum-v2.sh | MACKE You can check the API key from the [Organization page’s API Keys tab](https://mackerel.io/my?tab=apikeys). Keep in mind that this key is used to identify your organization, so we strongly advise not sharing it with others. -

    When using Amazon Linux

    +

    When using Amazon Linux

    Execute the following command: @@ -27,17 +31,37 @@ curl -fsSL https://mackerel.io/file/script/amznlinux/setup-all-yum.sh | MACKEREL You can check the API key from the [Organization page’s API Keys tab](https://mackerel.io/my?tab=apikeys). Keep in mind that this key is used to identify your organization, so we strongly advise not sharing it with others. -

    Edit the configuration file

    +

    Using the rpm command with Amazon Linux 2 LTS

    -Edit the file `/etc/mackerel-agent/mackerel-agent.conf` and configure the API key. +To install mackerel-agent with `rpm` run the following command: ``` -apikey = "" +sudo rpm -ivh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.amzn2.x86_64.rpm +``` + +To update Mackerel-agent with `rpm` run the following command: + +``` +sudo rpm -Uvh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.amzn2.x86_64.rpm ``` -You can check the API key from the [Organization page’s API Keys tab](https://mackerel.io/my?tab=apikeys). Keep in mind that this key is used to identify your organization, so we strongly advise not sharing it with others. +

    Using the rpm command with Amazon Linux

    -For more details, check out the [mackerel-agent specifications](https://mackerel.io/docs/entry/spec/agent) help page. +To install mackerel-agent with `rpm` run the following command: + +``` +sudo rpm -ivh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.x86_64.rpm +``` + +To update Mackerel-agent with `rpm` run the following command: + +``` +sudo rpm -Uvh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.x86_64.rpm +``` + +

    Edit the configuration file

    + +Edit the `/etc/mackerel-agent/mackerel-agent.conf` file to configure the agent. By using the configuration file, the following can be implemented: @@ -45,6 +69,16 @@ By using the configuration file, the following can be implemented: - [Posting user-defined custom metrics](https://mackerel.io/docs/entry/advanced/custom-metrics) - [Adding monitors for script checks](https://mackerel.io/docs/entry/custom-checks) +For more details, check out the [mackerel-agent specifications](https://mackerel.io/docs/entry/spec/agent) help page. + +Please be sure to configure the API key if installed from rpm. This is done automatically if set up with the setup script. + +``` +apikey = "" +``` + +You can check the API key from the [Organization page’s API Keys tab](https://mackerel.io/my?tab=apikeys). Keep in mind that this key is used to identify your organization, so we strongly advise not sharing it with others. +

    Starting the agent

    When using Amazon Linux 2 LTS

    diff --git a/content/docs/entry/integrations/aws.md b/content/docs/entry/integrations/aws.md index 60c0b0d..0c71e72 100644 --- a/content/docs/entry/integrations/aws.md +++ b/content/docs/entry/integrations/aws.md @@ -13,7 +13,7 @@ Additionally, the API of AWS will be called every 5 minutes for each targeted me Currently, the following AWS cloud products are supported. For information on obtaining metrics, please refer to each individual document. -[EC2](https://mackerel.io/docs/entry/integrations/aws/ec2)・[ELB (CLB)](https://mackerel.io/docs/entry/integrations/aws/elb)・[ALB](https://mackerel.io/docs/entry/integrations/aws/alb)・[NLB](https://mackerel.io/docs/entry/integrations/aws/nlb)・[RDS](https://mackerel.io/docs/entry/integrations/aws/rds)・[ElastiCache](https://mackerel.io/docs/entry/integrations/aws/elasticache)・[Redshift](https://mackerel.io/docs/entry/integrations/aws/redshift)・[Lambda](https://mackerel.io/docs/entry/integrations/aws/lambda)・[SQS](https://mackerel.io/docs/entry/integrations/aws/sqs)・[DynamoDB](https://mackerel.io/docs/entry/integrations/aws/dynamodb)・[CloudFront](https://mackerel.io/docs/entry/integrations/aws/cloudfront)・[API Gateway](https://mackerel.io/docs/entry/integrations/aws/apigateway)・[Kinesis](https://mackerel.io/docs/entry/integrations/aws/kinesis)・[S3](https://mackerel.io/docs/entry/integrations/aws/s3)・[ES](https://mackerel.io/docs/entry/integrations/aws/es)・[ECS](https://mackerel.io/docs/entry/integrations/aws/ecs)・[SES](https://mackerel.io/ja/docs/entry/integrations/aws/ses)・[Step Functions](https://mackerel.io/docs/entry/integrations/aws/states)・[EFS](https://mackerel.io/docs/entry/integrations/aws/efs) +[EC2](https://mackerel.io/docs/entry/integrations/aws/ec2)・[ELB 
(CLB)](https://mackerel.io/docs/entry/integrations/aws/elb)・[ALB](https://mackerel.io/docs/entry/integrations/aws/alb)・[NLB](https://mackerel.io/docs/entry/integrations/aws/nlb)・[RDS](https://mackerel.io/docs/entry/integrations/aws/rds)・[ElastiCache](https://mackerel.io/docs/entry/integrations/aws/elasticache)・[Redshift](https://mackerel.io/docs/entry/integrations/aws/redshift)・[Lambda](https://mackerel.io/docs/entry/integrations/aws/lambda)・[SQS](https://mackerel.io/docs/entry/integrations/aws/sqs)・[DynamoDB](https://mackerel.io/docs/entry/integrations/aws/dynamodb)・[CloudFront](https://mackerel.io/docs/entry/integrations/aws/cloudfront)・[API Gateway](https://mackerel.io/docs/entry/integrations/aws/apigateway)・[Kinesis](https://mackerel.io/docs/entry/integrations/aws/kinesis)・[S3](https://mackerel.io/docs/entry/integrations/aws/s3)・[ES](https://mackerel.io/docs/entry/integrations/aws/es)・[ECS](https://mackerel.io/docs/entry/integrations/aws/ecs)・[SES](https://mackerel.io/ja/docs/entry/integrations/aws/ses)・[Step Functions](https://mackerel.io/docs/entry/integrations/aws/states)・[EFS](https://mackerel.io/docs/entry/integrations/aws/efs)・[Kinesis Data Firehose](https://mackerel.io/docs/entry/integrations/aws/firehose)

    Integration method

    @@ -52,7 +52,8 @@ Grant the policies listed below for the role. Be careful not to grant FullAccess - `AmazonSESReadOnlyAccess / ses:Describe*` - `AWSStepFunctionsReadOnlyAccess` - `AmazonElasticFileSystemReadOnlyAccess` -- `CloudWatchReadOnlyAccess`(When only configuring CloudFront, API Gateway, Kinesis, S3, ES, ECS, SES, Step Functions or EFS) +- `AmazonKinesisFirehoseReadOnlyAccess` +- `CloudWatchReadOnlyAccess`(When only configuring CloudFront, API Gateway, Kinesis, S3, ES, ECS, SES, Step Functions, EFS or Firehose) Additionally, in AWS Integration you can filter using tags as is mentioned further down, but if you filter using tags with ElastiCache or SQS, additional policies need to be added. For more details, refer to Filter by tag. @@ -100,7 +101,8 @@ Grant the policies listed below for the newly created user. Be careful not to gr - `AmazonSESReadOnlyAccess / ses:Describe*` - `AWSStepFunctionsReadOnlyAccess` - `AmazonElasticFileSystemReadOnlyAccess` -- `CloudWatchReadOnlyAccess`(When only configuring CloudFront, API Gateway, Kinesis, S3, ES, ECS, SES, Step Functions or EFS) +- `AmazonKinesisFirehoseReadOnlyAccess` +- `CloudWatchReadOnlyAccess`(When only configuring CloudFront, API Gateway, Kinesis, S3, ES, ECS, SES, Step Functions, EFS or Firehose) Additionally, in AWS Integration you can filter using tags as is mentioned further down, but if you filter using tags with ElastiCache or SQS, additional policies need to be added. @@ -152,11 +154,16 @@ By configuring the integration above, AWS cloud products complying with the targ Even if a host is not retired, host information will remain and hosts without metric posts will not be subject to billing. -

    Regarding the aggregation of custom metrics obtained with the plugin in integrated hosts

    +

    Regarding the aggregation of monitoring content with plugins in integrated hosts

    -In mackerel-agent’s plugin configuration, a `custom_identifier` can be specified. `custom_identifier` is a mechanism to grant an identifier that is unique to the user to be used as a host identifier. By using this, metrics that have been posted from mackerel-agent installed on another machine can be aggregated as metrics of a host integrated with AWS Integration. Specify the `custom_identifier` in the plugin configuration of the plugin to be sending the custom metrics. +`custom_identifier` can be specified in the settings of mackerel-agent's custom metrics and check monitoring plugins. `custom_identifier` is a mechanism that allows the user to assign a distinct identifier to a host. Using this, it is possible to aggregate metrics and check monitors that have been posted from mackerel-agent installed on another machine as a part of the AWS integration host. `custom_identifier` is specified in the configuration of compatible plugins. -For example, if using Amazon RDS and the [mackerel-plugin-mysql](https://github.com/mackerelio/mackerel-agent-plugins/tree/master/mackerel-plugin-mysql) plugin, by adding the `custom_identifier` description as shown below in the plugin configuration of mackerel-agent.conf, metrics obtained by the plugin can be aggregated as custom metrics of an RDS host. +For example, the endpoint in the case of Amazon RDS, and the DNS Name in the case of ELB, will be the `custom_identifier`string. + +#### Example uses +Here are two example uses. In either case, the agent needs to be restarted after adding to the mackerel-agent configuration file. + +The first example is of MySQL monitoring using the [mackerel-plugin-mysql](https://github.com/mackerelio/mackerel-agent-plugins/tree/master/mackerel-plugin-mysql) plugin for Amazon RDS. Metrics retrieved by the plugin can be aggregated as custom metrics for the RDS host by adding the plugin configuration which includes the `custom_identifier` as shown below to the mackerel-agent.conf. 
``` [plugin.metrics.mysql] @@ -164,6 +171,10 @@ command = ["mackerel-plugin-mysql", "-host", "", "-username", "use custom_identifier = "" ``` -The endpoint (for Amazon RDS) and the DNS Name (for ELB) each become a `custom_identifier` string. +The second example is of Elasticsearch monitoring using the Amazon Elasticsearch Service and the [check-elasticsearch](https://github.com/mackerelio/go-check-plugins/tree/master/check-elasticsearch) plugin. Elasticsearch Service Cluster Health Checks can be aggregated as check monitoring for ElasticsearchService hosts by adding the plugin configuration which includes the `custom_identifier` as shown below to the mackerel-agent.conf. -After making the addition to the conf file, you’ll need to restart the agent. +``` +[plugin.checks.elasticsearch] +command = ["check-elasticsearch", "-s", "https", "-H", "", "-p", "443"] +custom_identifier = "" +``` diff --git a/content/docs/entry/integrations/aws/firehose.md b/content/docs/entry/integrations/aws/firehose.md new file mode 100644 index 0000000..95a9985 --- /dev/null +++ b/content/docs/entry/integrations/aws/firehose.md @@ -0,0 +1,42 @@ +--- +Title: AWS Integration - Kinesis Data Firehose +Date: 2019-07-16T10:00:00+09:00 +URL: https://mackerel.io/docs/entry/integrations/aws/firehose +EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs.hatenablog.mackerel.io/atom/entry/26006613376612825 +CustomPath: integrations/aws/firehose +--- + +Mackerel supports obtaining and monitoring the metric of Amazon Kinesis Data Firehose in AWS Integration. When integrating with AWS Integration, billable targets are determined using the conversion 1 delivery stream = 1 Micro Host. In addition to this, depending on the number of metrics retrieved, you may be charged for exceeding the maximum number of metrics per micro host. + +Please refer to the following page for AWS Integration configuration methods and a list of supported AWS services.
    +AWS Integration + +## Obtaining metrics +The metrics obtainable with AWS Integration's support for Kinesis Data Firehose are as follows. For `Metric` explanations, refer to the AWS help page. + +The maximum number of metrics obtainable is detemined by destination services: + +- In S3 cases:33 +- In Elasticsearch Service cases:29 +- In Redshift cases:33 +- In Splunk cases:30 + +|Graph name|Metric|Metric name in Mackerel|Unit|Statistics| +|:--|:--|:--|:--|:--| +|Service Bytes|BackupToS3.Bytes
    DeliveryToElasticsearch.Bytes
    DeliveryToRedshift.Bytes
    DeliveryToS3.Bytes
    DeliveryToSplunk.Bytes
    IncomingBytes
    DataReadFromKinesisStream.Bytes|firehose.service_bytes.backup_to_s3
    firehose.service_bytes.delivery_to_elasticsearch
    firehose.service_bytes.delivery_to_redshift
    firehose.service_bytes.delivery_to_s3
    firehose.service_bytes.delivery_to_splunk
    firehose.service_bytes.incoming
    firehose.service_bytes.read_from_kinesis_streams|bytes|Sum| +|Service Records|BackupToS3.Records
    DeliveryToElasticsearch.Records
    DeliveryToRedshift.Records
    DeliveryToS3.Records
    DeliveryToSplunk.Records
    IncomingRecords
    DataReadFromKinesisStream.Records|firehose.service_records.backup_to_s3
    firehose.service_records.delivery_to_elasticsearch
    firehose.service_records.delivery_to_redshift
    firehose.service_records.delivery_to_s3
    firehose.service_records.delivery_to_splunk
    firehose.service_records.incoming
    firehose.service_records.read_from_kinesis_streams|integer|Sum| +|Service Success|BackupToS3.Success
    DeliveryToElasticsearch.Success
    DeliveryToRedshift.Success
    DeliveryToS3.Success
    DeliveryToSplunk.Success|firehose.service_success.backup_to_s3
    firehose.service_success.delivery_to_elasticsearch
    firehose.service_success.delivery_to_redshift
    firehose.service_success.delivery_to_s3
    firehose.service_success.delivery_to_splunk|integer|Average| +|Service Data Freshness|BackupToS3.DataFreshness
    DeliveryToElasticsearch.DataFreshness
    DeliveryToRedshift.DataFreshness
    DeliveryToS3.DataFreshness
    DeliveryToSplunk.DataFreshness|firehose.service_data_freshness.backup_to_s3
    firehose.service_data_freshness.delivery_to_elasticsearch
    firehose.service_data_freshness.delivery_to_redshift
    firehose.service_data_freshness.delivery_to_s3
    firehose.service_data_freshness.delivery_to_splunk|float|Maximum| +|Service Data Ack Latency|DeliveryToSplunk.DataAckLatency|firehose.service_data_ack_latency.delivery_to_splunk|float|Maximum| +|Millis Behind Latest|KinesisMillisBehindLatest|firehose.millis_behind_latest.kinesis|integer|Sum| +|API Latency|DescribeDeliveryStream.Latency
    PutRecord.Latency
    PutRecordBatch.Latency
    UpdateDeliveryStream.Latency|firehose.api_latency.describe_delivery_stream_average
    firehose.api_latency.describe_delivery_stream_maximum
    firehose.api_latency.put_record_average
    firehose.api_latency.put_record_maximum
    firehose.api_latency.put_record_batch_average
    firehose.api_latency.put_record_batch_maximum
    firehose.api_latency.update_delivery_stream_average
    firehose.api_latency.update_delivery_stream_maximum|float|Average
    Maximum| +|API Bytes|PutRecord.Bytes
    PutRecordBatch.Bytes|firehose.api_bytes.put_record
    firehose.api_bytes.put_record_batch|bytes|Sum| +|API Requests|DescribeDeliveryStream.Requests
    PutRecord.Requests
    PutRecordBatch.Requests
    UpdateDeliveryStream.Requests|firehose.api_requests.describe_delivery_stream
    firehose.api_requests.put_record
    firehose.api_requests.put_record_batch
    firehose.api_requests.update_delivery_stream|integer|Sum| +|API Records|PutRecordBatch.Records|firehose.api_records.put_record_batch|integer|Sum| +|API Throttled|ThrottledDescribeStream
    ThrottledGetRecords
    ThrottledGetShardIterator|firehose.api_throttled.describe_stream
    firehose.api_throttled.get_records
    firehose.api_throttled.get_shard_iterator|integer|Sum| +|Data Transformation Duration|ExecuteProcessing.Duration|firehose.data_transformation_duration|float|Average| +|Data Transformation Success|ExecuteProcessing.Success|firehose.data_transformation_success.execute|integer|Average| +|Data Transformation Records|SucceedProcessing.Records|firehose.data_transformation_records.succeed|integer|Sum| +|Data Transformation Bytes|SucceedProcessing.Bytes|firehose.data_transformation_bytes.succeed|bytes|Sum| +|Format Conversion Records|SucceedConversion.Records
    FailedConversion.Records|firehose.format_conversion_records.succeed
    firehose.format_conversion_records.failed|integer|Sum| +|Format Conversion Bytes|SucceedConversion.Bytes
    FailedConversion.Bytes|firehose.format_conversion_bytes.succeed
    firehose.format_conversion_bytes.failed|bytes|Sum| diff --git a/content/ja/api-docs/entry/alerts.md b/content/ja/api-docs/entry/alerts.md index 1ac376c..522fb00 100644 --- a/content/ja/api-docs/entry/alerts.md +++ b/content/ja/api-docs/entry/alerts.md @@ -57,8 +57,8 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-api-jp.hatenablog.macke | `id` | *string* | アラートのid。 | | `status` | *string* | アラートの現在のステータス `"OK"`、 `"CRITICAL"`、 `"WARNING"`、 `"UNKNOWN"` のいずれかになります。 | | `monitorId` | *string* | アラートを発生させた監視設定のid。 | -| `type` | *string* | 監視設定の種別。疎通 (`"connectivity"`)、ホストメトリック (`"host"`)、サービスメトリック (`"service"`)、外形監視 (`"external"`)、チェック監視 (`"check"`)、式監視 (`"expression"`) のいずれかになります。 | -| `hostId` | *string* | [optional] 関連するホストのid。監視設定の種別が `"connectivity"`、`"host"`、`"check"`の時のみ存在します。 | +| `type` | *string* | 監視設定の種別。疎通 (`"connectivity"`)、ホストメトリック (`"host"`)、サービスメトリック (`"service"`)、外形監視 (`"external"`)、チェック監視 (`"check"`)、式監視 (`"expression"`)、ロール内異常検知 (`"anomalyDetection"`) のいずれかになります。 | +| `hostId` | *string* | [optional] 関連するホストのid。監視設定の種別が `"connectivity"`、`"host"`、`"check"`、`"anomalyDetection"`の時のみ存在します。 | | `value` | *number* | [optional] 監視対象の値。監視設定の種別が `"host"`、`"service"` の時または `"external"`で応答時間の設定があるときのみ存在します。 | | `message` | *string* | [optional] 監視対象のメッセージ。監視設定の種別が `"check"` の時または `"external"` の時のみ存在します。 | | `reason` | *string* | [optional] アラートを閉じた理由。アラートが解決していない時は存在しません。 | diff --git a/content/ja/api-docs/entry/monitors.md b/content/ja/api-docs/entry/monitors.md index cfc15b6..32204e0 100644 --- a/content/ja/api-docs/entry/monitors.md +++ b/content/ja/api-docs/entry/monitors.md @@ -36,6 +36,7 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-api-jp.hatenablog.macke
  • サービスメトリック監視
  • 外形監視
  • 式による監視
  • +
  • ロール内異常検知による監視
  • ホストメトリック監視

    @@ -596,6 +597,103 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-api-jp.hatenablog.macke +

    ロール内異常検知による監視

    + +#### 入力(ロール内異常検知による監視) + +| KEY | TYPE | DESCRIPTION | +| ------------ | -------- | -------------------------------- | +| `type` | *string* | 定数文字列 `"anomalyDetection"` | +| `name` | *string* | 監視一覧などで参照できる任意の名称。 | +| `memo` | *string* | [optional] 監視設定のメモ。 | +| `scopes` | *array[string]* | 監視対象のサービス名とロール詳細名。[*2](#service-name) | +| `warningSensitivity` | *string* | warningのAlert発生の閾値。`insensitive`、`normal`、`sensitive`のいずれか。 | +| `criticalSensitivity` | *string* | criticalのAlert発生の閾値。`insensitive`、`normal`、`sensitive`のいずれか。 | +| `maxCheckAttempts` | *number* | [optional] 何回連続で Warning/Critical になったらアラートを発生させるか。デフォルトは3 (1~10)です。 | +| `trainingPeriodFrom` | *number* | [optional] 再学習させる際に起点となる時刻(epoch秒)。 | +| `notificationInterval` | *number* | [optional] 通知の再送設定をするときの再送間隔 (分)。このフィールドを省略すると通知は再送されません。 | +| `isMute` | *boolean* | [optional] 監視がミュート状態か否か | + +##### 入力例 + +```json +{ + "type": "anomalyDetection", + "name": "anomaly detection", + "memo": "my anomaly detection for roles", + "scopes": [ + "myService: myRole" + ], + "warningSensitivity": "insensitive", + "maxCheckAttempts": 3 +} +``` + +#### 応答(ロール内異常検知による監視) + +##### 成功時 + +```json +{ + "id" : "2cSZzK3XfmG", + "type": "anomalyDetection", + "name": "anomaly detection", + "memo": "my anomaly detection for roles", + "scopes": [ + "myService: myRole" + ], + "warningSensitivity": "insensitive", + "maxCheckAttempts": 3 +} +``` + +`id` が付与されて返却されます。 + +##### 失敗時 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    STATUS CODEDESCRIPTION
    400入力が受け付けられないフォーマットだったとき
    400nameが空文字列のとき
    400memoが250文字を超えているとき
    400scopesに指定されているサービス名やロール詳細名が未登録のとき
    400warningSensitivityまたはcriticalSensitivityinsensitive / normal / sensitive 以外の値が指定されたとき
    400再送間隔が10分以上でないとき
    400trainingPeriodFromが未来の値のとき
    403APIキーに書き込み権限がないとき
    + ----------------------------------------------

    監視設定の一覧

    diff --git a/content/ja/docs/entry/custom-checks.md b/content/ja/docs/entry/custom-checks.md index 9aa9bce..d336676 100644 --- a/content/ja/docs/entry/custom-checks.md +++ b/content/ja/docs/entry/custom-checks.md @@ -23,6 +23,7 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs-ja.hatenablog.mack ```config [plugin.checks.ssh] command = ["ruby", "/path/to/check-ssh.rb"] +custom_identifier = "SOME_IDENTIFIER" # optional notification_interval = 60 max_check_attempts = 1 check_interval = 5 @@ -35,6 +36,9 @@ memo = "This check monitor is ..." - 項目名: 設定ファイル用のキーで、"plugin.checks." で始まっている必要があり、含まれるドットの数はちょうど2である必要があります。2つめのドット以降は監視設定の名前として利用されます。 - command: エージェントが定期的に実行し、その終了ステータス/標準出力を監視結果として使用するコマンドです。コマンドは後述する仕様に沿って動作する必要があります。 +- custom_identifier: 監視の実行結果を、 agent が動作しているホストではなく指定した識別子のホストの監視として送信します。 + - チェック結果が OK でなかった場合、ここで指定したホストのアラートとして発報されます。 + - AWS / Azure インテグレーションの連携ホストに監視を追加する場合などに有用です。詳細は[AWSインテグレーションのドキュメント](https://mackerel.io/ja/docs/entry/integrations/aws#plugin-custom-identifier)をご覧下さい。 - notification_interval: アラートの再送間隔を分で指定します。省略した場合、アラートは再送通知されません。10分未満は指定できません。10分未満を指定した場合は、10分間隔で通知を再送します。 - max_check_attempts: 最大試行回数を指定します。ここで指定した回数以上、OK以外のチェック結果が続いた場合にアラートを発報します。たとえば3が設定されている場合、OK以外の状態が3回続いた場合にアラートとなります。**`prevent_alert_auto_close` と併用した場合、指定した数値に関わらず `1` として扱われます。** - check_interval: チェック監視の実行間隔を分で指定します。デフォルト値は1分です。設定可能な範囲は1分から60分で、1分未満の場合は1分、60分以上を指定した場合は60分間隔で監視が実行されます。 diff --git a/content/ja/docs/entry/faq/contracts/combined-integration.md b/content/ja/docs/entry/faq/contracts/combined-integration.md index f2c1be3..e09bd88 100644 --- a/content/ja/docs/entry/faq/contracts/combined-integration.md +++ b/content/ja/docs/entry/faq/contracts/combined-integration.md @@ -8,7 +8,7 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs-ja.hatenablog.mack mackerel-agent と AWS / Azure インテグレーションを併用した場合、その2つの機能から取得されるメトリックは一つのホストに集約されますので、スタンダードホストもしくはマイクロホスト1台分のご利用料金で利用できます。 - EC2 / Azure VM インスタンスに mackerel-agent 
がインストールされ、インテグレーションによる連携も行われている場合: 1スタンダードホスト -- その他のサービスで、インテグレーションにより登録されたホストに対して、[custom_identifierを使用してメトリックを投稿](https://mackerel.io/ja/docs/entry/integrations/aws#plugin-custom-identifier)している場合: 1マイクロホスト +- その他のサービスで、インテグレーションにより登録されたホストに対して、[custom_identifierを使用してプラグインによる監視を集約](https://mackerel.io/ja/docs/entry/integrations/aws#plugin-custom-identifier)している場合: 1マイクロホスト ただし、ホストあたりのメトリック数の上限などについては変わりませんので、その点はご注意ください。メトリック数超過などに関する注意事項は、[プラン上限超過時のホスト台数換算について](https://mackerel.io/ja/docs/entry/faq/contracts/limit-exceeded-conversion) を参照してください。 diff --git a/content/ja/docs/entry/howto/anomaly-detection-for-roles.md b/content/ja/docs/entry/howto/anomaly-detection-for-roles.md index a328c0d..5b66055 100644 --- a/content/ja/docs/entry/howto/anomaly-detection-for-roles.md +++ b/content/ja/docs/entry/howto/anomaly-detection-for-roles.md @@ -38,3 +38,6 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs-ja.hatenablog.mack - 異常検知の学習が完了するまでは監視は行なわれません - 同一のロールに対して異常検知による監視を複数指定することはできません - Trialプラン及び有料プランでのみご利用いただけます + - 監視対象となっている5ホスト単位でスタンダードホスト1台分としてカウントします + - 同一のホストが複数のロールを通して監視されている場合、重複してカウントします + - 監視対象ホスト1台から利用料金が発生します diff --git a/content/ja/docs/entry/howto/container-agent.md b/content/ja/docs/entry/howto/container-agent.md index 6295c7f..74f4e5d 100644 --- a/content/ja/docs/entry/howto/container-agent.md +++ b/content/ja/docs/entry/howto/container-agent.md @@ -95,6 +95,7 @@ mackerel-container-agentでは各コンテナオーケストレーションプ | MACKEREL_APIBASE | Mackerel APIのエンドポイントを指定します (デフォルト: `https://api.mackerelio.com/`)。 | | MACKEREL_ROLES | タスク、Podのサービス、ロールを設定できます。 | | MACKEREL_AGENT_CONFIG | エージェントの設定ファイルを設定できます。こちらの詳細は後述します。 | +| MACKEREL_AGENT_CONFIG_POLLING_DURATION_MINUTES | エージェントの設定ファイルの変更を検知するために取得する間隔を分で指定します。| | MACKEREL_IGNORE_CONTAINER | 監視を除外するコンテナの名前を正規表現で設定します。 | | MACKEREL_HOST_STATUS_ON_START | 設定すると、エージェント起動時にホストのステータスを指定した値に変更します。 有効な値は "standby", "working", "maintenance", "poweroff" のいずれかです。| @@ -119,6 +120,8 @@ roles: ignoreContainer: 
'\Amackerel-container-agent\z' ``` +デフォルトでは、設定ファイルは起動時に一度だけ読み込まれます。 `MACKEREL_AGENT_CONFIG_POLLING_DURATION_MINUTES` を設定することで、定期的に設定ファイルを取得して、変更を検知して適用します。 + #### 設定項目 | 項目名 | 説明 | diff --git a/content/ja/docs/entry/howto/install-agent/amazon-linux.md b/content/ja/docs/entry/howto/install-agent/amazon-linux.md index b32a16e..7ca869c 100644 --- a/content/ja/docs/entry/howto/install-agent/amazon-linux.md +++ b/content/ja/docs/entry/howto/install-agent/amazon-linux.md @@ -7,7 +7,11 @@ EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs-ja.hatenablog.mack このページに記載の内容は、[Mackerel Web画面・新規ホスト登録画面](https://mackerel.io/my/instruction-agent)からも利用可能です。 -

    Amazon Linux 2 LTS をご利用の場合

    +

    パッケージをインストールする

    +Amazon Linux と Amazon Linux 2 LTS でインストール時に実行するコマンドが異なります。それぞれの手順を確認して下さい。 +また、 `rpm` コマンドを用いて直接インストールすることもできます。 + +

    Amazon Linux 2 LTS をご利用の場合

    以下のコマンドを実行してください: @@ -17,7 +21,7 @@ curl -fsSL https://mackerel.io/file/script/amznlinux/setup-all-yum-v2.sh | MACKE APIキーは[オーガニゼーションページ内・APIキータブ](https://mackerel.io/my?tab=apikeys)から確認できます。このAPIキーでオーガニゼーションを識別しますので、APIキーは外部に漏らさないようご注意ください。 -

    Amazon Linux をご利用の場合

    +

    Amazon Linux をご利用の場合

    以下のコマンドを実行してください: @@ -27,17 +31,37 @@ curl -fsSL https://mackerel.io/file/script/amznlinux/setup-all-yum.sh | MACKEREL APIキーは[オーガニゼーションページ内・APIキータブ](https://mackerel.io/my?tab=apikeys)から確認できます。このAPIキーでオーガニゼーションを識別しますので、APIキーは外部に漏らさないようご注意ください。 -

    設定ファイルを編集

    +

    Amazon Linux 2 LTS で rpmコマンドを使用する

    -`/etc/mackerel-agent/mackerel-agent.conf` ファイルを編集して、APIキーを設定してください。 +`rpm`コマンドでインストールするには、以下のコマンドを実行します: ``` -apikey = "" +sudo rpm -ivh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.amzn2.x86_64.rpm ``` -APIキーは[オーガニゼーションページ内・APIキータブ](https://mackerel.io/my?tab=apikeys)から確認できます。このAPIキーでオーガニゼーションを識別しますので、APIキーは外部に漏らさないようご注意ください。 +アップデートの際は、以下のコマンドを実行してください: -詳細は[mackerel-agent仕様](https://mackerel.io/ja/docs/entry/spec/agent)をご覧ください。 +``` +sudo rpm -Uvh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.amzn2.x86_64.rpm +``` + +

    Amazon Linux で rpmコマンドを使用する

    + +`rpm`コマンドでインストールするには、以下のコマンドを実行します: + +``` +sudo rpm -ivh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.x86_64.rpm +``` + +アップデートの際は、以下のコマンドを実行してください: + +``` +sudo rpm -Uvh https://mackerel.io/file/agent/rpm/mackerel-agent-latest.x86_64.rpm +``` + +

    設定ファイルを編集

    + +`/etc/mackerel-agent/mackerel-agent.conf` ファイルを編集してエージェントを設定します。 設定ファイルを利用して、以下のことを実現できます: @@ -45,6 +69,16 @@ APIキーは[オーガニゼーションページ内・APIキータブ](https:// - [カスタムメトリックを投稿する](https://mackerel.io/ja/docs/entry/advanced/custom-metrics) - [チェック監視項目を追加する](https://mackerel.io/ja/docs/entry/custom-checks) +詳細は[mackerel-agent仕様](https://mackerel.io/ja/docs/entry/spec/agent)をご覧ください。 + +また、 rpm からインストールした場合はAPIキーの設定も行って下さい。セットアップスクリプトによりセットアップした場合は自動的に設定されています。 + +``` +apikey = "" +``` + +APIキーは[オーガニゼーションページ内・APIキータブ](https://mackerel.io/my?tab=apikeys)から確認できます。このAPIキーでオーガニゼーションを識別しますので、APIキーは外部に漏らさないようご注意ください。 +

    エージェントを起動する

    Amazon Linux 2 LTS をご利用の場合

    diff --git a/content/ja/docs/entry/integrations/aws.md b/content/ja/docs/entry/integrations/aws.md index cf391bc..842baae 100644 --- a/content/ja/docs/entry/integrations/aws.md +++ b/content/ja/docs/entry/integrations/aws.md @@ -15,7 +15,7 @@ AWSインテグレーションは現在は以下のAWSクラウド製品に対 [EC2](https://mackerel.io/ja/docs/entry/integrations/aws/ec2)・[ELB (CLB)](https://mackerel.io/ja/docs/entry/integrations/aws/elb)・[ALB](https://mackerel.io/ja/docs/entry/integrations/aws/alb)・[NLB](https://mackerel.io/ja/docs/entry/integrations/aws/nlb)・[RDS](https://mackerel.io/ja/docs/entry/integrations/aws/rds)・[ElastiCache](https://mackerel.io/ja/docs/entry/integrations/aws/elasticache)・[Redshift](https://mackerel.io/ja/docs/entry/integrations/aws/redshift)・[Lambda](https://mackerel.io/ja/docs/entry/integrations/aws/lambda)・[SQS](https://mackerel.io/ja/docs/entry/integrations/aws/sqs)・[DynamoDB](https://mackerel.io/ja/docs/entry/integrations/aws/dynamodb)・[CloudFront](https://mackerel.io/ja/docs/entry/integrations/aws/cloudfront) ・[API Gateway](https://mackerel.io/ja/docs/entry/integrations/aws/apigateway) -・[Kinesis](https://mackerel.io/ja/docs/entry/integrations/aws/kinesis)・[S3](https://mackerel.io/ja/docs/entry/integrations/aws/s3)・[ES](https://mackerel.io/ja/docs/entry/integrations/aws/es)・[ECS](https://mackerel.io/ja/docs/entry/integrations/aws/ecs)・[SES](https://mackerel.io/ja/docs/entry/integrations/aws/ses)・[Step Functions](https://mackerel.io/ja/docs/entry/integrations/aws/states)・[EFS](https://mackerel.io/ja/docs/entry/integrations/aws/efs) +・[Kinesis](https://mackerel.io/ja/docs/entry/integrations/aws/kinesis)・[S3](https://mackerel.io/ja/docs/entry/integrations/aws/s3)・[ES](https://mackerel.io/ja/docs/entry/integrations/aws/es)・[ECS](https://mackerel.io/ja/docs/entry/integrations/aws/ecs)・[SES](https://mackerel.io/ja/docs/entry/integrations/aws/ses)・[Step 
Functions](https://mackerel.io/ja/docs/entry/integrations/aws/states)・[EFS](https://mackerel.io/ja/docs/entry/integrations/aws/efs)・[Kinesis Data Firehose](https://mackerel.io/ja/docs/entry/integrations/aws/firehose)

    連携方法

    AWSインテグレーションの連携方法には2つの方法があります。 @@ -54,7 +54,8 @@ FullAccess権限を付与しないようにご注意ください。また、ひ - `AmazonSESReadOnlyAccess / ses:Describe*` - `AWSStepFunctionsReadOnlyAccess` - `AmazonElasticFileSystemReadOnlyAccess` -- `CloudWatchReadOnlyAccess`(CloudFrontのみ、API Gatewayのみ、Kinesisのみ、S3のみ、ESのみ、ECSのみ、SESのみ、Step Functionsのみ、またはEFSのみを設定する場合) +- `AmazonKinesisFirehoseReadOnlyAccess` +- `CloudWatchReadOnlyAccess`(CloudFrontのみ、API Gatewayのみ、Kinesisのみ、S3のみ、ESのみ、ECSのみ、SESのみ、Step Functionsのみ、EFSのみ、またはFirehoseのみを設定する場合) また、AWSインテグレーションでは後述するようにタグによって絞り込みを行うことが出来ますが、ElastiCacheやSQSでタグによる絞り込みを行う場合は追加のポリシーを付与する必要があります。 詳しくはタグで絞り込む の項目を参照してください。 @@ -107,7 +108,8 @@ FullAccess権限を付与しないようにご注意ください。また、ひ - `AmazonSESReadOnlyAccess / ses:Describe*` - `AWSStepFunctionsReadOnlyAccess` - `AmazonElasticFileSystemReadOnlyAccess` -- `CloudWatchReadOnlyAccess`(CloudFrontのみ、API Gatewayのみ、Kinesisのみ、S3のみ、ESのみ、ECSのみ、SESのみ、Step Functionsのみ、またはEFSのみを設定する場合) +- `AmazonKinesisFirehoseReadOnlyAccess` +- `CloudWatchReadOnlyAccess`(CloudFrontのみ、API Gatewayのみ、Kinesisのみ、S3のみ、ESのみ、ECSのみ、SESのみ、Step Functionsのみ、EFSのみ、またはFirehoseのみを設定する場合) また、AWSインテグレーションでは後述するようにタグによって絞り込みを行うことが出来ますが、ElastiCacheやSQSでタグによる絞り込みを行う場合は追加のポリシーを付与する必要があります。 詳しくはタグで絞り込む の項目を参照してください。 @@ -161,11 +163,16 @@ Mackerelの設定画面でタグを指定します。連携ホスト数を確認 仮に退役作業をしない場合でも、ホスト情報が残り続けるだけで、メトリック投稿のないホストは課金対象にはなりません。 -

    プラグインにより取得したカスタムメトリックの連携ホストへの集約に関して

    +

    プラグインによる監視内容の連携ホストへの集約に関して

    -mackerel-agent の plugin 設定には、`custom_identifier` を指定できます。`custom_identifier` とは、ホストの識別子としてユーザー独自の identifier を付与するための仕組みです。これを利用して、別のマシンにインストールした mackerel-agent から投稿されたメトリックを、AWSインテグレーション連携ホストのメトリックとして集約できます。`custom_identifier` は、カスタムメトリックを投稿するためのプラグイン設定に指定します。 +mackerel-agent のカスタムメトリックとチェック監視の plugin 設定には、`custom_identifier` を指定できます。`custom_identifier` とは、ホストの識別子としてユーザー独自の identifier を付与するための仕組みです。これを利用して、別のマシンにインストールした mackerel-agent から投稿されたメトリックやチェック監視を、AWSインテグレーション連携ホストの物として集約できます。`custom_identifier` は、対応するプラグイン設定に指定します。 -例として、Amazon RDS と [mackerel-plugin-mysql](https://github.com/mackerelio/mackerel-agent-plugins/tree/master/mackerel-plugin-mysql) プラグインを利用している場合、mackerel-agent.conf のプラグイン設定に、以下のように `custom_identifier` の記述を追加することで、プラグインで取得したメトリックをRDSホストのカスタムメトリックとして集約できます。 +例えば Amazon RDS の場合はそのエンドポイントが、ELB の場合は DNS Name が、それぞれ `custom_identifier` 文字列となります。 + +#### 利用例 +以下にふたつの利用例を紹介します。いずれの場合も、mackerel-agent の設定ファイルへの追記後はエージェントの再起動が必要です。 + +ひとつめの例は、Amazon RDS に対する [mackerel-plugin-mysql](https://github.com/mackerelio/mackerel-agent-plugins/tree/master/mackerel-plugin-mysql) プラグインを用いた MySQL 監視です。mackerel-agent.conf の設定に以下のように `custom_identifier` を含むプラグイン設定を追加することで、プラグインで取得したメトリックをRDSホストのカスタムメトリックとして集約できます。 ``` [plugin.metrics.mysql] @@ -173,6 +180,11 @@ command = ["mackerel-plugin-mysql", "-host", "RDSのエンドポイント", "-us custom_identifier = "RDSのエンドポイント" ``` -Amazon RDS の場合はそのエンドポイントが、ELB の場合は DNS Name が、それぞれ `custom_identifier` 文字列となります。 +ふたつめの例は、 Amazon Elasticsearch Service と [check-elasticsearch](https://github.com/mackerelio/go-check-plugins/tree/master/check-elasticsearch) プラグインを用いた Elasticsearch 監視です。mackerel-agent.conf の設定に以下のように `custom_identifier` を含むプラグイン設定を追加することで、 Elasticsearch Service クラスターのヘルスチェックを Elasticsearch Service ホストのチェック監視として集約できます。 + +``` +[plugin.checks.elasticsearch] +command = ["check-elasticsearch", "-s", "https", "-H", "Elasticsearch Service のエンドポイント", "-p", "443"] +custom_identifier = "Elasticsearch Service の ARN" +``` -conf 
ファイルへの追記後は、エージェントの再起動が必要です。 diff --git a/content/ja/docs/entry/integrations/aws/firehose.md b/content/ja/docs/entry/integrations/aws/firehose.md new file mode 100644 index 0000000..0bcbc0a --- /dev/null +++ b/content/ja/docs/entry/integrations/aws/firehose.md @@ -0,0 +1,42 @@ +--- +Title: AWSインテグレーション - Kinesis Data Firehose +Date: 2019-07-16T10:00:00+09:00 +URL: https://mackerel.io/ja/docs/entry/integrations/aws/firehose +EditURL: https://blog.hatena.ne.jp/mackerelio/mackerelio-docs-ja.hatenablog.mackerel.io/atom/entry/26006613376610645 +CustomPath: integrations/aws/firehose +--- + +MackerelはAWSインテグレーションにてAmazon Kinesis Data Firehoseのメトリック取得や監視に対応しています。AWSインテグレーションで連携を行なった場合、課金対象として1デリバリーストリーム = 1マイクロホストと換算します。またそれに加えて、取得されるメトリックの数に応じて、1マイクロホストあたりのメトリック数上限の超過による請求が行われる場合があります。 + +AWSインテグレーションの設定方法や対応AWSサービス一覧についてはこちらのページをご確認ください。
    +AWSインテグレーション + +## 取得メトリック +AWSインテグレーションのKinesis Data Firehose対応で取得できるメトリックは以下の通りです。`メトリック`の説明に関してはAWSのヘルプをご確認ください。 + +送信先に指定するサービス毎に、最大で以下のメトリックが取得されます。 + +- S3の場合:33個 +- Elasticsearch Serviceの場合:29個 +- Redshiftの場合:33個 +- Splunkの場合:30個 + +|グラフ名|メトリック|Mackerel上のメトリック名|単位|Statistics| +|:--|:--|:--|:--|:--| +|Service Bytes|BackupToS3.Bytes
    DeliveryToElasticsearch.Bytes
    DeliveryToRedshift.Bytes
    DeliveryToS3.Bytes
    DeliveryToSplunk.Bytes
    IncomingBytes
    DataReadFromKinesisStream.Bytes|firehose.service_bytes.backup_to_s3
    firehose.service_bytes.delivery_to_elasticsearch
    firehose.service_bytes.delivery_to_redshift
    firehose.service_bytes.delivery_to_s3
    firehose.service_bytes.delivery_to_splunk
    firehose.service_bytes.incoming
    firehose.service_bytes.read_from_kinesis_streams|bytes|Sum| +|Service Records|BackupToS3.Records
    DeliveryToElasticsearch.Records
    DeliveryToRedshift.Records
    DeliveryToS3.Records
    DeliveryToSplunk.Records
    IncomingRecords
    DataReadFromKinesisStream.Records|firehose.service_records.backup_to_s3
    firehose.service_records.delivery_to_elasticsearch
    firehose.service_records.delivery_to_redshift
    firehose.service_records.delivery_to_s3
    firehose.service_records.delivery_to_splunk
    firehose.service_records.incoming
    firehose.service_records.read_from_kinesis_streams|integer|Sum| +|Service Success|BackupToS3.Success
    DeliveryToElasticsearch.Success
    DeliveryToRedshift.Success
    DeliveryToS3.Success
    DeliveryToSplunk.Success|firehose.service_success.backup_to_s3
    firehose.service_success.delivery_to_elasticsearch
    firehose.service_success.delivery_to_redshift
    firehose.service_success.delivery_to_s3
    firehose.service_success.delivery_to_splunk|integer|Average| +|Service Data Freshness|BackupToS3.DataFreshness
    DeliveryToElasticsearch.DataFreshness
    DeliveryToRedshift.DataFreshness
    DeliveryToS3.DataFreshness
    DeliveryToSplunk.DataFreshness|firehose.service_data_freshness.backup_to_s3
    firehose.service_data_freshness.delivery_to_elasticsearch
    firehose.service_data_freshness.delivery_to_redshift
    firehose.service_data_freshness.delivery_to_s3
    firehose.service_data_freshness.delivery_to_splunk|float|Maximum| +|Service Data Ack Latency|DeliveryToSplunk.DataAckLatency|firehose.service_data_ack_latency.delivery_to_splunk|float|Maximum| +|Millis Behind Latest|KinesisMillisBehindLatest|firehose.millis_behind_latest.kinesis|integer|Sum| +|API Latency|DescribeDeliveryStream.Latency
    PutRecord.Latency
    PutRecordBatch.Latency
    UpdateDeliveryStream.Latency|firehose.api_latency.describe_delivery_stream_average
    firehose.api_latency.describe_delivery_stream_maximum
    firehose.api_latency.put_record_average
    firehose.api_latency.put_record_maximum
    firehose.api_latency.put_record_batch_average
    firehose.api_latency.put_record_batch_maximum
    firehose.api_latency.update_delivery_stream_average
    firehose.api_latency.update_delivery_stream_maximum|float|Average
    Maximum| +|API Bytes|PutRecord.Bytes
    PutRecordBatch.Bytes|firehose.api_bytes.put_record
    firehose.api_bytes.put_record_batch|bytes|Sum| +|API Requests|DescribeDeliveryStream.Requests
    PutRecord.Requests
    PutRecordBatch.Requests
    UpdateDeliveryStream.Requests|firehose.api_requests.describe_delivery_stream
    firehose.api_requests.put_record
    firehose.api_requests.put_record_batch
    firehose.api_requests.update_delivery_stream|integer|Sum| +|API Records|PutRecordBatch.Records|firehose.api_records.put_record_batch|integer|Sum| +|API Throttled|ThrottledDescribeStream
    ThrottledGetRecords
    ThrottledGetShardIterator|firehose.api_throttled.describe_stream
    firehose.api_throttled.get_records
    firehose.api_throttled.get_shard_iterator|integer|Sum| +|Data Transformation Duration|ExecuteProcessing.Duration|firehose.data_transformation_duration|float|Average| +|Data Transformation Success|ExecuteProcessing.Success|firehose.data_transformation_success.execute|integer|Average| +|Data Transformation Records|SucceedProcessing.Records|firehose.data_transformation_records.succeed|integer|Sum| +|Data Transformation Bytes|SucceedProcessing.Bytes|firehose.data_transformation_bytes.succeed|bytes|Sum| +|Format Conversion Records|SucceedConversion.Records
    FailedConversion.Records|firehose.format_conversion_records.succeed
    firehose.format_conversion_records.failed|integer|Sum|
    FailedConversion.Bytes|firehose.format_conversion_bytes.succeed
    firehose.format_conversion_bytes.failed|bytes|Sum|