From f98c2660df113edc3f3acd109a329689634aa1ed Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Sat, 18 Nov 2023 22:18:28 -0800 Subject: [PATCH 01/10] add AWS support for some services --- cmd/cloudprober.go | 5 +- go.mod | 14 +- go.sum | 23 +- internal/rds/aws/aws.go | 148 +++++ internal/rds/aws/aws_test.go | 100 ++++ internal/rds/aws/ec2.go | 198 +++++++ internal/rds/aws/elasticache.go | 251 +++++++++ internal/rds/aws/proto/config.pb.go | 529 ++++++++++++++++++ internal/rds/aws/proto/config.proto | 75 +++ internal/rds/aws/proto/config_proto_gen.cue | 53 ++ internal/rds/aws/rds.go | 202 +++++++ internal/rds/aws/testdata/targets.json | 31 + internal/rds/aws/testdata/targets1.textpb | 23 + internal/rds/aws/testdata/targets2.textpb | 11 + internal/rds/server/proto/config.pb.go | 110 ++-- internal/rds/server/proto/config.proto | 3 + .../rds/server/proto/config_proto_gen.cue | 3 + internal/rds/server/server.go | 9 + 18 files changed, 1733 insertions(+), 55 deletions(-) create mode 100644 internal/rds/aws/aws.go create mode 100644 internal/rds/aws/aws_test.go create mode 100644 internal/rds/aws/ec2.go create mode 100644 internal/rds/aws/elasticache.go create mode 100644 internal/rds/aws/proto/config.pb.go create mode 100644 internal/rds/aws/proto/config.proto create mode 100644 internal/rds/aws/proto/config_proto_gen.cue create mode 100644 internal/rds/aws/rds.go create mode 100644 internal/rds/aws/testdata/targets.json create mode 100644 internal/rds/aws/testdata/targets1.textpb create mode 100644 internal/rds/aws/testdata/targets2.textpb diff --git a/cmd/cloudprober.go b/cmd/cloudprober.go index eec8685fc28..ffbd3e58ada 100644 --- a/cmd/cloudprober.go +++ b/cmd/cloudprober.go @@ -22,6 +22,7 @@ package main import ( "context" + "flag" "fmt" "log/slog" "os" @@ -31,8 +32,6 @@ import ( "syscall" "time" - "flag" - "github.com/cloudprober/cloudprober" "github.com/cloudprober/cloudprober/config" "github.com/cloudprober/cloudprober/config/runconfig" @@ -41,7 +40,7 @@ import ( ) var ( - versionFlag = flag.Bool("version", false, "Print version and exit") + versionFlag = flag.Bool(" version", false, "Print version and exit") buildInfoFlag = flag.Bool("buildinfo", false, "Print build info and exit") stopTime = flag.Duration("stop_time", 0, "How long to wait for cleanup before process exits on SIGINT and SIGTERM") cpuprofile = flag.String("cpuprof", "", "Write cpu profile to file") diff --git a/go.mod b/go.mod index ed9e271d49b..743c9707abb 100644 --- a/go.mod +++ b/go.mod @@ -8,10 +8,13 @@ require ( cloud.google.com/go/logging v1.8.1 cloud.google.com/go/pubsub v1.33.0 github.com/Masterminds/sprig/v3 v3.2.3 - github.com/aws/aws-sdk-go-v2 v1.16.10 + github.com/aws/aws-sdk-go-v2 v1.23.0 github.com/aws/aws-sdk-go-v2/config v1.15.9 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.3 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.136.0 + github.com/aws/aws-sdk-go-v2/service/elasticache v1.32.2 + github.com/aws/aws-sdk-go-v2/service/rds v1.63.0 github.com/fullstorydev/grpcurl v1.8.7 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.3.1 @@ -41,13 +44,14 @@ require ( github.com/apache/arrow/go/v12 v12.0.0 // indirect github.com/apache/thrift v0.16.0 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.12.12 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources 
v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.11.15 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 // indirect - github.com/aws/smithy-go v1.12.1 // indirect + github.com/aws/smithy-go v1.17.0 // indirect github.com/bufbuild/protocompile v0.4.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect diff --git a/go.sum b/go.sum index 91a8b875952..3884a1274e7 100644 --- a/go.sum +++ b/go.sum @@ -61,8 +61,9 @@ github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/P github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= -github.com/aws/aws-sdk-go-v2 v1.16.10 h1:+yDD0tcuHRQZgqONkpDwzepqmElQaSlFPymHRHR9mrc= github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= +github.com/aws/aws-sdk-go-v2 v1.23.0 h1:PiHAzmiQQr6JULBUdvR8fKlA+UPKLT/8KbiqpFBWiAo= +github.com/aws/aws-sdk-go-v2 v1.23.0/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA= github.com/aws/aws-sdk-go-v2/config v1.15.9 h1:TK5yNEnFDQ9iaO04gJS/3Y+eW8BioQiCUafW75/Wc3Q= github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs= github.com/aws/aws-sdk-go-v2/credentials v1.12.4/go.mod h1:7g+GGSp7xtR823o1jedxKmqRZGqLdoHQfI4eFasKKxs= @@ -72,19 +73,30 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5/go.mod h1:WAPnuhG5IQ/i6DET github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 h1:zZHPdM2x09/0F8D7XyVvQnP2/jaW7bEMmtcSCPYq/iI= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11/go.mod h1:38Asv/UyQbDNpSXCurZRlDMjzIl6J+wUe8vY3TtUuzA= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17 h1:U8DZvyFFesBmK62dYC6BRXm4Cd/wPP3aPcecu3xv/F4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17/go.mod h1:6qtGip7sJEyvgsLjphRZWF9qPe3xJf1mL/MM01E35Wc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3 h1:DUwbD79T8gyQ23qVXFUthjzVMTviSHi3y4z58KvghhM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3/go.mod h1:7sGSz1JCKHWWBHq98m6sMtWQikmYPpxjqOydDemiVoM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11 h1:GMp98usVW5tzQhxd26KWhoNQPlR2noIlfbzqjVGBhLU= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11/go.mod h1:cYAfnB+9ZkmZWpQWmPDsuIGm4EA+6k2ZVtxKjw/XJBY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 h1:AplLJCtIaUZDCbr6+gLYdsYNxne4iuaboJhVt9d+WXI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3/go.mod h1:ify42Rb7nKeDDPkFjKn7q1bPscVPu/+gmHH8d2c+anU= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12/go.mod h1:00c7+ALdPh4YeEUPXJzyU0Yy01nPGOq2+9rUaz05z9g= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18 h1:/spg6h3tG4pefphbvhpgdMtFMegSajPPSEJd1t8lnpc= 
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18/go.mod h1:hTHq8hL4bAxJyng364s9d4IUGXZOs7Y5LSqAhIiIQ2A= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.3 h1:PK6c4wYv3wbb88eH0X0FjJwRykEoJwAesuslNReY7iE= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.3/go.mod h1:BrAJyOMrnwzYVQcP5ziqlCpnEuFfkNppZLzqDyW/YTg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.136.0 h1:nZPVFkGojUUJupKJzaCKE07LaFDO3Tto1U69F8JipsI= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.136.0/go.mod h1:xYJZQIo/YZxEbeBxUYRQJTCJ924EuKtDfrhVx76yzOE= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.32.2 h1:3V4GHH05PtNsDjXlKXVbIw0vSjSNUQoPQEtILns4BMs= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.32.2/go.mod h1:p5/Vq+5NiyqnyQyH5JVK3lrHDygbiQiqdCNaDT018I8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5/go.mod h1:ZbkttHXaVn3bBo/wpJbQGiiIWR90eTBUVBrEHUEQlho= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11 h1:GkYtp4gi4wdWUV+pPetjk5y2aDxbr0t8n5OjVBwZdII= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11/go.mod h1:OEofCUKF7Hri4ShOCokF6k6hGq9PCB2sywt/9rLSXjY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 h1:kJOolE8xBAD13xTCgOakByZkyP4D/owNmvEiioeUNAg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3/go.mod h1:Owv1I59vaghv1Ax8zz8ELY8DN7/Y0rGS+WWAmjgi950= +github.com/aws/aws-sdk-go-v2/service/rds v1.63.0 h1:Mb6mn6mRfTpI2WEZkLmmJ7jtiPHKzwJ8otzUA5rIq+o= +github.com/aws/aws-sdk-go-v2/service/rds v1.63.0/go.mod h1:C73+OVOdaMZNc/Z8PhnLoArk6mCqxkiqfgmCrvNrDDM= github.com/aws/aws-sdk-go-v2/service/sso v1.11.7/go.mod h1:TFVe6Rr2joVLsYQ1ABACXgOC6lXip/qpX2x5jWg/A9w= github.com/aws/aws-sdk-go-v2/service/sso v1.11.15 h1:HaIE5/TtKr66qZTJpvMifDxH4lRt2JZawbkLYOo1F+Y= github.com/aws/aws-sdk-go-v2/service/sso v1.11.15/go.mod h1:dDVD4ElJRTQXx7dOQ59EkqGyNU9tnwy1RKln+oLIOTU= @@ -92,8 +104,9 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.6/go.mod h1:rP1rEOKAGZoXp4iGDxSXF github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 h1:YU9UHPukkCCnETHEExOptF/BxPvGJKXO/NBx+RMQ/2A= github.com/aws/aws-sdk-go-v2/service/sts v1.16.12/go.mod h1:b53qpmhHk7mTL2J/tfG6f38neZiyBQSiNXGCuNKq4+4= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= -github.com/aws/smithy-go v1.12.1 h1:yQRC55aXN/y1W10HgwHle01DRuV9Dpf31iGkotjt3Ag= github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI= +github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/internal/rds/aws/aws.go b/internal/rds/aws/aws.go new file mode 100644 index 00000000000..84d70855812 --- /dev/null +++ b/internal/rds/aws/aws.go @@ -0,0 +1,148 @@ +// Copyright 2017-2023 The Cloudprober Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package aws implements a AWS (Amazon Web Services) resources provider for +ResourceDiscovery server. + +See ResourceTypes variable for the list of supported resource types. + +AWS provider is configured through a protobuf based config file +(proto/config.proto). Example config: + + { + aws_instances {} + } +*/ + +package aws + +import ( + "fmt" + + configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + serverconfigpb "github.com/cloudprober/cloudprober/internal/rds/server/proto" + "github.com/cloudprober/cloudprober/logger" + + "google.golang.org/protobuf/proto" +) + +// DefaultProviderID is the povider id to use for this provider if a provider +// id is not configured explicitly. +const DefaultProviderID = "aws" + +// ResourceTypes declares resource types supported by the AWS provider. +var ResourceTypes = struct { + EC2Instances, ElastiCaches, RDS string +}{ + "ec2_instances", + "elasticaches", + "rds", +} + +type lister interface { + listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) +} + +// Provider implements a AWS provider for a ResourceDiscovery server. +type Provider struct { + listers map[string]lister +} + +// ListResources returns the list of resources based on the given request. +func (p *Provider) ListResources(req *pb.ListResourcesRequest) (*pb.ListResourcesResponse, error) { + lr := p.listers[req.GetResourcePath()] + if lr == nil { + return nil, fmt.Errorf("unknown resource type: %s", req.GetResourcePath()) + } + + resources, err := lr.listResources(req) + return &pb.ListResourcesResponse{Resources: resources}, err +} + +func initAWSProject(c *configpb.ProviderConfig, l *logger.Logger) (map[string]lister, error) { + resourceLister := make(map[string]lister) + + // Enable EC2 instances lister if configured. + if c.GetEc2Instances() != nil { + lr, err := newEC2InstancesLister(c.GetEc2Instances(), c.GetRegion(), l) + if err != nil { + return nil, err + } + resourceLister[ResourceTypes.EC2Instances] = lr + } + + // Enable ElastiCaches lister if configured. + if c.GetElasticaches() != nil { + lr, err := newElastiCacheLister(c.GetElasticaches(), c.GetRegion(), l) + if err != nil { + return nil, err + } + resourceLister[ResourceTypes.ElastiCaches] = lr + } + + // Enable RDS (AWS) if configured. + if c.GetRds() != nil { + lr, err := newRdsLister(c.GetRds(), c.GetRegion(), l) + if err != nil { + return nil, err + } + resourceLister[ResourceTypes.RDS] = lr + } + + return resourceLister, nil +} + +// New creates a AWS provider for RDS server, based on the provided config. +func New(c *configpb.ProviderConfig, l *logger.Logger) (*Provider, error) { + resourceLister, err := initAWSProject(c, l) + if err != nil { + return nil, err + } + + return &Provider{ + listers: resourceLister, + }, nil +} + +// DefaultProviderConfig is a convenience function that builds and returns a +// basic AWS provider config based on the given parameters. 
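+// For example (a sketch, assuming the default provider id "aws"), calling it
+// with resTypes = map[string]string{ResourceTypes.EC2Instances: ""} and
+// reEvalSec = 300 is expected to produce a provider equivalent to:
+//
+//	provider {
+//	  id: "aws"
+//	  aws_config {
+//	    ec2_instances {
+//	      re_eval_sec: 300
+//	    }
+//	  }
+//	}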
+func DefaultProviderConfig(resTypes map[string]string, reEvalSec int) *serverconfigpb.Provider { + c := &configpb.ProviderConfig{} + + for k := range resTypes { + switch k { + case ResourceTypes.EC2Instances: + c.Ec2Instances = &configpb.EC2Instances{ + ReEvalSec: proto.Int32(int32(reEvalSec)), + } + + case ResourceTypes.ElastiCaches: + c.Elasticaches = &configpb.ElastiCaches{ + ReEvalSec: proto.Int32(int32(reEvalSec)), + } + + case ResourceTypes.RDS: + c.Rds = &configpb.RDS{ + ReEvalSec: proto.Int32(int32(reEvalSec)), + } + } + } + + return &serverconfigpb.Provider{ + Id: proto.String(DefaultProviderID), + Config: &serverconfigpb.Provider_AwsConfig{AwsConfig: c}, + } +} diff --git a/internal/rds/aws/aws_test.go b/internal/rds/aws/aws_test.go new file mode 100644 index 00000000000..b6647b38e9f --- /dev/null +++ b/internal/rds/aws/aws_test.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Cloudprober Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "testing" + + serverconfigpb "github.com/cloudprober/cloudprober/internal/rds/server/proto" +) + +func testAWSConfig(t *testing.T, pc *serverconfigpb.Provider, awsInstances bool, rdsConfig, elasticCachesConfig string, reEvalSec int) { + t.Helper() + + if pc.GetId() != DefaultProviderID { + t.Errorf("pc.GetId()=%s, wanted=%s", pc.GetId(), DefaultProviderID) + } + c := pc.GetAwsConfig() + + if !awsInstances { + if c.GetEc2Instances() != nil { + t.Errorf("c.GetEc2Instances()=%v, wanted=nil", c.GetEc2Instances()) + } + } else { + if c.GetEc2Instances() == nil { + t.Fatal("c.GetGceInstances() is nil, wanted=not-nil") + } + if c.GetEc2Instances().GetReEvalSec() != int32(reEvalSec) { + t.Errorf("AWS instance reEvalSec=%d, wanted=%d", c.GetEc2Instances().GetReEvalSec(), reEvalSec) + } + } + + // Verify that RDS config is set correctly. + if rdsConfig == "" { + if c.GetRds() != nil { + t.Errorf("c.GetRds()=%v, wanted=nil", c.GetRds()) + } + } else { + if c.GetRds() == nil { + t.Fatalf("c.GetRds()=nil, wanted=not-nil") + } + if c.GetRds().GetReEvalSec() != int32(reEvalSec) { + t.Errorf("RDS config reEvalSec=%d, wanted=%d", c.GetRds().GetReEvalSec(), reEvalSec) + } + } + + // Verify that Elasticache is set correctly. 
+ if elasticCachesConfig == "" { + if c.GetElasticaches() != nil { + t.Errorf("c.GetElasticaches()=%v, wanted=nil", c.GetElasticaches()) + } + } else { + if c.GetElasticaches() == nil { + t.Fatalf("c.GetElasticaches()=nil, wanted=not-nil") + } + if c.GetElasticaches().GetReEvalSec() != int32(reEvalSec) { + t.Errorf("Elasticaches config reEvalSec=%d, wanted=%d", c.GetElasticaches().GetReEvalSec(), reEvalSec) + } + } +} + +func TestDefaultProviderConfig(t *testing.T) { + resTypes := map[string]string{ + ResourceTypes.EC2Instances: "", + } + + c := DefaultProviderConfig(resTypes, 10) + testAWSConfig(t, c, true, "", "", 10) + + // Elasticache and RDS + testElastiCacheConfig := "elasticaches" + testRDSConfig := "rds" + + resTypes = map[string]string{ + ResourceTypes.ElastiCaches: testElastiCacheConfig, + ResourceTypes.RDS: testRDSConfig, + } + c = DefaultProviderConfig(resTypes, 10) + testAWSConfig(t, c, false, testRDSConfig, testElastiCacheConfig, 10) + + // EC2 instances, RTC and pub-sub + resTypes = map[string]string{ + ResourceTypes.EC2Instances: "", + ResourceTypes.ElastiCaches: testElastiCacheConfig, + ResourceTypes.RDS: testRDSConfig, + } + c = DefaultProviderConfig(resTypes, 10) + testAWSConfig(t, c, true, testRDSConfig, testElastiCacheConfig, 10) +} diff --git a/internal/rds/aws/ec2.go b/internal/rds/aws/ec2.go new file mode 100644 index 00000000000..9ab381ba9b6 --- /dev/null +++ b/internal/rds/aws/ec2.go @@ -0,0 +1,198 @@ +// Copyright 2017-2023 The Cloudprober Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/internal/rds/server/filter" + "github.com/cloudprober/cloudprober/logger" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "google.golang.org/protobuf/proto" +) + +// instanceInfo represents instance items that we fetch from the API. +type instanceInfo struct { + ID string + Tags map[string]string + IPAddr string +} + +// instanceData represents objects that we store in cache. +type instanceData struct { + ii *instanceInfo + lastUpdated int64 +} + +/* +AWSInstancesFilters defines filters supported by the ec2_instances resource +type. + + Example: + filter { + key: "name" + value: "cloudprober.*" + } + filter { + key: "labels.app" + value: "service-a" + } +*/ +var AWSInstancesFilters = struct { + RegexFilterKeys []string + LabelsFilter bool +}{ + []string{"name"}, + true, +} + +// ec2InstancesLister is a AWS EC2 instances lister. It implements a cache, +// that's populated at a regular interval by making the AWS API calls. +// Listing actually only returns the current contents of that cache. 
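+//
+// For illustration (hypothetical values), an instance i-0abc123 with private
+// IP 10.1.2.3 and tag app=web would be returned by listResources as a
+// resource along the lines of:
+//
+//	name: "i-0abc123"
+//	ip: "10.1.2.3"
+//	labels {
+//	  key: "app"
+//	  value: "web"
+//	}
+//	last_updated: <unix time of the last cache refresh>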
+type ec2InstancesLister struct { + c *configpb.EC2Instances + client *ec2.Client + l *logger.Logger + mu sync.RWMutex + names []string + cache map[string]*instanceData +} + +// listResources returns the list of resource records, where each record +// consists of an instance name and the IP address associated with it. IP address +// to return is selected based on the provided ipConfig. +func (il *ec2InstancesLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { + var resources []*pb.Resource + + allFilters, err := filter.ParseFilters(req.GetFilter(), AWSInstancesFilters.RegexFilterKeys, "") + if err != nil { + return nil, err + } + + nameFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.LabelsFilter + + il.mu.RLock() + defer il.mu.RUnlock() + + for _, name := range il.names { + ins := il.cache[name].ii + if ins == nil { + il.l.Errorf("ec2_instances: cached info missing for %s", name) + continue + } + + if nameFilter != nil && !nameFilter.Match(name, il.l) { + continue + } + if labelsFilter != nil && !labelsFilter.Match(ins.Tags, il.l) { + continue + } + + resources = append(resources, &pb.Resource{ + Name: proto.String(name), + Ip: proto.String(ins.IPAddr), + Labels: ins.Tags, + LastUpdated: proto.Int64(il.cache[name].lastUpdated), + }) + } + + il.l.Infof("ec2_instances.listResources: returning %d instances", len(resources)) + return resources, nil +} + +// expand runs equivalent API calls as "aws describe-instances", +// and is used to populate the cache. +func (il *ec2InstancesLister) expand(reEvalInterval time.Duration) { + il.l.Infof("ec2_instances.expand: expanding AWS EC2 targets") + + result, err := il.client.DescribeInstances(context.TODO(), nil) + if err != nil { + il.l.Errorf("ec2_instances.expand: error while listing instances: %v", err) + return + } + + var ids = make([]string, 0) + var cache = make(map[string]*instanceData) + + ts := time.Now().Unix() + for _, r := range result.Reservations { + for _, i := range r.Instances { + + if i.PrivateIpAddress == nil { + continue + } + + ii := &instanceInfo{ + ID: *i.InstanceId, + IPAddr: *i.PrivateIpAddress, + Tags: make(map[string]string), + } + + // Convert to map + for _, t := range i.Tags { + ii.Tags[*t.Key] = *t.Value + } + + cache[*i.InstanceId] = &instanceData{ii, ts} + ids = append(ids, *i.InstanceId) + } + } + + il.mu.Lock() + il.names = ids + il.cache = cache + il.mu.Unlock() + + il.l.Infof("ec2_instances.expand: got %d instances", len(ids)) +} + +func newEC2InstancesLister(c *configpb.EC2Instances, region string, l *logger.Logger) (*ec2InstancesLister, error) { + cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) + if err != nil { + return nil, fmt.Errorf("AWS configuration error : %v", err) + } + + client := ec2.NewFromConfig(cfg) + + il := &ec2InstancesLister{ + c: c, + client: client, + cache: make(map[string]*instanceData), + l: l, + } + + reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second + go func() { + il.expand(0) + // Introduce a random delay between 0-reEvalInterval before + // starting the refresh loop. If there are multiple cloudprober + // awsInstances, this will make sure that each instance calls AWS + // API at a different point of time. 
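+		// For example, with the default re_eval_sec of 600, the first
+		// refresh happens at a random point within the first 10 minutes,
+		// and every 10 minutes after that.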
+ randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) + time.Sleep(time.Duration(randomDelaySec) * time.Second) + for range time.Tick(reEvalInterval) { + il.expand(reEvalInterval) + } + }() + return il, nil +} diff --git a/internal/rds/aws/elasticache.go b/internal/rds/aws/elasticache.go new file mode 100644 index 00000000000..2bbfc20bb60 --- /dev/null +++ b/internal/rds/aws/elasticache.go @@ -0,0 +1,251 @@ +package aws + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/internal/rds/server/filter" + "github.com/cloudprober/cloudprober/logger" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + "google.golang.org/protobuf/proto" +) + +// cacheInfo represents instance items that we fetch from the elasticache API. +type cacheInfo struct { + ID string + Ip string + Port int32 + Clustered bool + TLSEnabled bool + Engine string + Tags map[string]string +} + +// cacheData represents objects that we store in cache. +type cacheData struct { + ci *cacheInfo + lastUpdated int64 +} + +/* +AWSInstancesFilters defines filters supported by the ec2_instances resource +type. + + Example: + filter { + key: "name" + value: "service.*" + } + filter { + key: "engine" + value: "redis" + } + filter { + key: "labels.app" + value: "service-a" + } +*/ + +var ElastiCacheFilters = struct { + RegexFilterKeys []string + LabelsFilter bool +}{ + []string{"name", "engine"}, + true, +} + +// elastiCacheLister is a AWS ElastiCache cluster lister. It implements a cache, +// that's populated at a regular interval by making the AWS API calls. +// Listing actually only returns the current contents of that cache. +type elastiCacheLister struct { + c *configpb.ElastiCaches + client *elasticache.Client + l *logger.Logger + mu sync.RWMutex + names []string + cacheList map[string]*cacheData +} + +// listResources returns the list of resource records, where each record +// consists of an cluster name and the endpoint associated with it. +func (cl *elastiCacheLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { + var resources []*pb.Resource + + allFilters, err := filter.ParseFilters(req.GetFilter(), AWSInstancesFilters.RegexFilterKeys, "") + if err != nil { + return nil, err + } + + nameFilter, engineFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.RegexFilters["engine"], allFilters.LabelsFilter + + cl.mu.RLock() + defer cl.mu.RUnlock() + + for _, name := range cl.names { + ins := cl.cacheList[name].ci + if ins == nil { + cl.l.Errorf("elacticaches: cached info missing for %s", name) + continue + } + + if nameFilter != nil && !nameFilter.Match(name, cl.l) { + continue + } + if labelsFilter != nil && !labelsFilter.Match(ins.Tags, cl.l) { + continue + } + + if engineFilter != nil && !engineFilter.Match(ins.Engine, cl.l) { + continue + } + + resources = append(resources, &pb.Resource{ + Name: proto.String(name), + Ip: proto.String(ins.Ip), + Port: proto.Int32(ins.Port), + Labels: ins.Tags, + LastUpdated: proto.Int64(cl.cacheList[name].lastUpdated), + }) + } + + cl.l.Infof("elasticaches.listResources: returning %d instances", len(resources)) + return resources, nil +} + +// expand runs equivalent API calls as "aws describe-instances", +// and is used to populate the cache. 
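+// Concretely, it calls DescribeCacheClusters and DescribeReplicationGroups
+// (roughly "aws elasticache describe-cache-clusters" and
+// "aws elasticache describe-replication-groups" on the CLI), plus
+// ListTagsForResource for each cluster or replication group to fetch tags.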
+func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { + cl.l.Infof("elasticaches.expand: expanding AWS targets") + + resCC, err := cl.client.DescribeCacheClusters(context.TODO(), nil) + if err != nil { + cl.l.Errorf("elasticaches.expand: error while listing cache clusters: %v", err) + return + } + + var ids = make([]string, 0) + var cacheList = make(map[string]*cacheData) + + ts := time.Now().Unix() + for _, r := range resCC.CacheClusters { + ci := &cacheInfo{ + ID: *r.CacheClusterId, + TLSEnabled: *r.TransitEncryptionEnabled, + Ip: *r.CacheNodes[0].Endpoint.Address, + Port: *r.CacheNodes[0].Endpoint.Port, + Engine: *r.Engine, + Clustered: false, + Tags: make(map[string]string), + } + + // AWS doesn't return Tag information in the response, we'll need to request it separately + // NB: This might get throttled by AWS, if we make too many requests, see if we can batch or slow down + // Add sleep if needed to the end of the loop + tagsResp, err := cl.client.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ + ResourceName: r.ARN, + }) + if err != nil { + cl.l.Errorf("elasticaches.expand: error getting tags for cluster %s: %v", *r.CacheClusterId, err) + continue + } + + // Convert to map + for _, t := range tagsResp.TagList { + ci.Tags[*t.Key] = *t.Value + } + + cacheList[*r.CacheClusterId] = &cacheData{ci, ts} + ids = append(ids, *r.CacheClusterId) + } + + resRG, err := cl.client.DescribeReplicationGroups(context.TODO(), nil) + if err != nil { + cl.l.Errorf("elasticaches.expand: error while listing replication groups: %v", err) + return + } + for _, r := range resRG.ReplicationGroups { + tlsEnabled := *r.TransitEncryptionEnabled + + var ci *cacheInfo + + if r.ConfigurationEndpoint != nil { //clustered + ci = &cacheInfo{ + Ip: *r.ConfigurationEndpoint.Address, + Port: *r.ConfigurationEndpoint.Port, + TLSEnabled: tlsEnabled, + Clustered: true, + } + } else if len(r.NodeGroups) > 0 && r.NodeGroups[0].PrimaryEndpoint != nil { + ci = &cacheInfo{ + Ip: *r.NodeGroups[0].PrimaryEndpoint.Address, + Port: *r.NodeGroups[0].PrimaryEndpoint.Port, + TLSEnabled: tlsEnabled, + Clustered: false, + } + } else { + continue + } + + // Same comments as the same calls above + tagsResp, err := cl.client.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ + ResourceName: r.ARN, + }) + if err != nil { + cl.l.Errorf("elasticaches.expand: error getting tags for replication group %s: %v", *r.ReplicationGroupId, err) + continue + } + + // Convert to map + for _, t := range tagsResp.TagList { + ci.Tags[*t.Key] = *t.Value + } + + cacheList[*r.ReplicationGroupId] = &cacheData{ci, ts} + ids = append(ids, *r.ReplicationGroupId) + } + + cl.mu.Lock() + cl.names = ids + cl.cacheList = cacheList + cl.mu.Unlock() + + cl.l.Infof("elasticaches.expand: got %d caches", len(ids)) +} + +func newElastiCacheLister(c *configpb.ElastiCaches, region string, l *logger.Logger) (*elastiCacheLister, error) { + cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) + if err != nil { + return nil, fmt.Errorf("AWS configuration error : %v", err) + } + + client := elasticache.NewFromConfig(cfg) + + cl := &elastiCacheLister{ + c: c, + client: client, + cacheList: make(map[string]*cacheData), + l: l, + } + + reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second + go func() { + cl.expand(0) + // Introduce a random delay between 0-reEvalInterval before + // starting the refresh loop. 
If there are multiple cloudprober + // awsInstances, this will make sure that each instance calls AWS + // API at a different point of time. + randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) + time.Sleep(time.Duration(randomDelaySec) * time.Second) + for range time.Tick(reEvalInterval) { + cl.expand(reEvalInterval) + } + }() + return cl, nil +} diff --git a/internal/rds/aws/proto/config.pb.go b/internal/rds/aws/proto/config.pb.go new file mode 100644 index 00000000000..9ec3b96a21f --- /dev/null +++ b/internal/rds/aws/proto/config.pb.go @@ -0,0 +1,529 @@ +// Configuration proto for AWS provider. +// Example config: +// { +// +// # EC2 instances +// ec2_instances {} +// +// # ElastiCache cluster +// elasticache {} +// +// # RDS clusters +// rds { +// identifier: "arn" +// } +// } + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v3.21.5 +// source: github.com/cloudprober/cloudprober/internal/rds/aws/proto/config.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EC2Instances struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How often resources should be refreshed. + ReEvalSec *int32 `protobuf:"varint,98,opt,name=re_eval_sec,json=reEvalSec,def=600" json:"re_eval_sec,omitempty"` // default 10 mins +} + +// Default values for EC2Instances fields. +const ( + Default_EC2Instances_ReEvalSec = int32(600) +) + +func (x *EC2Instances) Reset() { + *x = EC2Instances{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EC2Instances) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EC2Instances) ProtoMessage() {} + +func (x *EC2Instances) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EC2Instances.ProtoReflect.Descriptor instead. +func (*EC2Instances) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{0} +} + +func (x *EC2Instances) GetReEvalSec() int32 { + if x != nil && x.ReEvalSec != nil { + return *x.ReEvalSec + } + return Default_EC2Instances_ReEvalSec +} + +// ElastiCaches discovery options. +type ElastiCaches struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How often resources should be refreshed. + ReEvalSec *int32 `protobuf:"varint,98,opt,name=re_eval_sec,json=reEvalSec,def=600" json:"re_eval_sec,omitempty"` // default 10 mins +} + +// Default values for ElastiCaches fields. 
+const ( + Default_ElastiCaches_ReEvalSec = int32(600) +) + +func (x *ElastiCaches) Reset() { + *x = ElastiCaches{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ElastiCaches) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ElastiCaches) ProtoMessage() {} + +func (x *ElastiCaches) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ElastiCaches.ProtoReflect.Descriptor instead. +func (*ElastiCaches) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{1} +} + +func (x *ElastiCaches) GetReEvalSec() int32 { + if x != nil && x.ReEvalSec != nil { + return *x.ReEvalSec + } + return Default_ElastiCaches_ReEvalSec +} + +// RDS (Amazon Relational Databases) discovery options. +type RDS struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster + // if specified, only the corresponding cluster information is returned. + Identifier *string `protobuf:"bytes,1,opt,name=identifier" json:"identifier,omitempty"` + // Filters to be added to the discovery and search. + Filter []string `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` + // Whether to includes information about clusters shared from other AWS accounts. + IncludeShared *bool `protobuf:"varint,3,opt,name=include_shared,json=includeShared" json:"include_shared,omitempty"` + ReEvalSec *int32 `protobuf:"varint,98,opt,name=re_eval_sec,json=reEvalSec,def=600" json:"re_eval_sec,omitempty"` // default 10 mins +} + +// Default values for RDS fields. +const ( + Default_RDS_ReEvalSec = int32(600) +) + +func (x *RDS) Reset() { + *x = RDS{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RDS) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RDS) ProtoMessage() {} + +func (x *RDS) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RDS.ProtoReflect.Descriptor instead. 
+func (*RDS) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{2} +} + +func (x *RDS) GetIdentifier() string { + if x != nil && x.Identifier != nil { + return *x.Identifier + } + return "" +} + +func (x *RDS) GetFilter() []string { + if x != nil { + return x.Filter + } + return nil +} + +func (x *RDS) GetIncludeShared() bool { + if x != nil && x.IncludeShared != nil { + return *x.IncludeShared + } + return false +} + +func (x *RDS) GetReEvalSec() int32 { + if x != nil && x.ReEvalSec != nil { + return *x.ReEvalSec + } + return Default_RDS_ReEvalSec +} + +// LoadBalancers discovery options. +type LoadBalancers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Amazon Resource Name (ARN) of the load balancer + // if specified, only the corresponding load balancer information is returned. + Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` +} + +func (x *LoadBalancers) Reset() { + *x = LoadBalancers{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalancers) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalancers) ProtoMessage() {} + +func (x *LoadBalancers) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalancers.ProtoReflect.Descriptor instead. +func (*LoadBalancers) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{3} +} + +func (x *LoadBalancers) GetName() []string { + if x != nil { + return x.Name + } + return nil +} + +// AWS provider config. +type ProviderConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Profile for the session. + ProfileName *string `protobuf:"bytes,1,opt,name=profile_name,json=profileName" json:"profile_name,omitempty"` + // AWS region + Region *string `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + // ECS instances discovery options. This field should be declared for the AWS + // instances discovery to be enabled. + Ec2Instances *EC2Instances `protobuf:"bytes,3,opt,name=ec2_instances,json=ec2Instances" json:"ec2_instances,omitempty"` + // ElastiCache discovery options. This field should be declared for the + // elasticache discovery to be enabled. + Elasticaches *ElastiCaches `protobuf:"bytes,4,opt,name=elasticaches" json:"elasticaches,omitempty"` + // RDS discovery options. 
+ Rds *RDS `protobuf:"bytes,5,opt,name=rds" json:"rds,omitempty"` +} + +func (x *ProviderConfig) Reset() { + *x = ProviderConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProviderConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProviderConfig) ProtoMessage() {} + +func (x *ProviderConfig) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProviderConfig.ProtoReflect.Descriptor instead. +func (*ProviderConfig) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{4} +} + +func (x *ProviderConfig) GetProfileName() string { + if x != nil && x.ProfileName != nil { + return *x.ProfileName + } + return "" +} + +func (x *ProviderConfig) GetRegion() string { + if x != nil && x.Region != nil { + return *x.Region + } + return "" +} + +func (x *ProviderConfig) GetEc2Instances() *EC2Instances { + if x != nil { + return x.Ec2Instances + } + return nil +} + +func (x *ProviderConfig) GetElasticaches() *ElastiCaches { + if x != nil { + return x.Elasticaches + } + return nil +} + +func (x *ProviderConfig) GetRds() *RDS { + if x != nil { + return x.Rds + } + return nil +} + +var File_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto protoreflect.FileDescriptor + +var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, + 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x64, + 0x73, 0x2f, 0x61, 0x77, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, + 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x22, 0x33, 0x0a, + 0x0c, 0x45, 0x43, 0x32, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x23, 0x0a, + 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x62, 0x20, 0x01, + 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, 0x30, 0x52, 0x09, 0x72, 0x65, 0x45, 0x76, 0x61, 0x6c, 0x53, + 0x65, 0x63, 0x22, 0x33, 0x0a, 0x0c, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, + 0x63, 0x18, 0x62, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, 0x30, 0x52, 0x09, 0x72, 0x65, + 0x45, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x89, 0x01, 0x0a, 0x03, 0x52, 0x44, 0x53, 0x12, + 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 
0x6c, 0x75, + 0x64, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x12, 0x23, + 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x62, 0x20, + 0x01, 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, 0x30, 0x52, 0x09, 0x72, 0x65, 0x45, 0x76, 0x61, 0x6c, + 0x53, 0x65, 0x63, 0x22, 0x23, 0x0a, 0x0d, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x86, 0x02, 0x0a, 0x0e, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x70, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x0d, 0x65, 0x63, 0x32, 0x5f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, + 0x61, 0x77, 0x73, 0x2e, 0x45, 0x43, 0x32, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x52, 0x0c, 0x65, 0x63, 0x32, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x45, + 0x0a, 0x0c, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, + 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x45, 0x6c, 0x61, 0x73, 0x74, + 0x69, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x52, 0x0c, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x03, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, + 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x52, 0x44, 0x53, 0x52, 0x03, 0x72, 0x64, + 0x73, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x61, 0x77, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +} + +var ( + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescOnce sync.Once + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescData = file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDesc +) + +func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP() []byte { + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescOnce.Do(func() { + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescData) + }) + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescData +} + +var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes = 
make([]protoimpl.MessageInfo, 5) +var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_goTypes = []interface{}{ + (*EC2Instances)(nil), // 0: cloudprober.rds.aws.EC2Instances + (*ElastiCaches)(nil), // 1: cloudprober.rds.aws.ElastiCaches + (*RDS)(nil), // 2: cloudprober.rds.aws.RDS + (*LoadBalancers)(nil), // 3: cloudprober.rds.aws.LoadBalancers + (*ProviderConfig)(nil), // 4: cloudprober.rds.aws.ProviderConfig +} +var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_depIdxs = []int32{ + 0, // 0: cloudprober.rds.aws.ProviderConfig.ec2_instances:type_name -> cloudprober.rds.aws.EC2Instances + 1, // 1: cloudprober.rds.aws.ProviderConfig.elasticaches:type_name -> cloudprober.rds.aws.ElastiCaches + 2, // 2: cloudprober.rds.aws.ProviderConfig.rds:type_name -> cloudprober.rds.aws.RDS + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_init() } +func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_init() { + if File_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EC2Instances); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ElastiCaches); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RDS); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalancers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProviderConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_goTypes, + DependencyIndexes: file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_depIdxs, + MessageInfos: 
file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes, + }.Build() + File_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto = out.File + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDesc = nil + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_goTypes = nil + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_depIdxs = nil +} diff --git a/internal/rds/aws/proto/config.proto b/internal/rds/aws/proto/config.proto new file mode 100644 index 00000000000..837ecd7bb12 --- /dev/null +++ b/internal/rds/aws/proto/config.proto @@ -0,0 +1,75 @@ +// Configuration proto for AWS provider. +// Example config: +// { +// +// # EC2 instances +// ec2_instances {} +// +// # ElastiCache cluster +// elasticache {} +// +// # RDS clusters +// rds { +// identifier: "arn" +// } +// } +syntax = "proto2"; + +package cloudprober.rds.aws; + +option go_package = "github.com/cloudprober/cloudprober/internal/rds/aws/proto"; + +message EC2Instances { + // How often resources should be refreshed. + optional int32 re_eval_sec = 98 [default = 600]; // default 10 mins +} + +// ElastiCaches discovery options. +message ElastiCaches { + + // How often resources should be refreshed. + optional int32 re_eval_sec = 98 [default = 600]; // default 10 mins +} + +// RDS (Amazon Relational Databases) discovery options. +message RDS { + // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster + // if specified, only the corresponding cluster information is returned. + optional string identifier = 1; + + // Filters to be added to the discovery and search. + repeated string filter = 2; + + // Whether to includes information about clusters shared from other AWS accounts. + optional bool include_shared = 3; + + optional int32 re_eval_sec = 98 [default = 600]; // default 10 mins +} + +// LoadBalancers discovery options. +message LoadBalancers { + // Amazon Resource Name (ARN) of the load balancer + // if specified, only the corresponding load balancer information is returned. + repeated string name = 1; +} + +// AWS provider config. +message ProviderConfig { + // Profile for the session. + optional string profile_name = 1; + + // AWS region + optional string region = 2; + + // ECS instances discovery options. This field should be declared for the AWS + // instances discovery to be enabled. + optional EC2Instances ec2_instances = 3; + + // ElastiCache discovery options. This field should be declared for the + // elasticache discovery to be enabled. + optional ElastiCaches elasticaches = 4; + + // RDS discovery options. + optional RDS rds = 5; + +} diff --git a/internal/rds/aws/proto/config_proto_gen.cue b/internal/rds/aws/proto/config_proto_gen.cue new file mode 100644 index 00000000000..92f9c78f2f8 --- /dev/null +++ b/internal/rds/aws/proto/config_proto_gen.cue @@ -0,0 +1,53 @@ +package proto + +#EC2Instances: { + // How often resources should be refreshed. + reEvalSec?: int32 @protobuf(98,int32,name=re_eval_sec,"default=600") // default 10 mins +} + +// ElastiCaches discovery options. +#ElastiCaches: { + // How often resources should be refreshed. + reEvalSec?: int32 @protobuf(98,int32,name=re_eval_sec,"default=600") // default 10 mins +} + +// RDS (Amazon Relational Databases) discovery options. +#RDS: { + // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster + // if specified, only the corresponding cluster information is returned. 
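+	// For example (hypothetical ARN):
+	//   identifier: "arn:aws:rds:us-east-1:123456789012:cluster:my-cluster"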
+ identifier?: string @protobuf(1,string) + + // Filters to be added to the discovery and search. + filter?: [...string] @protobuf(2,string) + + // Whether to includes information about clusters shared from other AWS accounts. + includeShared?: bool @protobuf(3,bool,name=include_shared) + reEvalSec?: int32 @protobuf(98,int32,name=re_eval_sec,"default=600") // default 10 mins +} + +// LoadBalancers discovery options. +#LoadBalancers: { + // Amazon Resource Name (ARN) of the load balancer + // if specified, only the corresponding load balancer information is returned. + name?: [...string] @protobuf(1,string) +} + +// AWS provider config. +#ProviderConfig: { + // Profile for the session. + profileName?: string @protobuf(1,string,name=profile_name) + + // AWS region + region?: string @protobuf(2,string) + + // ECS instances discovery options. This field should be declared for the AWS + // instances discovery to be enabled. + ec2Instances?: #EC2Instances @protobuf(3,EC2Instances,name=ec2_instances) + + // ElastiCache discovery options. This field should be declared for the + // elasticache discovery to be enabled. + elasticaches?: #ElastiCaches @protobuf(4,ElastiCaches) + + // RDS discovery options. + rds?: #RDS @protobuf(5,RDS) +} diff --git a/internal/rds/aws/rds.go b/internal/rds/aws/rds.go new file mode 100644 index 00000000000..e482716d811 --- /dev/null +++ b/internal/rds/aws/rds.go @@ -0,0 +1,202 @@ +package aws + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/internal/rds/server/filter" + "github.com/cloudprober/cloudprober/logger" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/rds" + "google.golang.org/protobuf/proto" +) + +// rdsInfo represents instance items that we fetch from the RDS API. +type rdsInfo struct { + Name string + Ip string + Port int32 + IsReplica bool + IsCluster bool + Tags map[string]string +} + +// rdsData represents objects that we store in cache. +type rdsData struct { + ri *rdsInfo + lastUpdated int64 +} + +var RDSFilters = struct { + RegexFilterKeys []string + LabelsFilter bool +}{ + []string{"name", "engine"}, + true, +} + +// rdsLister is a AWS Relational Database Service lister. It implements a cache, +// that's populated at a regular interval by making the AWS API calls. +// Listing actually only returns the current contents of that cache. +type rdsLister struct { + c *configpb.RDS + client *rds.Client + l *logger.Logger + mu sync.RWMutex + names []string + dbList map[string]*rdsData +} + +// listResources returns the list of resource records, where each record +// consists of an cluster name and the endpoint associated with it. 
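+// Note that for RDS the resource's "ip" field carries the endpoint DNS name
+// (e.g. a hypothetical mydb.cluster-abc123xyz.us-east-1.rds.amazonaws.com)
+// rather than a numeric address, and "port" is the endpoint port.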
+func (rl *rdsLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { + var resources []*pb.Resource + + allFilters, err := filter.ParseFilters(req.GetFilter(), AWSInstancesFilters.RegexFilterKeys, "") + if err != nil { + return nil, err + } + + nameFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.LabelsFilter + + rl.mu.RLock() + defer rl.mu.RUnlock() + + for _, name := range rl.names { + ins := rl.dbList[name].ri + if ins == nil { + rl.l.Errorf("rds: db info missing for %s", name) + continue + } + + if nameFilter != nil && !nameFilter.Match(name, rl.l) { + continue + } + if labelsFilter != nil && !labelsFilter.Match(ins.Tags, rl.l) { + continue + } + + resources = append(resources, &pb.Resource{ + Name: proto.String(name), + Ip: proto.String(ins.Ip), + Port: proto.Int32(ins.Port), + Labels: ins.Tags, + LastUpdated: proto.Int64(rl.dbList[name].lastUpdated), + }) + } + + rl.l.Infof("rds.listResources: returning %d instances", len(resources)) + return resources, nil +} + +// expand runs equivalent API calls as "aws describe-db-instances", +// and is used to populate the cache. +func (rl *rdsLister) expand(reEvalInterval time.Duration) { + rl.l.Infof("rds.expand: expanding AWS targets") + + result, err := rl.client.DescribeDBInstances(context.TODO(), nil) + if err != nil { + rl.l.Errorf("rds.expand: error while listing database instances: %v", err) + return + } + + var ids = make([]string, 0) + var dbList = make(map[string]*rdsData) + + ts := time.Now().Unix() + for _, r := range result.DBInstances { + if r.DBInstanceIdentifier == nil || r.DBName == nil || r.Endpoint == nil { + continue + } + isReplica := false + if r.DBClusterIdentifier != nil || r.ReadReplicaSourceDBInstanceIdentifier != nil { + isReplica = true + } + + ci := &rdsInfo{ + Name: *r.DBName, + Ip: *r.Endpoint.Address, + Port: *r.Endpoint.Port, + IsReplica: isReplica, + Tags: make(map[string]string), + } + + // Convert to map + for _, t := range r.TagList { + ci.Tags[*t.Key] = *t.Value + } + + dbList[*r.DBName] = &rdsData{ci, ts} + ids = append(ids, *r.DBName) + } + + resCluster, err := rl.client.DescribeDBClusters(context.TODO(), nil) + if err != nil { + rl.l.Errorf("rds.expand: error while listing database clusters: %v", err) + return + } + for _, r := range resCluster.DBClusters { + if r.DBClusterIdentifier == nil || r.DatabaseName == nil || r.Endpoint == nil || r.Port == nil { + continue + } + + ci := &rdsInfo{ + Name: *r.DBClusterIdentifier, + Ip: *r.Endpoint, + Port: *r.Port, + IsCluster: true, + } + + // Convert to map + for _, t := range r.TagList { + ci.Tags[*t.Key] = *t.Value + } + + dbList[*r.DBClusterIdentifier] = &rdsData{ci, ts} + ids = append(ids, *r.DBClusterIdentifier) + } + + rl.mu.Lock() + rl.names = ids + rl.dbList = dbList + rl.mu.Unlock() + + rl.l.Infof("rds.expand: got %d databases", len(ids)) +} + +func newRdsLister(c *configpb.RDS, region string, l *logger.Logger) (*rdsLister, error) { + cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) + if err != nil { + return nil, fmt.Errorf("AWS configuration error : %v", err) + } + + client := rds.NewFromConfig(cfg) + + cl := &rdsLister{ + c: c, + client: client, + dbList: make(map[string]*rdsData), + l: l, + } + + reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second + go func() { + cl.expand(0) + // Introduce a random delay between 0-reEvalInterval before + // starting the refresh loop. 
If there are multiple cloudprober + // awsInstances, this will make sure that each instance calls AWS + // API at a different point of time. + randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) + time.Sleep(time.Duration(randomDelaySec) * time.Second) + for range time.Tick(reEvalInterval) { + cl.expand(reEvalInterval) + } + }() + return cl, nil +} diff --git a/internal/rds/aws/testdata/targets.json b/internal/rds/aws/testdata/targets.json new file mode 100644 index 00000000000..14d423802b6 --- /dev/null +++ b/internal/rds/aws/testdata/targets.json @@ -0,0 +1,31 @@ +{ + "resource": [ + { + "name": "switch-xx-1", + "ip": "10.1.1.1", + "port": 8080, + "labels": { + "device_type": "switch", + "cluster": "xx" + } + }, + { + "name": "switch-xx-2", + "ip": "10.1.1.2", + "port": 8081, + "labels": { + "cluster": "xx" + } + }, + { + "name": "switch-yy-1", + "ip": "10.1.2.1", + "port": 8080 + }, + { + "name": "switch-zz-1", + "ip": "::aaa:1", + "port": 8080 + } + ] +} diff --git a/internal/rds/aws/testdata/targets1.textpb b/internal/rds/aws/testdata/targets1.textpb new file mode 100644 index 00000000000..ecad1f25a52 --- /dev/null +++ b/internal/rds/aws/testdata/targets1.textpb @@ -0,0 +1,23 @@ +resource { + name: "switch-xx-1" + ip: "10.1.1.1" + port: 8080 + labels { + key: "device_type" + value: "switch" + } + labels { + key: "cluster" + value: "xx" + } +} + +resource { + name: "switch-xx-2" + ip: "10.1.1.2" + port: 8081 + labels { + key: "cluster" + value: "xx" + } +} diff --git a/internal/rds/aws/testdata/targets2.textpb b/internal/rds/aws/testdata/targets2.textpb new file mode 100644 index 00000000000..fb80c4293f0 --- /dev/null +++ b/internal/rds/aws/testdata/targets2.textpb @@ -0,0 +1,11 @@ +resource { + name: "switch-yy-1" + ip: "10.1.2.1" + port: 8080 +} + +resource { + name: "switch-zz-1" + ip: "::aaa:1" + port: 8080 +} diff --git a/internal/rds/server/proto/config.pb.go b/internal/rds/server/proto/config.pb.go index 11e5977ebdd..c6e6771814c 100644 --- a/internal/rds/server/proto/config.pb.go +++ b/internal/rds/server/proto/config.pb.go @@ -19,6 +19,7 @@ package proto import ( + proto3 "github.com/cloudprober/cloudprober/internal/rds/aws/proto" proto "github.com/cloudprober/cloudprober/internal/rds/file/proto" proto1 "github.com/cloudprober/cloudprober/internal/rds/gcp/proto" proto2 "github.com/cloudprober/cloudprober/internal/rds/kubernetes/proto" @@ -96,6 +97,7 @@ type Provider struct { // *Provider_FileConfig // *Provider_GcpConfig // *Provider_KubernetesConfig + // *Provider_AwsConfig Config isProvider_Config `protobuf_oneof:"config"` } @@ -166,6 +168,13 @@ func (x *Provider) GetKubernetesConfig() *proto2.ProviderConfig { return nil } +func (x *Provider) GetAwsConfig() *proto3.ProviderConfig { + if x, ok := x.GetConfig().(*Provider_AwsConfig); ok { + return x.AwsConfig + } + return nil +} + type isProvider_Config interface { isProvider_Config() } @@ -182,12 +191,18 @@ type Provider_KubernetesConfig struct { KubernetesConfig *proto2.ProviderConfig `protobuf:"bytes,3,opt,name=kubernetes_config,json=kubernetesConfig,oneof"` } +type Provider_AwsConfig struct { + AwsConfig *proto3.ProviderConfig `protobuf:"bytes,5,opt,name=aws_config,json=awsConfig,oneof"` +} + func (*Provider_FileConfig) isProvider_Config() {} func (*Provider_GcpConfig) isProvider_Config() {} func (*Provider_KubernetesConfig) isProvider_Config() {} +func (*Provider_AwsConfig) isProvider_Config() {} + var File_github_com_cloudprober_cloudprober_internal_rds_server_proto_config_proto protoreflect.FileDescriptor var 
file_github_com_cloudprober_cloudprober_internal_rds_server_proto_config_proto_rawDesc = []byte{ @@ -196,47 +211,55 @@ var file_github_com_cloudprober_cloudprober_internal_rds_server_proto_config_pro 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x1a, 0x47, 0x67, 0x69, + 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x1a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, - 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x66, 0x69, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x4d, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, - 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, - 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x6b, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x0a, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x35, 0x0a, 0x08, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x22, 0x8e, 0x02, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x47, - 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, - 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, 0x0a, 0x0a, 0x67, 0x63, 0x70, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x48, 0x00, 0x52, 0x09, 0x67, 0x63, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, - 0x11, 0x6b, 0x75, 0x62, 0x65, 
0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x72, - 0x6e, 0x65, 0x74, 0x65, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, - 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x61, 0x77, + 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x47, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, + 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x46, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, + 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, + 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x67, + 0x63, 0x70, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, + 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x12, 0x35, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, + 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, + 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0xd4, 0x02, 0x0a, 0x08, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x47, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x44, 0x0a, 0x0a, 0x67, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 
0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, + 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x67, 0x63, 0x70, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x11, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, + 0x64, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x2e, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, + 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, + 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x61, 0x77, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x72, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, } var ( @@ -258,17 +281,19 @@ var file_github_com_cloudprober_cloudprober_internal_rds_server_proto_config_pro (*proto.ProviderConfig)(nil), // 2: cloudprober.rds.file.ProviderConfig (*proto1.ProviderConfig)(nil), // 3: cloudprober.rds.gcp.ProviderConfig (*proto2.ProviderConfig)(nil), // 4: cloudprober.rds.kubernetes.ProviderConfig + (*proto3.ProviderConfig)(nil), // 5: cloudprober.rds.aws.ProviderConfig } var file_github_com_cloudprober_cloudprober_internal_rds_server_proto_config_proto_depIdxs = []int32{ 1, // 0: cloudprober.rds.ServerConf.provider:type_name -> cloudprober.rds.Provider 2, // 1: cloudprober.rds.Provider.file_config:type_name -> cloudprober.rds.file.ProviderConfig 3, // 2: cloudprober.rds.Provider.gcp_config:type_name -> cloudprober.rds.gcp.ProviderConfig 4, // 3: cloudprober.rds.Provider.kubernetes_config:type_name -> cloudprober.rds.kubernetes.ProviderConfig - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 5, // 4: cloudprober.rds.Provider.aws_config:type_name -> cloudprober.rds.aws.ProviderConfig + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_github_com_cloudprober_cloudprober_internal_rds_server_proto_config_proto_init() } @@ -306,6 +331,7 @@ func file_github_com_cloudprober_cloudprober_internal_rds_server_proto_config_pr (*Provider_FileConfig)(nil), (*Provider_GcpConfig)(nil), (*Provider_KubernetesConfig)(nil), + 
(*Provider_AwsConfig)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/internal/rds/server/proto/config.proto b/internal/rds/server/proto/config.proto index 92390d7da7e..fbf8022bab8 100644 --- a/internal/rds/server/proto/config.proto +++ b/internal/rds/server/proto/config.proto @@ -13,10 +13,12 @@ syntax = "proto2"; package cloudprober.rds; +import "github.com/cloudprober/cloudprober/internal/rds/aws/proto/config.proto"; import "github.com/cloudprober/cloudprober/internal/rds/file/proto/config.proto"; import "github.com/cloudprober/cloudprober/internal/rds/gcp/proto/config.proto"; import "github.com/cloudprober/cloudprober/internal/rds/kubernetes/proto/config.proto"; + option go_package = "github.com/cloudprober/cloudprober/internal/rds/server/proto"; message ServerConf { @@ -33,5 +35,6 @@ message Provider { file.ProviderConfig file_config = 4; gcp.ProviderConfig gcp_config = 2; kubernetes.ProviderConfig kubernetes_config = 3; + aws.ProviderConfig aws_config = 5; } } diff --git a/internal/rds/server/proto/config_proto_gen.cue b/internal/rds/server/proto/config_proto_gen.cue index b601582748f..fa88099faa2 100644 --- a/internal/rds/server/proto/config_proto_gen.cue +++ b/internal/rds/server/proto/config_proto_gen.cue @@ -4,6 +4,7 @@ import ( "github.com/cloudprober/cloudprober/internal/rds/file/proto" proto_1 "github.com/cloudprober/cloudprober/internal/rds/gcp/proto" proto_5 "github.com/cloudprober/cloudprober/internal/rds/kubernetes/proto" + proto_A "github.com/cloudprober/cloudprober/internal/rds/aws/proto" ) #ServerConf: { @@ -21,5 +22,7 @@ import ( gcpConfig: proto_1.#ProviderConfig @protobuf(2,gcp.ProviderConfig,name=gcp_config) } | { kubernetesConfig: proto_5.#ProviderConfig @protobuf(3,kubernetes.ProviderConfig,name=kubernetes_config) + } | { + awsConfig: proto_A.#ProviderConfig @protobuf(5,aws.ProviderConfig,name=aws_config) } } diff --git a/internal/rds/server/server.go b/internal/rds/server/server.go index 095feb56de3..803a6b43026 100644 --- a/internal/rds/server/server.go +++ b/internal/rds/server/server.go @@ -23,6 +23,7 @@ import ( "context" "fmt" + "github.com/cloudprober/cloudprober/internal/rds/aws" "github.com/cloudprober/cloudprober/internal/rds/file" "github.com/cloudprober/cloudprober/internal/rds/gcp" "github.com/cloudprober/cloudprober/internal/rds/kubernetes" @@ -87,6 +88,14 @@ func (s *Server) initProviders(c *configpb.ServerConf) error { if p, err = kubernetes.New(pc.GetKubernetesConfig(), s.l); err != nil { return err } + case *configpb.Provider_AwsConfig: + if id == "" { + id = aws.DefaultProviderID + } + s.l.Infof("rds.server: adding AWS provider with id: %s", id) + if p, err = aws.New(pc.GetAwsConfig(), s.l); err != nil { + return err + } } s.providers[id] = p } From 87c7f26eda8297e9e0870f04eabd0161d2f77174 Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Sat, 18 Nov 2023 22:24:53 -0800 Subject: [PATCH 02/10] init maps --- internal/rds/aws/elasticache.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/rds/aws/elasticache.go b/internal/rds/aws/elasticache.go index 2bbfc20bb60..4344ac6bf12 100644 --- a/internal/rds/aws/elasticache.go +++ b/internal/rds/aws/elasticache.go @@ -135,6 +135,9 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { ts := time.Now().Unix() for _, r := range resCC.CacheClusters { + if len(r.CacheNodes) == 0 { + continue + } ci := &cacheInfo{ ID: *r.CacheClusterId, TLSEnabled: *r.TransitEncryptionEnabled, @@ -181,6 +184,7 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) 
{ Port: *r.ConfigurationEndpoint.Port, TLSEnabled: tlsEnabled, Clustered: true, + Tags: make(map[string]string), } } else if len(r.NodeGroups) > 0 && r.NodeGroups[0].PrimaryEndpoint != nil { ci = &cacheInfo{ @@ -188,6 +192,7 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { Port: *r.NodeGroups[0].PrimaryEndpoint.Port, TLSEnabled: tlsEnabled, Clustered: false, + Tags: make(map[string]string), } } else { continue From dbceb92ce5b4cf7c9ceade9cbadd8ad2e0ee3c94 Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Tue, 21 Nov 2023 00:40:21 -0800 Subject: [PATCH 03/10] unit tests for ec2 --- internal/rds/aws/ec2.go | 2 +- internal/rds/aws/ec2_test.go | 270 +++++++++++++++++++++++++++++++++++ 2 files changed, 271 insertions(+), 1 deletion(-) create mode 100644 internal/rds/aws/ec2_test.go diff --git a/internal/rds/aws/ec2.go b/internal/rds/aws/ec2.go index 9ab381ba9b6..304ee506284 100644 --- a/internal/rds/aws/ec2.go +++ b/internal/rds/aws/ec2.go @@ -71,7 +71,7 @@ var AWSInstancesFilters = struct { // Listing actually only returns the current contents of that cache. type ec2InstancesLister struct { c *configpb.EC2Instances - client *ec2.Client + client ec2.DescribeInstancesAPIClient l *logger.Logger mu sync.RWMutex names []string diff --git a/internal/rds/aws/ec2_test.go b/internal/rds/aws/ec2_test.go new file mode 100644 index 00000000000..83312694213 --- /dev/null +++ b/internal/rds/aws/ec2_test.go @@ -0,0 +1,270 @@ +package aws + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/logger" + "google.golang.org/protobuf/proto" + + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" +) + +type mockEC2DescribeInstances func(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) + +func (m mockEC2DescribeInstances) DescribeInstances(ctx context.Context, params *ec2.DescribeInstancesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) { + return m(ctx, params, optFns...) 
+} + +type testInstance struct { + id string + ipAddr string + tags map[string]string +} + +func TestExpand(t *testing.T) { + cases := []struct { + err error + instances []*testInstance + expectCount int + }{ + { + instances: []*testInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + err: nil, + expectCount: 2, + }, + { + instances: []*testInstance{}, + err: nil, + expectCount: 0, + }, + { + instances: []*testInstance{ + { + id: "test-id", + }, + { + id: "test-id-2", + }, + }, + err: nil, + expectCount: 0, + }, + { + instances: []*testInstance{}, + err: fmt.Errorf("some error"), + expectCount: 0, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + client := func(t *testing.T, instances []*testInstance) ec2.DescribeInstancesAPIClient { + return mockEC2DescribeInstances(func(ctx context.Context, params *ec2.DescribeInstancesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) { + t.Helper() + + r := types.Reservation{} + + for _, v := range instances { + i := types.Instance{ + InstanceId: &v.id, + } + + if v.ipAddr != "" { + i.PrivateIpAddress = &v.ipAddr + } + + for tk, tv := range v.tags { + tag := types.Tag{ + Key: &tk, + Value: &tv, + } + + i.Tags = append(i.Tags, tag) + + } + r.Instances = append(r.Instances, i) + } + + out := &ec2.DescribeInstancesOutput{ + Reservations: []types.Reservation{r}, + } + + return out, tt.err + }) + } + + il := &ec2InstancesLister{ + client: client(t, tt.instances), + cache: make(map[string]*instanceData), + } + + il.expand(time.Second) + + // Check for instance count + if len(il.cache) != tt.expectCount { + t.Errorf("Got %d instances, want %d", len(il.cache), tt.expectCount) + } + }) + } +} + +func TestLister(t *testing.T) { + cases := []struct { + instances []*testInstance + filter []*pb.Filter + expectErr bool + expectedCount int + }{ + { + instances: []*testInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 2, + }, + { + instances: []*testInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("ins."), + Value: proto.String("b"), + }, + }, + expectErr: true, + }, + { + instances: []*testInstance{}, + expectedCount: 0, + }, + { + instances: []*testInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"test1": "a"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"test2": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("name"), + Value: proto.String("nonexistent"), + }, + }, + expectedCount: 0, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + + names := []string{} + cache := make(map[string]*instanceData) + 
for _, ti := range tt.instances { + ii := &instanceInfo{ + ID: ti.id, + IPAddr: ti.ipAddr, + Tags: ti.tags, + } + cache[ti.id] = &instanceData{ + ii: ii, + } + + names = append(names, ti.id) + } + + lister := &ec2InstancesLister{ + cache: cache, + names: names, + l: &logger.Logger{}, + } + + var filters []*pb.Filter + if tt.filter != nil { + filters = append(filters, tt.filter...) + } + + resources, err := lister.listResources(&pb.ListResourcesRequest{ + Filter: filters, + }) + + if err != nil { + if !tt.expectErr { + t.Errorf("Got error while listing resources: %v, expected no errors", err) + } + return + } + + if len(resources) != tt.expectedCount { + t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) + } + }) + } +} From 525c8f72c4517135123b302a198a3e78576ba350 Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Wed, 22 Nov 2023 13:13:38 -0800 Subject: [PATCH 04/10] add elasticache test and bug fix --- internal/rds/aws/ec2_test.go | 30 +-- internal/rds/aws/elasticache.go | 95 +++---- internal/rds/aws/elasticache_test.go | 357 +++++++++++++++++++++++++++ 3 files changed, 425 insertions(+), 57 deletions(-) create mode 100644 internal/rds/aws/elasticache_test.go diff --git a/internal/rds/aws/ec2_test.go b/internal/rds/aws/ec2_test.go index 83312694213..68771c4178e 100644 --- a/internal/rds/aws/ec2_test.go +++ b/internal/rds/aws/ec2_test.go @@ -21,20 +21,20 @@ func (m mockEC2DescribeInstances) DescribeInstances(ctx context.Context, params return m(ctx, params, optFns...) } -type testInstance struct { +type testEC2Instance struct { id string ipAddr string tags map[string]string } -func TestExpand(t *testing.T) { +func TestEC2Expand(t *testing.T) { cases := []struct { err error - instances []*testInstance + instances []*testEC2Instance expectCount int }{ { - instances: []*testInstance{ + instances: []*testEC2Instance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -50,12 +50,12 @@ func TestExpand(t *testing.T) { expectCount: 2, }, { - instances: []*testInstance{}, + instances: []*testEC2Instance{}, err: nil, expectCount: 0, }, { - instances: []*testInstance{ + instances: []*testEC2Instance{ { id: "test-id", }, @@ -67,7 +67,7 @@ func TestExpand(t *testing.T) { expectCount: 0, }, { - instances: []*testInstance{}, + instances: []*testEC2Instance{}, err: fmt.Errorf("some error"), expectCount: 0, }, @@ -75,7 +75,7 @@ func TestExpand(t *testing.T) { for i, tt := range cases { t.Run(strconv.Itoa(i), func(t *testing.T) { - client := func(t *testing.T, instances []*testInstance) ec2.DescribeInstancesAPIClient { + client := func(t *testing.T, instances []*testEC2Instance) ec2.DescribeInstancesAPIClient { return mockEC2DescribeInstances(func(ctx context.Context, params *ec2.DescribeInstancesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) { t.Helper() @@ -125,15 +125,15 @@ func TestExpand(t *testing.T) { } } -func TestLister(t *testing.T) { +func TestEC2Lister(t *testing.T) { cases := []struct { - instances []*testInstance + instances []*testEC2Instance filter []*pb.Filter expectErr bool expectedCount int }{ { - instances: []*testInstance{ + instances: []*testEC2Instance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -154,7 +154,7 @@ func TestLister(t *testing.T) { expectedCount: 2, }, { - instances: []*testInstance{ + instances: []*testEC2Instance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -175,11 +175,11 @@ func TestLister(t *testing.T) { expectErr: true, }, { - instances: []*testInstance{}, + instances: []*testEC2Instance{}, expectedCount: 0, }, 
{ - instances: []*testInstance{ + instances: []*testEC2Instance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -200,7 +200,7 @@ func TestLister(t *testing.T) { expectedCount: 0, }, { - instances: []*testInstance{ + instances: []*testEC2Instance{ { id: "test-id", ipAddr: "10.0.0.2", diff --git a/internal/rds/aws/elasticache.go b/internal/rds/aws/elasticache.go index 4344ac6bf12..c28b1046dfb 100644 --- a/internal/rds/aws/elasticache.go +++ b/internal/rds/aws/elasticache.go @@ -65,12 +65,17 @@ var ElastiCacheFilters = struct { // that's populated at a regular interval by making the AWS API calls. // Listing actually only returns the current contents of that cache. type elastiCacheLister struct { - c *configpb.ElastiCaches - client *elasticache.Client - l *logger.Logger - mu sync.RWMutex - names []string - cacheList map[string]*cacheData + c *configpb.ElastiCaches + clusterclient elasticache.DescribeCacheClustersAPIClient + rgclient elasticache.DescribeReplicationGroupsAPIClient + tagclient *elasticache.Client + l *logger.Logger + mu sync.RWMutex + names []string + cacheList map[string]*cacheData + // This is for unit testing, should be taken out if/when there is a respective + // interface in AWS SDK for go v2 to replace this logs + discoverTags bool } // listResources returns the list of resource records, where each record @@ -78,7 +83,7 @@ type elastiCacheLister struct { func (cl *elastiCacheLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { var resources []*pb.Resource - allFilters, err := filter.ParseFilters(req.GetFilter(), AWSInstancesFilters.RegexFilterKeys, "") + allFilters, err := filter.ParseFilters(req.GetFilter(), ElastiCacheFilters.RegexFilterKeys, "") if err != nil { return nil, err } @@ -124,7 +129,7 @@ func (cl *elastiCacheLister) listResources(req *pb.ListResourcesRequest) ([]*pb. 
func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { cl.l.Infof("elasticaches.expand: expanding AWS targets") - resCC, err := cl.client.DescribeCacheClusters(context.TODO(), nil) + resCC, err := cl.clusterclient.DescribeCacheClusters(context.TODO(), nil) if err != nil { cl.l.Errorf("elasticaches.expand: error while listing cache clusters: %v", err) return @@ -148,49 +153,50 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { Tags: make(map[string]string), } - // AWS doesn't return Tag information in the response, we'll need to request it separately - // NB: This might get throttled by AWS, if we make too many requests, see if we can batch or slow down - // Add sleep if needed to the end of the loop - tagsResp, err := cl.client.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ - ResourceName: r.ARN, - }) - if err != nil { - cl.l.Errorf("elasticaches.expand: error getting tags for cluster %s: %v", *r.CacheClusterId, err) - continue - } + if cl.discoverTags { + // AWS doesn't return Tag information in the response, we'll need to request it separately + // NB: This might get throttled by AWS, if we make too many requests, see if we can batch or slow down + // Add sleep if needed to the end of the loop + tagsResp, err := cl.tagclient.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ + ResourceName: r.ARN, + }) + if err != nil { + cl.l.Errorf("elasticaches.expand: error getting tags for cluster %s: %v", *r.CacheClusterId, err) + continue + } - // Convert to map - for _, t := range tagsResp.TagList { - ci.Tags[*t.Key] = *t.Value + // Convert to map + for _, t := range tagsResp.TagList { + ci.Tags[*t.Key] = *t.Value + } } cacheList[*r.CacheClusterId] = &cacheData{ci, ts} ids = append(ids, *r.CacheClusterId) } - resRG, err := cl.client.DescribeReplicationGroups(context.TODO(), nil) + resRG, err := cl.rgclient.DescribeReplicationGroups(context.TODO(), nil) if err != nil { cl.l.Errorf("elasticaches.expand: error while listing replication groups: %v", err) return } for _, r := range resRG.ReplicationGroups { - tlsEnabled := *r.TransitEncryptionEnabled - var ci *cacheInfo - if r.ConfigurationEndpoint != nil { //clustered ci = &cacheInfo{ + ID: *r.ReplicationGroupId, Ip: *r.ConfigurationEndpoint.Address, Port: *r.ConfigurationEndpoint.Port, - TLSEnabled: tlsEnabled, + TLSEnabled: *r.TransitEncryptionEnabled, Clustered: true, Tags: make(map[string]string), } } else if len(r.NodeGroups) > 0 && r.NodeGroups[0].PrimaryEndpoint != nil { ci = &cacheInfo{ + ID: *r.ReplicationGroupId, Ip: *r.NodeGroups[0].PrimaryEndpoint.Address, Port: *r.NodeGroups[0].PrimaryEndpoint.Port, - TLSEnabled: tlsEnabled, + TLSEnabled: *r.TransitEncryptionEnabled, Clustered: false, Tags: make(map[string]string), } @@ -198,18 +204,20 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { continue } - // Same comments as the same calls above - tagsResp, err := cl.client.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ - ResourceName: r.ARN, - }) - if err != nil { - cl.l.Errorf("elasticaches.expand: error getting tags for replication group %s: %v", *r.ReplicationGroupId, err) - continue - } + if cl.discoverTags { + // Same comments as the same calls above + tagsResp, err := cl.tagclient.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ + ResourceName: r.ARN, + }) + if err != nil { + cl.l.Errorf("elasticaches.expand: error getting tags for replication group %s: %v", *r.ReplicationGroupId, 
err) + continue + } - // Convert to map - for _, t := range tagsResp.TagList { - ci.Tags[*t.Key] = *t.Value + // Convert to map + for _, t := range tagsResp.TagList { + ci.Tags[*t.Key] = *t.Value + } } cacheList[*r.ReplicationGroupId] = &cacheData{ci, ts} @@ -233,10 +241,13 @@ func newElastiCacheLister(c *configpb.ElastiCaches, region string, l *logger.Log client := elasticache.NewFromConfig(cfg) cl := &elastiCacheLister{ - c: c, - client: client, - cacheList: make(map[string]*cacheData), - l: l, + c: c, + clusterclient: client, + rgclient: client, + tagclient: client, + cacheList: make(map[string]*cacheData), + l: l, + discoverTags: true, } reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second diff --git a/internal/rds/aws/elasticache_test.go b/internal/rds/aws/elasticache_test.go new file mode 100644 index 00000000000..71ecc16de7a --- /dev/null +++ b/internal/rds/aws/elasticache_test.go @@ -0,0 +1,357 @@ +package aws + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/service/elasticache" + "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/logger" + "google.golang.org/protobuf/proto" +) + +type mockECCache struct { + ccoutput elasticache.DescribeCacheClustersOutput + rgoutput elasticache.DescribeReplicationGroupsOutput + ccerr error + rgerr error +} + +func (m mockECCache) DescribeCacheClusters(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) { + return &m.ccoutput, m.ccerr +} + +func (m mockECCache) DescribeReplicationGroups(ctx context.Context, params *elasticache.DescribeReplicationGroupsInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeReplicationGroupsOutput, error) { + return &m.rgoutput, m.rgerr +} + +type testECCluster struct { + instances []testECInstance + id string +} + +type testECInstance struct { + id string + ipAddr string + port int32 + tags map[string]string + engine string +} + +func TestECExpand(t *testing.T) { + cases := []struct { + rgerr error + rg *testECCluster + ccerr error + cc *testECCluster + expectCount int + }{ + { + rg: &testECCluster{ + id: "test-cluster-id", + instances: []testECInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + port: 1000, + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + port: 1000, + tags: map[string]string{"a": "b"}, + }, + }, + }, + rgerr: nil, + cc: &testECCluster{}, + ccerr: nil, + expectCount: 1, + }, + { + rg: &testECCluster{}, + rgerr: nil, + expectCount: 0, + }, + { + rg: &testECCluster{}, + rgerr: fmt.Errorf("some error"), + expectCount: 0, + }, + { + cc: &testECCluster{ + id: "test-cluster-id", + + instances: []testECInstance{ + { + id: "test-id", + }, + { + id: "test-id-2", + }, + }, + }, + rgerr: nil, + ccerr: nil, + expectCount: 1, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + mock := mockECCache{ + rgerr: tt.rgerr, + ccerr: tt.ccerr, + } + + tlsenabled := false + engine := "redis" + + if tt.cc != nil { + mock.ccoutput = elasticache.DescribeCacheClustersOutput{ + CacheClusters: []types.CacheCluster{ + { + CacheClusterId: &tt.cc.id, + TransitEncryptionEnabled: &tlsenabled, + Engine: &engine, + }, + }, + } + for _, v := range tt.cc.instances { + c := types.CacheNode{ + Endpoint: &types.Endpoint{ + Address: &v.ipAddr, + Port: &v.port, + }, + } + 
mock.ccoutput.CacheClusters[0].CacheNodes = append(mock.ccoutput.CacheClusters[0].CacheNodes, c) + } + } + + if tt.rg != nil { + mock.rgoutput = elasticache.DescribeReplicationGroupsOutput{} + for _, v := range tt.rg.instances { + g := types.ReplicationGroup{ + ReplicationGroupId: &v.id, + TransitEncryptionEnabled: &tlsenabled, + ConfigurationEndpoint: &types.Endpoint{ + Address: &v.ipAddr, + Port: &v.port, + }, + } + mock.rgoutput.ReplicationGroups = append(mock.rgoutput.ReplicationGroups, g) + } + } + + il := &elastiCacheLister{ + clusterclient: mock, + rgclient: mock, + tagclient: &elasticache.Client{}, + cacheList: make(map[string]*cacheData), + discoverTags: false, // tag discovery to be tested once the client can be mocked + } + il.expand(time.Second) + + // Check for instance count + if len(il.cacheList) != tt.expectCount { + t.Errorf("Got %d instances, want %d", len(il.cacheList), tt.expectCount) + } + }) + } +} + +func TestECLister(t *testing.T) { + cases := []struct { + instances []*testECInstance + filter []*pb.Filter + expectErr bool + expectedCount int + }{ + { + instances: []*testECInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 2, + }, + { + instances: []*testECInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("ins."), + Value: proto.String("b"), + }, + }, + expectErr: true, + }, + { + instances: []*testECInstance{}, + expectedCount: 0, + }, + { + instances: []*testECInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"test1": "a"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"test2": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testECInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("name"), + Value: proto.String("nonexistent"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testECInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + engine: "memcached", + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + engine: "memcached", + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("engine"), + Value: proto.String("redis"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testECInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + engine: "redis", + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + engine: "redis", + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("engine"), + Value: proto.String("redis"), + }, + }, + expectedCount: 2, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + + names := []string{} + cache := make(map[string]*cacheData) + for _, ti := range tt.instances { + ci := &cacheInfo{ + ID: ti.id, + Ip: ti.ipAddr, + Tags: ti.tags, + Engine: ti.engine, + } + 
cache[ti.id] = &cacheData{ + ci: ci, + } + + names = append(names, ti.id) + } + + lister := &elastiCacheLister{ + cacheList: cache, + names: names, + l: &logger.Logger{}, + } + + var filters []*pb.Filter + if tt.filter != nil { + filters = append(filters, tt.filter...) + } + + resources, err := lister.listResources(&pb.ListResourcesRequest{ + Filter: filters, + }) + + if err != nil { + if !tt.expectErr { + t.Errorf("Got error while listing resources: %v, expected no errors", err) + } + return + } + + if len(resources) != tt.expectedCount { + t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) + } + }) + } +} From 2e116af30e4bbaf6bc5cea84cff2d17890010ae2 Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Wed, 3 Jan 2024 13:29:30 -0800 Subject: [PATCH 05/10] split rds and ec API calls --- client-rds.cfg | 20 + client.cfg | 20 + cmd/cloudprober.go | 2 +- internal/rds/aws/aws.go | 61 ++- internal/rds/aws/aws_test.go | 91 ++-- internal/rds/aws/ec2.go | 2 +- internal/rds/aws/ec_clusters.go | 216 ++++++++++ internal/rds/aws/ec_clusters_test.go | 298 +++++++++++++ ...elasticache.go => ec_replicationgroups.go} | 146 +++---- ...e_test.go => ec_replicationgroups_test.go} | 135 ++---- internal/rds/aws/proto/config.pb.go | 402 +++++++++++++----- internal/rds/aws/proto/config.proto | 49 ++- internal/rds/aws/proto/config_proto_gen.cue | 45 +- internal/rds/aws/rds_clusters.go | 172 ++++++++ internal/rds/aws/rds_clusters_test.go | 276 ++++++++++++ internal/rds/aws/{rds.go => rds_instances.go} | 102 ++--- internal/rds/aws/rds_instances_test.go | 285 +++++++++++++ internal/rds/client/client.go | 15 +- server.cfg | 13 + targets/endpoint/endpoint.go | 3 +- 20 files changed, 1939 insertions(+), 414 deletions(-) create mode 100644 client-rds.cfg create mode 100644 client.cfg create mode 100644 internal/rds/aws/ec_clusters.go create mode 100644 internal/rds/aws/ec_clusters_test.go rename internal/rds/aws/{elasticache.go => ec_replicationgroups.go} (52%) rename internal/rds/aws/{elasticache_test.go => ec_replicationgroups_test.go} (61%) create mode 100644 internal/rds/aws/rds_clusters.go create mode 100644 internal/rds/aws/rds_clusters_test.go rename internal/rds/aws/{rds.go => rds_instances.go} (59%) create mode 100644 internal/rds/aws/rds_instances_test.go create mode 100644 server.cfg diff --git a/client-rds.cfg b/client-rds.cfg new file mode 100644 index 00000000000..bc839c33f67 --- /dev/null +++ b/client-rds.cfg @@ -0,0 +1,20 @@ +probe { + name:"obs-rds" + type: PING + + targets{ + rds_targets { + rds_server_options { + server_address: "localhost:9314" + } + + resource_path: "aws://rds" + filter { + key: "labels.component" + value: "observability" + } + } + } +} + +port: 9400 diff --git a/client.cfg b/client.cfg new file mode 100644 index 00000000000..0dc0ebcb332 --- /dev/null +++ b/client.cfg @@ -0,0 +1,20 @@ +probe { + name:"obs-ec2" + type: PING + + targets{ + rds_targets { + rds_server_options { + server_address: "localhost:9314" + } + + resource_path: "aws://ec2_instances" + filter { + key: "labels.component" + value: "observability" + } + } + } +} + +port: 9400 diff --git a/cmd/cloudprober.go b/cmd/cloudprober.go index ffbd3e58ada..3a81179d17e 100644 --- a/cmd/cloudprober.go +++ b/cmd/cloudprober.go @@ -40,7 +40,7 @@ import ( ) var ( - versionFlag = flag.Bool(" version", false, "Print version and exit") + versionFlag = flag.Bool("version", false, "Print version and exit") buildInfoFlag = flag.Bool("buildinfo", false, "Print build info and exit") stopTime = 
flag.Duration("stop_time", 0, "How long to wait for cleanup before process exits on SIGINT and SIGTERM") cpuprofile = flag.String("cpuprof", "", "Write cpu profile to file") diff --git a/internal/rds/aws/aws.go b/internal/rds/aws/aws.go index 84d70855812..982ae41e0b0 100644 --- a/internal/rds/aws/aws.go +++ b/internal/rds/aws/aws.go @@ -45,11 +45,13 @@ const DefaultProviderID = "aws" // ResourceTypes declares resource types supported by the AWS provider. var ResourceTypes = struct { - EC2Instances, ElastiCaches, RDS string + EC2Instances, ElastiCacheClusters, ElastiCacheReplicationGroups, RDSClusters, RDSInstances string }{ "ec2_instances", - "elasticaches", - "rds", + "elasticache_clusters", + "elasticache_replicationgroups", + "rds_clusters", + "rds_instances", } type lister interface { @@ -84,22 +86,40 @@ func initAWSProject(c *configpb.ProviderConfig, l *logger.Logger) (map[string]li resourceLister[ResourceTypes.EC2Instances] = lr } - // Enable ElastiCaches lister if configured. - if c.GetElasticaches() != nil { - lr, err := newElastiCacheLister(c.GetElasticaches(), c.GetRegion(), l) + // Enable GetElasticacheClusters lister if configured. + if c.GetElasticacheClusters() != nil { + lr, err := newElastiCacheClusterLister(c.GetElasticacheClusters(), c.GetRegion(), l) if err != nil { return nil, err } - resourceLister[ResourceTypes.ElastiCaches] = lr + resourceLister[ResourceTypes.ElastiCacheClusters] = lr } - // Enable RDS (AWS) if configured. - if c.GetRds() != nil { - lr, err := newRdsLister(c.GetRds(), c.GetRegion(), l) + // Enable GetElasticacheClusters lister if configured. + if c.GetElasticacheReplicationgroups() != nil { + lr, err := newElastiCacheRGLister(c.GetElasticacheReplicationgroups(), c.GetRegion(), l) if err != nil { return nil, err } - resourceLister[ResourceTypes.RDS] = lr + resourceLister[ResourceTypes.ElastiCacheReplicationGroups] = lr + } + + // Enable RDSInstances (AWS) lister if configured. + if c.GetRdsInstances() != nil { + lr, err := newRdsInstancesLister(c.GetRdsInstances(), c.GetRegion(), l) + if err != nil { + return nil, err + } + resourceLister[ResourceTypes.RDSInstances] = lr + } + + // Enable RDSClusters (AWS) lister if configured. 
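+	// A sketch of the provider config stanza that would enable this lister
+	// (field names here are inferred from the generated getters in this
+	// change, so treat them as assumptions):
+	//
+	//   rds_clusters {
+	//     re_eval_sec: 600
+	//   }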
+ if c.GetRdsClusters() != nil { + lr, err := newRdsClustersLister(c.GetRdsClusters(), c.GetRegion(), l) + if err != nil { + return nil, err + } + resourceLister[ResourceTypes.RDSClusters] = lr } return resourceLister, nil @@ -129,16 +149,27 @@ func DefaultProviderConfig(resTypes map[string]string, reEvalSec int) *servercon ReEvalSec: proto.Int32(int32(reEvalSec)), } - case ResourceTypes.ElastiCaches: - c.Elasticaches = &configpb.ElastiCaches{ + case ResourceTypes.ElastiCacheClusters: + c.ElasticacheClusters = &configpb.ElastiCacheClusters{ + ReEvalSec: proto.Int32(int32(reEvalSec)), + } + + case ResourceTypes.ElastiCacheReplicationGroups: + c.ElasticacheReplicationgroups = &configpb.ElastiCacheReplicationGroups{ ReEvalSec: proto.Int32(int32(reEvalSec)), } - case ResourceTypes.RDS: - c.Rds = &configpb.RDS{ + case ResourceTypes.RDSInstances: + c.RdsInstances = &configpb.RDSInstances{ + ReEvalSec: proto.Int32(int32(reEvalSec)), + } + + case ResourceTypes.RDSClusters: + c.RdsClusters = &configpb.RDSClusters{ ReEvalSec: proto.Int32(int32(reEvalSec)), } } + } return &serverconfigpb.Provider{ diff --git a/internal/rds/aws/aws_test.go b/internal/rds/aws/aws_test.go index b6647b38e9f..2958c09b131 100644 --- a/internal/rds/aws/aws_test.go +++ b/internal/rds/aws/aws_test.go @@ -20,7 +20,7 @@ import ( serverconfigpb "github.com/cloudprober/cloudprober/internal/rds/server/proto" ) -func testAWSConfig(t *testing.T, pc *serverconfigpb.Provider, awsInstances bool, rdsConfig, elasticCachesConfig string, reEvalSec int) { +func testAWSConfig(t *testing.T, pc *serverconfigpb.Provider, awsInstances bool, rdsInstancesConfig, rdsClustersConfig, elasticCacheClustersConfig, elasticCacheRGConfig string, reEvalSec int) { t.Helper() if pc.GetId() != DefaultProviderID { @@ -41,33 +41,62 @@ func testAWSConfig(t *testing.T, pc *serverconfigpb.Provider, awsInstances bool, } } - // Verify that RDS config is set correctly. - if rdsConfig == "" { - if c.GetRds() != nil { - t.Errorf("c.GetRds()=%v, wanted=nil", c.GetRds()) + // Verify that ElastiCacheClusters is set correctly. + if elasticCacheClustersConfig == "" { + if c.GetElasticacheClusters() != nil { + t.Errorf("c.GetElasticacheClusters()=%v, wanted=nil", c.GetElasticacheClusters()) } } else { - if c.GetRds() == nil { - t.Fatalf("c.GetRds()=nil, wanted=not-nil") + if c.GetElasticacheClusters() == nil { + t.Fatalf("c.GetElasticaches()=nil, wanted=not-nil") } - if c.GetRds().GetReEvalSec() != int32(reEvalSec) { - t.Errorf("RDS config reEvalSec=%d, wanted=%d", c.GetRds().GetReEvalSec(), reEvalSec) + if c.GetElasticacheClusters().GetReEvalSec() != int32(reEvalSec) { + t.Errorf("Elasticacheclusters config reEvalSec=%d, wanted=%d", c.GetElasticacheClusters().GetReEvalSec(), reEvalSec) } } - // Verify that Elasticache is set correctly. - if elasticCachesConfig == "" { - if c.GetElasticaches() != nil { - t.Errorf("c.GetElasticaches()=%v, wanted=nil", c.GetElasticaches()) + // Verify that ElastiCacheReplicationGroups is set correctly. 
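+	// An empty expected-config string means this resource type was not
+	// requested, so the corresponding getter should return nil.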
+ if elasticCacheRGConfig == "" { + if c.GetElasticacheReplicationgroups() != nil { + t.Errorf("c.GetElasticacheReplicationgroups()=%v, wanted=nil", c.GetElasticacheReplicationgroups()) } } else { - if c.GetElasticaches() == nil { - t.Fatalf("c.GetElasticaches()=nil, wanted=not-nil") + if c.GetElasticacheReplicationgroups() == nil { + t.Fatalf("c.GetElasticacheReplicationgroups()=nil, wanted=not-nil") + } + if c.GetElasticacheReplicationgroups().GetReEvalSec() != int32(reEvalSec) { + t.Errorf("Elasticachereplicationgroups config reEvalSec=%d, wanted=%d", c.GetElasticacheReplicationgroups().GetReEvalSec(), reEvalSec) + } + } + + // Verify that RDS Clusters config is set correctly. + if rdsClustersConfig == "" { + if c.GetRdsClusters() != nil { + t.Errorf("c.GetRdsClusters()=%v, wanted=nil", c.GetRdsClusters()) + } + } else { + if c.GetRdsClusters() == nil { + t.Fatalf("c.GetRdsClusters()=nil, wanted=not-nil") } - if c.GetElasticaches().GetReEvalSec() != int32(reEvalSec) { - t.Errorf("Elasticaches config reEvalSec=%d, wanted=%d", c.GetElasticaches().GetReEvalSec(), reEvalSec) + if c.GetRdsClusters().GetReEvalSec() != int32(reEvalSec) { + t.Errorf("RDSClusters config reEvalSec=%d, wanted=%d", c.GetRdsClusters().GetReEvalSec(), reEvalSec) } } + + // Verify that RDS Instances config is set correctly. + if rdsInstancesConfig == "" { + if c.GetRdsInstances() != nil { + t.Errorf("c.GetRdsInstances()=%v, wanted=nil", c.GetRdsInstances()) + } + } else { + if c.GetRdsInstances() == nil { + t.Fatalf("c.GetRdsInstances()=nil, wanted=not-nil") + } + if c.GetRdsInstances().GetReEvalSec() != int32(reEvalSec) { + t.Errorf("RDSInstances config reEvalSec=%d, wanted=%d", c.GetRdsInstances().GetReEvalSec(), reEvalSec) + } + } + } func TestDefaultProviderConfig(t *testing.T) { @@ -76,25 +105,29 @@ func TestDefaultProviderConfig(t *testing.T) { } c := DefaultProviderConfig(resTypes, 10) - testAWSConfig(t, c, true, "", "", 10) + testAWSConfig(t, c, true, "", "", "", "", 10) - // Elasticache and RDS - testElastiCacheConfig := "elasticaches" - testRDSConfig := "rds" + // Elasticache cluster, replication groups and RDS + testElastiCacheClustersConfig := "elasticache_clusters" + testElastiCacheReplicationGroupsConfig := "elasticache_replicationgroups" + testRDSInstancesConfig := "rds_instances" + testRDSClustersConfig := "rds_clusters" resTypes = map[string]string{ - ResourceTypes.ElastiCaches: testElastiCacheConfig, - ResourceTypes.RDS: testRDSConfig, + ResourceTypes.ElastiCacheClusters: testElastiCacheClustersConfig, + ResourceTypes.ElastiCacheReplicationGroups: testElastiCacheReplicationGroupsConfig, + ResourceTypes.RDSClusters: testRDSClustersConfig, + ResourceTypes.RDSInstances: testRDSInstancesConfig, } c = DefaultProviderConfig(resTypes, 10) - testAWSConfig(t, c, false, testRDSConfig, testElastiCacheConfig, 10) + testAWSConfig(t, c, false, testRDSInstancesConfig, testRDSClustersConfig, testElastiCacheClustersConfig, testElastiCacheReplicationGroupsConfig, 10) - // EC2 instances, RTC and pub-sub + // EC2 and RDS instances resTypes = map[string]string{ - ResourceTypes.EC2Instances: "", - ResourceTypes.ElastiCaches: testElastiCacheConfig, - ResourceTypes.RDS: testRDSConfig, + ResourceTypes.EC2Instances: "", + ResourceTypes.ElastiCacheReplicationGroups: testElastiCacheReplicationGroupsConfig, + ResourceTypes.RDSInstances: testRDSInstancesConfig, } c = DefaultProviderConfig(resTypes, 10) - testAWSConfig(t, c, true, testRDSConfig, testElastiCacheConfig, 10) + testAWSConfig(t, c, true, testRDSInstancesConfig, "", "", 
testElastiCacheReplicationGroupsConfig, 10) } diff --git a/internal/rds/aws/ec2.go b/internal/rds/aws/ec2.go index 304ee506284..aa1b01833de 100644 --- a/internal/rds/aws/ec2.go +++ b/internal/rds/aws/ec2.go @@ -169,7 +169,7 @@ func (il *ec2InstancesLister) expand(reEvalInterval time.Duration) { func newEC2InstancesLister(c *configpb.EC2Instances, region string, l *logger.Logger) (*ec2InstancesLister, error) { cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) if err != nil { - return nil, fmt.Errorf("AWS configuration error : %v", err) + return nil, fmt.Errorf("AWS configuration error: %v", err) } client := ec2.NewFromConfig(cfg) diff --git a/internal/rds/aws/ec_clusters.go b/internal/rds/aws/ec_clusters.go new file mode 100644 index 00000000000..c06bac126aa --- /dev/null +++ b/internal/rds/aws/ec_clusters.go @@ -0,0 +1,216 @@ +package aws + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/internal/rds/server/filter" + "github.com/cloudprober/cloudprober/logger" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + "google.golang.org/protobuf/proto" +) + +// ecClusterInfo represents cache cluster items that we fetch from the elasticache API. +type ecClusterInfo struct { + ID string + IP string + Port int32 + TLSEnabled bool + Engine string + Tags map[string]string +} + +// ecClusterLocalCacheData represents objects that we store in local cache. +type ecClusterLocalCacheData struct { + ci *ecClusterInfo + lastUpdated int64 +} + +/* +ElastiCacheClustersFilters defines filters supported by the ec_cluster_instances resource +type. + + Example: + filter { + key: "name" + value: "service.*" + } + filter { + key: "engine" + value: "redis" + } + filter { + key: "labels.app" + value: "service-a" + } +*/ + +var ElastiCacheClustersFilters = struct { + RegexFilterKeys []string + LabelsFilter bool +}{ + []string{"name", "engine"}, + true, +} + +// elastiCacheClusterLister is a AWS ElastiCache cluster lister. It implements a cache, +// that's populated at a regular interval by making the AWS API calls. +// Listing actually only returns the current contents of that cache. +type elastiCacheClusterLister struct { + c *configpb.ElastiCacheClusters + client elasticache.DescribeCacheClustersAPIClient + tagclient *elasticache.Client + l *logger.Logger + mu sync.RWMutex + names []string + cacheList map[string]*ecClusterLocalCacheData + // This is mainly for unit testing, should be taken out if/when there is a respective + // interface in AWS SDK for go v2 to replace this. + discoverTags bool +} + +// listResources returns the list of resource records, where each record +// consists of an cluster name and the endpoint associated with it. 
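+//
+// Name (regex), engine, and label filters are supported. For example, a
+// request could restrict discovery to redis cache clusters with a filter
+// like (illustrative sketch):
+//
+//   filter {
+//     key: "engine"
+//     value: "redis"
+//   }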
+func (cl *elastiCacheClusterLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { + var resources []*pb.Resource + + allFilters, err := filter.ParseFilters(req.GetFilter(), ElastiCacheClustersFilters.RegexFilterKeys, "") + if err != nil { + return nil, err + } + + nameFilter, engineFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.RegexFilters["engine"], allFilters.LabelsFilter + + cl.mu.RLock() + defer cl.mu.RUnlock() + + for _, name := range cl.names { + ins := cl.cacheList[name].ci + if ins == nil { + cl.l.Errorf("elacticaches: cached info missing for %s", name) + continue + } + + if nameFilter != nil && !nameFilter.Match(name, cl.l) { + continue + } + if labelsFilter != nil && !labelsFilter.Match(ins.Tags, cl.l) { + continue + } + + if engineFilter != nil && !engineFilter.Match(ins.Engine, cl.l) { + continue + } + + resources = append(resources, &pb.Resource{ + Id: proto.String(ins.ID), + Name: proto.String(name), + Ip: proto.String(ins.IP), + Port: proto.Int32(ins.Port), + Labels: ins.Tags, + LastUpdated: proto.Int64(cl.cacheList[name].lastUpdated), + Info: []byte("clustered"), + }) + } + + cl.l.Infof("ec_clusters.listResources: returning %d instances", len(resources)) + return resources, nil +} + +// expand runs equivalent API calls as "aws elasticache describe-cache-clusters", +// and is used to populate the cache. More details about this call is available in +// https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-clusters.html +func (cl *elastiCacheClusterLister) expand(reEvalInterval time.Duration) { + cl.l.Infof("ec_clusters.expand: expanding AWS targets") + + resp, err := cl.client.DescribeCacheClusters(context.TODO(), nil) + if err != nil { + cl.l.Errorf("ec_clusters.expand: error while listing cache clusters: %v", err) + return + } + + var ids = make([]string, 0) + var cacheList = make(map[string]*ecClusterLocalCacheData) + ts := time.Now().Unix() + for _, c := range resp.CacheClusters { + if len(c.CacheNodes) == 0 { + continue + } + ci := &ecClusterInfo{ + ID: *c.CacheClusterId, + TLSEnabled: *c.TransitEncryptionEnabled, + IP: *c.CacheNodes[0].Endpoint.Address, + Port: *c.CacheNodes[0].Endpoint.Port, + Engine: *c.Engine, + Tags: make(map[string]string), + } + + if cl.discoverTags { + // AWS doesn't return Tag information in the response, we'll need to request it separately + // NB: This might get throttled by AWS, if we make too many requests, see if we can batch or slow down + // Add sleep if needed to the end of the loop + tagsResp, err := cl.tagclient.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ + ResourceName: c.ARN, + }) + if err != nil { + cl.l.Errorf("ec_clusters.expand: error getting tags for cluster %s: %v", *c.CacheClusterId, err) + continue + } + + // Convert to map + for _, t := range tagsResp.TagList { + ci.Tags[*t.Key] = *t.Value + } + } + + cacheList[*c.CacheClusterId] = &ecClusterLocalCacheData{ci, ts} + ids = append(ids, *c.CacheClusterId) + } + + cl.mu.Lock() + cl.names = ids + cl.cacheList = cacheList + cl.mu.Unlock() + + cl.l.Infof("ec_clusters.expand: got %d caches", len(ids)) +} + +func newElastiCacheClusterLister(c *configpb.ElastiCacheClusters, region string, l *logger.Logger) (*elastiCacheClusterLister, error) { + cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) + if err != nil { + return nil, fmt.Errorf("AWS configuration error: %v", err) + } + + client := elasticache.NewFromConfig(cfg) + + cl := &elastiCacheClusterLister{ + 
c: c, + client: client, + tagclient: client, + cacheList: make(map[string]*ecClusterLocalCacheData), + l: l, + discoverTags: true, + } + + reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second + go func() { + cl.expand(0) + // Introduce a random delay between 0-reEvalInterval before + // starting the refresh loop. If there are multiple cloudprober + // awsInstances, this will make sure that each instance calls AWS + // API at a different point of time. + randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) + time.Sleep(time.Duration(randomDelaySec) * time.Second) + for range time.Tick(reEvalInterval) { + cl.expand(reEvalInterval) + } + }() + return cl, nil +} diff --git a/internal/rds/aws/ec_clusters_test.go b/internal/rds/aws/ec_clusters_test.go new file mode 100644 index 00000000000..e31411855b8 --- /dev/null +++ b/internal/rds/aws/ec_clusters_test.go @@ -0,0 +1,298 @@ +package aws + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/service/elasticache" + "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/logger" + "google.golang.org/protobuf/proto" +) + +type mockECClusterCache struct { + output elasticache.DescribeCacheClustersOutput + err error +} + +func (m mockECClusterCache) DescribeCacheClusters(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) { + return &m.output, m.err +} + +type testECCluster struct { + instances []testECClusterInstance + id string +} + +type testECClusterInstance struct { + id string + ipAddr string + port int32 + tags map[string]string + engine string +} + +func TestECClusterExpand(t *testing.T) { + cases := []struct { + err error + cluster *testECCluster + expectCount int + }{ + + { + cluster: &testECCluster{ + id: "test-cluster-id", + + instances: []testECClusterInstance{ + { + id: "test-id", + }, + { + id: "test-id-2", + }, + }, + }, + err: nil, + expectCount: 1, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + mock := mockECClusterCache{ + err: tt.err, + } + + tlsenabled := false + engine := "redis" + + if tt.cluster != nil { + mock.output = elasticache.DescribeCacheClustersOutput{ + CacheClusters: []types.CacheCluster{ + { + CacheClusterId: &tt.cluster.id, + TransitEncryptionEnabled: &tlsenabled, + Engine: &engine, + }, + }, + } + for _, v := range tt.cluster.instances { + c := types.CacheNode{ + Endpoint: &types.Endpoint{ + Address: &v.ipAddr, + Port: &v.port, + }, + } + mock.output.CacheClusters[0].CacheNodes = append(mock.output.CacheClusters[0].CacheNodes, c) + } + } + + il := &elastiCacheClusterLister{ + client: mock, + tagclient: &elasticache.Client{}, + cacheList: make(map[string]*ecClusterLocalCacheData), + discoverTags: false, // tag discovery to be tested once the client can be mocked + } + il.expand(time.Second) + + // Check for instance count + if len(il.cacheList) != tt.expectCount { + t.Errorf("Got %d instances, want %d", len(il.cacheList), tt.expectCount) + } + }) + } +} + +func TestECClusterLister(t *testing.T) { + cases := []struct { + instances []*testECClusterInstance + filter []*pb.Filter + expectErr bool + expectedCount int + }{ + { + instances: []*testECClusterInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": 
"b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 2, + }, + { + instances: []*testECClusterInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("ins."), + Value: proto.String("b"), + }, + }, + expectErr: true, + }, + { + instances: []*testECClusterInstance{}, + expectedCount: 0, + }, + { + instances: []*testECClusterInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"test1": "a"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"test2": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testECClusterInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("name"), + Value: proto.String("nonexistent"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testECClusterInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + engine: "memcached", + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + engine: "memcached", + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("engine"), + Value: proto.String("redis"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testECClusterInstance{ + { + id: "test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + engine: "redis", + }, + { + id: "test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + engine: "redis", + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("engine"), + Value: proto.String("redis"), + }, + }, + expectedCount: 2, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + + names := []string{} + cache := make(map[string]*ecClusterLocalCacheData) + for _, ti := range tt.instances { + ci := &ecClusterInfo{ + ID: ti.id, + IP: ti.ipAddr, + Tags: ti.tags, + Engine: ti.engine, + } + cache[ti.id] = &ecClusterLocalCacheData{ + ci: ci, + } + + names = append(names, ti.id) + } + + lister := &elastiCacheClusterLister{ + cacheList: cache, + names: names, + l: &logger.Logger{}, + } + + var filters []*pb.Filter + if tt.filter != nil { + filters = append(filters, tt.filter...) + } + + resources, err := lister.listResources(&pb.ListResourcesRequest{ + Filter: filters, + }) + + if err != nil { + if !tt.expectErr { + t.Errorf("Got error while listing resources: %v, expected no errors", err) + } + return + } + + if len(resources) != tt.expectedCount { + t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) + } + }) + } +} diff --git a/internal/rds/aws/elasticache.go b/internal/rds/aws/ec_replicationgroups.go similarity index 52% rename from internal/rds/aws/elasticache.go rename to internal/rds/aws/ec_replicationgroups.go index c28b1046dfb..f479bfa0f45 100644 --- a/internal/rds/aws/elasticache.go +++ b/internal/rds/aws/ec_replicationgroups.go @@ -17,25 +17,25 @@ import ( "google.golang.org/protobuf/proto" ) -// cacheInfo represents instance items that we fetch from the elasticache API. 
-type cacheInfo struct { +// elastiCacheClusterLister represents replication group items that we fetch from the elasticache API. +type ecReplicationGroupInfo struct { ID string - Ip string + IP string Port int32 - Clustered bool TLSEnabled bool + Clustered bool Engine string Tags map[string]string } -// cacheData represents objects that we store in cache. -type cacheData struct { - ci *cacheInfo +// ecReplicationGroupCacheData represents objects that we store in the local cache. +type ecReplicationGroupCacheData struct { + ci *ecReplicationGroupInfo lastUpdated int64 } /* -AWSInstancesFilters defines filters supported by the ec2_instances resource +ElastiCacheRGFilters defines filters supported by the ec_replicationgroups_instances resource type. Example: @@ -53,7 +53,7 @@ type. } */ -var ElastiCacheFilters = struct { +var ElastiCacheRGFilters = struct { RegexFilterKeys []string LabelsFilter bool }{ @@ -61,18 +61,17 @@ var ElastiCacheFilters = struct { true, } -// elastiCacheLister is a AWS ElastiCache cluster lister. It implements a cache, +// elastiCacheRGLister is a AWS ElastiCache replication group lister. It implements a cache, // that's populated at a regular interval by making the AWS API calls. // Listing actually only returns the current contents of that cache. -type elastiCacheLister struct { - c *configpb.ElastiCaches - clusterclient elasticache.DescribeCacheClustersAPIClient - rgclient elasticache.DescribeReplicationGroupsAPIClient - tagclient *elasticache.Client - l *logger.Logger - mu sync.RWMutex - names []string - cacheList map[string]*cacheData +type elastiCacheRGLister struct { + c *configpb.ElastiCacheReplicationGroups + client elasticache.DescribeReplicationGroupsAPIClient + tagclient *elasticache.Client + l *logger.Logger + mu sync.RWMutex + names []string + cacheList map[string]*ecReplicationGroupCacheData // This is for unit testing, should be taken out if/when there is a respective // interface in AWS SDK for go v2 to replace this logs discoverTags bool @@ -80,10 +79,10 @@ type elastiCacheLister struct { // listResources returns the list of resource records, where each record // consists of an cluster name and the endpoint associated with it. -func (cl *elastiCacheLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { +func (cl *elastiCacheRGLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { var resources []*pb.Resource - allFilters, err := filter.ParseFilters(req.GetFilter(), ElastiCacheFilters.RegexFilterKeys, "") + allFilters, err := filter.ParseFilters(req.GetFilter(), ElastiCacheRGFilters.RegexFilterKeys, "") if err != nil { return nil, err } @@ -96,7 +95,7 @@ func (cl *elastiCacheLister) listResources(req *pb.ListResourcesRequest) ([]*pb. for _, name := range cl.names { ins := cl.cacheList[name].ci if ins == nil { - cl.l.Errorf("elacticaches: cached info missing for %s", name) + cl.l.Errorf("ec_replicationgroups: cached info missing for %s", name) continue } @@ -111,90 +110,56 @@ func (cl *elastiCacheLister) listResources(req *pb.ListResourcesRequest) ([]*pb. 
continue } + var info string + if ins.Clustered { + info = "clustered" + } resources = append(resources, &pb.Resource{ + Id: proto.String(ins.ID), Name: proto.String(name), - Ip: proto.String(ins.Ip), + Ip: proto.String(ins.IP), Port: proto.Int32(ins.Port), Labels: ins.Tags, LastUpdated: proto.Int64(cl.cacheList[name].lastUpdated), + Info: []byte(info), }) } - cl.l.Infof("elasticaches.listResources: returning %d instances", len(resources)) + cl.l.Infof("ec_replicationgroups.listResources: returning %d instances", len(resources)) return resources, nil } -// expand runs equivalent API calls as "aws describe-instances", -// and is used to populate the cache. -func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { - cl.l.Infof("elasticaches.expand: expanding AWS targets") +// expand runs equivalent API calls as "aws elasticache describe-replication-groups", +// and is used to populate the cache. More details about this API call can be found in +// https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeReplicationGroups.html +func (cl *elastiCacheRGLister) expand(reEvalInterval time.Duration) { + cl.l.Infof("ec_replicationgroups.expand: expanding AWS targets") - resCC, err := cl.clusterclient.DescribeCacheClusters(context.TODO(), nil) + resp, err := cl.client.DescribeReplicationGroups(context.TODO(), nil) if err != nil { - cl.l.Errorf("elasticaches.expand: error while listing cache clusters: %v", err) + cl.l.Errorf("ec_replicationgroups.expand: error while listing replication groups: %v", err) return } var ids = make([]string, 0) - var cacheList = make(map[string]*cacheData) - + var cacheList = make(map[string]*ecReplicationGroupCacheData) ts := time.Now().Unix() - for _, r := range resCC.CacheClusters { - if len(r.CacheNodes) == 0 { - continue - } - ci := &cacheInfo{ - ID: *r.CacheClusterId, - TLSEnabled: *r.TransitEncryptionEnabled, - Ip: *r.CacheNodes[0].Endpoint.Address, - Port: *r.CacheNodes[0].Endpoint.Port, - Engine: *r.Engine, - Clustered: false, - Tags: make(map[string]string), - } - - if cl.discoverTags { - // AWS doesn't return Tag information in the response, we'll need to request it separately - // NB: This might get throttled by AWS, if we make too many requests, see if we can batch or slow down - // Add sleep if needed to the end of the loop - tagsResp, err := cl.tagclient.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ - ResourceName: r.ARN, - }) - if err != nil { - cl.l.Errorf("elasticaches.expand: error getting tags for cluster %s: %v", *r.CacheClusterId, err) - continue - } - // Convert to map - for _, t := range tagsResp.TagList { - ci.Tags[*t.Key] = *t.Value - } - } - - cacheList[*r.CacheClusterId] = &cacheData{ci, ts} - ids = append(ids, *r.CacheClusterId) - } - - resRG, err := cl.rgclient.DescribeReplicationGroups(context.TODO(), nil) - if err != nil { - cl.l.Errorf("elasticaches.expand: error while listing replication groups: %v", err) - return - } - for _, r := range resRG.ReplicationGroups { - var ci *cacheInfo + for _, r := range resp.ReplicationGroups { + var ci *ecReplicationGroupInfo if r.ConfigurationEndpoint != nil { //clustered - ci = &cacheInfo{ + ci = &ecReplicationGroupInfo{ ID: *r.ReplicationGroupId, - Ip: *r.ConfigurationEndpoint.Address, + IP: *r.ConfigurationEndpoint.Address, Port: *r.ConfigurationEndpoint.Port, TLSEnabled: *r.TransitEncryptionEnabled, Clustered: true, Tags: make(map[string]string), } } else if len(r.NodeGroups) > 0 && r.NodeGroups[0].PrimaryEndpoint != nil { - ci = 
&cacheInfo{ + ci = &ecReplicationGroupInfo{ ID: *r.ReplicationGroupId, - Ip: *r.NodeGroups[0].PrimaryEndpoint.Address, + IP: *r.NodeGroups[0].PrimaryEndpoint.Address, Port: *r.NodeGroups[0].PrimaryEndpoint.Port, TLSEnabled: *r.TransitEncryptionEnabled, Clustered: false, @@ -210,7 +175,7 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { ResourceName: r.ARN, }) if err != nil { - cl.l.Errorf("elasticaches.expand: error getting tags for replication group %s: %v", *r.ReplicationGroupId, err) + cl.l.Errorf("ec_replicationgroups.expand: error getting tags for replication group %s: %v", *r.ReplicationGroupId, err) continue } @@ -220,7 +185,7 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { } } - cacheList[*r.ReplicationGroupId] = &cacheData{ci, ts} + cacheList[*r.ReplicationGroupId] = &ecReplicationGroupCacheData{ci, ts} ids = append(ids, *r.ReplicationGroupId) } @@ -229,25 +194,24 @@ func (cl *elastiCacheLister) expand(reEvalInterval time.Duration) { cl.cacheList = cacheList cl.mu.Unlock() - cl.l.Infof("elasticaches.expand: got %d caches", len(ids)) + cl.l.Infof("ec_replicationgroups.expand: got %d caches", len(ids)) } -func newElastiCacheLister(c *configpb.ElastiCaches, region string, l *logger.Logger) (*elastiCacheLister, error) { +func newElastiCacheRGLister(c *configpb.ElastiCacheReplicationGroups, region string, l *logger.Logger) (*elastiCacheRGLister, error) { cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) if err != nil { - return nil, fmt.Errorf("AWS configuration error : %v", err) + return nil, fmt.Errorf("AWS configuration error: %v", err) } client := elasticache.NewFromConfig(cfg) - cl := &elastiCacheLister{ - c: c, - clusterclient: client, - rgclient: client, - tagclient: client, - cacheList: make(map[string]*cacheData), - l: l, - discoverTags: true, + cl := &elastiCacheRGLister{ + c: c, + client: client, + tagclient: client, + cacheList: make(map[string]*ecReplicationGroupCacheData), + l: l, + discoverTags: true, } reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second diff --git a/internal/rds/aws/elasticache_test.go b/internal/rds/aws/ec_replicationgroups_test.go similarity index 61% rename from internal/rds/aws/elasticache_test.go rename to internal/rds/aws/ec_replicationgroups_test.go index 71ecc16de7a..95ec4951b2d 100644 --- a/internal/rds/aws/elasticache_test.go +++ b/internal/rds/aws/ec_replicationgroups_test.go @@ -14,27 +14,21 @@ import ( "google.golang.org/protobuf/proto" ) -type mockECCache struct { - ccoutput elasticache.DescribeCacheClustersOutput - rgoutput elasticache.DescribeReplicationGroupsOutput - ccerr error - rgerr error +type mockECRGCache struct { + output elasticache.DescribeReplicationGroupsOutput + err error } -func (m mockECCache) DescribeCacheClusters(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) { - return &m.ccoutput, m.ccerr +func (m mockECRGCache) DescribeReplicationGroups(ctx context.Context, params *elasticache.DescribeReplicationGroupsInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeReplicationGroupsOutput, error) { + return &m.output, m.err } -func (m mockECCache) DescribeReplicationGroups(ctx context.Context, params *elasticache.DescribeReplicationGroupsInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeReplicationGroupsOutput, error) { - return &m.rgoutput, m.rgerr -} - -type testECCluster struct { - instances 
[]testECInstance +type testECReplicationGroup struct { + instances []testECRGInstance id string } -type testECInstance struct { +type testECRGInstance struct { id string ipAddr string port int32 @@ -42,18 +36,16 @@ type testECInstance struct { engine string } -func TestECExpand(t *testing.T) { +func TestECRGExpand(t *testing.T) { cases := []struct { - rgerr error - rg *testECCluster - ccerr error - cc *testECCluster + err error + group *testECReplicationGroup expectCount int }{ { - rg: &testECCluster{ + group: &testECReplicationGroup{ id: "test-cluster-id", - instances: []testECInstance{ + instances: []testECRGInstance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -68,74 +60,32 @@ func TestECExpand(t *testing.T) { }, }, }, - rgerr: nil, - cc: &testECCluster{}, - ccerr: nil, + err: nil, expectCount: 1, }, { - rg: &testECCluster{}, - rgerr: nil, + group: &testECReplicationGroup{}, + err: nil, expectCount: 0, }, { - rg: &testECCluster{}, - rgerr: fmt.Errorf("some error"), + group: &testECReplicationGroup{}, + err: fmt.Errorf("some error"), expectCount: 0, }, - { - cc: &testECCluster{ - id: "test-cluster-id", - - instances: []testECInstance{ - { - id: "test-id", - }, - { - id: "test-id-2", - }, - }, - }, - rgerr: nil, - ccerr: nil, - expectCount: 1, - }, } for i, tt := range cases { t.Run(strconv.Itoa(i), func(t *testing.T) { - mock := mockECCache{ - rgerr: tt.rgerr, - ccerr: tt.ccerr, + mock := mockECRGCache{ + err: tt.err, } tlsenabled := false - engine := "redis" - - if tt.cc != nil { - mock.ccoutput = elasticache.DescribeCacheClustersOutput{ - CacheClusters: []types.CacheCluster{ - { - CacheClusterId: &tt.cc.id, - TransitEncryptionEnabled: &tlsenabled, - Engine: &engine, - }, - }, - } - for _, v := range tt.cc.instances { - c := types.CacheNode{ - Endpoint: &types.Endpoint{ - Address: &v.ipAddr, - Port: &v.port, - }, - } - mock.ccoutput.CacheClusters[0].CacheNodes = append(mock.ccoutput.CacheClusters[0].CacheNodes, c) - } - } - if tt.rg != nil { - mock.rgoutput = elasticache.DescribeReplicationGroupsOutput{} - for _, v := range tt.rg.instances { + if tt.group != nil { + mock.output = elasticache.DescribeReplicationGroupsOutput{} + for _, v := range tt.group.instances { g := types.ReplicationGroup{ ReplicationGroupId: &v.id, TransitEncryptionEnabled: &tlsenabled, @@ -144,16 +94,15 @@ func TestECExpand(t *testing.T) { Port: &v.port, }, } - mock.rgoutput.ReplicationGroups = append(mock.rgoutput.ReplicationGroups, g) + mock.output.ReplicationGroups = append(mock.output.ReplicationGroups, g) } } - il := &elastiCacheLister{ - clusterclient: mock, - rgclient: mock, - tagclient: &elasticache.Client{}, - cacheList: make(map[string]*cacheData), - discoverTags: false, // tag discovery to be tested once the client can be mocked + il := &elastiCacheRGLister{ + client: mock, + tagclient: &elasticache.Client{}, + cacheList: make(map[string]*ecReplicationGroupCacheData), + discoverTags: false, // tag discovery to be tested once the client can be mocked } il.expand(time.Second) @@ -167,13 +116,13 @@ func TestECExpand(t *testing.T) { func TestECLister(t *testing.T) { cases := []struct { - instances []*testECInstance + instances []*testECRGInstance filter []*pb.Filter expectErr bool expectedCount int }{ { - instances: []*testECInstance{ + instances: []*testECRGInstance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -194,7 +143,7 @@ func TestECLister(t *testing.T) { expectedCount: 2, }, { - instances: []*testECInstance{ + instances: []*testECRGInstance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -215,11 +164,11 @@ func 
TestECLister(t *testing.T) { expectErr: true, }, { - instances: []*testECInstance{}, + instances: []*testECRGInstance{}, expectedCount: 0, }, { - instances: []*testECInstance{ + instances: []*testECRGInstance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -240,7 +189,7 @@ func TestECLister(t *testing.T) { expectedCount: 0, }, { - instances: []*testECInstance{ + instances: []*testECRGInstance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -261,7 +210,7 @@ func TestECLister(t *testing.T) { expectedCount: 0, }, { - instances: []*testECInstance{ + instances: []*testECRGInstance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -284,7 +233,7 @@ func TestECLister(t *testing.T) { expectedCount: 0, }, { - instances: []*testECInstance{ + instances: []*testECRGInstance{ { id: "test-id", ipAddr: "10.0.0.2", @@ -312,22 +261,22 @@ func TestECLister(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) { names := []string{} - cache := make(map[string]*cacheData) + cache := make(map[string]*ecReplicationGroupCacheData) for _, ti := range tt.instances { - ci := &cacheInfo{ + ci := &ecReplicationGroupInfo{ ID: ti.id, - Ip: ti.ipAddr, + IP: ti.ipAddr, Tags: ti.tags, Engine: ti.engine, } - cache[ti.id] = &cacheData{ + cache[ti.id] = &ecReplicationGroupCacheData{ ci: ci, } names = append(names, ti.id) } - lister := &elastiCacheLister{ + lister := &elastiCacheRGLister{ cacheList: cache, names: names, l: &logger.Logger{}, diff --git a/internal/rds/aws/proto/config.pb.go b/internal/rds/aws/proto/config.pb.go index 9ec3b96a21f..48c6c76fd21 100644 --- a/internal/rds/aws/proto/config.pb.go +++ b/internal/rds/aws/proto/config.pb.go @@ -89,8 +89,8 @@ func (x *EC2Instances) GetReEvalSec() int32 { return Default_EC2Instances_ReEvalSec } -// ElastiCaches discovery options. -type ElastiCaches struct { +// ElastiCacheReplicationGroups discovery options. +type ElastiCacheReplicationGroups struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -99,13 +99,13 @@ type ElastiCaches struct { ReEvalSec *int32 `protobuf:"varint,98,opt,name=re_eval_sec,json=reEvalSec,def=600" json:"re_eval_sec,omitempty"` // default 10 mins } -// Default values for ElastiCaches fields. +// Default values for ElastiCacheReplicationGroups fields. const ( - Default_ElastiCaches_ReEvalSec = int32(600) + Default_ElastiCacheReplicationGroups_ReEvalSec = int32(600) ) -func (x *ElastiCaches) Reset() { - *x = ElastiCaches{} +func (x *ElastiCacheReplicationGroups) Reset() { + *x = ElastiCacheReplicationGroups{} if protoimpl.UnsafeEnabled { mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -113,13 +113,13 @@ func (x *ElastiCaches) Reset() { } } -func (x *ElastiCaches) String() string { +func (x *ElastiCacheReplicationGroups) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ElastiCaches) ProtoMessage() {} +func (*ElastiCacheReplicationGroups) ProtoMessage() {} -func (x *ElastiCaches) ProtoReflect() protoreflect.Message { +func (x *ElastiCacheReplicationGroups) ProtoReflect() protoreflect.Message { mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -131,20 +131,74 @@ func (x *ElastiCaches) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ElastiCaches.ProtoReflect.Descriptor instead. 
-func (*ElastiCaches) Descriptor() ([]byte, []int) { +// Deprecated: Use ElastiCacheReplicationGroups.ProtoReflect.Descriptor instead. +func (*ElastiCacheReplicationGroups) Descriptor() ([]byte, []int) { return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{1} } -func (x *ElastiCaches) GetReEvalSec() int32 { +func (x *ElastiCacheReplicationGroups) GetReEvalSec() int32 { if x != nil && x.ReEvalSec != nil { return *x.ReEvalSec } - return Default_ElastiCaches_ReEvalSec + return Default_ElastiCacheReplicationGroups_ReEvalSec } -// RDS (Amazon Relational Databases) discovery options. -type RDS struct { +// ElastiCacheClusters discovery options. +type ElastiCacheClusters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How often resources should be refreshed. + ReEvalSec *int32 `protobuf:"varint,98,opt,name=re_eval_sec,json=reEvalSec,def=600" json:"re_eval_sec,omitempty"` // default 10 mins +} + +// Default values for ElastiCacheClusters fields. +const ( + Default_ElastiCacheClusters_ReEvalSec = int32(600) +) + +func (x *ElastiCacheClusters) Reset() { + *x = ElastiCacheClusters{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ElastiCacheClusters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ElastiCacheClusters) ProtoMessage() {} + +func (x *ElastiCacheClusters) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ElastiCacheClusters.ProtoReflect.Descriptor instead. +func (*ElastiCacheClusters) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{2} +} + +func (x *ElastiCacheClusters) GetReEvalSec() int32 { + if x != nil && x.ReEvalSec != nil { + return *x.ReEvalSec + } + return Default_ElastiCacheClusters_ReEvalSec +} + +// RDS (Amazon Relational Databases) Clusters discovery options. +type RDSClusters struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -159,28 +213,28 @@ type RDS struct { ReEvalSec *int32 `protobuf:"varint,98,opt,name=re_eval_sec,json=reEvalSec,def=600" json:"re_eval_sec,omitempty"` // default 10 mins } -// Default values for RDS fields. +// Default values for RDSClusters fields. 
const ( - Default_RDS_ReEvalSec = int32(600) + Default_RDSClusters_ReEvalSec = int32(600) ) -func (x *RDS) Reset() { - *x = RDS{} +func (x *RDSClusters) Reset() { + *x = RDSClusters{} if protoimpl.UnsafeEnabled { - mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2] + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RDS) String() string { +func (x *RDSClusters) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RDS) ProtoMessage() {} +func (*RDSClusters) ProtoMessage() {} -func (x *RDS) ProtoReflect() protoreflect.Message { - mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2] +func (x *RDSClusters) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -191,37 +245,118 @@ func (x *RDS) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RDS.ProtoReflect.Descriptor instead. -func (*RDS) Descriptor() ([]byte, []int) { - return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{2} +// Deprecated: Use RDSClusters.ProtoReflect.Descriptor instead. +func (*RDSClusters) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{3} } -func (x *RDS) GetIdentifier() string { +func (x *RDSClusters) GetIdentifier() string { if x != nil && x.Identifier != nil { return *x.Identifier } return "" } -func (x *RDS) GetFilter() []string { +func (x *RDSClusters) GetFilter() []string { if x != nil { return x.Filter } return nil } -func (x *RDS) GetIncludeShared() bool { +func (x *RDSClusters) GetIncludeShared() bool { if x != nil && x.IncludeShared != nil { return *x.IncludeShared } return false } -func (x *RDS) GetReEvalSec() int32 { +func (x *RDSClusters) GetReEvalSec() int32 { if x != nil && x.ReEvalSec != nil { return *x.ReEvalSec } - return Default_RDS_ReEvalSec + return Default_RDSClusters_ReEvalSec +} + +// RDS (Amazon Relational Databases) Clusters discovery options. +type RDSInstances struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster + // if specified, only the corresponding cluster information is returned. + Identifier *string `protobuf:"bytes,1,opt,name=identifier" json:"identifier,omitempty"` + // Filters to be added to the discovery and search. + Filter []string `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` + // Whether to includes information about clusters shared from other AWS accounts. + IncludeShared *bool `protobuf:"varint,3,opt,name=include_shared,json=includeShared" json:"include_shared,omitempty"` + ReEvalSec *int32 `protobuf:"varint,98,opt,name=re_eval_sec,json=reEvalSec,def=600" json:"re_eval_sec,omitempty"` // default 10 mins +} + +// Default values for RDSInstances fields. 
+const ( + Default_RDSInstances_ReEvalSec = int32(600) +) + +func (x *RDSInstances) Reset() { + *x = RDSInstances{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RDSInstances) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RDSInstances) ProtoMessage() {} + +func (x *RDSInstances) ProtoReflect() protoreflect.Message { + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RDSInstances.ProtoReflect.Descriptor instead. +func (*RDSInstances) Descriptor() ([]byte, []int) { + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{4} +} + +func (x *RDSInstances) GetIdentifier() string { + if x != nil && x.Identifier != nil { + return *x.Identifier + } + return "" +} + +func (x *RDSInstances) GetFilter() []string { + if x != nil { + return x.Filter + } + return nil +} + +func (x *RDSInstances) GetIncludeShared() bool { + if x != nil && x.IncludeShared != nil { + return *x.IncludeShared + } + return false +} + +func (x *RDSInstances) GetReEvalSec() int32 { + if x != nil && x.ReEvalSec != nil { + return *x.ReEvalSec + } + return Default_RDSInstances_ReEvalSec } // LoadBalancers discovery options. @@ -238,7 +373,7 @@ type LoadBalancers struct { func (x *LoadBalancers) Reset() { *x = LoadBalancers{} if protoimpl.UnsafeEnabled { - mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3] + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -251,7 +386,7 @@ func (x *LoadBalancers) String() string { func (*LoadBalancers) ProtoMessage() {} func (x *LoadBalancers) ProtoReflect() protoreflect.Message { - mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3] + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -264,7 +399,7 @@ func (x *LoadBalancers) ProtoReflect() protoreflect.Message { // Deprecated: Use LoadBalancers.ProtoReflect.Descriptor instead. func (*LoadBalancers) Descriptor() ([]byte, []int) { - return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{3} + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{5} } func (x *LoadBalancers) GetName() []string { @@ -287,17 +422,22 @@ type ProviderConfig struct { // ECS instances discovery options. This field should be declared for the AWS // instances discovery to be enabled. Ec2Instances *EC2Instances `protobuf:"bytes,3,opt,name=ec2_instances,json=ec2Instances" json:"ec2_instances,omitempty"` - // ElastiCache discovery options. This field should be declared for the - // elasticache discovery to be enabled. - Elasticaches *ElastiCaches `protobuf:"bytes,4,opt,name=elasticaches" json:"elasticaches,omitempty"` - // RDS discovery options. 
- Rds *RDS `protobuf:"bytes,5,opt,name=rds" json:"rds,omitempty"` + // ElastiCacheReplicationGroups discovery options. This field should be declared for the + // elasticache replication groups discovery to be enabled. + ElasticacheReplicationgroups *ElastiCacheReplicationGroups `protobuf:"bytes,4,opt,name=elasticache_replicationgroups,json=elasticacheReplicationgroups" json:"elasticache_replicationgroups,omitempty"` + // ElastiCacheClusters discovery options. This field should be declared for the + // elasticache cluster discovery to be enabled. + ElasticacheClusters *ElastiCacheClusters `protobuf:"bytes,5,opt,name=elasticache_clusters,json=elasticacheClusters" json:"elasticache_clusters,omitempty"` + // RDS instances discovery options. + RdsInstances *RDSInstances `protobuf:"bytes,6,opt,name=rds_instances,json=rdsInstances" json:"rds_instances,omitempty"` + // RDS clusters discovery options. + RdsClusters *RDSClusters `protobuf:"bytes,7,opt,name=rds_clusters,json=rdsClusters" json:"rds_clusters,omitempty"` } func (x *ProviderConfig) Reset() { *x = ProviderConfig{} if protoimpl.UnsafeEnabled { - mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4] + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -310,7 +450,7 @@ func (x *ProviderConfig) String() string { func (*ProviderConfig) ProtoMessage() {} func (x *ProviderConfig) ProtoReflect() protoreflect.Message { - mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4] + mi := &file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -323,7 +463,7 @@ func (x *ProviderConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ProviderConfig.ProtoReflect.Descriptor instead. 
func (*ProviderConfig) Descriptor() ([]byte, []int) { - return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{4} + return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescGZIP(), []int{6} } func (x *ProviderConfig) GetProfileName() string { @@ -347,16 +487,30 @@ func (x *ProviderConfig) GetEc2Instances() *EC2Instances { return nil } -func (x *ProviderConfig) GetElasticaches() *ElastiCaches { +func (x *ProviderConfig) GetElasticacheReplicationgroups() *ElastiCacheReplicationGroups { + if x != nil { + return x.ElasticacheReplicationgroups + } + return nil +} + +func (x *ProviderConfig) GetElasticacheClusters() *ElastiCacheClusters { if x != nil { - return x.Elasticaches + return x.ElasticacheClusters } return nil } -func (x *ProviderConfig) GetRds() *RDS { +func (x *ProviderConfig) GetRdsInstances() *RDSInstances { if x != nil { - return x.Rds + return x.RdsInstances + } + return nil +} + +func (x *ProviderConfig) GetRdsClusters() *RDSClusters { + if x != nil { + return x.RdsClusters } return nil } @@ -373,41 +527,71 @@ var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_ 0x0c, 0x45, 0x43, 0x32, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x62, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, 0x30, 0x52, 0x09, 0x72, 0x65, 0x45, 0x76, 0x61, 0x6c, 0x53, - 0x65, 0x63, 0x22, 0x33, 0x0a, 0x0c, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, + 0x65, 0x63, 0x22, 0x43, 0x0a, 0x1c, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x62, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, 0x30, 0x52, 0x09, 0x72, 0x65, - 0x45, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x89, 0x01, 0x0a, 0x03, 0x52, 0x44, 0x53, 0x12, - 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, - 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x12, 0x23, + 0x45, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x3a, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, + 0x69, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x62, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, 0x30, 0x52, 0x09, 0x72, 0x65, 0x45, 0x76, 0x61, 0x6c, - 0x53, 0x65, 0x63, 0x22, 0x23, 0x0a, 0x0d, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x86, 0x02, 0x0a, 0x0e, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x70, - 0x72, 0x6f, 
0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x0d, 0x65, 0x63, 0x32, 0x5f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x53, 0x65, 0x63, 0x22, 0x91, 0x01, 0x0a, 0x0b, 0x52, 0x44, 0x53, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, + 0x63, 0x18, 0x62, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, 0x30, 0x52, 0x09, 0x72, 0x65, + 0x45, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x92, 0x01, 0x0a, 0x0c, 0x52, 0x44, 0x53, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x65, 0x5f, 0x65, 0x76, + 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x62, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x03, 0x36, 0x30, + 0x30, 0x52, 0x09, 0x72, 0x65, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x23, 0x0a, 0x0d, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0xf5, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, + 0x46, 0x0a, 0x0d, 0x65, 0x63, 0x32, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, + 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x45, 0x43, 0x32, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0c, 0x65, 0x63, 0x32, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x76, 0x0a, 0x1d, 0x65, 0x6c, 0x61, 0x73, 0x74, + 0x69, 0x63, 0x61, 0x63, 0x68, 0x65, 
0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, + 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x52, 0x1c, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, + 0x5b, 0x0a, 0x14, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, - 0x61, 0x77, 0x73, 0x2e, 0x45, 0x43, 0x32, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x52, 0x0c, 0x65, 0x63, 0x32, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x45, - 0x0a, 0x0c, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, - 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x45, 0x6c, 0x61, 0x73, 0x74, - 0x69, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x52, 0x0c, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, - 0x61, 0x63, 0x68, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x03, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, - 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x52, 0x44, 0x53, 0x52, 0x03, 0x72, 0x64, - 0x73, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x61, 0x77, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x61, 0x77, 0x73, 0x2e, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x13, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0d, + 0x72, 0x64, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, + 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, 0x2e, 0x52, 0x44, 0x53, 0x49, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0c, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x72, 0x2e, 0x72, 0x64, 0x73, 0x2e, 0x61, 0x77, 0x73, + 0x2e, 0x52, 0x44, 0x53, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0b, 0x72, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, + 0x62, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 
0x62, 0x65, 0x72, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x64, 0x73, 0x2f, 0x61, 0x77, 0x73, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, } var ( @@ -422,23 +606,27 @@ func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto return file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDescData } -var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_goTypes = []interface{}{ - (*EC2Instances)(nil), // 0: cloudprober.rds.aws.EC2Instances - (*ElastiCaches)(nil), // 1: cloudprober.rds.aws.ElastiCaches - (*RDS)(nil), // 2: cloudprober.rds.aws.RDS - (*LoadBalancers)(nil), // 3: cloudprober.rds.aws.LoadBalancers - (*ProviderConfig)(nil), // 4: cloudprober.rds.aws.ProviderConfig + (*EC2Instances)(nil), // 0: cloudprober.rds.aws.EC2Instances + (*ElastiCacheReplicationGroups)(nil), // 1: cloudprober.rds.aws.ElastiCacheReplicationGroups + (*ElastiCacheClusters)(nil), // 2: cloudprober.rds.aws.ElastiCacheClusters + (*RDSClusters)(nil), // 3: cloudprober.rds.aws.RDSClusters + (*RDSInstances)(nil), // 4: cloudprober.rds.aws.RDSInstances + (*LoadBalancers)(nil), // 5: cloudprober.rds.aws.LoadBalancers + (*ProviderConfig)(nil), // 6: cloudprober.rds.aws.ProviderConfig } var file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_depIdxs = []int32{ 0, // 0: cloudprober.rds.aws.ProviderConfig.ec2_instances:type_name -> cloudprober.rds.aws.EC2Instances - 1, // 1: cloudprober.rds.aws.ProviderConfig.elasticaches:type_name -> cloudprober.rds.aws.ElastiCaches - 2, // 2: cloudprober.rds.aws.ProviderConfig.rds:type_name -> cloudprober.rds.aws.RDS - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 1, // 1: cloudprober.rds.aws.ProviderConfig.elasticache_replicationgroups:type_name -> cloudprober.rds.aws.ElastiCacheReplicationGroups + 2, // 2: cloudprober.rds.aws.ProviderConfig.elasticache_clusters:type_name -> cloudprober.rds.aws.ElastiCacheClusters + 4, // 3: cloudprober.rds.aws.ProviderConfig.rds_instances:type_name -> cloudprober.rds.aws.RDSInstances + 3, // 4: cloudprober.rds.aws.ProviderConfig.rds_clusters:type_name -> cloudprober.rds.aws.RDSClusters + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_init() } @@ -460,7 +648,7 @@ func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto } } file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ElastiCaches); i { + switch v := v.(*ElastiCacheReplicationGroups); i { case 0: return &v.state case 1: @@ -472,7 +660,7 @@ func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto } } 
file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RDS); i { + switch v := v.(*ElastiCacheClusters); i { case 0: return &v.state case 1: @@ -484,7 +672,7 @@ func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto } } file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancers); i { + switch v := v.(*RDSClusters); i { case 0: return &v.state case 1: @@ -496,6 +684,30 @@ func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto } } file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RDSInstances); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalancers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProviderConfig); i { case 0: return &v.state @@ -514,7 +726,7 @@ func file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_cloudprober_cloudprober_internal_rds_aws_proto_config_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 7, NumExtensions: 0, NumServices: 0, }, diff --git a/internal/rds/aws/proto/config.proto b/internal/rds/aws/proto/config.proto index 837ecd7bb12..a390af87d37 100644 --- a/internal/rds/aws/proto/config.proto +++ b/internal/rds/aws/proto/config.proto @@ -24,15 +24,38 @@ message EC2Instances { optional int32 re_eval_sec = 98 [default = 600]; // default 10 mins } -// ElastiCaches discovery options. -message ElastiCaches { +// ElastiCacheReplicationGroups discovery options. +message ElastiCacheReplicationGroups { // How often resources should be refreshed. optional int32 re_eval_sec = 98 [default = 600]; // default 10 mins } -// RDS (Amazon Relational Databases) discovery options. -message RDS { +// ElastiCacheClusters discovery options. +message ElastiCacheClusters { + + // How often resources should be refreshed. + optional int32 re_eval_sec = 98 [default = 600]; // default 10 mins +} + + +// RDS (Amazon Relational Databases) Clusters discovery options. +message RDSClusters { + // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster + // if specified, only the corresponding cluster information is returned. + optional string identifier = 1; + + // Filters to be added to the discovery and search. + repeated string filter = 2; + + // Whether to includes information about clusters shared from other AWS accounts. + optional bool include_shared = 3; + + optional int32 re_eval_sec = 98 [default = 600]; // default 10 mins +} + +// RDS (Amazon Relational Databases) Clusters discovery options. +message RDSInstances { // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster // if specified, only the corresponding cluster information is returned. 
optional string identifier = 1; @@ -65,11 +88,19 @@ message ProviderConfig { // instances discovery to be enabled. optional EC2Instances ec2_instances = 3; - // ElastiCache discovery options. This field should be declared for the - // elasticache discovery to be enabled. - optional ElastiCaches elasticaches = 4; + // ElastiCacheReplicationGroups discovery options. This field should be declared for the + // elasticache replication groups discovery to be enabled. + optional ElastiCacheReplicationGroups elasticache_replicationgroups = 4; + + // ElastiCacheClusters discovery options. This field should be declared for the + // elasticache cluster discovery to be enabled. + optional ElastiCacheClusters elasticache_clusters = 5; + + // RDS instances discovery options. + optional RDSInstances rds_instances = 6; + + // RDS clusters discovery options. + optional RDSClusters rds_clusters = 7; - // RDS discovery options. - optional RDS rds = 5; } diff --git a/internal/rds/aws/proto/config_proto_gen.cue b/internal/rds/aws/proto/config_proto_gen.cue index 92f9c78f2f8..a66e23418ad 100644 --- a/internal/rds/aws/proto/config_proto_gen.cue +++ b/internal/rds/aws/proto/config_proto_gen.cue @@ -5,14 +5,34 @@ package proto reEvalSec?: int32 @protobuf(98,int32,name=re_eval_sec,"default=600") // default 10 mins } -// ElastiCaches discovery options. -#ElastiCaches: { +// ElastiCacheReplicationGroups discovery options. +#ElastiCacheReplicationGroups: { // How often resources should be refreshed. reEvalSec?: int32 @protobuf(98,int32,name=re_eval_sec,"default=600") // default 10 mins } -// RDS (Amazon Relational Databases) discovery options. -#RDS: { +// ElastiCacheClusters discovery options. +#ElastiCacheClusters: { + // How often resources should be refreshed. + reEvalSec?: int32 @protobuf(98,int32,name=re_eval_sec,"default=600") // default 10 mins +} + +// RDS (Amazon Relational Databases) Clusters discovery options. +#RDSClusters: { + // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster + // if specified, only the corresponding cluster information is returned. + identifier?: string @protobuf(1,string) + + // Filters to be added to the discovery and search. + filter?: [...string] @protobuf(2,string) + + // Whether to includes information about clusters shared from other AWS accounts. + includeShared?: bool @protobuf(3,bool,name=include_shared) + reEvalSec?: int32 @protobuf(98,int32,name=re_eval_sec,"default=600") // default 10 mins +} + +// RDS (Amazon Relational Databases) Clusters discovery options. +#RDSInstances: { // DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster // if specified, only the corresponding cluster information is returned. identifier?: string @protobuf(1,string) @@ -44,10 +64,17 @@ package proto // instances discovery to be enabled. ec2Instances?: #EC2Instances @protobuf(3,EC2Instances,name=ec2_instances) - // ElastiCache discovery options. This field should be declared for the - // elasticache discovery to be enabled. - elasticaches?: #ElastiCaches @protobuf(4,ElastiCaches) + // ElastiCacheReplicationGroups discovery options. This field should be declared for the + // elasticache replication groups discovery to be enabled. + elasticacheReplicationgroups?: #ElastiCacheReplicationGroups @protobuf(4,ElastiCacheReplicationGroups,name=elasticache_replicationgroups) + + // ElastiCacheClusters discovery options. This field should be declared for the + // elasticache cluster discovery to be enabled. 
+ elasticacheClusters?: #ElastiCacheClusters @protobuf(5,ElastiCacheClusters,name=elasticache_clusters) + + // RDS instances discovery options. + rdsInstances?: #RDSInstances @protobuf(6,RDSInstances,name=rds_instances) - // RDS discovery options. - rds?: #RDS @protobuf(5,RDS) + // RDS clusters discovery options. + rdsClusters?: #RDSClusters @protobuf(7,RDSClusters,name=rds_clusters) } diff --git a/internal/rds/aws/rds_clusters.go b/internal/rds/aws/rds_clusters.go new file mode 100644 index 00000000000..c1e9f53a799 --- /dev/null +++ b/internal/rds/aws/rds_clusters.go @@ -0,0 +1,172 @@ +package aws + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/internal/rds/server/filter" + "github.com/cloudprober/cloudprober/logger" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/rds" + "google.golang.org/protobuf/proto" +) + +// rdsClusterInfo represents cluster items that we fetch from the RDS API. +type rdsClusterInfo struct { + Name string + Ip string + Port int32 + IsCluster bool + Tags map[string]string +} + +// rdsClusterData represents objects that we store in cache. +type rdsClusterData struct { + ri *rdsClusterInfo + lastUpdated int64 +} + +var RDSClustersFilters = struct { + RegexFilterKeys []string + LabelsFilter bool +}{ + []string{"name", "engine"}, + true, +} + +// rdsClustersLister is a AWS Relational Database Service lister. It implements a cache, +// that's populated at a regular interval by making the AWS API calls. +// Listing actually only returns the current contents of that cache. +type rdsClustersLister struct { + c *configpb.RDSClusters + client rds.DescribeDBClustersAPIClient + l *logger.Logger + mu sync.RWMutex + names []string + dbClustersList map[string]*rdsClusterData +} + +// listResources returns the list of resource records, where each record +// consists of an cluster name and the endpoint associated with it. +func (rl *rdsClustersLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { + var resources []*pb.Resource + + allFilters, err := filter.ParseFilters(req.GetFilter(), RDSClustersFilters.RegexFilterKeys, "") + if err != nil { + return nil, err + } + + nameFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.LabelsFilter + + rl.mu.RLock() + defer rl.mu.RUnlock() + + for _, name := range rl.names { + ins := rl.dbClustersList[name].ri + if ins == nil { + rl.l.Errorf("rds_clusters.listResources: db info missing for %s", name) + continue + } + + if nameFilter != nil && !nameFilter.Match(name, rl.l) { + continue + } + if labelsFilter != nil && !labelsFilter.Match(ins.Tags, rl.l) { + continue + } + + resources = append(resources, &pb.Resource{ + Name: proto.String(name), + Ip: proto.String(ins.Ip), + Port: proto.Int32(ins.Port), + Labels: ins.Tags, + LastUpdated: proto.Int64(rl.dbClustersList[name].lastUpdated), + }) + } + + rl.l.Infof("rds_clusters.listResources: returning %d instances", len(resources)) + return resources, nil +} + +// expand runs equivalent API calls as "aws rds describe-db-clusters", +// and is used to populate the cache. 
It is used to obtain RDS cluster information +// More details are available in +// https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html +func (rl *rdsClustersLister) expand(reEvalInterval time.Duration) { + rl.l.Infof("rds_clusters.expand: expanding AWS targets") + + resCluster, err := rl.client.DescribeDBClusters(context.TODO(), nil) + if err != nil { + rl.l.Errorf("rds_clusters.expand: error while listing database clusters: %v", err) + return + } + + var ids = make([]string, 0) + var dbList = make(map[string]*rdsClusterData) + + ts := time.Now().Unix() + for _, d := range resCluster.DBClusters { + if d.DBClusterIdentifier == nil || d.DatabaseName == nil || d.Endpoint == nil || d.Port == nil { + continue + } + + ci := &rdsClusterInfo{ + Name: *d.DBClusterIdentifier, + Ip: *d.Endpoint, + Port: *d.Port, + Tags: make(map[string]string), + } + + // Convert to map + for _, t := range d.TagList { + ci.Tags[*t.Key] = *t.Value + } + + dbList[*d.DBClusterIdentifier] = &rdsClusterData{ci, ts} + ids = append(ids, *d.DBClusterIdentifier) + } + + rl.mu.Lock() + rl.names = ids + rl.dbClustersList = dbList + rl.mu.Unlock() + + rl.l.Infof("rds_clusters.expand: got %d databases", len(ids)) +} + +func newRdsClustersLister(c *configpb.RDSClusters, region string, l *logger.Logger) (*rdsClustersLister, error) { + cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) + if err != nil { + return nil, fmt.Errorf("AWS configuration error: %v", err) + } + + client := rds.NewFromConfig(cfg) + + cl := &rdsClustersLister{ + c: c, + client: client, + dbClustersList: make(map[string]*rdsClusterData), + l: l, + } + + reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second + go func() { + cl.expand(0) + // Introduce a random delay between 0-reEvalInterval before + // starting the refresh loop. If there are multiple cloudprober + // awsInstances, this will make sure that each instance calls AWS + // API at a different point of time. + randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) + time.Sleep(time.Duration(randomDelaySec) * time.Second) + for range time.Tick(reEvalInterval) { + cl.expand(reEvalInterval) + } + }() + return cl, nil +} diff --git a/internal/rds/aws/rds_clusters_test.go b/internal/rds/aws/rds_clusters_test.go new file mode 100644 index 00000000000..128a18b7a0f --- /dev/null +++ b/internal/rds/aws/rds_clusters_test.go @@ -0,0 +1,276 @@ +package aws + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/logger" + "google.golang.org/protobuf/proto" + + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" +) + +type mockRDSDescribeDBClustersAPIClient func(context.Context, *rds.DescribeDBClustersInput, ...func(*rds.Options)) (*rds.DescribeDBClustersOutput, error) + +func (m mockRDSDescribeDBClustersAPIClient) DescribeDBClusters(ctx context.Context, params *rds.DescribeDBClustersInput, optFns ...func(*rds.Options)) (*rds.DescribeDBClustersOutput, error) { + return m(ctx, params, optFns...) 
+} + +type testRDSClusters struct { + id string + name string + ipAddr string + port int32 + tags map[string]string +} + +func TestRDSClustersExpand(t *testing.T) { + cases := []struct { + err error + instances []*testRDSClusters + expectCount int + }{ + { + instances: []*testRDSClusters{ + { + id: "rds-test-id", + name: "rds-test-cluster", + ipAddr: "10.0.0.2", + port: 5431, + tags: map[string]string{"a": "b"}, + }, + { + id: "rds-test-id-2", + name: "rds-test-cluster-2", + ipAddr: "10.0.0.3", + port: 5431, + tags: map[string]string{"a": "b"}, + }, + }, + err: nil, + expectCount: 2, + }, + { + instances: []*testRDSClusters{}, + err: nil, + expectCount: 0, + }, + { + instances: []*testRDSClusters{ + { + id: "rds-test-id", + }, + { + id: "rds-test-id-2", + }, + }, + err: nil, + expectCount: 0, + }, + { + instances: []*testRDSClusters{}, + err: fmt.Errorf("some rds error"), + expectCount: 0, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + client := func(t *testing.T, instances []*testRDSClusters) rds.DescribeDBClustersAPIClient { + return mockRDSDescribeDBClustersAPIClient(func(ctx context.Context, params *rds.DescribeDBClustersInput, optFns ...func(*rds.Options)) (*rds.DescribeDBClustersOutput, error) { + t.Helper() + + out := &rds.DescribeDBClustersOutput{} + + for _, v := range instances { + c := types.DBCluster{ + DBClusterIdentifier: &v.id, + DatabaseName: &v.name, + } + + if v.ipAddr != "" { + c.Endpoint = &v.ipAddr + } + + if v.port > 0 { + c.Port = &v.port + } + + for tk, tv := range v.tags { + tag := types.Tag{ + Key: &tk, + Value: &tv, + } + + c.TagList = append(c.TagList, tag) + } + out.DBClusters = append(out.DBClusters, c) + } + + return out, tt.err + }) + } + + il := &rdsClustersLister{ + client: client(t, tt.instances), + dbClustersList: make(map[string]*rdsClusterData), + } + + il.expand(time.Second) + + // Check for instance count + if len(il.dbClustersList) != tt.expectCount { + t.Errorf("Got %d instances, want %d", len(il.dbClustersList), tt.expectCount) + } + }) + } +} + +func TestRDSClustersLister(t *testing.T) { + cases := []struct { + instances []*testRDSClusters + filter []*pb.Filter + expectErr bool + expectedCount int + }{ + { + instances: []*testRDSClusters{ + { + id: "rds-cluster-test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "rds-cluster-test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 2, + }, + { + instances: []*testRDSClusters{ + { + id: "rds-cluster-test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "rds-cluster-test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("ins."), + Value: proto.String("b"), + }, + }, + expectErr: true, + }, + { + instances: []*testRDSClusters{}, + expectedCount: 0, + }, + { + instances: []*testRDSClusters{ + { + id: "rds-test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"test1": "a"}, + }, + { + id: "rds-test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"test2": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 0, + }, + { + instances: []*testRDSClusters{ + { + id: "rds-test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + id: "rds-test-id-2", + ipAddr: "10.0.0.3", + tags: 
map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("name"), + Value: proto.String("nonexistent"), + }, + }, + expectedCount: 0, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + + names := []string{} + cache := make(map[string]*rdsClusterData) + for _, ti := range tt.instances { + ii := &rdsClusterInfo{ + Name: ti.id, + Ip: ti.ipAddr, + Tags: ti.tags, + } + cache[ti.id] = &rdsClusterData{ + ri: ii, + } + + names = append(names, ti.id) + } + + lister := &rdsClustersLister{ + dbClustersList: cache, + names: names, + l: &logger.Logger{}, + } + + var filters []*pb.Filter + if tt.filter != nil { + filters = append(filters, tt.filter...) + } + + resources, err := lister.listResources(&pb.ListResourcesRequest{ + Filter: filters, + }) + + if err != nil { + if !tt.expectErr { + t.Errorf("Got error while listing resources: %v, expected no errors", err) + } + return + } + + if len(resources) != tt.expectedCount { + t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) + } + }) + } +} diff --git a/internal/rds/aws/rds.go b/internal/rds/aws/rds_instances.go similarity index 59% rename from internal/rds/aws/rds.go rename to internal/rds/aws/rds_instances.go index e482716d811..8e06ab16375 100644 --- a/internal/rds/aws/rds.go +++ b/internal/rds/aws/rds_instances.go @@ -17,23 +17,22 @@ import ( "google.golang.org/protobuf/proto" ) -// rdsInfo represents instance items that we fetch from the RDS API. -type rdsInfo struct { +// rdsInstanceInfo represents instance items that we fetch from the RDS API. +type rdsInstanceInfo struct { Name string Ip string Port int32 IsReplica bool - IsCluster bool Tags map[string]string } // rdsData represents objects that we store in cache. -type rdsData struct { - ri *rdsInfo +type rdsInstanceData struct { + ri *rdsInstanceInfo lastUpdated int64 } -var RDSFilters = struct { +var RDSInstancesFilters = struct { RegexFilterKeys []string LabelsFilter bool }{ @@ -41,24 +40,24 @@ var RDSFilters = struct { true, } -// rdsLister is a AWS Relational Database Service lister. It implements a cache, +// rdsInstancesLister is an AWS Relational Database Service Instances lister. It implements a cache, // that's populated at a regular interval by making the AWS API calls. // Listing actually only returns the current contents of that cache. -type rdsLister struct { - c *configpb.RDS - client *rds.Client - l *logger.Logger - mu sync.RWMutex - names []string - dbList map[string]*rdsData +type rdsInstancesLister struct { + c *configpb.RDSInstances + client rds.DescribeDBInstancesAPIClient + l *logger.Logger + mu sync.RWMutex + names []string + dbInstancesList map[string]*rdsInstanceData } // listResources returns the list of resource records, where each record // consists of an cluster name and the endpoint associated with it. 
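// For example (values are illustrative only), a ListResources request may
// carry filters like the ones below; this lister applies the "name" regex
// filter and label filters, in the same format documented for the other AWS
// listers in this package:
//
//	filter {
//	  key: "name"
//	  value: "prod-db.*"
//	}
//	filter {
//	  key: "labels.app"
//	  value: "service-a"
//	}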
-func (rl *rdsLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { +func (rl *rdsInstancesLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { var resources []*pb.Resource - allFilters, err := filter.ParseFilters(req.GetFilter(), AWSInstancesFilters.RegexFilterKeys, "") + allFilters, err := filter.ParseFilters(req.GetFilter(), RDSInstancesFilters.RegexFilterKeys, "") if err != nil { return nil, err } @@ -69,9 +68,9 @@ func (rl *rdsLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource defer rl.mu.RUnlock() for _, name := range rl.names { - ins := rl.dbList[name].ri + ins := rl.dbInstancesList[name].ri if ins == nil { - rl.l.Errorf("rds: db info missing for %s", name) + rl.l.Errorf("rds_instances.listResources: db info missing for %s", name) continue } @@ -87,27 +86,30 @@ func (rl *rdsLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource Ip: proto.String(ins.Ip), Port: proto.Int32(ins.Port), Labels: ins.Tags, - LastUpdated: proto.Int64(rl.dbList[name].lastUpdated), + LastUpdated: proto.Int64(rl.dbInstancesList[name].lastUpdated), }) } - rl.l.Infof("rds.listResources: returning %d instances", len(resources)) + rl.l.Infof("rds_instances.listResources: returning %d instances", len(resources)) return resources, nil } -// expand runs equivalent API calls as "aws describe-db-instances", -// and is used to populate the cache. -func (rl *rdsLister) expand(reEvalInterval time.Duration) { - rl.l.Infof("rds.expand: expanding AWS targets") +// expand runs equivalent API calls as "aws rds describe-db-instances", +// and is used to populate the cache. It returns the instance information +// for instances provisioned for RDS. +// More details can be found in +// https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html +func (rl *rdsInstancesLister) expand(reEvalInterval time.Duration) { + rl.l.Infof("rds_instances.expand: expanding AWS targets") result, err := rl.client.DescribeDBInstances(context.TODO(), nil) if err != nil { - rl.l.Errorf("rds.expand: error while listing database instances: %v", err) + rl.l.Errorf("rds_instances.expand: error while listing database instances: %v", err) return } var ids = make([]string, 0) - var dbList = make(map[string]*rdsData) + var dbInstancesList = make(map[string]*rdsInstanceData) ts := time.Now().Unix() for _, r := range result.DBInstances { @@ -119,7 +121,7 @@ func (rl *rdsLister) expand(reEvalInterval time.Duration) { isReplica = true } - ci := &rdsInfo{ + ci := &rdsInstanceInfo{ Name: *r.DBName, Ip: *r.Endpoint.Address, Port: *r.Endpoint.Port, @@ -132,57 +134,31 @@ func (rl *rdsLister) expand(reEvalInterval time.Duration) { ci.Tags[*t.Key] = *t.Value } - dbList[*r.DBName] = &rdsData{ci, ts} + dbInstancesList[*r.DBName] = &rdsInstanceData{ci, ts} ids = append(ids, *r.DBName) } - resCluster, err := rl.client.DescribeDBClusters(context.TODO(), nil) - if err != nil { - rl.l.Errorf("rds.expand: error while listing database clusters: %v", err) - return - } - for _, r := range resCluster.DBClusters { - if r.DBClusterIdentifier == nil || r.DatabaseName == nil || r.Endpoint == nil || r.Port == nil { - continue - } - - ci := &rdsInfo{ - Name: *r.DBClusterIdentifier, - Ip: *r.Endpoint, - Port: *r.Port, - IsCluster: true, - } - - // Convert to map - for _, t := range r.TagList { - ci.Tags[*t.Key] = *t.Value - } - - dbList[*r.DBClusterIdentifier] = &rdsData{ci, ts} - ids = append(ids, *r.DBClusterIdentifier) - } - rl.mu.Lock() rl.names = ids - rl.dbList = dbList + 
rl.dbInstancesList = dbInstancesList rl.mu.Unlock() - rl.l.Infof("rds.expand: got %d databases", len(ids)) + rl.l.Infof("rds_instances.expand: got %d databases", len(ids)) } -func newRdsLister(c *configpb.RDS, region string, l *logger.Logger) (*rdsLister, error) { +func newRdsInstancesLister(c *configpb.RDSInstances, region string, l *logger.Logger) (*rdsInstancesLister, error) { cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) if err != nil { - return nil, fmt.Errorf("AWS configuration error : %v", err) + return nil, fmt.Errorf("AWS configuration error: %v", err) } client := rds.NewFromConfig(cfg) - cl := &rdsLister{ - c: c, - client: client, - dbList: make(map[string]*rdsData), - l: l, + cl := &rdsInstancesLister{ + c: c, + client: client, + dbInstancesList: make(map[string]*rdsInstanceData), + l: l, } reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second diff --git a/internal/rds/aws/rds_instances_test.go b/internal/rds/aws/rds_instances_test.go new file mode 100644 index 00000000000..92c4fc99874 --- /dev/null +++ b/internal/rds/aws/rds_instances_test.go @@ -0,0 +1,285 @@ +package aws + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + pb "github.com/cloudprober/cloudprober/internal/rds/proto" + "github.com/cloudprober/cloudprober/logger" + "google.golang.org/protobuf/proto" + + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" +) + +type mockRDSDescribeDBInstancesAPIClient func(context.Context, *rds.DescribeDBInstancesInput, ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) + +func (m mockRDSDescribeDBInstancesAPIClient) DescribeDBInstances(ctx context.Context, params *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) { + return m(ctx, params, optFns...) 
+} + +type testRDSInstances struct { + name string + ipAddr string + port int32 + isReplica bool + tags map[string]string +} + +func TestRDSInstancesExpand(t *testing.T) { + cases := []struct { + err error + instances []*testRDSInstances + expectCount int + }{ + { + instances: []*testRDSInstances{ + { + name: "rds-test-instance", + ipAddr: "10.0.0.2", + port: 5431, + tags: map[string]string{"a": "b"}, + }, + }, + err: nil, + expectCount: 1, + }, + { + instances: []*testRDSInstances{ + { + name: "rds-test-instance", + ipAddr: "10.0.0.2", + port: 5431, + tags: map[string]string{"a": "b"}, + }, + { + name: "rds-test-instance-2", + ipAddr: "10.0.0.3", + port: 5431, + tags: map[string]string{"a": "b"}, + }, + }, + err: nil, + expectCount: 2, + }, + { + instances: []*testRDSInstances{}, + err: nil, + expectCount: 0, + }, + { + instances: []*testRDSInstances{ + { + name: "rds-test-name", + }, + { + name: "rds-test-name-2", + }, + }, + err: nil, + expectCount: 0, + }, + { + instances: []*testRDSInstances{}, + err: fmt.Errorf("some rds error"), + expectCount: 0, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + client := func(t *testing.T, instances []*testRDSInstances) rds.DescribeDBInstancesAPIClient { + return mockRDSDescribeDBInstancesAPIClient(func(ctx context.Context, params *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) { + t.Helper() + + out := &rds.DescribeDBInstancesOutput{} + + for _, v := range instances { + c := types.DBInstance{ + DBInstanceIdentifier: &v.name, + DBName: &v.name, + } + + if v.ipAddr != "" { + c.Endpoint = &types.Endpoint{ + Address: &v.ipAddr, + Port: &v.port, + } + } + + for tk, tv := range v.tags { + tag := types.Tag{ + Key: &tk, + Value: &tv, + } + + c.TagList = append(c.TagList, tag) + } + out.DBInstances = append(out.DBInstances, c) + } + + return out, tt.err + }) + } + + il := &rdsInstancesLister{ + client: client(t, tt.instances), + dbInstancesList: make(map[string]*rdsInstanceData), + } + + il.expand(time.Second) + + // Check for instance count + if len(il.dbInstancesList) != tt.expectCount { + t.Errorf("Got %d instances, want %d", len(il.dbInstancesList), tt.expectCount) + } + }) + } +} + +func TestRDSInstancesLister(t *testing.T) { + cases := []struct { + instances []*testRDSInstances + filter []*pb.Filter + expectErr bool + expectedCount int + }{ + { + instances: []*testRDSInstances{ + { + name: "rds-cluster-test-id", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + name: "rds-cluster-test-id-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + expectedCount: 2, + }, + { + instances: []*testRDSInstances{ + { + name: "rds-instance-test-name", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + name: "rds-instance-test-name-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("ins."), + Value: proto.String("b"), + }, + }, + expectErr: true, + }, + { + instances: []*testRDSInstances{}, + expectedCount: 0, + }, + { + instances: []*testRDSInstances{ + { + name: "rds-test-name", + ipAddr: "10.0.0.2", + tags: map[string]string{"test1": "a"}, + }, + { + name: "rds-test-name-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"test2": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("labels.a"), + Value: proto.String("b"), + }, + }, + 
expectedCount: 0, + }, + { + instances: []*testRDSInstances{ + { + name: "rds-test-name", + ipAddr: "10.0.0.2", + tags: map[string]string{"a": "b"}, + }, + { + name: "rds-test-name-2", + ipAddr: "10.0.0.3", + tags: map[string]string{"a": "b"}, + }, + }, + filter: []*pb.Filter{ + { + Key: proto.String("name"), + Value: proto.String("nonexistent"), + }, + }, + expectedCount: 0, + }, + } + + for i, tt := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + + names := []string{} + cache := make(map[string]*rdsInstanceData) + for _, ti := range tt.instances { + ii := &rdsInstanceInfo{ + Name: ti.name, + Ip: ti.ipAddr, + Tags: ti.tags, + } + cache[ti.name] = &rdsInstanceData{ + ri: ii, + } + + names = append(names, ti.name) + } + + lister := &rdsInstancesLister{ + dbInstancesList: cache, + names: names, + l: &logger.Logger{}, + } + + var filters []*pb.Filter + if tt.filter != nil { + filters = append(filters, tt.filter...) + } + + resources, err := lister.listResources(&pb.ListResourcesRequest{ + Filter: filters, + }) + + if err != nil { + if !tt.expectErr { + t.Errorf("Got error while listing resources: %v, expected no errors", err) + } + return + } + + if len(resources) != tt.expectedCount { + t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) + } + }) + } +} diff --git a/internal/rds/client/client.go b/internal/rds/client/client.go index 051948e0574..8fc6bd8e7fc 100644 --- a/internal/rds/client/client.go +++ b/internal/rds/client/client.go @@ -32,13 +32,13 @@ import ( "github.com/cloudprober/cloudprober/internal/oauth" configpb "github.com/cloudprober/cloudprober/internal/rds/client/proto" pb "github.com/cloudprober/cloudprober/internal/rds/proto" - spb "github.com/cloudprober/cloudprober/internal/rds/proto" "github.com/cloudprober/cloudprober/internal/tlsconfig" "github.com/cloudprober/cloudprober/logger" "github.com/cloudprober/cloudprober/targets/endpoint" dnsRes "github.com/cloudprober/cloudprober/targets/resolver" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" grpcoauth "google.golang.org/grpc/credentials/oauth" "google.golang.org/protobuf/proto" ) @@ -55,6 +55,7 @@ type cacheRecord struct { ipStr string port int labels map[string]string + info string lastUpdated time.Time } @@ -137,6 +138,7 @@ func (client *Client) updateState(response *pb.ListResourcesResponse) { ipStr: res.GetIp(), port: int(res.GetPort()), labels: res.Labels, + info: string(res.GetInfo()), lastUpdated: time.Unix(res.GetLastUpdated(), 0), } client.names[i] = res.GetName() @@ -158,7 +160,7 @@ func (client *Client) ListEndpoints() []endpoint.Endpoint { result := make([]endpoint.Endpoint, len(client.names)) for i, name := range client.names { cr := client.cache[name] - result[i] = endpoint.Endpoint{Name: name, IP: cr.ip, Port: cr.port, Labels: cr.labels, LastUpdated: cr.lastUpdated} + result[i] = endpoint.Endpoint{Name: name, IP: cr.ip, Port: cr.port, Labels: cr.labels, LastUpdated: cr.lastUpdated, Info: cr.info} } return result } @@ -213,15 +215,15 @@ func (client *Client) initListResourcesFunc() error { } // Transport security options. 
+ transportCred := insecure.NewCredentials() if client.serverOpts.GetTlsConfig() != nil { tlsConfig := &tls.Config{} if err := tlsconfig.UpdateTLSConfig(tlsConfig, client.serverOpts.GetTlsConfig()); err != nil { return fmt.Errorf("rds/client: error initializing TLS config (%+v): %v", client.serverOpts.GetTlsConfig(), err) } - client.dialOpts = append(client.dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) - } else { - client.dialOpts = append(client.dialOpts, grpc.WithInsecure()) + transportCred = credentials.NewTLS(tlsConfig) } + client.dialOpts = append(client.dialOpts, grpc.WithTransportCredentials(transportCred)) // OAuth related options. if client.serverOpts.GetOauthConfig() != nil { @@ -238,7 +240,7 @@ func (client *Client) initListResourcesFunc() error { } client.listResources = func(ctx context.Context, in *pb.ListResourcesRequest) (*pb.ListResourcesResponse, error) { - return spb.NewResourceDiscoveryClient(conn).ListResources(ctx, in) + return pb.NewResourceDiscoveryClient(conn).ListResources(ctx, in) } return nil @@ -271,7 +273,6 @@ func New(c *configpb.ClientConf, listResources ListResourcesFunc, l *logger.Logg // refreshState loop. If there are multiple cloudprober instances, this will // make sure that each instance calls RDS server at a different point of // time. - rand.Seed(time.Now().UnixNano()) randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) time.Sleep(time.Duration(randomDelaySec) * time.Second) for range time.Tick(reEvalInterval) { diff --git a/server.cfg b/server.cfg new file mode 100644 index 00000000000..2a1a48ae506 --- /dev/null +++ b/server.cfg @@ -0,0 +1,13 @@ +rds_server { + provider { + aws_config { + region: "us-west-2" + ec2_instances {} + elasticache_clusters {} + rds_instances {} + rds_clusters {} + } + } +} + +grpc_port: 9314 diff --git a/targets/endpoint/endpoint.go b/targets/endpoint/endpoint.go index 2fa3b2a8ef1..92f6c3f4570 100644 --- a/targets/endpoint/endpoint.go +++ b/targets/endpoint/endpoint.go @@ -29,13 +29,14 @@ import ( targetspb "github.com/cloudprober/cloudprober/targets/proto" ) -// Endpoint represents a targets and associated parameters. +// Endpoint represents a target and associated parameters. type Endpoint struct { Name string Labels map[string]string LastUpdated time.Time Port int IP net.IP + Info string } // Key returns a string key that uniquely identifies that endpoint. 
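A note on the new Endpoint.Info field above: the AWS listers in this series use it to carry a small resource marker (for example, the ElastiCache listers attach "clustered"), and the rds client copies it through from the ListResources response into each endpoint. The sketch below is illustrative and not part of this change (the package and helper name are made up); it only shows how a consumer of targets/endpoint could act on that marker:

	package example

	import "github.com/cloudprober/cloudprober/targets/endpoint"

	// clusteredOnly returns just the endpoints that a lister flagged as
	// "clustered" via the Info field introduced in this change.
	func clusteredOnly(eps []endpoint.Endpoint) []endpoint.Endpoint {
		var out []endpoint.Endpoint
		for _, ep := range eps {
			if ep.Info == "clustered" {
				out = append(out, ep)
			}
		}
		return out
	}

Keeping Info as a free-form string on the endpoint (bytes on the wire) keeps the RDS protocol change small while still letting each provider attach a provider-specific hint.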
From 48b6e871a1263e4335a93590789476aaebb8fdce Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Wed, 3 Jan 2024 13:30:02 -0800 Subject: [PATCH 06/10] remove test files --- client-rds.cfg | 20 -------------------- client.cfg | 20 -------------------- server.cfg | 13 ------------- 3 files changed, 53 deletions(-) delete mode 100644 client-rds.cfg delete mode 100644 client.cfg delete mode 100644 server.cfg diff --git a/client-rds.cfg b/client-rds.cfg deleted file mode 100644 index bc839c33f67..00000000000 --- a/client-rds.cfg +++ /dev/null @@ -1,20 +0,0 @@ -probe { - name:"obs-rds" - type: PING - - targets{ - rds_targets { - rds_server_options { - server_address: "localhost:9314" - } - - resource_path: "aws://rds" - filter { - key: "labels.component" - value: "observability" - } - } - } -} - -port: 9400 diff --git a/client.cfg b/client.cfg deleted file mode 100644 index 0dc0ebcb332..00000000000 --- a/client.cfg +++ /dev/null @@ -1,20 +0,0 @@ -probe { - name:"obs-ec2" - type: PING - - targets{ - rds_targets { - rds_server_options { - server_address: "localhost:9314" - } - - resource_path: "aws://ec2_instances" - filter { - key: "labels.component" - value: "observability" - } - } - } -} - -port: 9400 diff --git a/server.cfg b/server.cfg deleted file mode 100644 index 2a1a48ae506..00000000000 --- a/server.cfg +++ /dev/null @@ -1,13 +0,0 @@ -rds_server { - provider { - aws_config { - region: "us-west-2" - ec2_instances {} - elasticache_clusters {} - rds_instances {} - rds_clusters {} - } - } -} - -grpc_port: 9314 From 2a06fbe154b5bee17c39398829a9c9b1e2843380 Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Thu, 4 Jan 2024 11:36:02 -0800 Subject: [PATCH 07/10] update comments --- internal/rds/aws/ec2.go | 3 ++- internal/rds/aws/proto/config.proto | 14 +++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/internal/rds/aws/ec2.go b/internal/rds/aws/ec2.go index aa1b01833de..fa9afd05844 100644 --- a/internal/rds/aws/ec2.go +++ b/internal/rds/aws/ec2.go @@ -121,7 +121,8 @@ func (il *ec2InstancesLister) listResources(req *pb.ListResourcesRequest) ([]*pb } // expand runs equivalent API calls as "aws describe-instances", -// and is used to populate the cache. +// and is used to populate the cache. 
It will list the EC2 instances +// in the target account with some basic networking information func (il *ec2InstancesLister) expand(reEvalInterval time.Duration) { il.l.Infof("ec2_instances.expand: expanding AWS EC2 targets") diff --git a/internal/rds/aws/proto/config.proto b/internal/rds/aws/proto/config.proto index a390af87d37..c33855673dd 100644 --- a/internal/rds/aws/proto/config.proto +++ b/internal/rds/aws/proto/config.proto @@ -5,11 +5,19 @@ // # EC2 instances // ec2_instances {} // -// # ElastiCache cluster -// elasticache {} +// # ElastiCache clusters +// elasticache_clusters {} +// +// # ElastiCache replication groups +// elasticache_replicationgroups {} // // # RDS clusters -// rds { +// rds_clusters { +// identifier: "arn" +// } +// +// # RDS instances +// rds_instances { // identifier: "arn" // } // } From 02cad6039b118ced03fe28f4d0baa9e6f7f1620f Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Thu, 4 Jan 2024 11:42:19 -0800 Subject: [PATCH 08/10] remove AWS resource discovery for a separate PR --- internal/rds/aws/aws.go | 45 +-- internal/rds/aws/ec2.go | 199 ------------ internal/rds/aws/ec2_test.go | 270 ---------------- internal/rds/aws/ec_clusters.go | 216 ------------- internal/rds/aws/ec_clusters_test.go | 298 ----------------- internal/rds/aws/ec_replicationgroups.go | 231 ------------- internal/rds/aws/ec_replicationgroups_test.go | 306 ------------------ internal/rds/aws/rds_clusters.go | 172 ---------- internal/rds/aws/rds_clusters_test.go | 276 ---------------- internal/rds/aws/rds_instances.go | 178 ---------- internal/rds/aws/rds_instances_test.go | 285 ---------------- 11 files changed, 1 insertion(+), 2475 deletions(-) delete mode 100644 internal/rds/aws/ec2.go delete mode 100644 internal/rds/aws/ec2_test.go delete mode 100644 internal/rds/aws/ec_clusters.go delete mode 100644 internal/rds/aws/ec_clusters_test.go delete mode 100644 internal/rds/aws/ec_replicationgroups.go delete mode 100644 internal/rds/aws/ec_replicationgroups_test.go delete mode 100644 internal/rds/aws/rds_clusters.go delete mode 100644 internal/rds/aws/rds_clusters_test.go delete mode 100644 internal/rds/aws/rds_instances.go delete mode 100644 internal/rds/aws/rds_instances_test.go diff --git a/internal/rds/aws/aws.go b/internal/rds/aws/aws.go index 982ae41e0b0..b34760ae9b7 100644 --- a/internal/rds/aws/aws.go +++ b/internal/rds/aws/aws.go @@ -77,50 +77,7 @@ func (p *Provider) ListResources(req *pb.ListResourcesRequest) (*pb.ListResource func initAWSProject(c *configpb.ProviderConfig, l *logger.Logger) (map[string]lister, error) { resourceLister := make(map[string]lister) - // Enable EC2 instances lister if configured. - if c.GetEc2Instances() != nil { - lr, err := newEC2InstancesLister(c.GetEc2Instances(), c.GetRegion(), l) - if err != nil { - return nil, err - } - resourceLister[ResourceTypes.EC2Instances] = lr - } - - // Enable GetElasticacheClusters lister if configured. - if c.GetElasticacheClusters() != nil { - lr, err := newElastiCacheClusterLister(c.GetElasticacheClusters(), c.GetRegion(), l) - if err != nil { - return nil, err - } - resourceLister[ResourceTypes.ElastiCacheClusters] = lr - } - - // Enable GetElasticacheClusters lister if configured. - if c.GetElasticacheReplicationgroups() != nil { - lr, err := newElastiCacheRGLister(c.GetElasticacheReplicationgroups(), c.GetRegion(), l) - if err != nil { - return nil, err - } - resourceLister[ResourceTypes.ElastiCacheReplicationGroups] = lr - } - - // Enable RDSInstances (AWS) lister if configured. 
- if c.GetRdsInstances() != nil { - lr, err := newRdsInstancesLister(c.GetRdsInstances(), c.GetRegion(), l) - if err != nil { - return nil, err - } - resourceLister[ResourceTypes.RDSInstances] = lr - } - - // Enable RDSClusters (AWS) lister if configured. - if c.GetRdsClusters() != nil { - lr, err := newRdsClustersLister(c.GetRdsClusters(), c.GetRegion(), l) - if err != nil { - return nil, err - } - resourceLister[ResourceTypes.RDSClusters] = lr - } + // TODO when resources are added return resourceLister, nil } diff --git a/internal/rds/aws/ec2.go b/internal/rds/aws/ec2.go deleted file mode 100644 index fa9afd05844..00000000000 --- a/internal/rds/aws/ec2.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2017-2023 The Cloudprober Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aws - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/internal/rds/server/filter" - "github.com/cloudprober/cloudprober/logger" - - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/ec2" - "google.golang.org/protobuf/proto" -) - -// instanceInfo represents instance items that we fetch from the API. -type instanceInfo struct { - ID string - Tags map[string]string - IPAddr string -} - -// instanceData represents objects that we store in cache. -type instanceData struct { - ii *instanceInfo - lastUpdated int64 -} - -/* -AWSInstancesFilters defines filters supported by the ec2_instances resource -type. - - Example: - filter { - key: "name" - value: "cloudprober.*" - } - filter { - key: "labels.app" - value: "service-a" - } -*/ -var AWSInstancesFilters = struct { - RegexFilterKeys []string - LabelsFilter bool -}{ - []string{"name"}, - true, -} - -// ec2InstancesLister is a AWS EC2 instances lister. It implements a cache, -// that's populated at a regular interval by making the AWS API calls. -// Listing actually only returns the current contents of that cache. -type ec2InstancesLister struct { - c *configpb.EC2Instances - client ec2.DescribeInstancesAPIClient - l *logger.Logger - mu sync.RWMutex - names []string - cache map[string]*instanceData -} - -// listResources returns the list of resource records, where each record -// consists of an instance name and the IP address associated with it. IP address -// to return is selected based on the provided ipConfig. 
-func (il *ec2InstancesLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { - var resources []*pb.Resource - - allFilters, err := filter.ParseFilters(req.GetFilter(), AWSInstancesFilters.RegexFilterKeys, "") - if err != nil { - return nil, err - } - - nameFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.LabelsFilter - - il.mu.RLock() - defer il.mu.RUnlock() - - for _, name := range il.names { - ins := il.cache[name].ii - if ins == nil { - il.l.Errorf("ec2_instances: cached info missing for %s", name) - continue - } - - if nameFilter != nil && !nameFilter.Match(name, il.l) { - continue - } - if labelsFilter != nil && !labelsFilter.Match(ins.Tags, il.l) { - continue - } - - resources = append(resources, &pb.Resource{ - Name: proto.String(name), - Ip: proto.String(ins.IPAddr), - Labels: ins.Tags, - LastUpdated: proto.Int64(il.cache[name].lastUpdated), - }) - } - - il.l.Infof("ec2_instances.listResources: returning %d instances", len(resources)) - return resources, nil -} - -// expand runs equivalent API calls as "aws describe-instances", -// and is used to populate the cache. It will list the EC2 instances -// in the target account with some basic networking information -func (il *ec2InstancesLister) expand(reEvalInterval time.Duration) { - il.l.Infof("ec2_instances.expand: expanding AWS EC2 targets") - - result, err := il.client.DescribeInstances(context.TODO(), nil) - if err != nil { - il.l.Errorf("ec2_instances.expand: error while listing instances: %v", err) - return - } - - var ids = make([]string, 0) - var cache = make(map[string]*instanceData) - - ts := time.Now().Unix() - for _, r := range result.Reservations { - for _, i := range r.Instances { - - if i.PrivateIpAddress == nil { - continue - } - - ii := &instanceInfo{ - ID: *i.InstanceId, - IPAddr: *i.PrivateIpAddress, - Tags: make(map[string]string), - } - - // Convert to map - for _, t := range i.Tags { - ii.Tags[*t.Key] = *t.Value - } - - cache[*i.InstanceId] = &instanceData{ii, ts} - ids = append(ids, *i.InstanceId) - } - } - - il.mu.Lock() - il.names = ids - il.cache = cache - il.mu.Unlock() - - il.l.Infof("ec2_instances.expand: got %d instances", len(ids)) -} - -func newEC2InstancesLister(c *configpb.EC2Instances, region string, l *logger.Logger) (*ec2InstancesLister, error) { - cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) - if err != nil { - return nil, fmt.Errorf("AWS configuration error: %v", err) - } - - client := ec2.NewFromConfig(cfg) - - il := &ec2InstancesLister{ - c: c, - client: client, - cache: make(map[string]*instanceData), - l: l, - } - - reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second - go func() { - il.expand(0) - // Introduce a random delay between 0-reEvalInterval before - // starting the refresh loop. If there are multiple cloudprober - // awsInstances, this will make sure that each instance calls AWS - // API at a different point of time. 
- randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) - time.Sleep(time.Duration(randomDelaySec) * time.Second) - for range time.Tick(reEvalInterval) { - il.expand(reEvalInterval) - } - }() - return il, nil -} diff --git a/internal/rds/aws/ec2_test.go b/internal/rds/aws/ec2_test.go deleted file mode 100644 index 68771c4178e..00000000000 --- a/internal/rds/aws/ec2_test.go +++ /dev/null @@ -1,270 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/logger" - "google.golang.org/protobuf/proto" - - "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" -) - -type mockEC2DescribeInstances func(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) - -func (m mockEC2DescribeInstances) DescribeInstances(ctx context.Context, params *ec2.DescribeInstancesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) { - return m(ctx, params, optFns...) -} - -type testEC2Instance struct { - id string - ipAddr string - tags map[string]string -} - -func TestEC2Expand(t *testing.T) { - cases := []struct { - err error - instances []*testEC2Instance - expectCount int - }{ - { - instances: []*testEC2Instance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - err: nil, - expectCount: 2, - }, - { - instances: []*testEC2Instance{}, - err: nil, - expectCount: 0, - }, - { - instances: []*testEC2Instance{ - { - id: "test-id", - }, - { - id: "test-id-2", - }, - }, - err: nil, - expectCount: 0, - }, - { - instances: []*testEC2Instance{}, - err: fmt.Errorf("some error"), - expectCount: 0, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - client := func(t *testing.T, instances []*testEC2Instance) ec2.DescribeInstancesAPIClient { - return mockEC2DescribeInstances(func(ctx context.Context, params *ec2.DescribeInstancesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) { - t.Helper() - - r := types.Reservation{} - - for _, v := range instances { - i := types.Instance{ - InstanceId: &v.id, - } - - if v.ipAddr != "" { - i.PrivateIpAddress = &v.ipAddr - } - - for tk, tv := range v.tags { - tag := types.Tag{ - Key: &tk, - Value: &tv, - } - - i.Tags = append(i.Tags, tag) - - } - r.Instances = append(r.Instances, i) - } - - out := &ec2.DescribeInstancesOutput{ - Reservations: []types.Reservation{r}, - } - - return out, tt.err - }) - } - - il := &ec2InstancesLister{ - client: client(t, tt.instances), - cache: make(map[string]*instanceData), - } - - il.expand(time.Second) - - // Check for instance count - if len(il.cache) != tt.expectCount { - t.Errorf("Got %d instances, want %d", len(il.cache), tt.expectCount) - } - }) - } -} - -func TestEC2Lister(t *testing.T) { - cases := []struct { - instances []*testEC2Instance - filter []*pb.Filter - expectErr bool - expectedCount int - }{ - { - instances: []*testEC2Instance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 2, - }, - { - instances: []*testEC2Instance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: 
map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("ins."), - Value: proto.String("b"), - }, - }, - expectErr: true, - }, - { - instances: []*testEC2Instance{}, - expectedCount: 0, - }, - { - instances: []*testEC2Instance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"test1": "a"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"test2": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testEC2Instance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("name"), - Value: proto.String("nonexistent"), - }, - }, - expectedCount: 0, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - - names := []string{} - cache := make(map[string]*instanceData) - for _, ti := range tt.instances { - ii := &instanceInfo{ - ID: ti.id, - IPAddr: ti.ipAddr, - Tags: ti.tags, - } - cache[ti.id] = &instanceData{ - ii: ii, - } - - names = append(names, ti.id) - } - - lister := &ec2InstancesLister{ - cache: cache, - names: names, - l: &logger.Logger{}, - } - - var filters []*pb.Filter - if tt.filter != nil { - filters = append(filters, tt.filter...) - } - - resources, err := lister.listResources(&pb.ListResourcesRequest{ - Filter: filters, - }) - - if err != nil { - if !tt.expectErr { - t.Errorf("Got error while listing resources: %v, expected no errors", err) - } - return - } - - if len(resources) != tt.expectedCount { - t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) - } - }) - } -} diff --git a/internal/rds/aws/ec_clusters.go b/internal/rds/aws/ec_clusters.go deleted file mode 100644 index c06bac126aa..00000000000 --- a/internal/rds/aws/ec_clusters.go +++ /dev/null @@ -1,216 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/internal/rds/server/filter" - "github.com/cloudprober/cloudprober/logger" - - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/elasticache" - "google.golang.org/protobuf/proto" -) - -// ecClusterInfo represents cache cluster items that we fetch from the elasticache API. -type ecClusterInfo struct { - ID string - IP string - Port int32 - TLSEnabled bool - Engine string - Tags map[string]string -} - -// ecClusterLocalCacheData represents objects that we store in local cache. -type ecClusterLocalCacheData struct { - ci *ecClusterInfo - lastUpdated int64 -} - -/* -ElastiCacheClustersFilters defines filters supported by the ec_cluster_instances resource -type. - - Example: - filter { - key: "name" - value: "service.*" - } - filter { - key: "engine" - value: "redis" - } - filter { - key: "labels.app" - value: "service-a" - } -*/ - -var ElastiCacheClustersFilters = struct { - RegexFilterKeys []string - LabelsFilter bool -}{ - []string{"name", "engine"}, - true, -} - -// elastiCacheClusterLister is a AWS ElastiCache cluster lister. It implements a cache, -// that's populated at a regular interval by making the AWS API calls. 
-// Listing actually only returns the current contents of that cache. -type elastiCacheClusterLister struct { - c *configpb.ElastiCacheClusters - client elasticache.DescribeCacheClustersAPIClient - tagclient *elasticache.Client - l *logger.Logger - mu sync.RWMutex - names []string - cacheList map[string]*ecClusterLocalCacheData - // This is mainly for unit testing, should be taken out if/when there is a respective - // interface in AWS SDK for go v2 to replace this. - discoverTags bool -} - -// listResources returns the list of resource records, where each record -// consists of an cluster name and the endpoint associated with it. -func (cl *elastiCacheClusterLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { - var resources []*pb.Resource - - allFilters, err := filter.ParseFilters(req.GetFilter(), ElastiCacheClustersFilters.RegexFilterKeys, "") - if err != nil { - return nil, err - } - - nameFilter, engineFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.RegexFilters["engine"], allFilters.LabelsFilter - - cl.mu.RLock() - defer cl.mu.RUnlock() - - for _, name := range cl.names { - ins := cl.cacheList[name].ci - if ins == nil { - cl.l.Errorf("elacticaches: cached info missing for %s", name) - continue - } - - if nameFilter != nil && !nameFilter.Match(name, cl.l) { - continue - } - if labelsFilter != nil && !labelsFilter.Match(ins.Tags, cl.l) { - continue - } - - if engineFilter != nil && !engineFilter.Match(ins.Engine, cl.l) { - continue - } - - resources = append(resources, &pb.Resource{ - Id: proto.String(ins.ID), - Name: proto.String(name), - Ip: proto.String(ins.IP), - Port: proto.Int32(ins.Port), - Labels: ins.Tags, - LastUpdated: proto.Int64(cl.cacheList[name].lastUpdated), - Info: []byte("clustered"), - }) - } - - cl.l.Infof("ec_clusters.listResources: returning %d instances", len(resources)) - return resources, nil -} - -// expand runs equivalent API calls as "aws elasticache describe-cache-clusters", -// and is used to populate the cache. 
More details about this call is available in -// https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-clusters.html -func (cl *elastiCacheClusterLister) expand(reEvalInterval time.Duration) { - cl.l.Infof("ec_clusters.expand: expanding AWS targets") - - resp, err := cl.client.DescribeCacheClusters(context.TODO(), nil) - if err != nil { - cl.l.Errorf("ec_clusters.expand: error while listing cache clusters: %v", err) - return - } - - var ids = make([]string, 0) - var cacheList = make(map[string]*ecClusterLocalCacheData) - ts := time.Now().Unix() - for _, c := range resp.CacheClusters { - if len(c.CacheNodes) == 0 { - continue - } - ci := &ecClusterInfo{ - ID: *c.CacheClusterId, - TLSEnabled: *c.TransitEncryptionEnabled, - IP: *c.CacheNodes[0].Endpoint.Address, - Port: *c.CacheNodes[0].Endpoint.Port, - Engine: *c.Engine, - Tags: make(map[string]string), - } - - if cl.discoverTags { - // AWS doesn't return Tag information in the response, we'll need to request it separately - // NB: This might get throttled by AWS, if we make too many requests, see if we can batch or slow down - // Add sleep if needed to the end of the loop - tagsResp, err := cl.tagclient.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ - ResourceName: c.ARN, - }) - if err != nil { - cl.l.Errorf("ec_clusters.expand: error getting tags for cluster %s: %v", *c.CacheClusterId, err) - continue - } - - // Convert to map - for _, t := range tagsResp.TagList { - ci.Tags[*t.Key] = *t.Value - } - } - - cacheList[*c.CacheClusterId] = &ecClusterLocalCacheData{ci, ts} - ids = append(ids, *c.CacheClusterId) - } - - cl.mu.Lock() - cl.names = ids - cl.cacheList = cacheList - cl.mu.Unlock() - - cl.l.Infof("ec_clusters.expand: got %d caches", len(ids)) -} - -func newElastiCacheClusterLister(c *configpb.ElastiCacheClusters, region string, l *logger.Logger) (*elastiCacheClusterLister, error) { - cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) - if err != nil { - return nil, fmt.Errorf("AWS configuration error: %v", err) - } - - client := elasticache.NewFromConfig(cfg) - - cl := &elastiCacheClusterLister{ - c: c, - client: client, - tagclient: client, - cacheList: make(map[string]*ecClusterLocalCacheData), - l: l, - discoverTags: true, - } - - reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second - go func() { - cl.expand(0) - // Introduce a random delay between 0-reEvalInterval before - // starting the refresh loop. If there are multiple cloudprober - // awsInstances, this will make sure that each instance calls AWS - // API at a different point of time. 
- randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) - time.Sleep(time.Duration(randomDelaySec) * time.Second) - for range time.Tick(reEvalInterval) { - cl.expand(reEvalInterval) - } - }() - return cl, nil -} diff --git a/internal/rds/aws/ec_clusters_test.go b/internal/rds/aws/ec_clusters_test.go deleted file mode 100644 index e31411855b8..00000000000 --- a/internal/rds/aws/ec_clusters_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package aws - -import ( - "context" - "strconv" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/service/elasticache" - "github.com/aws/aws-sdk-go-v2/service/elasticache/types" - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/logger" - "google.golang.org/protobuf/proto" -) - -type mockECClusterCache struct { - output elasticache.DescribeCacheClustersOutput - err error -} - -func (m mockECClusterCache) DescribeCacheClusters(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) { - return &m.output, m.err -} - -type testECCluster struct { - instances []testECClusterInstance - id string -} - -type testECClusterInstance struct { - id string - ipAddr string - port int32 - tags map[string]string - engine string -} - -func TestECClusterExpand(t *testing.T) { - cases := []struct { - err error - cluster *testECCluster - expectCount int - }{ - - { - cluster: &testECCluster{ - id: "test-cluster-id", - - instances: []testECClusterInstance{ - { - id: "test-id", - }, - { - id: "test-id-2", - }, - }, - }, - err: nil, - expectCount: 1, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - mock := mockECClusterCache{ - err: tt.err, - } - - tlsenabled := false - engine := "redis" - - if tt.cluster != nil { - mock.output = elasticache.DescribeCacheClustersOutput{ - CacheClusters: []types.CacheCluster{ - { - CacheClusterId: &tt.cluster.id, - TransitEncryptionEnabled: &tlsenabled, - Engine: &engine, - }, - }, - } - for _, v := range tt.cluster.instances { - c := types.CacheNode{ - Endpoint: &types.Endpoint{ - Address: &v.ipAddr, - Port: &v.port, - }, - } - mock.output.CacheClusters[0].CacheNodes = append(mock.output.CacheClusters[0].CacheNodes, c) - } - } - - il := &elastiCacheClusterLister{ - client: mock, - tagclient: &elasticache.Client{}, - cacheList: make(map[string]*ecClusterLocalCacheData), - discoverTags: false, // tag discovery to be tested once the client can be mocked - } - il.expand(time.Second) - - // Check for instance count - if len(il.cacheList) != tt.expectCount { - t.Errorf("Got %d instances, want %d", len(il.cacheList), tt.expectCount) - } - }) - } -} - -func TestECClusterLister(t *testing.T) { - cases := []struct { - instances []*testECClusterInstance - filter []*pb.Filter - expectErr bool - expectedCount int - }{ - { - instances: []*testECClusterInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 2, - }, - { - instances: []*testECClusterInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("ins."), - Value: proto.String("b"), - }, - }, - 
expectErr: true, - }, - { - instances: []*testECClusterInstance{}, - expectedCount: 0, - }, - { - instances: []*testECClusterInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"test1": "a"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"test2": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testECClusterInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("name"), - Value: proto.String("nonexistent"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testECClusterInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - engine: "memcached", - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - engine: "memcached", - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("engine"), - Value: proto.String("redis"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testECClusterInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - engine: "redis", - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - engine: "redis", - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("engine"), - Value: proto.String("redis"), - }, - }, - expectedCount: 2, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - - names := []string{} - cache := make(map[string]*ecClusterLocalCacheData) - for _, ti := range tt.instances { - ci := &ecClusterInfo{ - ID: ti.id, - IP: ti.ipAddr, - Tags: ti.tags, - Engine: ti.engine, - } - cache[ti.id] = &ecClusterLocalCacheData{ - ci: ci, - } - - names = append(names, ti.id) - } - - lister := &elastiCacheClusterLister{ - cacheList: cache, - names: names, - l: &logger.Logger{}, - } - - var filters []*pb.Filter - if tt.filter != nil { - filters = append(filters, tt.filter...) - } - - resources, err := lister.listResources(&pb.ListResourcesRequest{ - Filter: filters, - }) - - if err != nil { - if !tt.expectErr { - t.Errorf("Got error while listing resources: %v, expected no errors", err) - } - return - } - - if len(resources) != tt.expectedCount { - t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) - } - }) - } -} diff --git a/internal/rds/aws/ec_replicationgroups.go b/internal/rds/aws/ec_replicationgroups.go deleted file mode 100644 index f479bfa0f45..00000000000 --- a/internal/rds/aws/ec_replicationgroups.go +++ /dev/null @@ -1,231 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/internal/rds/server/filter" - "github.com/cloudprober/cloudprober/logger" - - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/elasticache" - "google.golang.org/protobuf/proto" -) - -// elastiCacheClusterLister represents replication group items that we fetch from the elasticache API. 
-type ecReplicationGroupInfo struct { - ID string - IP string - Port int32 - TLSEnabled bool - Clustered bool - Engine string - Tags map[string]string -} - -// ecReplicationGroupCacheData represents objects that we store in the local cache. -type ecReplicationGroupCacheData struct { - ci *ecReplicationGroupInfo - lastUpdated int64 -} - -/* -ElastiCacheRGFilters defines filters supported by the ec_replicationgroups_instances resource -type. - - Example: - filter { - key: "name" - value: "service.*" - } - filter { - key: "engine" - value: "redis" - } - filter { - key: "labels.app" - value: "service-a" - } -*/ - -var ElastiCacheRGFilters = struct { - RegexFilterKeys []string - LabelsFilter bool -}{ - []string{"name", "engine"}, - true, -} - -// elastiCacheRGLister is a AWS ElastiCache replication group lister. It implements a cache, -// that's populated at a regular interval by making the AWS API calls. -// Listing actually only returns the current contents of that cache. -type elastiCacheRGLister struct { - c *configpb.ElastiCacheReplicationGroups - client elasticache.DescribeReplicationGroupsAPIClient - tagclient *elasticache.Client - l *logger.Logger - mu sync.RWMutex - names []string - cacheList map[string]*ecReplicationGroupCacheData - // This is for unit testing, should be taken out if/when there is a respective - // interface in AWS SDK for go v2 to replace this logs - discoverTags bool -} - -// listResources returns the list of resource records, where each record -// consists of an cluster name and the endpoint associated with it. -func (cl *elastiCacheRGLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { - var resources []*pb.Resource - - allFilters, err := filter.ParseFilters(req.GetFilter(), ElastiCacheRGFilters.RegexFilterKeys, "") - if err != nil { - return nil, err - } - - nameFilter, engineFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.RegexFilters["engine"], allFilters.LabelsFilter - - cl.mu.RLock() - defer cl.mu.RUnlock() - - for _, name := range cl.names { - ins := cl.cacheList[name].ci - if ins == nil { - cl.l.Errorf("ec_replicationgroups: cached info missing for %s", name) - continue - } - - if nameFilter != nil && !nameFilter.Match(name, cl.l) { - continue - } - if labelsFilter != nil && !labelsFilter.Match(ins.Tags, cl.l) { - continue - } - - if engineFilter != nil && !engineFilter.Match(ins.Engine, cl.l) { - continue - } - - var info string - if ins.Clustered { - info = "clustered" - } - resources = append(resources, &pb.Resource{ - Id: proto.String(ins.ID), - Name: proto.String(name), - Ip: proto.String(ins.IP), - Port: proto.Int32(ins.Port), - Labels: ins.Tags, - LastUpdated: proto.Int64(cl.cacheList[name].lastUpdated), - Info: []byte(info), - }) - } - - cl.l.Infof("ec_replicationgroups.listResources: returning %d instances", len(resources)) - return resources, nil -} - -// expand runs equivalent API calls as "aws elasticache describe-replication-groups", -// and is used to populate the cache. 
More details about this API call can be found in -// https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeReplicationGroups.html -func (cl *elastiCacheRGLister) expand(reEvalInterval time.Duration) { - cl.l.Infof("ec_replicationgroups.expand: expanding AWS targets") - - resp, err := cl.client.DescribeReplicationGroups(context.TODO(), nil) - if err != nil { - cl.l.Errorf("ec_replicationgroups.expand: error while listing replication groups: %v", err) - return - } - - var ids = make([]string, 0) - var cacheList = make(map[string]*ecReplicationGroupCacheData) - ts := time.Now().Unix() - - for _, r := range resp.ReplicationGroups { - var ci *ecReplicationGroupInfo - if r.ConfigurationEndpoint != nil { //clustered - ci = &ecReplicationGroupInfo{ - ID: *r.ReplicationGroupId, - IP: *r.ConfigurationEndpoint.Address, - Port: *r.ConfigurationEndpoint.Port, - TLSEnabled: *r.TransitEncryptionEnabled, - Clustered: true, - Tags: make(map[string]string), - } - } else if len(r.NodeGroups) > 0 && r.NodeGroups[0].PrimaryEndpoint != nil { - ci = &ecReplicationGroupInfo{ - ID: *r.ReplicationGroupId, - IP: *r.NodeGroups[0].PrimaryEndpoint.Address, - Port: *r.NodeGroups[0].PrimaryEndpoint.Port, - TLSEnabled: *r.TransitEncryptionEnabled, - Clustered: false, - Tags: make(map[string]string), - } - } else { - continue - } - - if cl.discoverTags { - // Same comments as the same calls above - tagsResp, err := cl.tagclient.ListTagsForResource(context.TODO(), &elasticache.ListTagsForResourceInput{ - ResourceName: r.ARN, - }) - if err != nil { - cl.l.Errorf("ec_replicationgroups.expand: error getting tags for replication group %s: %v", *r.ReplicationGroupId, err) - continue - } - - // Convert to map - for _, t := range tagsResp.TagList { - ci.Tags[*t.Key] = *t.Value - } - } - - cacheList[*r.ReplicationGroupId] = &ecReplicationGroupCacheData{ci, ts} - ids = append(ids, *r.ReplicationGroupId) - } - - cl.mu.Lock() - cl.names = ids - cl.cacheList = cacheList - cl.mu.Unlock() - - cl.l.Infof("ec_replicationgroups.expand: got %d caches", len(ids)) -} - -func newElastiCacheRGLister(c *configpb.ElastiCacheReplicationGroups, region string, l *logger.Logger) (*elastiCacheRGLister, error) { - cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) - if err != nil { - return nil, fmt.Errorf("AWS configuration error: %v", err) - } - - client := elasticache.NewFromConfig(cfg) - - cl := &elastiCacheRGLister{ - c: c, - client: client, - tagclient: client, - cacheList: make(map[string]*ecReplicationGroupCacheData), - l: l, - discoverTags: true, - } - - reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second - go func() { - cl.expand(0) - // Introduce a random delay between 0-reEvalInterval before - // starting the refresh loop. If there are multiple cloudprober - // awsInstances, this will make sure that each instance calls AWS - // API at a different point of time. 
- randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) - time.Sleep(time.Duration(randomDelaySec) * time.Second) - for range time.Tick(reEvalInterval) { - cl.expand(reEvalInterval) - } - }() - return cl, nil -} diff --git a/internal/rds/aws/ec_replicationgroups_test.go b/internal/rds/aws/ec_replicationgroups_test.go deleted file mode 100644 index 95ec4951b2d..00000000000 --- a/internal/rds/aws/ec_replicationgroups_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/service/elasticache" - "github.com/aws/aws-sdk-go-v2/service/elasticache/types" - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/logger" - "google.golang.org/protobuf/proto" -) - -type mockECRGCache struct { - output elasticache.DescribeReplicationGroupsOutput - err error -} - -func (m mockECRGCache) DescribeReplicationGroups(ctx context.Context, params *elasticache.DescribeReplicationGroupsInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeReplicationGroupsOutput, error) { - return &m.output, m.err -} - -type testECReplicationGroup struct { - instances []testECRGInstance - id string -} - -type testECRGInstance struct { - id string - ipAddr string - port int32 - tags map[string]string - engine string -} - -func TestECRGExpand(t *testing.T) { - cases := []struct { - err error - group *testECReplicationGroup - expectCount int - }{ - { - group: &testECReplicationGroup{ - id: "test-cluster-id", - instances: []testECRGInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - port: 1000, - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - port: 1000, - tags: map[string]string{"a": "b"}, - }, - }, - }, - err: nil, - expectCount: 1, - }, - { - group: &testECReplicationGroup{}, - err: nil, - expectCount: 0, - }, - { - group: &testECReplicationGroup{}, - err: fmt.Errorf("some error"), - expectCount: 0, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - mock := mockECRGCache{ - err: tt.err, - } - - tlsenabled := false - - if tt.group != nil { - mock.output = elasticache.DescribeReplicationGroupsOutput{} - for _, v := range tt.group.instances { - g := types.ReplicationGroup{ - ReplicationGroupId: &v.id, - TransitEncryptionEnabled: &tlsenabled, - ConfigurationEndpoint: &types.Endpoint{ - Address: &v.ipAddr, - Port: &v.port, - }, - } - mock.output.ReplicationGroups = append(mock.output.ReplicationGroups, g) - } - } - - il := &elastiCacheRGLister{ - client: mock, - tagclient: &elasticache.Client{}, - cacheList: make(map[string]*ecReplicationGroupCacheData), - discoverTags: false, // tag discovery to be tested once the client can be mocked - } - il.expand(time.Second) - - // Check for instance count - if len(il.cacheList) != tt.expectCount { - t.Errorf("Got %d instances, want %d", len(il.cacheList), tt.expectCount) - } - }) - } -} - -func TestECLister(t *testing.T) { - cases := []struct { - instances []*testECRGInstance - filter []*pb.Filter - expectErr bool - expectedCount int - }{ - { - instances: []*testECRGInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 2, - }, - { - instances: []*testECRGInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: 
map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("ins."), - Value: proto.String("b"), - }, - }, - expectErr: true, - }, - { - instances: []*testECRGInstance{}, - expectedCount: 0, - }, - { - instances: []*testECRGInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"test1": "a"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"test2": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testECRGInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("name"), - Value: proto.String("nonexistent"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testECRGInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - engine: "memcached", - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - engine: "memcached", - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("engine"), - Value: proto.String("redis"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testECRGInstance{ - { - id: "test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - engine: "redis", - }, - { - id: "test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - engine: "redis", - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("engine"), - Value: proto.String("redis"), - }, - }, - expectedCount: 2, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - - names := []string{} - cache := make(map[string]*ecReplicationGroupCacheData) - for _, ti := range tt.instances { - ci := &ecReplicationGroupInfo{ - ID: ti.id, - IP: ti.ipAddr, - Tags: ti.tags, - Engine: ti.engine, - } - cache[ti.id] = &ecReplicationGroupCacheData{ - ci: ci, - } - - names = append(names, ti.id) - } - - lister := &elastiCacheRGLister{ - cacheList: cache, - names: names, - l: &logger.Logger{}, - } - - var filters []*pb.Filter - if tt.filter != nil { - filters = append(filters, tt.filter...) - } - - resources, err := lister.listResources(&pb.ListResourcesRequest{ - Filter: filters, - }) - - if err != nil { - if !tt.expectErr { - t.Errorf("Got error while listing resources: %v, expected no errors", err) - } - return - } - - if len(resources) != tt.expectedCount { - t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) - } - }) - } -} diff --git a/internal/rds/aws/rds_clusters.go b/internal/rds/aws/rds_clusters.go deleted file mode 100644 index c1e9f53a799..00000000000 --- a/internal/rds/aws/rds_clusters.go +++ /dev/null @@ -1,172 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/internal/rds/server/filter" - "github.com/cloudprober/cloudprober/logger" - - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/rds" - "google.golang.org/protobuf/proto" -) - -// rdsClusterInfo represents cluster items that we fetch from the RDS API. 
-type rdsClusterInfo struct { - Name string - Ip string - Port int32 - IsCluster bool - Tags map[string]string -} - -// rdsClusterData represents objects that we store in cache. -type rdsClusterData struct { - ri *rdsClusterInfo - lastUpdated int64 -} - -var RDSClustersFilters = struct { - RegexFilterKeys []string - LabelsFilter bool -}{ - []string{"name", "engine"}, - true, -} - -// rdsClustersLister is a AWS Relational Database Service lister. It implements a cache, -// that's populated at a regular interval by making the AWS API calls. -// Listing actually only returns the current contents of that cache. -type rdsClustersLister struct { - c *configpb.RDSClusters - client rds.DescribeDBClustersAPIClient - l *logger.Logger - mu sync.RWMutex - names []string - dbClustersList map[string]*rdsClusterData -} - -// listResources returns the list of resource records, where each record -// consists of an cluster name and the endpoint associated with it. -func (rl *rdsClustersLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { - var resources []*pb.Resource - - allFilters, err := filter.ParseFilters(req.GetFilter(), RDSClustersFilters.RegexFilterKeys, "") - if err != nil { - return nil, err - } - - nameFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.LabelsFilter - - rl.mu.RLock() - defer rl.mu.RUnlock() - - for _, name := range rl.names { - ins := rl.dbClustersList[name].ri - if ins == nil { - rl.l.Errorf("rds_clusters.listResources: db info missing for %s", name) - continue - } - - if nameFilter != nil && !nameFilter.Match(name, rl.l) { - continue - } - if labelsFilter != nil && !labelsFilter.Match(ins.Tags, rl.l) { - continue - } - - resources = append(resources, &pb.Resource{ - Name: proto.String(name), - Ip: proto.String(ins.Ip), - Port: proto.Int32(ins.Port), - Labels: ins.Tags, - LastUpdated: proto.Int64(rl.dbClustersList[name].lastUpdated), - }) - } - - rl.l.Infof("rds_clusters.listResources: returning %d instances", len(resources)) - return resources, nil -} - -// expand runs equivalent API calls as "aws rds describe-db-clusters", -// and is used to populate the cache. 
It is used to obtain RDS cluster information -// More details are available in -// https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html -func (rl *rdsClustersLister) expand(reEvalInterval time.Duration) { - rl.l.Infof("rds_clusters.expand: expanding AWS targets") - - resCluster, err := rl.client.DescribeDBClusters(context.TODO(), nil) - if err != nil { - rl.l.Errorf("rds_clusters.expand: error while listing database clusters: %v", err) - return - } - - var ids = make([]string, 0) - var dbList = make(map[string]*rdsClusterData) - - ts := time.Now().Unix() - for _, d := range resCluster.DBClusters { - if d.DBClusterIdentifier == nil || d.DatabaseName == nil || d.Endpoint == nil || d.Port == nil { - continue - } - - ci := &rdsClusterInfo{ - Name: *d.DBClusterIdentifier, - Ip: *d.Endpoint, - Port: *d.Port, - Tags: make(map[string]string), - } - - // Convert to map - for _, t := range d.TagList { - ci.Tags[*t.Key] = *t.Value - } - - dbList[*d.DBClusterIdentifier] = &rdsClusterData{ci, ts} - ids = append(ids, *d.DBClusterIdentifier) - } - - rl.mu.Lock() - rl.names = ids - rl.dbClustersList = dbList - rl.mu.Unlock() - - rl.l.Infof("rds_clusters.expand: got %d databases", len(ids)) -} - -func newRdsClustersLister(c *configpb.RDSClusters, region string, l *logger.Logger) (*rdsClustersLister, error) { - cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) - if err != nil { - return nil, fmt.Errorf("AWS configuration error: %v", err) - } - - client := rds.NewFromConfig(cfg) - - cl := &rdsClustersLister{ - c: c, - client: client, - dbClustersList: make(map[string]*rdsClusterData), - l: l, - } - - reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second - go func() { - cl.expand(0) - // Introduce a random delay between 0-reEvalInterval before - // starting the refresh loop. If there are multiple cloudprober - // awsInstances, this will make sure that each instance calls AWS - // API at a different point of time. - randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) - time.Sleep(time.Duration(randomDelaySec) * time.Second) - for range time.Tick(reEvalInterval) { - cl.expand(reEvalInterval) - } - }() - return cl, nil -} diff --git a/internal/rds/aws/rds_clusters_test.go b/internal/rds/aws/rds_clusters_test.go deleted file mode 100644 index 128a18b7a0f..00000000000 --- a/internal/rds/aws/rds_clusters_test.go +++ /dev/null @@ -1,276 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/logger" - "google.golang.org/protobuf/proto" - - "github.com/aws/aws-sdk-go-v2/service/rds" - "github.com/aws/aws-sdk-go-v2/service/rds/types" -) - -type mockRDSDescribeDBClustersAPIClient func(context.Context, *rds.DescribeDBClustersInput, ...func(*rds.Options)) (*rds.DescribeDBClustersOutput, error) - -func (m mockRDSDescribeDBClustersAPIClient) DescribeDBClusters(ctx context.Context, params *rds.DescribeDBClustersInput, optFns ...func(*rds.Options)) (*rds.DescribeDBClustersOutput, error) { - return m(ctx, params, optFns...) 
-} - -type testRDSClusters struct { - id string - name string - ipAddr string - port int32 - tags map[string]string -} - -func TestRDSClustersExpand(t *testing.T) { - cases := []struct { - err error - instances []*testRDSClusters - expectCount int - }{ - { - instances: []*testRDSClusters{ - { - id: "rds-test-id", - name: "rds-test-cluster", - ipAddr: "10.0.0.2", - port: 5431, - tags: map[string]string{"a": "b"}, - }, - { - id: "rds-test-id-2", - name: "rds-test-cluster-2", - ipAddr: "10.0.0.3", - port: 5431, - tags: map[string]string{"a": "b"}, - }, - }, - err: nil, - expectCount: 2, - }, - { - instances: []*testRDSClusters{}, - err: nil, - expectCount: 0, - }, - { - instances: []*testRDSClusters{ - { - id: "rds-test-id", - }, - { - id: "rds-test-id-2", - }, - }, - err: nil, - expectCount: 0, - }, - { - instances: []*testRDSClusters{}, - err: fmt.Errorf("some rds error"), - expectCount: 0, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - client := func(t *testing.T, instances []*testRDSClusters) rds.DescribeDBClustersAPIClient { - return mockRDSDescribeDBClustersAPIClient(func(ctx context.Context, params *rds.DescribeDBClustersInput, optFns ...func(*rds.Options)) (*rds.DescribeDBClustersOutput, error) { - t.Helper() - - out := &rds.DescribeDBClustersOutput{} - - for _, v := range instances { - c := types.DBCluster{ - DBClusterIdentifier: &v.id, - DatabaseName: &v.name, - } - - if v.ipAddr != "" { - c.Endpoint = &v.ipAddr - } - - if v.port > 0 { - c.Port = &v.port - } - - for tk, tv := range v.tags { - tag := types.Tag{ - Key: &tk, - Value: &tv, - } - - c.TagList = append(c.TagList, tag) - } - out.DBClusters = append(out.DBClusters, c) - } - - return out, tt.err - }) - } - - il := &rdsClustersLister{ - client: client(t, tt.instances), - dbClustersList: make(map[string]*rdsClusterData), - } - - il.expand(time.Second) - - // Check for instance count - if len(il.dbClustersList) != tt.expectCount { - t.Errorf("Got %d instances, want %d", len(il.dbClustersList), tt.expectCount) - } - }) - } -} - -func TestRDSClustersLister(t *testing.T) { - cases := []struct { - instances []*testRDSClusters - filter []*pb.Filter - expectErr bool - expectedCount int - }{ - { - instances: []*testRDSClusters{ - { - id: "rds-cluster-test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "rds-cluster-test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 2, - }, - { - instances: []*testRDSClusters{ - { - id: "rds-cluster-test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "rds-cluster-test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("ins."), - Value: proto.String("b"), - }, - }, - expectErr: true, - }, - { - instances: []*testRDSClusters{}, - expectedCount: 0, - }, - { - instances: []*testRDSClusters{ - { - id: "rds-test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"test1": "a"}, - }, - { - id: "rds-test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"test2": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testRDSClusters{ - { - id: "rds-test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - id: "rds-test-id-2", - ipAddr: "10.0.0.3", - tags: 
map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("name"), - Value: proto.String("nonexistent"), - }, - }, - expectedCount: 0, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - - names := []string{} - cache := make(map[string]*rdsClusterData) - for _, ti := range tt.instances { - ii := &rdsClusterInfo{ - Name: ti.id, - Ip: ti.ipAddr, - Tags: ti.tags, - } - cache[ti.id] = &rdsClusterData{ - ri: ii, - } - - names = append(names, ti.id) - } - - lister := &rdsClustersLister{ - dbClustersList: cache, - names: names, - l: &logger.Logger{}, - } - - var filters []*pb.Filter - if tt.filter != nil { - filters = append(filters, tt.filter...) - } - - resources, err := lister.listResources(&pb.ListResourcesRequest{ - Filter: filters, - }) - - if err != nil { - if !tt.expectErr { - t.Errorf("Got error while listing resources: %v, expected no errors", err) - } - return - } - - if len(resources) != tt.expectedCount { - t.Errorf("Got wrong number of targets. Expected: %d, Got: %d", tt.expectedCount, len(resources)) - } - }) - } -} diff --git a/internal/rds/aws/rds_instances.go b/internal/rds/aws/rds_instances.go deleted file mode 100644 index 8e06ab16375..00000000000 --- a/internal/rds/aws/rds_instances.go +++ /dev/null @@ -1,178 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - configpb "github.com/cloudprober/cloudprober/internal/rds/aws/proto" - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/internal/rds/server/filter" - "github.com/cloudprober/cloudprober/logger" - - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/rds" - "google.golang.org/protobuf/proto" -) - -// rdsInstanceInfo represents instance items that we fetch from the RDS API. -type rdsInstanceInfo struct { - Name string - Ip string - Port int32 - IsReplica bool - Tags map[string]string -} - -// rdsData represents objects that we store in cache. -type rdsInstanceData struct { - ri *rdsInstanceInfo - lastUpdated int64 -} - -var RDSInstancesFilters = struct { - RegexFilterKeys []string - LabelsFilter bool -}{ - []string{"name", "engine"}, - true, -} - -// rdsInstancesLister is an AWS Relational Database Service Instances lister. It implements a cache, -// that's populated at a regular interval by making the AWS API calls. -// Listing actually only returns the current contents of that cache. -type rdsInstancesLister struct { - c *configpb.RDSInstances - client rds.DescribeDBInstancesAPIClient - l *logger.Logger - mu sync.RWMutex - names []string - dbInstancesList map[string]*rdsInstanceData -} - -// listResources returns the list of resource records, where each record -// consists of an cluster name and the endpoint associated with it. 
-func (rl *rdsInstancesLister) listResources(req *pb.ListResourcesRequest) ([]*pb.Resource, error) { - var resources []*pb.Resource - - allFilters, err := filter.ParseFilters(req.GetFilter(), RDSInstancesFilters.RegexFilterKeys, "") - if err != nil { - return nil, err - } - - nameFilter, labelsFilter := allFilters.RegexFilters["name"], allFilters.LabelsFilter - - rl.mu.RLock() - defer rl.mu.RUnlock() - - for _, name := range rl.names { - ins := rl.dbInstancesList[name].ri - if ins == nil { - rl.l.Errorf("rds_instances.listResources: db info missing for %s", name) - continue - } - - if nameFilter != nil && !nameFilter.Match(name, rl.l) { - continue - } - if labelsFilter != nil && !labelsFilter.Match(ins.Tags, rl.l) { - continue - } - - resources = append(resources, &pb.Resource{ - Name: proto.String(name), - Ip: proto.String(ins.Ip), - Port: proto.Int32(ins.Port), - Labels: ins.Tags, - LastUpdated: proto.Int64(rl.dbInstancesList[name].lastUpdated), - }) - } - - rl.l.Infof("rds_instances.listResources: returning %d instances", len(resources)) - return resources, nil -} - -// expand runs equivalent API calls as "aws rds describe-db-instances", -// and is used to populate the cache. It returns the instance information -// for instances provisioned for RDS. -// More details can be found in -// https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html -func (rl *rdsInstancesLister) expand(reEvalInterval time.Duration) { - rl.l.Infof("rds_instances.expand: expanding AWS targets") - - result, err := rl.client.DescribeDBInstances(context.TODO(), nil) - if err != nil { - rl.l.Errorf("rds_instances.expand: error while listing database instances: %v", err) - return - } - - var ids = make([]string, 0) - var dbInstancesList = make(map[string]*rdsInstanceData) - - ts := time.Now().Unix() - for _, r := range result.DBInstances { - if r.DBInstanceIdentifier == nil || r.DBName == nil || r.Endpoint == nil { - continue - } - isReplica := false - if r.DBClusterIdentifier != nil || r.ReadReplicaSourceDBInstanceIdentifier != nil { - isReplica = true - } - - ci := &rdsInstanceInfo{ - Name: *r.DBName, - Ip: *r.Endpoint.Address, - Port: *r.Endpoint.Port, - IsReplica: isReplica, - Tags: make(map[string]string), - } - - // Convert to map - for _, t := range r.TagList { - ci.Tags[*t.Key] = *t.Value - } - - dbInstancesList[*r.DBName] = &rdsInstanceData{ci, ts} - ids = append(ids, *r.DBName) - } - - rl.mu.Lock() - rl.names = ids - rl.dbInstancesList = dbInstancesList - rl.mu.Unlock() - - rl.l.Infof("rds_instances.expand: got %d databases", len(ids)) -} - -func newRdsInstancesLister(c *configpb.RDSInstances, region string, l *logger.Logger) (*rdsInstancesLister, error) { - cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) - if err != nil { - return nil, fmt.Errorf("AWS configuration error: %v", err) - } - - client := rds.NewFromConfig(cfg) - - cl := &rdsInstancesLister{ - c: c, - client: client, - dbInstancesList: make(map[string]*rdsInstanceData), - l: l, - } - - reEvalInterval := time.Duration(c.GetReEvalSec()) * time.Second - go func() { - cl.expand(0) - // Introduce a random delay between 0-reEvalInterval before - // starting the refresh loop. If there are multiple cloudprober - // awsInstances, this will make sure that each instance calls AWS - // API at a different point of time. 
- randomDelaySec := rand.Intn(int(reEvalInterval.Seconds())) - time.Sleep(time.Duration(randomDelaySec) * time.Second) - for range time.Tick(reEvalInterval) { - cl.expand(reEvalInterval) - } - }() - return cl, nil -} diff --git a/internal/rds/aws/rds_instances_test.go b/internal/rds/aws/rds_instances_test.go deleted file mode 100644 index 92c4fc99874..00000000000 --- a/internal/rds/aws/rds_instances_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - pb "github.com/cloudprober/cloudprober/internal/rds/proto" - "github.com/cloudprober/cloudprober/logger" - "google.golang.org/protobuf/proto" - - "github.com/aws/aws-sdk-go-v2/service/rds" - "github.com/aws/aws-sdk-go-v2/service/rds/types" -) - -type mockRDSDescribeDBInstancesAPIClient func(context.Context, *rds.DescribeDBInstancesInput, ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) - -func (m mockRDSDescribeDBInstancesAPIClient) DescribeDBInstances(ctx context.Context, params *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) { - return m(ctx, params, optFns...) -} - -type testRDSInstances struct { - name string - ipAddr string - port int32 - isReplica bool - tags map[string]string -} - -func TestRDSInstancesExpand(t *testing.T) { - cases := []struct { - err error - instances []*testRDSInstances - expectCount int - }{ - { - instances: []*testRDSInstances{ - { - name: "rds-test-instance", - ipAddr: "10.0.0.2", - port: 5431, - tags: map[string]string{"a": "b"}, - }, - }, - err: nil, - expectCount: 1, - }, - { - instances: []*testRDSInstances{ - { - name: "rds-test-instance", - ipAddr: "10.0.0.2", - port: 5431, - tags: map[string]string{"a": "b"}, - }, - { - name: "rds-test-instance-2", - ipAddr: "10.0.0.3", - port: 5431, - tags: map[string]string{"a": "b"}, - }, - }, - err: nil, - expectCount: 2, - }, - { - instances: []*testRDSInstances{}, - err: nil, - expectCount: 0, - }, - { - instances: []*testRDSInstances{ - { - name: "rds-test-name", - }, - { - name: "rds-test-name-2", - }, - }, - err: nil, - expectCount: 0, - }, - { - instances: []*testRDSInstances{}, - err: fmt.Errorf("some rds error"), - expectCount: 0, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - client := func(t *testing.T, instances []*testRDSInstances) rds.DescribeDBInstancesAPIClient { - return mockRDSDescribeDBInstancesAPIClient(func(ctx context.Context, params *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) { - t.Helper() - - out := &rds.DescribeDBInstancesOutput{} - - for _, v := range instances { - c := types.DBInstance{ - DBInstanceIdentifier: &v.name, - DBName: &v.name, - } - - if v.ipAddr != "" { - c.Endpoint = &types.Endpoint{ - Address: &v.ipAddr, - Port: &v.port, - } - } - - for tk, tv := range v.tags { - tag := types.Tag{ - Key: &tk, - Value: &tv, - } - - c.TagList = append(c.TagList, tag) - } - out.DBInstances = append(out.DBInstances, c) - } - - return out, tt.err - }) - } - - il := &rdsInstancesLister{ - client: client(t, tt.instances), - dbInstancesList: make(map[string]*rdsInstanceData), - } - - il.expand(time.Second) - - // Check for instance count - if len(il.dbInstancesList) != tt.expectCount { - t.Errorf("Got %d instances, want %d", len(il.dbInstancesList), tt.expectCount) - } - }) - } -} - -func TestRDSInstancesLister(t *testing.T) { - cases := []struct { - instances []*testRDSInstances - filter []*pb.Filter - expectErr bool - 
expectedCount int - }{ - { - instances: []*testRDSInstances{ - { - name: "rds-cluster-test-id", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - name: "rds-cluster-test-id-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 2, - }, - { - instances: []*testRDSInstances{ - { - name: "rds-instance-test-name", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - name: "rds-instance-test-name-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("ins."), - Value: proto.String("b"), - }, - }, - expectErr: true, - }, - { - instances: []*testRDSInstances{}, - expectedCount: 0, - }, - { - instances: []*testRDSInstances{ - { - name: "rds-test-name", - ipAddr: "10.0.0.2", - tags: map[string]string{"test1": "a"}, - }, - { - name: "rds-test-name-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"test2": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("labels.a"), - Value: proto.String("b"), - }, - }, - expectedCount: 0, - }, - { - instances: []*testRDSInstances{ - { - name: "rds-test-name", - ipAddr: "10.0.0.2", - tags: map[string]string{"a": "b"}, - }, - { - name: "rds-test-name-2", - ipAddr: "10.0.0.3", - tags: map[string]string{"a": "b"}, - }, - }, - filter: []*pb.Filter{ - { - Key: proto.String("name"), - Value: proto.String("nonexistent"), - }, - }, - expectedCount: 0, - }, - } - - for i, tt := range cases { - t.Run(strconv.Itoa(i), func(t *testing.T) { - - names := []string{} - cache := make(map[string]*rdsInstanceData) - for _, ti := range tt.instances { - ii := &rdsInstanceInfo{ - Name: ti.name, - Ip: ti.ipAddr, - Tags: ti.tags, - } - cache[ti.name] = &rdsInstanceData{ - ri: ii, - } - - names = append(names, ti.name) - } - - lister := &rdsInstancesLister{ - dbInstancesList: cache, - names: names, - l: &logger.Logger{}, - } - - var filters []*pb.Filter - if tt.filter != nil { - filters = append(filters, tt.filter...) - } - - resources, err := lister.listResources(&pb.ListResourcesRequest{ - Filter: filters, - }) - - if err != nil { - if !tt.expectErr { - t.Errorf("Got error while listing resources: %v, expected no errors", err) - } - return - } - - if len(resources) != tt.expectedCount { - t.Errorf("Got wrong number of targets. 
Expected: %d, Got: %d", tt.expectedCount, len(resources)) - } - }) - } -} From 6785310292824ca20a0b381b07d44475545e65f7 Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Tue, 20 Feb 2024 14:25:21 -0800 Subject: [PATCH 09/10] address comments --- internal/rds/aws/testdata/targets.json | 31 ----------------------- internal/rds/aws/testdata/targets1.textpb | 23 ----------------- internal/rds/aws/testdata/targets2.textpb | 11 -------- internal/rds/client/client.go | 7 +++-- targets/endpoint/endpoint.go | 1 - 5 files changed, 3 insertions(+), 70 deletions(-) delete mode 100644 internal/rds/aws/testdata/targets.json delete mode 100644 internal/rds/aws/testdata/targets1.textpb delete mode 100644 internal/rds/aws/testdata/targets2.textpb diff --git a/internal/rds/aws/testdata/targets.json b/internal/rds/aws/testdata/targets.json deleted file mode 100644 index 14d423802b6..00000000000 --- a/internal/rds/aws/testdata/targets.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "resource": [ - { - "name": "switch-xx-1", - "ip": "10.1.1.1", - "port": 8080, - "labels": { - "device_type": "switch", - "cluster": "xx" - } - }, - { - "name": "switch-xx-2", - "ip": "10.1.1.2", - "port": 8081, - "labels": { - "cluster": "xx" - } - }, - { - "name": "switch-yy-1", - "ip": "10.1.2.1", - "port": 8080 - }, - { - "name": "switch-zz-1", - "ip": "::aaa:1", - "port": 8080 - } - ] -} diff --git a/internal/rds/aws/testdata/targets1.textpb b/internal/rds/aws/testdata/targets1.textpb deleted file mode 100644 index ecad1f25a52..00000000000 --- a/internal/rds/aws/testdata/targets1.textpb +++ /dev/null @@ -1,23 +0,0 @@ -resource { - name: "switch-xx-1" - ip: "10.1.1.1" - port: 8080 - labels { - key: "device_type" - value: "switch" - } - labels { - key: "cluster" - value: "xx" - } -} - -resource { - name: "switch-xx-2" - ip: "10.1.1.2" - port: 8081 - labels { - key: "cluster" - value: "xx" - } -} diff --git a/internal/rds/aws/testdata/targets2.textpb b/internal/rds/aws/testdata/targets2.textpb deleted file mode 100644 index fb80c4293f0..00000000000 --- a/internal/rds/aws/testdata/targets2.textpb +++ /dev/null @@ -1,11 +0,0 @@ -resource { - name: "switch-yy-1" - ip: "10.1.2.1" - port: 8080 -} - -resource { - name: "switch-zz-1" - ip: "::aaa:1" - port: 8080 -} diff --git a/internal/rds/client/client.go b/internal/rds/client/client.go index 8fc6bd8e7fc..e64f17e2b39 100644 --- a/internal/rds/client/client.go +++ b/internal/rds/client/client.go @@ -32,6 +32,7 @@ import ( "github.com/cloudprober/cloudprober/internal/oauth" configpb "github.com/cloudprober/cloudprober/internal/rds/client/proto" pb "github.com/cloudprober/cloudprober/internal/rds/proto" + spb "github.com/cloudprober/cloudprober/internal/rds/proto" "github.com/cloudprober/cloudprober/internal/tlsconfig" "github.com/cloudprober/cloudprober/logger" "github.com/cloudprober/cloudprober/targets/endpoint" @@ -55,7 +56,6 @@ type cacheRecord struct { ipStr string port int labels map[string]string - info string lastUpdated time.Time } @@ -138,7 +138,6 @@ func (client *Client) updateState(response *pb.ListResourcesResponse) { ipStr: res.GetIp(), port: int(res.GetPort()), labels: res.Labels, - info: string(res.GetInfo()), lastUpdated: time.Unix(res.GetLastUpdated(), 0), } client.names[i] = res.GetName() @@ -160,7 +159,7 @@ func (client *Client) ListEndpoints() []endpoint.Endpoint { result := make([]endpoint.Endpoint, len(client.names)) for i, name := range client.names { cr := client.cache[name] - result[i] = endpoint.Endpoint{Name: name, IP: cr.ip, Port: cr.port, Labels: cr.labels, 
LastUpdated: cr.lastUpdated, Info: cr.info} + result[i] = endpoint.Endpoint{Name: name, IP: cr.ip, Port: cr.port, Labels: cr.labels, LastUpdated: cr.lastUpdated} } return result } @@ -240,7 +239,7 @@ func (client *Client) initListResourcesFunc() error { } client.listResources = func(ctx context.Context, in *pb.ListResourcesRequest) (*pb.ListResourcesResponse, error) { - return pb.NewResourceDiscoveryClient(conn).ListResources(ctx, in) + return spb.NewResourceDiscoveryClient(conn).ListResources(ctx, in) } return nil diff --git a/targets/endpoint/endpoint.go b/targets/endpoint/endpoint.go index 92f6c3f4570..858329abe11 100644 --- a/targets/endpoint/endpoint.go +++ b/targets/endpoint/endpoint.go @@ -36,7 +36,6 @@ type Endpoint struct { LastUpdated time.Time Port int IP net.IP - Info string } // Key returns a string key that uniquely identifies that endpoint. From 46069f8803de7127f2b98e78e055909c85f70cf6 Mon Sep 17 00:00:00 2001 From: rabunkosar-dd Date: Tue, 20 Feb 2024 14:37:11 -0800 Subject: [PATCH 10/10] update the comment --- internal/rds/aws/proto/config.proto | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/rds/aws/proto/config.proto b/internal/rds/aws/proto/config.proto index c33855673dd..a40aba87ac2 100644 --- a/internal/rds/aws/proto/config.proto +++ b/internal/rds/aws/proto/config.proto @@ -79,7 +79,7 @@ message RDSInstances { // LoadBalancers discovery options. message LoadBalancers { - // Amazon Resource Name (ARN) of the load balancer + // Names of the load balancers, // if specified, only the corresponding load balancer information is returned. repeated string name = 1; } @@ -110,5 +110,4 @@ message ProviderConfig { // RDS clusters discovery options. optional RDSClusters rds_clusters = 7; - }
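
The ProviderConfig above is the full knob surface this series exposes for AWS discovery. A minimal sketch of how it could be wired up end to end follows: the rds_clusters block, re_eval_sec, and the filter keys the listers support (name, engine, labels.*) come from the messages and listers in these patches, while the aws_config provider field name and the "aws://rds_clusters" resource path are assumptions for illustration and may not match the exact names the server registration uses.

# Server side: enable the AWS provider and have it refresh the RDS cluster
# cache every 300s. re_eval_sec comes from the proto above; aws_config is an
# assumed field name for the provider block, not taken from this patch.
rds_server {
  provider {
    aws_config {
      rds_clusters {
        re_eval_sec: 300
      }
    }
  }
}

# Probe side: point a TCP probe at the discovered cluster endpoints and
# narrow them with the supported filters (name, engine, labels.*). The
# resource_path scheme used here is an assumption.
probe {
  name: "rds_cluster_connect"
  type: TCP
  targets {
    rds_targets {
      resource_path: "aws://rds_clusters"
      filter {
        key: "labels.env"
        value: "prod"
      }
    }
  }
  interval_msec: 10000
}

Because listResources only reads each lister's local cache, filters are evaluated per request while the AWS API is called only from the periodic expand loop (once at startup, then every re_eval_sec after a random initial delay), so tightening filters does not add API traffic.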