
Commit

Merge 302cd72 into f088727
tbuchaillot committed Sep 23, 2020
2 parents (f088727 + 302cd72), commit 212d879
Showing 5 changed files with 222 additions and 8 deletions.
6 changes: 6 additions & 0 deletions cli/linter/schema.json
@@ -928,6 +928,12 @@
       },
       "disable": {
         "type": "boolean"
+      },
+      "disable_management_poller": {
+        "type": "boolean"
+      },
+      "poller_group": {
+        "type": "string"
       }
     }
   },
6 changes: 4 additions & 2 deletions config/config.go
@@ -206,8 +206,10 @@ type UptimeTestsConfigDetail struct {
 }
 
 type UptimeTestsConfig struct {
-	Disable bool                    `json:"disable"`
-	Config  UptimeTestsConfigDetail `json:"config"`
+	Disable                 bool                    `json:"disable"`
+	DisableManagementPoller bool                    `json:"disable_management_poller"`
+	PollerGroup             string                  `json:"poller_group"`
+	Config                  UptimeTestsConfigDetail `json:"config"`
 }
 
 type ServiceDiscoveryConf struct {
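
Note: the two new struct fields surface in the gateway configuration as "disable_management_poller" and "poller_group" under "uptime_tests" (matching the JSON tags above and the linter schema change). A minimal sketch of setting them programmatically, mirroring the config.Global()/config.SetGlobal() pattern used by the new tests in this commit; the group name "eu-dc-1" is purely illustrative and not defined anywhere in this change:

package main

import "github.com/TykTechnologies/tyk/config"

func main() {
	globalConf := config.Global()
	globalConf.UptimeTests.DisableManagementPoller = false // this node may run the uptime poller
	globalConf.UptimeTests.PollerGroup = "eu-dc-1"         // illustrative group name
	config.SetGlobal(globalConf)
}
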
11 changes: 8 additions & 3 deletions gateway/host_checker_manager.go
@@ -131,20 +131,25 @@ func (hc *HostCheckerManager) AmIPolling() bool {
 		}).Error("No storage instance set for uptime tests! Disabling poller...")
 		return false
 	}
-	activeInstance, err := hc.store.GetKey(PollerCacheKey)
+	pollerCacheKey := PollerCacheKey
+	if config.Global().UptimeTests.PollerGroup != "" {
+		pollerCacheKey = pollerCacheKey + "." + config.Global().UptimeTests.PollerGroup
+	}
+
+	activeInstance, err := hc.store.GetKey(pollerCacheKey)
 	if err != nil {
 		log.WithFields(logrus.Fields{
 			"prefix": "host-check-mgr",
 		}).Debug("No Primary instance found, assuming control")
-		hc.store.SetKey(PollerCacheKey, hc.Id, 15)
+		hc.store.SetKey(pollerCacheKey, hc.Id, 15)
 		return true
 	}
 
 	if activeInstance == hc.Id {
 		log.WithFields(logrus.Fields{
 			"prefix": "host-check-mgr",
 		}).Debug("Primary instance set, I am master")
-		hc.store.SetKey(PollerCacheKey, hc.Id, 15) // Reset TTL
+		hc.store.SetKey(pollerCacheKey, hc.Id, 15) // Reset TTL
 		return true
 	}
 
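
Note: with a non-empty poller_group, each group elects its own primary uptime poller, because the Redis key used for the election is suffixed with the group name. The helper below is illustrative only (the merged code builds the key inline in AmIPolling); the base value "PollerActiveInstanceID" is what the new tests assert for PollerCacheKey:

package main

import "fmt"

// pollerCacheKeyFor sketches how the election key is derived per poller group.
func pollerCacheKeyFor(group string) string {
	key := "PollerActiveInstanceID" // value of PollerCacheKey, per the tests below
	if group != "" {
		key += "." + group
	}
	return key
}

func main() {
	fmt.Println(pollerCacheKeyFor(""))     // PollerActiveInstanceID
	fmt.Println(pollerCacheKeyFor("TEST")) // PollerActiveInstanceID.TEST
}

The storage layer also applies its "host-checker:" key prefix, so gateways configured with different groups never compete for the same election key.
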
196 changes: 196 additions & 0 deletions gateway/host_checker_manager_test.go
@@ -0,0 +1,196 @@
package gateway

import (
	"bytes"
	"net/http"
	"testing"
	"text/template"

	"github.com/TykTechnologies/tyk/config"
	"github.com/TykTechnologies/tyk/storage"
	uuid "github.com/satori/go.uuid"
)

func TestHostCheckerManagerInit(t *testing.T) {
	ts := StartTest()
	defer ts.Close()

	hc := HostCheckerManager{}
	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)

	if hc.Id == "" {
		t.Error("HostCheckerManager should create an Id on Init")
	}
	if hc.unhealthyHostList == nil {
		t.Error("HostCheckerManager should initialize unhealthyHostList on Init")
	}
	if hc.resetsInitiated == nil {
		t.Error("HostCheckerManager should initialize resetsInitiated on Init")
	}
}

func TestAmIPolling(t *testing.T) {
	hc := HostCheckerManager{}

	polling := hc.AmIPolling()
	if polling {
		t.Error("HostCheckerManager storage not configured, it should have failed.")
	}

	// Testing that when 2 host checker managers are active, only 1 takes control of the uptime checks
	globalConf := config.Global()
	globalConf.UptimeTests.PollerGroup = "TEST"
	config.SetGlobal(globalConf)

	ts := StartTest()
	defer ts.Close()

	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)
	hc2 := HostCheckerManager{}
	hc2.Init(redisStorage)

	polling = hc.AmIPolling()
	pollingHc2 := hc2.AmIPolling()
	if !polling && pollingHc2 {
		t.Error("HostCheckerManager storage configured, it shouldn't have failed.")
	}

	// Testing that the PollerCacheKey contains the poller_group
	activeInstance, err := hc.store.GetKey("PollerActiveInstanceID.TEST")
	if err != nil {
		t.Error("PollerActiveInstanceID.TEST should exist in redis.", activeInstance)
	}
	if activeInstance != hc.Id {
		t.Error("PollerActiveInstanceID.TEST value should be hc.Id")
	}

	// Testing that the PollerCacheKey doesn't contain the poller_group by default
	ResetTestConfig()
	emptyRedis()
	hc = HostCheckerManager{}

	redisStorage = &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)
	hc.AmIPolling()

	activeInstance, err = hc.store.GetKey("PollerActiveInstanceID")
	if err != nil {
		t.Error("PollerActiveInstanceID should exist in redis.", activeInstance)
	}
	if activeInstance != hc.Id {
		t.Error("PollerActiveInstanceID value should be hc.Id")
	}
}

func TestGenerateCheckerId(t *testing.T) {
	hc := HostCheckerManager{}
	hc.GenerateCheckerId()
	if hc.Id == "" {
		t.Error("HostCheckerManager should generate an Id on GenerateCheckerId")
	}

	uuid, _ := uuid.FromString(hc.Id)
	if uuid.Version() != 4 {
		t.Error("HostCheckerManager should generate a uuid.v4 id")
	}
}

func TestCheckActivePollerLoop(t *testing.T) {
	ts := StartTest()
	defer ts.Close()
	emptyRedis()

	hc := &HostCheckerManager{}
	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)
	// Deferring the stop of the CheckActivePollerLoop
	defer func(hc *HostCheckerManager) {
		hc.stopLoop = true
	}(hc)

	go hc.CheckActivePollerLoop()

	found := false

	// Giving 5 retries to find the active poller key
	for i := 0; i < 5; i++ {
		activeInstance, err := hc.store.GetKey("PollerActiveInstanceID")
		if activeInstance == hc.Id && err == nil {
			found = true
			break
		}
	}

	if !found {
		t.Error("activeInstance should be hc.Id when the CheckActivePollerLoop is running")
	}
}

func TestStartPoller(t *testing.T) {
	hc := HostCheckerManager{}
	hc.StartPoller()

	if hc.checker == nil {
		t.Error("StartPoller should have initialized the HostUptimeChecker")
	}
}

func TestRecordUptimeAnalytics(t *testing.T) {
	ts := StartTest()
	defer ts.Close()
	emptyRedis()

	hc := &HostCheckerManager{}
	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)

	specTmpl := template.Must(template.New("spec").Parse(sampleUptimeTestAPI))

	tmplData := struct {
		Host1, Host2 string
	}{
		testHttpFailureAny,
		testHttpFailureAny,
	}

	specBuf := &bytes.Buffer{}
	specTmpl.ExecuteTemplate(specBuf, specTmpl.Name(), &tmplData)

	spec := CreateDefinitionFromString(specBuf.String())
	spec.UptimeTests.Config.ExpireUptimeAnalyticsAfter = 30
	apisMu.Lock()
	apisByID = map[string]*APISpec{spec.APIID: spec}
	apisMu.Unlock()
	defer func() {
		apisMu.Lock()
		apisByID = make(map[string]*APISpec)
		apisMu.Unlock()
	}()

	hostData := HostData{
		CheckURL: "/test",
		Method:   http.MethodGet,
	}
	report := HostHealthReport{
		HostData:     hostData,
		ResponseCode: http.StatusOK,
		Latency:      10.00,
		IsTCPError:   false,
	}
	report.MetaData = make(map[string]string)
	report.MetaData[UnHealthyHostMetaDataAPIKey] = spec.APIID

	err := hc.RecordUptimeAnalytics(report)
	if err != nil {
		t.Error("RecordUptimeAnalytics shouldn't fail")
	}

	set, err := hc.store.Exists(UptimeAnalytics_KEYNAME)
	if err != nil || !set {
		t.Error("tyk-uptime-analytics should exist in redis.", err)
	}
}
11 changes: 8 additions & 3 deletions gateway/server.go
@@ -169,9 +169,14 @@ func setupGlobals(ctx context.Context) {
 		mainLog.Fatal("Analytics requires Redis Storage backend, please enable Redis in the tyk.conf file.")
 	}
 
-	// Initialise our Host Checker
-	healthCheckStore := storage.RedisCluster{KeyPrefix: "host-checker:"}
-	InitHostCheckManager(&healthCheckStore)
+	if !config.Global().UptimeTests.DisableManagementPoller && config.Global().ManagementNode {
+		mainLog.Warn("Running Uptime checks in a management node.")
+	}
+	// Initialise our Host Checker if it's enabled to be a management poller
+	if !config.Global().UptimeTests.DisableManagementPoller {
+		healthCheckStore := storage.RedisCluster{KeyPrefix: "host-checker:"}
+		InitHostCheckManager(&healthCheckStore)
+	}
 
 	redisStore := storage.RedisCluster{KeyPrefix: "apikey-", HashKeys: config.Global().HashKeys}
 	FallbackKeySesionManager.Init(&redisStore)
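
Note: combined with the existing ManagementNode setting, the new flag lets a dedicated management node skip the uptime poller entirely; this commit only adds the option, the decision stays with the operator. A minimal sketch of that configuration, again using the config.Global()/config.SetGlobal() pattern from the tests:

package main

import "github.com/TykTechnologies/tyk/config"

func main() {
	globalConf := config.Global()
	globalConf.ManagementNode = true
	globalConf.UptimeTests.DisableManagementPoller = true // do not start the host checker on this node
	config.SetGlobal(globalConf)
	// With this in place, setupGlobals skips InitHostCheckManager and the
	// "Running Uptime checks in a management node." warning is not logged.
}
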
