Refine cluster config (#34)
* added-all

* added-all

* added-all
tamalsaha committed Jul 13, 2017
1 parent 2b3786a commit 63be87a
Showing 8 changed files with 66 additions and 104 deletions.
10 changes: 6 additions & 4 deletions pkg/cmds/run.go
@@ -10,7 +10,7 @@ import (
"github.com/appscode/kubed/pkg/analytics"
"github.com/appscode/kubed/pkg/config"
"github.com/appscode/kubed/pkg/indexers"
"github.com/appscode/kubed/pkg/recover"
"github.com/appscode/kubed/pkg/recyclebin"
"github.com/appscode/kubed/pkg/watcher"
"github.com/appscode/log"
"github.com/appscode/pat"
@@ -27,11 +27,13 @@ import (

func NewCmdRun(version string) *cobra.Command {
opt := watcher.Options{
ConfigPath: runtime.GOPath() + "/src/github.com/appscode/kubed/hack/config/clusterconfig.yaml",
Indexer: "indexers.bleve",
EnableReverseIndex: true,
ServerAddress: ":8081",
EnableAnalytics: true,
ConfigPath: runtime.GOPath() + "/src/github.com/appscode/kubed/hack/config/clusterconfig.yaml",
EnableConfigSync: true,
ScratchDir: "/tmp",
}
cmd := &cobra.Command{
Use: "run",
@@ -91,8 +93,8 @@ func Run(opt watcher.Options) {
Cron: cron.New(),
Opt: opt,
Config: *cfg,
Saver: &recover.RecoverStuff{
Opt: cfg.Recover,
Saver: &recyclebin.RecoverStuff{
Opt: *cfg.RecycleBin,
},
SyncPeriod: time.Minute * 2,
}
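For context, here is a minimal sketch (not part of this commit) of handing the new defaults to Run directly. It assumes pkg/cmds is imported as package cmds, and the ConfigPath value "/etc/kubed/clusterconfig.yaml" is a hypothetical stand-in for the GOPATH-based default above.

// Sketch only: builds watcher.Options with the new EnableConfigSync and ScratchDir
// fields, mirroring what NewCmdRun does above, then calls Run.
package main

import (
	"github.com/appscode/kubed/pkg/cmds"
	"github.com/appscode/kubed/pkg/watcher"
)

func main() {
	opt := watcher.Options{
		Indexer:            "indexers.bleve",
		EnableReverseIndex: true,
		ServerAddress:      ":8081",
		ConfigPath:         "/etc/kubed/clusterconfig.yaml", // hypothetical path
		EnableConfigSync:   true,
		ScratchDir:         "/tmp",
	}
	cmds.Run(opt)
}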
86 changes: 40 additions & 46 deletions pkg/config/config.go
@@ -5,69 +5,63 @@ import (
"io/ioutil"
"os"
"path/filepath"
"time"

yc "github.com/appscode/go/encoding/yaml"
"github.com/ghodss/yaml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type RecoverSpec struct {
Path string
TTL time.Duration
HandleSpecUpdates bool
EmailOneDelete bool // Notify Via
}
const (
ConfigSyncKey = "kubernetes.appscode.com/sync-config"
)

type ClusterConfig struct {
ElasticSearch struct {
Endpoint string
LogIndexPrefix string `json:"log_index_prefix"`
LogStorageLifetime int64 `json:"log_storage_lifetime"`
}

InfluxDB struct {
Endpoint string
Username string
Password string
MonitoringStorageLifetime int64 `json:"monitoring_storage_lifetime"`
}

// For periodic full cluster backup
// https://github.com/appscode/kubed/issues/16
Backup struct {
Schedule string `json:"schedule,omitempty"`
Sanitize bool

Storage Backend
}
Elasticsearch *ElasticSearchSpec `json:"elasticsearch,omitempty"`
InfluxDB *InfluxDBSpec `json:"influxdb,omitempty"`
EventForwarder *EventForwarderSpec `json:"event_forwarder,omitempty"`
RecycleBin *RecycleBinSpec `json:"recycle_bin,omitempty"`
Backup *BackupSpec `json:"backup,omitempty"`
}

Recover RecoverSpec
type ElasticSearchSpec struct {
Endpoint string `json:"endpoint,omitempty"`
LogIndexPrefix string `json:"log_index_prefix,omitempty"`
TTL metav1.Duration `json:"ttl,omitempty"`
}

// Email Warning events
EventLogger struct {
NotifyVia string
Namespace []string // only email for a fixed set of namespaces (Optional)
}
type InfluxDBSpec struct {
Endpoint string `json:"endpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
TTL metav1.Duration `json:"ttl,omitempty"`
}

// Take ConfigMap/Secret with label to other namespaces
// kubernetes.appscode.com/sync-config: true
type RecycleBinSpec struct {
Path string `json:"path,omitempty"`
TTL metav1.Duration `json:"ttl,omitempty"`
HandleUpdate bool `json:"handle_update,omitempty"`
}

// Search
// Reverse Index
type EventForwarderSpec struct {
SkipForwardingStorageChange bool `json:"skip_forwarding_storage_change,omitempty"`
SkipForwardingIngressChange bool `json:"skip_forwarding_ingress_change,omitempty"`
SkipForwardingWarningEvents bool `json:"skip_forwarding_warning_events,omitempty"`
ForwardingEventNamespaces []string `json:"forwarding_event_namespace,omitempty"`
NotifyVia string `json:"notify_via,omitempty"`
}

ESEndpoint string
InfluxSecretName string
InfluxSecretNamespace string
ClusterKubedConfigSecretName string
ClusterKubedConfigSecretNamespace string
NotifyOnCertSoonToBeExpired bool
NotifyVia string
// For periodic full cluster backup
// https://github.com/appscode/kubed/issues/16
type BackupSpec struct {
Schedule string `json:"schedule,omitempty"`
Sanitize bool `json:"sanitize,omitempty"`
Storage Backend `json:",inline"`
}

type Backend struct {
StorageSecretName string `json:"storageSecretName,omitempty"`

Local *LocalSpec `json:"local"`
Local *LocalSpec `json:"local,omitempty"`
S3 *S3Spec `json:"s3,omitempty"`
GCS *GCSSpec `json:"gcs,omitempty"`
Azure *AzureSpec `json:"azure,omitempty"`
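To make the restructured schema concrete, here is a minimal sketch (not part of this commit) that loads the new ClusterConfig with the packages already imported above. Every top-level section is now an optional pointer, and the TTL fields are metav1.Duration, so they unmarshal from Go-style duration strings such as "240h". The file path matches the default ConfigPath in run.go; the printed fields are illustrative.

// Sketch only: reads clusterconfig.yaml and inspects the new optional sections.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/appscode/kubed/pkg/config"
	"github.com/ghodss/yaml"
)

func main() {
	data, err := ioutil.ReadFile("hack/config/clusterconfig.yaml")
	if err != nil {
		panic(err)
	}
	var cfg config.ClusterConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	// Sections are pointers, so nil-check before use.
	if cfg.RecycleBin != nil {
		fmt.Println("recycle bin:", cfg.RecycleBin.Path, "ttl:", cfg.RecycleBin.TTL.Duration)
	}
	if cfg.Elasticsearch != nil {
		fmt.Println("es endpoint:", cfg.Elasticsearch.Endpoint, "prefix:", cfg.Elasticsearch.LogIndexPrefix)
	}
	if cfg.Backup != nil {
		fmt.Println("backup schedule:", cfg.Backup.Schedule)
	}
}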
38 changes: 0 additions & 38 deletions pkg/dns/dns.go

This file was deleted.

6 changes: 3 additions & 3 deletions pkg/elasticsearch/janitor.go
@@ -16,19 +16,19 @@ type Janitor struct {
func (j *Janitor) CleanES() error {
client, err := elastic.NewClient(
// elastic.SetSniff(false),
elastic.SetURL(j.Config.ElasticSearch.Endpoint),
elastic.SetURL(j.Config.Elasticsearch.Endpoint),
)
if err != nil {
return err
}

now := time.Now().UTC()
oldDate := now.Add(time.Duration(-(j.Config.ElasticSearch.LogStorageLifetime)) * time.Second)
oldDate := now.Add(-j.Config.Elasticsearch.TTL.Duration)

// how many indices should we check for deletion? I set it to 7
for i := 1; i <= 7; i++ {
date := oldDate.AddDate(0, 0, -i)
prefix := fmt.Sprintf("%s%s", j.Config.ElasticSearch.LogIndexPrefix, date.Format("2006.01.02"))
prefix := fmt.Sprintf("%s%s", j.Config.Elasticsearch.LogIndexPrefix, date.Format("2006.01.02"))

if _, err := client.Search(prefix).Do(); err == nil {
if _, err := client.DeleteIndex(prefix).Do(); err != nil {
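A small worked example of the index names the janitor now targets. The prefix "logstash-", the 240h TTL, and the run date are assumptions, not values from this commit.

// Sketch only: reproduces the cutoff arithmetic from CleanES with illustrative values.
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Date(2017, 7, 13, 0, 0, 0, 0, time.UTC)
	ttl := 240 * time.Hour // would come from cfg.Elasticsearch.TTL.Duration
	prefix := "logstash-"  // would come from cfg.Elasticsearch.LogIndexPrefix

	oldDate := now.Add(-ttl)
	for i := 1; i <= 7; i++ {
		date := oldDate.AddDate(0, 0, -i)
		// prints logstash-2017.07.02 down to logstash-2017.06.26
		fmt.Println(prefix + date.Format("2006.01.02"))
	}
}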
2 changes: 1 addition & 1 deletion pkg/influxdb/lib.go → pkg/influxdb/janitor.go
@@ -28,7 +28,7 @@ func (j *Janitor) CleanInflux() error {
}

query := influxdb.Query{
Command: fmt.Sprintf("ALTER RETENTION POLICY default ON k8s DURATION %vs", j.Config.InfluxDB.MonitoringStorageLifetime),
Command: fmt.Sprintf("ALTER RETENTION POLICY default ON k8s DURATION %vs", int(j.Config.InfluxDB.TTL.Duration.Seconds())),
Database: "k8s",
}
_, err = client.Query(query)
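The retention value is now derived from a metav1.Duration. A quick sketch of rendering the TTL in whole seconds for the DURATION clause; the 30-day TTL is illustrative.

// Sketch only: shows the InfluxQL statement produced for a 30-day TTL.
package main

import (
	"fmt"
	"time"
)

func main() {
	ttl := 30 * 24 * time.Hour // would come from cfg.InfluxDB.TTL.Duration
	cmd := fmt.Sprintf("ALTER RETENTION POLICY default ON k8s DURATION %vs", int(ttl.Seconds()))
	fmt.Println(cmd) // ALTER RETENTION POLICY default ON k8s DURATION 2592000s
}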
File renamed without changes.
7 changes: 3 additions & 4 deletions pkg/recover/saver.go → pkg/recyclebin/saver.go
@@ -1,19 +1,18 @@
package recover
package recyclebin

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"

"github.com/appscode/kubed/pkg/config"
"github.com/ghodss/yaml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type RecoverStuff struct {
Opt config.RecoverSpec
Opt config.RecycleBinSpec
}

func (c *RecoverStuff) Save(meta metav1.ObjectMeta, v interface{}) error {
@@ -24,7 +23,7 @@ func (c *RecoverStuff) Save(meta metav1.ObjectMeta, v interface{}) error {
return err
}
name := filepath.Base(p)
fn := fmt.Sprintf("%s.%d.yaml", name, time.Now().UTC().Unix())
fn := fmt.Sprintf("%s.%d.yaml", name, meta.CreationTimestamp.Unix())

fullPath := filepath.Join(dir, fn)
bytes, err := yaml.Marshal(v)
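To illustrate the on-disk layout after this change, a sketch of the file name Save now produces: the object's name plus its creation timestamp, rather than the wall-clock time of deletion. The ConfigMap name and timestamp are made up, and the name component (derived from the object's path in Save) is approximated here by meta.Name.

// Sketch only: mirrors the file-name construction in Save for a ConfigMap named
// "app-config" created at 2017-07-13T09:30:00Z.
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	meta := metav1.ObjectMeta{
		Name:              "app-config",
		CreationTimestamp: metav1.NewTime(time.Date(2017, 7, 13, 9, 30, 0, 0, time.UTC)),
	}
	fn := fmt.Sprintf("%s.%d.yaml", meta.Name, meta.CreationTimestamp.Unix())
	fmt.Println(fn) // app-config.1499938200.yaml
}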
21 changes: 13 additions & 8 deletions pkg/watcher/watcher.go
@@ -11,7 +11,7 @@ import (
"github.com/appscode/kubed/pkg/elasticsearch"
"github.com/appscode/kubed/pkg/indexers"
"github.com/appscode/kubed/pkg/influxdb"
"github.com/appscode/kubed/pkg/recover"
"github.com/appscode/kubed/pkg/recyclebin"
"github.com/appscode/log"
srch_cs "github.com/appscode/searchlight/client/clientset"
scs "github.com/appscode/stash/client/clientset"
@@ -24,13 +24,18 @@ import (
)

type Options struct {
Master string
KubeConfig string
EnableAnalytics bool
Master string
KubeConfig string

ConfigPath string
ServerAddress string
Indexer string
EnableReverseIndex bool
ServerAddress string
ConfigPath string

EnableConfigSync bool
ScratchDir string

EnableAnalytics bool
}

type Watchers struct {
@@ -43,7 +48,7 @@ type Watchers struct {

Opt Options
Config config.ClusterConfig
Saver *recover.RecoverStuff
Saver *recyclebin.RecoverStuff
Indexer *indexers.ResourceIndexer
ReverseIndex *indexers.ReverseIndexer

@@ -97,7 +102,7 @@ func (w *Watchers) StartCron() {
janitor.CleanES()
})
w.Cron.AddFunc("@every 24h", func() {
err := filepath.Walk(w.Config.Recover.Path, func(path string, info os.FileInfo, err error) error {
err := filepath.Walk(w.Config.RecycleBin.Path, func(path string, info os.FileInfo, err error) error {
// delete old objects
return nil
})
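The 24h cron job above only walks RecycleBin.Path and leaves the per-file cleanup as a stub (the callback just returns nil). One possible implementation, purely a sketch and not what the commit ships, would prune files whose modification time exceeds the configured TTL; pruneRecycleBin is a hypothetical helper name.

// Sketch only: a possible body for the stubbed Walk callback, removing recycle-bin
// files older than the configured TTL.
package recyclebin

import (
	"os"
	"path/filepath"
	"time"
)

func pruneRecycleBin(root string, ttl time.Duration) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		if time.Since(info.ModTime()) > ttl {
			return os.Remove(path)
		}
		return nil
	})
}

Inside StartCron this would be called as pruneRecycleBin(w.Config.RecycleBin.Path, w.Config.RecycleBin.TTL.Duration).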
