Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[v9.17.0] Maestro v9 to coexist with other Game Room providers #623

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 16 additions & 18 deletions controller/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,26 +87,23 @@ func CreateScheduler(
return err
}

if exists {
logger.Error("namespace already exists, aborting scheduler creation")
return fmt.Errorf(`namespace "%s" already exists`, namespace.Name)
}

err = mr.WithSegment(models.SegmentNamespace, func() error {
return namespace.Create(clientset)
})
if !exists {
err = mr.WithSegment(models.SegmentNamespace, func() error {
return namespace.Create(clientset)
})

if err != nil {
logger.WithError(err).Error("error creating namespace")
if err != nil {
logger.WithError(err).Error("error creating namespace")

deleteErr := mr.WithSegment(models.SegmentNamespace, func() error {
return namespace.Delete(clientset)
})
if deleteErr != nil {
logger.WithError(err).Error("error deleting namespace")
return deleteErr
deleteErr := mr.WithSegment(models.SegmentNamespace, func() error {
return namespace.Delete(clientset)
})
if deleteErr != nil {
logger.WithError(err).Error("error deleting namespace")
return deleteErr
}
return err
}
return err
}

scheduler := models.NewScheduler(configYAML.Name, configYAML.Game, yamlString)
Expand Down Expand Up @@ -840,7 +837,8 @@ func UpdateSchedulerConfig(
}

// MustUpdatePods returns true if it's necessary to delete old pod and create a new one
// so this have the new configuration.
//
// so they have the new configuration.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is this intended?

func MustUpdatePods(old, new *models.ConfigYAML) bool {
if old.Version() == "v1" && new.Version() == "v2" && len(new.Containers) != 1 {
return true
Expand Down
28 changes: 0 additions & 28 deletions controller/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -1413,34 +1413,6 @@ func deleteSchedulerHelper(
}
}

err = mr.WithSegment(models.SegmentNamespace, func() error {
return namespace.Delete(clientset)
})
if err != nil {
logger.WithError(err).Error("failed to delete namespace while deleting scheduler")
return err
}
timeoutNamespace := time.NewTimer(time.Duration(timeoutSec) * time.Second)
defer timeoutNamespace.Stop()

time.Sleep(10 * time.Nanosecond) //This negligible sleep avoids race condition
exit = false
for !exit {
select {
case <-timeoutNamespace.C:
return errors.New("timeout deleting namespace")
default:
exists, existsErr := namespace.Exists(clientset)
if existsErr != nil {
logger.WithError(existsErr).Error("error checking namespace existence")
} else if !exists {
exit = true
}
logger.Debug("deleting scheduler namespace")
time.Sleep(time.Duration(1) * time.Second)
}
}

// Delete from DB must be the last operation because
// if kubernetes failed to delete pods, watcher will recreate
// and keep the last state
Expand Down
6 changes: 3 additions & 3 deletions metadata/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@

package metadata

//Version of Maestro
var Version = "9.15.2"
// Version of Maestro
var Version = "9.17.0"

//KubeVersion is the desired Kubernetes version
// KubeVersion is the desired Kubernetes version
var KubeVersion = "v1.13.9"
26 changes: 18 additions & 8 deletions watcher/watcher.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ import (
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
informersv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
Expand Down Expand Up @@ -854,13 +855,13 @@ func (w *Watcher) AutoScale() error {
})
logger.Info("starting auto scale")

if reporters.HasReporters() {
reporters.Report(reportersConstants.EventWatcherAutoScale, map[string]interface{}{
reportersConstants.TagGame: w.GameName,
reportersConstants.TagScheduler: w.SchedulerName,
reportersConstants.ValueGauge: "1.00",
})
}
if reporters.HasReporters() {
reporters.Report(reportersConstants.EventWatcherAutoScale, map[string]interface{}{
reportersConstants.TagGame: w.GameName,
reportersConstants.TagScheduler: w.SchedulerName,
reportersConstants.ValueGauge: "1.00",
})
}

scheduler, autoScalingInfo, roomCountByStatus, err := controller.GetSchedulerScalingInfo(
logger,
Expand Down Expand Up @@ -1700,6 +1701,7 @@ func (w *Watcher) podEventHandler(key string) error {
}

kubePod, err := w.Lister.Get(name)
w.Logger.Infof("[podEventHandler]: name=%s, kubePod=%s, err=%s", name, kubePod, err)
if err != nil {
if !k8serrors.IsNotFound(err) {
return err
Expand Down Expand Up @@ -1794,7 +1796,15 @@ func (w *Watcher) watchPods(stopCh <-chan struct{}) {
}

func (w *Watcher) configureKubeWatch() {
w.Informer = informersv1.NewPodInformer(w.KubernetesClient, w.SchedulerName, 30*time.Second, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
w.Informer = informersv1.NewFilteredPodInformer(
w.KubernetesClient,
w.SchedulerName,
30*time.Second,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(options *metav1.ListOptions) {
options.LabelSelector = labels.Set{"heritage": "maestro"}.AsSelector().String()
},
)
w.Lister = listersv1.NewPodLister(w.Informer.GetIndexer()).Pods(w.SchedulerName)
rateLimiter := workqueue.NewItemFastSlowRateLimiter(20*time.Millisecond, 500*time.Millisecond, 5)
w.Queue = workqueue.NewNamedRateLimitingQueue(rateLimiter, w.SchedulerName)
Expand Down
Loading