cmd/mc-router/main.go (41 additions, 6 deletions)
@@ -34,10 +34,14 @@ type WebhookConfig struct {
}

type AutoScale struct {
Up bool `usage:"Increase Kubernetes StatefulSet Replicas (only) from 0 to 1 on respective backend servers when accessed"`
Down bool `default:"false" usage:"Decrease Kubernetes StatefulSet Replicas (only) from 1 to 0 on respective backend servers after there are no connections"`
DownAfter string `default:"10m" usage:"Server scale down delay after there are no connections"`
AllowDeny string `usage:"Path to config for server allowlists and denylists. If a global/server entry is specified, only players allowed to connect to the server will be able to trigger a scale up when -auto-scale-up is enabled or cancel active down scalers when -auto-scale-down is enabled"`
Up bool `usage:"Increase Kubernetes StatefulSet Replicas (only) from 0 to 1 on respective backend servers when accessed"`
Down bool `default:"false" usage:"Decrease Kubernetes StatefulSet Replicas (only) from 1 to 0 on respective backend servers after there are no connections"`
DownAfter string `default:"10m" usage:"Server scale down delay after there are no connections"`
AllowDeny string `usage:"Path to config for server allowlists and denylists. If a global/server entry is specified, only players allowed to connect to the server will be able to trigger a scale up when -auto-scale-up is enabled or cancel active down scalers when -auto-scale-down is enabled"`
FakeOnline bool `default:"false" usage:"Enable fake online status when backend is offline and auto-scale-up is enabled"`
FakeOnlineMOTD string `default:"Server is sleeping\nJoin to wake it up" usage:"Custom MOTD to show when backend is offline, status has been cached and auto-scale-up is enabled"`
CacheStatus bool `default:"false" usage:"Cache status response for backends"`
CacheStatusInterval string `default:"30s" usage:"Interval to update the status cache"`
}

type Config struct {
@@ -138,7 +142,6 @@ func main() {
// Only one instance should be created
server.DownScaler = server.NewDownScaler(ctx, downScalerEnabled, downScalerDelay)


c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)

@@ -167,7 +170,21 @@ func main() {
trustedIpNets = append(trustedIpNets, ipNet)
}

connector := server.NewConnector(metricsBuilder.BuildConnectorMetrics(), config.UseProxyProtocol, config.ReceiveProxyProtocol, trustedIpNets, config.RecordLogins, autoScaleAllowDenyConfig)
fakeOnlineEnabled := config.AutoScale.FakeOnline && config.AutoScale.Up && (config.InKubeCluster || config.KubeConfig != "")

connectorConfig := server.ConnectorConfig{
SendProxyProto: config.UseProxyProtocol,
ReceiveProxyProto: config.ReceiveProxyProtocol,
TrustedProxyNets: trustedIpNets,
RecordLogins: config.RecordLogins,
AutoScaleUpAllowDenyConfig: autoScaleAllowDenyConfig,
AutoScaleUp: config.AutoScale.Up,
FakeOnline: fakeOnlineEnabled,
FakeOnlineMOTD: config.AutoScale.FakeOnlineMOTD,
CacheStatus: config.AutoScale.CacheStatus,
}

connector := server.NewConnector(metricsBuilder.BuildConnectorMetrics(), connectorConfig)

clientFilter, err := server.NewClientFilter(config.ClientsToAllow, config.ClientsToDeny)
if err != nil {
@@ -184,6 +201,15 @@ func main() {
server.NewWebhookNotifier(config.Webhook.Url, config.Webhook.RequireUser))
}

var cacheInterval time.Duration
if config.AutoScale.CacheStatus {
cacheInterval, err = time.ParseDuration(config.AutoScale.CacheStatusInterval)
Owner review comment: It seems like this config processing and then ultimately cacheInterval should be fed into server.ConnectorConfig.

if err != nil {
logrus.WithError(err).Fatal("Unable to parse cache status interval")
}
logrus.WithField("interval", config.AutoScale.CacheStatusInterval).Debug("Using cache status interval")
}
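
A minimal sketch of what that reorganization could look like, assuming server.ConnectorConfig gains a CacheStatusInterval field of type time.Duration (that field is an assumption for illustration, not something this diff adds):

// Hypothetical rearrangement: parse the interval before building the connector
// config so the parsed duration travels with the rest of the connector settings.
var cacheInterval time.Duration
if config.AutoScale.CacheStatus {
	var parseErr error
	cacheInterval, parseErr = time.ParseDuration(config.AutoScale.CacheStatusInterval)
	if parseErr != nil {
		logrus.WithError(parseErr).Fatal("Unable to parse cache status interval")
	}
}

connectorConfig := server.ConnectorConfig{
	SendProxyProto:             config.UseProxyProtocol,
	ReceiveProxyProto:          config.ReceiveProxyProtocol,
	TrustedProxyNets:           trustedIpNets,
	RecordLogins:               config.RecordLogins,
	AutoScaleUpAllowDenyConfig: autoScaleAllowDenyConfig,
	AutoScaleUp:                config.AutoScale.Up,
	FakeOnline:                 fakeOnlineEnabled,
	FakeOnlineMOTD:             config.AutoScale.FakeOnlineMOTD,
	CacheStatus:                config.AutoScale.CacheStatus,
	CacheStatusInterval:        cacheInterval, // assumed new time.Duration field
}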

if config.NgrokToken != "" {
connector.UseNgrok(config.NgrokToken)
}
@@ -240,6 +266,15 @@ func main() {
logrus.WithError(err).Fatal("Unable to start metrics reporter")
}

if config.AutoScale.CacheStatus {
logrus.Info("Starting status cache updater")
connector.StatusCache.StartUpdater(connector, cacheInterval, func() map[string]string {
mappings := server.Routes.GetMappings()
logrus.WithField("mappings", mappings).Debug("Updating status cache with mappings")
return mappings
})
Owner review comment on lines +271 to +275: ...and that starting of the cache updater should be encapsulated inside of the connector.WaitForConnections method. As it is, it's leaking a bit too much of runtime behavior into the main method.

}
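
A rough sketch of the reviewer's second suggestion: let the connector start the updater itself. The WaitForConnections signature and the connector's internal field names below are assumptions for illustration; only Connector, StatusCache.StartUpdater, Routes.GetMappings, and the ConnectorConfig fields appear in this diff.

// Hypothetical shape inside the server package: the connector already receives
// ConnectorConfig in NewConnector, so it can decide on its own whether to start
// the status cache updater, keeping this runtime wiring out of main().
func (c *Connector) WaitForConnections(ctx context.Context, listenAddress string) error {
	if c.config.CacheStatus {
		logrus.Info("Starting status cache updater")
		c.StatusCache.StartUpdater(c, c.config.CacheStatusInterval, func() map[string]string {
			return Routes.GetMappings()
		})
	}

	// ... existing listener setup and connection-accept loop ...
	return nil
}

With something like that in place, the status-cache block above could drop out of main() entirely.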

// wait for process-stop signal
<-c
logrus.Info("Stopping. Waiting for connections to complete...")
go.mod (2 additions, 0 deletions)
@@ -23,6 +23,7 @@ require (
)

require (
github.com/Raqbit/mc-pinger v0.2.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -50,6 +51,7 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/sethvargo/go-retry v0.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect