improve logs, fix banner formatting (#14456)
harshavardhana committed Mar 3, 2022
1 parent b48f719 commit 0e3bafc
Showing 22 changed files with 73 additions and 100 deletions.
24 changes: 14 additions & 10 deletions cmd/background-newdisks-heal-ops.go
@@ -263,7 +263,7 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content - use 'mc admin heal alias/ --verbose' to check the status",
drivesToHeal, defaultMonitorNewDiskInterval))

// Heal any disk format and metadata early, if possible.
@@ -333,7 +333,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
// Ensure that reformatting disks is finished
bgSeq.queueHealTask(healSource{bucket: nopHeal}, madmin.HealItemMetadata)

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal - 'mc admin heal alias/ --verbose' to check the status.",
len(healDisks)))

erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
@@ -404,13 +404,15 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
if serverDebugLog {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
}

// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool",
disk, humanize.Ordinal(i+1))
logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', disk was swapped again on %s pool: %w",
disk, humanize.Ordinal(i+1), err))
tracker = newHealingTracker(disk)
}

@@ -438,12 +440,14 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
continue
}

logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
logger.Info("Summary:\n")
tracker.printTo(os.Stdout)
if serverDebugLog {
logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
logger.Info("Summary:\n")
tracker.printTo(os.Stdout)
logger.Info("\n")
}
logger.LogIf(ctx, tracker.delete(ctx))
logger.Info("\n")

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
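The pattern in this file's hunks is consistent: per-disk healing chatter only prints when a debug flag is set, while real failures are wrapped with %w and logged unconditionally. A minimal standalone sketch of that gating, where `serverDebugLog` mirrors the flag from the diff but the env-var wiring and `debugf` helper are illustrative assumptions, not MinIO's actual code:

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"os"
)

// serverDebugLog mirrors the gate used in the hunks above; deriving it
// from an environment variable here is an assumption for illustration.
var serverDebugLog = os.Getenv("SERVER_DEBUG") == "on"

// debugf prints progress chatter only when debug logging is enabled.
func debugf(format string, args ...interface{}) {
	if serverDebugLog {
		log.Printf(format, args...)
	}
}

func main() {
	disk := "/mnt/disk1"
	debugf("Healing disk '%s' on 1st pool", disk) // silent unless SERVER_DEBUG=on

	// Failures are not gated: wrap with %w and always log them.
	base := errors.New("tracker file not found")
	err := fmt.Errorf("healing tracker missing on '%s', disk was swapped: %w", disk, base)
	log.Println(err)
}
```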
2 changes: 1 addition & 1 deletion cmd/bootstrap-peer-server.go
@@ -206,7 +206,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
for _, clnt := range clnts {
if err := clnt.Verify(ctx, srcCfg); err != nil {
if !isNetworkError(err) {
- logger.Info(fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err).Error())
+ logger.LogIf(ctx, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err))
}
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
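The replacement here keeps the error as a value (`logger.LogIf(ctx, err)`) instead of flattening it to a string with `.Error()`. A small standard-library sketch of why the %w wrapping matters; the peer name and sentinel error below are made up for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// errConfigMismatch is a hypothetical sentinel standing in for whatever
// clnt.Verify returns in the real code.
var errConfigMismatch = errors.New("config mismatch")

func verify(peer string) error {
	// %w preserves the error chain; .Error() would reduce it to plain text.
	return fmt.Errorf("%s has incorrect configuration: %w", peer, errConfigMismatch)
}

func main() {
	err := verify("http://peer-1:9000")
	fmt.Println(err)                               // full, wrapped message
	fmt.Println(errors.Is(err, errConfigMismatch)) // true: chain survives for callers
}
```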
11 changes: 2 additions & 9 deletions cmd/common-main.go
@@ -327,7 +327,7 @@ func checkUpdate(mode string) {
return
}

- logStartupMessage(prepareUpdateMessage("\nRun `mc admin update`", lrTime.Sub(crTime)))
+ logger.Info(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
}

func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
@@ -762,7 +762,7 @@ func handleCommonEnvVars() {
" Please use %s and %s",
config.EnvAccessKey, config.EnvSecretKey,
config.EnvRootUser, config.EnvRootPassword)
- logStartupMessage(color.RedBold(msg))
+ logger.Info(color.RedBold(msg))
}
globalActiveCred = cred
}
@@ -827,13 +827,6 @@ func handleCommonEnvVars() {
}
}

- func logStartupMessage(msg string) {
- if globalConsoleSys != nil {
- globalConsoleSys.Send(msg, string(logger.All))
- }
- logger.StartupMessage(msg)
- }
-
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, false, nil
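The deleted `logStartupMessage` fanned each banner line out to two sinks: the console subsystem and a dedicated startup logger. After this commit, call sites go through `logger.Info` alone. A reconstruction of the removed shape, with stand-in types where MinIO's are not shown in the hunk:

```go
package main

import "fmt"

// consoleSink stands in for MinIO's globalConsoleSys; only its Send
// method matters for the pattern being removed.
type consoleSink struct{}

func (c *consoleSink) Send(msg string) { fmt.Println("console:", msg) }

var globalConsole *consoleSink // may be nil before the console starts

// logStartupMessage is the removed two-destination helper: one message,
// two sinks. Routing everything through a single logger.Info call
// removes the duplicate path (and this nil check) entirely.
func logStartupMessage(msg string) {
	if globalConsole != nil {
		globalConsole.Send(msg)
	}
	fmt.Println("startup:", msg)
}

func main() {
	logStartupMessage("MinIO Object Storage Server")
}
```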
2 changes: 1 addition & 1 deletion cmd/config-current.go
@@ -529,7 +529,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
}

if globalSTSTLSConfig.InsecureSkipVerify {
logger.Info("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify)
logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify))
}

globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
4 changes: 2 additions & 2 deletions cmd/config-encrypted.go
@@ -74,7 +74,7 @@ func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client)
if err != nil {
return err
}
logger.Info("Attempting to re-encrypt IAM users and policies on etcd with %q (%s)", stat.DefaultKey, stat.Name)
logger.Info(fmt.Sprintf("Attempting to re-encrypt IAM users and policies on etcd with %q (%s)", stat.DefaultKey, stat.Name))
}

listCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
@@ -143,7 +143,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, encrypted bool) error {
if err != nil {
return err
}
logger.Info("Attempting to re-encrypt config, IAM users and policies on MinIO with %q (%s)", stat.DefaultKey, stat.Name)
logger.Info(fmt.Sprintf("Attempting to re-encrypt config, IAM users and policies on MinIO with %q (%s)", stat.DefaultKey, stat.Name))
}

var marker string
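Both hunks in this file switch from handing printf-style arguments to the logger to pre-formatting with `fmt.Sprintf`, so the call passes one finished string. A small sketch of the difference, using the standard `log` package and invented values:

```go
package main

import (
	"fmt"
	"log"
)

func main() {
	defaultKey, name := "my-minio-key", "vault"

	// Before: the logging call is responsible for formatting.
	log.Printf("Attempting to re-encrypt IAM users and policies on etcd with %q (%s)", defaultKey, name)

	// After: format once, pass a single argument; any sink that treats
	// its input as a plain string now renders it identically.
	msg := fmt.Sprintf("Attempting to re-encrypt IAM users and policies on etcd with %q (%s)", defaultKey, name)
	log.Print(msg)
}
```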
8 changes: 5 additions & 3 deletions cmd/disk-cache-backend.go
@@ -24,6 +24,7 @@ import (
"crypto/rand"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -252,16 +253,17 @@ func (c *diskCache) diskUsageLow() bool {
// Returns if the disk usage reaches or exceeds configured cache quota when size is added.
// If current usage without size exceeds high watermark a GC is automatically queued.
func (c *diskCache) diskSpaceAvailable(size int64) bool {
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)

gcTriggerPct := c.quotaPct * c.highWatermark / 100
di, err := disk.GetInfo(c.dir)
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return false
}
if di.Total == 0 {
logger.Info("diskCache: Received 0 total disk size")
logger.LogIf(ctx, errors.New("diskCache: Received 0 total disk size"))
return false
}
usedPercent := float64(di.Used) * 100 / float64(di.Total)
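This hunk hoists the `cachePath`-tagged context to the top of `diskSpaceAvailable`, so both failure paths — the `disk.GetInfo` error and the zero-total case, which now becomes an `errors.New` value — log with the same tag. A toy version of the tag-once pattern, with stand-ins for MinIO's `logger.SetReqInfo`/`logger.LogIf`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type tagKey struct{}

// withTag and logIf are stand-ins for logger.SetReqInfo and logger.LogIf.
func withTag(ctx context.Context, path string) context.Context {
	return context.WithValue(ctx, tagKey{}, path)
}

func logIf(ctx context.Context, err error) {
	if err != nil {
		fmt.Printf("cachePath=%v: %v\n", ctx.Value(tagKey{}), err)
	}
}

func diskSpaceAvailable(total, used uint64) bool {
	// Tag once, up front: every exit path below logs with the same context.
	ctx := withTag(context.Background(), "/mnt/cache")
	if total == 0 {
		logIf(ctx, errors.New("diskCache: received 0 total disk size"))
		return false
	}
	return float64(used)*100/float64(total) < 80
}

func main() {
	fmt.Println(diskSpaceAvailable(0, 0))    // false, logs a tagged error
	fmt.Println(diskSpaceAvailable(100, 50)) // true
}
```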
4 changes: 2 additions & 2 deletions cmd/disk-cache.go
@@ -614,7 +614,7 @@ func newCache(config cache.Config) ([]*diskCache, bool, error) {
}

func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {
logStartupMessage(color.Blue("Cache migration initiated ...."))
logger.Info(color.Blue("Cache migration initiated ...."))

g := errgroup.WithNErrs(len(c.cache))
for index, dc := range c.cache {
@@ -643,7 +643,7 @@ func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {

// update migration status
c.migrating = false
logStartupMessage(color.Blue("Cache migration completed successfully."))
logger.Info(color.Blue("Cache migration completed successfully."))
}

// PutObject - caches the uploaded object for single Put operations
2 changes: 0 additions & 2 deletions cmd/erasure-object.go
@@ -343,10 +343,8 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje
switch {
case errors.Is(err, errFileNotFound):
scan = madmin.HealNormalScan
logger.Info("Healing required, triggering async heal missing shards for %s", pathJoin(bucket, object, fi.VersionID))
case errors.Is(err, errFileCorrupt):
scan = madmin.HealDeepScan
logger.Info("Healing required, triggering async heal bitrot for %s", pathJoin(bucket, object, fi.VersionID))
}
switch scan {
case madmin.HealNormalScan, madmin.HealDeepScan:
6 changes: 1 addition & 5 deletions cmd/erasure-sets.go
@@ -221,15 +221,13 @@ func (s *erasureSets) connectDisks() {
if err != nil {
if endpoint.IsLocal && errors.Is(err, errUnformattedDisk) {
globalBackgroundHealState.pushHealLocalDisks(endpoint)
logger.Info(fmt.Sprintf("Found unformatted drive %s, attempting to heal...", endpoint))
} else {
printEndpointError(endpoint, err, true)
}
return
}
if disk.IsLocal() && disk.Healing() != nil {
globalBackgroundHealState.pushHealLocalDisks(disk.Endpoint())
logger.Info(fmt.Sprintf("Found the drive %s that needs healing, attempting to heal...", disk))
}
s.erasureDisksMu.RLock()
setIndex, diskIndex, err := findDiskIndex(s.format, format)
@@ -1256,9 +1254,7 @@ func markRootDisksAsDown(storageDisks []StorageAPI, errs []error) {
if storageDisks[i] != nil && infos[i].RootDisk {
// We should not heal on root disk. i.e in a situation where the minio-administrator has unmounted a
// defective drive we should not heal a path on the root disk.
logger.Info("Disk `%s` the same as the system root disk.\n"+
"Disk will not be used. Please supply a separate disk and restart the server.",
storageDisks[i].String())
logger.LogIf(GlobalContext, fmt.Errorf("Disk `%s` is part of root disk, will not be used", storageDisks[i]))
storageDisks[i] = nil
}
}
3 changes: 1 addition & 2 deletions cmd/erasure.go
@@ -29,7 +29,6 @@ import (

"github.com/minio/madmin-go"
"github.com/minio/minio/internal/bpool"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/sync/errgroup"
@@ -353,7 +352,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
// Collect disks we can use.
disks, healing := er.getOnlineDisksWithHealing()
if len(disks) == 0 {
logger.Info(color.Green("data-scanner:") + " all disks are offline or being healed, skipping scanner")
logger.LogIf(ctx, errors.New("data-scanner: all disks are offline or being healed, skipping scanner cycle"))
return nil
}

2 changes: 1 addition & 1 deletion cmd/gateway-main.go
@@ -377,7 +377,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
}

// TODO: remove the following line by June 1st.
- logStartupMessage(
+ logger.Info(
color.RedBold(`
===================================================================================
**** WARNING: MinIO Gateway will be removed by June 1st from MinIO repository *****
15 changes: 8 additions & 7 deletions cmd/gateway-startup-msg.go
@@ -22,6 +22,7 @@ import (
"strings"

"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/logger"
)

// Prints the formatted startup message.
@@ -45,7 +46,7 @@ func printGatewayStartupMessage(apiEndPoints []string, backendType string) {
if globalMinioConsolePortAuto && globalBrowserEnabled {
msg := fmt.Sprintf("\nWARNING: Console endpoint is listening on a dynamic port (%s), please use --console-address \":PORT\" to choose a static port.",
globalMinioConsolePort)
- logStartupMessage(color.RedBold(msg))
+ logger.Info(color.RedBold(msg))
}
}

@@ -57,19 +58,19 @@ func printGatewayCommonMsg(apiEndpoints []string) {
apiEndpointStr := strings.Join(apiEndpoints, " ")

// Colorize the message and print.
logStartupMessage(color.Blue("API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr)))
logger.Info(color.Blue("API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr)))
if color.IsTerminal() && !globalCLIContext.Anonymous {
logStartupMessage(color.Blue("RootUser: ") + color.Bold(fmt.Sprintf("%s ", cred.AccessKey)))
logStartupMessage(color.Blue("RootPass: ") + color.Bold(fmt.Sprintf("%s ", cred.SecretKey)))
logger.Info(color.Blue("RootUser: ") + color.Bold(fmt.Sprintf("%s ", cred.AccessKey)))
logger.Info(color.Blue("RootPass: ") + color.Bold(fmt.Sprintf("%s ", cred.SecretKey)))
}
printEventNotifiers()

if globalBrowserEnabled {
consoleEndpointStr := strings.Join(stripStandardPorts(getConsoleEndpoints(), globalMinioConsoleHost), " ")
logStartupMessage(color.Blue("\nConsole: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr)))
logger.Info(color.Blue("\nConsole: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr)))
if color.IsTerminal() && !globalCLIContext.Anonymous {
logStartupMessage(color.Blue("RootUser: ") + color.Bold(fmt.Sprintf("%s ", cred.AccessKey)))
logStartupMessage(color.Blue("RootPass: ") + color.Bold(fmt.Sprintf("%s ", cred.SecretKey)))
logger.Info(color.Blue("RootUser: ") + color.Bold(fmt.Sprintf("%s ", cred.AccessKey)))
logger.Info(color.Blue("RootPass: ") + color.Bold(fmt.Sprintf("%s ", cred.SecretKey)))
}
}
}
2 changes: 0 additions & 2 deletions cmd/global-heal.go
@@ -330,8 +330,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
default:
tracker.bucketDone(bucket)
logger.LogIf(ctx, tracker.update(ctx))
logger.Info("Healing bucket %s content on %s erasure set complete",
bucket, humanize.Ordinal(tracker.SetIndex+1))
}
}
tracker.Object = ""
2 changes: 1 addition & 1 deletion cmd/iam.go
@@ -349,7 +349,7 @@ func (sys *IAMSys) printIAMRoles() {
msgs = append(msgs, color.Bold(arn))
}

logStartupMessage(fmt.Sprintf("%s %s", color.Blue("IAM Roles:"), strings.Join(msgs, " ")))
logger.Info(fmt.Sprintf("%s %s", color.Blue("IAM Roles:"), strings.Join(msgs, " ")))
}

// HasWatcher - returns if the IAM system has a watcher to be notified of
4 changes: 2 additions & 2 deletions cmd/main.go
@@ -44,15 +44,15 @@ var GlobalFlags = []cli.Flag{
},
cli.BoolFlag{
Name: "quiet",
Usage: "disable startup information",
Usage: "disable startup and info messages",
},
cli.BoolFlag{
Name: "anonymous",
Usage: "hide sensitive information from logging",
},
cli.BoolFlag{
Name: "json",
Usage: "output server logs and startup information in json format",
Usage: "output logs in JSON format",
},
// Deprecated flag, so its hidden now, existing deployments will keep working.
cli.BoolFlag{
6 changes: 3 additions & 3 deletions cmd/server-main.go
@@ -507,11 +507,11 @@ func serverMain(ctx *cli.Context) {
if globalActiveCred.Equal(auth.DefaultCredentials) {
msg := fmt.Sprintf("WARNING: Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables",
globalActiveCred)
- logStartupMessage(color.RedBold(msg))
+ logger.Info(color.RedBold(msg))
}

if !globalCLIContext.StrictS3Compat {
logStartupMessage(color.RedBold("WARNING: Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
logger.Info(color.RedBold("WARNING: Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
}

if err = initServer(GlobalContext, newObject); err != nil {
@@ -595,7 +595,7 @@ func serverMain(ctx *cli.Context) {

// initialize the new disk cache objects.
if globalCacheConfig.Enabled {
logStartupMessage(color.Yellow("WARNING: Disk caching is deprecated for single/multi drive MinIO setups. Please migrate to using MinIO S3 gateway instead of disk caching"))
logger.Info(color.Yellow("WARNING: Disk caching is deprecated for single/multi drive MinIO setups. Please migrate to using MinIO S3 gateway instead of disk caching"))
var cacheAPI CacheObjectLayer
cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
3 changes: 2 additions & 1 deletion cmd/server-rlimit.go
@@ -44,7 +44,8 @@ func setMaxResources() (err error) {
}

if maxLimit < 4096 && runtime.GOOS != globalWindowsOSName {
logger.Info("WARNING: maximum file descriptor limit %d is too low for production servers. At least 4096 is recommended. Fix with \"ulimit -n 4096\"", maxLimit)
logger.Info("WARNING: maximum file descriptor limit %d is too low for production servers. At least 4096 is recommended. Fix with \"ulimit -n 4096\"",
maxLimit)
}

if err = sys.SetMaxOpenFileLimit(maxLimit, maxLimit); err != nil {
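The reflowed warning fires when the process file-descriptor limit is under 4096. A Unix-only sketch of the underlying check using the standard `syscall` package (this is not MinIO's `sys` wrapper):

```go
//go:build unix

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var rl syscall.Rlimit
	// RLIMIT_NOFILE is the per-process open-file limit the warning refers to.
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		fmt.Println("getrlimit:", err)
		return
	}
	if rl.Cur < 4096 {
		fmt.Printf("WARNING: maximum file descriptor limit %d is too low for production servers. At least 4096 is recommended. Fix with \"ulimit -n 4096\"\n", rl.Cur)
	}
}
```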
