From 257ae900b77378c3dd78824918fab12926483c14 Mon Sep 17 00:00:00 2001
From: James Rasell
Date: Thu, 17 Jun 2021 12:46:40 +0200
Subject: [PATCH] chore: use error context to log errors rather than Go err style.

---
 api/exec_start.go |  4 ++--
 driver.go         | 34 +++++++++++++++++-----------------
 handle.go         |  8 ++++----
 3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/api/exec_start.go b/api/exec_start.go
index 289341d..a1ed055 100644
--- a/api/exec_start.go
+++ b/api/exec_start.go
@@ -91,7 +91,7 @@ func (c *API) attachHandleResize(ctx context.Context, resizeChannel <-chan drive
 			c.logger.Trace("Resize terminal", "sessionId", sessionId, "height", size.Height, "width", size.Width)
 			rerr := c.ExecResize(ctx, sessionId, size.Height, size.Width)
 			if rerr != nil {
-				c.logger.Error("Failed to resize TTY", "err", rerr)
+				c.logger.Error("Failed to resize TTY", "error", rerr)
 			}
 		}
 	}
@@ -155,7 +155,7 @@ func (c *API) ExecStart(ctx context.Context, sessionID string, options ExecStart
 		go func() {
 			_, err := io.Copy(socket, options.Stdin)
 			if err != nil {
-				c.logger.Error("Failed to send stdin to exec session", "err", err)
+				c.logger.Error("Failed to send stdin to exec session", "error", err)
 			}
 		}()
 	}
diff --git a/driver.go b/driver.go
index 41308e9..a3d38aa 100644
--- a/driver.go
+++ b/driver.go
@@ -181,7 +181,7 @@ func (d *Driver) Capabilities() (*drivers.Capabilities, error) {
 func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {
 	err := shelpers.Init()
 	if err != nil {
-		d.logger.Error("Could not init stats helper", "err", err)
+		d.logger.Error("Could not init stats helper", "error", err)
 		return nil, err
 	}
 	ch := make(chan *drivers.Fingerprint)
@@ -217,7 +217,7 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
 	// try to connect and get version info
 	info, err := d.podman.SystemInfo(d.ctx)
 	if err != nil {
-		d.logger.Error("Could not get podman info", "err", err)
+		d.logger.Error("Could not get podman info", "error", err)
 	} else {
 		// yay! we can enable the driver
 		health = drivers.HealthStateHealthy
@@ -263,7 +263,7 @@ func (d *Driver) RecoverTask(handle *drivers.TaskHandle) error {

 	inspectData, err := d.podman.ContainerInspect(d.ctx, taskState.ContainerID)
 	if err != nil {
-		d.logger.Warn("Recovery lookup failed", "task", handle.Config.ID, "container", taskState.ContainerID, "err", err)
+		d.logger.Warn("Recovery lookup failed", "task", handle.Config.ID, "container", taskState.ContainerID, "error", err)
 		return nil
 	}

@@ -291,7 +291,7 @@ func (d *Driver) RecoverTask(handle *drivers.TaskHandle) error {
 		if d.config.RecoverStopped {
 			d.logger.Debug("Found a stopped container, try to start it", "container", inspectData.State.Pid)
 			if err = d.podman.ContainerStart(d.ctx, inspectData.ID); err != nil {
-				d.logger.Warn("Recovery restart failed", "task", handle.Config.ID, "container", taskState.ContainerID, "err", err)
+				d.logger.Warn("Recovery restart failed", "task", handle.Config.ID, "container", taskState.ContainerID, "error", err)
 			} else {
 				d.logger.Info("Restarted a container during recovery", "container", inspectData.ID)
 				h.procState = drivers.TaskStateRunning
@@ -517,7 +517,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive

 	inspectData, err := d.podman.ContainerInspect(d.ctx, containerID)
 	if err != nil {
-		d.logger.Error("failed to inspect container", "err", err)
+		d.logger.Error("failed to inspect container", "error", err)
 		cleanup()
 		return nil, nil, fmt.Errorf("failed to start task, could not inspect container : %v", err)
 	}
@@ -651,7 +651,7 @@ func (d *Driver) createImage(image string, auth *AuthConfig, forcePull bool) (st
 	if err != nil {
 		// If ImageInspectID errors, continue the operation and try
 		// to pull the image instead
-		d.logger.Warn("Unable to check for local image", "image", imageName, "err", err)
+		d.logger.Warn("Unable to check for local image", "image", imageName, "error", err)
 	}
 	if !forcePull && imageID != "" {
 		d.logger.Debug("Found imageID", imageID, "for image", imageName, "in local storage")
@@ -731,10 +731,10 @@ func (d *Driver) StopTask(taskID string, timeout time.Duration, signal string) e
 	if err == nil {
 		return nil
 	} else if err == api.ContainerNotFound {
-		d.logger.Debug("Container not found while we wanted to stop it", "task", taskID, "container", handle.containerID, "err", err)
+		d.logger.Debug("Container not found while we wanted to stop it", "task", taskID, "container", handle.containerID, "error", err)
 		return nil
 	} else {
-		d.logger.Error("Could not stop/kill container", "containerID", handle.containerID, "err", err)
+		d.logger.Error("Could not stop/kill container", "containerID", handle.containerID, "error", err)
 		return err
 	}
 }
@@ -839,17 +839,17 @@ func (d *Driver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*
 	defer cancel()
 	sessionId, err := d.podman.ExecCreate(ctx, handle.containerID, createRequest)
 	if err != nil {
-		d.logger.Error("Unable to create ExecTask session", "err", err)
+		d.logger.Error("Unable to create ExecTask session", "error", err)
 		return nil, err
 	}
 	stdout, err := circbuf.NewBuffer(int64(drivers.CheckBufSize))
 	if err != nil {
-		d.logger.Error("ExecTask session failed, unable to allocate stdout buffer", "sessionId", sessionId, "err", err)
+		d.logger.Error("ExecTask session failed, unable to allocate stdout buffer", "sessionId", sessionId, "error", err)
 		return nil, err
 	}
 	stderr, err := circbuf.NewBuffer(int64(drivers.CheckBufSize))
 	if err != nil {
-		d.logger.Error("ExecTask session failed, unable to allocate stderr buffer", "sessionId", sessionId, "err", err)
+		d.logger.Error("ExecTask session failed, unable to allocate stderr buffer", "sessionId", sessionId, "error", err)
 		return nil, err
 	}
 	startRequest := api.ExecStartRequest{
@@ -862,13 +862,13 @@ func (d *Driver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*
 	}
 	err = d.podman.ExecStart(ctx, sessionId, startRequest)
 	if err != nil {
-		d.logger.Error("ExecTask session returned with error", "sessionId", sessionId, "err", err)
+		d.logger.Error("ExecTask session returned with error", "sessionId", sessionId, "error", err)
 		return nil, err
 	}

 	inspectData, err := d.podman.ExecInspect(ctx, sessionId)
 	if err != nil {
-		d.logger.Error("Unable to inspect finished ExecTask session", "sessionId", sessionId, "err", err)
+		d.logger.Error("Unable to inspect finished ExecTask session", "sessionId", sessionId, "error", err)
 		return nil, err
 	}
 	execResult := &drivers.ExecTaskResult{
@@ -878,7 +878,7 @@ func (d *Driver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*
 		Stdout: stdout.Bytes(),
 		Stderr: stderr.Bytes(),
 	}
-	d.logger.Trace("ExecTask result", "code", execResult.ExitResult.ExitCode, "out", string(execResult.Stdout), "err", string(execResult.Stderr))
+	d.logger.Trace("ExecTask result", "code", execResult.ExitResult.ExitCode, "out", string(execResult.Stdout), "error", string(execResult.Stderr))

 	return execResult, nil
 }
@@ -901,7 +901,7 @@ func (d *Driver) ExecTaskStreaming(ctx context.Context, taskID string, execOptio

 	sessionId, err := d.podman.ExecCreate(ctx, handle.containerID, createRequest)
 	if err != nil {
-		d.logger.Error("Unable to create exec session", "err", err)
+		d.logger.Error("Unable to create exec session", "error", err)
 		return nil, err
 	}

@@ -917,13 +917,13 @@ func (d *Driver) ExecTaskStreaming(ctx context.Context, taskID string, execOptio
 	}
 	err = d.podman.ExecStart(ctx, sessionId, startRequest)
 	if err != nil {
-		d.logger.Error("Exec session returned with error", "sessionId", sessionId, "err", err)
+		d.logger.Error("Exec session returned with error", "sessionId", sessionId, "error", err)
 		return nil, err
 	}

 	inspectData, err := d.podman.ExecInspect(ctx, sessionId)
 	if err != nil {
-		d.logger.Error("Unable to inspect finished exec session", "sessionId", sessionId, "err", err)
+		d.logger.Error("Unable to inspect finished exec session", "sessionId", sessionId, "error", err)
 		return nil, err
 	}
 	exitResult := drivers.ExitResult{
diff --git a/handle.go b/handle.go
index b793005..cb62384 100644
--- a/handle.go
+++ b/handle.go
@@ -169,20 +169,20 @@ func (h *TaskHandle) runContainerMonitor() {
 			gone = true
 		}
 		if gone {
-			h.logger.Debug("Container is not running anymore", "container", h.containerID, "err", statsErr)
+			h.logger.Debug("Container is not running anymore", "container", h.containerID, "error", statsErr)
 			// container was stopped, get exit code and other post mortem infos
 			inspectData, err := h.driver.podman.ContainerInspect(h.driver.ctx, h.containerID)
 			h.stateLock.Lock()
 			h.completedAt = time.Now()
 			if err != nil {
 				h.exitResult.Err = fmt.Errorf("Driver was unable to get the exit code. %s: %v", h.containerID, err)
-				h.logger.Error("Failed to inspect stopped container, can not get exit code", "container", h.containerID, "err", err)
+				h.logger.Error("Failed to inspect stopped container, can not get exit code", "container", h.containerID, "error", err)
 				h.exitResult.Signal = 0
 			} else {
 				h.exitResult.ExitCode = int(inspectData.State.ExitCode)
 				if len(inspectData.State.Error) > 0 {
 					h.exitResult.Err = fmt.Errorf(inspectData.State.Error)
-					h.logger.Error("Container error", "container", h.containerID, "err", h.exitResult.Err)
+					h.logger.Error("Container error", "container", h.containerID, "error", h.exitResult.Err)
 				}
 				h.completedAt = inspectData.State.FinishedAt
 				if inspectData.State.OOMKilled {
@@ -198,7 +198,7 @@ func (h *TaskHandle) runContainerMonitor() {
 		}

 		// continue and wait for next cycle, it should eventually
 		// fall into the "TaskStateExited" case
-		h.logger.Debug("Could not get container stats, unknown error", "err", fmt.Sprintf("%#v", statsErr))
+		h.logger.Debug("Could not get container stats, unknown error", "error", fmt.Sprintf("%#v", statsErr))
 		continue
 	}