From 12b8a3853e82ac1bbd7f62055add2e260e129c55 Mon Sep 17 00:00:00 2001
From: Thomas Weber
Date: Tue, 19 Nov 2019 13:17:48 -0500
Subject: [PATCH] #4 forward podman log to nomad logger

---
 README.md      |  6 ++++--
 driver.go      |  6 +++++-
 driver_test.go | 52 +++++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index dda83f0..7447d92 100644
--- a/README.md
+++ b/README.md
@@ -39,12 +39,14 @@ For now you can:
 
 * use the jobs driver config to define the image for your container
 * start/stop containers with default or customer entrypoint and arguments
-* use nomad alloc data in the container. It's bind mounted to /nomad
-* bind mount custome volumes into the container
+* [Nomad runtime environment](https://www.nomadproject.io/docs/runtime/environment.html) is populated
+* use nomad alloc data in the container.
+* bind mount custom volumes into the container
 * monitor the memory consuption
 * monitor CPU usage (might be buggy)
 * container memory is limited to configured value
 * task config cpu value is used to populate podman CpuShares
+* podman log is forwarded to [Nomad logger](https://www.nomadproject.io/docs/commands/alloc/logs.html)
 
 ### Driver Configuration
 
diff --git a/driver.go b/driver.go
index 93a5c4e..bacf953 100644
--- a/driver.go
+++ b/driver.go
@@ -312,6 +312,9 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
 	containerName := BuildContainerName(cfg)
 	memoryLimit := fmt.Sprintf("%dm", cfg.Resources.NomadResources.Memory.MemoryMB)
 	cpuShares := cfg.Resources.LinuxResources.CPUShares
+	logOpts := []string{
+		fmt.Sprintf("path=%s", cfg.StdoutPath),
+	}
 
 	allEnv := cfg.EnvList()
 
@@ -342,6 +345,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
 		Memory:     &memoryLimit,
 		MemorySwap: &memoryLimit,
 		CpuShares:  &cpuShares,
+		LogOpt:     &logOpts,
 	}
 
 	containerID, err := iopodman.CreateContainer().Call(d.ctx, varlinkConnection, createOpts)
@@ -526,7 +530,7 @@ func (d *Driver) InspectTask(taskID string) (*drivers.TaskStatus, error) {
 }
 
 func (d *Driver) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *drivers.TaskResourceUsage, error) {
-	d.logger.Error("TaskStats called")
+	d.logger.Debug("TaskStats called")
 	handle, ok := d.tasks.Get(taskID)
 	if !ok {
 		return nil, drivers.ErrTaskNotFound
diff --git a/driver_test.go b/driver_test.go
index f120341..0bf9d0d 100644
--- a/driver_test.go
+++ b/driver_test.go
@@ -251,7 +251,7 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
 	taskCfg := newTaskConfig("", []string{
 		"sh",
 		"-c",
-		fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`,
+		fmt.Sprintf(`echo -n %s > $%s/%s; sleep 1`,
 			string(exp), taskenv.AllocDir, file),
 	})
 	task := &drivers.TaskConfig{
@@ -402,6 +402,56 @@ func TestPodmanDriver_GC_Container_off(t *testing.T) {
 	iopodman.RemoveContainer().Call(ctx, varlinkConnection, containerName, true ,true)
 }
 
+// Check stdout/stderr logging
+func TestPodmanDriver_Stdout(t *testing.T) {
+	if !tu.IsCI() {
+		t.Parallel()
+	}
+
+	check := uuid.Generate()
+
+	taskCfg := newTaskConfig("", []string{
+		"sh",
+		"-c",
+		"echo " + check,
+	})
+	task := &drivers.TaskConfig{
+		ID:        uuid.Generate(),
+		Name:      "stdout",
+		AllocID:   uuid.Generate(),
+		Resources: basicResources,
+	}
+	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+
+	d := podmanDriverHarness(t, nil)
+	cleanup := d.MkAllocDir(task, true)
+	defer cleanup()
+
+	_, _, err := d.StartTask(task)
+	require.NoError(t, err)
+
+	defer d.DestroyTask(task.ID, true)
+
+
+	logfile := filepath.Join(filepath.Dir(task.StdoutPath), fmt.Sprintf("%s.stdout.0", task.Name))
+	t.Logf("LOG PATH %s", logfile)
+	// Get the stdout of the process and assert that it's not empty
+	stdout, err := ioutil.ReadFile(logfile)
+	require.NoError(t, err)
+	require.Contains(t, string(stdout), check)
+
+	// Attempt to wait
+	waitCh, err := d.WaitTask(context.Background(), task.ID)
+	require.NoError(t, err)
+
+	select {
+	case <-waitCh:
+		t.Fatalf("wait channel should not have received an exit result")
+	case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second):
+	}
+
+}
+
 func newTaskConfig(variant string, command []string) TaskConfig {
 	// busyboxImageID is the ID stored in busybox.tar
 	busyboxImageID := "docker://busybox"
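
Reviewer note (not part of the patch): the change rests on two path conventions. The driver points podman's log file at the task's StdoutPath FIFO via a "path=..." log option, and the new test reads the rotated file that the Nomad logger keeps next to that FIFO. Below is a minimal standalone Go sketch of both derivations; the helper names and example paths are illustrative assumptions, not code taken from this patch.

package main

import (
	"fmt"
	"path/filepath"
)

// buildLogOpts sketches the driver-side change: podman's log output is
// redirected to the task's stdout FIFO so it reaches the Nomad logger.
// (Illustrative helper; the patch builds this slice inline in StartTask.)
func buildLogOpts(stdoutPath string) []string {
	return []string{fmt.Sprintf("path=%s", stdoutPath)}
}

// rotatedStdoutFile sketches the test-side check: the first rotated stdout
// log lives next to the FIFO and is named "<task name>.stdout.0".
func rotatedStdoutFile(stdoutPath, taskName string) string {
	return filepath.Join(filepath.Dir(stdoutPath), fmt.Sprintf("%s.stdout.0", taskName))
}

func main() {
	// Example values only; the real ones come from drivers.TaskConfig at runtime.
	stdoutPath := "/var/nomad/alloc/example/alloc/logs/.stdout.fifo"
	fmt.Println(buildLogOpts(stdoutPath))
	fmt.Println(rotatedStdoutFile(stdoutPath, "stdout"))
}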