Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

auto update containers in systemd units #5480

Merged
merged 2 commits into from Mar 18, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
56 changes: 56 additions & 0 deletions cmd/podman/autoupdate.go
@@ -0,0 +1,56 @@
package main

import (
	"fmt"
	"strings"

	"github.com/containers/libpod/cmd/podman/cliconfig"
	"github.com/containers/libpod/pkg/adapter"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

var (
	// autoUpdateCommand holds the parsed CLI state for `podman auto-update`.
	autoUpdateCommand cliconfig.AutoUpdateValues

	// autoUpdateDescription is the long help text shown for the command.
	autoUpdateDescription = `Auto update containers according to their auto-update policy.

Auto-update policies are specified with the "io.containers.autoupdate" label.`

	// _autoUpdateCommand is the cobra command; it is wired to
	// autoUpdateCommand in init() and registered in getMainCommands().
	_autoUpdateCommand = &cobra.Command{
		Use:   "auto-update [flags]",
		Short: "Auto update containers according to their auto-update policy",
		Args:  noSubArgs,
		Long:  autoUpdateDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			// NOTE(review): this populates the sibling restartCommand
			// (a cliconfig.RestartValues declared elsewhere in this
			// package) rather than autoUpdateCommand — presumably to
			// reuse RestartValues in autoUpdateCmd; confirm this is
			// intentional and does not race with `podman restart`.
			restartCommand.InputArgs = args
			restartCommand.GlobalFlags = MainGlobalOpts
			return autoUpdateCmd(&restartCommand)
		},
		Example: `podman auto-update`,
	}
)

// init connects the cobra command to the CLI value struct and installs the
// shared help/usage templates. The Command field must be assigned first:
// the template setters appear to operate on the embedded cobra.Command —
// TODO confirm against cliconfig.PodmanCommand.
func init() {
	autoUpdateCommand.Command = _autoUpdateCommand
	autoUpdateCommand.SetHelpTemplate(HelpTemplate())
	autoUpdateCommand.SetUsageTemplate(UsageTemplate())
}

// autoUpdateCmd implements `podman auto-update`: it asks the libpod runtime
// to process all containers carrying an auto-update policy label and prints
// the name of each successfully updated systemd unit to stdout, one per
// line.
//
// All individual failures reported by the runtime are collected into a
// single error whose message lists one failure per line; nil is returned
// when there were no failures.
func autoUpdateCmd(c *cliconfig.RestartValues) error {
	runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
	if err != nil {
		return errors.Wrapf(err, "error creating libpod runtime")
	}
	defer runtime.DeferredShutdown(false)

	units, failures := runtime.AutoUpdate()
	for _, unit := range units {
		fmt.Println(unit)
	}

	if len(failures) == 0 {
		return nil
	}
	// Join all failure messages in one pass instead of re-formatting the
	// accumulated error on every iteration (which builds the string
	// quadratically). The resulting message is identical: one failure per
	// line.
	messages := make([]string, 0, len(failures))
	for _, e := range failures {
		messages = append(messages, e.Error())
	}
	return errors.New(strings.Join(messages, "\n"))
}
13 changes: 9 additions & 4 deletions cmd/podman/cliconfig/config.go
Expand Up @@ -54,6 +54,10 @@ type AttachValues struct {
SigProxy bool
}

// AutoUpdateValues holds the CLI options for the `podman auto-update`
// command. It carries only the common PodmanCommand state; auto-update
// defines no flags of its own in this struct.
type AutoUpdateValues struct {
	PodmanCommand
}

type ImagesValues struct {
PodmanCommand
All bool
Expand Down Expand Up @@ -470,10 +474,11 @@ type RefreshValues struct {

// RestartValues holds the CLI options for `podman restart`.
// NOTE(review): the AutoUpdate field was added alongside the auto-update
// feature, and `podman auto-update` reuses this struct (see
// cmd/podman/autoupdate.go) — presumably to share the restart plumbing;
// confirm the coupling is intentional.
type RestartValues struct {
	PodmanCommand
	All        bool
	AutoUpdate bool
	Latest     bool
	Running    bool
	Timeout    uint
}

type RestoreValues struct {
Expand Down
1 change: 1 addition & 0 deletions cmd/podman/commands.go
Expand Up @@ -11,6 +11,7 @@ const remoteclient = false
// Commands that the local client implements
func getMainCommands() []*cobra.Command {
rootCommands := []*cobra.Command{
_autoUpdateCommand,
_cpCommand,
_playCommand,
_loginCommand,
Expand Down
59 changes: 45 additions & 14 deletions cmd/podman/shared/create.go
Expand Up @@ -18,13 +18,15 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
ann "github.com/containers/libpod/pkg/annotations"
"github.com/containers/libpod/pkg/autoupdate"
envLib "github.com/containers/libpod/pkg/env"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/inspect"
ns "github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/seccomp"
cc "github.com/containers/libpod/pkg/spec"
systemdGen "github.com/containers/libpod/pkg/systemd/generate"
"github.com/containers/libpod/pkg/util"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
Expand Down Expand Up @@ -69,6 +71,7 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
}

imageName := ""
rawImageName := ""
var imageData *inspect.ImageData = nil

// Set the storage if there is no rootfs specified
Expand All @@ -78,9 +81,8 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
writer = os.Stderr
}

name := ""
if len(c.InputArgs) != 0 {
name = c.InputArgs[0]
rawImageName = c.InputArgs[0]
} else {
return nil, nil, errors.Errorf("error, image name not provided")
}
Expand All @@ -97,7 +99,7 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
ArchitectureChoice: overrideArch,
}

newImage, err := runtime.ImageRuntime().New(ctx, name, rtc.SignaturePolicyPath, c.String("authfile"), writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullType)
newImage, err := runtime.ImageRuntime().New(ctx, rawImageName, rtc.SignaturePolicyPath, c.String("authfile"), writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullType)
if err != nil {
return nil, nil, err
}
Expand Down Expand Up @@ -174,11 +176,32 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
}
}

createConfig, err := ParseCreateOpts(ctx, c, runtime, imageName, imageData)
createConfig, err := ParseCreateOpts(ctx, c, runtime, imageName, rawImageName, imageData)
if err != nil {
return nil, nil, err
}

// (VR): Ideally we perform the checks _before_ pulling the image but that
// would require some bigger code refactoring of `ParseCreateOpts` and the
// logic here. But as the creation code will be consolidated in the future
// and given auto updates are experimental, we can live with that for now.
// In the end, the user may only need to correct the policy or the raw image
// name.
autoUpdatePolicy, autoUpdatePolicySpecified := createConfig.Labels[autoupdate.Label]
if autoUpdatePolicySpecified {
if _, err := autoupdate.LookupPolicy(autoUpdatePolicy); err != nil {
return nil, nil, err
}
// Now we need to make sure we're having a fully-qualified image reference.
if rootfs != "" {
return nil, nil, errors.Errorf("auto updates do not work with --rootfs")
}
// Make sure the input image is a fully-qualified docker image reference.
if err := autoupdate.ValidateImageReference(rawImageName); err != nil {
return nil, nil, err
}
}

// Because parseCreateOpts does not derive anything from the image, we add health check
// at this point. The rest is done by WithOptions.
createConfig.HealthCheck = healthCheck
Expand Down Expand Up @@ -270,7 +293,7 @@ func configurePod(c *GenericCLIResults, runtime *libpod.Runtime, namespaces map[

// Parses CLI options related to container creation into a config which can be
// parsed into an OCI runtime spec
func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.Runtime, imageName string, data *inspect.ImageData) (*cc.CreateConfig, error) {
func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.Runtime, imageName string, rawImageName string, data *inspect.ImageData) (*cc.CreateConfig, error) {
var (
inputCommand, command []string
memoryLimit, memoryReservation, memorySwap, memoryKernel int64
Expand Down Expand Up @@ -481,12 +504,15 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
"container": "podman",
}

// First transform the os env into a map. We need it for the labels later in
// any case.
osEnv, err := envLib.ParseSlice(os.Environ())
if err != nil {
return nil, errors.Wrap(err, "error parsing host environment variables")
}

// Start with env-host
if c.Bool("env-host") {
osEnv, err := envLib.ParseSlice(os.Environ())
if err != nil {
return nil, errors.Wrap(err, "error parsing host environment variables")
}
env = envLib.Join(env, osEnv)
}

Expand Down Expand Up @@ -534,6 +560,10 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
}
}

if systemdUnit, exists := osEnv[systemdGen.EnvVariable]; exists {
labels[systemdGen.EnvVariable] = systemdUnit
}

// ANNOTATIONS
annotations := make(map[string]string)

Expand Down Expand Up @@ -764,11 +794,12 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
Entrypoint: entrypoint,
Env: env,
// ExposedPorts: ports,
Init: c.Bool("init"),
InitPath: c.String("init-path"),
Image: imageName,
ImageID: imageID,
Interactive: c.Bool("interactive"),
Init: c.Bool("init"),
InitPath: c.String("init-path"),
Image: imageName,
RawImageName: rawImageName,
ImageID: imageID,
Interactive: c.Bool("interactive"),
// IP6Address: c.String("ipv6"), // Not implemented yet - needs CNI support for static v6
Labels: labels,
// LinkLocalIP: c.StringSlice("link-local-ip"), // Not implemented yet
Expand Down
1 change: 1 addition & 0 deletions completions/bash/podman
Expand Up @@ -3334,6 +3334,7 @@ _podman_podman() {
"
commands="
attach
auto-update
build
commit
container
Expand Down
11 changes: 11 additions & 0 deletions contrib/systemd/auto-update/podman-auto-update.service
@@ -0,0 +1,11 @@
[Unit]
Description=Podman auto-update service
Documentation=man:podman-auto-update(1)
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/usr/bin/podman auto-update

[Install]
WantedBy=multi-user.target default.target
9 changes: 9 additions & 0 deletions contrib/systemd/auto-update/podman-auto-update.timer
@@ -0,0 +1,9 @@
[Unit]
Description=Podman auto-update timer

[Timer]
OnCalendar=daily
Persistent=true

[Install]
WantedBy=timers.target
46 changes: 46 additions & 0 deletions docs/source/markdown/podman-auto-update.1.md
@@ -0,0 +1,46 @@
% podman-auto-update(1)

## NAME
podman-auto-update - Auto update containers according to their auto-update policy

## SYNOPSIS
**podman auto-update**

## DESCRIPTION
`podman auto-update` looks up containers with a specified "io.containers.autoupdate" label (i.e., the auto-update policy).

If the label is present and set to "image", Podman reaches out to the corresponding registry to check if the image has been updated.
An image is considered updated if the digest in the local storage is different from the one of the remote image.
If an image must be updated, Podman pulls it down and restarts the systemd unit executing the container.

At container-creation time, Podman looks up the "PODMAN_SYSTEMD_UNIT" environment variable and stores it verbatim in the container's label.
This variable is now set by all systemd units generated by `podman-generate-systemd` and is set to `%n` (i.e., the name of the systemd unit starting the container).
This data is then being used in the auto-update sequence to instruct systemd (via DBUS) to restart the unit and hence to restart the container.

Note that `podman auto-update` relies on systemd and requires a fully-qualified image reference (e.g., quay.io/podman/stable:latest) to be used to create the container.
This enforcement is necessary to know which image to actually check and pull.
If an image ID was used, Podman would not know which image to check/pull anymore.

## EXAMPLES

```
# Start a container
$ podman run -d busybox:latest top
bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d

# Generate a systemd unit for this container
$ podman generate systemd --new --files bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
/home/user/containers/libpod/container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service

# Load the new systemd unit and start it
$ mv ./container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service ~/.config/systemd/user
$ systemctl --user daemon-reload
$ systemctl --user start container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This doesn't actually work as described, but it's unrelated to your PR.

First: on my test setup (f31, root), I have to restorecon /etc/systemd/system/xxx.service, otherwise systemctl says "no such unit".

Second, if the initial source container is started with --name foo, systemctl start will constantly fail until you podman rm -f the original source container:

Mar 18 08:22:40 ci-vm-10-0-137-242.hosted.upshift.rdu2.redhat.com podman[68087]: Error: error creating container storage: the container name "mytest" is already in use by "5d6d296390e453b59a9e109a8de78369949f1de0d2fedcb3c6c0568d4e0ceafd". You have to remove that container to be able to reuse that name.: that name is already in use

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

First: on my test setup (f31, root), I have to restorecon /etc/systemd/system/xxx.service, otherwise systemctl says "no such unit".

Shouldn't it go to /usr/lib/systemd/system/ for system services? That's where I place them and selinux behaved.

Second, if the initial source container is started with --name foo, systemctl start will constantly fail until you podman rm -f the original source container.

That's a super helpful observation! I think, we should not restart but stop-start the services, so that everything gets properly cleaned up. I'll investigate.

Thanks a lot for testing and the feedback, @edsantiago !

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shouldn't it go to /usr/lib/systemd/system/ for system services?

Probably not: that's for installed packages; the recommendation is to use /etc/systemd/system for local configuration. Regardless, if I (root) am sitting in my home directory when I run podman generate systemd --files, then mv that file elsewhere, SELinux will have problems because the file will be labeled admin_home_t instead of systemd_unit_file_t.

This brings up a gripe I've long had, though: why does podman write the unit file to pwd? That causes pain for users who then have to figure out the correct destination, mv it, restorecon, etc. Is it too late to fix podman so it actually figures out the correct root/rootless path and writes the file to the proper place?

we should not restart but stop-start the services

As I recall, systemctl stop podman-xx.service did not help (perhaps because of the --cidfile option). Only podman rm -f (or podman stop, podman rm) helped.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Second, if the initial source container is started with --name foo, systemctl start will constantly fail until you podman rm -f the original source container.

Misread the sentence. Yes, the initial container will remain. We want to add --replace which will remove existing containers in case of a name collision.


# Auto-update the container
$ podman auto-update
container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
```

## SEE ALSO
podman(1), podman-generate-systemd(1), podman-run(1), systemd.unit(5)
1 change: 1 addition & 0 deletions docs/source/markdown/podman.1.md
Expand Up @@ -154,6 +154,7 @@ the exit codes follow the `chroot` standard, see below:
| Command | Description |
| ------------------------------------------------ | --------------------------------------------------------------------------- |
| [podman-attach(1)](podman-attach.1.md) | Attach to a running container. |
| [podman-auto-update(1)](podman-auto-update.1.md) | Auto update containers according to their auto-update policy |
| [podman-build(1)](podman-build.1.md) | Build a container image using a Containerfile. |
| [podman-commit(1)](podman-commit.1.md) | Create new image based on the changed container. |
| [podman-container(1)](podman-container.1.md) | Manage containers. |
Expand Down
12 changes: 12 additions & 0 deletions hack/man-page-checker
Expand Up @@ -49,6 +49,12 @@ for md in $(ls -1 *-*.1.md | grep -v remote);do

# podman.1.md has a two-column table; podman-*.1.md all have three.
parent=$(echo $md | sed -e 's/^\(.*\)-.*$/\1.1.md/')
if [[ $parent =~ "podman-auto" ]]; then
# podman-auto-update.1.md is special cased as its structure differs
# from that of other man pages where main and sub-commands are split
# by dashes.
parent="podman.1.md"
fi
x=3
if expr -- "$parent" : ".*-" >/dev/null; then
x=4
Expand Down Expand Up @@ -90,6 +96,12 @@ for md in *.1.md;do
# Get the command name, and confirm that it matches the md file name.
cmd=$(echo "$synopsis" | sed -e 's/\(.*\)\*\*.*/\1/' | tr -d \*)
md_nodash=$(basename "$md" .1.md | tr '-' ' ')
if [[ $md_nodash = 'podman auto update' ]]; then
# podman-auto-update.1.md is special cased as its structure differs
# from that of other man pages where main and sub-commands are split
# by dashes.
md_nodash='podman auto-update'
fi
if [ "$cmd" != "$md_nodash" -a "$cmd" != "podman-remote" ]; then
echo
printf "Inconsistent program name in SYNOPSIS in %s:\n" $md
Expand Down
3 changes: 3 additions & 0 deletions hack/podman-commands.sh
Expand Up @@ -38,6 +38,9 @@ function podman_man() {

# Special case: there is no podman-help man page, nor need for such.
echo "help"
# Auto-update differs from other commands as it's a single command, not
# a main and sub-command split by a dash.
echo "auto-update"
elif [ "$@" = "podman-image-trust" ]; then
# Special case: set and show aren't actually in a table in the man page
echo set
Expand Down
14 changes: 13 additions & 1 deletion libpod/container.go
Expand Up @@ -239,6 +239,12 @@ type ContainerConfig struct {
// container has been created with.
CreateCommand []string `json:"CreateCommand,omitempty"`

// RawImageName is the raw and unprocessed name of the image when creating
// the container (as specified by the user). May or may not be set. One
// use case to store this data is auto-updates where we need the _exact_
// name and not some normalized instance of it.
RawImageName string `json:"RawImageName,omitempty"`
vrothberg marked this conversation as resolved.
Show resolved Hide resolved

// TODO consider breaking these subsections up into smaller structs

// UID/GID mappings used by the storage
Expand Down Expand Up @@ -503,11 +509,17 @@ func (c *Container) Namespace() string {
return c.config.Namespace
}

// Image returns the ID and name of the image used as the container's rootfs
// Image returns the ID and the name of the image used as the container's
// root filesystem, in that order.
func (c *Container) Image() (string, string) {
	id, name := c.config.RootfsImageID, c.config.RootfsImageName
	return id, name
}

// RawImageName returns the image name exactly as the user specified it at
// container-creation time, without any processing or normalization. The
// returned string may be empty, as the config field is optional.
func (c *Container) RawImageName() string {
	raw := c.config.RawImageName
	return raw
}

// ShmDir returns the sources path to be mounted on /dev/shm in container
func (c *Container) ShmDir() string {
return c.config.ShmDir
Expand Down
2 changes: 2 additions & 0 deletions libpod/events/config.go
Expand Up @@ -98,6 +98,8 @@ const (

// Attach ...
Attach Status = "attach"
// AutoUpdate ...
AutoUpdate Status = "auto-update"
// Checkpoint ...
Checkpoint Status = "checkpoint"
// Cleanup ...
Expand Down