Permalink
Browse files

implement `lxd import`

The basic idea here is to maintain a file in the container's directory that
indicates what configuration and snapshots it has, so that if someone just
backs up the filesystem the container is on (e.g. the zfs pool), they can
re-import their containers with a simple `lxd import` command.

Closes #2286

Signed-off-by: Tycho Andersen <tycho.andersen@canonical.com>
  • Loading branch information...
1 parent 017b304 commit 189641641f1effe9d53449d3e0a14f08b91bff69 Tycho Andersen committed Nov 8, 2016
Showing with 226 additions and 2 deletions.
  1. +102 −0 lxd/api_internal.go
  2. +11 −1 lxd/container.go
  3. +75 −0 lxd/container_lxc.go
  4. +28 −0 lxd/main.go
  5. +1 −1 shared/log.go
  6. +9 −0 test/suites/migration.sh
View
@@ -2,10 +2,13 @@ package main
import (
"fmt"
+ "io/ioutil"
"net/http"
"strconv"
+ "strings"
"github.com/gorilla/mux"
+ "gopkg.in/yaml.v2"
"github.com/lxc/lxd/shared"
@@ -17,6 +20,7 @@ var apiInternal = []Command{
internalShutdownCmd,
internalContainerOnStartCmd,
internalContainerOnStopCmd,
+ internalContainersCmd,
}
func internalReady(d *Daemon, r *http.Request) Response {
@@ -95,3 +99,101 @@ var internalShutdownCmd = Command{name: "shutdown", put: internalShutdown}
var internalReadyCmd = Command{name: "ready", put: internalReady, get: internalWaitReady}
var internalContainerOnStartCmd = Command{name: "containers/{id}/onstart", get: internalContainerOnStart}
var internalContainerOnStopCmd = Command{name: "containers/{id}/onstop", get: internalContainerOnStop}
+
+func slurpSlurpFile(path string) (*slurpFile, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ sf := slurpFile{}
+
+ if err := yaml.Unmarshal(data, &sf); err != nil {
+ return nil, err
+ }
+
+ return &sf, nil
+}
+
// internalImport handles POST /internal/containers?target=<name>.  It
// re-creates the database records (container plus snapshots) for a
// container whose on-disk storage still exists, by reading the
// backup.yaml slurp file kept in the container's directory.  This backs
// the `lxd import` CLI command.
func internalImport(d *Daemon, r *http.Request) Response {
	name := r.FormValue("target")
	if name == "" {
		return BadRequest(fmt.Errorf("target is required"))
	}
	// NOTE(review): name is interpolated into filesystem paths below
	// without validation; presumably acceptable because this internal
	// endpoint is only reachable locally by root — confirm.

	// Mount/start the container's storage so backup.yaml is readable.
	path := containerPath(name, false)
	err := d.Storage.ContainerStart(name, path)
	if err != nil {
		return SmartError(err)
	}

	// Best effort: unmount again when we're done (error discarded).
	defer d.Storage.ContainerStop(name, path)

	sf, err := slurpSlurpFile(shared.VarPath("containers", name, "backup.yaml"))
	if err != nil {
		return SmartError(err)
	}

	// Remember the base image, then strip all volatile keys so they get
	// regenerated on creation.
	// NOTE(review): the prefix is "volatile", not "volatile." — this also
	// matches any key that merely starts with that word; confirm intended.
	baseImage := sf.Container.Config["volatile.base_image"]
	for k := range sf.Container.Config {
		if strings.HasPrefix(k, "volatile") {
			delete(sf.Container.Config, k)
		}
	}

	// Re-create the container record itself.
	arch, err := shared.ArchitectureId(sf.Container.Architecture)
	if err != nil {
		return SmartError(err)
	}
	_, err = containerCreateInternal(d, containerArgs{
		Architecture: arch,
		BaseImage:    baseImage,
		Config:       sf.Container.Config,
		CreationDate: sf.Container.CreationDate,
		LastUsedDate: sf.Container.LastUsedDate,
		Ctype:        cTypeRegular,
		Devices:      sf.Container.Devices,
		Ephemeral:    sf.Container.Ephemeral,
		Name:         sf.Container.Name,
		Profiles:     sf.Container.Profiles,
		Stateful:     sf.Container.Stateful,
	})
	if err != nil {
		return SmartError(err)
	}

	// Then re-create a record for each snapshot listed in the slurp file,
	// applying the same volatile-key stripping as above.
	for _, snap := range sf.Snapshots {
		baseImage := snap.Config["volatile.base_image"]
		for k := range snap.Config {
			if strings.HasPrefix(k, "volatile") {
				delete(snap.Config, k)
			}
		}

		arch, err := shared.ArchitectureId(snap.Architecture)
		if err != nil {
			return SmartError(err)
		}

		_, err = containerCreateInternal(d, containerArgs{
			Architecture: arch,
			BaseImage:    baseImage,
			Config:       snap.Config,
			CreationDate: snap.CreationDate,
			LastUsedDate: snap.LastUsedDate,
			Ctype:        cTypeSnapshot,
			Devices:      snap.Devices,
			Ephemeral:    snap.Ephemeral,
			Name:         snap.Name,
			Profiles:     snap.Profiles,
			Stateful:     snap.Stateful,
		})
		if err != nil {
			return SmartError(err)
		}
	}

	return EmptySyncResponse
}
+
+var internalContainersCmd = Command{name: "containers", post: internalImport}
View
@@ -660,7 +660,12 @@ func containerCreateInternal(d *Daemon, args containerArgs) (container, error) {
args.CreationDate = dbArgs.CreationDate
args.LastUsedDate = dbArgs.LastUsedDate
- return containerLXCCreate(d, args)
+ c, err := containerLXCCreate(d, args)
+ if err != nil {
+ return nil, err
+ }
+
+ return c, nil
}
func containerConfigureInternal(c container) error {
@@ -683,6 +688,11 @@ func containerConfigureInternal(c container) error {
break
}
+ err := writeSlurpFile(c)
+ if err != nil {
+ return err
+ }
+
return nil
}
View
@@ -2511,6 +2511,14 @@ func (c *containerLXC) Restore(sourceContainer container) error {
return err
}
+ // The old slurp file may be out of date (e.g. it doesn't have all the
+ // current snapshots of the container listed); let's write a new one to
+ // be safe.
+ err = writeSlurpFile(c)
+ if err != nil {
+ return err
+ }
+
// If the container wasn't running but was stateful, should we restore
// it as running?
if shared.PathExists(c.StatePath()) {
@@ -2738,6 +2746,65 @@ func (c *containerLXC) ConfigKeySet(key string, value string) error {
return c.Update(args, false)
}
// slurpFile is the on-disk schema of a container's backup.yaml: the
// container's rendered state plus one entry per snapshot.  It is written
// by writeSlurpFile and read back by `lxd import`.
type slurpFile struct {
	Container *shared.ContainerInfo  `yaml:"container"`
	Snapshots []*shared.SnapshotInfo `yaml:"snapshots"`
}
+
+func writeSlurpFile(c container) error {
+ /* we only write slurp files out for actual containers */
+ if c.IsSnapshot() {
+ return nil
+ }
+
+ ci, _, err := c.Render()
+ if err != nil {
+ return err
+ }
+
+ snapshots, err := c.Snapshots()
+ if err != nil {
+ return err
+ }
+
+ var sis []*shared.SnapshotInfo
+
+ for _, s := range snapshots {
+ si, _, err := s.Render()
+ if err != nil {
+ return err
+ }
+
+ sis = append(sis, si.(*shared.SnapshotInfo))
+ }
+
+ data, err := yaml.Marshal(&slurpFile{
+ Container: ci.(*shared.ContainerInfo),
+ Snapshots: sis,
+ })
+ if err != nil {
+ return err
+ }
+
+ f, err := os.Create(shared.VarPath("containers", c.Name(), "backup.yaml"))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ err = f.Chmod(0400)
+ if err != nil {
+ return err
+ }
+
+ err = shared.WriteAll(f, data)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
// Set sane defaults for unset keys
if args.Architecture == 0 {
@@ -3486,6 +3553,14 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
return err
}
+ /* we can call Update in some cases when the directory doesn't exist
+ * yet before container creation; this is okay, because at the end of
+ * container creation we write the slurp file, so let's not worry about
+ * ENOENT. */
+ if err := writeSlurpFile(c); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
// Update network leases
needsUpdate := false
for _, m := range updateDevices {
View
@@ -88,6 +88,8 @@ func run() error {
fmt.Printf(" Perform a clean shutdown of LXD and all running containers\n")
fmt.Printf(" waitready [--timeout=15]\n")
fmt.Printf(" Wait until LXD is ready to handle requests\n")
+ fmt.Printf(" import <container name>\n")
+ fmt.Printf(" Import a pre-existing container from storage\n")
fmt.Printf("\n\nCommon options:\n")
fmt.Printf(" --debug\n")
@@ -223,6 +225,8 @@ func run() error {
return cmdShutdown()
case "waitready":
return cmdWaitReady()
+ case "import":
+ return cmdImport(os.Args[1:])
// Internal commands
case "forkgetnet":
@@ -1218,3 +1222,27 @@ func cmdMigrateDumpSuccess(args []string) error {
return c.WaitForSuccess(args[1])
}
+
+func cmdImport(args []string) error {
+ name := args[1]
+
+ c, err := lxd.NewClient(&lxd.DefaultConfig, "local")
+ if err != nil {
+ return err
+ }
+
+ url := fmt.Sprintf("%s/internal/containers?target=%s", c.BaseURL, name)
+
+ req, err := http.NewRequest("POST", url, nil)
+ if err != nil {
+ return err
+ }
+
+ raw, err := c.Http.Do(req)
+ _, err = lxd.HoistResponse(raw, lxd.Sync)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
View
@@ -93,5 +93,5 @@ func LogCritf(format string, args ...interface{}) {
func PrintStack() {
buf := make([]byte, 1<<16)
runtime.Stack(buf, true)
- LogDebugf("%s", buf)
+ LogErrorf("%s", buf)
}
View
@@ -21,6 +21,15 @@ test_migration() {
lxc_remote move l1:nonlive l2:
lxc_remote config show l2:nonlive/snap0 | grep user.tester | grep foo
+ # test `lxd import`
+ if [ "${LXD_BACKEND}" = "zfs" ]; then
+ lxc_remote init testimage backup
+ lxc_remote snapshot backup
+ sqlite3 "${LXD_DIR}/lxd.db" "DELETE FROM containers WHERE name='backup'"
+ lxd import backup
+ lxc_remote info backup | grep snap0
+ fi
+
# FIXME: make this backend agnostic
if [ "${LXD_BACKEND}" != "lvm" ]; then
[ -d "${LXD2_DIR}/containers/nonlive/rootfs" ]

0 comments on commit 1896416

Please sign in to comment.