Skip to content

Commit

Permalink
Merge pull request #13215 from tomponline/stable-5.0
Browse files Browse the repository at this point in the history
Backports (stable-5.0)
  • Loading branch information
tomponline committed Mar 26, 2024
2 parents 794866a + 6be15fc commit 65a7c14
Show file tree
Hide file tree
Showing 82 changed files with 1,571 additions and 950 deletions.
1 change: 1 addition & 0 deletions .github/ISSUE_TEMPLATE.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ Feel free to remove anything which doesn't apply to you and add more information

* Distribution:
* Distribution version:
* The output of "snap list --all lxd core20 core22 core24 snapd":
* The output of "lxc info" or if that fails:
* Kernel version:
* LXC version:
Expand Down
9 changes: 9 additions & 0 deletions .github/labeler.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,12 @@ Documentation:
- changed-files:
- any-glob-to-any-file:
- doc/**/*

"5.21 LTS":
- base-branch: 'stable-5.21'

"5.0 LTS":
- base-branch: 'stable-5.0'

"4.0 LTS":
- base-branch: 'stable-4.0'
22 changes: 16 additions & 6 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ jobs:
# optimize ext4 FSes for performance, not reliability
for fs in $(findmnt --noheading --type ext4 --list --uniq | awk '{print $1}'); do
# nombcache and data=writeback cannot be changed on remount
sudo mount -o remount,noatime,barrier=0,commit=6000 "${fs}"
sudo mount -o remount,noatime,barrier=0,commit=6000 "${fs}" || true
done
# disable dpkg from calling sync()
Expand All @@ -174,7 +174,7 @@ jobs:
sudo snap remove lxd --purge
# Purge older snap revisions that are disabled/superseded by newer revisions of the same snap
snap list --all | while read -r name _ rev _ _ notes _; do
[ "${notes}" = "disabled" ] && snap remove "${name}" --revision "${rev}" --purge
[[ "${notes}" =~ disabled$ ]] && snap remove "${name}" --revision "${rev}" --purge
done || true
# This was inspired from https://github.com/easimon/maximize-build-space
Expand Down Expand Up @@ -391,22 +391,32 @@ jobs:
CGO_ENABLED: 0
GOARCH: amd64
run: |
go build -o bin/lxc.x86_64 ./lxc
go build -ldflags "-s -w" -trimpath -o bin/lxc.x86_64 ./lxc
- name: Build static lxc (aarch64)
env:
CGO_ENABLED: 0
GOARCH: arm64
run: |
go build -o bin/lxc.aarch64 ./lxc
go build -ldflags "-s -w" -trimpath -o bin/lxc.aarch64 ./lxc
- name: Build static lxd-benchmark
if: runner.os == 'Linux'
env:
CGO_ENABLED: 0
run: |
set -eux
GOARCH=amd64 go build -ldflags "-s -w" -trimpath -o bin/lxd-benchmark.x86_64 ./lxd-benchmark
GOARCH=arm64 go build -ldflags "-s -w" -trimpath -o bin/lxd-benchmark.aarch64 ./lxd-benchmark
- name: Build static lxd-migrate
if: runner.os == 'Linux'
env:
CGO_ENABLED: 0
run: |
GOARCH=amd64 go build -o bin/lxd-migrate.x86_64 ./lxd-migrate
GOARCH=arm64 go build -o bin/lxd-migrate.aarch64 ./lxd-migrate
set -eux
GOARCH=amd64 go build -ldflags "-s -w" -trimpath -o bin/lxd-migrate.x86_64 ./lxd-migrate
GOARCH=arm64 go build -ldflags "-s -w" -trimpath -o bin/lxd-migrate.aarch64 ./lxd-migrate
- name: Unit tests (client)
env:
Expand Down
3 changes: 3 additions & 0 deletions client/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -582,6 +582,9 @@ type InstanceBackupArgs struct {

// Name to import backup as
Name string

// If set, it would override devices
Devices map[string]map[string]string
}

// The InstanceCopyArgs struct is used to pass additional options during instance copy.
Expand Down
24 changes: 23 additions & 1 deletion client/lxd_instances.go
Original file line number Diff line number Diff line change
Expand Up @@ -545,7 +545,7 @@ func (r *ProtocolLXD) CreateInstanceFromBackup(args InstanceBackupArgs) (Operati
return nil, err
}

if args.PoolName == "" && args.Name == "" {
if args.PoolName == "" && args.Name == "" && len(args.Devices) == 0 {
// Send the request
op, _, err := r.queryOperation("POST", path, args.BackupFile, "", true)
if err != nil {
Expand All @@ -569,6 +569,13 @@ func (r *ProtocolLXD) CreateInstanceFromBackup(args InstanceBackupArgs) (Operati
}
}

if len(args.Devices) > 0 {
err = r.CheckExtension("import_instance_devices")
if err != nil {
return nil, fmt.Errorf("Cannot use device override: %w", err)
}
}

// Prepare the HTTP request
reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path))
if err != nil {
Expand All @@ -590,6 +597,21 @@ func (r *ProtocolLXD) CreateInstanceFromBackup(args InstanceBackupArgs) (Operati
req.Header.Set("X-LXD-name", args.Name)
}

if len(args.Devices) > 0 {
devProps := url.Values{}

for dev := range args.Devices {
props := url.Values{}
for k, v := range args.Devices[dev] {
props.Set(k, v)
}

devProps.Set(dev, props.Encode())
}

req.Header.Set("X-LXD-devices", devProps.Encode())
}

// Send the request
resp, err := r.DoHTTP(req)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion client/lxd_oidc.go
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ func (o *oidcClient) getProvider(issuer string, clientID string) (rp.RelyingPart
return nil, err
}

cookieHandler := httphelper.NewCookieHandler(hashKey, encryptKey, httphelper.WithUnsecure())
cookieHandler := httphelper.NewCookieHandler(hashKey, encryptKey)
options := []rp.Option{
rp.WithCookieHandler(cookieHandler),
rp.WithVerifierOpts(rp.WithIssuedAtOffset(5 * time.Second)),
Expand Down
3 changes: 3 additions & 0 deletions doc/.readthedocs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ build:
tools:
golang: "1.19"
python: "3.11"
jobs:
pre_build:
- go build -ldflags "-s -w" -trimpath -o lxc.bin ./lxc

# Build documentation in the docs/ directory with Sphinx
sphinx:
Expand Down
7 changes: 7 additions & 0 deletions lxc/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -302,6 +302,8 @@ To easily setup a local LXD server in a virtual machine, consider using: https:/
}
}

// PreRun is set as the (*cobra.Command).PersistentPreRunE for the top level lxc command. It loads configuration and
// performs additional checks if it detects that LXD has not been configured yet.
func (c *cmdGlobal) PreRun(cmd *cobra.Command, args []string) error {
var err error

Expand Down Expand Up @@ -416,6 +418,8 @@ Or for a virtual machine: lxc launch ubuntu:22.04 --vm`)+"\n")
return nil
}

// PostRun is set as the (*cobra.Command).PersistentPostRunE hook on the top level lxc command.
// It saves any configuration that must persist between runs.
func (c *cmdGlobal) PostRun(cmd *cobra.Command, args []string) error {
// Macaroon teardown
if c.conf != nil && shared.PathExists(c.confPath) {
Expand All @@ -432,6 +436,8 @@ type remoteResource struct {
name string
}

// ParseServers parses a list of remotes (`<remote>:<resource>...`) and calls (*config.Config).GetInstanceServer
// for each remote to configure a new connection.
func (c *cmdGlobal) ParseServers(remotes ...string) ([]remoteResource, error) {
servers := map[string]lxd.InstanceServer{}
resources := []remoteResource{}
Expand Down Expand Up @@ -471,6 +477,7 @@ func (c *cmdGlobal) ParseServers(remotes ...string) ([]remoteResource, error) {
return resources, nil
}

// CheckArgs checks that the given list of arguments has length between minArgs and maxArgs.
func (c *cmdGlobal) CheckArgs(cmd *cobra.Command, args []string, minArgs int, maxArgs int) (bool, error) {
if len(args) < minArgs || (maxArgs != -1 && len(args) > maxArgs) {
_ = cmd.Help()
Expand Down
2 changes: 1 addition & 1 deletion lxc/storage_volume.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import (
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"

lxd "github.com/canonical/lxd/client"
"github.com/canonical/lxd/client"
"github.com/canonical/lxd/shared"
"github.com/canonical/lxd/shared/api"
cli "github.com/canonical/lxd/shared/cmd"
Expand Down
9 changes: 8 additions & 1 deletion lxd/api_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -353,6 +353,7 @@ func clusterPutBootstrap(d *Daemon, r *http.Request, req api.ClusterPut) respons
d.serverClustered = true
d.globalConfigMu.Unlock()

d.events.SetLocalLocation(d.serverName)
// Start clustering tasks
d.startClusterTasks()

Expand Down Expand Up @@ -563,17 +564,21 @@ func clusterPutJoin(d *Daemon, r *http.Request, req api.ClusterPut) response.Res
defer revert.Fail()

// Update server name.
oldServerName := d.serverName
d.globalConfigMu.Lock()
d.serverName = req.ServerName
d.serverClustered = true
d.globalConfigMu.Unlock()
revert.Add(func() {
d.globalConfigMu.Lock()
d.serverName = ""
d.serverName = oldServerName
d.serverClustered = false
d.globalConfigMu.Unlock()

d.events.SetLocalLocation(d.serverName)
})

d.events.SetLocalLocation(d.serverName)
localRevert, err := clusterInitMember(localClient, client, req.MemberConfig)
if err != nil {
return fmt.Errorf("Failed to initialize member: %w", err)
Expand Down Expand Up @@ -1856,6 +1861,8 @@ func clusterNodePost(d *Daemon, r *http.Request) response.Response {
d.serverName = req.ServerName
d.globalConfigMu.Unlock()

d.events.SetLocalLocation(d.serverName)

requestor := request.CreateRequestor(r)
s.Events.SendLifecycle(request.ProjectParam(r), lifecycle.ClusterMemberRenamed.Event(req.ServerName, requestor, logger.Ctx{"old_name": memberName}))

Expand Down
6 changes: 3 additions & 3 deletions lxd/api_internal.go
Original file line number Diff line number Diff line change
Expand Up @@ -665,7 +665,7 @@ func internalImportFromBackup(s *state.State, projectName string, instName strin

if backupConf.Pool == nil {
// We don't know what kind of storage type the pool is.
return fmt.Errorf(`No storage pool struct in the backup file found. The storage pool needs to be recovered manually`)
return fmt.Errorf("No storage pool struct in the backup file found. The storage pool needs to be recovered manually")
}

// Try to retrieve the storage pool the instance supposedly lives on.
Expand Down Expand Up @@ -694,7 +694,7 @@ func internalImportFromBackup(s *state.State, projectName string, instName strin
}

// Check snapshots are consistent.
existingSnapshots, err := pool.CheckInstanceBackupFileSnapshots(backupConf, projectName, false, nil)
existingSnapshots, err := pool.CheckInstanceBackupFileSnapshots(backupConf, projectName, nil)
if err != nil {
return fmt.Errorf("Failed checking snapshots: %w", err)
}
Expand Down Expand Up @@ -918,7 +918,7 @@ func internalImportRootDevicePopulate(instancePoolName string, localDevices map[
expandedRootName, expandedRootConfig, _ := instancetype.GetRootDiskDevice(expandedDevices)

// Extract root disk from expanded profile devices.
profileExpandedDevices := db.ExpandInstanceDevices(deviceConfig.NewDevices(localDevices), profiles)
profileExpandedDevices := instancetype.ExpandInstanceDevices(deviceConfig.NewDevices(localDevices), profiles)
profileExpandedRootName, profileExpandedRootConfig, _ := instancetype.GetRootDiskDevice(profileExpandedDevices.CloneNative())

// Record whether we need to add a new local disk device.
Expand Down
4 changes: 4 additions & 0 deletions lxd/backup/backup_config_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,10 @@ func UpdateInstanceConfig(c *db.Cluster, b Info, mountPath string) error {
if backup.Volume != nil {
backup.Volume.Name = b.Name
backup.Volume.Project = b.Project

// Ensure the most recent volume UUIDs get updated.
backup.Volume.Config = b.Config.Volume.Config
backup.VolumeSnapshots = b.Config.VolumeSnapshots
}

// Load the storage pool.
Expand Down
37 changes: 21 additions & 16 deletions lxd/cluster/connect.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,9 +79,14 @@ func Connect(address string, networkCert *shared.CertInfo, serverCert *shared.Ce
// ConnectIfInstanceIsRemote figures out the address of the cluster member which is running the instance with the
// given name in the specified project. If it's not the local member will connect to it and return the connected
// client (configured with the specified project), otherwise it will just return nil.
func ConnectIfInstanceIsRemote(cluster *db.Cluster, projectName string, instName string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request, instanceType instancetype.Type) (lxd.InstanceServer, error) {
func ConnectIfInstanceIsRemote(s *state.State, projectName string, instName string, r *http.Request, instanceType instancetype.Type) (lxd.InstanceServer, error) {
// No need to connect if not clustered.
if !s.ServerClustered {
return nil, nil
}

var address string // Cluster member address.
err := cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
var err error
address, err = tx.GetNodeAddressOfInstance(ctx, projectName, instName, instanceType)
return err
Expand All @@ -94,7 +99,7 @@ func ConnectIfInstanceIsRemote(cluster *db.Cluster, projectName string, instName
return nil, nil // The instance is running on this local member, no need to connect.
}

client, err := Connect(address, networkCert, serverCert, r, false)
client, err := Connect(address, s.Endpoints.NetworkCert(), s.ServerCert(), r, false)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -148,22 +153,22 @@ func ConnectIfVolumeIsRemote(s *state.State, poolName string, projectName string
return nil, fmt.Errorf("Failed checking if volume %q is available: %w", volumeName, err)
}

if remoteInstance != nil {
var instNode db.NodeInfo
err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
instNode, err = tx.GetNodeByName(ctx, remoteInstance.Node)
return err
})
if err != nil {
return nil, fmt.Errorf("Failed getting cluster member info for %q: %w", remoteInstance.Node, err)
}

// Replace node list with instance's cluster member node (which might be local member).
nodes = []db.NodeInfo{instNode}
} else {
if remoteInstance == nil {
// Volume isn't exclusively attached to an instance. Use local cluster member.
return nil, nil
}

var instNode db.NodeInfo
err = s.DB.Cluster.Transaction(s.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
instNode, err = tx.GetNodeByName(ctx, remoteInstance.Node)
return err
})
if err != nil {
return nil, fmt.Errorf("Failed getting cluster member info for %q: %w", remoteInstance.Node, err)
}

// Replace node list with instance's cluster member node (which might be local member).
nodes = []db.NodeInfo{instNode}
}

nodeCount := len(nodes)
Expand Down
2 changes: 1 addition & 1 deletion lxd/cluster/heartbeat.go
Original file line number Diff line number Diff line change
Expand Up @@ -452,7 +452,7 @@ func (g *Gateway) heartbeat(ctx context.Context, mode heartbeatMode) {
// Initialise slice to indicate to HeartbeatNodeHook that its being called from leader.
unavailableMembers := make([]string, 0)

err = query.Retry(func() error {
err = query.Retry(ctx, func(ctx context.Context) error {
// During cluster member fluctuations/upgrades the cluster can become unavailable so check here.
if g.Cluster == nil {
return fmt.Errorf("Cluster unavailable")
Expand Down
4 changes: 4 additions & 0 deletions lxd/daemon.go
Original file line number Diff line number Diff line change
Expand Up @@ -1197,6 +1197,8 @@ func (d *Daemon) init() error {
return err
}

d.events.SetLocalLocation(d.serverName)

// Mount the storage pools.
logger.Infof("Initializing storage pools")
err = storageStartup(d.State(), false)
Expand Down Expand Up @@ -1253,6 +1255,8 @@ func (d *Daemon) init() error {
return err
}

d.events.SetLocalLocation(d.serverName)

// Get daemon configuration.
bgpAddress := d.localConfig.BGPAddress()
bgpRouterID := d.localConfig.BGPRouterID()
Expand Down

0 comments on commit 65a7c14

Please sign in to comment.