diff --git a/.github/golangci-lint.config.experimental.yaml b/.github/golangci-lint.config.experimental.yaml
index 21764bb40bc..b5accf71665 100644
--- a/.github/golangci-lint.config.experimental.yaml
+++ b/.github/golangci-lint.config.experimental.yaml
@@ -6,7 +6,7 @@ linters:
   enable:
     - errcheck
 run:
-  timeout: 20m
+  timeout: 30m
   tests: false
   skip-dirs:
     - acceptancetests
diff --git a/.github/golangci-lint.config.yaml b/.github/golangci-lint.config.yaml
index 5e9155e8b25..68803590f70 100644
--- a/.github/golangci-lint.config.yaml
+++ b/.github/golangci-lint.config.yaml
@@ -35,6 +35,6 @@ linters:
     - deadcode
     - varcheck
 run:
-  timeout: 20m
+  timeout: 30m
   skip-dirs:
     - acceptancetests
diff --git a/acceptancetests/jujupy/controller.py b/acceptancetests/jujupy/controller.py
index 512e090cdd9..17bbfcbcef0 100644
--- a/acceptancetests/jujupy/controller.py
+++ b/acceptancetests/jujupy/controller.py
@@ -91,4 +91,4 @@ def mongo_memory_profile(self):
     def db_snap_channel(self):
         if 'juju-db-snap-channel' in self.cfg:
             return self.cfg["juju-db-snap-channel"]
-        return "4.0/stable"
+        return "4.4/stable"
diff --git a/agent/agent_test.go b/agent/agent_test.go
index a3a62fc3012..5b49083a9fc 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -385,7 +385,7 @@ var attributeParams = agent.AgentConfigParams{
 	Nonce:             "a nonce",
 	Controller:        testing.ControllerTag,
 	Model:             testing.ModelTag,
-	JujuDBSnapChannel: "4.0/stable",
+	JujuDBSnapChannel: controller.DefaultJujuDBSnapChannel,
 	NonSyncedWritesToRaftLog: false,
 }
 
@@ -400,7 +400,7 @@ func (*suite) TestAttributes(c *gc.C) {
 	c.Assert(conf.Dir(), gc.Equals, "/data/dir/agents/machine-1")
 	c.Assert(conf.Nonce(), gc.Equals, "a nonce")
 	c.Assert(conf.UpgradedToVersion(), jc.DeepEquals, jujuversion.Current)
-	c.Assert(conf.JujuDBSnapChannel(), gc.Equals, "4.0/stable")
+	c.Assert(conf.JujuDBSnapChannel(), gc.Equals, "4.4/stable")
 	c.Assert(conf.NonSyncedWritesToRaftLog(), jc.IsFalse)
 }
 
diff --git a/apiserver/facades/agent/provisioner/provisioninginfo.go b/apiserver/facades/agent/provisioner/provisioninginfo.go
index 99a052823c6..fdc0350a44b 100644
--- a/apiserver/facades/agent/provisioner/provisioninginfo.go
+++ b/apiserver/facades/agent/provisioner/provisioninginfo.go
@@ -341,8 +341,9 @@ func (api *ProvisionerAPI) machineSpaces(m *state.Machine,
 func (api *ProvisionerAPI) machineSpaceTopology(machineID string, spaceNames []string) (params.ProvisioningNetworkTopology, error) {
 	var topology params.ProvisioningNetworkTopology
 
-	// If there are no space names, or if there is only one space name and
-	// that's the alpha space.
+	// If there are no space names, or if there is only one space
+	// name and that's the alpha space, we don't bother setting a
+	// topology that constrains provisioning.
 	if len(spaceNames) < 1 || (len(spaceNames) == 1 && spaceNames[0] == network.AlphaSpaceName) {
 		return topology, nil
 	}
diff --git a/apiserver/facades/controller/caasapplicationprovisioner/state.go b/apiserver/facades/controller/caasapplicationprovisioner/state.go
index 10911525081..e5453fdbd27 100644
--- a/apiserver/facades/controller/caasapplicationprovisioner/state.go
+++ b/apiserver/facades/controller/caasapplicationprovisioner/state.go
@@ -29,7 +29,7 @@ type CAASApplicationProvisionerState interface {
 	WatchApplications() state.StringsWatcher
 }
 
-// CAASApplicationProvisionerState provides the subset of controller state
+// CAASApplicationControllerState provides the subset of controller state
 // required by the CAAS operator provisioner facade.
 type CAASApplicationControllerState interface {
 	ControllerConfig() (controller.Config, error)
diff --git a/caas/kubernetes/provider/bootstrap_test.go b/caas/kubernetes/provider/bootstrap_test.go
index ffd56fa4c76..2bb1ec902b9 100644
--- a/caas/kubernetes/provider/bootstrap_test.go
+++ b/caas/kubernetes/provider/bootstrap_test.go
@@ -77,6 +77,7 @@ func (s *bootstrapSuite) SetUpTest(c *gc.C) {
 	s.cfg = cfg
 
 	s.controllerCfg = coretesting.FakeControllerConfig()
+	s.controllerCfg["juju-db-snap-channel"] = controller.DefaultJujuDBSnapChannel
 	s.controllerCfg[controller.CAASImageRepo] = `
 {
     "serveraddress": "quay.io",
diff --git a/cloudconfig/podcfg/image.go b/cloudconfig/podcfg/image.go
index 373db0c407a..4995ad281d5 100644
--- a/cloudconfig/podcfg/image.go
+++ b/cloudconfig/podcfg/image.go
@@ -19,7 +19,6 @@ const (
 	JujudOCINamespace = "jujusolutions"
 	JujudOCIName      = "jujud-operator"
 	JujudbOCIName     = "juju-db"
-	JujudbVersion     = "4.4"
 )
 
 // GetControllerImagePath returns oci image path of jujud for a controller.
@@ -27,6 +26,12 @@ func (cfg *ControllerPodConfig) GetControllerImagePath() (string, error) {
 	return GetJujuOCIImagePath(cfg.Controller.Config, cfg.JujuVersion, cfg.OfficialBuild)
 }
 
+func (cfg *ControllerPodConfig) dbVersion() (version.Number, error) {
+	snapChannel := cfg.Controller.Config.JujuDBSnapChannel()
+	vers := strings.Split(snapChannel, "/")[0] + ".0"
+	return version.Parse(vers)
+}
+
 // GetJujuDbOCIImagePath returns the juju-db oci image path.
 func (cfg *ControllerPodConfig) GetJujuDbOCIImagePath() (string, error) {
 	imageRepo := cfg.Controller.Config.CAASImageRepo().Repository
@@ -34,7 +39,12 @@ func (cfg *ControllerPodConfig) GetJujuDbOCIImagePath() (string, error) {
 		imageRepo = JujudOCINamespace
 	}
 	path := fmt.Sprintf("%s/%s", imageRepo, JujudbOCIName)
-	return tagImagePath(path, JujudbVersion)
+	mongoVers, err := cfg.dbVersion()
+	if err != nil {
+		return "", errors.Annotatef(err, "cannot parse %q from controller config", controller.JujuDBSnapChannel)
+	}
+	tag := fmt.Sprintf("%d.%d", mongoVers.Major, mongoVers.Minor)
+	return tagImagePath(path, tag)
 }
 
 // IsJujuOCIImage returns true if the image path is for a Juju operator.
diff --git a/cloudconfig/podcfg/podcfg_test.go b/cloudconfig/podcfg/podcfg_test.go
index 8e16b854f5c..b6533149426 100644
--- a/cloudconfig/podcfg/podcfg_test.go
+++ b/cloudconfig/podcfg/podcfg_test.go
@@ -57,6 +57,7 @@ func testPodLabels(c *gc.C, cfg *config.Config, jobs []model.MachineJob, expectT
 
 func (*podcfgSuite) TestOperatorImagesDefaultRepo(c *gc.C) {
 	cfg := testing.FakeControllerConfig()
+	cfg["juju-db-snap-channel"] = "4.4/stable"
 	podConfig, err := podcfg.NewBootstrapControllerPodConfig(
 		cfg,
 		"controller-1",
@@ -77,6 +78,7 @@ func (*podcfgSuite) TestOperatorImagesDefaultRepo(c *gc.C) {
 func (*podcfgSuite) TestOperatorImagesCustomRepo(c *gc.C) {
 	cfg := testing.FakeControllerConfig()
 	cfg["caas-image-repo"] = "path/to/my/repo"
+	cfg["juju-db-snap-channel"] = "4.4"
 	podConfig, err := podcfg.NewBootstrapControllerPodConfig(
 		cfg,
 		"controller-1",
diff --git a/cmd/juju/machine/add.go b/cmd/juju/machine/add.go
index 2ce38d947a9..12b2e21314f 100644
--- a/cmd/juju/machine/add.go
+++ b/cmd/juju/machine/add.go
@@ -111,7 +111,7 @@ Examples:
 	juju add-machine --constraints mem=8G
 
 	# Start a new machine within the "us-east-1a" availability zone.
-	juju add-machine --constraints zone=us-east-1a
+	juju add-machine --constraints zones=us-east-1a
 
 	# Start a new machine with at least 4 CPU cores and 16GB RAM, and request
 	# three storage volumes to be attached to it. Two are large capacity (1TB)
diff --git a/core/network/linklayer.go b/core/network/linklayer.go
index 0396515a6a0..5bfe0a00537 100644
--- a/core/network/linklayer.go
+++ b/core/network/linklayer.go
@@ -29,13 +29,16 @@ const (
 
 	// BridgeDevice is used for OSI layer-2 bridge devices.
 	BridgeDevice LinkLayerDeviceType = "bridge"
+
+	// VXLANDevice is used for Virtual Extensible LAN devices.
+	VXLANDevice LinkLayerDeviceType = "vxlan"
 )
 
 // IsValidLinkLayerDeviceType returns whether the given value is a valid
 // link-layer network device type.
 func IsValidLinkLayerDeviceType(value string) bool {
 	switch LinkLayerDeviceType(value) {
-	case LoopbackDevice, EthernetDevice, VLAN8021QDevice, BondDevice, BridgeDevice:
+	case LoopbackDevice, EthernetDevice, VLAN8021QDevice, BondDevice, BridgeDevice, VXLANDevice:
 		return true
 	}
 	return false
diff --git a/core/network/linklayer_test.go b/core/network/linklayer_test.go
index eed44de0cb7..cda090a0d96 100644
--- a/core/network/linklayer_test.go
+++ b/core/network/linklayer_test.go
@@ -24,6 +24,7 @@ func (s *linkLayerSuite) TestIsValidLinkLayerDeviceTypeValid(c *gc.C) {
 		VLAN8021QDevice,
 		BondDevice,
 		BridgeDevice,
+		VXLANDevice,
 	}
 
 	for _, value := range validTypes {
diff --git a/core/network/source_netlink.go b/core/network/source_netlink.go
index afd07045659..d37f1924c60 100644
--- a/core/network/source_netlink.go
+++ b/core/network/source_netlink.go
@@ -62,6 +62,8 @@ func (n netlinkNIC) Type() LinkLayerDeviceType {
 		return VLAN8021QDevice
 	case "bond":
 		return BondDevice
+	case "vxlan":
+		return VXLANDevice
 	}
 
 	if n.nic.Attrs().Flags&net.FlagLoopback > 0 {
diff --git a/core/network/source_netlink_test.go b/core/network/source_netlink_test.go
index 533e7a0f5d6..7e69e45dbda 100644
--- a/core/network/source_netlink_test.go
+++ b/core/network/source_netlink_test.go
@@ -64,10 +64,16 @@ func (s *sourceNetlinkSuite) TestNetlinkNICType(c *gc.C) {
 	link := &stubLink{}
 	nic := &netlinkNIC{nic: link}
 
-	// If we have get value, return it.
+	// Known types.
+	link.linkType = "bridge"
+	c.Check(nic.Type(), gc.Equals, BridgeDevice)
+
 	link.linkType = "bond"
 	c.Check(nic.Type(), gc.Equals, BondDevice)
 
+	link.linkType = "vxlan"
+	c.Check(nic.Type(), gc.Equals, VXLANDevice)
+
+	// Infer loopback from flags.
 	link.linkType = ""
 	link.flags = net.FlagUp | net.FlagLoopback
diff --git a/go.mod b/go.mod
index c25d452b8b6..b2e629aa711 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/iam v1.9.0
 	github.com/aws/smithy-go v1.8.0
 	github.com/bmizerany/pat v0.0.0-20160217103242-c068ca2f0aac
-	github.com/canonical/pebble v0.0.0-20211004014426-2fbdfdef40c7
+	github.com/canonical/pebble v0.0.0-20211017212823-71d8e376806f
 	github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
 	github.com/coreos/go-systemd/v22 v22.0.0-20200316104309-cb8b64719ae3
 	github.com/dnaeon/go-vcr v1.1.0 // indirect
diff --git a/go.sum b/go.sum
index 13224b6646e..42d85772e40 100644
--- a/go.sum
+++ b/go.sum
@@ -123,8 +123,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/bmizerany/pat v0.0.0-20160217103242-c068ca2f0aac h1:X5YRFJiteUM3rajABEYJSzw1KWgmp1ulPFKxpfLm0M4=
 github.com/bmizerany/pat v0.0.0-20160217103242-c068ca2f0aac/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
-github.com/canonical/pebble v0.0.0-20211004014426-2fbdfdef40c7 h1:+ZeDZM45AI/MSQF7W4DFL77SdTKtsc4Woelr0U/atX8=
-github.com/canonical/pebble v0.0.0-20211004014426-2fbdfdef40c7/go.mod h1:+0rQ57rhB9pciKKaE/QlwPL4R8mujv+24D81KGYRlV0=
+github.com/canonical/pebble v0.0.0-20211017212823-71d8e376806f h1:5SP7EqeBNxfAINGbYxIo5hHerEX1h7gw21Z2xbupx18=
+github.com/canonical/pebble v0.0.0-20211017212823-71d8e376806f/go.mod h1:+0rQ57rhB9pciKKaE/QlwPL4R8mujv+24D81KGYRlV0=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
diff --git a/provider/openstack/provider.go b/provider/openstack/provider.go
index dbba71f0d94..4dc9bb4f361 100644
--- a/provider/openstack/provider.go
+++ b/provider/openstack/provider.go
@@ -1418,11 +1418,12 @@ func (e *Environ) networksForInstance(
 		return nil, errors.Trace(err)
 	}
 
-	if !args.Constraints.HasSpaces() {
+	if len(args.SubnetsToZones) == 0 {
 		return networks, nil
 	}
 	if len(networks) == 0 {
-		return nil, errors.New("space constraints were supplied, but no Openstack network is configured")
+		return nil, errors.New(
+			"space constraints and/or bindings were supplied, but no Openstack network is configured")
 	}
 
 	// We know that we are operating in the single configured network.
@@ -1452,7 +1453,7 @@ func (e *Environ) networksForInstance(
 		subnetIDsForZone[i] = network.FilterInFanNetwork(subnetIDs)
 	}
 
-	/// For each list of subnet IDs that satisfy space and zone constraints,
+	// For each list of subnet IDs that satisfy space and zone constraints,
 	// choose a single one at random.
 	subnetIDForZone := make([]network.Id, len(subnetIDsForZone))
 	for i, subnetIDs := range subnetIDsForZone {
diff --git a/provider/openstack/provider_test.go b/provider/openstack/provider_test.go
index dabc20c2fa2..3b5ede0df8c 100644
--- a/provider/openstack/provider_test.go
+++ b/provider/openstack/provider_test.go
@@ -530,7 +530,7 @@ var handlerFunc = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request)
 	// This line is critical, the openstack provider will reject the message
 	// if you return 200 like a mere mortal.
 	w.WriteHeader(http.StatusMultipleChoices)
-	fmt.Fprint(w, `
+	_, _ = fmt.Fprint(w, `
 {
     "versions": {
         "values": [
@@ -920,11 +920,6 @@ func (s *providerUnitTests) TestNetworksForInstanceWithAZ(c *gc.C) {
 	siParams := environs.StartInstanceParams{
 		AvailabilityZone: "eu-west-az",
 		SubnetsToZones:   []map[network.Id][]string{{"subnet-foo": {"eu-west-az", "eu-east-az"}}},
-		Constraints: constraints.Value{
-			Spaces: &[]string{
-				"eu-west-az",
-			},
-		},
 	}
 
 	result, err := envWithNetworking(mockNetworking).networksForInstance(siParams, netCfg)
diff --git a/service/snap/snap_test.go b/service/snap/snap_test.go
index a4779d00ecc..cb70e46bbad 100644
--- a/service/snap/snap_test.go
+++ b/service/snap/snap_test.go
@@ -56,7 +56,7 @@ func (*validationSuite) TestValidateJujuDbSnap(c *gc.C) {
 	c.Check(err, jc.ErrorIsNil)
 
 	// via NewService
-	jujudbService, err := NewService("juju-db", "", common.Conf{Desc: "juju-db snap"}, Command, "/ath/to/config", "edge", "jailmode", []BackgroundService{}, []Installable{})
+	jujudbService, err := NewService("juju-db", "", common.Conf{Desc: "juju-db snap"}, Command, "/path/to/config", "edge", "jailmode", []BackgroundService{}, []Installable{})
 	c.Check(err, jc.ErrorIsNil)
 	c.Check(jujudbService.Validate(), jc.ErrorIsNil)
 
@@ -96,7 +96,7 @@ func (*snapSuite) TestConfigOverride(c *gc.C) {
 			"nofile": "64000",
 		},
 	}
-	svc, err := NewService("juju-db", "", conf, Command, "/ath/to/config", "latest", "strict", []BackgroundService{{
+	svc, err := NewService("juju-db", "", conf, Command, "/path/to/config", "latest", "strict", []BackgroundService{{
 		Name: "daemon",
 	}}, nil)
 	c.Assert(err, jc.ErrorIsNil)
@@ -134,14 +134,14 @@ func (*serviceSuite) TestInstallCommands(c *gc.C) {
 			EnableAtStartup: true,
 		},
 	}
-	service, err := NewService("juju-db", "juju-db", conf, Command, "/ath/to/config", "4.0/stable", "", backgroundServices, prerequisites)
+	service, err := NewService("juju-db", "juju-db", conf, Command, "/path/to/config", "4.4/stable", "", backgroundServices, prerequisites)
 	c.Assert(err, jc.ErrorIsNil)
 
 	commands, err := service.InstallCommands()
 	c.Assert(err, jc.ErrorIsNil)
 	c.Assert(commands, gc.DeepEquals, []string{
 		"snap install core",
-		"snap install --channel=4.0/stable juju-db",
+		"snap install --channel=4.4/stable juju-db",
 	})
 }
 
@@ -154,14 +154,14 @@ func (*serviceSuite) TestInstallCommandsWithConfinementPolicy(c *gc.C) {
 			EnableAtStartup: true,
 		},
 	}
-	service, err := NewService("juju-db", "juju-db", conf, Command, "/ath/to/config", "4.0/stable", "classic", backgroundServices, prerequisites)
+	service, err := NewService("juju-db", "juju-db", conf, Command, "/path/to/config", "4.4/stable", "classic", backgroundServices, prerequisites)
 	c.Assert(err, jc.ErrorIsNil)
 
 	commands, err := service.InstallCommands()
 	c.Assert(err, jc.ErrorIsNil)
 	c.Assert(commands, gc.DeepEquals, []string{
 		"snap install core",
-		"snap install --channel=4.0/stable --classic juju-db",
+		"snap install --channel=4.4/stable --classic juju-db",
 	})
 }
 
@@ -174,7 +174,7 @@ func (*serviceSuite) TestInstall(c *gc.C) {
 
 	runnable := NewMockRunnable(ctrl)
 	runnable.EXPECT().Execute("snap", []string{"install", "core"}).Return("", nil)
-	runnable.EXPECT().Execute("snap", []string{"install", "--channel=4.0/stable", "juju-db"}).Return("", nil)
+	runnable.EXPECT().Execute("snap", []string{"install", "--channel=4.4/stable", "juju-db"}).Return("", nil)
 
 	conf := common.Conf{}
 	prerequisites := []Installable{NewNamedApp("core")}
@@ -184,7 +184,7 @@ func (*serviceSuite) TestInstall(c *gc.C) {
 		EnableAtStartup: true,
 		},
 	}
-	service, err := NewService("juju-db", "juju-db", conf, Command, "/ath/to/config", "4.0/stable", "", backgroundServices, prerequisites)
+	service, err := NewService("juju-db", "juju-db", conf, Command, "/path/to/config", "4.4/stable", "", backgroundServices, prerequisites)
 	c.Assert(err, jc.ErrorIsNil)
 
 	s := &service
@@ -214,7 +214,7 @@ func (*serviceSuite) TestInstallWithRetry(c *gc.C) {
 	runnable := NewMockRunnable(ctrl)
 	runnable.EXPECT().Execute("snap", []string{"install", "core"}).Return("", errors.New("bad"))
 	runnable.EXPECT().Execute("snap", []string{"install", "core"}).Return("", nil)
-	runnable.EXPECT().Execute("snap", []string{"install", "--channel=4.0/stable", "juju-db"}).Return("", nil)
+	runnable.EXPECT().Execute("snap", []string{"install", "--channel=4.4/stable", "juju-db"}).Return("", nil)
 
 	conf := common.Conf{}
 	prerequisites := []Installable{NewNamedApp("core")}
@@ -224,7 +224,7 @@ func (*serviceSuite) TestInstallWithRetry(c *gc.C) {
 		EnableAtStartup: true,
 		},
 	}
-	service, err := NewService("juju-db", "juju-db", conf, Command, "/ath/to/config", "4.0/stable", "", backgroundServices, prerequisites)
+	service, err := NewService("juju-db", "juju-db", conf, Command, "/path/to/config", "4.4/stable", "", backgroundServices, prerequisites)
 	c.Assert(err, jc.ErrorIsNil)
 
 	s := &service
diff --git a/snap/local/wrappers/fetch-oci b/snap/local/wrappers/fetch-oci
index 266cfa85ef6..02852b44ce1 100755
--- a/snap/local/wrappers/fetch-oci
+++ b/snap/local/wrappers/fetch-oci
@@ -32,7 +32,7 @@ echo "Wait for microk8s to be ready if needed."
 microk8s.status --wait-ready --timeout 30 2>&1
 juju_version=\$(/snap/bin/juju version | rev | cut -d- -f3- | rev)
 oci_image="docker.io/jujusolutions/jujud-operator:\$juju_version"
-mongo_image="docker.io/jujusolutions/juju-db:4.0"
+mongo_image="docker.io/jujusolutions/juju-db:4.4"
 echo "Going to cache images: \$oci_image and \$mongo_image."
 
 echo "Pulling: \$oci_image."
diff --git a/state/application.go b/state/application.go
index ccdf51db5bf..68d8e8e9777 100644
--- a/state/application.go
+++ b/state/application.go
@@ -2200,16 +2200,6 @@ type applicationAddUnitOpsArgs struct {
 	unitName *string
 }
 
-// addApplicationUnitOps is just like addUnitOps but explicitly takes a
-// constraints value (this is used at application creation time).
-func (a *Application) addApplicationUnitOps(args applicationAddUnitOpsArgs) (string, []txn.Op, error) {
-	result, ops, err := a.addUnitOpsWithCons(args)
-	if err == nil {
-		ops = append(ops, a.incUnitCountOp(nil))
-	}
-	return result, ops, err
-}
-
 // addUnitOpsWithCons is a helper method for returning addUnitOps.
 func (a *Application) addUnitOpsWithCons(args applicationAddUnitOpsArgs) (string, []txn.Op, error) {
 	if a.doc.Subordinate && args.principalName == "" {
diff --git a/state/state.go b/state/state.go
index 6a92fd087cc..89deac18be8 100644
--- a/state/state.go
+++ b/state/state.go
@@ -1187,6 +1187,7 @@ func (st *State) AddApplication(args AddApplicationArgs) (_ *Application, err er
 		Channel:       string(args.Channel),
 		RelationCount: len(peers),
 		Life:          Alive,
+		UnitCount:     args.NumUnits,
 
 		// CAAS
 		DesiredScale: scale,
@@ -1292,7 +1293,7 @@ func (st *State) AddApplication(args AddApplicationArgs) (_ *Application, err er
 	// Collect unit-adding operations.
 	for x := 0; x < args.NumUnits; x++ {
-		unitName, unitOps, err := app.addApplicationUnitOps(applicationAddUnitOpsArgs{
+		unitName, unitOps, err := app.addUnitOpsWithCons(applicationAddUnitOpsArgs{
 			cons:          args.Constraints,
 			storageCons:   args.Storage,
 			attachStorage: args.AttachStorage,
diff --git a/worker/lease/manager.go b/worker/lease/manager.go
index 00bcbf44526..0ef5c26342e 100644
--- a/worker/lease/manager.go
+++ b/worker/lease/manager.go
@@ -124,9 +124,6 @@ type Manager struct {
 	// timer tracks when nextTimeout would expire and triggers when it does
 	timer clock.Timer
 
-	// muNextTimeout protects accesses to nextTimeout
-	muNextTimeout sync.Mutex
-
 	// claims is used to deliver lease claim requests to the loop.
 	claims chan claim
 
@@ -579,7 +576,7 @@ func (manager *Manager) computeNextTimeout(leases map[lease.Key]lease.Info) {
 	// occur when the global clock updater ticks the clock, so this avoids
 	// too frequently checking with the potential of having no work to do.
 	// The blanket addition of a second is no big deal.
-	nextTick.Add(time.Second)
+	nextTick = nextTick.Add(time.Second)
 
 	nextDuration := nextTick.Sub(now).Round(time.Millisecond)
 	manager.config.Logger.Tracef("[%s] next expire in %v %v", manager.logContext, nextDuration, nextTick)
@@ -587,17 +584,16 @@ func (manager *Manager) computeNextTimeout(leases map[lease.Key]lease.Info) {
 }
 
 func (manager *Manager) setNextTimeout(t time.Time) {
-	manager.muNextTimeout.Lock()
-	defer manager.muNextTimeout.Unlock()
+	now := manager.config.Clock.Now()
 
 	// Ensure we never walk the next check back without have performed a
-	// scheduled check *unless* we're just starting up.
-	if !manager.nextTimeout.IsZero() && !t.Before(manager.nextTimeout) {
+	// scheduled check *unless* we think our last check was in the past.
+	if !manager.nextTimeout.Before(now) && !t.Before(manager.nextTimeout) {
 		return
 	}
 	manager.nextTimeout = t
 
-	d := t.Sub(manager.config.Clock.Now())
+	d := t.Sub(now)
 	if manager.timer == nil {
 		manager.timer = manager.config.Clock.NewTimer(d)
 	} else {
diff --git a/worker/lease/manager_block_test.go b/worker/lease/manager_block_test.go
index 55932a7c907..e8e1f6d2275 100644
--- a/worker/lease/manager_block_test.go
+++ b/worker/lease/manager_block_test.go
@@ -72,6 +72,43 @@ func (s *WaitUntilExpiredSuite) TestLeadershipExpires(c *gc.C) {
 	})
 }
 
+func (s *WaitUntilExpiredSuite) TestBlockChecksRescheduled(c *gc.C) {
+	fix := &Fixture{
+		leases: map[corelease.Key]corelease.Info{
+			key("postgresql"): {
+				Holder: "postgresql/0",
+				Expiry: offset(time.Second),
+			},
+			key("mysql"): {
+				Holder: "mysql/0",
+				Expiry: offset(4 * time.Second),
+			},
+			key("redis"): {
+				Holder: "redis/0",
+				Expiry: offset(7 * time.Second),
+			},
+		},
+	}
+	fix.RunTest(c, func(manager *lease.Manager, clock *testclock.Clock) {
+		blockTest := newBlockTest(c, manager, key("redis"))
+		blockTest.assertBlocked(c)
+
+		// Advance past the first expiry.
+		c.Assert(clock.WaitAdvance(3*time.Second, testing.ShortWait, 1), jc.ErrorIsNil)
+		blockTest.assertBlocked(c)
+
+		// Advance past the second expiry. We should have had a check scheduled.
+		c.Assert(clock.WaitAdvance(3*time.Second, testing.ShortWait, 1), jc.ErrorIsNil)
+		blockTest.assertBlocked(c)
+
+		// Advance past the last expiry. We should have had a check scheduled
+		// that causes the redis lease to be unblocked.
+		c.Assert(clock.WaitAdvance(3*time.Second, testing.ShortWait, 1), jc.ErrorIsNil)
+		err := blockTest.assertUnblocked(c)
+		c.Check(err, jc.ErrorIsNil)
+	})
+}
+
 func (s *WaitUntilExpiredSuite) TestLeadershipChanged(c *gc.C) {
 	fix := &Fixture{
 		leases: map[corelease.Key]corelease.Info{
diff --git a/worker/uniter/pebblepoller.go b/worker/uniter/pebblepoller.go
index b9a5f9e6cfc..3388c7a70d6 100644
--- a/worker/uniter/pebblepoller.go
+++ b/worker/uniter/pebblepoller.go
@@ -25,7 +25,7 @@ type PebbleClient interface {
 }
 
 // NewPebbleClientFunc is the function type used to create a PebbleClient.
-type NewPebbleClientFunc func(*client.Config) PebbleClient
+type NewPebbleClientFunc func(*client.Config) (PebbleClient, error)
 
 type pebblePoller struct {
 	logger Logger
@@ -54,7 +54,7 @@ func NewPebblePoller(logger Logger,
 	workloadEvents container.WorkloadEvents,
 	newPebbleClient NewPebbleClientFunc) worker.Worker {
 	if newPebbleClient == nil {
-		newPebbleClient = func(config *client.Config) PebbleClient {
+		newPebbleClient = func(config *client.Config) (PebbleClient, error) {
 			return client.New(config)
 		}
 	}
@@ -106,11 +106,14 @@ func (p *pebblePoller) poll(containerName string) error {
 	config := &client.Config{
 		Socket: path.Join("/charm/containers", containerName, "pebble.socket"),
 	}
-	pc := p.newPebbleClient(config)
+	pc, err := p.newPebbleClient(config)
+	if err != nil {
+		return errors.Annotate(err, "failed to create Pebble client")
+	}
 	defer pc.CloseIdleConnections()
 	info, err := pc.SysInfo()
 	if err != nil {
-		return errors.Annotatef(err, "failed to get pebble info")
+		return errors.Annotate(err, "failed to get pebble info")
 	}
 	p.mut.Lock()
diff --git a/worker/uniter/pebblepoller_test.go b/worker/uniter/pebblepoller_test.go
index 421ce3cb400..3334397dd15 100644
--- a/worker/uniter/pebblepoller_test.go
+++ b/worker/uniter/pebblepoller_test.go
@@ -55,10 +55,10 @@ func (s *pebblePollerSuite) TestStart(c *gc.C) {
 			err: errors.Errorf("not yet workin"),
 		},
 	}
-	newClient := func(cfg *pebbleclient.Config) uniter.PebbleClient {
+	newClient := func(cfg *pebbleclient.Config) (uniter.PebbleClient, error) {
 		c.Assert(cfg.Socket, gc.Matches, pebbleSocketPathRegexpString)
 		res := pebbleSocketPathRegexp.FindAllStringSubmatch(cfg.Socket, 1)
-		return clients[res[0][1]]
+		return clients[res[0][1]], nil
 	}
 	clock := testclock.NewClock(time.Time{})
 	containerNames := []string{
diff --git a/worker/uniter/util_test.go b/worker/uniter/util_test.go
index 133e0620ec8..744af9a6514 100644
--- a/worker/uniter/util_test.go
+++ b/worker/uniter/util_test.go
@@ -561,16 +561,16 @@ func (s startUniter) step(c *gc.C, ctx *testContext) {
 		RebootQuerier:  s.rebootQuerier,
 		Logger:         loggo.GetLogger("test"),
 		ContainerNames: ctx.containerNames,
-		NewPebbleClient: func(cfg *pebbleclient.Config) uniter.PebbleClient {
+		NewPebbleClient: func(cfg *pebbleclient.Config) (uniter.PebbleClient, error) {
 			res := pebbleSocketPathRegexp.FindAllStringSubmatch(cfg.Socket, 1)
 			if res == nil {
-				return &fakePebbleClient{err: errors.NotFoundf("container not found")}
+				return nil, errors.NotFoundf("container")
 			}
 			client, ok := ctx.pebbleClients[res[0][1]]
 			if !ok {
-				return &fakePebbleClient{err: errors.NotFoundf("container not found")}
+				return nil, errors.NotFoundf("container")
 			}
-			return client
+			return client, nil
 		},
 		SecretRotateWatcherFunc: func(u names.UnitTag, secretsChanged chan []string) (worker.Worker, error) {
 			c.Assert(u.String(), gc.Equals, s.unitTag