Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions service/bootstrap_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,3 +69,20 @@ func (bsCfg BootstrapConfig) CreateTLSConfig() (*tls.Config, error) {
Certificates: []tls.Certificate{cert},
}, nil
}

// PeersNeeded returns the minimum number of peers needed for the given config.
func (bsCfg BootstrapConfig) PeersNeeded() int {
	var needed int
	switch bsCfg.Mode {
	case ServiceModeResilientSingle:
		needed = 2
	case ServiceModeCluster:
		needed = 3
	default:
		// ServiceModeSingle (and any unrecognized mode) needs a single peer.
		needed = 1
	}
	// The agency has to fit inside the peer set, so never report
	// fewer peers than the configured agency size.
	if bsCfg.AgencySize > needed {
		needed = bsCfg.AgencySize
	}
	return needed
}
12 changes: 11 additions & 1 deletion service/bootstrap_master.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,17 @@ func (s *Service) bootstrapMaster(ctx context.Context, runner Runner, config Con
// Permanent loop:
s.log.Infof("Serving as master with ID '%s' on %s:%d...", s.id, config.OwnAddress, s.announcePort)

if s.mode.IsSingleMode() || s.myPeers.HaveEnoughAgents() {
// Can we start right away?
needMorePeers := true
if s.mode.IsSingleMode() {
needMorePeers = false
} else if !s.myPeers.HaveEnoughAgents() {
needMorePeers = true
} else if bsCfg.StartLocalSlaves {
peersNeeded := bsCfg.PeersNeeded()
needMorePeers = len(s.myPeers.AllPeers) < peersNeeded
}
if !needMorePeers {
// We have all the agents that we need, start a single server/cluster right now
s.saveSetup()
s.log.Info("Starting service...")
Expand Down
5 changes: 3 additions & 2 deletions service/local_slaves.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,9 @@ import (

// createAndStartLocalSlaves creates additional peers for local slaves and starts services for them.
func (s *Service) createAndStartLocalSlaves(wg *sync.WaitGroup, config Config, bsCfg BootstrapConfig) {
peers := make([]Peer, 0, bsCfg.AgencySize)
for index := 2; index <= bsCfg.AgencySize; index++ {
peersNeeded := bsCfg.PeersNeeded()
peers := make([]Peer, 0, peersNeeded)
for index := 2; index <= peersNeeded; index++ {
p := Peer{}
var err error
p.ID, err = createUniqueID()
Expand Down
59 changes: 59 additions & 0 deletions test/docker_cluster_local_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,65 @@ func TestDockerClusterLocal(t *testing.T) {
ShutdownStarter(t, insecureStarterEndpoint(0))
}

// TestDockerClusterLocalAgencySize1 runs the arangodb starter in docker
// with `--starter.local` & `--cluster.agency-size=1`
func TestDockerClusterLocalAgencySize1(t *testing.T) {
	needTestMode(t, testModeDocker)
	needStarterMode(t, starterModeCluster)
	if os.Getenv("IP") == "" {
		t.Fatal("IP envvar must be set to IP address of this machine")
	}
	// Equivalent manual invocation:
	/*
		docker volume create arangodb1
		docker run -i --name=adb1 --rm -p 8528:8528 \
			-v arangodb1:/data \
			-v /var/run/docker.sock:/var/run/docker.sock \
			arangodb/arangodb-starter \
			--docker.container=adb1 \
			--starter.address=$IP \
			--starter.local \
			--cluster.agency-size=1
	*/
	volumeID := createDockerID("vol-starter-test-local-cluster-as1-")
	createDockerVolume(t, volumeID)
	defer removeDockerVolume(t, volumeID)

	// Cleanup of left over tests
	removeDockerContainersByLabel(t, "starter-test=true")
	removeStarterCreatedDockerContainers(t)

	startedAt := time.Now()

	containerID := createDockerID("starter-test-local-cluster-as1-")
	args := []string{
		"docker run -i",
		"--label starter-test=true",
		"--name=" + containerID,
		"--rm",
		fmt.Sprintf("-p %d:%d", basePort, basePort),
		fmt.Sprintf("-v %s:/data", volumeID),
		"-v /var/run/docker.sock:/var/run/docker.sock",
		"arangodb/arangodb-starter",
		"--docker.container=" + containerID,
		"--starter.address=$IP",
		"--starter.local",
		"--cluster.agency-size=1",
		createEnvironmentStarterOptions(),
	}
	dockerRun := Spawn(t, strings.Join(args, " "))
	defer dockerRun.Close()
	defer removeDockerContainer(t, containerID)

	if ok := WaitUntilStarterReady(t, whatCluster, dockerRun); ok {
		t.Logf("Cluster start took %s", time.Since(startedAt))
		testCluster(t, insecureStarterEndpoint(0), false)
	}

	if isVerbose {
		t.Log("Waiting for termination")
	}
	ShutdownStarter(t, insecureStarterEndpoint(0))
}

// TestOldDockerClusterLocal runs the arangodb starter in docker with `--local`
func TestOldDockerClusterLocal(t *testing.T) {
needTestMode(t, testModeDocker)
Expand Down