diff --git a/apps/glusterfs/device_entry.go b/apps/glusterfs/device_entry.go index 624f8291a4..48e87b7d58 100644 --- a/apps/glusterfs/device_entry.go +++ b/apps/glusterfs/device_entry.go @@ -210,17 +210,25 @@ func (d *DeviceEntry) SetExtentSize(amount uint64) { // id to the list. func (d *DeviceEntry) NewBrickEntry(amount uint64, snapFactor float64) *BrickEntry { + // :TODO: This needs unit test + // Calculate thinpool size tpsize := uint64(float64(amount) * snapFactor) // Align tpsize to extent - tpsize += tpsize % d.ExtentSize + alignment := tpsize % d.ExtentSize + if alignment != 0 { + tpsize += d.ExtentSize - alignment + } // Determine if we need to allocate space for the metadata metadataSize := d.poolMetadataSize(tpsize) // Align to extent - metadataSize += metadataSize % d.ExtentSize + alignment = metadataSize % d.ExtentSize + if alignment != 0 { + metadataSize += d.ExtentSize - alignment + } // Total required size total := tpsize + metadataSize diff --git a/apps/glusterfs/device_entry_test.go b/apps/glusterfs/device_entry_test.go index 30bb52ac8d..9377ab1f40 100644 --- a/apps/glusterfs/device_entry_test.go +++ b/apps/glusterfs/device_entry_test.go @@ -119,16 +119,17 @@ func TestDeviceEntryNewBrickEntry(t *testing.T) { // --- Now check with a real value --- // Check newly created brick - tpsize := uint64(200 * 1.5) + size := 201 + tpsize := uint64(float32(size) * 1.5) // Alignment - tpsize += tpsize % 8 + tpsize += d.ExtentSize - (tpsize % d.ExtentSize) // Calculate metadatasize metadatasize := d.poolMetadataSize(tpsize) // Alignment - metadatasize += metadatasize % 8 + metadatasize += d.ExtentSize - (metadatasize % d.ExtentSize) total := tpsize + metadatasize brick = d.NewBrickEntry(200, 1.5) diff --git a/apps/glusterfs/models.go b/apps/glusterfs/models.go index 3ecc0cbcfc..0a520802d7 100644 --- a/apps/glusterfs/models.go +++ b/apps/glusterfs/models.go @@ -172,7 +172,7 @@ func (v *VolumeInfoResponse) String() string { "Id: %v\n"+ "Cluster Id: %v\n"+ "Mount: 
%v\n"+ - "Mount Options: %v\n"+ + "Mount Options: backupvolfile-servers=%v\n"+ "Durability Type: %v\n", v.Name, v.Size, diff --git a/apps/glusterfs/volume_entry.go b/apps/glusterfs/volume_entry.go index f8aa6c76e3..74c8a08e2f 100644 --- a/apps/glusterfs/volume_entry.go +++ b/apps/glusterfs/volume_entry.go @@ -66,6 +66,10 @@ func NewVolumeEntry() *VolumeEntry { entry := &VolumeEntry{} entry.Bricks = make(sort.StringSlice, 0) + gob.Register(&ReplicaDurability{}) + gob.Register(&DisperseDurability{}) + gob.Register(&NoneDurability{}) + return entry } @@ -87,7 +91,6 @@ func NewVolumeEntryFromRequest(req *VolumeCreateRequest) *VolumeEntry { vol.Info.Id, vol.Info.Durability.Replicate.Replica) vol.Durability = &vol.Info.Durability.Replicate - gob.Register(&ReplicaDurability{}) case durability == DURABILITY_STRING_EC: logger.Debug("[%v] EC %v + %v ", @@ -95,12 +98,10 @@ func NewVolumeEntryFromRequest(req *VolumeCreateRequest) *VolumeEntry { vol.Info.Durability.Disperse.Data, vol.Info.Durability.Disperse.Redundancy) vol.Durability = &vol.Info.Durability.Disperse - gob.Register(&DisperseDurability{}) case durability == DURABILITY_STRING_DISTRIBUTE_ONLY || durability == "": logger.Debug("[%v] Distributed", vol.Info.Id, vol.Info.Durability.Replicate.Replica) vol.Durability = NewNoneDurability() - gob.Register(&NoneDurability{}) default: panic(fmt.Sprintf("BUG: Unknown type: %v\n", vol.Info.Durability)) diff --git a/client/cli/go/commands/cluster_create.go b/client/cli/go/commands/cluster_create.go index b1577a5611..e625ff0a00 100644 --- a/client/cli/go/commands/cluster_create.go +++ b/client/cli/go/commands/cluster_create.go @@ -84,7 +84,7 @@ func (c *ClusterCreateCommand) Exec(args []string) error { } fmt.Fprintf(stdout, string(data)) } else { - fmt.Fprintf(stdout, "Cluster id: %v", cluster.Id) + fmt.Fprintf(stdout, "Cluster id: %v\n", cluster.Id) } return nil diff --git a/client/cli/go/commands/cluster_list.go b/client/cli/go/commands/cluster_list.go index 
9a9d1174c1..a9395b0ea5 100644 --- a/client/cli/go/commands/cluster_list.go +++ b/client/cli/go/commands/cluster_list.go @@ -79,7 +79,7 @@ func (c *ClusterListCommand) Exec(args []string) error { fmt.Fprintf(stdout, string(data)) } else { output := strings.Join(list.Clusters, "\n") - fmt.Fprintf(stdout, "Clusters:\n%v", output) + fmt.Fprintf(stdout, "Clusters:\n%v\n", output) } return nil diff --git a/client/cli/go/commands/device_add.go b/client/cli/go/commands/device_add.go index 7f877096b7..01c459937e 100644 --- a/client/cli/go/commands/device_add.go +++ b/client/cli/go/commands/device_add.go @@ -46,7 +46,7 @@ func NewDeviceAddCommand(options *Options) *DeviceAddCommand { Add new device to node to be managed by Heketi USAGE - heketi device add [options] + heketi-cli device add [options] OPTIONS`) @@ -54,7 +54,7 @@ OPTIONS`) cmd.flags.PrintDefaults() fmt.Println(` EXAMPLES - $ heketi device add \ + $ heketi-cli device add \ -name=/dev/sdb -node=3e098cb4407d7109806bb196d9e8f095 `) diff --git a/client/cli/go/commands/load.go b/client/cli/go/commands/load.go new file mode 100644 index 0000000000..1678f38387 --- /dev/null +++ b/client/cli/go/commands/load.go @@ -0,0 +1,147 @@ +// +// Copyright (c) 2015 The heketi Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package commands + +import ( + "encoding/json" + "errors" + "flag" + "fmt" + "github.com/heketi/heketi/apps/glusterfs" + client "github.com/heketi/heketi/client/api/go-client" + "github.com/lpabon/godbc" + "os" +) + +type LoadCommand struct { + Cmd + jsonConfigFile string +} + +// Config file +type ConfigFileNode struct { + Devices []string `json:"devices"` + Node glusterfs.NodeAddRequest `json:"node"` +} +type ConfigFileCluster struct { + Nodes []ConfigFileNode `json:"nodes"` +} +type ConfigFile struct { + Clusters []ConfigFileCluster `json:"clusters"` +} + +func NewLoadCommand(options *Options) *LoadCommand { + + //require before we do any work + godbc.Require(options != nil) + + //create ClusterCommand object + cmd := &LoadCommand{} + cmd.name = "load" + cmd.options = options + + //create flags + cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError) + cmd.flags.StringVar(&cmd.jsonConfigFile, "json", "", + "\n\tConfiguration containing devices, nodes, and clusters, in"+ + "\n\tJSON format.") + + //usage on -help + cmd.flags.Usage = func() { + fmt.Println(` +Add devices to Heketi from a configuration file + +USAGE + heketi-cli load [options] + +OPTIONS`) + + //print flags + cmd.flags.PrintDefaults() + fmt.Println(` +EXAMPLES + $ heketi-cli load -json=topology.json +`) + } + + godbc.Ensure(cmd.name == "load") + + return cmd +} + +func (l *LoadCommand) Exec(args []string) error { + + // Parse args + l.flags.Parse(args) + + // Check arguments + if l.jsonConfigFile == "" { + return errors.New("Missing configuration file") + } + + // Load config file + fp, err := os.Open(l.jsonConfigFile) + if err != nil { + return errors.New("Unable to open config file") + } + defer fp.Close() + + configParser := json.NewDecoder(fp) + var topology ConfigFile + if err = configParser.Decode(&topology); err != nil { + return errors.New("Unable to parse config file") + } + + heketi := client.NewClient(l.options.Url, l.options.User, l.options.Key) + for _, cluster := range 
topology.Clusters { + + fmt.Fprintf(stdout, "Creating cluster ... ") + clusterInfo, err := heketi.ClusterCreate() + if err != nil { + return err + } + fmt.Fprintf(stdout, "ID: %v\n", clusterInfo.Id) + for _, node := range cluster.Nodes { + + fmt.Fprintf(stdout, "\tCreating node %v ... ", node.Node.Hostnames.Manage[0]) + node.Node.ClusterId = clusterInfo.Id + nodeInfo, err := heketi.NodeAdd(&node.Node) + if err != nil { + return err + } + fmt.Fprintf(stdout, "ID: %v\n", nodeInfo.Id) + + for _, device := range node.Devices { + fmt.Fprintf(stdout, "\t\tAdding device %v ... ", device) + + req := &glusterfs.DeviceAddRequest{} + req.Name = device + req.NodeId = nodeInfo.Id + req.Weight = 100 + err := heketi.DeviceAdd(req) + if err != nil { + return err + } + + fmt.Fprintf(stdout, "OK\n") + } + } + } + + return nil + +} diff --git a/client/cli/go/commands/node_add.go b/client/cli/go/commands/node_add.go index 4482c90c22..ed100ee71a 100644 --- a/client/cli/go/commands/node_add.go +++ b/client/cli/go/commands/node_add.go @@ -53,7 +53,7 @@ func NewNodeAddCommand(options *Options) *NodeAddCommand { Add new node to be managed by Heketi USAGE - heketi node add [options] + heketi-cli node add [options] OPTIONS`) @@ -61,7 +61,7 @@ OPTIONS`) cmd.flags.PrintDefaults() fmt.Println(` EXAMPLES - $ heketi node add \ + $ heketi-cli node add \ -zone=3 \ -cluster=3e098cb4407d7109806bb196d9e8f095 \ -managment-host-name=node1-manage.gluster.lab.com \ diff --git a/client/cli/go/commands/tests/sample.json b/client/cli/go/commands/tests/sample.json new file mode 100644 index 0000000000..cec5896f42 --- /dev/null +++ b/client/cli/go/commands/tests/sample.json @@ -0,0 +1,100 @@ +{ + "clusters": [ + { + "nodes": [ + { + "node": { + "hostnames": { + "manage": [ + "192.168.10.100" + ], + "storage": [ + "192.168.10.100" + ] + }, + "zone": 0 + }, + "devices": [ + "/dev/sdb", + "/dev/sdc", + "/dev/sdd", + "/dev/sde", + "/dev/sdf", + "/dev/sdg", + "/dev/sdh", + "/dev/sdi" + ] + }, + { + "node": { +
"hostnames": { + "manage": [ + "192.168.10.101" + ], + "storage": [ + "192.168.10.101" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/sdb", + "/dev/sdc", + "/dev/sdd", + "/dev/sde", + "/dev/sdf", + "/dev/sdg", + "/dev/sdh", + "/dev/sdi" + ] + }, + { + "node": { + "hostnames": { + "manage": [ + "192.168.10.102" + ], + "storage": [ + "192.168.10.102" + ] + }, + "zone": 0 + }, + "devices": [ + "/dev/sdb", + "/dev/sdc", + "/dev/sdd", + "/dev/sde", + "/dev/sdf", + "/dev/sdg", + "/dev/sdh", + "/dev/sdi" + ] + }, + { + "node": { + "hostnames": { + "manage": [ + "192.168.10.103" + ], + "storage": [ + "192.168.10.103" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/sdb", + "/dev/sdc", + "/dev/sdd", + "/dev/sde", + "/dev/sdf", + "/dev/sdg", + "/dev/sdh", + "/dev/sdi" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/client/cli/go/commands/volume_create.go b/client/cli/go/commands/volume_create.go index 81dda4b245..c4cc1d9d4d 100644 --- a/client/cli/go/commands/volume_create.go +++ b/client/cli/go/commands/volume_create.go @@ -83,7 +83,7 @@ func NewVolumeCreateCommand(options *Options) *VolumeCreateCommand { Create a GlusterFS volume USAGE - heketi volume create [options] + heketi-cli volume create [options] OPTIONS`) @@ -92,23 +92,23 @@ OPTIONS`) fmt.Println(` EXAMPLES * Create a 100GB replica 3 volume: - $ heketi volume create -size=100 + $ heketi-cli volume create -size=100 * Create a 100GB replica 3 volume specifying two specific clusters: - $ heketi volume create -size=100 \ + $ heketi-cli volume create -size=100 \ -clusters=0995098e1284ddccb46c7752d142c832,60d46d518074b13a04ce1022c8c7193c * Create a 100GB replica 2 volume with 50GB of snapshot storage: - $ heketi volume create -size=100 -snapshot-factor=1.5 -replica=2 + $ heketi-cli volume create -size=100 -snapshot-factor=1.5 -replica=2 * Create a 100GB distributed volume - $ heketi volume create -size=100 -durabilty=none + $ heketi-cli volume create -size=100 -durability=none * Create a 100GB erasure
coded 4+2 volume with 25GB snapshot storage: - $ heketi volume create -size=100 -durability=disperse -snapshot-factor=1.25 + $ heketi-cli volume create -size=100 -durability=disperse -snapshot-factor=1.25 * Create a 100GB erasure coded 8+3 volume with 25GB snapshot storage: - $ heketi volume create -size=100 -durability=disperse -snapshot-factor=1.25 \ + $ heketi-cli volume create -size=100 -durability=disperse -snapshot-factor=1.25 \ -disperse-data=8 -redundancy=3 `) } diff --git a/client/cli/go/commands/volume_expand.go b/client/cli/go/commands/volume_expand.go index efa0a44c3b..0446738d12 100644 --- a/client/cli/go/commands/volume_expand.go +++ b/client/cli/go/commands/volume_expand.go @@ -51,7 +51,7 @@ func NewVolumeExpandCommand(options *Options) *VolumeExpandCommand { Expand a volume USAGE - heketi volume expand [options] + heketi-cli volume expand [options] OPTIONS`) @@ -60,7 +60,7 @@ OPTIONS`) fmt.Println(` EXAMPLES * Add 10GB to a volume - $ heketi volume expand -volume=60d46d518074b13a04ce1022c8c7193c -expand-size=10 + $ heketi-cli volume expand -volume=60d46d518074b13a04ce1022c8c7193c -expand-size=10 `) } godbc.Ensure(cmd.name == "expand") diff --git a/client/cli/go/commands/volume_list.go b/client/cli/go/commands/volume_list.go index 11da6c21f0..13e98197b7 100644 --- a/client/cli/go/commands/volume_list.go +++ b/client/cli/go/commands/volume_list.go @@ -78,7 +78,7 @@ func (c *VolumeListCommand) Exec(args []string) error { fmt.Fprintf(stdout, string(data)) } else { output := strings.Join(list.Volumes, "\n") - fmt.Fprintf(stdout, "Volumes:\n%v", output) + fmt.Fprintf(stdout, "Volumes:\n%v\n", output) } return nil diff --git a/client/cli/go/main.go b/client/cli/go/main.go index 8eae4d09b4..4821423fbf 100644 --- a/client/cli/go/main.go +++ b/client/cli/go/main.go @@ -59,6 +59,7 @@ OPTIONS`) flag.PrintDefaults() fmt.Println(` COMMANDS + load Load topology configuration file cluster Manage a cluster (a set of storage nodes) node Manage a storage node device 
Manage devices in a node @@ -89,7 +90,7 @@ func main() { if options.Url == "" { options.Url = os.Getenv("HEKETI_CLI_SERVER") if options.Url == "" { - fmt.Fprintf(stderr, "Server must be provided") + fmt.Fprintf(stderr, "Server must be provided\n") os.Exit(3) } } @@ -110,6 +111,7 @@ func main() { commands.NewNodeCommand(&options), commands.NewDeviceCommand(&options), commands.NewVolumeCommand(&options), + commands.NewLoadCommand(&options), } // Find command