Merge pull request #205 from lpabon/cliadd_pr
Heketi CLI can now load JSON configuration
Luis Pabón committed Sep 28, 2015
2 parents ae8d990 + 42239a8 commit 3f4a5b1
Showing 14 changed files with 285 additions and 26 deletions.
12 changes: 10 additions & 2 deletions apps/glusterfs/device_entry.go
@@ -210,17 +210,25 @@ func (d *DeviceEntry) SetExtentSize(amount uint64) {
// id to the list.
func (d *DeviceEntry) NewBrickEntry(amount uint64, snapFactor float64) *BrickEntry {

	// :TODO: This needs unit test

	// Calculate thinpool size
	tpsize := uint64(float64(amount) * snapFactor)

	// Align tpsize to extent
-	tpsize += tpsize % d.ExtentSize
+	alignment := tpsize % d.ExtentSize
+	if alignment != 0 {
+		tpsize += d.ExtentSize - alignment
+	}

	// Determine if we need to allocate space for the metadata
	metadataSize := d.poolMetadataSize(tpsize)

	// Align to extent
-	metadataSize += metadataSize % d.ExtentSize
+	alignment = metadataSize % d.ExtentSize
+	if alignment != 0 {
+		metadataSize += d.ExtentSize - alignment
+	}

	// Total required size
	total := tpsize + metadataSize
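The two hunks above replace the old one-liner, which added the remainder (tpsize % d.ExtentSize) back onto the size and therefore produced an extent-aligned result only by coincidence, with a proper round-up to the next extent boundary. A minimal standalone sketch of the corrected pattern; alignUp is a hypothetical helper for illustration, not a function in this repository:

package main

import "fmt"

// alignUp rounds size up to the next multiple of extent, leaving
// already-aligned values unchanged. This is the same arithmetic the
// fixed hunks perform with d.ExtentSize.
func alignUp(size, extent uint64) uint64 {
	if r := size % extent; r != 0 {
		size += extent - r
	}
	return size
}

func main() {
	fmt.Println(alignUp(301, 8)) // 304: rounded up to a multiple of 8
	fmt.Println(alignUp(304, 8)) // 304: already aligned, unchanged
	fmt.Println(301 + 301%8)     // 306: the old expression, not a multiple of 8
}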
7 changes: 4 additions & 3 deletions apps/glusterfs/device_entry_test.go
@@ -119,16 +119,17 @@ func TestDeviceEntryNewBrickEntry(t *testing.T) {
	// --- Now check with a real value ---

	// Check newly created brick
-	tpsize := uint64(200 * 1.5)
+	size := 201
+	tpsize := uint64(float32(size) * 1.5)

	// Alignment
-	tpsize += tpsize % 8
+	tpsize += d.ExtentSize - (tpsize % d.ExtentSize)

	// Calculate metadatasize
	metadatasize := d.poolMetadataSize(tpsize)

	// Alignment
-	metadatasize += metadatasize % 8
+	metadatasize += d.ExtentSize - (metadatasize % d.ExtentSize)
	total := tpsize + metadatasize

	brick = d.NewBrickEntry(200, 1.5)
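The switch from a literal 200 to size := 201 appears deliberate: uint64(float32(200) * 1.5) is 300, and under the old buggy expression 300 + (300 % 8) = 304 happens to be a multiple of 8, so the test could not distinguish the broken alignment from the correct one. With 201 the thinpool size is uint64(301.5) = 301; the old expression would give 301 + 5 = 306 (misaligned), while the corrected round-up gives 301 + (8 - 5) = 304.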
2 changes: 1 addition & 1 deletion apps/glusterfs/models.go
@@ -172,7 +172,7 @@ func (v *VolumeInfoResponse) String() string {
"Id: %v\n"+
"Cluster Id: %v\n"+
"Mount: %v\n"+
"Mount Options: %v\n"+
"Mount Options: backupvolfile-servers=%v\n"+
"Durability Type: %v\n",
v.Name,
v.Size,
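With the label folded into the format string, the volume info output now reads along the lines of (hosts illustrative, not from the source): Mount Options: backupvolfile-servers=192.168.10.101,192.168.10.102 — that is, the value printed is the list of fallback volfile servers a GlusterFS client can fall back to if the primary mount host is unreachable.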
7 changes: 4 additions & 3 deletions apps/glusterfs/volume_entry.go
@@ -66,6 +66,10 @@ func NewVolumeEntry() *VolumeEntry {
	entry := &VolumeEntry{}
	entry.Bricks = make(sort.StringSlice, 0)

+	gob.Register(&ReplicaDurability{})
+	gob.Register(&DisperseDurability{})
+	gob.Register(&NoneDurability{})

	return entry
}

@@ -87,20 +91,17 @@ func NewVolumeEntryFromRequest(req *VolumeCreateRequest) *VolumeEntry {
		vol.Info.Id,
		vol.Info.Durability.Replicate.Replica)
	vol.Durability = &vol.Info.Durability.Replicate
-	gob.Register(&ReplicaDurability{})

case durability == DURABILITY_STRING_EC:
	logger.Debug("[%v] EC %v + %v ",
		vol.Info.Id,
		vol.Info.Durability.Disperse.Data,
		vol.Info.Durability.Disperse.Redundancy)
	vol.Durability = &vol.Info.Durability.Disperse
-	gob.Register(&DisperseDurability{})

case durability == DURABILITY_STRING_DISTRIBUTE_ONLY || durability == "":
	logger.Debug("[%v] Distributed", vol.Info.Id, vol.Info.Durability.Replicate.Replica)
	vol.Durability = NewNoneDurability()
-	gob.Register(&NoneDurability{})

default:
	panic(fmt.Sprintf("BUG: Unknown type: %v\n", vol.Info.Durability))
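Moving the three gob.Register calls into NewVolumeEntry means the concrete durability types are registered with encoding/gob on every construction path, not only when a volume is built from a create request; a process that merely decodes stored entries would otherwise fail, since gob cannot encode or decode an interface-typed field unless the concrete type behind it was registered first. A minimal standalone sketch of the pattern, using hypothetical stand-in types rather than Heketi's own:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Stand-ins for Heketi's durability interface and ReplicaDurability.
type Durability interface{ BricksPerSet() int }

type Replica struct{ Copies int }

func (r *Replica) BricksPerSet() int { return r.Copies }

type Volume struct {
	Name       string
	Durability Durability // interface field: gob needs the concrete type
}

func main() {
	// Without this call, Encode returns a
	// "type not registered for interface" error.
	gob.Register(&Replica{})

	var buf bytes.Buffer
	v := Volume{Name: "vol1", Durability: &Replica{Copies: 3}}
	if err := gob.NewEncoder(&buf).Encode(&v); err != nil {
		panic(err)
	}

	var out Volume
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Durability.BricksPerSet()) // vol1 3
}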
2 changes: 1 addition & 1 deletion client/cli/go/commands/cluster_create.go
@@ -84,7 +84,7 @@ func (c *ClusterCreateCommand) Exec(args []string) error {
		}
		fmt.Fprintf(stdout, string(data))
	} else {
-		fmt.Fprintf(stdout, "Cluster id: %v", cluster.Id)
+		fmt.Fprintf(stdout, "Cluster id: %v\n", cluster.Id)
	}

	return nil
2 changes: 1 addition & 1 deletion client/cli/go/commands/cluster_list.go
@@ -79,7 +79,7 @@ func (c *ClusterListCommand) Exec(args []string) error {
		fmt.Fprintf(stdout, string(data))
	} else {
		output := strings.Join(list.Clusters, "\n")
-		fmt.Fprintf(stdout, "Clusters:\n%v", output)
+		fmt.Fprintf(stdout, "Clusters:\n%v\n", output)
	}

	return nil
4 changes: 2 additions & 2 deletions client/cli/go/commands/device_add.go
@@ -46,15 +46,15 @@ func NewDeviceAddCommand(options *Options) *DeviceAddCommand {
Add new device to node to be managed by Heketi
USAGE
-  heketi device add [options]
+  heketi-cli device add [options]
OPTIONS`)

	//print flags
	cmd.flags.PrintDefaults()
	fmt.Println(`
EXAMPLES
-  $ heketi device add \
+  $ heketi-cli device add \
      -name=/dev/sdb
      -node=3e098cb4407d7109806bb196d9e8f095
`)
147 changes: 147 additions & 0 deletions client/cli/go/commands/load.go
@@ -0,0 +1,147 @@
//
// Copyright (c) 2015 The heketi Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package commands

import (
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/heketi/heketi/apps/glusterfs"
client "github.com/heketi/heketi/client/api/go-client"
"github.com/lpabon/godbc"
"os"
)

type LoadCommand struct {
	Cmd
	jsonConfigFile string
}

// Config file
type ConfigFileNode struct {
	Devices []string                 `json:"devices"`
	Node    glusterfs.NodeAddRequest `json:"node"`
}
type ConfigFileCluster struct {
	Nodes []ConfigFileNode `json:"nodes"`
}
type ConfigFile struct {
	Clusters []ConfigFileCluster `json:"clusters"`
}

func NewLoadCommand(options *Options) *LoadCommand {

	//require before we do any work
	godbc.Require(options != nil)

	//create LoadCommand object
	cmd := &LoadCommand{}
	cmd.name = "load"
	cmd.options = options

	//create flags
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.StringVar(&cmd.jsonConfigFile, "json", "",
		"\n\tConfiguration containing devices, nodes, and clusters, in"+
			"\n\tJSON format.")

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Add devices to Heketi from a configuration file
USAGE
  heketi-cli load [options]
OPTIONS`)

		//print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  $ heketi-cli load -json=topology.json
`)
	}

	godbc.Ensure(cmd.name == "load")

	return cmd
}

func (l *LoadCommand) Exec(args []string) error {

	// Parse args
	l.flags.Parse(args)

	// Check arguments
	if l.jsonConfigFile == "" {
		return errors.New("Missing configuration file")
	}

	// Load config file
	fp, err := os.Open(l.jsonConfigFile)
	if err != nil {
		return errors.New("Unable to open config file")
	}
	defer fp.Close()

	configParser := json.NewDecoder(fp)
	var topology ConfigFile
	if err = configParser.Decode(&topology); err != nil {
		return errors.New("Unable to parse config file")
	}

	heketi := client.NewClient(l.options.Url, l.options.User, l.options.Key)
	for _, cluster := range topology.Clusters {

		fmt.Fprintf(stdout, "Creating cluster ... ")
		clusterInfo, err := heketi.ClusterCreate()
		if err != nil {
			return err
		}
		fmt.Fprintf(stdout, "ID: %v\n", clusterInfo.Id)

		for _, node := range cluster.Nodes {

			fmt.Fprintf(stdout, "\tCreating node %v ... ", node.Node.Hostnames.Manage[0])
			node.Node.ClusterId = clusterInfo.Id
			nodeInfo, err := heketi.NodeAdd(&node.Node)
			if err != nil {
				return err
			}
			fmt.Fprintf(stdout, "ID: %v\n", nodeInfo.Id)

			for _, device := range node.Devices {
				fmt.Fprintf(stdout, "\t\tAdding device %v ... ", device)

				req := &glusterfs.DeviceAddRequest{}
				req.Name = device
				req.NodeId = nodeInfo.Id
				req.Weight = 100
				err := heketi.DeviceAdd(req)
				if err != nil {
					return err
				}

				fmt.Fprintf(stdout, "OK\n")
			}
		}
	}

	return nil
}
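Given the Fprintf calls above, a successful run walks the topology and prints progress as it goes. Roughly, with the IDs as illustrative placeholders rather than real output:

$ heketi-cli load -json=topology.json
Creating cluster ... ID: <cluster id>
	Creating node 192.168.10.100 ... ID: <node id>
		Adding device /dev/sdb ... OK
		Adding device /dev/sdc ... OK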
4 changes: 2 additions & 2 deletions client/cli/go/commands/node_add.go
@@ -53,15 +53,15 @@ func NewNodeAddCommand(options *Options) *NodeAddCommand {
Add new node to be managed by Heketi
USAGE
-  heketi node add [options]
+  heketi-cli node add [options]
OPTIONS`)

	//print flags
	cmd.flags.PrintDefaults()
	fmt.Println(`
EXAMPLES
-  $ heketi node add \
+  $ heketi-cli node add \
      -zone=3 \
      -cluster=3e098cb4407d7109806bb196d9e8f095 \
      -managment-host-name=node1-manage.gluster.lab.com \
100 changes: 100 additions & 0 deletions client/cli/go/commands/tests/sample.json
@@ -0,0 +1,100 @@
{
    "clusters": [
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.10.100"
                            ],
                            "storage": [
                                "192.168.10.100"
                            ]
                        },
                        "zone": 0
                    },
                    "devices": [
                        "/dev/sdb",
                        "/dev/sdc",
                        "/dev/sdd",
                        "/dev/sde",
                        "/dev/sdf",
                        "/dev/sdg",
                        "/dev/sdh",
                        "/dev/sdi"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.10.101"
                            ],
                            "storage": [
                                "192.168.10.101"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/sdb",
                        "/dev/sdc",
                        "/dev/sdd",
                        "/dev/sde",
                        "/dev/sdf",
                        "/dev/sdg",
                        "/dev/sdh",
                        "/dev/sdi"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.10.102"
                            ],
                            "storage": [
                                "192.168.10.102"
                            ]
                        },
                        "zone": 0
                    },
                    "devices": [
                        "/dev/sdb",
                        "/dev/sdc",
                        "/dev/sdd",
                        "/dev/sde",
                        "/dev/sdf",
                        "/dev/sdg",
                        "/dev/sdh",
                        "/dev/sdi"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.10.103"
                            ],
                            "storage": [
                                "192.168.10.103"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/sdb",
                        "/dev/sdc",
                        "/dev/sdd",
                        "/dev/sde",
                        "/dev/sdf",
                        "/dev/sdg",
                        "/dev/sdh",
                        "/dev/sdi"
                    ]
                }
            ]
        }
    ]
}
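The sample maps one-to-one onto the ConfigFile, ConfigFileCluster, and ConfigFileNode structs in load.go. A minimal sketch of the decode path with local stand-in structs (the real code embeds glusterfs.NodeAddRequest for the "node" object):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Local stand-ins mirroring load.go's config structs.
type node struct {
	Node struct {
		Hostnames struct {
			Manage  []string `json:"manage"`
			Storage []string `json:"storage"`
		} `json:"hostnames"`
		Zone int `json:"zone"`
	} `json:"node"`
	Devices []string `json:"devices"`
}

type topology struct {
	Clusters []struct {
		Nodes []node `json:"nodes"`
	} `json:"clusters"`
}

func main() {
	fp, err := os.Open("sample.json") // assumes the file above is in the working directory
	if err != nil {
		panic(err)
	}
	defer fp.Close()

	var t topology
	if err := json.NewDecoder(fp).Decode(&t); err != nil {
		panic(err)
	}
	// For the sample above: 1 cluster, 4 nodes, 8 devices per node.
	fmt.Println(len(t.Clusters),
		len(t.Clusters[0].Nodes),
		len(t.Clusters[0].Nodes[0].Devices))
}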
