/
create.go
107 lines (90 loc) · 3.12 KB
/
create.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
/*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
zfs "github.com/openebs/maya/pkg/zfs/cmd/v1alpha1"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Create will create the pool for given csp object
func Create(csp *apis.CStorPoolInstance) error {
	// Refuse to create if any disk already carries a pool config that
	// failed to import — creating on top of it would be destructive.
	ret, notImported, err := checkIfPoolNotImported(csp)
	if err != nil {
		return errors.Errorf("Failed to check not imported pool %s", err.Error())
	}
	if notImported {
		return errors.Errorf("Pool {%s} is in faulty state.. %s", PoolName(csp), ret)
	}

	klog.Infof("Creating a pool for %s %s", csp.Name, PoolName(csp))

	// The pool is built in two phases. For a target layout such as
	//   zpool create newpool mirror v0 v1 mirror v2 v3 log mirror v4 v5
	// we execute:
	//   1. zpool create newpool mirror v0 v1   (the main raid group)
	//   2. zpool add newpool log mirror v4 v5  (every remaining group)
	//   3. zpool add newpool mirror v2 v3
	// TODO, IsWriteCache, IsSpare, IsReadCache should be disable for actual pool?
	spec := csp.Spec.DeepCopy()
	groups := spec.RaidGroups

	// Locate the first plain (non-cache, non-spare) raid group and
	// create the pool from it; remember its index so it is skipped below.
	mainIdx := -1
	for idx, grp := range groups {
		if grp.IsReadCache || grp.IsSpare || grp.IsWriteCache {
			continue
		}
		if cerr := createPool(csp, grp); cerr != nil {
			return errors.Errorf("Failed to create pool {%s} : %s",
				PoolName(csp), cerr.Error())
		}
		mainIdx = idx
		break
	}

	// Pool exists now; attach all remaining raid groups to it,
	// accumulating any per-group failures into a single error.
	for idx, grp := range groups {
		if idx == mainIdx {
			continue
		}
		if e := addRaidGroup(csp, grp); e != nil {
			err = ErrorWrapf(err, "Failed to add raidGroup{%#v}.. %s", grp, e.Error())
		}
	}
	return err
}
// createPool runs "zpool create" for the given raid group, using the
// group's own type (or the pool-config default when unset) and the
// resolved device paths of its block devices as the vdev list.
func createPool(csp *apis.CStorPoolInstance, r apis.RaidGroup) error {
	raidType := r.Type
	if len(raidType) == 0 {
		// Raid group did not specify a type; fall back to the
		// default configured at the pool level.
		raidType = csp.Spec.PoolConfig.DefaultRaidGroupType
	}

	paths, err := getPathForBdevList(r.BlockDevices)
	if err != nil {
		return errors.Errorf("Failed to get list of disk-path : %s", err.Error())
	}

	// Each entry appears to hold one or more candidate paths per block
	// device; the first one is used as the vdev.
	// NOTE(review): assumes every entry is non-empty — getPathForBdevList
	// is expected to guarantee that; confirm, else this panics.
	var vdevs []string
	for _, p := range paths {
		vdevs = append(vdevs, p[0])
	}

	out, err := zfs.NewPoolCreate().
		WithType(raidType).
		WithProperty("cachefile", csp.Spec.PoolConfig.CacheFile).
		WithPool(PoolName(csp)).
		WithVdevList(vdevs).
		Execute()
	if err != nil {
		// Include the command output alongside the error for diagnosis.
		return errors.Errorf("Failed to create pool.. %s .. %s", string(out), err.Error())
	}
	return nil
}