Merge pull request #466 from sapcc/manila-capacity-for-snapmirror
capacity-manila: distribute to snapmirror_capacity, too
majewsky committed May 22, 2024
2 parents 8cbaca1 + a54e748 commit a9da7af
Showing 2 changed files with 62 additions and 26 deletions.
docs/operators/config.md (10 changes: 6 additions & 4 deletions)
@@ -656,6 +656,7 @@ capacitors:
shares_per_pool: 1000
snapshots_per_share: 5
capacity_balance: 0.5
with_snapmirror: false
with_subcapacities: true
```

@@ -664,7 +665,7 @@
| `sharev2/share_networks` | Taken from identically-named configuration parameter. |
| `sharev2/shares` | Calculated as `shares_per_pool * count(pools) - share_networks`. |
| `sharev2/share_snapshots` | Calculated as `snapshots_per_share` times the above value. |
| `sharev2/share_capacity`<br>`sharev2/snapshot_capacity` | Calculated as `sum(pool.capabilities.totalCapacityGB)`, then divided among those two resources according to the `capacity_balance` (see below). |
| `sharev2/share_capacity`<br>`sharev2/snapshot_capacity`<br>`sharev2/snapmirror_capacity` | Calculated as `sum(pool.capabilities.totalCapacityGB)`, then divided among those resources according to demand and the `capacity_balance` (see below). The `snapmirror_capacity` resource is only reported if `with_snapmirror` is set (set this iff the respective quota plugin emits this resource). |
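
As a quick worked example (numbers invented for illustration): with 10 matching pools, `shares_per_pool: 1000`, `share_networks: 250` and `snapshots_per_share: 5`, the plugin would report `sharev2/shares = 10 * 1000 - 250 = 9750` and `sharev2/share_snapshots = 5 * 9750 = 48750`.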

The last four of these five resources consider only pools with the share type
that appears first in `params.share_types` (to match the behavior of the quota
@@ -691,9 +692,8 @@ marked as live. Ignored pools will still show up in the subcapacities, but their

#### Capacity balance

When pool capacity is split between the `share_capacity` and `snapshot_capacity` resources, Limes will first allocate
capacity to both resources according to the global resource demand (i.e. the usage, unused commitments, and pending
commitments, in that order, across all projects).
When pool capacity is split between the various capacity resources, Limes will first allocate capacity according to the
global resource demand (i.e. the usage, unused commitments, and pending commitments, in that order, across all projects).

At that point, if there is unallocated capacity left over, it is distributed according to the `capacity_balance`
parameter, such that
@@ -706,6 +706,8 @@ that is, there is `capacity_balance` as much extra snapshot capacity as there is
`capacity_balance = 0.5` means that the capacity for snapshots is half as big as that for shares, meaning that shares
get 2/3 of the total capacity and snapshots get the other 1/3.

The `snapmirror_capacity` resource will never get extra capacity this way. It will only get capacity to cover demand.
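
To make the numbers concrete, here is a minimal, self-contained sketch of the leftover split. All variable names and quantities are invented for this illustration; this is not the plugin's actual code.

```go
package main

import "fmt"

func main() {
	// invented example values: 900 GiB of pool capacity remain after all
	// demand (usage, unused commitments, pending commitments) is covered
	leftover := 900.0
	capacityBalance := 0.5 // from the `capacity_balance` config parameter

	// snapshots get `capacity_balance` times as much of the leftover as
	// shares; snapmirror_capacity gets none of it
	shareExtra := leftover / (1 + capacityBalance) // 600 GiB (2/3)
	snapshotExtra := capacityBalance * shareExtra  // 300 GiB (1/3)
	snapmirrorExtra := 0.0

	fmt.Println(shareExtra, snapshotExtra, snapmirrorExtra)
}
```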

### `manual`

```yaml
internal/plugins/capacity_manila.go (78 changes: 56 additions & 22 deletions)
@@ -46,6 +46,7 @@ type capacityManilaPlugin struct {
SharesPerPool uint64 `yaml:"shares_per_pool"`
SnapshotsPerShare uint64 `yaml:"snapshots_per_share"`
CapacityBalance float64 `yaml:"capacity_balance"`
WithSnapmirror bool `yaml:"with_snapmirror"`
WithSubcapacities bool `yaml:"with_subcapacities"`
// connections
ManilaV2 *gophercloud.ServiceClient `yaml:"-"`
@@ -154,14 +155,34 @@ func (p *capacityManilaPlugin) Scrape(backchannel core.CapacityPluginBackchannel
snapshotCapacityDemand[az] = snapshotOvercommitFactor.ApplyInReverseToDemand(demand)
}

capForType, err := p.scrapeForShareType(shareType, azForServiceHost, allAZs, shareCapacityDemand, snapshotCapacityDemand)
var snapmirrorCapacityDemand map[limes.AvailabilityZone]core.ResourceDemand
if p.WithSnapmirror {
snapmirrorCapacityDemand, err = backchannel.GetGlobalResourceDemand("sharev2", p.makeResourceName("snapmirror_capacity", shareType))
if err != nil {
return nil, nil, err
}
snapmirrorOvercommitFactor, err := backchannel.GetOvercommitFactor("sharev2", p.makeResourceName("snapmirror_capacity", shareType))
if err != nil {
return nil, nil, err
}
for az, demand := range snapmirrorCapacityDemand {
snapmirrorCapacityDemand[az] = snapmirrorOvercommitFactor.ApplyInReverseToDemand(demand)
}
}
// ^ NOTE: If p.WithSnapmirror is false, `snapmirrorCapacityDemand[az]` is always zero-valued
// and thus no capacity will be allocated to the snapmirror_capacity resource.

capForType, err := p.scrapeForShareType(shareType, azForServiceHost, allAZs, shareCapacityDemand, snapshotCapacityDemand, snapmirrorCapacityDemand)
if err != nil {
return nil, nil, err
}
caps[p.makeResourceName("shares", shareType)] = capForType.Shares
caps[p.makeResourceName("share_snapshots", shareType)] = capForType.Snapshots
caps[p.makeResourceName("share_capacity", shareType)] = capForType.ShareGigabytes
caps[p.makeResourceName("snapshot_capacity", shareType)] = capForType.SnapshotGigabytes
if p.WithSnapmirror {
caps[p.makeResourceName("snapmirror_capacity", shareType)] = capForType.SnapmirrorGigabytes
}
}
return map[limes.ServiceType]map[limesresources.ResourceName]core.PerAZ[core.CapacityData]{"sharev2": caps}, nil, nil
}
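
For readers unfamiliar with the backchannel calls above: the presumed purpose of `ApplyInReverseToDemand` is to translate demand expressed in overcommitted (effective) units back into raw pool capacity before it is compared against pool sizes. The sketch below illustrates that assumption only; the struct and methods are simplified stand-ins, not the real `core` types.

```go
package main

import "fmt"

// resourceDemand is a simplified stand-in for the demand data returned by
// the backchannel (usage, unused commitments, pending commitments).
type resourceDemand struct {
	Usage              uint64
	UnusedCommitments  uint64
	PendingCommitments uint64
}

// overcommitFactor is a simplified stand-in; a zero value means that no
// overcommit is configured.
type overcommitFactor float64

func (f overcommitFactor) applyInReverse(value uint64) uint64 {
	if f == 0 {
		return value
	}
	return uint64(float64(value) / float64(f))
}

// applyInReverseToDemand converts every demand component from effective
// (overcommitted) units back into raw capacity units.
func (f overcommitFactor) applyInReverseToDemand(d resourceDemand) resourceDemand {
	return resourceDemand{
		Usage:              f.applyInReverse(d.Usage),
		UnusedCommitments:  f.applyInReverse(d.UnusedCommitments),
		PendingCommitments: f.applyInReverse(d.PendingCommitments),
	}
}

func main() {
	demand := resourceDemand{Usage: 300, UnusedCommitments: 100, PendingCommitments: 50}
	fmt.Println(overcommitFactor(2.0).applyInReverseToDemand(demand)) // {150 50 25}
}
```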
@@ -178,17 +199,19 @@ func (p *capacityManilaPlugin) CollectMetrics(ch chan<- prometheus.Metric, seria
}

type capacityForShareType struct {
Shares core.PerAZ[core.CapacityData]
Snapshots core.PerAZ[core.CapacityData]
ShareGigabytes core.PerAZ[core.CapacityData]
SnapshotGigabytes core.PerAZ[core.CapacityData]
Shares core.PerAZ[core.CapacityData]
Snapshots core.PerAZ[core.CapacityData]
ShareGigabytes core.PerAZ[core.CapacityData]
SnapshotGigabytes core.PerAZ[core.CapacityData]
SnapmirrorGigabytes core.PerAZ[core.CapacityData]
}

type azCapacityForShareType struct {
Shares core.CapacityData
Snapshots core.CapacityData
ShareGigabytes core.CapacityData
SnapshotGigabytes core.CapacityData
Shares core.CapacityData
Snapshots core.CapacityData
ShareGigabytes core.CapacityData
SnapshotGigabytes core.CapacityData
SnapmirrorGigabytes core.CapacityData
}

type poolsListDetailOpts struct {
@@ -203,7 +226,7 @@ func (opts poolsListDetailOpts) ToPoolsListQuery() (string, error) {
return q.String(), err
}

func (p *capacityManilaPlugin) scrapeForShareType(shareType ManilaShareTypeSpec, azForServiceHost map[string]limes.AvailabilityZone, allAZs []limes.AvailabilityZone, shareCapacityDemand, snapshotCapacityDemand map[limes.AvailabilityZone]core.ResourceDemand) (capacityForShareType, error) {
func (p *capacityManilaPlugin) scrapeForShareType(shareType ManilaShareTypeSpec, azForServiceHost map[string]limes.AvailabilityZone, allAZs []limes.AvailabilityZone, shareCapacityDemand, snapshotCapacityDemand, snapmirrorCapacityDemand map[limes.AvailabilityZone]core.ResourceDemand) (capacityForShareType, error) {
// list all pools for the Manila share types corresponding to this virtual share type
allPoolsByAZ := make(map[limes.AvailabilityZone][]*manilaPool)
for _, stName := range getAllManilaShareTypes(shareType) {
@@ -233,23 +256,25 @@ func (p *capacityManilaPlugin) scrapeForShareType(shareType ManilaShareTypeSpec,

// the following computations are performed for each AZ separately
result := capacityForShareType{
Shares: make(core.PerAZ[core.CapacityData]),
Snapshots: make(core.PerAZ[core.CapacityData]),
ShareGigabytes: make(core.PerAZ[core.CapacityData]),
SnapshotGigabytes: make(core.PerAZ[core.CapacityData]),
Shares: make(core.PerAZ[core.CapacityData]),
Snapshots: make(core.PerAZ[core.CapacityData]),
ShareGigabytes: make(core.PerAZ[core.CapacityData]),
SnapshotGigabytes: make(core.PerAZ[core.CapacityData]),
SnapmirrorGigabytes: make(core.PerAZ[core.CapacityData]),
}
for az, azPools := range allPoolsByAZ {
azResult := p.scrapeForShareTypeAndAZ(shareType, uint64(len(allAZs)), az, azPools, shareCapacityDemand[az], snapshotCapacityDemand[az])
azResult := p.scrapeForShareTypeAndAZ(shareType, uint64(len(allAZs)), az, azPools, shareCapacityDemand[az], snapshotCapacityDemand[az], snapmirrorCapacityDemand[az])
result.Shares[az] = &azResult.Shares
result.Snapshots[az] = &azResult.Snapshots
result.ShareGigabytes[az] = &azResult.ShareGigabytes
result.SnapshotGigabytes[az] = &azResult.SnapshotGigabytes
result.SnapmirrorGigabytes[az] = &azResult.SnapmirrorGigabytes
}

return result, nil
}

func (p *capacityManilaPlugin) scrapeForShareTypeAndAZ(shareType ManilaShareTypeSpec, azCount uint64, az limes.AvailabilityZone, pools []*manilaPool, shareCapacityDemand, snapshotCapacityDemand core.ResourceDemand) azCapacityForShareType {
func (p *capacityManilaPlugin) scrapeForShareTypeAndAZ(shareType ManilaShareTypeSpec, azCount uint64, az limes.AvailabilityZone, pools []*manilaPool, shareCapacityDemand, snapshotCapacityDemand, snapmirrorCapacityDemand core.ResourceDemand) azCapacityForShareType {
// count pools and sum their capacities if they are included
var (
poolCount uint64
@@ -281,13 +306,15 @@ func (p *capacityManilaPlugin) scrapeForShareTypeAndAZ(shareType ManilaShareType
// distribute capacity and usage between the various resource types
logg.Debug("distributing capacity for share_type %q, AZ %q", shareType.Name, az)
distributedCapacityGiB := p.distributeByDemand(uint64(totalCapacityGB), map[string]core.ResourceDemand{
"shares": shareCapacityDemand,
"snapshots": snapshotCapacityDemand,
"shares": shareCapacityDemand,
"snapshots": snapshotCapacityDemand,
"snapmirrors": snapmirrorCapacityDemand,
})
logg.Debug("distributing usage for share_type %q, AZ %q", shareType.Name, az)
distributedUsageGiB := p.distributeByDemand(uint64(allocatedCapacityGB), map[string]core.ResourceDemand{
"shares": {Usage: shareCapacityDemand.Usage},
"snapshots": {Usage: snapshotCapacityDemand.Usage},
"shares": {Usage: shareCapacityDemand.Usage},
"snapshots": {Usage: snapshotCapacityDemand.Usage},
"snapmirrors": {Usage: snapmirrorCapacityDemand.Usage},
})
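
The `distributeByDemand` helper itself is outside this diff. Conceptually it hands out the available total in demand order, usage first, then unused commitments, then pending commitments, before any `capacity_balance`-based split of the remainder; the snapmirror entry simply participates with its own demand. The following is a hedged sketch of that idea under simplified stand-in types, not the actual implementation.

```go
package main

import "fmt"

// resourceDemand is a simplified stand-in for the demand data used above.
type resourceDemand struct {
	Usage, UnusedCommitments, PendingCommitments uint64
}

// sketchDistribute is a hypothetical stand-in for distributeByDemand: it
// grants each resource its usage first, then its unused commitments, then
// its pending commitments, never handing out more than `total` overall.
// (Tie-breaking within one priority level is ignored for simplicity.)
func sketchDistribute(total uint64, demands map[string]resourceDemand) map[string]uint64 {
	result := make(map[string]uint64, len(demands))
	grant := func(want func(resourceDemand) uint64) {
		for name, d := range demands {
			amount := want(d)
			if amount > total {
				amount = total
			}
			result[name] += amount
			total -= amount
		}
	}
	grant(func(d resourceDemand) uint64 { return d.Usage })
	grant(func(d resourceDemand) uint64 { return d.UnusedCommitments })
	grant(func(d resourceDemand) uint64 { return d.PendingCommitments })
	return result
}

func main() {
	demands := map[string]resourceDemand{
		"shares":      {Usage: 400, UnusedCommitments: 200, PendingCommitments: 100},
		"snapshots":   {Usage: 100, UnusedCommitments: 50},
		"snapmirrors": {Usage: 50},
	}
	// total demand (900) fits into the total (1000), so every resource is
	// fully covered: shares=700, snapshots=150, snapmirrors=50
	fmt.Println(sketchDistribute(1000, demands))
}
```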

// build overall result
@@ -306,12 +333,19 @@ func (p *capacityManilaPlugin) scrapeForShareTypeAndAZ(shareType ManilaShareType
Capacity: distributedCapacityGiB["snapshots"],
Usage: p2u64(distributedUsageGiB["snapshots"]),
}
result.SnapmirrorGigabytes = core.CapacityData{
Capacity: distributedCapacityGiB["snapmirrors"],
Usage: p2u64(distributedUsageGiB["snapmirrors"]),
}

// render subcapacities (these are not split between share_capacity and
// snapshot_capacity because that quickly turns into an algorithmic
// nightmare, and we have no demand (pun intended) for that right now)
for _, pool := range pools {
if p.WithSubcapacities {
if p.WithSubcapacities {
slices.SortFunc(pools, func(lhs, rhs *manilaPool) int {
return strings.Compare(lhs.Name, rhs.Name)
})
for _, pool := range pools {
subcapacity := storagePoolSubcapacity{
PoolName: pool.Name,
AvailabilityZone: az,
