Skip to content

Commit

Permalink
Fixed shared gigabytes quota resource.
Browse files Browse the repository at this point in the history
The shared gigabytes resource between volumes and snapshots wasn't
working properly.  The issue was that on update/sync the action item
(volumes or snapshots) would update the resource usages based only on
its own particular item.

This patch fixes that, and makes the total gigabytes truly shared
between volumes and snapshots.

Fixes bug: 1159489

Change-Id: Ib1be9788f0beb4f94d010e4f816f9f3393371205
(cherry picked from commit d6935af)
  • Loading branch information
j-griffith committed Mar 24, 2013
1 parent 18e2c35 commit 6a29bda
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 48 deletions.
3 changes: 2 additions & 1 deletion cinder/quota.py
Expand Up @@ -749,7 +749,8 @@ def _sync_snapshots(context, project_id, session):
resources = [
ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
ReservableResource('snapshots', _sync_snapshots, 'quota_snapshots'),
ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'), ]
ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
ReservableResource('gigabytes', _sync_snapshots, 'quota_gigabytes'), ]


QUOTAS.register_resources(resources)
99 changes: 52 additions & 47 deletions cinder/volume/api.py
Expand Up @@ -154,19 +154,21 @@ def as_int(s):
def _consumed(name):
    # Total quota consumption for resource *name*: committed usage plus
    # outstanding reservations.  ``usages`` is a free variable from the
    # enclosing quota-check scope (result of a quota reservation attempt).
    return (usages[name]['reserved'] + usages[name]['in_use'])

pid = context.project_id
if 'gigabytes' in overs:
consumed = _consumed('gigabytes')
quota = quotas['gigabytes']
LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
"%(size)sG volume (%(consumed)dG of %(quota)dG "
"already consumed)") % locals())
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': size,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota()
elif 'volumes' in overs:
consumed = _consumed('volumes')
LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
"volume (%(consumed)d volumes "
"already consumed)") % locals())
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"volume (%(d_consumed)d volumes"
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed('volumes')})
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])

if availability_zone is None:
Expand Down Expand Up @@ -238,15 +240,15 @@ def _cast_create_volume(self, context, request_spec, filter_properties):
volume_ref = self.db.volume_update(context, volume_id, values)

# bypass scheduler and send request directly to volume
self.volume_rpcapi.create_volume(context,
volume_ref,
volume_ref['host'],
request_spec=request_spec,
filter_properties=
filter_properties,
allow_reschedule=False,
snapshot_id=snapshot_id,
image_id=image_id)
self.volume_rpcapi.create_volume(
context,
volume_ref,
volume_ref['host'],
request_spec=request_spec,
filter_properties=filter_properties,
allow_reschedule=False,
snapshot_id=snapshot_id,
image_id=image_id)
elif source_volid:
source_volume_ref = self.db.volume_get(context,
source_volid)
Expand All @@ -255,25 +257,25 @@ def _cast_create_volume(self, context, request_spec, filter_properties):
volume_ref = self.db.volume_update(context, volume_id, values)

# bypass scheduler and send request directly to volume
self.volume_rpcapi.create_volume(context,
volume_ref,
volume_ref['host'],
request_spec=request_spec,
filter_properties=
filter_properties,
allow_reschedule=False,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid)
self.volume_rpcapi.create_volume(
context,
volume_ref,
volume_ref['host'],
request_spec=request_spec,
filter_properties=filter_properties,
allow_reschedule=False,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid)
else:
self.scheduler_rpcapi.create_volume(context,
FLAGS.volume_topic,
volume_id,
snapshot_id,
image_id,
request_spec=request_spec,
filter_properties=
filter_properties)
self.scheduler_rpcapi.create_volume(
context,
FLAGS.volume_topic,
volume_id,
snapshot_id,
image_id,
request_spec=request_spec,
filter_properties=filter_properties)

@wrap_check_policy
def delete(self, context, volume, force=False):
Expand Down Expand Up @@ -514,19 +516,22 @@ def _create_snapshot(self, context,
def _consumed(name):
    # Total quota consumption for resource *name*: committed usage plus
    # outstanding reservations.  ``usages`` is a free variable from the
    # enclosing snapshot quota-check scope.
    return (usages[name]['reserved'] + usages[name]['in_use'])

pid = context.project_id
if 'gigabytes' in overs:
consumed = _consumed('gigabytes')
quota = quotas['gigabytes']
LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
"%(size)sG volume (%(consumed)dG of %(quota)dG "
"already consumed)") % locals())
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota()
elif 'snapshots' in overs:
consumed = _consumed('snapshots')
LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
"snapshot (%(consumed)d snapshots "
"already consumed)") % locals())
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed)")

LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed('snapshots')})
raise exception.SnapshotLimitExceeded(
allowed=quotas['snapshots'])

Expand Down

0 comments on commit 6a29bda

Please sign in to comment.