diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index 0d340bc..37e81b0 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -165,6 +165,7 @@ class API(base.Base):
         volume = self.db.volume_create(context, options)
         self._cast_create_volume(context, volume['id'], snapshot_id,
                                  image_id)
+        QUOTAS.commit(context, reservations)
         return volume
 
     def _cast_create_volume(self, context, volume_id, snapshot_id, image_id):
@@ -198,23 +199,28 @@ class API(base.Base):
 
     @wrap_check_policy
     def delete(self, context, volume, force=False):
+        reservations = QUOTAS.reserve(context, volumes=-1, gigabytes=-volume['size'])
         volume_id = volume['id']
         if not volume['host']:
             # NOTE(vish): scheduling failed, so delete it
             self.db.volume_destroy(context, volume_id)
+            QUOTAS.commit(context, reservations)
             return
         if not force and volume['status'] not in ["available", "error"]:
+            QUOTAS.rollback(context, reservations)
             msg = _("Volume status must be available or error")
             raise exception.InvalidVolume(reason=msg)
 
         snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
         if len(snapshots):
+            QUOTAS.rollback(context, reservations)
             msg = _("Volume still has %d dependent snapshots") % len(snapshots)
             raise exception.InvalidVolume(reason=msg)
 
         now = timeutils.utcnow()
         self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                    'terminated_at': now})
+        QUOTAS.commit(context, reservations)
         host = volume['host']
         rpc.cast(context,
                  rpc.queue_get_for(context, FLAGS.volume_topic, host),
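
For readers unfamiliar with the reservation-style quota API the patch relies on, the flow is: reserve() records pending usage deltas and returns a reservation handle, commit() applies those deltas once the operation is known to succeed, and rollback() discards them on any error path. The sketch below is a minimal, self-contained illustration of that lifecycle; the QuotaEngine class and delete_volume() helper are hypothetical and not Cinder's actual cinder.quota.QUOTAS implementation, but they mirror why delete() above commits on the success paths and rolls back before raising InvalidVolume.

import uuid


class OverQuota(Exception):
    """Raised when a positive reservation would exceed a limit."""


class QuotaEngine(object):
    """Hypothetical stand-in for a reservation-style quota engine.

    Illustration only -- Cinder's real QUOTAS object in cinder.quota is
    driver-based and persists reservations in the database.
    """

    def __init__(self, **limits):
        self.limits = limits                      # e.g. volumes=10, gigabytes=1000
        self.in_use = dict.fromkeys(limits, 0)    # committed usage per resource
        self._pending = {}                        # reservation id -> pending deltas

    def reserve(self, context, **deltas):
        # Check positive deltas against the limits, then record the deltas
        # without applying them.  Negative deltas (as used by delete()) are
        # always allowed.
        for resource, delta in deltas.items():
            if delta > 0 and self.in_use[resource] + delta > self.limits[resource]:
                raise OverQuota(resource)
        reservation_id = str(uuid.uuid4())
        self._pending[reservation_id] = deltas
        return reservation_id

    def commit(self, context, reservation_id):
        # The operation succeeded: fold the pending deltas into usage.
        for resource, delta in self._pending.pop(reservation_id).items():
            self.in_use[resource] += delta

    def rollback(self, context, reservation_id):
        # The operation failed: discard the deltas, usage is untouched.
        self._pending.pop(reservation_id, None)


def delete_volume(quotas, context, volume):
    # Mirrors the control flow added to API.delete() in the patch above.
    reservations = quotas.reserve(context, volumes=-1,
                                  gigabytes=-volume['size'])
    if volume['status'] not in ('available', 'error'):
        # Error path: hand the reservation back so usage is unchanged.
        quotas.rollback(context, reservations)
        raise ValueError('Volume status must be available or error')
    # Success path: the volume really is going away, so the negative deltas
    # are applied and the tenant's recorded usage drops.
    quotas.commit(context, reservations)


if __name__ == '__main__':
    quotas = QuotaEngine(volumes=10, gigabytes=1000)
    quotas.in_use.update(volumes=1, gigabytes=20)
    delete_volume(quotas, None, {'size': 20, 'status': 'available'})
    print(quotas.in_use)    # {'volumes': 0, 'gigabytes': 0}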