Support Volume Backup Quota

Since quota takes volumes, snapshots and gigabytes into account, it also
needs to take backups into account.
1. The backup create API is not an admin-only interface; users of any
project can create an unlimited number of backups.
2. If malicious users created many large backups to exhaust the free
space of the backup storage backend, cinder-backup could be driven into a
denial-of-service state.
Based on these two points, it is necessary to support volume backup
quota.

blueprint support-volume-backup-quota

Change-Id: Idd24ad2e8a5507bf18e90bd5fad59a4af1c158c6
This commit is contained in:
ling-yun 2014-08-02 14:43:35 +08:00
parent c4bddaa50d
commit baa34a301b
11 changed files with 310 additions and 36 deletions

View File

@ -35,6 +35,8 @@ class UsedLimitsController(wsgi.Controller):
'totalVolumesUsed': 'volumes',
'totalGigabytesUsed': 'gigabytes',
'totalSnapshotsUsed': 'snapshots',
'totalBackupsUsed': 'backups',
'totalBackupGigabytesUsed': 'backup_gigabytes'
}
used_limits = {}

View File

@ -46,8 +46,10 @@ class ViewBuilder(object):
"instances": ["maxTotalInstances"],
"cores": ["maxTotalCores"],
"gigabytes": ["maxTotalVolumeGigabytes"],
"backup_gigabytes": ["maxTotalBackupGigabytes"],
"volumes": ["maxTotalVolumes"],
"snapshots": ["maxTotalSnapshots"],
"backups": ["maxTotalBackups"],
"key_pairs": ["maxTotalKeypairs"],
"floating_ips": ["maxTotalFloatingIps"],
"metadata_items": ["maxServerMeta", "maxImageMeta"],

View File

@ -26,14 +26,17 @@ from cinder import context
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
import cinder.policy
from cinder import quota
from cinder import utils
import cinder.volume
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def check_policy(context, action):
@ -120,8 +123,44 @@ class API(base.Base):
if not self._is_backup_service_enabled(volume, volume_host):
raise exception.ServiceNotFound(service_id='cinder-backup')
self.db.volume_update(context, volume_id, {'status': 'backing-up'})
# do quota reserver before setting volume status and backup status
try:
reserve_opts = {'backups': 1,
'backup_gigabytes': volume['size']}
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(resource_name):
return (usages[resource_name]['reserved'] +
usages[resource_name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG backup (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeBackupSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('backup_gigabytes'),
quota=quotas['backup_gigabytes'])
elif 'backups' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"backups (%(d_consumed)d backups "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.BackupLimitExceeded(
allowed=quotas[over])
self.db.volume_update(context, volume_id, {'status': 'backing-up'})
options = {'user_id': context.user_id,
'project_id': context.project_id,
'display_name': name,
@ -131,8 +170,15 @@ class API(base.Base):
'container': container,
'size': volume['size'],
'host': volume_host, }
backup = self.db.backup_create(context, options)
try:
backup = self.db.backup_create(context, options)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.db.backup_destroy(context, backup['id'])
finally:
QUOTAS.rollback(context, reservations)
#TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables

View File

@ -45,6 +45,7 @@ from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder import quota
from cinder import utils
from cinder.volume import utils as volume_utils
@ -64,6 +65,7 @@ mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift',
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
QUOTAS = quota.QUOTAS
class BackupManager(manager.SchedulerDependentManager):
@ -396,12 +398,11 @@ class BackupManager(manager.SchedulerDependentManager):
actual_status = backup['status']
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
self.db.backup_update(context, backup_id,
{'status': 'error', 'fail_reason': err})
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
@ -411,10 +412,9 @@ class BackupManager(manager.SchedulerDependentManager):
err = _('Delete backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
'configured_service': configured_service,
'backup_service': backup_service,
}
' backup [%(backup_service)s].')\
% {'configured_service': configured_service,
'backup_service': backup_service}
self.db.backup_update(context, backup_id,
{'status': 'error'})
raise exception.InvalidBackup(reason=err)
@ -429,8 +429,27 @@ class BackupManager(manager.SchedulerDependentManager):
'fail_reason':
unicode(err)})
# Get reservations
try:
reserve_opts = {
'backups': -1,
'backup_gigabytes': -backup['size'],
}
reservations = QUOTAS.reserve(context,
project_id=backup['project_id'],
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting backup"))
context = context.elevated()
self.db.backup_destroy(context, backup_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
project_id=backup['project_id'])
LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
def export_record(self, context, backup_id):

View File

@ -270,6 +270,14 @@ def _sync_snapshots(context, project_id, session, volume_type_id=None,
return {key: snapshots}
def _sync_backups(context, project_id, session, volume_type_id=None,
                  volume_type_name=None):
    """Return the current backup count for a project for quota sync.

    The per-project size total returned alongside the count is not needed
    here (``_sync_backup_gigabytes`` reports it), so it is discarded.
    """
    (backups, _junk) = _backup_data_get_for_project(
        context, project_id, volume_type_id=volume_type_id, session=session)
    return {'backups': backups}
def _sync_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(_junk, vol_gigs) = _volume_data_get_for_project(
@ -292,11 +300,22 @@ def _sync_consistencygroups(context, project_id, session,
key = 'consistencygroups'
return {key: groups}
def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
                           volume_type_name=None):
    """Return the total backup size (in GB) used by a project for quota sync.

    The backup count returned alongside the size total is not needed here
    (``_sync_backups`` reports it), so it is discarded.
    """
    (_junk, backup_gigs) = _backup_data_get_for_project(
        context, project_id, volume_type_id=volume_type_id, session=session)
    return {'backup_gigabytes': backup_gigs}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes
}
@ -1078,6 +1097,25 @@ def _volume_data_get_for_project(context, project_id, volume_type_id=None,
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _backup_data_get_for_project(context, project_id, volume_type_id=None,
                                 session=None):
    """Return ``(backup_count, total_backup_size_gb)`` for a project.

    Counts non-deleted backups and sums their ``size`` column; used by the
    quota sync functions for the ``backups`` and ``backup_gigabytes``
    resources.

    :param context: admin request context (enforced by decorator)
    :param project_id: project whose backups are tallied
    :param volume_type_id: optional filter on volume type
    :param session: optional existing DB session to reuse
    """
    query = model_query(context,
                        func.count(models.Backup.id),
                        func.sum(models.Backup.size),
                        read_deleted="no",
                        session=session).\
        filter_by(project_id=project_id)
    if volume_type_id:
        # NOTE(review): this assumes the Backup model has a volume_type_id
        # column — verify, as backups may not carry volume-type info.
        query = query.filter_by(volume_type_id=volume_type_id)
    result = query.first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)
@require_admin_context
def volume_data_get_for_project(context, project_id, volume_type_id=None):
return _volume_data_get_for_project(context, project_id, volume_type_id)

View File

@ -430,6 +430,12 @@ class VolumeSizeExceedsAvailableQuota(QuotaError):
"%(consumed)sG has been consumed.")
class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
    """Raised when a requested backup's size would exceed the remaining
    backup_gigabytes quota for the project."""
    message = _("Requested backup exceeds allowed Backup Gigabytes "
                "quota. Requested %(requested)sG, quota is %(quota)sG and "
                "%(consumed)sG has been consumed.")
class VolumeLimitExceeded(QuotaError):
message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
@ -438,6 +444,10 @@ class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
class BackupLimitExceeded(QuotaError):
    """Raised when creating a backup would exceed the 'backups' quota."""
    message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
class DuplicateSfVolumeNames(Duplicate):
message = _("Detected more than one volume with name %(vol_name)s")

View File

@ -46,6 +46,13 @@ quota_opts = [
default=1000,
help='Total amount of storage, in gigabytes, allowed '
'for volumes and snapshots per project'),
cfg.IntOpt('quota_backups',
default=10,
help='Number of volume backups allowed per project'),
cfg.IntOpt('quota_backup_gigabytes',
default=1000,
help='Total amount of storage, in gigabytes, allowed '
'for backups per project'),
cfg.IntOpt('reservation_expire',
default=86400,
help='Number of seconds until a reservation expires'),
@ -105,6 +112,7 @@ class DbQuotaDriver(object):
default_quotas = {}
if CONF.use_default_quota_class:
default_quotas = db.quota_class_get_default(context)
for resource in resources.values():
if resource.name not in default_quotas:
LOG.deprecated(_("Default quota for resource: %(res)s is set "
@ -861,7 +869,10 @@ class VolumeTypeQuotaEngine(QuotaEngine):
# Global quotas.
argses = [('volumes', '_sync_volumes', 'quota_volumes'),
('snapshots', '_sync_snapshots', 'quota_snapshots'),
('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ]
('gigabytes', '_sync_gigabytes', 'quota_gigabytes'),
('backups', '_sync_backups', 'quota_backups'),
('backup_gigabytes', '_sync_backup_gigabytes',
'quota_backup_gigabytes')]
for args in argses:
resource = ReservableResource(*args)
result[resource.name] = resource

View File

@ -29,10 +29,13 @@ from cinder import test
def make_body(root=True, gigabytes=1000, snapshots=10,
volumes=10, tenant_id='foo'):
volumes=10, backups=10, backup_gigabytes=1000,
tenant_id='foo'):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
'volumes': volumes}
'volumes': volumes,
'backups': backups,
'backup_gigabytes': backup_gigabytes}
# need to consider preexisting volume types as well
volume_types = db.volume_type_get_all(context.get_admin_context())
for volume_type in volume_types:
@ -76,7 +79,7 @@ class QuotaSetsControllerTest(test.TestCase):
def test_update(self):
body = make_body(gigabytes=2000, snapshots=15,
volumes=5, tenant_id=None)
volumes=5, backups=5, tenant_id=None)
result = self.controller.update(self.req, 'foo', body)
self.assertDictMatch(result, body)
@ -115,7 +118,8 @@ class QuotaSetsControllerTest(test.TestCase):
self.assertDictMatch(result_show, make_body())
body = make_body(gigabytes=2000, snapshots=15,
volumes=5, tenant_id=None)
volumes=5, backups=5,
backup_gigabytes=1000, tenant_id=None)
result_update = self.controller.update(self.req, 'foo', body)
self.assertDictMatch(result_update, body)

View File

@ -32,11 +32,15 @@ QUOTAS = quota.QUOTAS
def make_body(root=True, gigabytes=1000, snapshots=10,
volumes=10, volume_types_faked=None,
volumes=10, backups=10,
backup_gigabytes=1000,
volume_types_faked=None,
tenant_id='foo'):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
'volumes': volumes}
'volumes': volumes,
'backups': backups,
'backup_gigabytes': backup_gigabytes}
if not volume_types_faked:
volume_types_faked = {'fake_type': None}
for volume_type in volume_types_faked:

View File

@ -21,6 +21,7 @@ import datetime
import mock
from oslo.config import cfg
from cinder import backup
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as sqa_api
@ -49,7 +50,9 @@ class QuotaIntegrationTestCase(test.TestCase):
self.flags(quota_volumes=2,
quota_snapshots=2,
quota_gigabytes=20)
quota_gigabytes=20,
quota_backups=2,
quota_backup_gigabytes=20)
self.user_id = 'admin'
self.project_id = 'admin'
@ -84,6 +87,15 @@ class QuotaIntegrationTestCase(test.TestCase):
snapshot['status'] = 'available'
return db.snapshot_create(self.context, snapshot)
def _create_backup(self, volume):
backup = {}
backup['user_id'] = self.user_id
backup['project_id'] = self.project_id
backup['volume_id'] = volume['id']
backup['volume_size'] = volume['size']
backup['status'] = 'available'
return db.backup_create(self.context, backup)
def test_too_many_volumes(self):
volume_ids = []
for i in range(CONF.quota_volumes):
@ -127,6 +139,29 @@ class QuotaIntegrationTestCase(test.TestCase):
db.snapshot_destroy(self.context, snap_ref['id'])
db.volume_destroy(self.context, vol_ref['id'])
    def test_too_many_backups(self):
        """Backup creation past the 'backups' quota raises BackupLimitExceeded."""
        resource = 'backups'
        # Cap the default quota class at one backup while the config-flag
        # limits stay high, so only the class limit can trip.
        db.quota_class_create(self.context, 'default', resource, 1)
        flag_args = {
            'quota_backups': 2000,
            'quota_backup_gigabytes': 2000
        }
        self.flags(**flag_args)
        vol_ref = self._create_volume()
        # One backup already exists, so the next create exceeds the limit of 1.
        backup_ref = self._create_backup(vol_ref)
        # Stub the backup-service availability check so the API call reaches
        # the quota reservation logic.
        with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
                mock__is_backup_service_enabled:
            mock__is_backup_service_enabled.return_value = True
            self.assertRaises(exception.BackupLimitExceeded,
                              backup.API().create,
                              self.context,
                              'name',
                              'description',
                              vol_ref['id'],
                              'container')

        db.backup_destroy(self.context, backup_ref['id'])
        db.volume_destroy(self.context, vol_ref['id'])
def test_too_many_gigabytes(self):
volume_ids = []
vol_ref = self._create_volume(size=20)
@ -150,6 +185,23 @@ class QuotaIntegrationTestCase(test.TestCase):
db.snapshot_destroy(self.context, snap_ref['id'])
db.volume_destroy(self.context, vol_ref['id'])
    def test_too_many_combined_backup_gigabytes(self):
        """A backup larger than the remaining backup_gigabytes quota is
        rejected with VolumeBackupSizeExceedsAvailableQuota."""
        # A 10000G volume plus an existing equally-sized backup far exceeds
        # the quota_backup_gigabytes limit configured in setUp.
        vol_ref = self._create_volume(size=10000)
        backup_ref = self._create_backup(vol_ref)
        # Stub the backup-service availability check so the API call reaches
        # the quota reservation logic.
        with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
                mock__is_backup_service_enabled:
            mock__is_backup_service_enabled.return_value = True
            self.assertRaises(
                exception.VolumeBackupSizeExceedsAvailableQuota,
                backup.API().create,
                context=self.context,
                name='name',
                description='description',
                volume_id=vol_ref['id'],
                container='container')

        db.backup_destroy(self.context, backup_ref['id'])
        db.volume_destroy(self.context, vol_ref['id'])
def test_no_snapshot_gb_quota_flag(self):
self.flags(quota_volumes=2,
quota_snapshots=2,
@ -172,6 +224,35 @@ class QuotaIntegrationTestCase(test.TestCase):
db.volume_destroy(self.context, vol_ref['id'])
db.volume_destroy(self.context, vol_ref2['id'])
    def test_backup_gb_quota_flag(self):
        """Backup sizes must not count against the volume 'gigabytes' quota."""
        self.flags(quota_volumes=2,
                   quota_snapshots=2,
                   quota_backups=2,
                   quota_gigabytes=20
                   )
        vol_ref = self._create_volume(size=10)
        backup_ref = self._create_backup(vol_ref)
        # Stub the backup-service availability check so create() succeeds.
        with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
                mock__is_backup_service_enabled:
            mock__is_backup_service_enabled.return_value = True
            backup_ref2 = backup.API().create(self.context,
                                              'name',
                                              'description',
                                              vol_ref['id'],
                                              'container')

            # Make sure the backup volume_size isn't included in usage.
            vol_ref2 = volume.API().create(self.context, 10, '', '')
            usages = db.quota_usage_get_all_by_project(self.context,
                                                       self.project_id)
            # Two 10G volumes -> exactly 20G 'gigabytes' in use; the two
            # backups contribute nothing to this counter.
            self.assertEqual(usages['gigabytes']['in_use'], 20)
            self.assertEqual(usages['gigabytes']['reserved'], 0)

        db.backup_destroy(self.context, backup_ref['id'])
        db.backup_destroy(self.context, backup_ref2['id'])
        db.volume_destroy(self.context, vol_ref['id'])
        db.volume_destroy(self.context, vol_ref2['id'])
def test_too_many_gigabytes_of_type(self):
resource = 'gigabytes_%s' % self.volume_type_name
db.quota_class_create(self.context, 'default', resource, 10)
@ -679,7 +760,8 @@ class VolumeTypeQuotaEngineTestCase(test.TestCase):
engine = quota.VolumeTypeQuotaEngine()
self.assertEqual(engine.resource_names,
['gigabytes', 'snapshots', 'volumes'])
['backup_gigabytes', 'backups',
'gigabytes', 'snapshots', 'volumes'])
def test_volume_type_resources(self):
ctx = context.RequestContext('admin', 'admin', is_admin=True)
@ -703,7 +785,8 @@ class VolumeTypeQuotaEngineTestCase(test.TestCase):
engine = quota.VolumeTypeQuotaEngine()
self.assertEqual(engine.resource_names,
['gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
['backup_gigabytes', 'backups',
'gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
'snapshots', 'snapshots_type1', 'snapshots_type_2',
'volumes', 'volumes_type1', 'volumes_type_2'])
db.volume_type_destroy(ctx, vtype['id'])
@ -717,6 +800,8 @@ class DbQuotaDriverTestCase(test.TestCase):
self.flags(quota_volumes=10,
quota_snapshots=10,
quota_gigabytes=1000,
quota_backups=10,
quota_backup_gigabytes=1000,
reservation_expire=86400,
until_refresh=0,
max_age=0,
@ -742,7 +827,9 @@ class DbQuotaDriverTestCase(test.TestCase):
dict(
volumes=10,
snapshots=10,
gigabytes=1000, ))
gigabytes=1000,
backups=10,
backup_gigabytes=1000))
def _stub_quota_class_get_default(self):
# Stub out quota_class_get_default
@ -750,7 +837,10 @@ class DbQuotaDriverTestCase(test.TestCase):
self.calls.append('quota_class_get_default')
return dict(volumes=10,
snapshots=10,
gigabytes=1000,)
gigabytes=1000,
backups=10,
backup_gigabytes=1000
)
self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
def _stub_volume_type_get_all(self):
@ -763,7 +853,8 @@ class DbQuotaDriverTestCase(test.TestCase):
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
return dict(gigabytes=500, volumes=10, snapshots=10, )
return dict(gigabytes=500, volumes=10, snapshots=10, backups=10,
backup_gigabytes=500)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
@ -775,7 +866,9 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(volumes=10,
gigabytes=500,
snapshots=10))
snapshots=10,
backups=10,
backup_gigabytes=500))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
@ -785,20 +878,27 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(volumes=10,
gigabytes=500,
snapshots=10))
snapshots=10,
backups=10,
backup_gigabytes=500))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(volumes=10, gigabytes=50, reserved=0, snapshots=10)
return dict(volumes=10, gigabytes=50, reserved=0,
snapshots=10, backups=10,
backup_gigabytes=50)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(volumes=dict(in_use=2, reserved=0),
snapshots=dict(in_use=2, reserved=0),
gigabytes=dict(in_use=10, reserved=0), )
gigabytes=dict(in_use=10, reserved=0),
backups=dict(in_use=2, reserved=0),
backup_gigabytes=dict(in_use=10, reserved=0)
)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
@ -825,7 +925,14 @@ class DbQuotaDriverTestCase(test.TestCase):
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ), ))
reserved=0, ),
backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
))
def test_get_project_quotas_alt_context_no_class(self):
self._stub_get_by_project()
@ -845,7 +952,14 @@ class DbQuotaDriverTestCase(test.TestCase):
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ), ))
reserved=0, ),
backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
))
def test_get_project_quotas_alt_context_with_class(self):
self._stub_get_by_project()
@ -866,7 +980,14 @@ class DbQuotaDriverTestCase(test.TestCase):
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ), ))
reserved=0, ),
backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
@ -880,7 +1001,13 @@ class DbQuotaDriverTestCase(test.TestCase):
'quota_class_get_all_by_name',
'quota_class_get_default', ])
self.assertEqual(result,
dict(gigabytes=dict(limit=50,
dict(backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
snapshots=dict(limit=10,
@ -888,7 +1015,9 @@ class DbQuotaDriverTestCase(test.TestCase):
reserved=0, ),
volumes=dict(limit=10,
in_use=2,
reserved=0, ), ))
reserved=0, ),
))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
@ -902,7 +1031,9 @@ class DbQuotaDriverTestCase(test.TestCase):
'quota_class_get_default', ])
self.assertEqual(result, dict(volumes=dict(limit=10, ),
snapshots=dict(limit=10, ),
gigabytes=dict(limit=50, ), ))
backups=dict(limit=10, ),
gigabytes=dict(limit=50, ),
backup_gigabytes=dict(limit=50, ),))
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,

View File

@ -221,6 +221,13 @@
# and snapshots per project (integer value)
#quota_gigabytes=1000
# Number of volume backups allowed per project (integer value)
#quota_backups=10
# Total amount of storage, in gigabytes, allowed for backups
# per project (integer value)
#quota_backup_gigabytes=1000
# Number of seconds until a reservation expires (integer
# value)
#reservation_expire=86400