Remove nova-volume DB
* Remove volume calls from nova.db.api
* Remove volume calls from nova.db.sqlalchemy.api
* Remove tables from nova/db/sqlalchemy/models.py
* Remove Xen Storage Manager Volume Driver code
* No migration to preserve data in unused tables
* Remove quota support for volumes

DocImpact
part of bp delete-nova-volume

Change-Id: I2c82c96d67f3746e5de28f917dd8ceb0c8037e27
commit 29f7bdc045
parent fab607c72c
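For context on what the bullet points above amount to in code: nova.db.api exposed thin pass-through wrappers over the SQLAlchemy layer, and this commit deletes the volume-related wrappers together with their tables, so volume bookkeeping no longer lives in Nova's own database. The sketch below is illustrative only; it assumes a caller that previously used db.volume_get() and now goes through the Cinder client wrapper that the tests in this diff stub out (nova.volume.cinder.API), and the exact method signatures should be treated as assumptions rather than as part of this change.

# Illustrative sketch, not part of this commit.

from nova import context as nova_context
from nova.volume import cinder


def describe_volume(volume_id):
    # Before this change: nova.db.api.volume_get(ctxt, volume_id) read the
    # volumes table in Nova's database.
    # After this change: ask the external volume service through the client
    # wrapper instead (method name taken from the cinder stubs in the tests
    # touched by this diff; treat the signature as an assumption).
    ctxt = nova_context.get_admin_context()
    volume_api = cinder.API()
    return volume_api.get(ctxt, volume_id)

Anything still importing the removed db.volume_* helpers will break once this lands, which is why the test changes further down replace the db.volume_get stub with cinder.API stubs.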
bin/nova-manage (141 lines changed)
@ -915,146 +915,6 @@ class InstanceTypeCommands(object):
|
||||
_db_error(e)
|
||||
|
||||
|
||||
class StorageManagerCommands(object):
|
||||
"""Class for mangaging Storage Backends and Flavors"""
|
||||
|
||||
def flavor_list(self, flavor=None):
|
||||
ctxt = context.get_admin_context()
|
||||
|
||||
try:
|
||||
if flavor is None:
|
||||
flavors = db.sm_flavor_get_all(ctxt)
|
||||
else:
|
||||
flavors = db.sm_flavor_get(ctxt, flavor)
|
||||
except exception.NotFound as ex:
|
||||
print _('error: %s') % ex
|
||||
sys.exit(2)
|
||||
|
||||
print "%-18s\t%-20s\t%s" % (_('id'),
|
||||
_('Label'),
|
||||
_('Description'))
|
||||
|
||||
for flav in flavors:
|
||||
print "%-18s\t%-20s\t%s" % (
|
||||
flav['id'],
|
||||
flav['label'],
|
||||
flav['description'])
|
||||
|
||||
def flavor_create(self, label, desc):
|
||||
# TODO(renukaapte) flavor name must be unique
|
||||
try:
|
||||
db.sm_flavor_create(context.get_admin_context(),
|
||||
dict(label=label,
|
||||
description=desc))
|
||||
except exception.DBError, e:
|
||||
_db_error(e)
|
||||
|
||||
def flavor_delete(self, label):
|
||||
try:
|
||||
db.sm_flavor_delete(context.get_admin_context(), label)
|
||||
|
||||
except exception.DBError, e:
|
||||
_db_error(e)
|
||||
|
||||
def _splitfun(self, item):
|
||||
i = item.split("=")
|
||||
return i[0:2]
|
||||
|
||||
def backend_list(self, backend_conf_id=None):
|
||||
ctxt = context.get_admin_context()
|
||||
|
||||
try:
|
||||
if backend_conf_id is None:
|
||||
backends = db.sm_backend_conf_get_all(ctxt)
|
||||
else:
|
||||
backends = db.sm_backend_conf_get(ctxt, backend_conf_id)
|
||||
|
||||
except exception.NotFound as ex:
|
||||
print _('error: %s') % ex
|
||||
sys.exit(2)
|
||||
|
||||
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
|
||||
_('Flavor id'),
|
||||
_('SR UUID'),
|
||||
_('SR Type'),
|
||||
_('Config Parameters'),)
|
||||
|
||||
for b in backends:
|
||||
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
|
||||
b['flavor_id'],
|
||||
b['sr_uuid'],
|
||||
b['sr_type'],
|
||||
b['config_params'],)
|
||||
|
||||
def backend_add(self, flavor_label, sr_type, *args):
|
||||
# TODO(renukaapte) Add backend_introduce.
|
||||
ctxt = context.get_admin_context()
|
||||
params = dict(map(self._splitfun, args))
|
||||
|
||||
if 'sr_uuid' in params:
|
||||
try:
|
||||
backend = db.sm_backend_conf_get_by_sr(ctxt,
|
||||
params['sr_uuid'])
|
||||
except exception.DBError, e:
|
||||
_db_error(e)
|
||||
|
||||
if backend:
|
||||
print _('Backend config found. Would you like to recreate '
|
||||
'this?')
|
||||
print _('(WARNING:Recreating will destroy all VDIs on '
|
||||
'backend!!)')
|
||||
c = raw_input(_('Proceed? (y/n) '))
|
||||
if c == 'y' or c == 'Y':
|
||||
try:
|
||||
db.sm_backend_conf_update(ctxt, backend['id'],
|
||||
dict(created=False))
|
||||
except exception.DBError, e:
|
||||
_db_error(e)
|
||||
return
|
||||
|
||||
else:
|
||||
print _('Backend config not found. Would you like to create '
|
||||
'it?')
|
||||
print _('(WARNING: Creating will destroy all data on '
|
||||
'backend!!!)')
|
||||
c = raw_input(_('Proceed? (y/n) '))
|
||||
if c != 'y' and c != 'Y':
|
||||
return
|
||||
|
||||
print _('(WARNING: Creating will destroy all data on backend!!!)')
|
||||
c = raw_input(_('Proceed? (y/n) '))
|
||||
if c == 'y' or c == 'Y':
|
||||
if flavor_label is None:
|
||||
print _('error: backend needs to be associated with flavor')
|
||||
sys.exit(2)
|
||||
|
||||
try:
|
||||
flavors = db.sm_flavor_get_by_label(ctxt, flavor_label)
|
||||
except exception.NotFound as ex:
|
||||
print _('error: %s') % ex
|
||||
sys.exit(2)
|
||||
|
||||
config_params = "".join(['%s=%s ' %
|
||||
(key, params[key]) for key in params])
|
||||
|
||||
try:
|
||||
db.sm_backend_conf_create(ctxt,
|
||||
dict(flavor_id=flavors['id'],
|
||||
sr_uuid=None,
|
||||
sr_type=sr_type,
|
||||
config_params=config_params))
|
||||
except exception.DBError, e:
|
||||
_db_error(e)
|
||||
|
||||
def backend_remove(self, backend_conf_id):
|
||||
try:
|
||||
db.sm_backend_conf_delete(context.get_admin_context(),
|
||||
backend_conf_id)
|
||||
|
||||
except exception.DBError, e:
|
||||
_db_error(e)
|
||||
|
||||
|
||||
class AgentBuildCommands(object):
|
||||
"""Class for managing agent builds."""
|
||||
|
||||
@ -1179,7 +1039,6 @@ CATEGORIES = [
    ('project', ProjectCommands),
    ('service', ServiceCommands),
    ('shell', ShellCommands),
    ('sm', StorageManagerCommands),
    ('version', VersionCommands),
    ('vm', VmCommands),
    ('vpn', VpnCommands),
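The hunk above also drops the ('sm', StorageManagerCommands) entry from nova-manage's CATEGORIES table, which is what removes the "nova-manage sm ..." subcommands for end users (hence the DocImpact tag). As a rough, simplified sketch of how such a category table is typically dispatched (the real nova-manage argument handling is more involved; the names below are illustrative only):

# Simplified illustration of category dispatch; not Nova's actual wiring.

import sys


class ServiceCommands(object):
    """Toy stand-in for one of the command classes listed in CATEGORIES."""
    def list(self):
        print("listing services")


# With the 'sm' entry removed, "nova-manage sm flavor_list" no longer
# resolves to a command class.
CATEGORIES = [('service', ServiceCommands)]


def main(argv):
    category, action = argv[1], argv[2]
    try:
        command_class = dict(CATEGORIES)[category]
    except KeyError:
        print("Unknown category: %s" % category)
        sys.exit(2)
    getattr(command_class(), action)()


if __name__ == '__main__':
    main(['nova-manage', 'service', 'list'])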
@ -10,8 +10,6 @@
|
||||
"maxTotalInstances": 10,
|
||||
"maxTotalKeypairs": 100,
|
||||
"maxTotalRAMSize": 51200,
|
||||
"maxTotalVolumeGigabytes": 1000,
|
||||
"maxTotalVolumes": 10,
|
||||
"maxSecurityGroups": 10,
|
||||
"maxSecurityGroupRules": 20
|
||||
},
|
||||
|
@ -20,10 +20,8 @@
|
||||
<limit name="maxImageMeta" value="128"/>
|
||||
<limit name="maxPersonalitySize" value="10240"/>
|
||||
<limit name="maxTotalKeypairs" value="100"/>
|
||||
<limit name="maxTotalVolumes" value="10"/>
|
||||
<limit name="maxTotalCores" value="20"/>
|
||||
<limit name="maxTotalFloatingIps" value="10"/>
|
||||
<limit name="maxTotalVolumeGigabytes" value="1000"/>
|
||||
<limit name="maxTotalRAMSize" value="51200"/>
|
||||
<limit name="maxSecurityGroups" value="10"/>
|
||||
<limit name="maxSecurityGroupRules" value="20"/>
|
||||
|
@ -12,15 +12,11 @@
|
||||
"maxTotalInstances": 10,
|
||||
"maxTotalKeypairs": 100,
|
||||
"maxTotalRAMSize": 51200,
|
||||
"maxTotalVolumeGigabytes": 1000,
|
||||
"maxTotalVolumes": 10,
|
||||
"totalCoresUsed": 0,
|
||||
"totalInstancesUsed": 0,
|
||||
"totalKeyPairsUsed": 0,
|
||||
"totalRAMUsed": 0,
|
||||
"totalSecurityGroupsUsed": 0,
|
||||
"totalVolumeGigabytesUsed": 0,
|
||||
"totalVolumesUsed": 0
|
||||
"totalSecurityGroupsUsed": 0
|
||||
},
|
||||
"rate": [
|
||||
{
|
||||
@ -78,4 +74,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -19,20 +19,16 @@
|
||||
<limit name="maxPersonality" value="5"/>
|
||||
<limit name="maxImageMeta" value="128"/>
|
||||
<limit name="maxPersonalitySize" value="10240"/>
|
||||
<limit name="totalVolumesUsed" value="0"/>
|
||||
<limit name="maxSecurityGroupRules" value="20"/>
|
||||
<limit name="maxTotalKeypairs" value="100"/>
|
||||
<limit name="totalCoresUsed" value="0"/>
|
||||
<limit name="maxTotalVolumes" value="10"/>
|
||||
<limit name="totalRAMUsed" value="0"/>
|
||||
<limit name="totalInstancesUsed" value="0"/>
|
||||
<limit name="maxSecurityGroups" value="10"/>
|
||||
<limit name="totalVolumeGigabytesUsed" value="0"/>
|
||||
<limit name="maxTotalCores" value="20"/>
|
||||
<limit name="totalSecurityGroupsUsed" value="0"/>
|
||||
<limit name="maxTotalFloatingIps" value="10"/>
|
||||
<limit name="totalKeyPairsUsed" value="0"/>
|
||||
<limit name="maxTotalVolumeGigabytes" value="1000"/>
|
||||
<limit name="maxTotalRAMSize" value="51200"/>
|
||||
</absolute>
|
||||
</limits>
|
||||
</limits>
|
||||
|
@ -47,8 +47,6 @@ class ViewBuilder(object):
|
||||
"ram": ["maxTotalRAMSize"],
|
||||
"instances": ["maxTotalInstances"],
|
||||
"cores": ["maxTotalCores"],
|
||||
"gigabytes": ["maxTotalVolumeGigabytes"],
|
||||
"volumes": ["maxTotalVolumes"],
|
||||
"key_pairs": ["maxTotalKeypairs"],
|
||||
"floating_ips": ["maxTotalFloatingIps"],
|
||||
"metadata_items": ["maxServerMeta", "maxImageMeta"],
|
||||
|
nova/db/api.py (255 lines changed)
@ -59,9 +59,6 @@ db_opts = [
|
||||
cfg.StrOpt('instance_name_template',
|
||||
default='instance-%08x',
|
||||
help='Template string to be used to generate instance names'),
|
||||
cfg.StrOpt('volume_name_template',
|
||||
default='volume-%s',
|
||||
help='Template string to be used to generate instance names'),
|
||||
cfg.StrOpt('snapshot_name_template',
|
||||
default='snapshot-%s',
|
||||
help='Template string to be used to generate snapshot names'),
|
||||
@ -157,15 +154,6 @@ def service_get_all_compute_sorted(context):
|
||||
return IMPL.service_get_all_compute_sorted(context)
|
||||
|
||||
|
||||
def service_get_all_volume_sorted(context):
|
||||
"""Get all volume services sorted by volume count.
|
||||
|
||||
:returns: a list of (Service, volume_count) tuples.
|
||||
|
||||
"""
|
||||
return IMPL.service_get_all_volume_sorted(context)
|
||||
|
||||
|
||||
def service_get_by_args(context, host, binary):
|
||||
"""Get the state of a service by node name and binary."""
|
||||
return IMPL.service_get_by_args(context, host, binary)
|
||||
@ -1062,81 +1050,11 @@ def reservation_expire(context):
|
||||
###################
|
||||
|
||||
|
||||
def volume_allocate_iscsi_target(context, volume_id, host):
|
||||
"""Atomically allocate a free iscsi_target from the pool."""
|
||||
return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
|
||||
|
||||
|
||||
def volume_attached(context, volume_id, instance_id, mountpoint):
|
||||
"""Ensure that a volume is set as attached."""
|
||||
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
|
||||
|
||||
|
||||
def volume_create(context, values):
|
||||
"""Create a volume from the values dictionary."""
|
||||
return IMPL.volume_create(context, values)
|
||||
|
||||
|
||||
def volume_data_get_for_project(context, project_id, session=None):
|
||||
"""Get (volume_count, gigabytes) for project."""
|
||||
return IMPL.volume_data_get_for_project(context, project_id,
|
||||
session=session)
|
||||
|
||||
|
||||
def volume_destroy(context, volume_id):
|
||||
"""Destroy the volume or raise if it does not exist."""
|
||||
return IMPL.volume_destroy(context, volume_id)
|
||||
|
||||
|
||||
def volume_detached(context, volume_id):
|
||||
"""Ensure that a volume is set as detached."""
|
||||
return IMPL.volume_detached(context, volume_id)
|
||||
|
||||
|
||||
def volume_get(context, volume_id):
|
||||
"""Get a volume or raise if it does not exist."""
|
||||
return IMPL.volume_get(context, volume_id)
|
||||
|
||||
|
||||
def volume_get_all(context):
|
||||
"""Get all volumes."""
|
||||
return IMPL.volume_get_all(context)
|
||||
|
||||
|
||||
def volume_get_all_by_host(context, host):
|
||||
"""Get all volumes belonging to a host."""
|
||||
return IMPL.volume_get_all_by_host(context, host)
|
||||
|
||||
|
||||
def volume_get_all_by_instance_uuid(context, instance_uuid):
|
||||
"""Get all volumes belonging to an instance."""
|
||||
return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
|
||||
|
||||
|
||||
def volume_get_all_by_project(context, project_id):
|
||||
"""Get all volumes belonging to a project."""
|
||||
return IMPL.volume_get_all_by_project(context, project_id)
|
||||
|
||||
|
||||
def volume_get_by_ec2_id(context, ec2_id):
|
||||
"""Get a volume by ec2 id."""
|
||||
return IMPL.volume_get_by_ec2_id(context, ec2_id)
|
||||
|
||||
|
||||
def volume_get_iscsi_target_num(context, volume_id):
|
||||
"""Get the target num (tid) allocated to the volume."""
|
||||
return IMPL.volume_get_iscsi_target_num(context, volume_id)
|
||||
|
||||
|
||||
def volume_update(context, volume_id, values):
|
||||
"""Set the given properties on a volume and update it.
|
||||
|
||||
Raises NotFound if volume does not exist.
|
||||
|
||||
"""
|
||||
return IMPL.volume_update(context, volume_id, values)
|
||||
|
||||
|
||||
def get_ec2_volume_id_by_uuid(context, volume_id):
|
||||
return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
|
||||
|
||||
@ -1189,11 +1107,6 @@ def snapshot_get_all_by_project(context, project_id):
|
||||
return IMPL.snapshot_get_all_by_project(context, project_id)
|
||||
|
||||
|
||||
def snapshot_get_all_for_volume(context, volume_id):
|
||||
"""Get all snapshots for a volume."""
|
||||
return IMPL.snapshot_get_all_for_volume(context, volume_id)
|
||||
|
||||
|
||||
def snapshot_update(context, snapshot_id, values):
|
||||
"""Set the given properties on a snapshot and update it.
|
||||
|
||||
@ -1586,80 +1499,6 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
|
||||
extra_specs)
|
||||
|
||||
|
||||
##################
|
||||
|
||||
|
||||
def volume_metadata_get(context, volume_id):
|
||||
"""Get all metadata for a volume."""
|
||||
return IMPL.volume_metadata_get(context, volume_id)
|
||||
|
||||
|
||||
def volume_metadata_delete(context, volume_id, key):
|
||||
"""Delete the given metadata item."""
|
||||
IMPL.volume_metadata_delete(context, volume_id, key)
|
||||
|
||||
|
||||
def volume_metadata_update(context, volume_id, metadata, delete):
|
||||
"""Update metadata if it exists, otherwise create it."""
|
||||
IMPL.volume_metadata_update(context, volume_id, metadata, delete)
|
||||
|
||||
|
||||
##################
|
||||
|
||||
|
||||
def volume_type_create(context, values):
|
||||
"""Create a new volume type."""
|
||||
return IMPL.volume_type_create(context, values)
|
||||
|
||||
|
||||
def volume_type_get_all(context, inactive=False):
|
||||
"""Get all volume types."""
|
||||
return IMPL.volume_type_get_all(context, inactive)
|
||||
|
||||
|
||||
def volume_type_get(context, id):
|
||||
"""Get volume type by id."""
|
||||
return IMPL.volume_type_get(context, id)
|
||||
|
||||
|
||||
def volume_type_get_by_name(context, name):
|
||||
"""Get volume type by name."""
|
||||
return IMPL.volume_type_get_by_name(context, name)
|
||||
|
||||
|
||||
def volume_type_destroy(context, name):
|
||||
"""Delete a volume type."""
|
||||
return IMPL.volume_type_destroy(context, name)
|
||||
|
||||
|
||||
def volume_get_active_by_window(context, begin, end=None, project_id=None):
|
||||
"""Get all the volumes inside the window.
|
||||
|
||||
Specifying a project_id will filter for a certain project."""
|
||||
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
def volume_type_extra_specs_get(context, volume_type_id):
|
||||
"""Get all extra specs for a volume type."""
|
||||
return IMPL.volume_type_extra_specs_get(context, volume_type_id)
|
||||
|
||||
|
||||
def volume_type_extra_specs_delete(context, volume_type_id, key):
|
||||
"""Delete the given extra specs item."""
|
||||
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
|
||||
|
||||
|
||||
def volume_type_extra_specs_update_or_create(context, volume_type_id,
|
||||
extra_specs):
|
||||
"""Create or update volume type extra specs. This adds or modifies the
|
||||
key/value pairs specified in the extra specs dict argument"""
|
||||
IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
|
||||
extra_specs)
|
||||
|
||||
|
||||
###################
|
||||
|
||||
|
||||
@ -1681,100 +1520,6 @@ def s3_image_create(context, image_uuid):
|
||||
####################
|
||||
|
||||
|
||||
def sm_backend_conf_create(context, values):
|
||||
"""Create a new SM Backend Config entry."""
|
||||
return IMPL.sm_backend_conf_create(context, values)
|
||||
|
||||
|
||||
def sm_backend_conf_update(context, sm_backend_conf_id, values):
|
||||
"""Update a SM Backend Config entry."""
|
||||
return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)
|
||||
|
||||
|
||||
def sm_backend_conf_delete(context, sm_backend_conf_id):
|
||||
"""Delete a SM Backend Config."""
|
||||
return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
|
||||
|
||||
|
||||
def sm_backend_conf_get(context, sm_backend_conf_id):
|
||||
"""Get a specific SM Backend Config."""
|
||||
return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
|
||||
|
||||
|
||||
def sm_backend_conf_get_by_sr(context, sr_uuid):
|
||||
"""Get a specific SM Backend Config."""
|
||||
return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)
|
||||
|
||||
|
||||
def sm_backend_conf_get_all(context):
|
||||
"""Get all SM Backend Configs."""
|
||||
return IMPL.sm_backend_conf_get_all(context)
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
def sm_flavor_create(context, values):
|
||||
"""Create a new SM Flavor entry."""
|
||||
return IMPL.sm_flavor_create(context, values)
|
||||
|
||||
|
||||
def sm_flavor_update(context, sm_flavor_id, values):
|
||||
"""Update a SM Flavor entry."""
|
||||
return IMPL.sm_flavor_update(context, sm_flavor_id, values)
|
||||
|
||||
|
||||
def sm_flavor_delete(context, sm_flavor_id):
|
||||
"""Delete a SM Flavor."""
|
||||
return IMPL.sm_flavor_delete(context, sm_flavor_id)
|
||||
|
||||
|
||||
def sm_flavor_get(context, sm_flavor_id):
|
||||
"""Get a specific SM Flavor."""
|
||||
return IMPL.sm_flavor_get(context, sm_flavor_id)
|
||||
|
||||
|
||||
def sm_flavor_get_all(context):
|
||||
"""Get all SM Flavors."""
|
||||
return IMPL.sm_flavor_get_all(context)
|
||||
|
||||
|
||||
def sm_flavor_get_by_label(context, sm_flavor_label):
|
||||
"""Get a specific SM Flavor given label."""
|
||||
return IMPL.sm_flavor_get_by_label(context, sm_flavor_label)
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
def sm_volume_create(context, values):
|
||||
"""Create a new child Zone entry."""
|
||||
return IMPL.sm_volume_create(context, values)
|
||||
|
||||
|
||||
def sm_volume_update(context, volume_id, values):
|
||||
"""Update a child Zone entry."""
|
||||
return IMPL.sm_volume_update(context, values)
|
||||
|
||||
|
||||
def sm_volume_delete(context, volume_id):
|
||||
"""Delete a child Zone."""
|
||||
return IMPL.sm_volume_delete(context, volume_id)
|
||||
|
||||
|
||||
def sm_volume_get(context, volume_id):
|
||||
"""Get a specific child Zone."""
|
||||
return IMPL.sm_volume_get(context, volume_id)
|
||||
|
||||
|
||||
def sm_volume_get_all(context):
|
||||
"""Get all child Zones."""
|
||||
return IMPL.sm_volume_get_all(context)
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
def aggregate_create(context, values, metadata=None):
|
||||
"""Create a new aggregate with metadata."""
|
||||
return IMPL.aggregate_create(context, values, metadata)
|
||||
|
@ -139,20 +139,6 @@ def require_instance_exists_using_uuid(f):
|
||||
return wrapper
|
||||
|
||||
|
||||
def require_volume_exists(f):
|
||||
"""Decorator to require the specified volume to exist.
|
||||
|
||||
Requires the wrapped function to use context and volume_id as
|
||||
their first two arguments.
|
||||
"""
|
||||
|
||||
def wrapper(context, volume_id, *args, **kwargs):
|
||||
db.volume_get(context, volume_id)
|
||||
return f(context, volume_id, *args, **kwargs)
|
||||
wrapper.__name__ = f.__name__
|
||||
return wrapper
|
||||
|
||||
|
||||
def require_aggregate_exists(f):
|
||||
"""Decorator to require the specified aggregate to exist.
|
||||
|
||||
@ -416,24 +402,6 @@ def service_get_all_compute_sorted(context):
|
||||
label)
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def service_get_all_volume_sorted(context):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
topic = FLAGS.volume_topic
|
||||
label = 'volume_gigabytes'
|
||||
subq = model_query(context, models.Volume.host,
|
||||
func.sum(models.Volume.size).label(label),
|
||||
session=session, read_deleted="no").\
|
||||
group_by(models.Volume.host).\
|
||||
subquery()
|
||||
return _service_get_all_topic_subquery(context,
|
||||
session,
|
||||
topic,
|
||||
subq,
|
||||
label)
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def service_get_by_args(context, host, binary):
|
||||
result = model_query(context, models.Service).\
|
||||
@ -2884,116 +2852,6 @@ def reservation_expire(context):
|
||||
###################
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_allocate_iscsi_target(context, volume_id, host):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
iscsi_target_ref = model_query(context, models.IscsiTarget,
|
||||
session=session, read_deleted="no").\
|
||||
filter_by(volume=None).\
|
||||
filter_by(host=host).\
|
||||
with_lockmode('update').\
|
||||
first()
|
||||
|
||||
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
|
||||
# then this has concurrency issues
|
||||
if not iscsi_target_ref:
|
||||
raise db.NoMoreTargets()
|
||||
|
||||
iscsi_target_ref.volume_id = volume_id
|
||||
session.add(iscsi_target_ref)
|
||||
|
||||
return iscsi_target_ref.target_num
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_attached(context, volume_id, instance_uuid, mountpoint):
|
||||
if not uuidutils.is_uuid_like(instance_uuid):
|
||||
raise exception.InvalidUUID(instance_uuid)
|
||||
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
volume_ref = volume_get(context, volume_id, session=session)
|
||||
volume_ref['status'] = 'in-use'
|
||||
volume_ref['mountpoint'] = mountpoint
|
||||
volume_ref['attach_status'] = 'attached'
|
||||
volume_ref['instance_uuid'] = instance_uuid
|
||||
volume_ref['attach_time'] = timeutils.utcnow()
|
||||
volume_ref.save(session=session)
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_create(context, values):
|
||||
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
|
||||
models.VolumeMetadata)
|
||||
volume_ref = models.Volume()
|
||||
if not values.get('id'):
|
||||
values['id'] = str(utils.gen_uuid())
|
||||
volume_ref.update(values)
|
||||
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
volume_ref.save(session=session)
|
||||
|
||||
return volume_get(context, values['id'], session=session)
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_data_get_for_project(context, project_id, session=None):
|
||||
result = model_query(context,
|
||||
func.count(models.Volume.id),
|
||||
func.sum(models.Volume.size),
|
||||
read_deleted="no",
|
||||
session=session).\
|
||||
filter_by(project_id=project_id).\
|
||||
first()
|
||||
|
||||
# NOTE(vish): convert None to 0
|
||||
return (result[0] or 0, result[1] or 0)
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_destroy(context, volume_id):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
volume_ref = volume_get(context, volume_id, session=session)
|
||||
session.query(models.Volume).\
|
||||
filter_by(id=volume_id).\
|
||||
update({'deleted': True,
|
||||
'deleted_at': timeutils.utcnow(),
|
||||
'updated_at': literal_column('updated_at')})
|
||||
session.query(models.IscsiTarget).\
|
||||
filter_by(volume_id=volume_id).\
|
||||
update({'volume_id': None})
|
||||
session.query(models.VolumeMetadata).\
|
||||
filter_by(volume_id=volume_id).\
|
||||
update({'deleted': True,
|
||||
'deleted_at': timeutils.utcnow(),
|
||||
'updated_at': literal_column('updated_at')})
|
||||
return volume_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_detached(context, volume_id):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
volume_ref = volume_get(context, volume_id, session=session)
|
||||
volume_ref['status'] = 'available'
|
||||
volume_ref['mountpoint'] = None
|
||||
volume_ref['attach_status'] = 'detached'
|
||||
volume_ref['instance_uuid'] = None
|
||||
volume_ref['attach_time'] = None
|
||||
volume_ref.save(session=session)
|
||||
|
||||
|
||||
@require_context
|
||||
def _volume_get_query(context, session=None, project_only=False):
|
||||
return model_query(context, models.Volume, session=session,
|
||||
project_only=project_only).\
|
||||
options(joinedload('volume_metadata')).\
|
||||
options(joinedload('volume_type'))
|
||||
|
||||
|
||||
@require_context
|
||||
def _ec2_volume_get_query(context, session=None):
|
||||
return model_query(context, models.VolumeIdMapping,
|
||||
@ -3006,48 +2864,6 @@ def _ec2_snapshot_get_query(context, session=None):
|
||||
session=session, read_deleted='yes')
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_get(context, volume_id, session=None):
|
||||
result = _volume_get_query(context, session=session, project_only=True).\
|
||||
filter_by(id=volume_id).\
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.VolumeNotFound(volume_id=volume_id)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_get_all(context):
|
||||
return _volume_get_query(context).all()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_get_all_by_host(context, host):
|
||||
return _volume_get_query(context).filter_by(host=host).all()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_get_all_by_instance_uuid(context, instance_uuid):
|
||||
result = model_query(context, models.Volume, read_deleted="no").\
|
||||
options(joinedload('volume_metadata')).\
|
||||
options(joinedload('volume_type')).\
|
||||
filter_by(instance_uuid=instance_uuid).\
|
||||
all()
|
||||
|
||||
if not result:
|
||||
return []
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_get_all_by_project(context, project_id):
|
||||
authorize_project_context(context, project_id)
|
||||
return _volume_get_query(context).filter_by(project_id=project_id).all()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_get_iscsi_target_num(context, volume_id):
|
||||
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
|
||||
@ -3060,23 +2876,6 @@ def volume_get_iscsi_target_num(context, volume_id):
|
||||
return result.target_num
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_update(context, volume_id, values):
|
||||
session = get_session()
|
||||
volume_ref = volume_get(context, volume_id, session=session)
|
||||
metadata = values.get('metadata')
|
||||
if metadata is not None:
|
||||
volume_metadata_update(context,
|
||||
volume_id,
|
||||
values.pop('metadata'),
|
||||
delete=True)
|
||||
with session.begin():
|
||||
volume_ref.update(values)
|
||||
volume_ref.save(session=session)
|
||||
|
||||
return volume_ref
|
||||
|
||||
|
||||
@require_context
|
||||
def ec2_volume_create(context, volume_uuid, id=None):
|
||||
"""Create ec2 compatable volume by provided uuid"""
|
||||
@ -3151,84 +2950,6 @@ def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
|
||||
return result['uuid']
|
||||
|
||||
|
||||
####################
|
||||
|
||||
def _volume_metadata_get_query(context, volume_id, session=None):
|
||||
return model_query(context, models.VolumeMetadata,
|
||||
session=session, read_deleted="no").\
|
||||
filter_by(volume_id=volume_id)
|
||||
|
||||
|
||||
@require_context
|
||||
@require_volume_exists
|
||||
def volume_metadata_get(context, volume_id):
|
||||
rows = _volume_metadata_get_query(context, volume_id).all()
|
||||
result = {}
|
||||
for row in rows:
|
||||
result[row['key']] = row['value']
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
@require_volume_exists
|
||||
def volume_metadata_delete(context, volume_id, key):
|
||||
_volume_metadata_get_query(context, volume_id).\
|
||||
filter_by(key=key).\
|
||||
update({'deleted': True,
|
||||
'deleted_at': timeutils.utcnow(),
|
||||
'updated_at': literal_column('updated_at')})
|
||||
|
||||
|
||||
@require_context
|
||||
@require_volume_exists
|
||||
def volume_metadata_get_item(context, volume_id, key, session=None):
|
||||
result = _volume_metadata_get_query(context, volume_id, session=session).\
|
||||
filter_by(key=key).\
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.VolumeMetadataNotFound(metadata_key=key,
|
||||
volume_id=volume_id)
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
@require_volume_exists
|
||||
def volume_metadata_update(context, volume_id, metadata, delete):
|
||||
session = get_session()
|
||||
|
||||
# Set existing metadata to deleted if delete argument is True
|
||||
if delete:
|
||||
original_metadata = volume_metadata_get(context, volume_id)
|
||||
for meta_key, meta_value in original_metadata.iteritems():
|
||||
if meta_key not in metadata:
|
||||
meta_ref = volume_metadata_get_item(context, volume_id,
|
||||
meta_key, session)
|
||||
meta_ref.update({'deleted': True})
|
||||
meta_ref.save(session=session)
|
||||
|
||||
meta_ref = None
|
||||
|
||||
# Now update all existing items with new values, or create new meta objects
|
||||
for meta_key, meta_value in metadata.iteritems():
|
||||
|
||||
# update the value whether it exists or not
|
||||
item = {"value": meta_value}
|
||||
|
||||
try:
|
||||
meta_ref = volume_metadata_get_item(context, volume_id,
|
||||
meta_key, session)
|
||||
except exception.VolumeMetadataNotFound:
|
||||
meta_ref = models.VolumeMetadata()
|
||||
item.update({"key": meta_key, "volume_id": volume_id})
|
||||
|
||||
meta_ref.update(item)
|
||||
meta_ref.save(session=session)
|
||||
|
||||
return metadata
|
||||
|
||||
|
||||
###################
|
||||
|
||||
|
||||
@ -3274,13 +2995,6 @@ def snapshot_get_all(context):
|
||||
return model_query(context, models.Snapshot).all()
|
||||
|
||||
|
||||
@require_context
|
||||
def snapshot_get_all_for_volume(context, volume_id):
|
||||
return model_query(context, models.Snapshot, read_deleted='no',
|
||||
project_only=True).\
|
||||
filter_by(volume_id=volume_id).all()
|
||||
|
||||
|
||||
@require_context
|
||||
def snapshot_get_all_by_project(context, project_id):
|
||||
authorize_project_context(context, project_id)
|
||||
@ -3901,7 +3615,7 @@ def instance_type_create(context, values):
|
||||
|
||||
|
||||
def _dict_with_extra_specs(inst_type_query):
|
||||
"""Takes an instance, volume, or instance type query returned
|
||||
"""Takes an instance or instance type query returned
|
||||
by sqlalchemy and returns it as a dictionary, converting the
|
||||
extra_specs entry from a list of dicts:
|
||||
|
||||
@ -4443,185 +4157,6 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
|
||||
return specs
|
||||
|
||||
|
||||
##################
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_type_create(context, values):
|
||||
"""Create a new instance type. In order to pass in extra specs,
|
||||
the values dict should contain a 'extra_specs' key/value pair:
|
||||
|
||||
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
|
||||
|
||||
"""
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
try:
|
||||
volume_type_get_by_name(context, values['name'], session)
|
||||
raise exception.VolumeTypeExists(name=values['name'])
|
||||
except exception.VolumeTypeNotFoundByName:
|
||||
pass
|
||||
try:
|
||||
values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
|
||||
models.VolumeTypeExtraSpecs)
|
||||
volume_type_ref = models.VolumeTypes()
|
||||
volume_type_ref.update(values)
|
||||
volume_type_ref.save()
|
||||
except Exception, e:
|
||||
raise exception.DBError(e)
|
||||
return volume_type_ref
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_type_get_all(context, inactive=False, filters=None):
|
||||
"""
|
||||
Returns a dict describing all volume_types with name as key.
|
||||
"""
|
||||
filters = filters or {}
|
||||
|
||||
read_deleted = "yes" if inactive else "no"
|
||||
rows = model_query(context, models.VolumeTypes,
|
||||
read_deleted=read_deleted).\
|
||||
options(joinedload('extra_specs')).\
|
||||
order_by("name").\
|
||||
all()
|
||||
|
||||
# TODO(sirp): this patern of converting rows to a result with extra_specs
|
||||
# is repeated quite a bit, might be worth creating a method for it
|
||||
result = {}
|
||||
for row in rows:
|
||||
result[row['name']] = _dict_with_extra_specs(row)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_type_get(context, id, session=None):
|
||||
"""Returns a dict describing specific volume_type"""
|
||||
result = model_query(context, models.VolumeTypes, session=session).\
|
||||
options(joinedload('extra_specs')).\
|
||||
filter_by(id=id).\
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.VolumeTypeNotFound(volume_type_id=id)
|
||||
|
||||
return _dict_with_extra_specs(result)
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_type_get_by_name(context, name, session=None):
|
||||
"""Returns a dict describing specific volume_type"""
|
||||
result = model_query(context, models.VolumeTypes, session=session).\
|
||||
options(joinedload('extra_specs')).\
|
||||
filter_by(name=name).\
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
|
||||
else:
|
||||
return _dict_with_extra_specs(result)
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_type_destroy(context, name):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
volume_type_ref = volume_type_get_by_name(context, name,
|
||||
session=session)
|
||||
volume_type_id = volume_type_ref['id']
|
||||
session.query(models.VolumeTypes).\
|
||||
filter_by(id=volume_type_id).\
|
||||
update({'deleted': True,
|
||||
'deleted_at': timeutils.utcnow(),
|
||||
'updated_at': literal_column('updated_at')})
|
||||
session.query(models.VolumeTypeExtraSpecs).\
|
||||
filter_by(volume_type_id=volume_type_id).\
|
||||
update({'deleted': True,
|
||||
'deleted_at': timeutils.utcnow(),
|
||||
'updated_at': literal_column('updated_at')})
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_get_active_by_window(context, begin, end=None,
|
||||
project_id=None):
|
||||
"""Return volumes that were active during window."""
|
||||
session = get_session()
|
||||
query = session.query(models.Volume)
|
||||
|
||||
query = query.filter(or_(models.Volume.deleted_at == None,
|
||||
models.Volume.deleted_at > begin))
|
||||
if end:
|
||||
query = query.filter(models.Volume.created_at < end)
|
||||
if project_id:
|
||||
query = query.filter_by(project_id=project_id)
|
||||
|
||||
return query.all()
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
|
||||
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
|
||||
read_deleted="no").\
|
||||
filter_by(volume_type_id=volume_type_id)
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_type_extra_specs_get(context, volume_type_id):
|
||||
rows = _volume_type_extra_specs_query(context, volume_type_id).\
|
||||
all()
|
||||
|
||||
result = {}
|
||||
for row in rows:
|
||||
result[row['key']] = row['value']
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_type_extra_specs_delete(context, volume_type_id, key):
|
||||
_volume_type_extra_specs_query(context, volume_type_id).\
|
||||
filter_by(key=key).\
|
||||
update({'deleted': True,
|
||||
'deleted_at': timeutils.utcnow(),
|
||||
'updated_at': literal_column('updated_at')})
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_type_extra_specs_get_item(context, volume_type_id, key,
|
||||
session=None):
|
||||
result = _volume_type_extra_specs_query(
|
||||
context, volume_type_id, session=session).\
|
||||
filter_by(key=key).\
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.VolumeTypeExtraSpecsNotFound(
|
||||
extra_specs_key=key, volume_type_id=volume_type_id)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def volume_type_extra_specs_update_or_create(context, volume_type_id,
|
||||
specs):
|
||||
session = get_session()
|
||||
spec_ref = None
|
||||
for key, value in specs.iteritems():
|
||||
try:
|
||||
spec_ref = volume_type_extra_specs_get_item(
|
||||
context, volume_type_id, key, session)
|
||||
except exception.VolumeTypeExtraSpecsNotFound:
|
||||
spec_ref = models.VolumeTypeExtraSpecs()
|
||||
spec_ref.update({"key": key, "value": value,
|
||||
"volume_type_id": volume_type_id,
|
||||
"deleted": False})
|
||||
spec_ref.save(session=session)
|
||||
return specs
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
@ -4664,211 +4199,6 @@ def s3_image_create(context, image_uuid):
|
||||
####################
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_backend_conf_create(context, values):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
config_params = values['config_params']
|
||||
backend_conf = model_query(context, models.SMBackendConf,
|
||||
session=session,
|
||||
read_deleted="yes").\
|
||||
filter_by(config_params=config_params).\
|
||||
first()
|
||||
|
||||
if backend_conf:
|
||||
raise exception.Duplicate(_('Backend exists'))
|
||||
else:
|
||||
backend_conf = models.SMBackendConf()
|
||||
backend_conf.update(values)
|
||||
backend_conf.save(session=session)
|
||||
return backend_conf
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_backend_conf_update(context, sm_backend_id, values):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
backend_conf = model_query(context, models.SMBackendConf,
|
||||
session=session,
|
||||
read_deleted="yes").\
|
||||
filter_by(id=sm_backend_id).\
|
||||
first()
|
||||
|
||||
if not backend_conf:
|
||||
raise exception.NotFound(
|
||||
_("No backend config with id %(sm_backend_id)s") % locals())
|
||||
|
||||
backend_conf.update(values)
|
||||
backend_conf.save(session=session)
|
||||
return backend_conf
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_backend_conf_delete(context, sm_backend_id):
|
||||
# FIXME(sirp): for consistency, shouldn't this just mark as deleted with
|
||||
# `purge` actually deleting the record?
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
model_query(context, models.SMBackendConf, session=session,
|
||||
read_deleted="yes").\
|
||||
filter_by(id=sm_backend_id).\
|
||||
delete()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_backend_conf_get(context, sm_backend_id):
|
||||
result = model_query(context, models.SMBackendConf, read_deleted="yes").\
|
||||
filter_by(id=sm_backend_id).\
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.NotFound(_("No backend config with id "
|
||||
"%(sm_backend_id)s") % locals())
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_backend_conf_get_by_sr(context, sr_uuid):
|
||||
result = model_query(context, models.SMBackendConf, read_deleted="yes").\
|
||||
filter_by(sr_uuid=sr_uuid).\
|
||||
first()
|
||||
if not result:
|
||||
raise exception.NotFound(_("No backend config with sr uuid "
|
||||
"%(sr_uuid)s") % locals())
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_backend_conf_get_all(context):
|
||||
return model_query(context, models.SMBackendConf, read_deleted="yes").\
|
||||
all()
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
def _sm_flavor_get_query(context, sm_flavor_id, session=None):
|
||||
return model_query(context, models.SMFlavors, session=session,
|
||||
read_deleted="yes").\
|
||||
filter_by(id=sm_flavor_id)
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_flavor_create(context, values):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
sm_flavor = model_query(context, models.SMFlavors,
|
||||
session=session,
|
||||
read_deleted="yes").\
|
||||
filter_by(label=values['label']).\
|
||||
first()
|
||||
if not sm_flavor:
|
||||
sm_flavor = models.SMFlavors()
|
||||
sm_flavor.update(values)
|
||||
sm_flavor.save(session=session)
|
||||
else:
|
||||
raise exception.Duplicate(_('Flavor exists'))
|
||||
return sm_flavor
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_flavor_update(context, sm_flavor_id, values):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
sm_flavor = model_query(context, models.SMFlavors,
|
||||
session=session,
|
||||
read_deleted="yes").\
|
||||
filter_by(id=sm_flavor_id).\
|
||||
first()
|
||||
if not sm_flavor:
|
||||
raise exception.NotFound(
|
||||
_('%(sm_flavor_id) flavor not found') % locals())
|
||||
sm_flavor.update(values)
|
||||
sm_flavor.save(session=session)
|
||||
return sm_flavor
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_flavor_delete(context, sm_flavor_id):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
_sm_flavor_get_query(context, sm_flavor_id).delete()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_flavor_get(context, sm_flavor_id):
|
||||
result = _sm_flavor_get_query(context, sm_flavor_id).first()
|
||||
|
||||
if not result:
|
||||
raise exception.NotFound(
|
||||
_("No sm_flavor called %(sm_flavor_id)s") % locals())
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_flavor_get_all(context):
|
||||
return model_query(context, models.SMFlavors, read_deleted="yes").all()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sm_flavor_get_by_label(context, sm_flavor_label):
|
||||
result = model_query(context, models.SMFlavors,
|
||||
read_deleted="yes").\
|
||||
filter_by(label=sm_flavor_label).first()
|
||||
if not result:
|
||||
raise exception.NotFound(
|
||||
_("No sm_flavor called %(sm_flavor_label)s") % locals())
|
||||
return result
|
||||
|
||||
|
||||
###############################
|
||||
|
||||
|
||||
def _sm_volume_get_query(context, volume_id, session=None):
|
||||
return model_query(context, models.SMVolume, session=session,
|
||||
read_deleted="yes").\
|
||||
filter_by(id=volume_id)
|
||||
|
||||
|
||||
def sm_volume_create(context, values):
|
||||
sm_volume = models.SMVolume()
|
||||
sm_volume.update(values)
|
||||
sm_volume.save()
|
||||
return sm_volume
|
||||
|
||||
|
||||
def sm_volume_update(context, volume_id, values):
|
||||
sm_volume = sm_volume_get(context, volume_id)
|
||||
sm_volume.update(values)
|
||||
sm_volume.save()
|
||||
return sm_volume
|
||||
|
||||
|
||||
def sm_volume_delete(context, volume_id):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
_sm_volume_get_query(context, volume_id, session=session).delete()
|
||||
|
||||
|
||||
def sm_volume_get(context, volume_id):
|
||||
result = _sm_volume_get_query(context, volume_id).first()
|
||||
|
||||
if not result:
|
||||
raise exception.NotFound(
|
||||
_("No sm_volume with id %(volume_id)s") % locals())
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def sm_volume_get_all(context):
|
||||
return model_query(context, models.SMVolume, read_deleted="yes").all()
|
||||
|
||||
|
||||
################
|
||||
|
||||
|
||||
def _aggregate_get_query(context, model_class, id_field, id,
|
||||
session=None, read_deleted=None):
|
||||
return model_query(context, model_class, session=session,
|
||||
|
@ -382,49 +382,6 @@ class Volume(BASE, NovaBase):
    volume_type_id = Column(Integer)


class VolumeMetadata(BASE, NovaBase):
    """Represents a metadata key/value pair for a volume"""
    __tablename__ = 'volume_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
    volume = relationship(Volume, backref="volume_metadata",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                              'VolumeMetadata.volume_id == Volume.id,'
                              'VolumeMetadata.deleted == False)')


class VolumeTypes(BASE, NovaBase):
    """Represent possible volume_types of volumes offered"""
    __tablename__ = "volume_types"
    id = Column(Integer, primary_key=True)
    name = Column(String(255))

    volumes = relationship(Volume,
                           backref=backref('volume_type', uselist=False),
                           foreign_keys=id,
                           primaryjoin='and_('
                               'Volume.volume_type_id == VolumeTypes.id, '
                               'VolumeTypes.deleted == False)')


class VolumeTypeExtraSpecs(BASE, NovaBase):
    """Represents additional specs as key/value pairs for a volume_type"""
    __tablename__ = 'volume_type_extra_specs'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
                            nullable=False)
    volume_type = relationship(VolumeTypes, backref="extra_specs",
                               foreign_keys=volume_type_id,
                               primaryjoin='and_('
                                   'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
                                   'VolumeTypeExtraSpecs.deleted == False)')


class Quota(BASE, NovaBase):
    """Represents a single quota override for a project.

@ -959,32 +916,6 @@ class SnapshotIdMapping(BASE, NovaBase):
    uuid = Column(String(36), nullable=False)


class SMFlavors(BASE, NovaBase):
    """Represents a flavor for SM volumes."""
    __tablename__ = 'sm_flavors'
    id = Column(Integer(), primary_key=True)
    label = Column(String(255))
    description = Column(String(255))


class SMBackendConf(BASE, NovaBase):
    """Represents the connection to the backend for SM."""
    __tablename__ = 'sm_backend_config'
    id = Column(Integer(), primary_key=True)
    flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
    sr_uuid = Column(String(255))
    sr_type = Column(String(255))
    config_params = Column(String(2047))


class SMVolume(BASE, NovaBase):
    __tablename__ = 'sm_volume'
    id = Column(String(36), ForeignKey(Volume.id), primary_key=True)
    backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
                        nullable=False)
    vdi_uuid = Column(String(255))


class InstanceFault(BASE, NovaBase):
    __tablename__ = 'instance_faults'
    id = Column(Integer(), primary_key=True, autoincrement=True)
@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.

"""Quotas for instances, volumes, and floating ips."""
"""Quotas for instances, and floating ips."""

import datetime

@ -42,13 +42,7 @@ quota_opts = [
    cfg.IntOpt('quota_ram',
               default=50 * 1024,
               help='megabytes of instance ram allowed per project'),
    cfg.IntOpt('quota_volumes',
               default=10,
               help='number of volumes allowed per project'),
    cfg.IntOpt('quota_gigabytes',
               default=1000,
               help='number of volume gigabytes allowed per project'),
    cfg.IntOpt('quota_floating_ips',
               default=10,
               help='number of floating ips allowed per project'),
    cfg.IntOpt('quota_metadata_items',

@ -814,12 +808,6 @@ def _sync_instances(context, project_id, session):
                        context, project_id, session=session)))


def _sync_volumes(context, project_id, session):
    return dict(zip(('volumes', 'gigabytes'),
                    db.volume_data_get_for_project(
                        context, project_id, session=session)))


def _sync_floating_ips(context, project_id, session):
    return dict(floating_ips=db.floating_ip_count_by_project(
        context, project_id, session=session))

@ -837,8 +825,6 @@ resources = [
    ReservableResource('instances', _sync_instances, 'quota_instances'),
    ReservableResource('cores', _sync_instances, 'quota_cores'),
    ReservableResource('ram', _sync_instances, 'quota_ram'),
    ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
    ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
    ReservableResource('floating_ips', _sync_floating_ips,
                       'quota_floating_ips'),
    AbsoluteResource('metadata_items', 'quota_metadata_items'),
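The quota hunks above remove the 'volumes' and 'gigabytes' entries end to end: the config options, the _sync_volumes usage callback, and the ReservableResource registrations. To make the removed mechanism concrete, here is a minimal, self-contained sketch of the reservable-resource pattern (simplified; these are not Nova's actual classes, and the helper names are made up for illustration):

# Simplified illustration of the quota pattern being removed; not Nova code.

def sync_volumes(project_usage):
    """Analogue of _sync_volumes: report volume count and total gigabytes."""
    count, gigabytes = project_usage
    return {'volumes': count, 'gigabytes': gigabytes}


def can_reserve(limits, project_usage, deltas):
    """Allow a reservation only if usage + delta stays within every limit."""
    in_use = sync_volumes(project_usage)
    return all(in_use.get(name, 0) + delta <= limits[name]
               for name, delta in deltas.items())


# Example: quota_volumes=10 and quota_gigabytes=1000; the project already has
# 9 volumes totalling 500 GB and asks to create one more 100 GB volume.
limits = {'volumes': 10, 'gigabytes': 1000}
print(can_reserve(limits, (9, 500), {'volumes': 1, 'gigabytes': 100}))  # True
print(can_reserve(limits, (9, 500), {'volumes': 2, 'gigabytes': 100}))  # False

After this commit Nova's quota engine no longer tracks volumes or gigabytes at all; volume quotas are expected to be enforced by the separate volume service instead.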
@ -2015,20 +2015,12 @@ class CloudTestCase(test.TestCase):
|
||||
}
|
||||
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
|
||||
|
||||
def fake_volume_get(ctxt, volume_id, session=None):
|
||||
if volume_id == 87654321:
|
||||
return {'id': volume_id,
|
||||
'attach_time': '13:56:24',
|
||||
'status': 'in-use'}
|
||||
raise exception.VolumeNotFound(volume_id=volume_id)
|
||||
self.stubs.Set(db, 'volume_get', fake_volume_get)
|
||||
|
||||
def fake_get_instance_uuid_by_ec2_id(ctxt, int_id):
|
||||
if int_id == 305419896:
|
||||
return 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
|
||||
raise exception.InstanceNotFound(instance_id=int_id)
|
||||
self.stubs.Set(db, 'get_instance_uuid_by_ec2_id',
|
||||
fake_get_instance_uuid_by_ec2_id)
|
||||
fake_get_instance_uuid_by_ec2_id)
|
||||
|
||||
get_attribute = functools.partial(
|
||||
self.cloud.describe_instance_attribute,
|
||||
|
@ -24,11 +24,11 @@ from nova.tests.api.openstack import fakes
|
||||
|
||||
def quota_set(class_name):
|
||||
return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
|
||||
'volumes': 10, 'gigabytes': 1000, 'ram': 51200,
|
||||
'floating_ips': 10, 'instances': 10, 'injected_files': 5,
|
||||
'cores': 20, 'injected_file_content_bytes': 10240,
|
||||
'security_groups': 10, 'security_group_rules': 20,
|
||||
'key_pairs': 100, 'injected_file_path_bytes': 255}}
|
||||
'ram': 51200, 'floating_ips': 10, 'instances': 10,
|
||||
'injected_files': 5, 'cores': 20,
|
||||
'injected_file_content_bytes': 10240, 'security_groups': 10,
|
||||
'security_group_rules': 20, 'key_pairs': 100,
|
||||
'injected_file_path_bytes': 255}}
|
||||
|
||||
|
||||
class QuotaClassSetsTest(test.TestCase):
|
||||
@ -42,10 +42,8 @@ class QuotaClassSetsTest(test.TestCase):
|
||||
'instances': 10,
|
||||
'cores': 20,
|
||||
'ram': 51200,
|
||||
'volumes': 10,
|
||||
'floating_ips': 10,
|
||||
'metadata_items': 128,
|
||||
'gigabytes': 1000,
|
||||
'injected_files': 5,
|
||||
'injected_file_path_bytes': 255,
|
||||
'injected_file_content_bytes': 10240,
|
||||
@ -62,8 +60,6 @@ class QuotaClassSetsTest(test.TestCase):
|
||||
self.assertEqual(qs['instances'], 10)
|
||||
self.assertEqual(qs['cores'], 20)
|
||||
self.assertEqual(qs['ram'], 51200)
|
||||
self.assertEqual(qs['volumes'], 10)
|
||||
self.assertEqual(qs['gigabytes'], 1000)
|
||||
self.assertEqual(qs['floating_ips'], 10)
|
||||
self.assertEqual(qs['metadata_items'], 128)
|
||||
self.assertEqual(qs['injected_files'], 5)
|
||||
@ -89,8 +85,7 @@ class QuotaClassSetsTest(test.TestCase):
|
||||
|
||||
def test_quotas_update_as_admin(self):
|
||||
body = {'quota_class_set': {'instances': 50, 'cores': 50,
|
||||
'ram': 51200, 'volumes': 10,
|
||||
'gigabytes': 1000, 'floating_ips': 10,
|
||||
'ram': 51200, 'floating_ips': 10,
|
||||
'metadata_items': 128, 'injected_files': 5,
|
||||
'injected_file_content_bytes': 10240,
|
||||
'injected_file_path_bytes': 255,
|
||||
@ -108,8 +103,7 @@ class QuotaClassSetsTest(test.TestCase):
|
||||
|
||||
def test_quotas_update_as_user(self):
|
||||
body = {'quota_class_set': {'instances': 50, 'cores': 50,
|
||||
'ram': 51200, 'volumes': 10,
|
||||
'gigabytes': 1000, 'floating_ips': 10,
|
||||
'ram': 51200, 'floating_ips': 10,
|
||||
'metadata_items': 128, 'injected_files': 5,
|
||||
'injected_file_content_bytes': 10240,
|
||||
'security_groups': 10,
|
||||
@ -135,8 +129,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
|
||||
metadata_items=10,
|
||||
injected_file_path_bytes=255,
|
||||
injected_file_content_bytes=20,
|
||||
volumes=30,
|
||||
gigabytes=40,
|
||||
ram=50,
|
||||
floating_ips=60,
|
||||
instances=70,
|
||||
@ -162,8 +154,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
|
||||
exemplar = dict(quota_class_set=dict(
|
||||
metadata_items='10',
|
||||
injected_file_content_bytes='20',
|
||||
volumes='30',
|
||||
gigabytes='40',
|
||||
ram='50',
|
||||
floating_ips='60',
|
||||
instances='70',
|
||||
@ -177,8 +167,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
|
||||
'<metadata_items>10</metadata_items>'
|
||||
'<injected_file_content_bytes>20'
|
||||
'</injected_file_content_bytes>'
|
||||
'<volumes>30</volumes>'
|
||||
'<gigabytes>40</gigabytes>'
|
||||
'<ram>50</ram>'
|
||||
'<floating_ips>60</floating_ips>'
|
||||
'<instances>70</instances>'
|
||||
|
@ -25,8 +25,8 @@ from nova.tests.api.openstack import fakes
|
||||
|
||||
|
||||
def quota_set(id):
|
||||
return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10,
|
||||
'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10,
|
||||
return {'quota_set': {'id': id, 'metadata_items': 128,
|
||||
'ram': 51200, 'floating_ips': 10,
|
||||
'instances': 10, 'injected_files': 5, 'cores': 20,
|
||||
'injected_file_content_bytes': 10240,
|
||||
'security_groups': 10, 'security_group_rules': 20,
|
||||
@ -44,10 +44,8 @@ class QuotaSetsTest(test.TestCase):
|
||||
'instances': 10,
|
||||
'cores': 20,
|
||||
'ram': 51200,
|
||||
'volumes': 10,
|
||||
'floating_ips': 10,
|
||||
'metadata_items': 128,
|
||||
'gigabytes': 1000,
|
||||
'injected_files': 5,
|
||||
'injected_file_path_bytes': 255,
|
||||
'injected_file_content_bytes': 10240,
|
||||
@ -63,8 +61,6 @@ class QuotaSetsTest(test.TestCase):
|
||||
self.assertEqual(qs['instances'], 10)
|
||||
self.assertEqual(qs['cores'], 20)
|
||||
self.assertEqual(qs['ram'], 51200)
|
||||
self.assertEqual(qs['volumes'], 10)
|
||||
self.assertEqual(qs['gigabytes'], 1000)
|
||||
self.assertEqual(qs['floating_ips'], 10)
|
||||
self.assertEqual(qs['metadata_items'], 128)
|
||||
self.assertEqual(qs['injected_files'], 5)
|
||||
@ -85,8 +81,6 @@ class QuotaSetsTest(test.TestCase):
|
||||
'instances': 10,
|
||||
'cores': 20,
|
||||
'ram': 51200,
|
||||
'volumes': 10,
|
||||
'gigabytes': 1000,
|
||||
'floating_ips': 10,
|
||||
'metadata_items': 128,
|
||||
'injected_files': 5,
|
||||
@ -113,8 +107,7 @@ class QuotaSetsTest(test.TestCase):
|
||||
|
||||
def test_quotas_update_as_admin(self):
|
||||
body = {'quota_set': {'instances': 50, 'cores': 50,
|
||||
'ram': 51200, 'volumes': 10,
|
||||
'gigabytes': 1000, 'floating_ips': 10,
|
||||
'ram': 51200, 'floating_ips': 10,
|
||||
'metadata_items': 128, 'injected_files': 5,
|
||||
'injected_file_content_bytes': 10240,
|
||||
'injected_file_path_bytes': 255,
|
||||
@ -130,8 +123,7 @@ class QuotaSetsTest(test.TestCase):
|
||||
|
||||
def test_quotas_update_as_user(self):
|
||||
body = {'quota_set': {'instances': 50, 'cores': 50,
|
||||
'ram': 51200, 'volumes': 10,
|
||||
'gigabytes': 1000, 'floating_ips': 10,
|
||||
'ram': 51200, 'floating_ips': 10,
|
||||
'metadata_items': 128, 'injected_files': 5,
|
||||
'injected_file_content_bytes': 10240,
|
||||
'security_groups': 10,
|
||||
@ -144,8 +136,7 @@ class QuotaSetsTest(test.TestCase):
|
||||
|
||||
def test_quotas_update_invalid_limit(self):
|
||||
body = {'quota_set': {'instances': -2, 'cores': -2,
|
||||
'ram': -2, 'volumes': -2,
|
||||
'gigabytes': -2, 'floating_ips': -2,
|
||||
'ram': -2, 'floating_ips': -2,
|
||||
'metadata_items': -2, 'injected_files': -2,
|
||||
'injected_file_content_bytes': -2}}
|
||||
|
||||
@ -167,8 +158,6 @@ class QuotaXMLSerializerTest(test.TestCase):
|
||||
metadata_items=10,
|
||||
injected_file_path_bytes=255,
|
||||
injected_file_content_bytes=20,
|
||||
volumes=30,
|
||||
gigabytes=40,
|
||||
ram=50,
|
||||
floating_ips=60,
|
||||
instances=70,
|
||||
@ -193,8 +182,6 @@ class QuotaXMLSerializerTest(test.TestCase):
|
||||
exemplar = dict(quota_set=dict(
|
||||
metadata_items='10',
|
||||
injected_file_content_bytes='20',
|
||||
volumes='30',
|
||||
gigabytes='40',
|
||||
ram='50',
|
||||
floating_ips='60',
|
||||
instances='70',
|
||||
@ -208,8 +195,6 @@ class QuotaXMLSerializerTest(test.TestCase):
|
||||
'<metadata_items>10</metadata_items>'
|
||||
'<injected_file_content_bytes>20'
|
||||
'</injected_file_content_bytes>'
|
||||
'<volumes>30</volumes>'
|
||||
'<gigabytes>40</gigabytes>'
|
||||
'<ram>50</ram>'
|
||||
'<floating_ips>60</floating_ips>'
|
||||
'<instances>70</instances>'
|
||||
|
@ -136,16 +136,11 @@ class BootFromVolumeTest(test.TestCase):
|
||||
'/dev/vda')
|
||||
|
||||
|
||||
def return_volume(context, volume_id):
|
||||
return {'id': volume_id}
|
||||
|
||||
|
||||
class VolumeApiTest(test.TestCase):
|
||||
def setUp(self):
|
||||
super(VolumeApiTest, self).setUp()
|
||||
fakes.stub_out_networking(self.stubs)
|
||||
fakes.stub_out_rate_limiting(self.stubs)
|
||||
self.stubs.Set(db, 'volume_get', return_volume)
|
||||
|
||||
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
|
||||
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
|
||||
|
@ -120,8 +120,6 @@ class LimitsControllerTest(BaseLimitTestSuite):
|
||||
'ram': 512,
|
||||
'instances': 5,
|
||||
'cores': 21,
|
||||
'gigabytes': 512,
|
||||
'volumes': 5,
|
||||
'key_pairs': 10,
|
||||
'floating_ips': 10,
|
||||
'security_groups': 10,
|
||||
@ -170,8 +168,6 @@ class LimitsControllerTest(BaseLimitTestSuite):
|
||||
"maxTotalRAMSize": 512,
|
||||
"maxTotalInstances": 5,
|
||||
"maxTotalCores": 21,
|
||||
"maxTotalVolumeGigabytes": 512,
|
||||
"maxTotalVolumes": 5,
|
||||
"maxTotalKeypairs": 10,
|
||||
"maxTotalFloatingIps": 10,
|
||||
"maxSecurityGroups": 10,
|
||||
|
@ -10,8 +10,6 @@
        "maxTotalInstances": 10,
        "maxTotalKeypairs": 100,
        "maxTotalRAMSize": 51200,
-       "maxTotalVolumeGigabytes": 1000,
-       "maxTotalVolumes": 10,
        "maxSecurityGroups": 10,
        "maxSecurityGroupRules": 20
    },
@ -21,11 +21,9 @@
    <limit name="maxPersonalitySize" value="10240"/>
    <limit name="maxSecurityGroupRules" value="20"/>
    <limit name="maxTotalKeypairs" value="100"/>
-   <limit name="maxTotalVolumes" value="10"/>
    <limit name="maxSecurityGroups" value="10"/>
    <limit name="maxTotalCores" value="20"/>
    <limit name="maxTotalFloatingIps" value="10"/>
-   <limit name="maxTotalVolumeGigabytes" value="1000"/>
    <limit name="maxTotalRAMSize" value="51200"/>
  </absolute>
</limits>
@ -12,15 +12,11 @@
        "maxTotalInstances": 10,
        "maxTotalKeypairs": 100,
        "maxTotalRAMSize": 51200,
-       "maxTotalVolumeGigabytes": 1000,
-       "maxTotalVolumes": 10,
        "totalCoresUsed": 0,
        "totalInstancesUsed": 0,
        "totalKeyPairsUsed": 0,
        "totalRAMUsed": 0,
-       "totalSecurityGroupsUsed": 0,
-       "totalVolumeGigabytesUsed": 0,
-       "totalVolumesUsed": 0
+       "totalSecurityGroupsUsed": 0
    },
    "rate": [
        {
@ -19,20 +19,16 @@
    <limit name="maxPersonality" value="5"/>
    <limit name="maxImageMeta" value="128"/>
    <limit name="maxPersonalitySize" value="10240"/>
-   <limit name="totalVolumesUsed" value="0"/>
    <limit name="maxSecurityGroupRules" value="20"/>
    <limit name="maxTotalKeypairs" value="100"/>
    <limit name="totalCoresUsed" value="0"/>
-   <limit name="maxTotalVolumes" value="10"/>
    <limit name="totalRAMUsed" value="0"/>
    <limit name="totalInstancesUsed" value="0"/>
    <limit name="maxSecurityGroups" value="10"/>
-   <limit name="totalVolumeGigabytesUsed" value="0"/>
    <limit name="maxTotalCores" value="20"/>
    <limit name="totalSecurityGroupsUsed" value="0"/>
    <limit name="maxTotalFloatingIps" value="10"/>
    <limit name="totalKeyPairsUsed" value="0"/>
-   <limit name="maxTotalVolumeGigabytes" value="1000"/>
    <limit name="maxTotalRAMSize" value="51200"/>
  </absolute>
</limits>
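For reference, a sketch of the absolute-limits payload the updated samples describe, with both maxTotalVolume* entries gone (values copied from the templates above; the dict itself is only illustrative):

    expected_absolute = {
        "maxTotalInstances": 10,
        "maxTotalKeypairs": 100,
        "maxTotalRAMSize": 51200,
        "maxSecurityGroups": 10,
        "maxSecurityGroupRules": 20,
    }
    assert "maxTotalVolumes" not in expected_absolute
    assert "maxTotalVolumeGigabytes" not in expected_absolute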
@ -1077,187 +1077,3 @@ class InstanceDestroyConstraints(test.TestCase):
                          ctx, instance['uuid'], constraint)
        instance = db.instance_get_by_uuid(ctx, instance['uuid'])
        self.assertFalse(instance['deleted'])
-
-
-def _get_sm_backend_params():
-    config_params = ("name_label=testsmbackend "
-                     "server=localhost "
-                     "serverpath=/tmp/nfspath")
-    params = dict(flavor_id=1,
-                  sr_uuid=None,
-                  sr_type='nfs',
-                  config_params=config_params)
-    return params
-
-
-def _get_sm_flavor_params():
-    params = dict(label="gold",
-                  description="automatic backups")
-    return params
-
-
-class SMVolumeDBApiTestCase(test.TestCase):
-    def setUp(self):
-        super(SMVolumeDBApiTestCase, self).setUp()
-        self.user_id = 'fake'
-        self.project_id = 'fake'
-        self.context = context.RequestContext(self.user_id, self.project_id)
-
-    def test_sm_backend_conf_create(self):
-        params = _get_sm_backend_params()
-        ctxt = context.get_admin_context()
-        beconf = db.sm_backend_conf_create(ctxt,
-                                           params)
-        self.assertIsInstance(beconf['id'], int)
-
-    def test_sm_backend_conf_create_raise_duplicate(self):
-        params = _get_sm_backend_params()
-        ctxt = context.get_admin_context()
-        beconf = db.sm_backend_conf_create(ctxt,
-                                           params)
-        self.assertIsInstance(beconf['id'], int)
-        self.assertRaises(exception.Duplicate,
-                          db.sm_backend_conf_create,
-                          ctxt,
-                          params)
-
-    def test_sm_backend_conf_update(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_backend_params()
-        beconf = db.sm_backend_conf_create(ctxt,
-                                           params)
-        beconf = db.sm_backend_conf_update(ctxt,
-                                           beconf['id'],
-                                           dict(sr_uuid="FA15E-1D"))
-        self.assertEqual(beconf['sr_uuid'], "FA15E-1D")
-
-    def test_sm_backend_conf_update_raise_notfound(self):
-        ctxt = context.get_admin_context()
-        self.assertRaises(exception.NotFound,
-                          db.sm_backend_conf_update,
-                          ctxt,
-                          7,
-                          dict(sr_uuid="FA15E-1D"))
-
-    def test_sm_backend_conf_get(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_backend_params()
-        beconf = db.sm_backend_conf_create(ctxt,
-                                           params)
-        val = db.sm_backend_conf_get(ctxt, beconf['id'])
-        self.assertDictMatch(dict(val), dict(beconf))
-
-    def test_sm_backend_conf_get_raise_notfound(self):
-        ctxt = context.get_admin_context()
-        self.assertRaises(exception.NotFound,
-                          db.sm_backend_conf_get,
-                          ctxt,
-                          7)
-
-    def test_sm_backend_conf_get_by_sr(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_backend_params()
-        beconf = db.sm_backend_conf_create(ctxt,
-                                           params)
-        val = db.sm_backend_conf_get_by_sr(ctxt, beconf['sr_uuid'])
-        self.assertDictMatch(dict(val), dict(beconf))
-
-    def test_sm_backend_conf_get_by_sr_raise_notfound(self):
-        ctxt = context.get_admin_context()
-        self.assertRaises(exception.NotFound,
-                          db.sm_backend_conf_get_by_sr,
-                          ctxt,
-                          "FA15E-1D")
-
-    def test_sm_backend_conf_delete(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_backend_params()
-        beconf = db.sm_backend_conf_create(ctxt,
-                                           params)
-        db.sm_backend_conf_delete(ctxt, beconf['id'])
-        self.assertRaises(exception.NotFound,
-                          db.sm_backend_conf_get,
-                          ctxt,
-                          beconf['id'])
-
-    def test_sm_backend_conf_delete_nonexisting(self):
-        ctxt = context.get_admin_context()
-        db.sm_backend_conf_delete(ctxt, "FA15E-1D")
-
-    def test_sm_flavor_create(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_flavor_params()
-        flav = db.sm_flavor_create(ctxt,
-                                   params)
-        self.assertIsInstance(flav['id'], int)
-
-    def sm_flavor_create_raise_duplicate(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_flavor_params()
-        flav = db.sm_flavor_create(ctxt,
-                                   params)
-        self.assertRaises(exception.Duplicate,
-                          db.sm_flavor_create,
-                          params)
-
-    def test_sm_flavor_update(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_flavor_params()
-        flav = db.sm_flavor_create(ctxt,
-                                   params)
-        newparms = dict(description="basic volumes")
-        flav = db.sm_flavor_update(ctxt, flav['id'], newparms)
-        self.assertEqual(flav['description'], "basic volumes")
-
-    def test_sm_flavor_update_raise_notfound(self):
-        ctxt = context.get_admin_context()
-        self.assertRaises(exception.NotFound,
-                          db.sm_flavor_update,
-                          ctxt,
-                          7,
-                          dict(description="fakedesc"))
-
-    def test_sm_flavor_delete(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_flavor_params()
-        flav = db.sm_flavor_create(ctxt,
-                                   params)
-        db.sm_flavor_delete(ctxt, flav['id'])
-        self.assertRaises(exception.NotFound,
-                          db.sm_flavor_get,
-                          ctxt,
-                          "gold")
-
-    def test_sm_flavor_delete_nonexisting(self):
-        ctxt = context.get_admin_context()
-        db.sm_flavor_delete(ctxt, 7)
-
-    def test_sm_flavor_get(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_flavor_params()
-        flav = db.sm_flavor_create(ctxt,
-                                   params)
-        val = db.sm_flavor_get(ctxt, flav['id'])
-        self.assertDictMatch(dict(val), dict(flav))
-
-    def test_sm_flavor_get_raise_notfound(self):
-        ctxt = context.get_admin_context()
-        self.assertRaises(exception.NotFound,
-                          db.sm_flavor_get,
-                          ctxt,
-                          7)
-
-    def test_sm_flavor_get_by_label(self):
-        ctxt = context.get_admin_context()
-        params = _get_sm_flavor_params()
-        flav = db.sm_flavor_create(ctxt,
-                                   params)
-        val = db.sm_flavor_get_by_label(ctxt, flav['label'])
-        self.assertDictMatch(dict(val), dict(flav))
-
-    def test_sm_flavor_get_by_label_raise_notfound(self):
-        ctxt = context.get_admin_context()
-        self.assertRaises(exception.NotFound,
-                          db.sm_flavor_get,
-                          ctxt,
-                          "fake")
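With the Xen Storage Manager tables dropped, the sm_* helpers exercised by this deleted test class no longer exist in nova.db.api. A hedged sanity check, assuming a nova tree that already carries this change:

    from nova import db

    # After this commit, none of the Storage Manager helpers should remain.
    for name in ('sm_backend_conf_create', 'sm_backend_conf_get',
                 'sm_flavor_create', 'sm_flavor_get'):
        assert not hasattr(db, name), '%s should be gone' % name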
@ -2071,12 +2071,6 @@ class LibvirtConnTestCase(test.TestCase):
        instance_ref = db.instance_create(self.context, self.test_instance)
        instance_ref = db.instance_update(self.context, instance_ref['uuid'],
                                          instance_dict)
-        vol_dict = {'status': 'migrating', 'size': 1}
-        volume_ref = db.volume_create(self.context, vol_dict)
-        db.volume_attached(self.context,
-                           volume_ref['id'],
-                           instance_ref['uuid'],
-                           '/dev/fake')

        # Preparing mocks
        vdmock = self.mox.CreateMock(libvirt.virDomain)
@ -2107,10 +2101,7 @@ class LibvirtConnTestCase(test.TestCase):
        instance_ref = db.instance_get(self.context, instance_ref['id'])
        self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE)
        self.assertTrue(instance_ref['power_state'] == power_state.RUNNING)
-        volume_ref = db.volume_get(self.context, volume_ref['id'])
-        self.assertTrue(volume_ref['status'] == 'in-use')

-        db.volume_destroy(self.context, volume_ref['id'])
        db.instance_destroy(self.context, instance_ref['uuid'])

    def test_pre_live_migration_works_correctly_mocked(self):
@ -360,13 +360,13 @@ class ProjectCommandsTestCase(test.TestCase):
        output = StringIO.StringIO()
        sys.stdout = output
        self.commands.quota(project_id='admin',
-                           key='volumes',
+                           key='instances',
                            value='unlimited',
                            )

        sys.stdout = sys.__stdout__
        result = output.getvalue()
-        self.assertEquals(('volumes: unlimited' in result), True)
+        self.assertEquals(('instances: unlimited' in result), True)

    def test_quota_update_invalid_key(self):
        self.assertRaises(SystemExit,
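The nova-manage test now exercises a compute quota key rather than the removed volume key. The stdout-capture pattern it relies on, sketched with a stand-in command object (FakeProjectCommands is written for illustration and is not nova-manage itself):

    import StringIO
    import sys

    class FakeProjectCommands(object):
        """Stand-in for the ProjectCommands class in bin/nova-manage."""
        def quota(self, project_id, key, value):
            print '%s: %s' % (key, value)

    output = StringIO.StringIO()
    sys.stdout = output
    FakeProjectCommands().quota(project_id='admin', key='instances',
                                value='unlimited')
    sys.stdout = sys.__stdout__
    assert 'instances: unlimited' in output.getvalue()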
@ -32,7 +32,6 @@ from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
import nova.tests.image.fake
-from nova import volume


FLAGS = flags.FLAGS
@ -45,8 +44,6 @@ class QuotaIntegrationTestCase(test.TestCase):
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   quota_instances=2,
                   quota_cores=4,
-                  quota_volumes=2,
-                  quota_gigabytes=20,
                   quota_floating_ips=1,
                   network_manager='nova.network.manager.FlatDHCPManager')
@ -91,14 +88,6 @@ class QuotaIntegrationTestCase(test.TestCase):
        inst['vcpus'] = cores
        return db.instance_create(self.context, inst)

-    def _create_volume(self, size=10):
-        """Create a test volume"""
-        vol = {}
-        vol['user_id'] = self.user_id
-        vol['project_id'] = self.project_id
-        vol['size'] = size
-        return db.volume_create(self.context, vol)['id']
-
    def test_too_many_instances(self):
        instance_uuids = []
        for i in range(FLAGS.quota_instances):
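With quota_volumes and quota_gigabytes gone, the integration test only provisions compute resources against its limits. The general shape of an over-quota check, sketched generically (this is illustrative and not nova's actual quota code):

    def over_quota(deltas, limits, in_use):
        """Return resource names whose requested delta would exceed the limit."""
        return sorted(name for name, delta in deltas.items()
                      if limits.get(name, -1) >= 0
                      and in_use.get(name, 0) + delta > limits[name])

    # e.g. against the integration-test limits above (instances=2, cores=4):
    print over_quota({'instances': 3, 'cores': 6},
                     {'instances': 2, 'cores': 4},
                     {'instances': 0, 'cores': 0})
    # -> ['cores', 'instances']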
@ -742,8 +731,6 @@ class DbQuotaDriverTestCase(test.TestCase):
            instances=10,
            cores=20,
            ram=50 * 1024,
-           volumes=10,
-           gigabytes=1000,
            floating_ips=10,
            metadata_items=128,
            injected_files=5,
@ -762,7 +749,6 @@ class DbQuotaDriverTestCase(test.TestCase):
            return dict(
                instances=5,
                ram=25 * 1024,
-               gigabytes=500,
                metadata_items=64,
                injected_file_content_bytes=5 * 1024,
                )
@ -778,8 +764,6 @@ class DbQuotaDriverTestCase(test.TestCase):
            instances=5,
            cores=20,
            ram=25 * 1024,
-           volumes=10,
-           gigabytes=500,
            floating_ips=10,
            metadata_items=64,
            injected_files=5,
@ -799,7 +783,6 @@ class DbQuotaDriverTestCase(test.TestCase):
        self.assertEqual(result, dict(
            instances=5,
            ram=25 * 1024,
-           gigabytes=500,
            metadata_items=64,
            injected_file_content_bytes=5 * 1024,
            ))
@ -810,7 +793,6 @@ class DbQuotaDriverTestCase(test.TestCase):
            self.assertEqual(project_id, 'test_project')
            return dict(
                cores=10,
-               gigabytes=50,
                injected_files=2,
                injected_file_path_bytes=127,
                )
@ -822,8 +804,6 @@ class DbQuotaDriverTestCase(test.TestCase):
                instances=dict(in_use=2, reserved=2),
                cores=dict(in_use=4, reserved=4),
                ram=dict(in_use=10 * 1024, reserved=0),
-               volumes=dict(in_use=2, reserved=0),
-               gigabytes=dict(in_use=10, reserved=0),
                floating_ips=dict(in_use=2, reserved=0),
                metadata_items=dict(in_use=0, reserved=0),
                injected_files=dict(in_use=0, reserved=0),
@ -863,17 +843,7 @@ class DbQuotaDriverTestCase(test.TestCase):
                in_use=10 * 1024,
                reserved=0,
                ),
-           volumes=dict(
-               limit=10,
-               in_use=2,
-               reserved=0,
-               ),
-           gigabytes=dict(
-               limit=50,
-               in_use=10,
-               reserved=0,
-               ),
-           floating_ips=dict(
+           floating_ips=dict(
                limit=10,
                in_use=2,
                reserved=0,
@ -941,17 +911,7 @@ class DbQuotaDriverTestCase(test.TestCase):
                in_use=10 * 1024,
                reserved=0,
                ),
-           volumes=dict(
-               limit=10,
-               in_use=2,
-               reserved=0,
-               ),
-           gigabytes=dict(
-               limit=50,
-               in_use=10,
-               reserved=0,
-               ),
-           floating_ips=dict(
+           floating_ips=dict(
                limit=10,
                in_use=2,
                reserved=0,
@ -1020,16 +980,6 @@ class DbQuotaDriverTestCase(test.TestCase):
                in_use=10 * 1024,
                reserved=0,
                ),
-           volumes=dict(
-               limit=10,
-               in_use=2,
-               reserved=0,
-               ),
-           gigabytes=dict(
-               limit=50,
-               in_use=10,
-               reserved=0,
-               ),
            floating_ips=dict(
                limit=10,
                in_use=2,
@ -1089,12 +1039,7 @@ class DbQuotaDriverTestCase(test.TestCase):
                in_use=4,
                reserved=4,
                ),
-           gigabytes=dict(
-               limit=50,
-               in_use=10,
-               reserved=0,
-               ),
-           injected_files=dict(
+           injected_files=dict(
                limit=2,
                in_use=0,
                reserved=0,
@ -1126,12 +1071,6 @@ class DbQuotaDriverTestCase(test.TestCase):
            ram=dict(
                limit=25 * 1024,
                ),
-           volumes=dict(
-               limit=10,
-               ),
-           gigabytes=dict(
-               limit=50,
-               ),
            floating_ips=dict(
                limit=10,
                ),
@ -1207,7 +1146,6 @@ class DbQuotaDriverTestCase(test.TestCase):
                          'test_class'),
                         quota.QUOTAS._resources,
                         ['instances', 'cores', 'ram',
-                         'volumes', 'gigabytes',
                          'floating_ips', 'security_groups'],
                         True)
@ -1216,8 +1154,6 @@ class DbQuotaDriverTestCase(test.TestCase):
            instances=10,
            cores=20,
            ram=50 * 1024,
-           volumes=10,
-           gigabytes=1000,
            floating_ips=10,
            security_groups=10,
            ))
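Taken together, the driver tests now expect a quota registry that knows nothing about block storage. A hedged sketch of the resulting default set (the shape is borrowed from the expected dicts above; this is not nova's actual registry object):

    DEFAULT_QUOTAS = dict(
        instances=10,
        cores=20,
        ram=50 * 1024,
        floating_ips=10,
        security_groups=10,
    )
    assert 'volumes' not in DEFAULT_QUOTAS
    assert 'gigabytes' not in DEFAULT_QUOTAS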
@ -235,7 +235,6 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
        """This shows how to test Ops classes' methods."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-        volume = self._create_volume()
        instance = db.instance_create(self.context, self.instance_values)
        vm = xenapi_fake.create_vm(instance.name, 'Running')
        result = conn.attach_volume(self._make_connection_info(),
@ -253,7 +252,6 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForVolumeFailedTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-        volume = self._create_volume()
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance.name, 'Running')
        self.assertRaises(exception.VolumeDriverNotFound,