Remove instance Foreign Key in volumes table, replace with instance_uuid
* Remove the instance relationship and instance_id FK * Add instance_uuid column to volumes table * Passed unit tests and devstack tests Change-Id: Id598f1f1d7915d1af6bf3dd75e5819dce08aaa0f
This commit is contained in:
parent
d9ed812220
commit
dcad314fb9
|
@ -311,7 +311,7 @@
|
|||
###### (StrOpt) Template string to be used to generate snapshot names
|
||||
# snapshot_name_template="snapshot-%08x"
|
||||
###### (StrOpt) Template string to be used to generate volume names
|
||||
# volume_name_template="volume-%08x"
|
||||
# volume_name_template="volume-%s"
|
||||
|
||||
######### defined in nova.crypto #########
|
||||
|
||||
|
|
|
@ -197,6 +197,10 @@ def get_snapshot_uuid_from_int_id(context, int_id):
|
|||
return db.get_snapshot_uuid_by_ec2_id(context, int_id)
|
||||
|
||||
|
||||
def ec2_instance_id_to_uuid(context, ec2_id):
    """Resolve an EC2-style instance id (e.g. 'i-0000001e') to the
    instance's uuid via a DB lookup."""
    return db.instance_get(context, ec2_id_to_id(ec2_id))['uuid']
|
||||
|
||||
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
|
||||
|
||||
|
||||
|
|
|
@ -371,10 +371,9 @@ class VolumeAttachmentController(object):
|
|||
except exception.NotFound:
|
||||
raise exc.HTTPNotFound()
|
||||
|
||||
instance = vol['instance']
|
||||
if instance is None or str(instance['uuid']) != server_id:
|
||||
LOG.debug("Instance not found (server_id=%(server_id)s)",
|
||||
{'server_id': server_id}, instance=instance)
|
||||
instance_uuid = vol['instance_uuid']
|
||||
if instance_uuid is None or instance_uuid != server_id:
|
||||
LOG.debug(_("Instance %s is not attached."), instance_uuid)
|
||||
raise exc.HTTPNotFound()
|
||||
|
||||
self.compute_api.detach_volume(context,
|
||||
|
|
|
@ -48,15 +48,13 @@ def _translate_attachment_summary_view(_context, vol):
|
|||
"""Maps keys for attachment summary view."""
|
||||
d = {}
|
||||
|
||||
# TODO(bcwaldon): remove str cast once we use uuids
|
||||
volume_id = str(vol['id'])
|
||||
volume_id = vol['id']
|
||||
|
||||
# NOTE(justinsb): We use the volume id as the id of the attachment object
|
||||
d['id'] = volume_id
|
||||
|
||||
d['volume_id'] = volume_id
|
||||
if vol.get('instance'):
|
||||
d['server_id'] = vol['instance']['uuid']
|
||||
d['server_id'] = vol['instance_uuid']
|
||||
if vol.get('mountpoint'):
|
||||
d['device'] = vol['mountpoint']
|
||||
|
||||
|
@ -77,8 +75,7 @@ def _translate_volume_summary_view(context, vol):
|
|||
"""Maps keys for volumes summary view."""
|
||||
d = {}
|
||||
|
||||
# TODO(bcwaldon): remove str cast once we use uuids
|
||||
d['id'] = str(vol['id'])
|
||||
d['id'] = vol['id']
|
||||
d['status'] = vol['status']
|
||||
d['size'] = vol['size']
|
||||
d['availability_zone'] = vol['availability_zone']
|
||||
|
@ -99,9 +96,6 @@ def _translate_volume_summary_view(context, vol):
|
|||
d['volume_type'] = str(vol['volume_type_id'])
|
||||
|
||||
d['snapshot_id'] = vol['snapshot_id']
|
||||
# TODO(bcwaldon): remove str cast once we use uuids
|
||||
if d['snapshot_id'] is not None:
|
||||
d['snapshot_id'] = str(d['snapshot_id'])
|
||||
|
||||
LOG.audit(_("vol=%s"), vol, context=context)
|
||||
|
||||
|
|
|
@ -1654,7 +1654,10 @@ class API(BaseAPI):
|
|||
# the volume ID via volume API and pass it and the volume object here
|
||||
def detach_volume(self, context, volume_id):
|
||||
"""Detach a volume from an instance."""
|
||||
instance = self.db.volume_get_instance(context.elevated(), volume_id)
|
||||
volume = self.volume_api.get(context, volume_id)
|
||||
instance_uuid = volume['instance_uuid']
|
||||
instance = self.db.instance_get_by_uuid(context.elevated(),
|
||||
instance_uuid)
|
||||
if not instance:
|
||||
raise exception.VolumeUnattached(volume_id=volume_id)
|
||||
|
||||
|
|
|
@ -67,7 +67,6 @@ from nova.openstack.common import importutils
|
|||
from nova import rpc
|
||||
from nova import utils
|
||||
from nova.virt import driver
|
||||
from nova import vnc
|
||||
from nova import volume
|
||||
|
||||
|
||||
|
@ -261,7 +260,6 @@ class ComputeManager(manager.SchedulerDependentManager):
|
|||
context = nova.context.get_admin_context()
|
||||
instances = self.db.instance_get_all_by_host(context, self.host)
|
||||
for instance in instances:
|
||||
instance_uuid = instance['uuid']
|
||||
db_state = instance['power_state']
|
||||
drv_state = self._get_power_state(context, instance)
|
||||
|
||||
|
@ -1725,7 +1723,7 @@ class ComputeManager(manager.SchedulerDependentManager):
|
|||
connection_info = self.volume_api.initialize_connection(context,
|
||||
volume,
|
||||
connector)
|
||||
self.volume_api.attach(context, volume, instance_id, mountpoint)
|
||||
self.volume_api.attach(context, volume, instance_uuid, mountpoint)
|
||||
return connection_info
|
||||
|
||||
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
|
||||
|
@ -1764,7 +1762,10 @@ class ComputeManager(manager.SchedulerDependentManager):
|
|||
volume,
|
||||
connector)
|
||||
|
||||
self.volume_api.attach(context, volume, instance_ref['id'], mountpoint)
|
||||
self.volume_api.attach(context,
|
||||
volume,
|
||||
instance_ref['uuid'],
|
||||
mountpoint)
|
||||
values = {
|
||||
'instance_uuid': instance_ref['uuid'],
|
||||
'connection_info': utils.dumps(connection_info),
|
||||
|
|
|
@ -986,9 +986,9 @@ def volume_get_all_by_host(context, host):
|
|||
return IMPL.volume_get_all_by_host(context, host)
|
||||
|
||||
|
||||
def volume_get_all_by_instance(context, instance_id):
|
||||
def volume_get_all_by_instance_uuid(context, instance_uuid):
|
||||
"""Get all volumes belonging to a instance."""
|
||||
return IMPL.volume_get_all_by_instance(context, instance_id)
|
||||
return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
|
||||
|
||||
|
||||
def volume_get_all_by_project(context, project_id):
|
||||
|
@ -1001,11 +1001,6 @@ def volume_get_by_ec2_id(context, ec2_id):
|
|||
return IMPL.volume_get_by_ec2_id(context, ec2_id)
|
||||
|
||||
|
||||
def volume_get_instance(context, volume_id):
|
||||
"""Get the instance that a volume is attached to."""
|
||||
return IMPL.volume_get_instance(context, volume_id)
|
||||
|
||||
|
||||
def volume_get_iscsi_target_num(context, volume_id):
|
||||
"""Get the target num (tid) allocated to the volume."""
|
||||
return IMPL.volume_get_iscsi_target_num(context, volume_id)
|
||||
|
|
|
@ -1374,7 +1374,6 @@ def _build_instance_get(context, session=None):
|
|||
project_only=True).\
|
||||
options(joinedload_all('security_groups.rules')).\
|
||||
options(joinedload('info_cache')).\
|
||||
options(joinedload('volumes')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type'))
|
||||
|
||||
|
@ -2402,15 +2401,17 @@ def volume_allocate_iscsi_target(context, volume_id, host):
|
|||
|
||||
|
||||
@require_admin_context
|
||||
def volume_attached(context, volume_id, instance_id, mountpoint):
|
||||
def volume_attached(context, volume_id, instance_uuid, mountpoint):
|
||||
if not utils.is_uuid_like(instance_uuid):
|
||||
raise exception.InvalidUUID(instance_uuid)
|
||||
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
volume_ref = volume_get(context, volume_id, session=session)
|
||||
volume_ref['status'] = 'in-use'
|
||||
volume_ref['mountpoint'] = mountpoint
|
||||
volume_ref['attach_status'] = 'attached'
|
||||
volume_ref.instance = instance_get(context, instance_id,
|
||||
session=session)
|
||||
volume_ref['instance_uuid'] = instance_uuid
|
||||
volume_ref.save(session=session)
|
||||
|
||||
|
||||
|
@ -2471,7 +2472,7 @@ def volume_detached(context, volume_id):
|
|||
volume_ref['status'] = 'available'
|
||||
volume_ref['mountpoint'] = None
|
||||
volume_ref['attach_status'] = 'detached'
|
||||
volume_ref.instance = None
|
||||
volume_ref['instance_uuid'] = None
|
||||
volume_ref.save(session=session)
|
||||
|
||||
|
||||
|
@ -2479,9 +2480,8 @@ def volume_detached(context, volume_id):
|
|||
def _volume_get_query(context, session=None, project_only=False):
|
||||
return model_query(context, models.Volume, session=session,
|
||||
project_only=project_only).\
|
||||
options(joinedload('instance')).\
|
||||
options(joinedload('volume_metadata')).\
|
||||
options(joinedload('volume_type'))
|
||||
options(joinedload('volume_metadata')).\
|
||||
options(joinedload('volume_type'))
|
||||
|
||||
|
||||
@require_context
|
||||
|
@ -2519,15 +2519,15 @@ def volume_get_all_by_host(context, host):
|
|||
|
||||
|
||||
@require_admin_context
|
||||
def volume_get_all_by_instance(context, instance_id):
|
||||
def volume_get_all_by_instance_uuid(context, instance_uuid):
|
||||
result = model_query(context, models.Volume, read_deleted="no").\
|
||||
options(joinedload('volume_metadata')).\
|
||||
options(joinedload('volume_type')).\
|
||||
filter_by(instance_id=instance_id).\
|
||||
filter_by(instance_uuid=instance_uuid).\
|
||||
all()
|
||||
|
||||
if not result:
|
||||
raise exception.VolumeNotFoundForInstance(instance_id=instance_id)
|
||||
return []
|
||||
|
||||
return result
|
||||
|
||||
|
@ -2538,16 +2538,6 @@ def volume_get_all_by_project(context, project_id):
|
|||
return _volume_get_query(context).filter_by(project_id=project_id).all()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_get_instance(context, volume_id):
|
||||
result = _volume_get_query(context).filter_by(id=volume_id).first()
|
||||
|
||||
if not result:
|
||||
raise exception.VolumeNotFound(volume_id=volume_id)
|
||||
|
||||
return result.instance
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_get_iscsi_target_num(context, volume_id):
|
||||
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 OpenStack LLC.
|
||||
# Copyright 2012 SolidFire Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import select, Column
|
||||
from sqlalchemy import MetaData, Integer, String, Table
|
||||
from migrate import ForeignKeyConstraint
|
||||
|
||||
from nova import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Add volumes.instance_uuid, backfill it from instances, then drop
    the instance_id column and its foreign key.

    :param migrate_engine: SQLAlchemy engine bound to the database being
        migrated.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    volumes = Table('volumes', meta, autoload=True)
    instance_uuid_column = Column('instance_uuid', String(36))

    instance_uuid_column.create(volumes)
    try:
        # Backfill: for each volume, copy the uuid of the instance its
        # instance_id points at.
        volumes.update().values(
            instance_uuid=select(
                [instances.c.uuid],
                instances.c.id == volumes.c.instance_id)
        ).execute()
    except Exception:
        # Undo the half-finished migration and abort.  Without the
        # re-raise the code below would still drop instance_id even
        # though instance_uuid was just removed, silently losing every
        # volume attachment record.
        instance_uuid_column.drop()
        raise

    # Drop the FK constraint (if one exists) before dropping the column;
    # some backends refuse to drop a column that is still referenced.
    fkeys = list(volumes.c.instance_id.foreign_keys)
    if fkeys:
        try:
            fk_name = fkeys[0].constraint.name
            ForeignKeyConstraint(
                columns=[volumes.c.instance_id],
                refcolumns=[instances.c.id],
                name=fk_name).drop()

        except Exception:
            LOG.error(_("foreign key could not be dropped"))
            raise

    volumes.c.instance_id.drop()
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
    """Restore volumes.instance_id (with its FK to instances.id),
    backfill it from instance_uuid, then drop the instance_uuid column.

    :param migrate_engine: SQLAlchemy engine bound to the database being
        migrated.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    volumes = Table('volumes', meta, autoload=True)
    instance_id_column = Column('instance_id', Integer)

    instance_id_column.create(volumes)
    try:
        # Backfill: map each volume's instance_uuid back to the integer
        # instance id.
        volumes.update().values(
            instance_id=select(
                [instances.c.id],
                instances.c.uuid == volumes.c.instance_uuid)
        ).execute()
    except Exception:
        # Undo the half-finished downgrade and abort.  Without the
        # re-raise the code below would still drop instance_uuid even
        # though instance_id was just removed, silently losing every
        # volume attachment record.
        instance_id_column.drop()
        raise

    # Recreate the FK that the upgrade dropped, preserving its name.
    fkeys = list(volumes.c.instance_id.foreign_keys)
    if fkeys:
        try:
            fk_name = fkeys[0].constraint.name
            ForeignKeyConstraint(
                columns=[volumes.c.instance_id],
                refcolumns=[instances.c.id],
                name=fk_name).create()

        except Exception:
            LOG.error(_("foreign key could not be created"))
            raise

    volumes.c.instance_uuid.drop()
|
|
@ -0,0 +1,133 @@
|
|||
BEGIN TRANSACTION;
-- change instance_id volumes table
-- SQLite cannot drop a column in place, so the table is rebuilt:
-- 1) copy volumes into a backup table that carries BOTH instance_id
--    and instance_uuid, 2) backfill instance_id from instances by
--    matching uuid, 3) recreate volumes without instance_uuid and copy
--    the data back.  Column order below is significant: the
--    positional INSERT ... SELECT statements depend on it.
CREATE TABLE volumes_backup(
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id VARCHAR(36) NOT NULL,
ec2_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
snapshot_id VARCHAR(36),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
instance_uuid VARCHAR(36),
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(255),
provider_auth VARCHAR(255),
volume_type_id INTEGER,
PRIMARY KEY (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
UNIQUE (id),
CHECK (deleted IN (0, 1))
);

-- Positional copy into the backup table; NULL stands in for
-- instance_id (the current schema no longer has that column) and is
-- backfilled by the UPDATE below.
INSERT INTO volumes_backup SELECT
created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
snapshot_id,
host,
size,
availability_zone,
NULL,
instance_uuid,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
volume_type_id
FROM volumes;

-- Resolve each instance_uuid back to the integer instance id;
-- volumes with no matching instance end up with instance_id NULL.
UPDATE volumes_backup
SET instance_id =
(SELECT id
FROM instances
WHERE volumes_backup.instance_uuid = instances.uuid
);
DROP TABLE volumes;

-- Recreate volumes in its pre-migration shape: instance_id with the
-- FK restored, and no instance_uuid column.
CREATE TABLE volumes(
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id VARCHAR(36) NOT NULL,
ec2_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
snapshot_id VARCHAR(36),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(255),
provider_auth VARCHAR(255),
volume_type_id INTEGER,
PRIMARY KEY (id),
FOREIGN KEY (instance_id) REFERENCES instances (id),
UNIQUE (id),
CHECK (deleted IN (0, 1))
);

-- Copy everything back, keeping the backfilled instance_id and
-- dropping instance_uuid.
INSERT INTO volumes
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
snapshot_id,
host,
size,
availability_zone,
instance_id,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
volume_type_id
FROM volumes_backup;
DROP TABLE volumes_backup;
COMMIT;
|
|
@ -0,0 +1,132 @@
|
|||
BEGIN TRANSACTION;
-- change instance_id volumes table
-- SQLite cannot drop a column in place, so the table is rebuilt:
-- 1) copy volumes into a backup table that carries BOTH instance_id
--    and instance_uuid, 2) backfill instance_uuid from instances by
--    matching id, 3) recreate volumes without instance_id (and without
--    its FK) and copy the data back.  Column order below is
--    significant: the positional INSERT ... SELECT statements depend
--    on it.
CREATE TABLE volumes_backup(
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id VARCHAR(36) NOT NULL,
ec2_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
snapshot_id VARCHAR(36),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
instance_uuid VARCHAR(36),
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(255),
provider_auth VARCHAR(255),
volume_type_id INTEGER,
PRIMARY KEY (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
UNIQUE (id),
CHECK (deleted IN (0, 1))
);

-- Positional copy into the backup table; NULL stands in for
-- instance_uuid (the current schema does not have that column yet)
-- and is backfilled by the UPDATE below.
INSERT INTO volumes_backup SELECT
created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
snapshot_id,
host,
size,
availability_zone,
instance_id,
NULL,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
volume_type_id
FROM volumes;

-- Resolve each integer instance_id to the instance's uuid; volumes
-- with no matching instance end up with instance_uuid NULL.
UPDATE volumes_backup
SET instance_uuid =
(SELECT uuid
FROM instances
WHERE volumes_backup.instance_id = instances.id
);
DROP TABLE volumes;

-- Recreate volumes in its post-migration shape: instance_uuid replaces
-- instance_id, and the FK to instances is gone.
CREATE TABLE volumes(
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id VARCHAR(36) NOT NULL,
ec2_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
snapshot_id VARCHAR(36),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_uuid VARCHAR(36),
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(255),
provider_auth VARCHAR(255),
volume_type_id INTEGER,
PRIMARY KEY (id),
UNIQUE (id),
CHECK (deleted IN (0, 1))
);

-- Copy everything back, keeping the backfilled instance_uuid and
-- dropping instance_id.
INSERT INTO volumes
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
snapshot_id,
host,
size,
availability_zone,
instance_uuid,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
volume_type_id
FROM volumes_backup;
DROP TABLE volumes_backup;
COMMIT;
|
|
@ -341,12 +341,7 @@ class Volume(BASE, NovaBase):
|
|||
host = Column(String(255)) # , ForeignKey('hosts.id'))
|
||||
size = Column(Integer)
|
||||
availability_zone = Column(String(255)) # TODO(vish): foreign key?
|
||||
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
|
||||
instance = relationship(Instance,
|
||||
backref=backref('volumes'),
|
||||
foreign_keys=instance_id,
|
||||
primaryjoin='and_(Volume.instance_id==Instance.id,'
|
||||
'Volume.deleted==False)')
|
||||
instance_uuid = Column(String(36))
|
||||
mountpoint = Column(String(255))
|
||||
attach_time = Column(String(255)) # TODO(vish): datetime
|
||||
status = Column(String(255)) # TODO(vish): enum?
|
||||
|
|
|
@ -26,7 +26,6 @@ SHOULD include dedicated exception logging.
|
|||
|
||||
import functools
|
||||
import itertools
|
||||
import sys
|
||||
|
||||
import webob.exc
|
||||
|
||||
|
@ -420,6 +419,10 @@ class InvalidEc2Id(Invalid):
|
|||
message = _("Ec2 id %(ec2_id)s is unacceptable.")
|
||||
|
||||
|
||||
class InvalidUUID(Invalid):
    # NOTE(review): the original placeholder "%(uuid)." is a malformed
    # %-format spec ('.' is not a valid conversion character), so
    # formatting the message would raise ValueError instead of producing
    # the error text.  The 's' conversion is required.
    message = _("Expected a uuid but received %(uuid)s.")
|
||||
|
||||
|
||||
class NotFound(NovaException):
|
||||
message = _("Resource could not be found.")
|
||||
code = 404
|
||||
|
|
|
@ -917,7 +917,7 @@ class CloudTestCase(test.TestCase):
|
|||
result = self.cloud.describe_instances(self.context)
|
||||
self.assertEqual(len(result['reservationSet']), 2)
|
||||
|
||||
def _block_device_mapping_create(self, instance_id, mappings):
|
||||
def _block_device_mapping_create(self, instance_uuid, mappings):
|
||||
volumes = []
|
||||
for bdm in mappings:
|
||||
db.block_device_mapping_create(self.context, bdm)
|
||||
|
@ -931,7 +931,7 @@ class CloudTestCase(test.TestCase):
|
|||
values[vol_key] = bdm[bdm_key]
|
||||
vol = db.volume_create(self.context, values)
|
||||
db.volume_attached(self.context, vol['id'],
|
||||
instance_id, bdm['device_name'])
|
||||
instance_uuid, bdm['device_name'])
|
||||
volumes.append(vol)
|
||||
return volumes
|
||||
|
||||
|
@ -987,7 +987,7 @@ class CloudTestCase(test.TestCase):
|
|||
'device_name': '/dev/sdb9',
|
||||
'virtual_name': 'ephemeral3'}]
|
||||
|
||||
volumes = self._block_device_mapping_create(instance_id, mappings0)
|
||||
volumes = self._block_device_mapping_create(instance_uuid, mappings0)
|
||||
return (inst1, inst2, volumes)
|
||||
|
||||
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
|
||||
|
@ -1910,14 +1910,14 @@ class CloudTestCase(test.TestCase):
|
|||
kwargs['id'] = volume_id
|
||||
return db.volume_create(self.context, kwargs)
|
||||
|
||||
def _assert_volume_attached(self, vol, instance_id, mountpoint):
|
||||
self.assertEqual(vol['instance_id'], instance_id)
|
||||
def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
|
||||
self.assertEqual(vol['instance_uuid'], instance_uuid)
|
||||
self.assertEqual(vol['mountpoint'], mountpoint)
|
||||
self.assertEqual(vol['status'], "in-use")
|
||||
self.assertEqual(vol['attach_status'], "attached")
|
||||
|
||||
def _assert_volume_detached(self, vol):
|
||||
self.assertEqual(vol['instance_id'], None)
|
||||
self.assertEqual(vol['instance_uuid'], None)
|
||||
self.assertEqual(vol['mountpoint'], None)
|
||||
self.assertEqual(vol['status'], "available")
|
||||
self.assertEqual(vol['attach_status'], "detached")
|
||||
|
@ -1941,18 +1941,20 @@ class CloudTestCase(test.TestCase):
|
|||
'delete_on_termination': True},
|
||||
]}
|
||||
ec2_instance_id = self._run_instance(**kwargs)
|
||||
instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
|
||||
ec2_instance_id)
|
||||
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
|
||||
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
|
||||
self.assertEqual(len(vols), 2)
|
||||
for vol in vols:
|
||||
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
|
||||
|
||||
vol = db.volume_get(self.context, vol1['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
|
||||
self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
|
||||
|
||||
vol = db.volume_get(self.context, vol2['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
|
||||
self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')
|
||||
|
||||
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
|
||||
self.assertTrue(result)
|
||||
|
@ -1963,13 +1965,13 @@ class CloudTestCase(test.TestCase):
|
|||
self._assert_volume_detached(vol)
|
||||
|
||||
self.cloud.start_instances(self.context, [ec2_instance_id])
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
|
||||
self.assertEqual(len(vols), 2)
|
||||
for vol in vols:
|
||||
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
|
||||
self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
|
||||
vol['mountpoint'] == '/dev/vdc')
|
||||
self.assertEqual(vol['instance_id'], instance_id)
|
||||
self.assertEqual(vol['instance_uuid'], instance_uuid)
|
||||
self.assertEqual(vol['status'], "in-use")
|
||||
self.assertEqual(vol['attach_status'], "attached")
|
||||
|
||||
|
@ -2001,12 +2003,14 @@ class CloudTestCase(test.TestCase):
|
|||
'delete_on_termination': True}]}
|
||||
ec2_instance_id = self._run_instance(**kwargs)
|
||||
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
|
||||
instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
|
||||
ec2_instance_id)
|
||||
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
|
||||
self.assertEqual(len(vols), 1)
|
||||
for vol in vols:
|
||||
self.assertEqual(vol['id'], vol1['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
|
||||
self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
|
||||
|
||||
vol = db.volume_get(self.context, vol2['id'])
|
||||
self._assert_volume_detached(vol)
|
||||
|
@ -2017,7 +2021,7 @@ class CloudTestCase(test.TestCase):
|
|||
volume_id=vol2['id'],
|
||||
device='/dev/vdc')
|
||||
vol = db.volume_get(self.context, vol2['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
|
||||
self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')
|
||||
|
||||
self.cloud.compute_api.detach_volume(self.context,
|
||||
volume_id=vol1['id'])
|
||||
|
@ -2032,11 +2036,11 @@ class CloudTestCase(test.TestCase):
|
|||
self._assert_volume_detached(vol)
|
||||
|
||||
self.cloud.start_instances(self.context, [ec2_instance_id])
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
|
||||
self.assertEqual(len(vols), 1)
|
||||
for vol in vols:
|
||||
self.assertEqual(vol['id'], vol2['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
|
||||
self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')
|
||||
|
||||
vol = db.volume_get(self.context, vol1['id'])
|
||||
self._assert_volume_detached(vol)
|
||||
|
@ -2077,8 +2081,10 @@ class CloudTestCase(test.TestCase):
|
|||
'delete_on_termination': True}]}
|
||||
ec2_instance_id = self._run_instance(**kwargs)
|
||||
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
|
||||
instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
|
||||
ec2_instance_id)
|
||||
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
|
||||
self.assertEqual(len(vols), 2)
|
||||
vol1_id = None
|
||||
vol2_id = None
|
||||
|
@ -2093,7 +2099,7 @@ class CloudTestCase(test.TestCase):
|
|||
else:
|
||||
self.fail()
|
||||
|
||||
self._assert_volume_attached(vol, instance_id, mountpoint)
|
||||
self._assert_volume_attached(vol, instance_uuid, mountpoint)
|
||||
|
||||
self.assertTrue(vol1_id)
|
||||
self.assertTrue(vol2_id)
|
||||
|
|
|
@ -592,7 +592,7 @@ def stub_volume(id, **kwargs):
|
|||
'host': 'fakehost',
|
||||
'size': 1,
|
||||
'availability_zone': 'fakeaz',
|
||||
'instance': {'uuid': 'fakeuuid'},
|
||||
'instance_uuid': 'fakeuuid',
|
||||
'mountpoint': '/',
|
||||
'status': 'fakestatus',
|
||||
'attach_status': 'attached',
|
||||
|
@ -611,7 +611,7 @@ def stub_volume(id, **kwargs):
|
|||
|
||||
def stub_volume_create(self, context, size, name, description, snapshot,
|
||||
**param):
|
||||
vol = stub_volume(1)
|
||||
vol = stub_volume('1')
|
||||
vol['size'] = size
|
||||
vol['display_name'] = name
|
||||
vol['display_description'] = description
|
||||
|
@ -640,4 +640,4 @@ def stub_volume_get_notfound(self, context, volume_id):
|
|||
|
||||
|
||||
def stub_volume_get_all(self, context, search_opts=None):
|
||||
return [stub_volume_get(self, context, 1)]
|
||||
return [stub_volume_get(self, context, '1')]
|
||||
|
|
|
@ -90,6 +90,7 @@ class VolumeApiTest(test.TestCase):
|
|||
'created_at': datetime.datetime(1, 1, 1,
|
||||
1, 1, 1),
|
||||
'size': 1}]}
|
||||
self.maxDiff = None
|
||||
self.assertEqual(res_dict, expected)
|
||||
|
||||
def test_volume_list_detail(self):
|
||||
|
@ -114,7 +115,7 @@ class VolumeApiTest(test.TestCase):
|
|||
|
||||
def test_volume_show(self):
|
||||
req = fakes.HTTPRequest.blank('/v1/volumes/1')
|
||||
res_dict = self.controller.show(req, 1)
|
||||
res_dict = self.controller.show(req, '1')
|
||||
expected = {'volume': {'status': 'fakestatus',
|
||||
'display_description': 'displaydesc',
|
||||
'availability_zone': 'fakeaz',
|
||||
|
@ -139,7 +140,7 @@ class VolumeApiTest(test.TestCase):
|
|||
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
|
||||
|
||||
req = fakes.HTTPRequest.blank('/v1/volumes/1')
|
||||
res_dict = self.controller.show(req, 1)
|
||||
res_dict = self.controller.show(req, '1')
|
||||
expected = {'volume': {'status': 'fakestatus',
|
||||
'display_description': 'displaydesc',
|
||||
'availability_zone': 'fakeaz',
|
||||
|
|
|
@ -477,7 +477,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
instance = self._live_migration_instance()
|
||||
db.instance_get(self.context, instance['id']).AndReturn(instance)
|
||||
|
||||
# Source checks (volume and source compute are up)
|
||||
db.service_get_all_compute_by_host(self.context,
|
||||
instance['host']).AndReturn(['fake_service2'])
|
||||
utils.service_is_up('fake_service2').AndReturn(True)
|
||||
|
|
|
@ -1215,7 +1215,9 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
instance_dict)
|
||||
vol_dict = {'status': 'migrating', 'size': 1}
|
||||
volume_ref = db.volume_create(self.context, vol_dict)
|
||||
db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
|
||||
db.volume_attached(self.context,
|
||||
volume_ref['id'],
|
||||
instance_ref['uuid'],
|
||||
'/dev/fake')
|
||||
|
||||
# Preparing mocks
|
||||
|
|
|
@ -48,7 +48,9 @@ class VolumeTestCase(test.TestCase):
|
|||
self.flags(connection_type='fake')
|
||||
self.volume = importutils.import_object(FLAGS.volume_manager)
|
||||
self.context = context.get_admin_context()
|
||||
self.instance_id = db.instance_create(self.context, {})['id']
|
||||
instance = db.instance_create(self.context, {})
|
||||
self.instance_id = instance['id']
|
||||
self.instance_uuid = instance['uuid']
|
||||
|
||||
def tearDown(self):
|
||||
db.instance_destroy(self.context, self.instance_id)
|
||||
|
@ -175,25 +177,26 @@ class VolumeTestCase(test.TestCase):
|
|||
inst['project_id'] = 'fake'
|
||||
inst['instance_type_id'] = '2' # m1.tiny
|
||||
inst['ami_launch_index'] = 0
|
||||
instance_id = db.instance_create(self.context, inst)['id']
|
||||
instance = db.instance_create(self.context, {})
|
||||
instance_id = instance['id']
|
||||
instance_uuid = instance['uuid']
|
||||
mountpoint = "/dev/sdf"
|
||||
volume = self._create_volume()
|
||||
volume_id = volume['id']
|
||||
self.volume.create_volume(self.context, volume_id)
|
||||
if FLAGS.fake_tests:
|
||||
db.volume_attached(self.context, volume_id, instance_id,
|
||||
db.volume_attached(self.context, volume_id, instance_uuid,
|
||||
mountpoint)
|
||||
else:
|
||||
self.compute.attach_volume(self.context,
|
||||
instance_id,
|
||||
instance_uuid,
|
||||
volume_id,
|
||||
mountpoint)
|
||||
vol = db.volume_get(context.get_admin_context(), volume_id)
|
||||
self.assertEqual(vol['status'], "in-use")
|
||||
self.assertEqual(vol['attach_status'], "attached")
|
||||
self.assertEqual(vol['mountpoint'], mountpoint)
|
||||
instance_ref = db.volume_get_instance(self.context, volume_id)
|
||||
self.assertEqual(instance_ref['id'], instance_id)
|
||||
self.assertEqual(vol['instance_uuid'], instance_uuid)
|
||||
|
||||
self.assertRaises(exception.NovaException,
|
||||
self.volume.delete_volume,
|
||||
|
@ -203,7 +206,7 @@ class VolumeTestCase(test.TestCase):
|
|||
db.volume_detached(self.context, volume_id)
|
||||
else:
|
||||
self.compute.detach_volume(self.context,
|
||||
instance_id,
|
||||
instance_uuid,
|
||||
volume_id)
|
||||
vol = db.volume_get(self.context, volume_id)
|
||||
self.assertEqual(vol['status'], "available")
|
||||
|
@ -323,7 +326,7 @@ class VolumeTestCase(test.TestCase):
|
|||
|
||||
volume = self._create_volume()
|
||||
self.volume.create_volume(self.context, volume['id'])
|
||||
db.volume_attached(self.context, volume['id'], self.instance_id,
|
||||
db.volume_attached(self.context, volume['id'], self.instance_uuid,
|
||||
'/dev/sda1')
|
||||
|
||||
volume_api = nova.volume.api.API()
|
||||
|
@ -383,7 +386,9 @@ class DriverTestCase(test.TestCase):
|
|||
log.logger.addHandler(logging.logging.StreamHandler(self.stream))
|
||||
|
||||
inst = {}
|
||||
self.instance_id = db.instance_create(self.context, inst)['id']
|
||||
instance = db.instance_create(self.context, {})
|
||||
self.instance_id = instance['id']
|
||||
self.instance_uuid = instance['uuid']
|
||||
|
||||
def _attach_volume(self):
|
||||
"""Attach volumes to an instance. This function also sets
|
||||
|
@ -436,7 +441,7 @@ class ISCSITestCase(DriverTestCase):
|
|||
|
||||
# each volume has a different mountpoint
|
||||
mountpoint = "/dev/sd" + chr((ord('b') + index))
|
||||
db.volume_attached(self.context, vol_ref['id'], self.instance_id,
|
||||
db.volume_attached(self.context, vol_ref['id'], self.instance_uuid,
|
||||
mountpoint)
|
||||
volume_id_list.append(vol_ref['id'])
|
||||
|
||||
|
|
|
@ -248,13 +248,13 @@ class API(base.Base):
|
|||
self.update(context, volume, {"status": "available"})
|
||||
|
||||
@wrap_check_policy
|
||||
def attach(self, context, volume, instance_id, mountpoint):
|
||||
def attach(self, context, volume, instance_uuid, mountpoint):
|
||||
host = volume['host']
|
||||
queue = self.db.queue_get_for(context, FLAGS.volume_topic, host)
|
||||
return rpc.call(context, queue,
|
||||
{"method": "attach_volume",
|
||||
"args": {"volume_id": volume['id'],
|
||||
"instance_id": instance_id,
|
||||
"instance_uuid": instance_uuid,
|
||||
"mountpoint": mountpoint}})
|
||||
|
||||
@wrap_check_policy
|
||||
|
|
|
@ -229,12 +229,15 @@ class VolumeManager(manager.SchedulerDependentManager):
|
|||
LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
|
||||
return True
|
||||
|
||||
def attach_volume(self, context, volume_id, instance_id, mountpoint):
|
||||
def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
|
||||
"""Updates db to show volume is attached"""
|
||||
# TODO(vish): refactor this into a more general "reserve"
|
||||
if not utils.is_uuid_like(instance_uuid):
|
||||
raise exception.InvalidUUID(instance_uuid)
|
||||
|
||||
self.db.volume_attached(context,
|
||||
volume_id,
|
||||
instance_id,
|
||||
instance_uuid,
|
||||
mountpoint)
|
||||
|
||||
def detach_volume(self, context, volume_id):
|
||||
|
@ -293,7 +296,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
|||
def check_for_export(self, context, instance_id):
|
||||
"""Make sure whether volume is exported."""
|
||||
instance_ref = self.db.instance_get(context, instance_id)
|
||||
for volume in instance_ref['volumes']:
|
||||
volumes = self.db.volume_get_all_by_instance_uuid(context,
|
||||
instance_ref['uuid'])
|
||||
|
||||
for volume in volumes:
|
||||
self.driver.check_for_export(context, volume['id'])
|
||||
|
||||
def _volume_stats_changed(self, stat1, stat2):
|
||||
|
|
Loading…
Reference in New Issue