This patch includes the Cinder changes needed to support volume multiple attaches. Nova and python-cinderclient also need patches associated to provide support for multiple attachments. This adds the multiattach flag to volumes. When a volume is created, a multiattach flag can be set, which allows a volume to be attached to more than one Nova instance or host. If the multiattach flag is not set on a volume, it cannot be attached to more than one Nova instance or host Each volume attachment is tracked in a new volume_attachment table. The attachment id is the unique identifier for each attachment to an instance or host. When a volume is to be detached the attachment uuid must be passed in to the detach call in order to determine which attachment should be removed. Since a volume can be attached to an instance and a host, the attachment id is used as the attachment identifier. Nova: https://review.openstack.org/#/c/153033/ https://review.openstack.org/#/c/153038/ python-cinderclient: https://review.openstack.org/#/c/85856/ Change-Id: I950fa00ed5a30e7758245d5b0557f6df42dc58a3 Implements: blueprint multi-attach-volume APIImpactchanges/47/85847/47
parent
490f03b48a
commit
10d5421687
@ -0,0 +1,147 @@
|
||||
# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import uuid
|
||||
|
||||
import six
|
||||
from sqlalchemy import Boolean, Column, DateTime
|
||||
from sqlalchemy import ForeignKey, MetaData, String, Table
|
||||
|
||||
from cinder.i18n import _LE
|
||||
from cinder.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CREATED_AT = datetime.datetime.now()
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
"""Add volume multi attachment table."""
|
||||
meta = MetaData()
|
||||
meta.bind = migrate_engine
|
||||
|
||||
# add the multiattach flag to the volumes table.
|
||||
volumes = Table('volumes', meta, autoload=True)
|
||||
multiattach = Column('multiattach', Boolean)
|
||||
volumes.create_column(multiattach)
|
||||
volumes.update().values(multiattach=False).execute()
|
||||
|
||||
# The new volume_attachment table
|
||||
volume_attachment = Table(
|
||||
'volume_attachment', meta,
|
||||
Column('created_at', DateTime),
|
||||
Column('updated_at', DateTime),
|
||||
Column('deleted_at', DateTime),
|
||||
Column('deleted', Boolean),
|
||||
Column('id', String(length=36), primary_key=True, nullable=False),
|
||||
Column('volume_id', String(length=36), ForeignKey('volumes.id'),
|
||||
nullable=False),
|
||||
Column('attached_host', String(length=255)),
|
||||
Column('instance_uuid', String(length=36)),
|
||||
Column('mountpoint', String(length=255)),
|
||||
Column('attach_time', DateTime),
|
||||
Column('detach_time', DateTime),
|
||||
Column('attach_mode', String(length=36)),
|
||||
Column('attach_status', String(length=255)),
|
||||
mysql_engine='InnoDB'
|
||||
)
|
||||
|
||||
try:
|
||||
volume_attachment.create()
|
||||
except Exception:
|
||||
LOG.error(_LE("Table volume_attachment not created!"))
|
||||
raise
|
||||
|
||||
# now migrate existing volume attachment info into the
|
||||
# new volume_attachment table
|
||||
volumes_list = list(volumes.select().execute())
|
||||
for volume in volumes_list:
|
||||
if volume.attach_status == 'attached':
|
||||
attachment = volume_attachment.insert()
|
||||
values = {'id': six.text_type(uuid.uuid4()),
|
||||
'created_at': CREATED_AT,
|
||||
'deleted_at': None,
|
||||
'deleted': 0,
|
||||
'volume_id': volume.id,
|
||||
'attached_host': volume.host,
|
||||
'instance_uuid': volume.instance_uuid,
|
||||
'mountpoint': volume.mountpoint,
|
||||
'attach_time': volume.attach_time,
|
||||
'attach_mode': 'rw',
|
||||
'attach_status': 'attached',
|
||||
}
|
||||
attachment.execute(values)
|
||||
|
||||
# we have no reason to keep the columns that now
|
||||
# exist in the volume_attachment table
|
||||
mountpoint = volumes.columns.mountpoint
|
||||
volumes.drop_column(mountpoint)
|
||||
instance_uuid = volumes.columns.instance_uuid
|
||||
volumes.drop_column(instance_uuid)
|
||||
attach_time = volumes.columns.attach_time
|
||||
volumes.drop_column(attach_time)
|
||||
attached_host = volumes.columns.attached_host
|
||||
volumes.drop_column(attached_host)
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
|
||||
"""Remove volume_attachment table."""
|
||||
meta = MetaData()
|
||||
meta.bind = migrate_engine
|
||||
|
||||
# Put the needed volumes table columns back
|
||||
volumes = Table('volumes', meta, autoload=True)
|
||||
multiattach = volumes.columns.multiattach
|
||||
volumes.drop_column(multiattach)
|
||||
|
||||
attached_host = Column('attached_host', String(length=255))
|
||||
volumes.create_column(attached_host)
|
||||
volumes.update().values(attached_host=None).execute()
|
||||
|
||||
attach_time = Column('attach_time', String(length=255))
|
||||
volumes.create_column(attach_time)
|
||||
volumes.update().values(attach_time=None).execute()
|
||||
|
||||
instance_uuid = Column('instance_uuid', String(length=36))
|
||||
volumes.create_column(instance_uuid)
|
||||
volumes.update().values(instance_uuid=None).execute()
|
||||
|
||||
mountpoint = Column('mountpoint', String(length=255))
|
||||
volumes.create_column(mountpoint)
|
||||
volumes.update().values(mountpoint=None).execute()
|
||||
|
||||
volume_attachment = Table('volume_attachment', meta, autoload=True)
|
||||
attachments = list(volume_attachment.select().execute())
|
||||
for attachment in attachments:
|
||||
# we are going to lose data here for
|
||||
# multiple attaches. We'll migrate and the
|
||||
# last update wins.
|
||||
|
||||
if not attachment.deleted_at:
|
||||
volume_id = attachment.volume_id
|
||||
volumes.update().\
|
||||
where(volumes.c.id == volume_id).\
|
||||
values(mountpoint=attachment.mountpoint,
|
||||
attached_host=attachment.attached_host,
|
||||
attach_time=attachment.attach_time,
|
||||
instance_uuid=attachment.instance_uuid).\
|
||||
execute()
|
||||
try:
|
||||
volume_attachment.drop()
|
||||
|
||||
except Exception:
|
||||
LOG.error(_LE("Dropping volume_attachment table failed."))
|
||||
raise
|
@ -0,0 +1,87 @@
|
||||
BEGIN TRANSACTION;
|
||||
|
||||
CREATE TABLE volumes_v39 (
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME,
|
||||
deleted_at DATETIME,
|
||||
deleted BOOLEAN,
|
||||
id VARCHAR(36) NOT NULL,
|
||||
ec2_id INTEGER,
|
||||
user_id VARCHAR(255),
|
||||
project_id VARCHAR(255),
|
||||
snapshot_id VARCHAR(36),
|
||||
host VARCHAR(255),
|
||||
size INTEGER,
|
||||
availability_zone VARCHAR(255),
|
||||
status VARCHAR(255),
|
||||
attach_status VARCHAR(255),
|
||||
scheduled_at DATETIME,
|
||||
launched_at DATETIME,
|
||||
terminated_at DATETIME,
|
||||
display_name VARCHAR(255),
|
||||
display_description VARCHAR(255),
|
||||
provider_location VARCHAR(255),
|
||||
provider_auth VARCHAR(255),
|
||||
volume_type_id VARCHAR(36),
|
||||
source_volid VARCHAR(36),
|
||||
bootable INTEGER,
|
||||
provider_geometry VARCHAR(255),
|
||||
_name_id VARCHAR(36),
|
||||
encryption_key_id VARCHAR(36),
|
||||
migration_status VARCHAR(255),
|
||||
attached_host VARCHAR(255),
|
||||
attach_time VARCHAR(255),
|
||||
instance_uuid VARCHAR(36),
|
||||
mountpoint VARCHAR(255),
|
||||
consistencygroup_id VARCHAR(36),
|
||||
replication_status VARCHAR(255),
|
||||
replication_extended_status VARCHAR(255),
|
||||
replication_driver_data VARCHAR(255),
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
INSERT INTO volumes_v39
|
||||
SELECT volumes.created_at,
|
||||
volumes.updated_at,
|
||||
volumes.deleted_at,
|
||||
volumes.deleted,
|
||||
volumes.id,
|
||||
volumes.ec2_id,
|
||||
volumes.user_id,
|
||||
volumes.project_id,
|
||||
volumes.snapshot_id,
|
||||
volumes.host,
|
||||
volumes.size,
|
||||
volumes.availability_zone,
|
||||
volumes.status,
|
||||
volumes.attach_status,
|
||||
volumes.scheduled_at,
|
||||
volumes.launched_at,
|
||||
volumes.terminated_at,
|
||||
volumes.display_name,
|
||||
volumes.display_description,
|
||||
volumes.provider_location,
|
||||
volumes.provider_auth,
|
||||
volumes.volume_type_id,
|
||||
volumes.source_volid,
|
||||
volumes.bootable,
|
||||
volumes.provider_geometry,
|
||||
volumes._name_id,
|
||||
volumes.encryption_key_id,
|
||||
volumes.migration_status,
|
||||
volume_attachment.attached_host,
|
||||
volume_attachment.attach_time,
|
||||
volume_attachment.instance_uuid,
|
||||
volume_attachment.mountpoint,
|
||||
volumes.consistencygroup_id,
|
||||
volumes.replication_status,
|
||||
volumes.replication_extended_status,
|
||||
volumes.replication_driver_data
|
||||
FROM volumes
|
||||
LEFT OUTER JOIN volume_attachment
|
||||
ON volumes.id=volume_attachment.volume_id;
|
||||
|
||||
DROP TABLE volumes;
|
||||
ALTER TABLE volumes_v39 RENAME TO volumes;
|
||||
DROP TABLE volume_attachment;
|
||||
COMMIT;
|