Add share instances and snapshot instances

Introduce new Manila core concepts:
- share instance
- snapshot instance

This is a minimalistic commit which assumes that
a share and a snapshot can have only one instance.
More complicated logic will be implemented in
the scope of share migration and replication
features.

Partially implements bp share-instances
Change-Id: I326c00b8252f2630e72bb0c22ca294f79feee2be
This commit is contained in:
Igor Malinovskiy 2015-08-07 16:38:08 +03:00
parent 4795f7c80e
commit f5f36e3bfc
13 changed files with 1028 additions and 271 deletions

View File

@ -71,10 +71,13 @@ class ShareNetworkController(wsgi.Controller):
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
shares = db_api.share_get_all_by_share_network(context, id)
if shares:
share_instances = (
db_api.share_instances_get_all_by_share_network(context, id)
)
if share_instances:
msg = _("Can not delete share network %(id)s, it has "
"%(len)s share(s).") % {'id': id, 'len': len(shares)}
"%(len)s share(s).") % {'id': id,
'len': len(share_instances)}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
for share_server in share_network['share_servers']:

View File

@ -50,9 +50,6 @@ class ViewBuilder(common.ViewBuilder):
export_locations = share.get('export_locations', [])
if export_locations:
export_locations = [item['path'] for item in export_locations]
if share['share_type_id'] and share.get('share_type'):
share_type = share['share_type']['name']
else:

View File

@ -281,6 +281,21 @@ def reservation_expire(context):
###################
def share_instance_get(context, instance_id, with_share_data=False):
    """Get share instance by id.

    :param context: request context
    :param instance_id: id of the share instance to fetch
    :param with_share_data: if True, also copy data from the parent share
        onto the returned instance
    """
    # Thin dispatch to the configured backend implementation (IMPL).
    return IMPL.share_instance_get(context, instance_id,
                                   with_share_data=with_share_data)
def share_instance_update(context, instance_id, values, with_share_data=False):
    """Update share instance fields.

    :param values: dict of attributes to set on the instance
    :param with_share_data: if True, also copy data from the parent share
        onto the returned instance
    """
    return IMPL.share_instance_update(context, instance_id, values,
                                      with_share_data=with_share_data)
###################
def share_create(context, values):
    """Create new share.

    :param values: dict of attributes for the new share
    """
    return IMPL.share_create(context, values)
@ -308,12 +323,9 @@ def share_get_all(context, filters=None, sort_key=None, sort_dir=None):
)
def share_get_all_by_host(context, host, filters=None, sort_key=None,
sort_dir=None):
"""Returns all shares with given host."""
return IMPL.share_get_all_by_host(
context, host, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
)
def share_instances_get_all_by_host(context, host):
    """Returns all share instances with given host."""
    # Dispatches to the backend; host matching semantics (exact vs.
    # pool-qualified) are defined by the implementation.
    return IMPL.share_instances_get_all_by_host(context, host)
def share_get_all_by_project(context, project_id, filters=None,
@ -325,12 +337,10 @@ def share_get_all_by_project(context, project_id, filters=None,
)
def share_get_all_by_share_network(context, share_network_id, filters=None,
sort_key=None, sort_dir=None):
def share_instances_get_all_by_share_network(context, share_network_id):
"""Returns list of shares that belong to given share network."""
return IMPL.share_get_all_by_share_network(
context, share_network_id, filters=filters, sort_key=sort_key,
sort_dir=sort_dir)
return IMPL.share_instances_get_all_by_share_network(context,
share_network_id)
def share_get_all_by_share_server(context, share_server_id, filters=None,
@ -385,6 +395,23 @@ def share_access_update(context, access_id, values):
####################
def share_snapshot_instance_update(context, instance_id, values):
    """Set the given properties on a snapshot instance and update it.

    Raises NotFound if the snapshot instance does not exist.
    """
    return IMPL.share_snapshot_instance_update(context, instance_id, values)
def share_snapshot_instance_get(context, instance_id, with_share_data=False):
    """Get a snapshot instance or raise if it does not exist.

    :param with_share_data: if True, also load parent share data onto the
        related share instance
    """
    return IMPL.share_snapshot_instance_get(
        context, instance_id, with_share_data=with_share_data)
####################
def share_snapshot_create(context, values):
    """Create a snapshot from the values dictionary."""
    # Thin dispatch to the configured backend implementation (IMPL).
    return IMPL.share_snapshot_create(context, values)
@ -502,10 +529,10 @@ def share_export_locations_get(context, share_id):
return IMPL.share_export_locations_get(context, share_id)
def share_export_locations_update(context, share_id, export_locations,
def share_export_locations_update(context, share_instance_id, export_locations,
delete=True):
"""Update export locations of share."""
return IMPL.share_export_locations_update(context, share_id,
return IMPL.share_export_locations_update(context, share_instance_id,
export_locations, delete)

View File

@ -0,0 +1,297 @@
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_share_instances
Revision ID: 5077ffcc5f1c
Revises: 3db9992c30f3
Create Date: 2015-06-26 12:54:55.630152
"""
# revision identifiers, used by Alembic.
revision = '5077ffcc5f1c'
down_revision = '3db9992c30f3'
from alembic import op
from sqlalchemy import Column, DateTime, ForeignKey, String
import six
from manila.db.migrations import utils
def create_share_instances_table(connection):
    """Create 'share_instances' and copy per-instance share data into it.

    Every existing share gets exactly one instance row (reusing the
    share's own id), after which the instance-level columns are dropped
    from the 'shares' table.

    :param connection: live DB connection obtained from op.get_bind()
    """
    # Create 'share_instances' table
    share_instances_table = op.create_table(
        'share_instances',
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', String(length=36), default='False'),
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('share_id', String(length=36),
               ForeignKey('shares.id', name="si_share_fk")),
        Column('host', String(length=255)),
        Column('status', String(length=255)),
        Column('scheduled_at', DateTime),
        Column('launched_at', DateTime),
        Column('terminated_at', DateTime),
        Column('share_network_id', String(length=36),
               ForeignKey('share_networks.id', name="si_share_network_fk"),
               nullable=True),
        Column('share_server_id', String(length=36),
               ForeignKey('share_servers.id', name="si_share_server_fk"),
               nullable=True),
        Column('availability_zone', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8')

    # Migrate data from 'shares' to 'share_instances'
    share_instances = []
    shares_table = utils.load_table('shares', connection)
    for share in connection.execute(shares_table.select()):
        share_instances.append({
            'created_at': share.created_at,
            'updated_at': share.updated_at,
            'deleted_at': share.deleted_at,
            'deleted': share.deleted,
            # Instance id deliberately reuses the share id so other rows
            # referencing shares can be re-pointed 1:1 to instances.
            'id': share.id,
            'share_id': share.id,
            'host': share.host,
            'status': share.status,
            'scheduled_at': share.scheduled_at,
            'launched_at': share.launched_at,
            'terminated_at': share.terminated_at,
            'share_network_id': share.share_network_id,
            'share_server_id': share.share_server_id,
            'availability_zone': share.availability_zone,
        })
    op.bulk_insert(share_instances_table, share_instances)

    # Remove columns moved to 'share_instances' table
    with op.batch_alter_table("shares") as batch_op:
        # NOTE(review): this drops *every* FK constraint on 'shares', not
        # only those on the columns removed below — confirm that is the
        # intended behavior for FKs on surviving columns.
        for fk in shares_table.foreign_key_constraints:
            batch_op.drop_constraint(fk.name, type_='foreignkey')

        batch_op.drop_column('host')
        batch_op.drop_column('status')
        batch_op.drop_column('scheduled_at')
        batch_op.drop_column('launched_at')
        batch_op.drop_column('terminated_at')
        batch_op.drop_column('share_network_id')
        batch_op.drop_column('share_server_id')
        batch_op.drop_column('availability_zone')
def remove_share_instances_table(connection):
    """Downgrade: fold instance data back into 'shares', drop the table.

    Only the first instance found for each share is copied back, so any
    additional instances are lost (see the downgrade() docstring).

    :param connection: live DB connection obtained from op.get_bind()
    """
    with op.batch_alter_table("shares") as batch_op:
        batch_op.add_column(Column('host', String(length=255)))
        batch_op.add_column(Column('status', String(length=255)))
        batch_op.add_column(Column('scheduled_at', DateTime))
        batch_op.add_column(Column('launched_at', DateTime))
        batch_op.add_column(Column('terminated_at', DateTime))
        batch_op.add_column(Column('share_network_id', String(length=36),
                                   ForeignKey('share_networks.id'),
                                   nullable=True))
        batch_op.add_column(Column('share_server_id', String(length=36),
                                   ForeignKey('share_servers.id'),
                                   nullable=True))
        batch_op.add_column(Column('availability_zone', String(length=255)))

    shares_table = utils.load_table('shares', connection)
    share_inst_table = utils.load_table('share_instances', connection)

    for share in connection.execute(shares_table.select()):
        # NOTE(review): assumes every share has at least one instance;
        # 'instance' would be None otherwise and the .values() below would
        # raise — confirm the invariant holds before downgrading.
        instance = connection.execute(
            share_inst_table.select().where(
                share_inst_table.c.share_id == share.id)
        ).first()

        op.execute(
            shares_table.update().where(
                shares_table.c.id == share.id
            ).values(
                {
                    'host': instance['host'],
                    'status': instance['status'],
                    'scheduled_at': instance['scheduled_at'],
                    'launched_at': instance['launched_at'],
                    'terminated_at': instance['terminated_at'],
                    'share_network_id': instance['share_network_id'],
                    'share_server_id': instance['share_server_id'],
                    'availability_zone': instance['availability_zone'],
                }
            )
        )
    op.drop_table('share_instances')
def create_snapshot_instances_table(connection):
    """Create 'share_snapshot_instances' and migrate snapshot data into it.

    Each existing snapshot gets one instance row (reusing the snapshot's
    own id) linked to the share instance of the snapshot's share, after
    which the instance-level columns are dropped from 'share_snapshots'.

    :param connection: live DB connection obtained from op.get_bind()
    """
    # Create 'share_snapshot_instances' table
    snapshot_instances_table = op.create_table(
        'share_snapshot_instances',
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', String(length=36), default='False'),
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('snapshot_id', String(length=36),
               ForeignKey('share_snapshots.id', name="ssi_snapshot_fk")),
        Column('share_instance_id', String(length=36),
               ForeignKey('share_instances.id', name="ssi_share_instance_fk")),
        Column('status', String(length=255)),
        Column('progress', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    # Migrate data from share_snapshots to share_snapshot_instances
    snapshot_instances = []
    snapshot_table = utils.load_table('share_snapshots', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    for snapshot in connection.execute(snapshot_table.select()):
        share_instances_rows = connection.execute(
            share_instances_table.select().where(
                share_instances_table.c.share_id == snapshot.share_id
            )
        )
        snapshot_instances.append({
            'created_at': snapshot.created_at,
            'updated_at': snapshot.updated_at,
            'deleted_at': snapshot.deleted_at,
            'deleted': snapshot.deleted,
            'id': snapshot.id,
            'snapshot_id': snapshot.id,
            'status': snapshot.status,
            'progress': snapshot.progress,
            # BUG FIX: key must match the column defined above
            # ('share_instance_id'); the previous 'snapshot_instance_id'
            # key does not exist in the new table, so the FK link was
            # never populated.
            'share_instance_id': share_instances_rows.first().id,
        })
    op.bulk_insert(snapshot_instances_table, snapshot_instances)

    # Remove columns moved to 'share_snapshot_instances' table
    with op.batch_alter_table("share_snapshots") as batch_op:
        batch_op.drop_column('status')
        batch_op.drop_column('progress')
def remove_snapshot_instances_table(connection):
    """Downgrade: fold snapshot-instance data back into 'share_snapshots'.

    Copies status/progress from each snapshot instance onto its parent
    snapshot row, then drops 'share_snapshot_instances'.

    :param connection: live DB connection obtained from op.get_bind()
    """
    with op.batch_alter_table("share_snapshots") as batch_op:
        batch_op.add_column(Column('status', String(length=255)))
        batch_op.add_column(Column('progress', String(length=255)))

    snapshots_table = utils.load_table('share_snapshots', connection)
    snapshots_inst_table = utils.load_table('share_snapshot_instances',
                                            connection)

    # NOTE(review): iterates every instance row (including soft-deleted
    # ones); if a snapshot somehow had several, the last one written wins.
    for snapshot_instance in connection.execute(snapshots_inst_table.select()):
        snapshot = connection.execute(
            snapshots_table.select().where(
                snapshots_table.c.id == snapshot_instance.snapshot_id)
        ).first()

        op.execute(
            snapshots_table.update().where(
                snapshots_table.c.id == snapshot.id
            ).values(
                {
                    'status': snapshot_instance['status'],
                    'progress': snapshot_instance['progress'],
                }
            )
        )
    op.drop_table('share_snapshot_instances')
def upgrade_export_locations_table(connection):
    """Re-point 'share_export_locations' at share instances and rename it.

    Adds a share_instance_id FK column, fills it from the (single) instance
    of each location's share, drops the old share_id column, and renames
    the table to 'share_instance_export_locations'.

    :param connection: live DB connection obtained from op.get_bind()
    """
    # Update 'share_export_locations' table
    op.add_column(
        'share_export_locations',
        Column('share_instance_id', String(36),
               ForeignKey('share_instances.id', name="sel_instance_id_fk"))
    )

    # Convert share_id to share_instance_id
    share_el_table = utils.load_table('share_export_locations', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    for export in connection.execute(share_el_table.select()):
        share_instance = connection.execute(
            share_instances_table.select().where(
                share_instances_table.c.share_id == export.share_id)
        ).first()

        op.execute(
            share_el_table.update().where(
                share_el_table.c.id == export.id
            ).values({'share_instance_id': six.text_type(share_instance.id)})
        )

    with op.batch_alter_table("share_export_locations") as batch_op:
        batch_op.drop_constraint('sel_id_fk', type_='foreignkey')
        batch_op.drop_column('share_id')

    # BUG FIX: alembic's BatchOperations does not provide rename_table();
    # rename through the module-level Operations object after the batch
    # block completes, mirroring op.rename_table() in the downgrade path.
    op.rename_table('share_export_locations',
                    'share_instance_export_locations')
def downgrade_export_locations_table(connection):
    """Rename the export-locations table back and re-point it at shares.

    Reverses upgrade_export_locations_table(): restores the share_id FK
    column, fills it from each location's share instance, and drops the
    share_instance_id column.

    :param connection: live DB connection obtained from op.get_bind()
    """
    op.rename_table('share_instance_export_locations',
                    'share_export_locations')
    op.add_column(
        'share_export_locations',
        Column('share_id', String(36),
               ForeignKey('shares.id', name="sel_id_fk"))
    )

    # Convert share_instance_id to share_id
    share_el_table = utils.load_table('share_export_locations', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    for export in connection.execute(share_el_table.select()):
        share_instance = connection.execute(
            share_instances_table.select().where(
                share_instances_table.c.id == export.share_instance_id)
        ).first()

        op.execute(
            share_el_table.update().where(
                share_el_table.c.id == export.id
            ).values({'share_id': six.text_type(share_instance.share_id)})
        )

    with op.batch_alter_table("share_export_locations") as batch_op:
        batch_op.drop_constraint('sel_instance_id_fk', type_='foreignkey')
        batch_op.drop_column('share_instance_id')
def upgrade():
    """Add instance tables and convert export locations.

    Order matters: both the snapshot-instances table and the export
    locations reference 'share_instances', so that table is created first.
    """
    connection = op.get_bind()

    create_share_instances_table(connection)
    create_snapshot_instances_table(connection)
    upgrade_export_locations_table(connection)
def downgrade():
    """Remove share_instances and share_snapshot_instance tables.

    This method can lead to data loss because only first share/snapshot
    instance is saved in shares/snapshot table.
    """
    connection = op.get_bind()

    # Reverse order of upgrade(): undo the tables that reference
    # 'share_instances' before dropping it.
    downgrade_export_locations_table(connection)
    remove_snapshot_instances_table(connection)
    remove_share_instances_table(connection)

View File

@ -48,7 +48,6 @@ from manila.i18n import _
from manila.i18n import _LE
from manila.i18n import _LW
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@ -382,10 +381,12 @@ def service_get_all_share_sorted(context):
with session.begin():
topic = CONF.share_topic
label = 'share_gigabytes'
subq = model_query(context, models.Share, models.Share.host,
subq = model_query(context, models.Share,
func.sum(models.Share.size).label(label),
session=session, read_deleted="no").\
group_by(models.Share.host).\
join(models.ShareInstance,
models.ShareInstance.share_id == models.Share.id).\
group_by(models.ShareInstance.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
@ -1085,6 +1086,116 @@ def reservation_expire(context):
reservation_query.soft_delete(synchronize_session=False)
################
def extract_instance_values(values, fields):
    """Pop the given fields out of *values* and return them as a new dict.

    Used to split a combined share/snapshot dict into its model-level and
    instance-level portions: the listed fields are removed from *values*
    and returned separately.

    :param values: dict to split; mutated in place (listed keys removed)
    :param fields: iterable of key names that belong to the instance
    :returns: dict of the extracted instance-level values
    """
    instance_values = {}
    for field in fields:
        field_value = values.pop(field, None)
        # Explicit None check: pop() has already removed the key, so a
        # plain truthiness test would silently discard legitimate falsy
        # values such as 0, '' or False instead of carrying them over.
        if field_value is not None:
            instance_values[field] = field_value
    return instance_values
def extract_share_instance_values(values):
    """Split share-instance-level fields out of a combined share dict."""
    return extract_instance_values(
        values,
        ['status', 'host', 'scheduled_at', 'launched_at', 'terminated_at',
         'share_server_id', 'share_network_id', 'availability_zone'],
    )
def extract_snapshot_instance_values(values):
    """Split snapshot-instance-level fields out of a combined snapshot dict."""
    return extract_instance_values(values, ['status', 'progress'])
################
@require_context
def share_instance_create(context, share_id, values, session):
    """Create a new share instance row bound to share *share_id*."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    values.update({'share_id': share_id})

    instance = models.ShareInstance()
    instance.update(values)
    instance.save(session=session)

    # Re-read through share_instance_get so relationships are populated.
    return share_instance_get(context, instance['id'], session=session)
@require_context
def share_instance_update(context, share_instance_id, values,
                          with_share_data=False):
    """Update a share instance, optionally attaching parent share data."""
    session = get_session()
    with session.begin():
        updated = _share_instance_update(
            context, share_instance_id, values, session
        )
        if not with_share_data:
            return updated
        updated.set_share_data(
            share_get(context, updated['share_id'], session=session)
        )
        return updated
@require_context
def _share_instance_update(context, share_instance_id, values, session):
    """Apply *values* to an existing share instance and persist it."""
    instance = share_instance_get(context, share_instance_id,
                                  session=session)
    instance.update(values)
    instance.save(session=session)
    return instance
@require_context
def share_instance_get(context, share_instance_id, session=None,
                       with_share_data=False):
    """Fetch a single share instance by id; raise NotFound if absent."""
    if session is None:
        session = get_session()

    instance = model_query(
        context, models.ShareInstance, session=session,
    ).filter_by(id=share_instance_id).first()

    if instance is None:
        raise exception.NotFound()

    if with_share_data:
        # Mirror the parent share's attributes onto the instance.
        instance.set_share_data(
            share_get(context, instance['share_id'], session=session)
        )

    return instance
@require_admin_context
def share_instances_get_all_by_host(context, host):
    """Retrieves all share instances hosted on a host."""
    # Match both plain host names and pool-qualified ones ("host#pool").
    host_matches = or_(
        models.ShareInstance.host == host,
        models.ShareInstance.host.like("{0}#%".format(host)),
    )
    return model_query(
        context, models.ShareInstance).filter(host_matches).all()
@require_context
def share_instances_get_all_by_share_network(context, share_network_id):
    """Returns list of share instances that belong to given share network."""
    query = model_query(context, models.ShareInstance).filter(
        models.ShareInstance.share_network_id == share_network_id,
    )
    return query.all()
################
@ -1110,15 +1221,21 @@ def _metadata_refs(metadata_dict, meta_class):
@require_context
def share_create(context, values):
def share_create(context, values, create_share_instance=True):
values = ensure_model_dict_has_id(values)
values['share_metadata'] = _metadata_refs(values.get('metadata'),
models.ShareMetadata)
share_ref = models.Share()
share_instance_values = extract_share_instance_values(values)
share_ref.update(values)
session = get_session()
with session.begin():
share_ref.save(session=session)
if create_share_instance:
share_instance_create(context, share_ref['id'],
share_instance_values, session=session)
# NOTE(u_glide): Do so to prevent errors with relationships
return share_get(context, share_ref['id'], session=session)
@ -1145,6 +1262,11 @@ def share_update(context, share_id, values):
session = get_session()
with session.begin():
share_ref = share_get(context, share_id, session=session)
share_instance_values = extract_share_instance_values(values)
_share_instance_update(context, share_ref.instance['id'],
share_instance_values, session=session)
share_ref.update(values)
share_ref.save(session=session)
return share_ref
@ -1160,15 +1282,13 @@ def share_get(context, share_id, session=None):
@require_context
def _share_get_all_with_filters(context, project_id=None, share_server_id=None,
share_network_id=None, host=None, filters=None,
is_public=False, sort_key=None, sort_dir=None):
filters=None, is_public=False, sort_key=None,
sort_dir=None):
"""Returns sorted list of shares that satisfies filters.
:param context: context to query under
:param project_id: project id that owns shares
:param share_server_id: share server that hosts shares
:param share_network_id: share network that was used for shares
:param host: host name where shares [and share servers] are located
:param filters: dict of filters to specify share selection
:param is_public: public shares from other projects will be added
to result if True
@ -1181,24 +1301,22 @@ def _share_get_all_with_filters(context, project_id=None, share_server_id=None,
sort_key = 'created_at'
if not sort_dir:
sort_dir = 'desc'
query = _share_get_query(context)
query = (
_share_get_query(context).join(
models.ShareInstance,
models.ShareInstance.share_id == models.Share.id
)
)
if project_id:
if is_public:
query = query.filter(or_(models.Share.project_id == project_id,
models.Share.is_public))
else:
query = query.filter_by(project_id=project_id)
query = query.filter(models.Share.project_id == project_id)
if share_server_id:
query = query.filter_by(share_server_id=share_server_id)
if share_network_id:
query = query.filter_by(share_network_id=share_network_id)
if host and isinstance(host, six.string_types):
session = get_session()
with session.begin():
host_attr = getattr(models.Share, 'host')
conditions = [host_attr == host,
host_attr.op('LIKE')(host + '#%')]
query = query.filter(or_(*conditions))
query = query.filter(
models.ShareInstance.share_server_id == share_server_id)
# Apply filters
if not filters:
@ -1221,8 +1339,11 @@ def _share_get_all_with_filters(context, project_id=None, share_server_id=None,
try:
attr = getattr(models.Share, sort_key)
except AttributeError:
msg = _("Wrong sorting key provided - '%s'.") % sort_key
raise exception.InvalidInput(reason=msg)
try:
attr = getattr(models.ShareInstance, sort_key)
except AttributeError:
msg = _("Wrong sorting key provided - '%s'.") % sort_key
raise exception.InvalidInput(reason=msg)
if sort_dir.lower() == 'desc':
query = query.order_by(attr.desc())
elif sort_dir.lower() == 'asc':
@ -1245,17 +1366,6 @@ def share_get_all(context, filters=None, sort_key=None, sort_dir=None):
return query
@require_admin_context
def share_get_all_by_host(context, host, filters=None,
sort_key=None, sort_dir=None):
"""Retrieves all shares hosted on a host."""
query = _share_get_all_with_filters(
context, host=host, filters=filters,
sort_key=sort_key, sort_dir=sort_dir,
)
return query
@require_context
def share_get_all_by_project(context, project_id, filters=None,
is_public=False, sort_key=None, sort_dir=None):
@ -1267,16 +1377,6 @@ def share_get_all_by_project(context, project_id, filters=None,
return query
@require_context
def share_get_all_by_share_network(context, share_network_id, filters=None,
sort_key=None, sort_dir=None):
"""Returns list of shares that belong to given share network."""
query = _share_get_all_with_filters(
context, share_network_id=share_network_id, filters=filters,
sort_key=sort_key, sort_dir=sort_dir)
return query
@require_context
def share_get_all_by_share_server(context, share_server_id, filters=None,
sort_key=None, sort_dir=None):
@ -1291,14 +1391,16 @@ def share_get_all_by_share_server(context, share_server_id, filters=None,
@require_context
def share_delete(context, share_id):
session = get_session()
share_ref = share_get(context, share_id, session)
share_ref.soft_delete(session=session, update_status=True)
session.query(models.ShareMetadata).\
filter_by(share_id=share_id).soft_delete()
session.query(models.ShareExportLocations).\
filter_by(share_id=share_id).soft_delete()
share_ref.save(session)
with session.begin():
share_ref = share_get(context, share_id, session)
share_ref.soft_delete(session=session)
share_ref.instance.soft_delete(session=session, update_status=True)
session.query(models.ShareMetadata).\
filter_by(share_id=share_id).soft_delete()
session.query(models.ShareInstanceExportLocations).\
filter_by(share_instance_id=share_ref.instance['id']).soft_delete()
###################
@ -1375,16 +1477,81 @@ def share_access_update(context, access_id, values):
@require_context
def share_snapshot_create(context, values):
def share_snapshot_instance_create(context, snapshot_id, values, session):
    """Create a snapshot instance row bound to snapshot *snapshot_id*."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    values.update({'snapshot_id': snapshot_id})

    instance = models.ShareSnapshotInstance()
    instance.update(values)
    instance.save(session=session)

    # Re-read through the getter so the returned object is fully loaded.
    return share_snapshot_instance_get(context, instance['id'],
                                       session=session)
@require_context
def share_snapshot_instance_update(context, instance_id, values):
    """Apply *values* to an existing snapshot instance and persist it."""
    session = get_session()
    instance = share_snapshot_instance_get(context, instance_id,
                                           session=session)
    instance.update(values)
    instance.save(session=session)
    return instance
@require_context
def share_snapshot_instance_get(context, instance_id, session=None,
                                with_share_data=False):
    """Fetch a snapshot instance by id; raise NotFound if absent."""
    if session is None:
        session = get_session()

    instance = model_query(
        context, models.ShareSnapshotInstance, session=session,
    ).filter_by(id=instance_id).first()

    if instance is None:
        raise exception.NotFound()

    if with_share_data:
        # Attach the fully-loaded share instance under the 'share' key.
        instance['share'] = share_instance_get(
            context, instance['share_instance_id'],
            session=session, with_share_data=True,
        )

    return instance
###################
@require_context
def share_snapshot_create(context, values, create_snapshot_instance=True):
values = ensure_model_dict_has_id(values)
snapshot_ref = models.ShareSnapshot()
snapshot_instance_values = extract_snapshot_instance_values(values)
share_ref = share_get(context, values.get('share_id'))
snapshot_instance_values.update(
{'share_instance_id': share_ref.instance.id}
)
snapshot_ref.update(values)
session = get_session()
with session.begin():
snapshot_ref.save(session=session)
return share_snapshot_get(context, values['id'], session=session)
if create_snapshot_instance:
share_snapshot_instance_create(
context,
snapshot_ref['id'],
snapshot_instance_values,
session=session
)
return share_snapshot_get(context, values['id'], session=session)
@require_admin_context
@ -1408,8 +1575,10 @@ def snapshot_data_get_for_project(context, project_id, user_id, session=None):
def share_snapshot_destroy(context, snapshot_id):
session = get_session()
with session.begin():
session.query(models.ShareSnapshot).\
filter_by(id=snapshot_id).soft_delete(update_status=True)
snapshot_ref = share_snapshot_get(context, snapshot_id,
session=session)
snapshot_ref.instance.soft_delete(session=session, update_status=True)
snapshot_ref.soft_delete(session=session)
@require_context
@ -1528,8 +1697,17 @@ def share_snapshot_update(context, snapshot_id, values):
with session.begin():
snapshot_ref = share_snapshot_get(context, snapshot_id,
session=session)
snapshot_ref.update(values)
snapshot_ref.save(session=session)
instance_values = extract_snapshot_instance_values(values)
if values:
snapshot_ref.update(values)
snapshot_ref.save(session=session)
if instance_values:
snapshot_ref.instance.update(instance_values)
snapshot_ref.instance.save(session=session)
return snapshot_ref
@ -1632,28 +1810,29 @@ def _share_metadata_get_item(context, share_id, key, session=None):
@require_context
@require_share_exists
def share_export_locations_get(context, share_id):
rows = _share_export_locations_get(context, share_id)
share = share_get(context, share_id)
rows = _share_export_locations_get(context, share.instance.id)
return [location['path'] for location in rows]
def _share_export_locations_get(context, share_id, session=None):
def _share_export_locations_get(context, share_instance_id, session=None):
if not session:
session = get_session()
return (
model_query(context, models.ShareExportLocations,
model_query(context, models.ShareInstanceExportLocations,
session=session, read_deleted="no").
filter_by(share_id=share_id).
filter_by(share_instance_id=share_instance_id).
order_by("updated_at").
all()
)
@require_context
@require_share_exists
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_export_locations_update(context, share_id, export_locations, delete):
def share_export_locations_update(context, share_instance_id, export_locations,
delete):
# NOTE(u_glide):
# Backward compatibility code for drivers,
# which returns single export_location as string
@ -1664,7 +1843,7 @@ def share_export_locations_update(context, share_id, export_locations, delete):
with session.begin():
location_rows = _share_export_locations_get(
context, share_id, session=session)
context, share_instance_id, session=session)
def get_path_list_from_rows(rows):
return set([l['path'] for l in rows])
@ -1700,10 +1879,10 @@ def share_export_locations_update(context, share_id, export_locations, delete):
# Already updated
continue
location_ref = models.ShareExportLocations()
location_ref = models.ShareInstanceExportLocations()
location_ref.update({
'path': path,
'share_id': share_id,
'share_instance_id': share_instance_id,
'updated_at': indexed_update_time[path],
'deleted': 0,
})
@ -1713,7 +1892,7 @@ def share_export_locations_update(context, share_id, export_locations, delete):
return export_locations
return get_path_list_from_rows(_share_export_locations_get(
context, share_id, session=session))
context, share_instance_id, session=session))
#################################
@ -1790,7 +1969,7 @@ def _network_get_query(context, session=None):
if session is None:
session = get_session()
return model_query(context, models.ShareNetwork, session=session).\
options(joinedload('shares'),
options(joinedload('share_instances'),
joinedload('security_services'),
joinedload('share_servers'))
@ -1922,7 +2101,8 @@ def _server_get_query(context, session=None):
if session is None:
session = get_session()
return model_query(context, models.ShareServer, session=session).\
options(joinedload('shares'), joinedload('network_allocations'),
options(joinedload('share_instances'),
joinedload('network_allocations'),
joinedload('share_network'))
@ -2008,7 +2188,7 @@ def share_server_get_all_unused_deletable(context, host, updated_before):
)
result = _server_get_query(context)\
.filter_by(host=host)\
.filter(~models.ShareServer.shares.any())\
.filter(~models.ShareServer.share_instances.any())\
.filter(models.ShareServer.status.in_(valid_server_status))\
.filter(models.ShareServer.updated_at < updated_before).all()
return result

View File

@ -21,6 +21,7 @@ SQLAlchemy models for Manila data.
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_log import log
import six
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy.ext.declarative import declarative_base
@ -32,6 +33,8 @@ from manila.common import constants
CONF = cfg.CONF
BASE = declarative_base()
LOG = log.getLogger(__name__)
class ManilaBase(models.ModelBase,
models.TimestampMixin,
@ -172,7 +175,9 @@ class Reservation(BASE, ManilaBase):
class Share(BASE, ManilaBase):
"""Represents an NFS and CIFS shares."""
__tablename__ = 'shares'
_extra_keys = ['name', 'export_location']
_extra_keys = ['name', 'export_location', 'export_locations', 'status',
'host', 'share_server_id', 'share_network_id',
'availability_zone']
@property
def name(self):
@ -181,44 +186,120 @@ class Share(BASE, ManilaBase):
@property
def export_location(self):
if len(self.export_locations) > 0:
return self.export_locations[0]['path']
return self.export_locations[0]
@property
def export_locations(self):
# TODO(u_glide): Return a map with lists of locations per AZ when
# replication functionality will be implemented.
all_export_locations = []
for instance in self.instances:
for export_location in instance.export_locations:
all_export_locations.append(export_location['path'])
return all_export_locations
def __getattr__(self, item):
deprecated_properties = ('host', 'share_server_id', 'share_network_id',
'availability_zone')
proxified_properties = ('status',) + deprecated_properties
if item in deprecated_properties:
msg = ("Property '%s' is deprecated. Please use appropriate "
"property from share instance." % item)
LOG.warning(msg)
if item in proxified_properties:
return getattr(self.instance, item, None)
raise AttributeError
@property
def share_server_id(self):
return self.__getattr__('share_server_id')
@property
def instance(self):
if len(self.instances) > 0:
return self.instances[0]
id = Column(String(36), primary_key=True)
deleted = Column(String(36), default='False')
user_id = Column(String(255))
project_id = Column(String(255))
host = Column(String(255))
size = Column(Integer)
availability_zone = Column(String(255))
status = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
snapshot_id = Column(String(36))
share_proto = Column(String(255))
export_locations = orm.relationship(
"ShareExportLocations",
lazy='immediate',
primaryjoin='and_('
'Share.id == ShareExportLocations.share_id, '
'ShareExportLocations.deleted == 0)')
share_network_id = Column(String(36), ForeignKey('share_networks.id'),
nullable=True)
share_type_id = Column(String(36), ForeignKey('share_types.id'),
nullable=True)
share_server_id = Column(String(36), ForeignKey('share_servers.id'),
nullable=True)
is_public = Column(Boolean, default=False)
instances = orm.relationship(
"ShareInstance",
lazy='immediate',
primaryjoin=(
'and_('
'Share.id == ShareInstance.share_id, '
'ShareInstance.deleted == "False")'
),
viewonly=True,
join_depth=2,
)
class ShareExportLocations(BASE, ManilaBase):
class ShareInstance(BASE, ManilaBase):
__tablename__ = 'share_instances'
_proxified_properties = ('name', 'user_id', 'project_id', 'size',
'display_name', 'display_description',
'snapshot_id', 'share_proto', 'share_type_id',
'is_public')
def set_share_data(self, share):
    """Copy every proxied share field from *share* onto this instance."""
    for attr_name in self._proxified_properties:
        value = share[attr_name]
        setattr(self, attr_name, value)
@property
def export_location(self):
    """Path of the first export location, or None when there are none."""
    locations = self.export_locations
    if not locations:
        return None
    return locations[0]['path']
id = Column(String(36), primary_key=True)
share_id = Column(String(36), ForeignKey('shares.id'))
deleted = Column(String(36), default='False')
host = Column(String(255))
status = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
availability_zone = Column(String(255))
export_locations = orm.relationship(
"ShareInstanceExportLocations",
lazy='immediate',
primaryjoin=(
'and_('
'ShareInstance.id == '
'ShareInstanceExportLocations.share_instance_id, '
'ShareInstanceExportLocations.deleted == 0)'
)
)
share_network_id = Column(String(36), ForeignKey('share_networks.id'),
nullable=True)
share_server_id = Column(String(36), ForeignKey('share_servers.id'),
nullable=True)
class ShareInstanceExportLocations(BASE, ManilaBase):
"""Represents export locations of shares."""
__tablename__ = 'share_export_locations'
__tablename__ = 'share_instance_export_locations'
id = Column(Integer, primary_key=True)
share_id = Column(String(36), ForeignKey('shares.id'), nullable=False)
share_instance_id = Column(
String(36), ForeignKey('share_instances.id'), nullable=False)
path = Column(String(2000))
@ -315,6 +396,7 @@ class ShareAccessMapping(BASE, ManilaBase):
class ShareSnapshot(BASE, ManilaBase):
"""Represents a snapshot of a share."""
__tablename__ = 'share_snapshots'
_extra_keys = ['name', 'share_name', 'status', 'progress']
@property
def name(self):
@ -324,14 +406,27 @@ class ShareSnapshot(BASE, ManilaBase):
def share_name(self):
return CONF.share_name_template % self.share_id
@property
def status(self):
    """Status of the snapshot instance, or None when no instance exists."""
    inst = self.instance
    return inst.status if inst else None
@property
def progress(self):
    """Progress of the snapshot instance, or None when no instance exists."""
    inst = self.instance
    return inst.progress if inst else None
@property
def instance(self):
    """First (currently the only) snapshot instance, or None."""
    snapshot_instances = self.instances
    if not snapshot_instances:
        return None
    return snapshot_instances[0]
id = Column(String(36), primary_key=True)
deleted = Column(String(36), default='False')
user_id = Column(String(255))
project_id = Column(String(255))
share_id = Column(String(36))
size = Column(Integer)
status = Column(String(255))
progress = Column(String(255))
display_name = Column(String(255))
display_description = Column(String(255))
share_size = Column(Integer)
@ -342,6 +437,46 @@ class ShareSnapshot(BASE, ManilaBase):
'ShareSnapshot.share_id == Share.id,'
'ShareSnapshot.deleted == "False")')
instances = orm.relationship(
"ShareSnapshotInstance",
lazy='immediate',
primaryjoin=(
'and_('
'ShareSnapshot.id == ShareSnapshotInstance.snapshot_id, '
'ShareSnapshotInstance.deleted == "False")'
),
viewonly=True,
join_depth=2,
)
class ShareSnapshotInstance(BASE, ManilaBase):
"""Represents a snapshot of a share."""
__tablename__ = 'share_snapshot_instances'
@property
def share_id(self):
    """Alias for ``share_instance_id``.

    Drivers are handed share/snapshot instances where they previously
    received shares/snapshots, and they still read ``share_id``.
    """
    # NOTE(u_glide): This property required for compatibility
    # with share drivers
    return self.share_instance_id
id = Column(String(36), primary_key=True)
deleted = Column(String(36), default='False')
snapshot_id = Column(
String(36), ForeignKey('share_snapshots.id'), nullable=False)
share_instance_id = Column(
String(36), ForeignKey('share_instances.id'), nullable=False)
status = Column(String(255))
progress = Column(String(255))
share_instance = orm.relationship(
ShareInstance, backref="snapshot_instances",
foreign_keys=share_instance_id,
primaryjoin=(
'and_('
'ShareSnapshotInstance.share_instance_id == ShareInstance.id,'
'ShareSnapshotInstance.deleted == "False")')
)
class SecurityService(BASE, ManilaBase):
"""Security service information for manila shares."""
@ -389,11 +524,12 @@ class ShareNetwork(BASE, ManilaBase):
'SecurityService.id == '
'ShareNetworkSecurityServiceAssociation.security_service_id,'
'SecurityService.deleted == "False")')
shares = orm.relationship("Share",
backref='share_network',
primaryjoin='and_('
'ShareNetwork.id == Share.share_network_id,'
'Share.deleted == "False")')
share_instances = orm.relationship(
"ShareInstance",
backref='share_network',
primaryjoin='and_('
'ShareNetwork.id == ShareInstance.share_network_id,'
'ShareInstance.deleted == "False")')
share_servers = orm.relationship(
"ShareServer", backref='share_network',
primaryjoin='and_(ShareNetwork.id == ShareServer.share_network_id,'
@ -417,11 +553,12 @@ class ShareServer(BASE, ManilaBase):
primaryjoin='and_('
'ShareServer.id == NetworkAllocation.share_server_id,'
'NetworkAllocation.deleted == "False")')
shares = orm.relationship("Share",
backref='share_server',
primaryjoin='and_('
'ShareServer.id == Share.share_server_id,'
'Share.deleted == "False")')
share_instances = orm.relationship(
"ShareInstance",
backref='share_server',
primaryjoin='and_('
'ShareServer.id == ShareInstance.share_server_id,'
'ShareInstance.deleted == "False")')
_backend_details = orm.relationship(
"ShareServerBackendDetails",

View File

@ -263,7 +263,7 @@ class API(base.Base):
msg = _("Share already exists.")
raise exception.ManilaException(msg)
self.db.share_export_locations_update(context, share['id'],
self.db.share_export_locations_update(context, share.instance['id'],
export_location)
self.share_rpcapi.manage_share(context, share, driver_options)

View File

@ -125,24 +125,26 @@ class ShareManager(manager.SchedulerDependentManager):
configuration=self.configuration
)
def _ensure_share_has_pool(self, ctxt, share):
pool = share_utils.extract_host(share['host'], 'pool')
def _ensure_share_instance_has_pool(self, ctxt, share_instance):
pool = share_utils.extract_host(share_instance['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# share created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(share)
pool = self.driver.get_pool(share_instance)
except Exception as err:
LOG.error(_LE("Failed to fetch pool name for share: "
"%(share)s. Error: %(error)s."),
{'share': share['id'], 'error': err})
{'share': share_instance['id'], 'error': err})
return
if pool:
new_host = share_utils.append_host(share['host'], pool)
self.db.share_update(ctxt, share['id'], {'host': new_host})
new_host = share_utils.append_host(
share_instance['host'], pool)
self.db.share_update(
ctxt, share_instance['id'], {'host': new_host})
return pool
@ -153,41 +155,46 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
shares = self.db.share_get_all_by_host(ctxt, self.host)
LOG.debug("Re-exporting %s shares", len(shares))
for share in shares:
if share['status'] != constants.STATUS_AVAILABLE:
share_instances = self.db.share_instances_get_all_by_host(ctxt,
self.host)
LOG.debug("Re-exporting %s shares", len(share_instances))
for share_instance in share_instances:
if share_instance['status'] != constants.STATUS_AVAILABLE:
LOG.info(
_LI("Share %(name)s: skipping export, because it has "
"'%(status)s' status."),
{'name': share['name'], 'status': share['status']},
_LI("Share instance %(id)s: skipping export, "
"because it has '%(status)s' status."),
{'id': share_instance['id'],
'status': share_instance['status']},
)
continue
self._ensure_share_has_pool(ctxt, share)
share_server = self._get_share_server(ctxt, share)
self._ensure_share_instance_has_pool(ctxt, share_instance)
share_server = self._get_share_server(ctxt, share_instance)
share_instance = self.db.share_instance_get(
ctxt, share_instance['id'], with_share_data=True)
try:
export_locations = self.driver.ensure_share(
ctxt, share, share_server=share_server)
ctxt, share_instance, share_server=share_server)
except Exception as e:
LOG.error(
_LE("Caught exception trying ensure share '%(s_id)s'. "
"Exception: \n%(e)s."),
{'s_id': share['id'], 'e': six.text_type(e)},
{'s_id': share_instance['id'], 'e': six.text_type(e)},
)
continue
if export_locations:
self.db.share_export_locations_update(
ctxt, share['id'], export_locations)
ctxt, share_instance['id'], export_locations)
rules = self.db.share_access_get_all_for_share(ctxt, share['id'])
rules = self.db.share_access_get_all_for_share(
ctxt, share_instance['id'])
for access_ref in rules:
if access_ref['state'] != access_ref.STATE_ACTIVE:
continue
try:
self.driver.allow_access(ctxt, share, access_ref,
self.driver.allow_access(ctxt, share_instance, access_ref,
share_server=share_server)
except exception.ShareAccessExists:
pass
@ -198,7 +205,7 @@ class ShareManager(manager.SchedulerDependentManager):
", access rule type is '%(ar_type)s', "
"access rule id is '%(ar_id)s', exception"
" is '%(e)s'."),
{'s_id': share['id'],
{'s_id': share_instance['id'],
'ar_type': access_ref['access_type'],
'ar_id': access_ref['id'],
'e': six.text_type(e)},
@ -207,7 +214,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.publish_service_capabilities(ctxt)
def _provide_share_server_for_share(self, context, share_network_id,
share, snapshot=None):
share_instance, snapshot=None):
"""Gets or creates share_server and updates share with its id.
Active share_server can be deleted if there are no dependent shares
@ -224,7 +231,7 @@ class ShareManager(manager.SchedulerDependentManager):
should be found or created. If
share_network_id is None method use
share_network_id from provided snapshot.
:param share: Share model
:param share_instance: Share Instance model
:param snapshot: Optional -- Snapshot model
:returns: dict, dict -- first value is share_server, that
@ -240,8 +247,9 @@ class ShareManager(manager.SchedulerDependentManager):
def error(msg, *args):
LOG.error(msg, *args)
self.db.share_update(context, share['id'],
{'status': constants.STATUS_ERROR})
self.db.share_instance_update(context, share_instance['id'],
{'status': constants.STATUS_ERROR})
if snapshot:
parent_share_server_id = snapshot['share']['share_server_id']
@ -289,8 +297,8 @@ class ShareManager(manager.SchedulerDependentManager):
try:
compatible_share_server = (
self.driver.choose_share_server_compatible_with_share(
context, available_share_servers, share,
snapshot=snapshot
context, available_share_servers, share_instance,
snapshot=snapshot.instance if snapshot else None
)
)
except Exception as e:
@ -308,16 +316,18 @@ class ShareManager(manager.SchedulerDependentManager):
}
)
msg = "Using share_server %(share_server)s for share %(share_id)s"
msg = ("Using share_server %(share_server)s for share instance"
" %(share_instance_id)s")
LOG.debug(msg, {
'share_server': compatible_share_server['id'],
'share_id': share['id']
'share_instance_id': share_instance['id']
})
share_ref = self.db.share_update(
share_instance_ref = self.db.share_instance_update(
context,
share['id'],
share_instance['id'],
{'share_server_id': compatible_share_server['id']},
with_share_data=True
)
if compatible_share_server['status'] == constants.STATUS_CREATING:
@ -329,7 +339,7 @@ class ShareManager(manager.SchedulerDependentManager):
LOG.info(_LI("Used preexisting share server "
"'%(share_server_id)s'"),
{'share_server_id': compatible_share_server['id']})
return compatible_share_server, share_ref
return compatible_share_server, share_instance_ref
return _provide_share_server_for_share()
@ -340,6 +350,10 @@ class ShareManager(manager.SchedulerDependentManager):
else:
return None
def _get_share_instance(self, context, share):
    """Fetch the share's (single) instance from the DB, with share data.

    :param context: request context used for the DB lookup
    :param share: share model whose first instance id is looked up
    :returns: share instance model with the share's fields attached
        (``with_share_data=True``)
    """
    return self.db.share_instance_get(
        context, share.instance['id'], with_share_data=True)
def create_share(self, context, share_id, request_spec=None,
filter_properties=None, snapshot_id=None):
"""Creates a share."""
@ -348,7 +362,8 @@ class ShareManager(manager.SchedulerDependentManager):
filter_properties = {}
share_ref = self.db.share_get(context, share_id)
share_network_id = share_ref.get('share_network_id', None)
share_instance = self._get_share_instance(context, share_ref)
share_network_id = share_instance.get('share_network_id', None)
if share_network_id and not self.driver.driver_handles_share_servers:
self.db.share_update(
@ -366,9 +381,11 @@ class ShareManager(manager.SchedulerDependentManager):
if share_network_id or parent_share_server_id:
try:
share_server, share_ref = self._provide_share_server_for_share(
context, share_network_id, share_ref,
snapshot=snapshot_ref
share_server, share_instance = (
self._provide_share_server_for_share(
context, share_network_id, share_instance,
snapshot=snapshot_ref
)
)
except Exception:
with excutils.save_and_reraise_exception():
@ -382,14 +399,14 @@ class ShareManager(manager.SchedulerDependentManager):
try:
if snapshot_ref:
export_locations = self.driver.create_share_from_snapshot(
context, share_ref, snapshot_ref,
context, share_instance, snapshot_ref.instance,
share_server=share_server)
else:
export_locations = self.driver.create_share(
context, share_ref, share_server=share_server)
context, share_instance, share_server=share_server)
self.db.share_export_locations_update(context, share_id,
export_locations)
self.db.share_export_locations_update(
context, share_instance['id'], export_locations)
except Exception as e:
with excutils.save_and_reraise_exception():
@ -406,7 +423,7 @@ class ShareManager(manager.SchedulerDependentManager):
if export_locations:
self.db.share_export_locations_update(
context, share_id, export_locations)
context, share_instance['id'], export_locations)
else:
LOG.warning(_LW('Share information in exception '
'can not be written to db because it '
@ -423,6 +440,7 @@ class ShareManager(manager.SchedulerDependentManager):
def manage_share(self, context, share_id, driver_options):
context = context.elevated()
share_ref = self.db.share_get(context, share_id)
share_instance = self._get_share_instance(context, share_ref)
project_id = share_ref['project_id']
try:
@ -432,7 +450,9 @@ class ShareManager(manager.SchedulerDependentManager):
raise exception.InvalidShare(reason=msg)
share_update = (
self.driver.manage_existing(share_ref, driver_options) or {})
self.driver.manage_existing(share_instance, driver_options)
or {}
)
if not share_update.get('size'):
msg = _("Driver cannot calculate share size.")
@ -453,7 +473,8 @@ class ShareManager(manager.SchedulerDependentManager):
# provided by user.
if 'export_locations' in share_update:
self.db.share_export_locations_update(
context, share_id, share_update.pop('export_locations'),
context, share_instance['id'],
share_update.pop('export_locations'),
delete=True)
self.db.share_update(context, share_id, share_update)
@ -482,6 +503,7 @@ class ShareManager(manager.SchedulerDependentManager):
def unmanage_share(self, context, share_id):
context = context.elevated()
share_ref = self.db.share_get(context, share_id)
share_instance = self._get_share_instance(context, share_ref)
share_server = self._get_share_server(context, share_ref)
project_id = share_ref['project_id']
@ -501,7 +523,7 @@ class ShareManager(manager.SchedulerDependentManager):
"shares with share servers.")
raise exception.InvalidShare(reason=msg)
self.driver.unmanage(share_ref)
self.driver.unmanage(share_instance)
except exception.InvalidShare as e:
share_manage_set_error_status(
@ -539,6 +561,7 @@ class ShareManager(manager.SchedulerDependentManager):
"""Delete a share."""
context = context.elevated()
share_ref = self.db.share_get(context, share_id)
share_instance = self._get_share_instance(context, share_ref)
share_server = self._get_share_server(context, share_ref)
if context.project_id != share_ref['project_id']:
@ -548,7 +571,7 @@ class ShareManager(manager.SchedulerDependentManager):
try:
self._remove_share_access_rules(context, share_ref, share_server)
self.driver.delete_share(context, share_ref,
self.driver.delete_share(context, share_instance,
share_server=share_server)
except Exception:
with excutils.save_and_reraise_exception():
@ -573,7 +596,7 @@ class ShareManager(manager.SchedulerDependentManager):
if CONF.delete_share_server_with_last_share:
share_server = self._get_share_server(context, share_ref)
if share_server and not share_server.shares:
if share_server and not share_server.share_instances:
LOG.debug("Scheduled deletion of share-server "
"with id '%s' automatically by "
"deletion of last share.", share_server['id'])
@ -605,26 +628,33 @@ class ShareManager(manager.SchedulerDependentManager):
snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
share_server = self._get_share_server(context,
snapshot_ref['share'])
snapshot_instance = self.db.share_snapshot_instance_get(
context, snapshot_ref.instance['id'], with_share_data=True
)
snapshot_instance_id = snapshot_instance['id']
try:
model_update = self.driver.create_snapshot(
context, snapshot_ref, share_server=share_server)
context, snapshot_instance, share_server=share_server)
if model_update:
model_dict = model_update.to_dict()
self.db.share_snapshot_update(context, snapshot_ref['id'],
model_dict)
self.db.share_snapshot_instance_update(
context, snapshot_instance_id, model_dict)
except Exception:
with excutils.save_and_reraise_exception():
self.db.share_snapshot_update(
self.db.share_snapshot_instance_update(
context,
snapshot_ref['id'],
snapshot_instance_id,
{'status': constants.STATUS_ERROR})
self.db.share_snapshot_update(context,
snapshot_ref['id'],
{'status': constants.STATUS_AVAILABLE,
'progress': '100%'})
self.db.share_snapshot_instance_update(
context,
snapshot_instance_id,
{'status': constants.STATUS_AVAILABLE,
'progress': '100%'}
)
return snapshot_id
def delete_snapshot(self, context, snapshot_id):
@ -634,6 +664,10 @@ class ShareManager(manager.SchedulerDependentManager):
share_server = self._get_share_server(context,
snapshot_ref['share'])
snapshot_instance = self.db.share_snapshot_instance_get(
context, snapshot_ref.instance['id'], with_share_data=True
)
snapshot_instance_id = snapshot_instance['id']
if context.project_id != snapshot_ref['project_id']:
project_id = snapshot_ref['project_id']
@ -641,18 +675,18 @@ class ShareManager(manager.SchedulerDependentManager):
project_id = context.project_id
try:
self.driver.delete_snapshot(context, snapshot_ref,
self.driver.delete_snapshot(context, snapshot_instance,
share_server=share_server)
except exception.ShareSnapshotIsBusy:
self.db.share_snapshot_update(
self.db.share_snapshot_instance_update(
context,
snapshot_ref['id'],
snapshot_instance_id,
{'status': constants.STATUS_AVAILABLE})
except Exception:
with excutils.save_and_reraise_exception():
self.db.share_snapshot_update(
self.db.share_snapshot_instance_update(
context,
snapshot_ref['id'],
snapshot_instance_id,
{'status': constants.STATUS_ERROR_DELETING})
else:
self.db.share_snapshot_destroy(context, snapshot_id)
@ -675,7 +709,9 @@ class ShareManager(manager.SchedulerDependentManager):
share_server = self._get_share_server(context,
share_ref)
if access_ref['state'] == access_ref.STATE_NEW:
self.driver.allow_access(context, share_ref, access_ref,
share_instance = self._get_share_instance(context, share_ref)
self.driver.allow_access(context, share_instance,
access_ref,
share_server=share_server)
self.db.share_access_update(
context, access_id, {'state': access_ref.STATE_ACTIVE})
@ -694,8 +730,9 @@ class ShareManager(manager.SchedulerDependentManager):
def _deny_access(self, context, access_ref, share_ref, share_server):
access_id = access_ref['id']
share_instance = self._get_share_instance(context, share_ref)
try:
self.driver.deny_access(context, share_ref, access_ref,
self.driver.deny_access(context, share_instance, access_ref,
share_server=share_server)
except Exception:
with excutils.save_and_reraise_exception():
@ -929,12 +966,13 @@ class ShareManager(manager.SchedulerDependentManager):
def extend_share(self, context, share_id, new_size, reservations):
context = context.elevated()
share = self.db.share_get(context, share_id)
share_instance = self._get_share_instance(context, share)
share_server = self._get_share_server(context, share)
project_id = share['project_id']
try:
self.driver.extend_share(
share, new_size, share_server=share_server)
share_instance, new_size, share_server=share_server)
except Exception as e:
LOG.exception(_LE("Extend share failed."), resource=share)
@ -963,6 +1001,7 @@ class ShareManager(manager.SchedulerDependentManager):
def shrink_share(self, context, share_id, new_size):
context = context.elevated()
share = self.db.share_get(context, share_id)
share_instance = self._get_share_instance(context, share)
share_server = self._get_share_server(context, share)
project_id = share['project_id']
new_size = int(new_size)
@ -987,7 +1026,7 @@ class ShareManager(manager.SchedulerDependentManager):
try:
self.driver.shrink_share(
share, new_size, share_server=share_server)
share_instance, new_size, share_server=share_server)
# NOTE(u_glide): Replace following except block by error notification
# when Manila has such mechanism. It's possible because drivers
# shouldn't shrink share when this validation error occurs.

View File

@ -26,8 +26,7 @@ def stub_share(id, **kwargs):
'id': id,
'share_proto': 'FAKEPROTO',
'export_location': 'fake_location',
'export_locations': [{'path': 'fake_location'},
{'path': 'fake_location2'}],
'export_locations': ['fake_location', 'fake_location2'],
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',

View File

@ -206,7 +206,7 @@ class ShareNetworkAPITest(test.TestCase):
share_nw['share_servers'] = ['foo', 'bar']
self.mock_object(db_api, 'share_network_get',
mock.Mock(return_value=share_nw))
self.mock_object(db_api, 'share_get_all_by_share_network',
self.mock_object(db_api, 'share_instances_get_all_by_share_network',
mock.Mock(return_value=[]))
self.mock_object(self.controller.share_rpcapi, 'delete_share_server')
self.mock_object(db_api, 'share_network_delete')
@ -215,8 +215,9 @@ class ShareNetworkAPITest(test.TestCase):
db_api.share_network_get.assert_called_once_with(
self.req.environ['manila.context'], share_nw['id'])
db_api.share_get_all_by_share_network.assert_called_once_with(
self.req.environ['manila.context'], share_nw['id'])
db_api.share_instances_get_all_by_share_network.\
assert_called_once_with(self.req.environ['manila.context'],
share_nw['id'])
self.controller.share_rpcapi.delete_share_server.assert_has_calls([
mock.call(self.req.environ['manila.context'], 'foo'),
mock.call(self.req.environ['manila.context'], 'bar')])
@ -239,7 +240,7 @@ class ShareNetworkAPITest(test.TestCase):
share_nw['share_servers'] = ['foo', 'bar']
self.mock_object(db_api, 'share_network_get',
mock.Mock(return_value=share_nw))
self.mock_object(db_api, 'share_get_all_by_share_network',
self.mock_object(db_api, 'share_instances_get_all_by_share_network',
mock.Mock(return_value=[]))
self.mock_object(self.controller.share_rpcapi, 'delete_share_server')
self.mock_object(db_api, 'share_network_delete')
@ -251,8 +252,11 @@ class ShareNetworkAPITest(test.TestCase):
db_api.share_network_get.assert_called_once_with(
self.req.environ['manila.context'], share_nw['id'])
db_api.share_get_all_by_share_network.assert_called_once_with(
self.req.environ['manila.context'], share_nw['id'])
db_api.share_instances_get_all_by_share_network.\
assert_called_once_with(self.req.environ['manila.context'],
share_nw['id'])
self.controller.share_rpcapi.delete_share_server.assert_has_calls([
mock.call(self.req.environ['manila.context'], 'foo'),
mock.call(self.req.environ['manila.context'], 'bar')])
@ -265,7 +269,7 @@ class ShareNetworkAPITest(test.TestCase):
share_nw = fake_share_network.copy()
self.mock_object(db_api, 'share_network_get',
mock.Mock(return_value=share_nw))
self.mock_object(db_api, 'share_get_all_by_share_network',
self.mock_object(db_api, 'share_instances_get_all_by_share_network',
mock.Mock(return_value=['foo', 'bar']))
self.assertRaises(webob_exc.HTTPConflict,
@ -275,8 +279,9 @@ class ShareNetworkAPITest(test.TestCase):
db_api.share_network_get.assert_called_once_with(
self.req.environ['manila.context'], share_nw['id'])
db_api.share_get_all_by_share_network.assert_called_once_with(
self.req.environ['manila.context'], share_nw['id'])
db_api.share_instances_get_all_by_share_network.\
assert_called_once_with(self.req.environ['manila.context'],
share_nw['id'])
def test_show_nominal(self):
share_nw = 'fake network id'

View File

@ -82,6 +82,7 @@ class GenericDatabaseAPITestCase(test.TestCase):
self.ctxt, share_access.id)
@ddt.ddt
class ShareDatabaseAPITestCase(test.TestCase):
def setUp(self):
@ -90,29 +91,84 @@ class ShareDatabaseAPITestCase(test.TestCase):
self.ctxt = context.get_admin_context()
def test_share_filter_by_host_with_pools(self):
shares = [[db_api.share_create(self.ctxt, {'host': value})
for value in ('foo', 'foo#pool0')]]
share_instances = [[
db_api.share_create(self.ctxt, {'host': value}).instance
for value in ('foo', 'foo#pool0')]]
db_utils.create_share()
self._assertEqualListsOfObjects(shares[0],
db_api.share_get_all_by_host(
self._assertEqualListsOfObjects(share_instances[0],
db_api.share_instances_get_all_by_host(
self.ctxt, 'foo'),
ignored_keys=['share_type',
'share_type_id',
'export_locations'])
def test_share_filter_all_by_host_with_pools_multiple_hosts(self):
shares = [[db_api.share_create(self.ctxt, {'host': value})
for value in ('foo', 'foo#pool0', 'foo', 'foo#pool1')]]
share_instances = [[
db_api.share_create(self.ctxt, {'host': value}).instance
for value in ('foo', 'foo#pool0', 'foo', 'foo#pool1')]]
db_utils.create_share()
self._assertEqualListsOfObjects(shares[0],
db_api.share_get_all_by_host(
self._assertEqualListsOfObjects(share_instances[0],
db_api.share_instances_get_all_by_host(
self.ctxt, 'foo'),
ignored_keys=['share_type',
'share_type_id',
'export_locations'])
def test_share_filter_all_by_share_server(self):
share_network = db_utils.create_share_network()
share_server = db_utils.create_share_server(
share_network_id=share_network['id'])
share = db_utils.create_share(share_server_id=share_server['id'],
share_network_id=share_network['id'])
actual_result = db_api.share_get_all_by_share_server(
self.ctxt, share_server['id'])
self.assertEqual(1, len(actual_result))
self.assertEqual(share['id'], actual_result[0].id)
@ddt.data('host', 'availability_zone')
def test_share_get_all_sort_by_share_instance_fields(self, sort_key):
shares = [db_utils.create_share(**{sort_key: n, 'size': 1})
for n in ('test1', 'test2')]
actual_result = db_api.share_get_all(
self.ctxt, sort_key=sort_key, sort_dir='desc')
self.assertEqual(2, len(actual_result))
self.assertEqual(shares[0]['id'], actual_result[1]['id'])
class ShareSnapshotDatabaseAPITestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(ShareSnapshotDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_create(self):
share = db_utils.create_share(size=1)
values = {
'share_id': share['id'],
'size': share['size'],
'user_id': share['user_id'],
'project_id': share['project_id'],
'status': constants.STATUS_CREATING,
'progress': '0%',
'share_size': share['size'],
'display_name': 'fake',
'display_description': 'fake',
'share_proto': share['share_proto']
}
actual_result = db_api.share_snapshot_create(
self.ctxt, values, create_snapshot_instance=True)
self.assertEqual(1, len(actual_result.instances))
self.assertSubDictMatch(values, actual_result.to_dict())
class ShareExportLocationsDatabaseAPITestCase(test.TestCase):
@ -127,10 +183,10 @@ class ShareExportLocationsDatabaseAPITestCase(test.TestCase):
update_locations = ['fake4/4', 'fake2/2', 'fake3/3']
# add initial locations
db_api.share_export_locations_update(self.ctxt, share['id'],
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
initial_locations, False)
# update locations
db_api.share_export_locations_update(self.ctxt, share['id'],
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
update_locations, True)
actual_result = db_api.share_export_locations_get(self.ctxt,
share['id'])
@ -142,7 +198,7 @@ class ShareExportLocationsDatabaseAPITestCase(test.TestCase):
share = db_utils.create_share()
initial_location = 'fake1/1/'
db_api.share_export_locations_update(self.ctxt, share['id'],
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
initial_location, False)
actual_result = db_api.share_export_locations_get(self.ctxt,
share['id'])
@ -286,7 +342,7 @@ class ShareNetworkDatabaseAPITestCase(BaseDatabaseAPITestCase):
self.share_nw_dict)
self._check_fields(expected=self.share_nw_dict, actual=result)
self.assertEqual(len(result['shares']), 0)
self.assertEqual(len(result['share_instances']), 0)
self.assertEqual(len(result['security_services']), 0)
def test_create_two_networks_in_different_tenants(self):
@ -325,23 +381,29 @@ class ShareNetworkDatabaseAPITestCase(BaseDatabaseAPITestCase):
self.share_nw_dict['id'])
self._check_fields(expected=self.share_nw_dict, actual=result)
self.assertEqual(len(result['shares']), 0)
self.assertEqual(len(result['share_instances']), 0)
self.assertEqual(len(result['security_services']), 0)
@ddt.data([{'id': 'fake share id1'}],
[{'id': 'fake share id1'}, {'id': 'fake share id2'}],)
def test_get_with_shares(self, shares):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
share_instances = []
for share in shares:
share.update({'share_network_id': self.share_nw_dict['id']})
db_api.share_create(self.fake_context, share)
share_instances.append(
db_api.share_create(self.fake_context, share).instance
)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['shares']), len(shares))
for index, share in enumerate(shares):
self._check_fields(expected=share, actual=result['shares'][index])
self.assertEqual(len(result['share_instances']), len(shares))
for index, share_instance in enumerate(share_instances):
self.assertEqual(
share_instance['share_network_id'],
result['share_instances'][index]['share_network_id']
)
@ddt.data([{'id': 'fake security service id1', 'type': 'fake type'}],
[{'id': 'fake security service id1', 'type': 'fake type'},
@ -587,7 +649,7 @@ class ShareNetworkDatabaseAPITestCase(BaseDatabaseAPITestCase):
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['shares']), 0)
self.assertEqual(len(result['share_instances']), 0)
@ddt.ddt

View File

@ -545,10 +545,12 @@ class ShareAPITestCase(test.TestCase):
driver_options = {}
date = datetime.datetime(1, 1, 1, 1, 1, 1)
timeutils.utcnow.return_value = date
share = fake_share('fakeid',
user_id=self.context.user_id,
project_id=self.context.project_id,
status=constants.STATUS_CREATING)
fake_share_data = {
'id': 'fakeid',
'status': constants.STATUS_CREATING,
}
share = db_driver.share_create(self.context, fake_share_data)
self.mock_object(db_driver, 'share_create',
mock.Mock(return_value=share))
self.mock_object(db_driver, 'share_export_locations_update')
@ -573,7 +575,7 @@ class ShareAPITestCase(test.TestCase):
share_data)
db_driver.share_get.assert_called_once_with(self.context, share['id'])
db_driver.share_export_locations_update.assert_called_once_with(
self.context, share['id'], export_location
self.context, share.instance['id'], export_location
)
self.share_rpcapi.manage_share.assert_called_once_with(
self.context, share, driver_options)
@ -586,7 +588,8 @@ class ShareAPITestCase(test.TestCase):
'share_proto': 'fake',
}
driver_options = {}
share = fake_share('fakeid')
fake_share_data = {'id': 'fakeid'}
share = db_driver.share_create(self.context, fake_share_data)
self.mock_object(db_driver, 'share_update',
mock.Mock(return_value=share))
self.mock_object(db_driver, 'share_get',
@ -604,7 +607,7 @@ class ShareAPITestCase(test.TestCase):
self.share_rpcapi.manage_share.assert_called_once_with(
self.context, mock.ANY, driver_options)
db_driver.share_export_locations_update.assert_called_once_with(
self.context, share['id'], mock.ANY
self.context, share.instance['id'], mock.ANY
)
def test_manage_duplicate(self):

View File

@ -84,13 +84,15 @@ class ShareManagerTestCase(test.TestCase):
self.assertTrue(import_mock.called)
def test_init_host_with_no_shares(self):
self.mock_object(self.share_manager.db, 'share_get_all_by_host',
self.mock_object(self.share_manager.db,
'share_instances_get_all_by_host',
mock.Mock(return_value=[]))
self.share_manager.init_host()
self.share_manager.db.share_get_all_by_host.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.share_manager.host)
self.share_manager.db.share_instances_get_all_by_host.\
assert_called_once_with(utils.IsAMatcher(context.RequestContext),
self.share_manager.host)
self.share_manager.driver.do_setup.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
self.share_manager.driver.check_for_setup_error.\
@ -117,13 +119,15 @@ class ShareManagerTestCase(test.TestCase):
fake_export_locations = ['fake/path/1', 'fake/path']
share_server = 'fake_share_server_type_does_not_matter'
self.mock_object(self.share_manager.db,
'share_get_all_by_host',
'share_instances_get_all_by_host',
mock.Mock(return_value=shares))
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(side_effect=shares))
self.mock_object(self.share_manager.db,
'share_export_locations_update')
self.mock_object(self.share_manager.driver, 'ensure_share',
mock.Mock(return_value=fake_export_locations))
self.mock_object(self.share_manager, '_ensure_share_has_pool')
self.mock_object(self.share_manager, '_ensure_share_instance_has_pool')
self.mock_object(self.share_manager, '_get_share_server',
mock.Mock(return_value=share_server))
self.mock_object(self.share_manager, 'publish_service_capabilities',
@ -138,8 +142,9 @@ class ShareManagerTestCase(test.TestCase):
self.share_manager.init_host()
# verification of call
self.share_manager.db.share_get_all_by_host.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.share_manager.host)
self.share_manager.db.share_instances_get_all_by_host.\
assert_called_once_with(utils.IsAMatcher(context.RequestContext),
self.share_manager.host)
exports_update = self.share_manager.db.share_export_locations_update
exports_update.assert_called_once_with(
mock.ANY, 'fake_id_1', fake_export_locations)
@ -147,7 +152,7 @@ class ShareManagerTestCase(test.TestCase):
utils.IsAMatcher(context.RequestContext))
self.share_manager.driver.check_for_setup_error.\
assert_called_once_with()
self.share_manager._ensure_share_has_pool.\
self.share_manager._ensure_share_instance_has_pool.\
assert_called_once_with(utils.IsAMatcher(context.RequestContext),
shares[0])
self.share_manager._get_share_server.assert_called_once_with(
@ -182,11 +187,13 @@ class ShareManagerTestCase(test.TestCase):
]
share_server = 'fake_share_server_type_does_not_matter'
self.mock_object(self.share_manager.db,
'share_get_all_by_host',
'share_instances_get_all_by_host',
mock.Mock(return_value=shares))
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(side_effect=[shares[0], shares[2]]))
self.mock_object(self.share_manager.driver, 'ensure_share',
mock.Mock(side_effect=raise_exception))
self.mock_object(self.share_manager, '_ensure_share_has_pool')
self.mock_object(self.share_manager, '_ensure_share_instance_has_pool')
self.mock_object(self.share_manager, '_get_share_server',
mock.Mock(return_value=share_server))
self.mock_object(self.share_manager, 'publish_service_capabilities')
@ -197,12 +204,13 @@ class ShareManagerTestCase(test.TestCase):
self.share_manager.init_host()
# verification of call
self.share_manager.db.share_get_all_by_host.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.share_manager.host)
self.share_manager.db.share_instances_get_all_by_host.\
assert_called_once_with(utils.IsAMatcher(context.RequestContext),
self.share_manager.host)
self.share_manager.driver.do_setup.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
self.share_manager.driver.check_for_setup_error.assert_called_with()
self.share_manager._ensure_share_has_pool.assert_has_calls([
self.share_manager._ensure_share_instance_has_pool.assert_has_calls([
mock.call(utils.IsAMatcher(context.RequestContext), shares[0]),
mock.call(utils.IsAMatcher(context.RequestContext), shares[2]),
])
@ -221,7 +229,7 @@ class ShareManagerTestCase(test.TestCase):
utils.IsAMatcher(context.RequestContext))
manager.LOG.info.assert_called_once_with(
mock.ANY,
{'name': shares[1]['name'], 'status': shares[1]['status']},
{'id': shares[1]['id'], 'status': shares[1]['status']},
)
def test_init_host_with_exception_on_rule_access_allow(self):
@ -245,11 +253,13 @@ class ShareManagerTestCase(test.TestCase):
]
share_server = 'fake_share_server_type_does_not_matter'
self.mock_object(self.share_manager.db,
'share_get_all_by_host',
'share_instances_get_all_by_host',
mock.Mock(return_value=shares))
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(side_effect=[shares[0], shares[2]]))
self.mock_object(self.share_manager.driver, 'ensure_share',
mock.Mock(return_value=None))
self.mock_object(self.share_manager, '_ensure_share_has_pool')
self.mock_object(self.share_manager, '_ensure_share_instance_has_pool')
self.mock_object(self.share_manager, '_get_share_server',
mock.Mock(return_value=share_server))
self.mock_object(self.share_manager, 'publish_service_capabilities')
@ -265,12 +275,13 @@ class ShareManagerTestCase(test.TestCase):
self.share_manager.init_host()
# verification of call
self.share_manager.db.share_get_all_by_host.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.share_manager.host)
self.share_manager.db.share_instances_get_all_by_host.\
assert_called_once_with(utils.IsAMatcher(context.RequestContext),
self.share_manager.host)
self.share_manager.driver.do_setup.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
self.share_manager.driver.check_for_setup_error.assert_called_with()
self.share_manager._ensure_share_has_pool.assert_has_calls([
self.share_manager._ensure_share_instance_has_pool.assert_has_calls([
mock.call(utils.IsAMatcher(context.RequestContext), shares[0]),
mock.call(utils.IsAMatcher(context.RequestContext), shares[2]),
])
@ -289,7 +300,7 @@ class ShareManagerTestCase(test.TestCase):
utils.IsAMatcher(context.RequestContext))
manager.LOG.info.assert_called_once_with(
mock.ANY,
{'name': shares[1]['name'], 'status': shares[1]['status']},
{'id': shares[1]['id'], 'status': shares[1]['status']},
)
self.share_manager.driver.allow_access.assert_has_calls([
mock.call(utils.IsAMatcher(context.RequestContext), shares[0],
@ -363,7 +374,7 @@ class ShareManagerTestCase(test.TestCase):
def test_create_delete_share_snapshot(self):
"""Test share's snapshot can be created and deleted."""
def _fake_create_snapshot(self, *args, **kwargs):
def _fake_create_snapshot(self, snapshot, **kwargs):
snapshot['progress'] = '99%'
return snapshot
@ -421,11 +432,11 @@ class ShareManagerTestCase(test.TestCase):
constants.STATUS_ERROR_DELETING,
db.share_snapshot_get(self.context, snapshot_id).status)
self.share_manager.driver.create_snapshot.assert_called_once_with(
self.context, utils.IsAMatcher(models.ShareSnapshot),
self.context, utils.IsAMatcher(models.ShareSnapshotInstance),
share_server=None)
self.share_manager.driver.delete_snapshot.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
utils.IsAMatcher(models.ShareSnapshot),
utils.IsAMatcher(models.ShareSnapshotInstance),
share_server=None)
def test_delete_share_if_busy(self):
@ -446,7 +457,7 @@ class ShareManagerTestCase(test.TestCase):
self.assertEqual(snap['status'], constants.STATUS_AVAILABLE)
self.share_manager.driver.delete_snapshot.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
utils.IsAMatcher(models.ShareSnapshot),
utils.IsAMatcher(models.ShareSnapshotInstance),
share_server=None)
def test_create_share_with_share_network_driver_not_handles_servers(self):
@ -493,7 +504,8 @@ class ShareManagerTestCase(test.TestCase):
share_id).id)
def test_create_share_with_share_network_server_creation_failed(self):
fake_share = {'id': 'fake_share_id', 'share_network_id': 'fake_sn_id'}
fake_share = db_utils.create_share(share_network_id='fake_sn_id',
size=1)
fake_server = {
'id': 'fake_srv_id',
'status': constants.STATUS_CREATING,
@ -533,11 +545,6 @@ class ShareManagerTestCase(test.TestCase):
db.share_server_create.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), mock.ANY)
db.share_update.assert_has_calls([
mock.call(
utils.IsAMatcher(context.RequestContext),
fake_share['id'],
{'share_server_id': fake_server['id']},
),
mock.call(
utils.IsAMatcher(context.RequestContext),
fake_share['id'],
@ -664,11 +671,11 @@ class ShareManagerTestCase(test.TestCase):
self.assertEqual(shr['status'], constants.STATUS_ERROR_DELETING)
self.share_manager.driver.create_share.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
utils.IsAMatcher(models.Share),
utils.IsAMatcher(models.ShareInstance),
share_server=None)
self.share_manager.driver.delete_share.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
utils.IsAMatcher(models.Share),
utils.IsAMatcher(models.ShareInstance),
share_server=None)
def test_provide_share_server_for_share_incompatible_servers(self):
@ -687,13 +694,13 @@ class ShareManagerTestCase(test.TestCase):
self.assertRaises(exception.ManilaException,
self.share_manager._provide_share_server_for_share,
self.context, "fake_id", share)
self.context, "fake_id", share.instance)
driver_mock = self.share_manager.driver
driver_method_mock = (
driver_mock.choose_share_server_compatible_with_share
)
driver_method_mock.assert_called_once_with(
self.context, [fake_share_server], share, snapshot=None)
self.context, [fake_share_server], share.instance, snapshot=None)
def test_provide_share_server_for_share_invalid_arguments(self):
self.assertRaises(ValueError,
@ -710,7 +717,7 @@ class ShareManagerTestCase(test.TestCase):
self.assertRaises(exception.ShareServerNotFound,
self.share_manager._provide_share_server_for_share,
self.context, "fake_id", share,
self.context, "fake_id", share.instance,
snapshot=fake_snapshot)
db.share_server_get.assert_called_once_with(
@ -726,7 +733,7 @@ class ShareManagerTestCase(test.TestCase):
self.assertRaises(exception.InvalidShareServer,
self.share_manager._provide_share_server_for_share,
self.context, "fake_id", share,
self.context, "fake_id", share.instance,
snapshot=fake_snapshot)
db.share_server_get.assert_called_once_with(
@ -851,7 +858,7 @@ class ShareManagerTestCase(test.TestCase):
self.share_manager.db.share_export_locations_update.\
assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
share_id, export_locations, delete=True)
share.instance['id'], export_locations, delete=True)
else:
self.assertFalse(
self.share_manager.db.share_export_locations_update.called)
@ -1396,18 +1403,16 @@ class ShareManagerTestCase(test.TestCase):
def test_ensure_share_has_pool_with_only_host(self):
fake_share = {
'status': constants.STATUS_AVAILABLE, 'host': 'host1', 'id': 1}
host = self.share_manager._ensure_share_has_pool(context.
get_admin_context(),
fake_share)
host = self.share_manager._ensure_share_instance_has_pool(
context.get_admin_context(), fake_share)
self.assertIsNone(host)
def test_ensure_share_has_pool_with_full_pool_name(self):
fake_share = {'host': 'host1#pool0', 'id': 1,
'status': constants.STATUS_AVAILABLE}
fake_share_expected_value = 'pool0'
host = self.share_manager._ensure_share_has_pool(context.
get_admin_context(),
fake_share)
host = self.share_manager._ensure_share_instance_has_pool(
context.get_admin_context(), fake_share)
self.assertEqual(fake_share_expected_value, host)
def test_ensure_share_has_pool_unable_to_fetch_share(self):
@ -1416,9 +1421,8 @@ class ShareManagerTestCase(test.TestCase):
with mock.patch.object(self.share_manager.driver, 'get_pool',
side_effect=Exception):
with mock.patch.object(manager, 'LOG') as mock_LOG:
self.share_manager._ensure_share_has_pool(context.
get_admin_context(),
fake_share)
self.share_manager._ensure_share_instance_has_pool(
context.get_admin_context(), fake_share)
self.assertEqual(1, mock_LOG.error.call_count)
def test__form_server_setup_info(self):
@ -1590,7 +1594,8 @@ class ShareManagerTestCase(test.TestCase):
self.assertTrue(manager._get_share_server.called)
manager.driver.extend_share.assert_called_once_with(
share, new_size, share_server=fake_share_server
utils.IsAMatcher(models.ShareInstance),
new_size, share_server=fake_share_server
)
quota.QUOTAS.commit.assert_called_once_with(
mock.ANY, reservations, project_id=share['project_id'])
@ -1643,7 +1648,9 @@ class ShareManagerTestCase(test.TestCase):
self.share_manager.shrink_share, self.context, share_id, new_size)
self.share_manager.driver.shrink_share.assert_called_once_with(
share, new_size, share_server=None)
utils.IsAMatcher(models.ShareInstance),
new_size, share_server=None
)
self.share_manager.db.share_update.assert_called_once_with(
mock.ANY, share_id, {'status': status}
)
@ -1676,7 +1683,8 @@ class ShareManagerTestCase(test.TestCase):
self.assertTrue(manager._get_share_server.called)
manager.driver.shrink_share.assert_called_once_with(
share, new_size, share_server=fake_share_server
utils.IsAMatcher(models.ShareInstance),
new_size, share_server=fake_share_server
)
quota.QUOTAS.commit.assert_called_once_with(
mock.ANY, mock.ANY, project_id=share['project_id'])