Merge "Add cast_rules_to_readonly to share instances"
commit 37f3060c0a
@@ -95,13 +95,14 @@ REST_API_VERSION_HISTORY = """
              'nondisruptive' to be mandatory as well. All previous
              migration_start APIs prior to this microversion are now
              unsupported.
    * 2.30 - Added cast_rules_to_readonly field to share_instances.

"""

# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
_MIN_API_VERSION = "2.0"
_MAX_API_VERSION = "2.29"
_MAX_API_VERSION = "2.30"
DEFAULT_API_VERSION = _MIN_API_VERSION
@@ -179,3 +179,7 @@ user documentation.
  and changed 'preserve_metadata', 'writable', 'nondisruptive' to be mandatory
  as well. All previous migration_start APIs prior to this microversion are now
  unsupported.

2.30
----
  Added cast_rules_to_readonly field to share_instances.
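As an aside (not part of this change): a client opts into the new field by requesting microversion 2.30 or later. A minimal sketch using python-requests, with placeholder endpoint, project ID, instance ID and token values:

```python
import requests

# Placeholders -- substitute real values for your deployment.
endpoint = "http://controller:8786/v2/<project_id>"
headers = {
    "X-Auth-Token": "<keystone-token>",
    # Ask for microversion 2.30 so the new field is included.
    "X-OpenStack-Manila-API-Version": "2.30",
}

resp = requests.get(endpoint + "/share_instances/<share_instance_id>",
                    headers=headers)
# In an administrative context the detailed view now carries the new field.
print(resp.json()["share_instance"].get("cast_rules_to_readonly"))
```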
@@ -23,6 +23,7 @@ class ViewBuilder(common.ViewBuilder):
        "add_access_rules_status_field",
        "add_replication_fields",
        "add_share_type_field",
        "add_cast_rules_to_readonly_field",
    ]

    def detail_list(self, request, instances):
@@ -83,3 +84,9 @@ class ViewBuilder(common.ViewBuilder):
    @common.ViewBuilder.versioned_method("2.22")
    def add_share_type_field(self, context, instance_dict, share_instance):
        instance_dict['share_type_id'] = share_instance.get('share_type_id')

    @common.ViewBuilder.versioned_method("2.30")
    def add_cast_rules_to_readonly_field(self, context, instance_dict,
                                         share_instance):
        instance_dict['cast_rules_to_readonly'] = share_instance.get(
            'cast_rules_to_readonly', False)
@@ -22,6 +22,10 @@ class ReplicationViewBuilder(common.ViewBuilder):
    _collection_name = 'share_replicas'
    _collection_links = 'share_replica_links'

    _detail_version_modifiers = [
        "add_cast_rules_to_readonly_field",
    ]

    def summary_list(self, request, replicas):
        """Summary view of a list of replicas."""
        return self._list_view(self.summary, request, replicas)
@@ -60,6 +64,7 @@ class ReplicationViewBuilder(common.ViewBuilder):
        if context.is_admin:
            replica_dict['share_server_id'] = replica.get('share_server_id')

        self.update_versioned_resource_dict(request, replica_dict, replica)
        return {'share_replica': replica_dict}

    def _list_view(self, func, request, replicas):
@@ -77,3 +82,9 @@ class ReplicationViewBuilder(common.ViewBuilder):
        replicas_dict[self._collection_links] = replica_links

        return replicas_dict

    @common.ViewBuilder.versioned_method("2.30")
    def add_cast_rules_to_readonly_field(self, context, replica_dict, replica):
        if context.is_admin:
            replica_dict['cast_rules_to_readonly'] = replica.get(
                'cast_rules_to_readonly', False)
@@ -169,6 +169,10 @@ REPLICA_STATE_ACTIVE = 'active'
REPLICA_STATE_IN_SYNC = 'in_sync'
REPLICA_STATE_OUT_OF_SYNC = 'out_of_sync'

REPLICATION_TYPE_READABLE = 'readable'
REPLICATION_TYPE_WRITABLE = 'writable'
REPLICATION_TYPE_DR = 'dr'


class ExtraSpecs(object):
@@ -0,0 +1,100 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""add_cast_rules_to_readonly_to_share_instances

Revision ID: e9f79621d83f
Revises: 54667b9cade7
Create Date: 2016-12-01 04:06:33.115054

"""

# revision identifiers, used by Alembic.
revision = 'e9f79621d83f'
down_revision = '54667b9cade7'

from alembic import op
from oslo_log import log
import sqlalchemy as sa

from manila.common import constants
from manila.db.migrations import utils
from manila.i18n import _LI

LOG = log.getLogger(__name__)


def upgrade():

    LOG.info(_LI("Adding cast_rules_to_readonly column to share instances."))

    op.add_column('share_instances',
                  sa.Column('cast_rules_to_readonly', sa.Boolean,
                            default=False))

    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    # First, set the value of ``cast_rules_to_readonly`` in every existing
    # share instance to False
    op.execute(
        share_instances_table.update().values({
            'cast_rules_to_readonly': False,
        })
    )

    # Set the value of ``cast_rules_to_readonly`` to True for secondary
    # replicas in 'readable' replication relationships
    replicated_shares_query = (
        shares_table.select()
        .where(shares_table.c.deleted == 'False')
        .where(shares_table.c.replication_type
               == constants.REPLICATION_TYPE_READABLE)
    )

    for replicated_share in connection.execute(replicated_shares_query):
        # NOTE (gouthamr): Only secondary replicas that are not undergoing a
        # 'replication_change' (promotion to active) are considered. When the
        # replication change is complete, the share manager will take care
        # of ensuring the correct values for the replicas that were involved
        # in the transaction.
        secondary_replicas_query = (
            share_instances_table.select().where(
                share_instances_table.c.deleted == 'False').where(
                share_instances_table.c.replica_state
                != constants.REPLICA_STATE_ACTIVE).where(
                share_instances_table.c.status
                != constants.STATUS_REPLICATION_CHANGE).where(
                replicated_share['id'] == share_instances_table.c.share_id
            )
        )
        for replica in connection.execute(secondary_replicas_query):
            op.execute(
                share_instances_table.update().where(
                    share_instances_table.c.id == replica.id
                ).values({
                    'cast_rules_to_readonly': True,
                })
            )

    op.alter_column('share_instances',
                    'cast_rules_to_readonly',
                    existing_type=sa.Boolean,
                    existing_server_default=False,
                    nullable=False)


def downgrade():
    LOG.info(_LI("Removing cast_rules_to_readonly column from share "
                 "instances."))
    op.drop_column('share_instances', 'cast_rules_to_readonly')
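An aside, not part of the commit: the per-share loop in upgrade() above is equivalent to a single bulk UPDATE driven by a subquery. A sketch reusing the same `shares_table`, `share_instances_table` and `constants` bindings from this migration (SQLAlchemy 1.x-style select):

```python
# Illustrative only -- equivalent bulk form of the loop above.
readable_share_ids = sa.select([shares_table.c.id]).where(
    shares_table.c.deleted == 'False').where(
    shares_table.c.replication_type == constants.REPLICATION_TYPE_READABLE)

op.execute(
    share_instances_table.update().where(
        share_instances_table.c.deleted == 'False').where(
        share_instances_table.c.replica_state !=
        constants.REPLICA_STATE_ACTIVE).where(
        share_instances_table.c.status !=
        constants.STATUS_REPLICATION_CHANGE).where(
        share_instances_table.c.share_id.in_(readable_share_ids)
    ).values({'cast_rules_to_readonly': True})
)
```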
@@ -335,7 +335,7 @@ class ShareInstance(BASE, ManilaBase):
    _proxified_properties = ('user_id', 'project_id', 'size',
                             'display_name', 'display_description',
                             'snapshot_id', 'share_proto', 'is_public',
                             'consistency_group_id',
                             'consistency_group_id', 'replication_type',
                             'source_cgsnapshot_member_id')

    def set_share_data(self, share):
@@ -377,6 +377,7 @@ class ShareInstance(BASE, ManilaBase):
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)
    replica_state = Column(String(255), nullable=True)
    cast_rules_to_readonly = Column(Boolean, default=False, nullable=False)
    share_type_id = Column(String(36), ForeignKey('share_types.id'),
                           nullable=True)
    availability_zone_id = Column(String(36),
@ -291,10 +291,10 @@ class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin):
|
||||
self._get_rules_to_send_to_driver(context, share_instance)
|
||||
)
|
||||
|
||||
if share_instance['status'] == constants.STATUS_MIGRATING:
|
||||
if share_instance['cast_rules_to_readonly']:
|
||||
# Ensure read/only semantics for a migrating instances
|
||||
access_rules_to_be_on_share = self._set_rules_to_readonly(
|
||||
add_rules, access_rules_to_be_on_share, share_instance)
|
||||
access_rules_to_be_on_share, share_instance)
|
||||
add_rules = []
|
||||
rules_to_be_removed_from_db = delete_rules
|
||||
delete_rules = []
|
||||
@ -433,29 +433,17 @@ class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin):
|
||||
share_instance_id=share_instance_id,
|
||||
conditionally_change=conditional_state_updates)
|
||||
|
||||
def _set_rules_to_readonly(self, add_rules, access_rules_to_be_on_share,
|
||||
share_instance):
|
||||
# NOTE(ganso): If the share instance is the source in a migration,
|
||||
# it should have all its rules cast to read-only.
|
||||
readonly_support = self.driver.configuration.safe_get(
|
||||
'migration_readonly_rules_support')
|
||||
# NOTE(ganso): If the backend supports read-only rules, then all
|
||||
# rules are cast to read-only here and passed to drivers.
|
||||
if readonly_support:
|
||||
@staticmethod
|
||||
def _set_rules_to_readonly(access_rules_to_be_on_share, share_instance):
|
||||
|
||||
LOG.debug("All access rules of share instance %s are being "
|
||||
"cast to read-only for a migration or because the "
|
||||
"instance is a readable replica.",
|
||||
share_instance['id'])
|
||||
|
||||
for rule in access_rules_to_be_on_share:
|
||||
rule['access_level'] = constants.ACCESS_LEVEL_RO
|
||||
LOG.debug("All access rules of share instance %s are being "
|
||||
"cast to read-only for migration.",
|
||||
share_instance['id'])
|
||||
# NOTE(ganso): If the backend does not support read-only rules, we
|
||||
# will remove all access to the share and have only the access
|
||||
# requested by the Data Service for migration as RW.
|
||||
else:
|
||||
LOG.debug("All previously existing access rules of share "
|
||||
"instance %s are being removed for migration, as "
|
||||
"driver does not support read-only mode rules.",
|
||||
share_instance['id'])
|
||||
access_rules_to_be_on_share = add_rules
|
||||
|
||||
return access_rules_to_be_on_share
|
||||
|
||||
def _get_rules_to_send_to_driver(self, context, share_instance):
|
||||
|
@@ -361,7 +361,7 @@ class API(base.Base):
    def create_share_instance_and_get_request_spec(
            self, context, share, availability_zone=None,
            consistency_group=None, host=None, share_network_id=None,
            share_type_id=None):
            share_type_id=None, cast_rules_to_readonly=False):

        availability_zone_id = None
        if availability_zone:
@@ -380,6 +380,7 @@ class API(base.Base):
                'host': host if host else '',
                'availability_zone_id': availability_zone_id,
                'share_type_id': share_type_id,
                'cast_rules_to_readonly': cast_rules_to_readonly,
            }
        )

@@ -449,11 +450,17 @@ class API(base.Base):
                    "state.")
            raise exception.ReplicationException(reason=msg % share['id'])

        if share['replication_type'] == constants.REPLICATION_TYPE_READABLE:
            cast_rules_to_readonly = True
        else:
            cast_rules_to_readonly = False

        request_spec, share_replica = (
            self.create_share_instance_and_get_request_spec(
                context, share, availability_zone=availability_zone,
                share_network_id=share_network_id,
                share_type_id=share['instance']['share_type_id']))
                share_type_id=share['instance']['share_type_id'],
                cast_rules_to_readonly=cast_rules_to_readonly))

        all_replicas = self.db.share_replicas_get_all_by_share(
            context, share['id'])
@@ -105,9 +105,12 @@ share_opts = [
    cfg.BoolOpt(
        'migration_readonly_rules_support',
        default=True,
        deprecated_for_removal=True,
        deprecated_reason="All drivers are now required to support read-only "
                          "access rules.",
        deprecated_name='migration_readonly_support',
        help="Specify whether read only access rule mode is supported in this "
             "backend."),
             "backend. Obsolete."),
    cfg.StrOpt(
        "admin_network_config_group",
        help="If share driver requires to setup admin network for share, then "
@ -818,6 +818,9 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self._restore_migrating_snapshots_status(
|
||||
context, src_share_instance['id'])
|
||||
|
||||
# NOTE(ganso): Read only access rules and share instance status
|
||||
# will be restored in migration_start's except block.
|
||||
|
||||
# NOTE(ganso): For now source share instance should remain in
|
||||
# migrating status for host-assisted migration.
|
||||
msg = _("Driver-assisted migration of share %s "
|
||||
@ -829,9 +832,13 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
|
||||
def _cast_access_rules_to_readonly(self, context, src_share_instance,
|
||||
share_server):
|
||||
self.db.share_instance_update(
|
||||
context, src_share_instance['id'],
|
||||
{'cast_rules_to_readonly': True})
|
||||
|
||||
# Set all 'applying' or 'active' rules to 'queued_to_apply'. Since the
|
||||
# share has a status set to 'migrating', this will render rules
|
||||
# read/only.
|
||||
# share instance has its cast_rules_to_readonly attribute set to True,
|
||||
# existing rules will be cast to read/only.
|
||||
acceptable_past_states = (constants.ACCESS_STATE_APPLYING,
|
||||
constants.ACCESS_STATE_ACTIVE)
|
||||
new_state = constants.ACCESS_STATE_QUEUED_TO_APPLY
|
||||
@ -848,6 +855,29 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context, self.db, src_share_instance,
|
||||
self.migration_wait_access_rules_timeout)
|
||||
|
||||
def _reset_read_only_access_rules(
|
||||
self, context, share, share_instance_id, supress_errors=True,
|
||||
helper=None):
|
||||
|
||||
instance = self.db.share_instance_get(context, share_instance_id,
|
||||
with_share_data=True)
|
||||
if instance['cast_rules_to_readonly']:
|
||||
update = {'cast_rules_to_readonly': False}
|
||||
|
||||
self.db.share_instance_update(
|
||||
context, share_instance_id, update)
|
||||
|
||||
share_server = self._get_share_server(context, instance)
|
||||
|
||||
if helper is None:
|
||||
helper = migration.ShareMigrationHelper(
|
||||
context, self.db, share, self.access_helper)
|
||||
|
||||
if supress_errors:
|
||||
helper.cleanup_access_rules(instance, share_server)
|
||||
else:
|
||||
helper.revert_access_rules(instance, share_server)
|
||||
|
||||
@periodic_task.periodic_task(
|
||||
spacing=CONF.migration_driver_continue_update_interval)
|
||||
@utils.require_driver_initialized
|
||||
@ -922,9 +952,12 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context, dest_share_instance['id'])
|
||||
self._restore_migrating_snapshots_status(
|
||||
context, src_share_instance['id'])
|
||||
self._reset_read_only_access_rules(
|
||||
context, share, src_share_instance_id)
|
||||
self.db.share_instance_update(
|
||||
context, src_share_instance['id'],
|
||||
context, src_share_instance_id,
|
||||
{'status': constants.STATUS_AVAILABLE})
|
||||
|
||||
self.db.share_update(
|
||||
context, instance['share_id'],
|
||||
{'task_state': constants.TASK_STATE_MIGRATION_ERROR})
|
||||
@ -1049,9 +1082,12 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.db.share_update(
|
||||
context, share_id,
|
||||
{'task_state': constants.TASK_STATE_MIGRATION_ERROR})
|
||||
self._reset_read_only_access_rules(
|
||||
context, share_ref, share_instance['id'])
|
||||
self.db.share_instance_update(
|
||||
context, share_instance['id'],
|
||||
{'status': constants.STATUS_AVAILABLE})
|
||||
|
||||
raise exception.ShareMigrationFailed(reason=msg)
|
||||
|
||||
def _migration_start_host_assisted(
|
||||
@ -1082,7 +1118,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
msg = _("Failed to create instance on destination "
|
||||
"backend during migration of share %s.") % share['id']
|
||||
LOG.exception(msg)
|
||||
helper.cleanup_access_rules(src_share_instance, share_server)
|
||||
raise exception.ShareMigrationFailed(reason=msg)
|
||||
|
||||
ignore_list = self.driver.configuration.safe_get(
|
||||
@ -1111,7 +1146,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
"share %s.") % share['id']
|
||||
LOG.exception(msg)
|
||||
helper.cleanup_new_instance(dest_share_instance)
|
||||
helper.cleanup_access_rules(src_share_instance, share_server)
|
||||
raise exception.ShareMigrationFailed(reason=msg)
|
||||
|
||||
def _migration_complete_driver(
|
||||
@ -1277,8 +1311,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
dest_share_instance = self.db.share_instance_get(
|
||||
context, dest_instance_id, with_share_data=True)
|
||||
|
||||
share_server = self._get_share_server(context, src_share_instance)
|
||||
|
||||
helper = migration.ShareMigrationHelper(
|
||||
context, self.db, share_ref, self.access_helper)
|
||||
|
||||
@ -1289,12 +1321,18 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
" completed successfully.") % share_ref['id']
|
||||
LOG.warning(msg)
|
||||
helper.cleanup_new_instance(dest_share_instance)
|
||||
|
||||
helper.cleanup_access_rules(src_share_instance, share_server)
|
||||
if task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED:
|
||||
cancelled = (
|
||||
task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED)
|
||||
supress_errors = True
|
||||
if cancelled:
|
||||
supress_errors = False
|
||||
self._reset_read_only_access_rules(
|
||||
context, share_ref, src_instance_id,
|
||||
supress_errors=supress_errors, helper=helper)
|
||||
self.db.share_instance_update(
|
||||
context, src_instance_id,
|
||||
{'status': constants.STATUS_AVAILABLE})
|
||||
if cancelled:
|
||||
self.db.share_update(
|
||||
context, share_ref['id'],
|
||||
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
|
||||
@ -1306,7 +1344,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
raise exception.ShareMigrationFailed(reason=msg)
|
||||
|
||||
elif task_state != constants.TASK_STATE_DATA_COPYING_COMPLETED:
|
||||
msg = _("Data copy for migration of share %s not completed"
|
||||
msg = _("Data copy for migration of share %s has not completed"
|
||||
" yet.") % share_ref['id']
|
||||
LOG.error(msg)
|
||||
raise exception.ShareMigrationFailed(reason=msg)
|
||||
@ -1318,7 +1356,13 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
"of share %s.") % share_ref['id']
|
||||
LOG.exception(msg)
|
||||
helper.cleanup_new_instance(dest_share_instance)
|
||||
helper.cleanup_access_rules(src_share_instance, share_server)
|
||||
self._reset_read_only_access_rules(
|
||||
context, share_ref, src_instance_id, helper=helper,
|
||||
supress_errors=True)
|
||||
self.db.share_instance_update(
|
||||
context, src_instance_id,
|
||||
{'status': constants.STATUS_AVAILABLE})
|
||||
|
||||
raise exception.ShareMigrationFailed(reason=msg)
|
||||
|
||||
self.db.share_update(
|
||||
@ -1362,19 +1406,18 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
dest_share_server = self._get_share_server(
|
||||
context, dest_share_instance)
|
||||
|
||||
if share_ref['task_state'] == (
|
||||
constants.TASK_STATE_DATA_COPYING_COMPLETED):
|
||||
|
||||
helper = migration.ShareMigrationHelper(
|
||||
context, self.db, share_ref, self.access_helper)
|
||||
|
||||
if share_ref['task_state'] == (
|
||||
constants.TASK_STATE_DATA_COPYING_COMPLETED):
|
||||
|
||||
self.db.share_instance_update(
|
||||
context, dest_share_instance['id'],
|
||||
{'status': constants.STATUS_INACTIVE})
|
||||
|
||||
helper.cleanup_new_instance(dest_share_instance)
|
||||
|
||||
helper.cleanup_access_rules(src_share_instance, share_server)
|
||||
else:
|
||||
|
||||
src_snap_instances, snapshot_mappings = (
|
||||
@ -1390,14 +1433,18 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self._restore_migrating_snapshots_status(
|
||||
context, src_share_instance['id'])
|
||||
|
||||
self._reset_read_only_access_rules(
|
||||
context, share_ref, src_instance_id, supress_errors=False,
|
||||
helper=helper)
|
||||
|
||||
self.db.share_instance_update(
|
||||
context, src_instance_id,
|
||||
{'status': constants.STATUS_AVAILABLE})
|
||||
|
||||
self.db.share_update(
|
||||
context, share_ref['id'],
|
||||
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
|
||||
|
||||
self.db.share_instance_update(
|
||||
context, src_share_instance['id'],
|
||||
{'status': constants.STATUS_AVAILABLE})
|
||||
|
||||
LOG.info(_LI("Share Migration for share %s"
|
||||
" was cancelled."), share_ref['id'])
|
||||
|
||||
@ -1820,6 +1867,11 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
share_replica = self.db.share_replica_get(
|
||||
context, share_replica_id, with_share_data=True,
|
||||
with_share_server=True)
|
||||
replication_type = share_replica['replication_type']
|
||||
if replication_type == constants.REPLICATION_TYPE_READABLE:
|
||||
ensure_old_active_replica_to_readonly = True
|
||||
else:
|
||||
ensure_old_active_replica_to_readonly = False
|
||||
share_server = self._get_share_server(context, share_replica)
|
||||
|
||||
# Get list of all replicas for share
|
||||
@ -1903,10 +1955,14 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.db.share_replica_update(
|
||||
context, share_replica['id'],
|
||||
{'status': constants.STATUS_AVAILABLE,
|
||||
'replica_state': constants.REPLICA_STATE_ACTIVE})
|
||||
'replica_state': constants.REPLICA_STATE_ACTIVE,
|
||||
'cast_rules_to_readonly': False})
|
||||
|
||||
self.db.share_replica_update(
|
||||
context, old_active_replica['id'],
|
||||
{'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC})
|
||||
{'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
|
||||
'cast_rules_to_readonly':
|
||||
ensure_old_active_replica_to_readonly})
|
||||
else:
|
||||
for updated_replica in updated_replica_list:
|
||||
updated_export_locs = updated_replica.get(
|
||||
@ -1921,9 +1977,13 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
'replica_state')
|
||||
updates = {}
|
||||
# Change the promoted replica's status from 'available' to
|
||||
# 'replication_change'.
|
||||
# 'replication_change' and unset cast_rules_to_readonly
|
||||
if updated_replica['id'] == share_replica['id']:
|
||||
updates['cast_rules_to_readonly'] = False
|
||||
updates['status'] = constants.STATUS_AVAILABLE
|
||||
elif updated_replica['id'] == old_active_replica['id']:
|
||||
updates['cast_rules_to_readonly'] = (
|
||||
ensure_old_active_replica_to_readonly)
|
||||
if updated_replica_state == constants.STATUS_ERROR:
|
||||
updates['status'] = constants.STATUS_ERROR
|
||||
if updated_replica_state is not None:
|
||||
|
@ -54,6 +54,7 @@ class ShareMigrationHelper(object):
|
||||
self.context = context
|
||||
self.access_helper = access_helper
|
||||
self.api = share_api.API()
|
||||
self.access_helper = access_helper
|
||||
|
||||
self.migration_create_delete_share_timeout = (
|
||||
CONF.migration_create_delete_share_timeout)
|
||||
@ -134,14 +135,6 @@ class ShareMigrationHelper(object):
|
||||
|
||||
def cleanup_access_rules(self, share_instance, share_server):
|
||||
|
||||
# NOTE(ganso): For the purpose of restoring the access rules, the share
|
||||
# instance status must not be "MIGRATING", else they would be cast to
|
||||
# read-only. We briefly change them to "INACTIVE" so they are restored
|
||||
# and after cleanup finishes, the invoking method will set the status
|
||||
# back to "AVAILABLE".
|
||||
self.db.share_instance_update(self.context, share_instance['id'],
|
||||
{'status': constants.STATUS_INACTIVE})
|
||||
|
||||
try:
|
||||
self.revert_access_rules(share_instance, share_server)
|
||||
except Exception:
|
||||
|
@ -1716,3 +1716,125 @@ class RestoreStateToShareInstanceAccessMap(BaseMigrationChecks):
|
||||
self.test_case.assertEqual(
|
||||
constants.STATUS_OUT_OF_SYNC,
|
||||
instance['access_rules_status'])
|
||||
|
||||
|
||||
@map_to_migration('e9f79621d83f')
|
||||
class AddCastRulesToReadonlyToInstances(BaseMigrationChecks):
|
||||
|
||||
share_type = {
|
||||
'id': uuidutils.generate_uuid(),
|
||||
}
|
||||
|
||||
shares = [
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'replication_type': constants.REPLICATION_TYPE_READABLE,
|
||||
},
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'replication_type': constants.REPLICATION_TYPE_READABLE,
|
||||
},
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'replication_type': constants.REPLICATION_TYPE_WRITABLE,
|
||||
},
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
},
|
||||
]
|
||||
share_ids = [x['id'] for x in shares]
|
||||
|
||||
correct_instance = {
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'share_id': share_ids[1],
|
||||
'replica_state': constants.REPLICA_STATE_IN_SYNC,
|
||||
'status': constants.STATUS_AVAILABLE,
|
||||
'share_type_id': share_type['id'],
|
||||
}
|
||||
|
||||
instances = [
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'share_id': share_ids[0],
|
||||
'replica_state': constants.REPLICA_STATE_ACTIVE,
|
||||
'status': constants.STATUS_AVAILABLE,
|
||||
'share_type_id': share_type['id'],
|
||||
},
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'share_id': share_ids[0],
|
||||
'replica_state': constants.REPLICA_STATE_IN_SYNC,
|
||||
'status': constants.STATUS_REPLICATION_CHANGE,
|
||||
'share_type_id': share_type['id'],
|
||||
},
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'share_id': share_ids[1],
|
||||
'replica_state': constants.REPLICA_STATE_ACTIVE,
|
||||
'status': constants.STATUS_REPLICATION_CHANGE,
|
||||
'share_type_id': share_type['id'],
|
||||
},
|
||||
correct_instance,
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'share_id': share_ids[2],
|
||||
'replica_state': constants.REPLICA_STATE_ACTIVE,
|
||||
'status': constants.STATUS_REPLICATION_CHANGE,
|
||||
'share_type_id': share_type['id'],
|
||||
},
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'share_id': share_ids[2],
|
||||
'replica_state': constants.REPLICA_STATE_IN_SYNC,
|
||||
'status': constants.STATUS_AVAILABLE,
|
||||
'share_type_id': share_type['id'],
|
||||
},
|
||||
{
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'share_id': share_ids[3],
|
||||
'status': constants.STATUS_AVAILABLE,
|
||||
'share_type_id': share_type['id'],
|
||||
},
|
||||
]
|
||||
instance_ids = share_ids = [x['id'] for x in instances]
|
||||
|
||||
def setup_upgrade_data(self, engine):
|
||||
shares_table = utils.load_table('shares', engine)
|
||||
share_instances_table = utils.load_table('share_instances', engine)
|
||||
share_types_table = utils.load_table('share_types', engine)
|
||||
|
||||
engine.execute(share_types_table.insert(self.share_type))
|
||||
|
||||
for share in self.shares:
|
||||
engine.execute(shares_table.insert(share))
|
||||
|
||||
for instance in self.instances:
|
||||
engine.execute(share_instances_table.insert(instance))
|
||||
|
||||
def check_upgrade(self, engine, data):
|
||||
|
||||
shares_table = utils.load_table('shares', engine)
|
||||
share_instances_table = utils.load_table('share_instances', engine)
|
||||
|
||||
for instance in engine.execute(share_instances_table.select().where(
|
||||
share_instances_table.c.id in self.instance_ids)):
|
||||
self.test_case.assertIn('cast_rules_to_readonly', instance)
|
||||
share = engine.execute(shares_table.select().where(
|
||||
instance['share_id'] == shares_table.c.id)).first()
|
||||
if (instance['replica_state'] != constants.REPLICA_STATE_ACTIVE and
|
||||
share['replication_type'] ==
|
||||
constants.REPLICATION_TYPE_READABLE and
|
||||
instance['status'] != constants.STATUS_REPLICATION_CHANGE):
|
||||
self.test_case.assertTrue(instance['cast_rules_to_readonly'])
|
||||
self.test_case.assertEqual(instance['id'],
|
||||
self.correct_instance['id'])
|
||||
else:
|
||||
self.test_case.assertEqual(
|
||||
False, instance['cast_rules_to_readonly'])
|
||||
|
||||
def check_downgrade(self, engine):
|
||||
|
||||
share_instances_table = utils.load_table('share_instances', engine)
|
||||
|
||||
for instance in engine.execute(share_instances_table.select()):
|
||||
self.test_case.assertNotIn('cast_rules_to_readonly', instance)
|
||||
|
@ -252,6 +252,7 @@ def fake_replica(id=None, as_primitive=True, for_manager=False, **kwargs):
|
||||
'size': None,
|
||||
'display_name': None,
|
||||
'display_description': None,
|
||||
'replication_type': None,
|
||||
'snapshot_id': None,
|
||||
'share_proto': None,
|
||||
'is_public': None,
|
||||
|
@ -622,29 +622,23 @@ class ShareInstanceAccessTestCase(test.TestCase):
|
||||
expected_access_rules_status,
|
||||
share_instance['access_rules_status'])
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test__update_access_rules_for_migration_driver_supports_ro_rules(
|
||||
self, driver_supports_ro_rules):
|
||||
self.mock_object(self.driver.configuration, 'safe_get', mock.Mock(
|
||||
return_value=driver_supports_ro_rules))
|
||||
share = db_utils.create_share(
|
||||
def test__update_access_rules_for_migration(self):
|
||||
share = db_utils.create_share()
|
||||
instance = db_utils.create_share_instance(
|
||||
status=constants.STATUS_MIGRATING,
|
||||
access_rules_status=constants.STATUS_ACTIVE)
|
||||
share_instance_id = share['instance']['id']
|
||||
access_rules_status=constants.STATUS_ACTIVE,
|
||||
cast_rules_to_readonly=True,
|
||||
share_id=share['id'])
|
||||
rule_kwargs = {'share_id': share['id'], 'access_level': 'rw'}
|
||||
rule_1 = db_utils.create_access(
|
||||
state=constants.ACCESS_STATE_ACTIVE, **rule_kwargs)
|
||||
rule_1 = db.share_instance_access_get(
|
||||
self.context, rule_1['id'], share_instance_id)
|
||||
self.context, rule_1['id'], instance['id'])
|
||||
rule_2 = db_utils.create_access(
|
||||
state=constants.ACCESS_STATE_APPLYING, share_id=share['id'],
|
||||
access_level='ro')
|
||||
rule_2 = db.share_instance_access_get(
|
||||
self.context, rule_2['id'], share_instance_id)
|
||||
rule_3 = db_utils.create_access(
|
||||
state=constants.ACCESS_STATE_DENYING, **rule_kwargs)
|
||||
rule_3 = db.share_instance_access_get(
|
||||
self.context, rule_3['id'], share_instance_id)
|
||||
self.context, rule_2['id'], instance['id'])
|
||||
|
||||
driver_call = self.mock_object(
|
||||
self.access_helper.driver, 'update_access',
|
||||
@ -653,19 +647,16 @@ class ShareInstanceAccessTestCase(test.TestCase):
|
||||
mock.Mock(return_value=False))
|
||||
|
||||
retval = self.access_helper._update_access_rules(
|
||||
self.context, share_instance_id, share_server='fake_server')
|
||||
self.context, instance['id'], share_server='fake_server')
|
||||
|
||||
call_args = driver_call.call_args_list[0][0]
|
||||
call_kwargs = driver_call.call_args_list[0][1]
|
||||
access_rules_to_be_on_share = [r['id'] for r in call_args[2]]
|
||||
access_levels = [r['access_level'] for r in call_args[2]]
|
||||
expected_rules_to_be_on_share = (
|
||||
[rule_1['id'], rule_2['id']]
|
||||
if driver_supports_ro_rules else [rule_2['id']]
|
||||
)
|
||||
expected_rules_to_be_on_share = ([rule_1['id'], rule_2['id']])
|
||||
|
||||
self.assertIsNone(retval)
|
||||
self.assertEqual(share_instance_id, call_args[1]['id'])
|
||||
self.assertEqual(instance['id'], call_args[1]['id'])
|
||||
self.assertEqual(sorted(expected_rules_to_be_on_share),
|
||||
sorted(access_rules_to_be_on_share))
|
||||
self.assertEqual(['ro'] * len(expected_rules_to_be_on_share),
|
||||
|
@ -682,6 +682,7 @@ class ShareAPITestCase(test.TestCase):
|
||||
'host': host,
|
||||
'availability_zone_id': 'fake_id',
|
||||
'share_type_id': 'fake_share_type',
|
||||
'cast_rules_to_readonly': False,
|
||||
}
|
||||
)
|
||||
db_api.share_type_get.assert_called_once_with(
|
||||
@ -2590,16 +2591,27 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
self.assertFalse(mock_scheduler_rpcapi_call.called)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_create_share_replica(self, has_snapshots):
|
||||
@ddt.data({'has_snapshots': True,
|
||||
'replication_type': constants.REPLICATION_TYPE_DR},
|
||||
{'has_snapshots': False,
|
||||
'replication_type': constants.REPLICATION_TYPE_DR},
|
||||
{'has_snapshots': True,
|
||||
'replication_type': constants.REPLICATION_TYPE_READABLE},
|
||||
{'has_snapshots': False,
|
||||
'replication_type': constants.REPLICATION_TYPE_READABLE})
|
||||
@ddt.unpack
|
||||
def test_create_share_replica(self, has_snapshots, replication_type):
|
||||
request_spec = fakes.fake_replica_request_spec()
|
||||
replica = request_spec['share_instance_properties']
|
||||
share = db_utils.create_share(
|
||||
id=replica['share_id'], replication_type='dr')
|
||||
id=replica['share_id'], replication_type=replication_type)
|
||||
snapshots = (
|
||||
[fakes.fake_snapshot(), fakes.fake_snapshot()]
|
||||
if has_snapshots else []
|
||||
)
|
||||
cast_rules_to_readonly = (
|
||||
replication_type == constants.REPLICATION_TYPE_READABLE)
|
||||
|
||||
fake_replica = fakes.fake_replica(id=replica['id'])
|
||||
fake_request_spec = fakes.fake_replica_request_spec()
|
||||
self.mock_object(db_api, 'share_replicas_get_available_active_replica',
|
||||
@ -2626,6 +2638,11 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.context, fake_replica['share_id'])
|
||||
self.assertEqual(expected_snap_instance_create_call_count,
|
||||
mock_snapshot_instance_create_call.call_count)
|
||||
(share_api.API.create_share_instance_and_get_request_spec.
|
||||
assert_called_once_with(
|
||||
self.context, share, availability_zone='FAKE_AZ',
|
||||
share_network_id=None, share_type_id=None,
|
||||
cast_rules_to_readonly=cast_rules_to_readonly))
|
||||
|
||||
def test_delete_last_active_replica(self):
|
||||
fake_replica = fakes.fake_replica(
|
||||
|
@ -1086,7 +1086,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
@ddt.data([], None)
|
||||
def test_promote_share_replica_driver_update_nothing_has_snaps(self,
|
||||
retval):
|
||||
replica = fake_replica()
|
||||
replica = fake_replica(
|
||||
replication_type=constants.REPLICATION_TYPE_READABLE)
|
||||
active_replica = fake_replica(
|
||||
id='current_active_replica',
|
||||
replica_state=constants.REPLICA_STATE_ACTIVE)
|
||||
@ -1121,10 +1122,12 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock_replica_update = self.mock_object(db, 'share_replica_update')
|
||||
call_1 = mock.call(mock.ANY, replica['id'],
|
||||
{'status': constants.STATUS_AVAILABLE,
|
||||
'replica_state': constants.REPLICA_STATE_ACTIVE})
|
||||
'replica_state': constants.REPLICA_STATE_ACTIVE,
|
||||
'cast_rules_to_readonly': False})
|
||||
call_2 = mock.call(
|
||||
mock.ANY, 'current_active_replica',
|
||||
{'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC})
|
||||
{'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
|
||||
'cast_rules_to_readonly': True})
|
||||
expected_update_calls = [call_1, call_2]
|
||||
|
||||
self.share_manager.promote_share_replica(self.context, replica)
|
||||
@ -1137,8 +1140,11 @@ class ShareManagerTestCase(test.TestCase):
|
||||
{'status': constants.STATUS_ERROR})
|
||||
self.assertEqual(2, mock_info_log.call_count)
|
||||
|
||||
def test_promote_share_replica_driver_updates_replica_list(self):
|
||||
replica = fake_replica()
|
||||
@ddt.data(constants.REPLICATION_TYPE_READABLE,
|
||||
constants.REPLICATION_TYPE_WRITABLE,
|
||||
constants.REPLICATION_TYPE_DR)
|
||||
def test_promote_share_replica_driver_updates_replica_list(self, rtype):
|
||||
replica = fake_replica(replication_type=rtype)
|
||||
active_replica = fake_replica(
|
||||
id='current_active_replica',
|
||||
replica_state=constants.REPLICA_STATE_ACTIVE)
|
||||
@ -1178,16 +1184,30 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock_export_locs_update = self.mock_object(
|
||||
db, 'share_export_locations_update')
|
||||
mock_replica_update = self.mock_object(db, 'share_replica_update')
|
||||
reset_replication_change_updates = {
|
||||
'replica_state': constants.STATUS_ACTIVE,
|
||||
'status': constants.STATUS_AVAILABLE,
|
||||
'cast_rules_to_readonly': False,
|
||||
}
|
||||
demoted_replica_updates = {
|
||||
'replica_state': constants.REPLICA_STATE_IN_SYNC,
|
||||
'cast_rules_to_readonly': False,
|
||||
}
|
||||
if rtype == constants.REPLICATION_TYPE_READABLE:
|
||||
demoted_replica_updates['cast_rules_to_readonly'] = True
|
||||
reset_replication_change_call = mock.call(
|
||||
mock.ANY, replica['id'], {'replica_state': constants.STATUS_ACTIVE,
|
||||
'status': constants.STATUS_AVAILABLE})
|
||||
mock.ANY, replica['id'], reset_replication_change_updates)
|
||||
demoted_replica_update_call = mock.call(
|
||||
mock.ANY, active_replica['id'], demoted_replica_updates
|
||||
)
|
||||
|
||||
self.share_manager.promote_share_replica(self.context, replica)
|
||||
|
||||
self.assertEqual(2, mock_export_locs_update.call_count)
|
||||
self.assertEqual(2, mock_replica_update.call_count)
|
||||
self.assertIn(
|
||||
reset_replication_change_call, mock_replica_update.mock_calls)
|
||||
mock_replica_update.assert_has_calls([
|
||||
reset_replication_change_call, demoted_replica_update_call,
|
||||
])
|
||||
self.assertTrue(mock_info_log.called)
|
||||
self.assertFalse(mock_snap_instance_update.called)
|
||||
|
||||
@ -3686,6 +3706,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(return_value=[snapshot]))
|
||||
|
||||
# mocks
|
||||
self.mock_object(self.share_manager, '_reset_read_only_access_rules')
|
||||
self.mock_object(self.share_manager.db, 'service_get_by_args',
|
||||
mock.Mock(return_value=fake_service))
|
||||
self.mock_object(self.share_manager.db, 'share_update')
|
||||
@ -3712,8 +3733,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
{'status': constants.STATUS_AVAILABLE})
|
||||
self.share_manager.db.share_get.assert_called_once_with(
|
||||
self.context, 'share_id')
|
||||
self.share_manager.db.service_get_by_args(
|
||||
self.context, 'fake2@backend', 'manila-share')
|
||||
self.share_manager.db.service_get_by_args.assert_called_once_with(
|
||||
self.context, 'fake@backend', 'manila-share')
|
||||
(self.share_manager._reset_read_only_access_rules.
|
||||
assert_called_once_with(self.context, share, instance['id']))
|
||||
|
||||
def test_migration_start_exception(self):
|
||||
|
||||
@ -3739,6 +3762,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(side_effect=Exception('fake_exc_1')))
|
||||
self.mock_object(self.share_manager, '_migration_start_host_assisted',
|
||||
mock.Mock(side_effect=Exception('fake_exc_2')))
|
||||
self.mock_object(self.share_manager, '_reset_read_only_access_rules')
|
||||
|
||||
# run
|
||||
self.assertRaises(
|
||||
@ -3762,6 +3786,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
{'task_state': constants.TASK_STATE_MIGRATION_ERROR})
|
||||
]
|
||||
|
||||
(self.share_manager._reset_read_only_access_rules.
|
||||
assert_called_once_with(self.context, share, instance['id']))
|
||||
self.share_manager.db.share_update.assert_has_calls(share_update_calls)
|
||||
self.share_manager.db.share_instance_update.assert_called_once_with(
|
||||
self.context, instance['id'],
|
||||
@ -3785,8 +3811,16 @@ class ShareManagerTestCase(test.TestCase):
|
||||
server = 'share_server'
|
||||
src_connection_info = 'src_fake_info'
|
||||
dest_connection_info = 'dest_fake_info'
|
||||
|
||||
instance_updates = [
|
||||
mock.call(
|
||||
self.context, instance['id'],
|
||||
{'cast_rules_to_readonly': True})
|
||||
]
|
||||
# mocks
|
||||
helper = mock.Mock()
|
||||
self.mock_object(migration_api, 'ShareMigrationHelper',
|
||||
mock.Mock(return_value=helper))
|
||||
self.mock_object(helper, 'cleanup_new_instance')
|
||||
self.mock_object(self.share_manager.db, 'share_server_get',
|
||||
mock.Mock(return_value=server))
|
||||
self.mock_object(self.share_manager.db, 'share_instance_update',
|
||||
@ -3795,8 +3829,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
'get_and_update_share_instance_access_rules')
|
||||
self.mock_object(self.share_manager.access_helper,
|
||||
'update_access_rules')
|
||||
self.mock_object(utils, 'wait_for_access_update')
|
||||
|
||||
if exc is None:
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
self.mock_object(helper,
|
||||
'create_instance_and_wait',
|
||||
mock.Mock(return_value=new_instance))
|
||||
self.mock_object(self.share_manager.driver, 'connection_get_info',
|
||||
@ -3805,14 +3841,13 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(return_value=dest_connection_info))
|
||||
self.mock_object(data_rpc.DataAPI, 'migration_start',
|
||||
mock.Mock(side_effect=Exception('fake')))
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'cleanup_new_instance')
|
||||
self.mock_object(helper, 'cleanup_new_instance')
|
||||
instance_updates.append(
|
||||
mock.call(self.context, new_instance['id'],
|
||||
{'status': constants.STATUS_MIGRATING_TO}))
|
||||
else:
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'create_instance_and_wait',
|
||||
self.mock_object(helper, 'create_instance_and_wait',
|
||||
mock.Mock(side_effect=exc))
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'cleanup_access_rules')
|
||||
|
||||
# run
|
||||
self.assertRaises(
|
||||
@ -3828,17 +3863,13 @@ class ShareManagerTestCase(test.TestCase):
|
||||
(self.share_manager.access_helper.update_access_rules.
|
||||
assert_called_once_with(
|
||||
self.context, instance['id'], share_server=server))
|
||||
migration_api.ShareMigrationHelper.create_instance_and_wait.\
|
||||
assert_called_once_with(share, 'fake_host', 'fake_net_id',
|
||||
'fake_az_id', 'fake_type_id')
|
||||
(migration_api.ShareMigrationHelper.
|
||||
cleanup_access_rules.assert_called_once_with(
|
||||
instance, server))
|
||||
helper.create_instance_and_wait.assert_called_once_with(
|
||||
share, 'fake_host', 'fake_net_id', 'fake_az_id', 'fake_type_id')
|
||||
utils.wait_for_access_update.assert_called_once_with(
|
||||
self.context, self.share_manager.db, instance,
|
||||
self.share_manager.migration_wait_access_rules_timeout)
|
||||
|
||||
if exc is None:
|
||||
self.share_manager.db.share_instance_update.\
|
||||
assert_called_once_with(
|
||||
self.context, new_instance['id'],
|
||||
{'status': constants.STATUS_MIGRATING_TO})
|
||||
self.share_manager.driver.connection_get_info.\
|
||||
assert_called_once_with(self.context, instance, server)
|
||||
rpcapi.ShareAPI.connection_get_info.assert_called_once_with(
|
||||
@ -3846,8 +3877,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
data_rpc.DataAPI.migration_start.assert_called_once_with(
|
||||
self.context, share['id'], ['lost+found'], instance['id'],
|
||||
new_instance['id'], src_connection_info, dest_connection_info)
|
||||
migration_api.ShareMigrationHelper.\
|
||||
cleanup_new_instance.assert_called_once_with(new_instance)
|
||||
helper.cleanup_new_instance.assert_called_once_with(new_instance)
|
||||
|
||||
@ddt.data({'share_network_id': 'fake_net_id', 'exc': None,
|
||||
'has_snapshots': True},
|
||||
@ -3959,6 +3989,11 @@ class ShareManagerTestCase(test.TestCase):
|
||||
{'task_state':
|
||||
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS})
|
||||
])
|
||||
(self.share_manager.db.share_instance_update.assert_has_calls([
|
||||
mock.call(self.context, migrating_instance['id'],
|
||||
{'status': constants.STATUS_MIGRATING_TO}),
|
||||
mock.call(self.context, src_instance['id'],
|
||||
{'cast_rules_to_readonly': True})]))
|
||||
(self.share_manager.access_helper.update_access_rules.
|
||||
assert_called_once_with(
|
||||
self.context, src_instance['id'], share_server=src_server))
|
||||
@ -4018,10 +4053,6 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.context, snapshot.instance['id'],
|
||||
{'status': constants.STATUS_MIGRATING}))
|
||||
|
||||
self.share_manager.db.share_instance_update.assert_called_once_with(
|
||||
self.context, migrating_instance['id'],
|
||||
{'status': constants.STATUS_MIGRATING_TO})
|
||||
|
||||
@ddt.data({'writable': False, 'preserve_metadata': True,
|
||||
'nondisruptive': True, 'compatible': True,
|
||||
'preserve_snapshots': True, 'has_snapshots': False},
|
||||
@ -4179,6 +4210,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.share_manager.db, 'share_snapshot_instance_update')
|
||||
|
||||
share_get_calls = [mock.call(self.context, 'share_id')]
|
||||
self.mock_object(self.share_manager, '_reset_read_only_access_rules')
|
||||
|
||||
self.share_manager.migration_driver_continue(self.context)
|
||||
|
||||
@ -4194,10 +4226,13 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.context, 'share_id',
|
||||
{'task_state': constants.TASK_STATE_MIGRATION_ERROR})
|
||||
(self.share_manager.db.share_instance_update.
|
||||
assert_called_once_with(self.context, src_instance['id'],
|
||||
assert_called_once_with(
|
||||
self.context, src_instance['id'],
|
||||
{'status': constants.STATUS_AVAILABLE}))
|
||||
(self.share_manager._migration_delete_instance.
|
||||
assert_called_once_with(self.context, dest_instance['id']))
|
||||
(self.share_manager._reset_read_only_access_rules.
|
||||
assert_called_once_with(self.context, share, src_instance['id']))
|
||||
(self.share_manager.db.share_snapshot_instance_update.
|
||||
assert_called_once_with(
|
||||
self.context, migrating_snap_instance['id'],
|
||||
@ -4378,22 +4413,20 @@ class ShareManagerTestCase(test.TestCase):
|
||||
share_server_id='fake_server_id')
|
||||
new_instance = db_utils.create_share_instance(share_id='fake_id')
|
||||
share = db_utils.create_share(id='fake_id', task_state=status)
|
||||
server = 'fake_server'
|
||||
helper = mock.Mock()
|
||||
|
||||
# mocks
|
||||
self.mock_object(self.share_manager.db, 'share_instance_get',
|
||||
mock.Mock(side_effect=[instance, new_instance]))
|
||||
self.mock_object(self.share_manager.db, 'share_server_get',
|
||||
mock.Mock(return_value=server))
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'cleanup_new_instance')
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'cleanup_access_rules')
|
||||
self.mock_object(helper, 'cleanup_new_instance')
|
||||
self.mock_object(migration_api, 'ShareMigrationHelper',
|
||||
mock.Mock(return_value=helper))
|
||||
self.mock_object(self.share_manager.db, 'share_instance_update')
|
||||
self.mock_object(self.share_manager.db, 'share_update')
|
||||
self.mock_object(self.share_manager, '_reset_read_only_access_rules')
|
||||
|
||||
if status == constants.TASK_STATE_DATA_COPYING_COMPLETED:
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'apply_new_access_rules',
|
||||
self.mock_object(helper, 'apply_new_access_rules',
|
||||
mock.Mock(side_effect=Exception('fake')))
|
||||
self.mock_object(manager.LOG, 'exception')
|
||||
|
||||
@ -4412,14 +4445,13 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.call(self.context, instance['id'], with_share_data=True),
|
||||
mock.call(self.context, new_instance['id'], with_share_data=True)
|
||||
])
|
||||
self.share_manager.db.share_server_get.assert_called_once_with(
|
||||
utils.IsAMatcher(context.RequestContext), 'fake_server_id')
|
||||
|
||||
cancelled = not(status == constants.TASK_STATE_DATA_COPYING_CANCELLED)
|
||||
if status != 'other':
|
||||
migration_api.ShareMigrationHelper.cleanup_new_instance.\
|
||||
assert_called_once_with(new_instance)
|
||||
(migration_api.ShareMigrationHelper.cleanup_access_rules.
|
||||
assert_called_once_with(instance, server))
|
||||
helper.cleanup_new_instance.assert_called_once_with(new_instance)
|
||||
(self.share_manager._reset_read_only_access_rules.
|
||||
assert_called_once_with(self.context, share, instance['id'],
|
||||
helper=helper, supress_errors=cancelled))
|
||||
if status == constants.TASK_STATE_MIGRATION_CANCELLED:
|
||||
self.share_manager.db.share_instance_update.\
|
||||
assert_called_once_with(self.context, instance['id'],
|
||||
@ -4428,8 +4460,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.context, share['id'],
|
||||
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
|
||||
if status == constants.TASK_STATE_DATA_COPYING_COMPLETED:
|
||||
migration_api.ShareMigrationHelper.apply_new_access_rules.\
|
||||
assert_called_once_with(new_instance)
|
||||
helper.apply_new_access_rules.assert_called_once_with(
new_instance)
||||
new_instance)
|
||||
self.assertTrue(manager.LOG.exception.called)
|
||||
|
||||
def test__migration_complete_driver(self):
|
||||
@ -4542,13 +4574,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
share = db_utils.create_share(
|
||||
id='fake_id',
|
||||
task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED)
|
||||
server = 'fake_server'
|
||||
|
||||
# mocks
|
||||
self.mock_object(self.share_manager.db, 'share_instance_get',
|
||||
mock.Mock(side_effect=[instance, new_instance]))
|
||||
self.mock_object(self.share_manager.db, 'share_server_get',
|
||||
mock.Mock(return_value=server))
|
||||
self.mock_object(self.share_manager.db, 'share_instance_update')
|
||||
self.mock_object(self.share_manager.db, 'share_update')
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
@ -4565,8 +4594,6 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.call(self.context, instance['id'], with_share_data=True),
|
||||
mock.call(self.context, new_instance['id'], with_share_data=True)
|
||||
])
|
||||
self.share_manager.db.share_server_get.assert_called_once_with(
|
||||
utils.IsAMatcher(context.RequestContext), 'fake_server_id')
|
||||
|
||||
self.share_manager.db.share_instance_update.assert_has_calls([
|
||||
mock.call(self.context, new_instance['id'],
|
||||
@ -4603,6 +4630,9 @@ class ShareManagerTestCase(test.TestCase):
|
||||
share_id=share['id'], share_server_id=server_2['id'],
|
||||
host=dest_host)
|
||||
|
||||
helper = mock.Mock()
|
||||
self.mock_object(migration_api, 'ShareMigrationHelper',
|
||||
mock.Mock(return_value=helper))
|
||||
self.mock_object(db, 'share_get', mock.Mock(return_value=share))
|
||||
self.mock_object(db, 'share_instance_get',
|
||||
mock.Mock(side_effect=[instance_1, instance_2]))
|
||||
@ -4614,10 +4644,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(db, 'share_server_get',
|
||||
mock.Mock(side_effect=[server_1, server_2]))
|
||||
self.mock_object(self.share_manager.driver, 'migration_cancel')
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'cleanup_new_instance')
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'cleanup_access_rules')
|
||||
self.mock_object(helper, 'cleanup_new_instance')
|
||||
self.mock_object(self.share_manager, '_reset_read_only_access_rules')
|
||||
|
||||
self.share_manager.migration_cancel(
|
||||
self.context, instance_1['id'], instance_2['id'])
|
||||
@ -4628,10 +4656,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
share_instance_update_calls.append(mock.call(
|
||||
self.context, instance_2['id'],
|
||||
{'status': constants.STATUS_INACTIVE}))
|
||||
(migration_api.ShareMigrationHelper.cleanup_new_instance.
|
||||
assert_called_once_with(instance_2))
|
||||
(migration_api.ShareMigrationHelper.cleanup_access_rules.
|
||||
assert_called_once_with(instance_1, server_1))
|
||||
(helper.cleanup_new_instance.assert_called_once_with(instance_2))
|
||||
(self.share_manager._reset_read_only_access_rules.
|
||||
assert_called_once_with(self.context, share, instance_1['id'],
|
||||
helper=helper, supress_errors=False))
|
||||
|
||||
else:
|
||||
self.share_manager.driver.migration_cancel.assert_called_once_with(
|
||||
@ -4664,6 +4692,46 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.share_manager.db.share_instance_update.assert_has_calls(
|
||||
share_instance_update_calls)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test__reset_read_only_access_rules(self, supress_errors):
|
||||
|
||||
share = db_utils.create_share()
|
||||
server = db_utils.create_share_server()
|
||||
instance = db_utils.create_share_instance(
|
||||
share_id=share['id'],
|
||||
cast_rules_to_readonly=True,
|
||||
share_server_id=server['id'])
|
||||
|
||||
# mocks
|
||||
self.mock_object(self.share_manager.db, 'share_server_get',
|
||||
mock.Mock(return_value=server))
|
||||
self.mock_object(self.share_manager.db, 'share_instance_update')
|
||||
self.mock_object(self.share_manager.db, 'share_instance_get',
|
||||
mock.Mock(return_value=instance))
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'cleanup_access_rules')
|
||||
self.mock_object(migration_api.ShareMigrationHelper,
|
||||
'revert_access_rules')
|
||||
|
||||
# run
|
||||
self.share_manager._reset_read_only_access_rules(
|
||||
self.context, share, instance['id'], supress_errors=supress_errors)
|
||||
|
||||
# asserts
|
||||
self.share_manager.db.share_server_get.assert_called_once_with(
|
||||
self.context, server['id'])
|
||||
self.share_manager.db.share_instance_update.assert_called_once_with(
|
||||
self.context, instance['id'],
|
||||
{'cast_rules_to_readonly': False})
|
||||
self.share_manager.db.share_instance_get.assert_called_once_with(
|
||||
self.context, instance['id'], with_share_data=True)
|
||||
if supress_errors:
|
||||
(migration_api.ShareMigrationHelper.cleanup_access_rules.
|
||||
assert_called_once_with(instance, server))
|
||||
else:
|
||||
(migration_api.ShareMigrationHelper.revert_access_rules.
|
||||
assert_called_once_with(instance, server))
|
||||
|
||||
def test__migration_delete_instance(self):
|
||||
|
||||
share = db_utils.create_share(id='fake_id')
|
||||
|
@ -338,7 +338,6 @@ class ShareMigrationHelperTestCase(test.TestCase):
|
||||
server = db_utils.create_share_server()
|
||||
self.mock_object(self.helper, 'revert_access_rules',
|
||||
mock.Mock(side_effect=exc))
|
||||
self.mock_object(self.helper.db, 'share_instance_update')
|
||||
|
||||
self.mock_object(migration.LOG, 'warning')
|
||||
|
||||
@ -348,9 +347,6 @@ class ShareMigrationHelperTestCase(test.TestCase):
|
||||
# asserts
|
||||
self.helper.revert_access_rules.assert_called_once_with(
|
||||
self.share_instance, server)
|
||||
self.helper.db.share_instance_update.assert_called_once_with(
|
||||
self.context, self.share_instance['id'],
|
||||
{'status': constants.STATUS_INACTIVE})
|
||||
|
||||
if exc:
|
||||
self.assertEqual(1, migration.LOG.warning.call_count)
|
||||
|
@@ -30,7 +30,7 @@ ShareGroup = [
               help="The minimum api microversion is configured to be the "
                    "value of the minimum microversion supported by Manila."),
    cfg.StrOpt("max_api_microversion",
               default="2.29",
               default="2.30",
               help="The maximum api microversion is configured to be the "
                    "value of the latest microversion supported by Manila."),
    cfg.StrOpt("region",
@@ -76,6 +76,12 @@ class ShareInstancesTest(base.BaseSharesAdminTest):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.10'):
            expected_keys.append("access_rules_status")
        if utils.is_microversion_ge(version, '2.11'):
            expected_keys.append("replica_state")
        if utils.is_microversion_ge(version, '2.22'):
            expected_keys.append("share_type_id")
        if utils.is_microversion_ge(version, '2.30'):
            expected_keys.append("cast_rules_to_readonly")
        expected_keys = sorted(expected_keys)
        actual_keys = sorted(si.keys())
        self.assertEqual(expected_keys, actual_keys,
@@ -94,3 +100,7 @@ class ShareInstancesTest(base.BaseSharesAdminTest):
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_get_share_instance_v2_10(self):
        self._get_share_instance('2.10')

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_get_share_instance_v2_30(self):
        self._get_share_instance('2.30')
@@ -0,0 +1,7 @@
---
features:
  - Improvements have been made to ensure read only rule semantics for
    shares and readable replicas. When invoked with administrative context,
    the share instance and share replica APIs will return
    ``cast_rules_to_readonly`` as an additional field in the detailed JSON
    response.
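For illustration only (field values and IDs are invented, not captured from a real deployment), the admin-context share instance detail response at microversion 2.30 could look roughly like this:

```python
# Illustrative shape of the detailed response body.
share_instance_detail = {
    "share_instance": {
        "id": "48155648-2fd3-480d-b02b-44b995c24bab",
        "share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca",
        "status": "available",
        "replica_state": "in_sync",
        "access_rules_status": "active",
        "share_type_id": "7bed5d97-bf3c-4b68-b2c6-683115e6f3ff",
        # New in 2.30: exposed to administrative users.
        "cast_rules_to_readonly": True,
    }
}
```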
@@ -17,7 +17,7 @@ upgrade:
    force-host-assisted-migration is True.
deprecations:
  - Support for the experimental share migration APIs has been
    dropped for API microversions prior to 2.29.
    dropped for API microversions prior to 2.30.
fixes:
  - Added check to validate that host assisted migration cannot
    be forced while specifying driver assisted migration options.