Browse Source

Merge "Create share from snapshot in another pool or backend"

tags/10.0.0.0rc1
Zuul 3 months ago
committed by Gerrit Code Review
parent
commit
ba57e90d45
66 changed files with 1497 additions and 206 deletions
  1. +14
    -6
      api-ref/source/parameters.yaml
  2. +1
    -0
      api-ref/source/samples/share-create-response.json
  3. +15
    -0
      api-ref/source/samples/share-instances-list-response.json
  4. +1
    -0
      api-ref/source/samples/share-show-instance-response.json
  5. +1
    -0
      api-ref/source/samples/share-show-response.json
  6. +2
    -0
      api-ref/source/samples/shares-list-detailed-response.json
  7. +2
    -0
      api-ref/source/share-instances.inc
  8. +5
    -0
      api-ref/source/shares.inc
  9. +4
    -1
      manila/api/openstack/api_version_request.py
  10. +5
    -0
      manila/api/openstack/rest_api_version_history.rst
  11. +15
    -0
      manila/api/views/share_instance.py
  12. +13
    -0
      manila/api/views/shares.py
  13. +1
    -0
      manila/common/constants.py
  14. +3
    -2
      manila/db/api.py
  15. +62
    -0
      manila/db/migrations/alembic/versions/e6d88547b381_add_progress_field_to_share_instance.py
  16. +6
    -2
      manila/db/sqlalchemy/api.py
  17. +6
    -0
      manila/db/sqlalchemy/models.py
  18. +10
    -1
      manila/message/message_field.py
  19. +16
    -10
      manila/scheduler/drivers/filter.py
  20. +71
    -0
      manila/scheduler/filters/create_from_snapshot.py
  21. +2
    -0
      manila/scheduler/host_manager.py
  22. +65
    -0
      manila/scheduler/weighers/host_affinity.py
  23. +34
    -12
      manila/share/api.py
  24. +118
    -7
      manila/share/driver.py
  25. +1
    -1
      manila/share/drivers/dell_emc/driver.py
  26. +1
    -1
      manila/share/drivers/dell_emc/plugins/powermax/connection.py
  27. +1
    -1
      manila/share/drivers/dell_emc/plugins/unity/connection.py
  28. +1
    -1
      manila/share/drivers/dell_emc/plugins/vnx/connection.py
  29. +1
    -1
      manila/share/drivers/generic.py
  30. +1
    -1
      manila/share/drivers/glusterfs/layout.py
  31. +1
    -1
      manila/share/drivers/glusterfs/layout_directory.py
  32. +1
    -1
      manila/share/drivers/glusterfs/layout_volume.py
  33. +1
    -1
      manila/share/drivers/hdfs/hdfs_native.py
  34. +1
    -1
      manila/share/drivers/hitachi/hnas/driver.py
  35. +1
    -1
      manila/share/drivers/hpe/hpe_3par_driver.py
  36. +1
    -1
      manila/share/drivers/huawei/base.py
  37. +1
    -1
      manila/share/drivers/huawei/huawei_nas.py
  38. +1
    -1
      manila/share/drivers/huawei/v3/connection.py
  39. +1
    -1
      manila/share/drivers/ibm/gpfs.py
  40. +1
    -1
      manila/share/drivers/infinidat/infinibox.py
  41. +1
    -1
      manila/share/drivers/inspur/as13000/as13000_nas.py
  42. +1
    -1
      manila/share/drivers/lvm.py
  43. +1
    -1
      manila/share/drivers/maprfs/maprfs_native.py
  44. +1
    -1
      manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
  45. +1
    -1
      manila/share/drivers/nexenta/ns4/nexenta_nas.py
  46. +1
    -1
      manila/share/drivers/nexenta/ns5/nexenta_nas.py
  47. +1
    -1
      manila/share/drivers/qnap/qnap.py
  48. +1
    -1
      manila/share/drivers/tegile/tegile.py
  49. +1
    -1
      manila/share/drivers/veritas/veritas_isa.py
  50. +1
    -1
      manila/share/drivers/zfsonlinux/driver.py
  51. +1
    -1
      manila/share/drivers/zfssa/zfssashare.py
  52. +227
    -62
      manila/share/manager.py
  53. +9
    -2
      manila/tests/api/v2/test_share_instances.py
  54. +26
    -1
      manila/tests/api/views/test_shares.py
  55. +30
    -0
      manila/tests/db/migrations/alembic/migrations_data_checks.py
  56. +8
    -4
      manila/tests/db/sqlalchemy/test_api.py
  57. +12
    -2
      manila/tests/fake_driver.py
  58. +92
    -0
      manila/tests/scheduler/filters/test_create_from_snapshot.py
  59. +138
    -0
      manila/tests/scheduler/weighers/test_host_affinity.py
  60. +13
    -2
      manila/tests/share/drivers/dummy.py
  61. +1
    -1
      manila/tests/share/drivers/glusterfs/test_layout.py
  62. +45
    -8
      manila/tests/share/test_api.py
  63. +98
    -0
      manila/tests/share/test_driver.py
  64. +283
    -56
      manila/tests/share/test_manager.py
  65. +14
    -0
      releasenotes/notes/add-create-share-from-snapshot-another-pool-or-backend-98d61fe753b85632.yaml
  66. +2
    -0
      setup.cfg

+ 14
- 6
api-ref/source/parameters.yaml View File

@@ -468,8 +468,8 @@ source_share_group_snapshot_id_query:
status_query:
description: |
Filters by a share status. A valid value is
``creating``, ``error``, ``available``, ``deleting``,
``error_deleting``, ``manage_starting``, ``manage_error``,
``creating``, ``creating_from_snapshot``, ``error``, ``available``,
``deleting``, ``error_deleting``, ``manage_starting``, ``manage_error``,
``unmanage_starting``, ``unmanage_error``, ``migrating``,
``extending``, ``extending_error``, ``shrinking``,
``shrinking_error``, or ``shrinking_possible_data_loss_error``.
@@ -1540,6 +1540,13 @@ progress:
in: body
required: true
type: string
progress_share_instance:
description: |
The progress of the share creation.
in: body
min_version: 2.54
required: true
type: string
project:
description: |
The UUID of the project to which access to the
@@ -2713,7 +2720,7 @@ status_1:
type: string
status_16:
description: |
The share status, which is ``creating``,
The share status, which is ``creating``, ``creating_from_snapshot``,
``error``, ``available``, ``deleting``, ``error_deleting``,
``manage_starting``, ``manage_error``, ``unmanage_starting``,
``unmanage_error``, ``unmanaged``, ``extend``,
@@ -2737,15 +2744,16 @@ status_3:
``extending_error``. Extend share failed. - ``shrinking``. Share
is being shrunk. - ``shrinking_error``. Failed to update quota on
share shrinking. - ``shrinking_possible_data_loss_error``. Shrink
share failed due to possible data loss.
share failed due to possible data loss. - ``creating_from_snapshot``.
The share is being created from a parent snapshot.
in: body
required: true
type: string
status_5:
description: |
The share instance status. A valid value is
``available``, ``error``, ``creating``, ``deleting``, and
``error_deleting``.
``available``, ``error``, ``creating``, ``deleting``,
``creating_from_snapshot``, or ``error_deleting``.
in: body
required: true
type: string


+ 1
- 0
api-ref/source/samples/share-create-response.json View File

@@ -1,6 +1,7 @@
{
"share": {
"status": null,
"progress": null,
"share_server_id": null,
"project_id": "16e1ab15c35a457e9c2b2aa189f544e1",
"name": "share_London",


+ 15
- 0
api-ref/source/samples/share-instances-list-response.json View File

@@ -2,6 +2,7 @@
"share_instances": [
{
"status": "error",
"progress": null,
"share_id": "406ea93b-32e9-4907-a117-148b3945749f",
"availability_zone": "nova",
"replica_state": null,
@@ -14,6 +15,7 @@
},
{
"status": "available",
"progress": "100%",
"share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca",
"availability_zone": "nova",
"replica_state": null,
@@ -23,6 +25,19 @@
"share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73",
"host": "manila2@generic1#GENERIC1",
"id": "75559a8b-c90c-42a7-bda2-edbe86acfb7b"
},
{
"status": "creating_from_snapshot",
"progress": "30%",
"share_id": "9bb15af4-27e5-4174-ae15-dc549d4a3b51",
"availability_zone": "nova",
"replica_state": null,
"created_at": "2015-09-07T09:01:15.000000",
"share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c",
"cast_rules_to_readonly": false,
"share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73",
"host": "manila2@generic1#GENERIC1",
"id": "48155648-2fd3-480d-b02b-44b995c24bab"
}
]
}

+ 1
- 0
api-ref/source/samples/share-show-instance-response.json View File

@@ -1,6 +1,7 @@
{
"share_instance": {
"status": "available",
"progress": "100%",
"share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca",
"availability_zone": "nova",
"replica_state": null,


+ 1
- 0
api-ref/source/samples/share-show-response.json View File

@@ -27,6 +27,7 @@
"aim": "doc"
},
"status": "available",
"progress": "100%",
"description": "My custom share London",
"host": "manila2@generic1#GENERIC1",
"access_rules_status": "active",


+ 2
- 0
api-ref/source/samples/shares-list-detailed-response.json View File

@@ -25,6 +25,7 @@
"project_id": "16e1ab15c35a457e9c2b2aa189f544e1",
"metadata": {},
"status": "error",
"progress": null,
"access_rules_status": "active",
"description": "There is a share description.",
"host": "manila2@generic1#GENERIC1",
@@ -64,6 +65,7 @@
"project_id": "16e1ab15c35a457e9c2b2aa189f544e1",
"metadata": {},
"status": "available",
"progress": "100%",
"access_rules_status": "active",
"description": "Changed description.",
"host": "manila2@generic1#GENERIC1",


+ 2
- 0
api-ref/source/share-instances.inc View File

@@ -48,6 +48,7 @@ Response parameters
- status: status_5
- access_rules_status: access_rules_status
- share_id: share_id_2
- progress: progress_share_instance
- availability_zone: availability_zone_1
- created_at: created_at
- replica_state: replica_state
@@ -105,6 +106,7 @@ Response parameters
- status: status_5
- access_rules_status: access_rules_status
- share_id: share_id_2
- progress: progress_share_instance
- availability_zone: availability_zone_1
- created_at: created_at
- replica_state: replica_state


+ 5
- 0
api-ref/source/shares.inc View File

@@ -39,6 +39,8 @@ A share has one of these status values:
+----------------------------------------+--------------------------------------------------------+
| ``creating`` | The share is being created. |
+----------------------------------------+--------------------------------------------------------+
| ``creating_from_snapshot`` | The share is being created from a parent snapshot. |
+----------------------------------------+--------------------------------------------------------+
| ``deleting`` | The share is being deleted. |
+----------------------------------------+--------------------------------------------------------+
| ``deleted`` | The share was deleted. |
@@ -220,6 +222,7 @@ Response parameters
- project_id: project_id
- metadata: metadata
- status: status_16
- progress: progress_share_instance
- description: description
- host: host_1
- access_rules_status: access_rules_status
@@ -291,6 +294,7 @@ Response parameters
- project_id: project_id
- metadata: metadata
- status: status_16
- progress: progress_share_instance
- description: description
- host: host_9
- access_rules_status: access_rules_status
@@ -369,6 +373,7 @@ Response parameters

- id: id_4
- status: status_3
- progress: progress_share_instance
- links: links
- project_id: project_id
- share_proto: share_proto


+ 4
- 1
manila/api/openstack/api_version_request.py View File

@@ -144,13 +144,16 @@ REST_API_VERSION_HISTORY = """
filters, support querying user messages within the specified time
period.
* 2.53 - Added quota control to share replicas.
* 2.54 - Share and share instance objects include a new field called
"progress" which indicates the completion of a share creation
operation as a percentage.
"""

# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
_MIN_API_VERSION = "2.0"
_MAX_API_VERSION = "2.53"
_MAX_API_VERSION = "2.54"
DEFAULT_API_VERSION = _MIN_API_VERSION




+ 5
- 0
manila/api/openstack/rest_api_version_history.rst View File

@@ -296,3 +296,8 @@ user documentation.
2.53
----
Added quota control for share replicas and replica gigabytes.

2.54
----
Share and share instance objects include a new field called "progress" which
indicates the completion of a share creation operation as a percentage.

+ 15
- 0
manila/api/views/share_instance.py View File

@@ -11,6 +11,7 @@
# under the License.

from manila.api import common
from manila.common import constants


class ViewBuilder(common.ViewBuilder):
@@ -25,6 +26,8 @@ class ViewBuilder(common.ViewBuilder):
"add_replication_fields",
"add_share_type_field",
"add_cast_rules_to_readonly_field",
"add_progress_field",
"translate_creating_from_snapshot_status",
]

def detail_list(self, request, instances):
@@ -47,6 +50,7 @@ class ViewBuilder(common.ViewBuilder):
'export_location': share_instance.get('export_location'),
'export_locations': export_locations,
}

self.update_versioned_resource_dict(
request, instance_dict, share_instance)
return {'share_instance': instance_dict}
@@ -91,3 +95,14 @@ class ViewBuilder(common.ViewBuilder):
share_instance):
instance_dict['cast_rules_to_readonly'] = share_instance.get(
'cast_rules_to_readonly', False)

@common.ViewBuilder.versioned_method("1.0", "2.53")
def translate_creating_from_snapshot_status(self, context, instance_dict,
share_instance):
if (share_instance.get('status') ==
constants.STATUS_CREATING_FROM_SNAPSHOT):
instance_dict['status'] = constants.STATUS_CREATING

@common.ViewBuilder.versioned_method("2.54")
def add_progress_field(self, context, instance_dict, share_instance):
instance_dict['progress'] = share_instance.get('progress')

+ 13
- 0
manila/api/views/shares.py View File

@@ -34,6 +34,8 @@ class ViewBuilder(common.ViewBuilder):
"translate_access_rules_status",
"add_share_group_fields",
"add_mount_snapshot_support_field",
"add_progress_field",
"translate_creating_from_snapshot_status",
]

def summary_list(self, request, shares, count=None):
@@ -92,6 +94,7 @@ class ViewBuilder(common.ViewBuilder):
'is_public': share.get('is_public'),
'export_locations': export_locations,
}

self.update_versioned_resource_dict(request, share_dict, share)

if context.is_admin:
@@ -184,3 +187,13 @@ class ViewBuilder(common.ViewBuilder):
shares_dict['shares_links'] = shares_links

return shares_dict

@common.ViewBuilder.versioned_method("1.0", "2.53")
def translate_creating_from_snapshot_status(self, context, share_dict,
share):
if share.get('status') == constants.STATUS_CREATING_FROM_SNAPSHOT:
share_dict['status'] = constants.STATUS_CREATING

@common.ViewBuilder.versioned_method("2.54")
def add_progress_field(self, context, share_dict, share):
share_dict['progress'] = share.get('progress')

+ 1
- 0
manila/common/constants.py View File

@@ -18,6 +18,7 @@ DB_MAX_INT = 0x7FFFFFFF

# SHARE AND GENERAL STATUSES
STATUS_CREATING = 'creating'
STATUS_CREATING_FROM_SNAPSHOT = 'creating_from_snapshot'
STATUS_DELETING = 'deleting'
STATUS_DELETED = 'deleted'
STATUS_ERROR = 'error'


+ 3
- 2
manila/db/api.py View File

@@ -340,10 +340,11 @@ def share_instances_get_all_by_share_server(context, share_server_id):
share_server_id)


def share_instances_get_all_by_host(context, host, with_share_data=False):
def share_instances_get_all_by_host(context, host, with_share_data=False,
status=None):
"""Returns all share instances with given host."""
return IMPL.share_instances_get_all_by_host(
context, host, with_share_data=with_share_data)
context, host, with_share_data=with_share_data, status=status)


def share_instances_get_all_by_share_network(context, share_network_id):


+ 62
- 0
manila/db/migrations/alembic/versions/e6d88547b381_add_progress_field_to_share_instance.py View File

@@ -0,0 +1,62 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""add-progress-field-to-share-instance

Revision ID: e6d88547b381
Revises: 805685098bd2
Create Date: 2020-01-31 14:06:15.952747

"""

# revision identifiers, used by Alembic.
revision = 'e6d88547b381'
down_revision = '805685098bd2'

from alembic import op
from manila.common import constants
from manila.db.migrations import utils
from oslo_log import log
import sqlalchemy as sa


LOG = log.getLogger(__name__)


def upgrade():
    """Add the nullable share_instances.progress column.

    Existing instances that are already 'available' are backfilled with
    '100%' so the new field is meaningful for pre-existing shares.
    """
    try:
        bind = op.get_bind()
        op.add_column(
            'share_instances',
            sa.Column('progress', sa.String(32), nullable=True,
                      default=None))
        instances_table = utils.load_table('share_instances', bind)

        # pylint: disable=no-value-for-parameter
        op.execute(
            instances_table.update().where(
                instances_table.c.status == constants.STATUS_AVAILABLE,
            ).values({'progress': '100%'})
        )
    except Exception:
        LOG.error("Column share_instances.progress not created.")
        raise


def downgrade():
    """Remove the share_instances.progress column added by upgrade()."""
    try:
        op.drop_column('share_instances', 'progress')
    except Exception:
        LOG.error("Column share_instances.progress not dropped.")
        raise

+ 6
- 2
manila/db/sqlalchemy/api.py View File

@@ -1579,7 +1579,7 @@ def _set_instances_share_data(context, instances, session):

@require_admin_context
def share_instances_get_all_by_host(context, host, with_share_data=False,
session=None):
status=None, session=None):
"""Retrieves all share instances hosted on a host."""
session = session or get_session()
instances = (
@@ -1588,8 +1588,12 @@ def share_instances_get_all_by_host(context, host, with_share_data=False,
models.ShareInstance.host == host,
models.ShareInstance.host.like("{0}#%".format(host))
)
).all()
)
)
if status is not None:
instances = instances.filter(models.ShareInstance.status == status)
# Returns list of all instances that satisfy filters.
instances = instances.all()

if with_share_data:
instances = _set_instances_share_data(context, instances, session)


+ 6
- 0
manila/db/sqlalchemy/models.py View File

@@ -257,6 +257,11 @@ class Share(BASE, ManilaBase):
return len(replicas) > 1
return False

@property
def progress(self):
if len(self.instances) > 0:
return self.instance.progress

@property
def instance(self):
# NOTE(gouthamr): The order of preference: status 'replication_change',
@@ -364,6 +369,7 @@ class ShareInstance(BASE, ManilaBase):
deleted = Column(String(36), default='False')
host = Column(String(255))
status = Column(String(255))
progress = Column(String(32))

ACCESS_STATUS_PRIORITIES = {
constants.STATUS_ACTIVE: 0,


+ 10
- 1
manila/message/message_field.py View File

@@ -80,6 +80,12 @@ class Detail(object):
"set to extending_error. This action cannot be re-attempted until "
"the status has been rectified. Contact your administrator to "
"determine the cause of this failure."))
FILTER_CREATE_FROM_SNAPSHOT = ('016', FILTER_MSG % 'CreateFromSnapshot')
DRIVER_FAILED_CREATING_FROM_SNAP = (
'017',
_("Share Driver has failed to create the share from snapshot. This "
"operation can be re-attempted by creating a new share. Contact "
"your administrator to determine the cause of this failure."))

ALL = (UNKNOWN_ERROR,
NO_VALID_HOST,
@@ -95,7 +101,9 @@ class Detail(object):
FILTER_JSON,
FILTER_RETRY,
FILTER_REPLICATION,
DRIVER_FAILED_EXTEND)
DRIVER_FAILED_EXTEND,
FILTER_CREATE_FROM_SNAPSHOT,
DRIVER_FAILED_CREATING_FROM_SNAP)

# Exception and detail mappings
EXCEPTION_DETAIL_MAPPINGS = {
@@ -113,6 +121,7 @@ class Detail(object):
'JsonFilter': FILTER_JSON,
'RetryFilter': FILTER_RETRY,
'ShareReplicationFilter': FILTER_REPLICATION,
'CreateFromSnapshotFilter': FILTER_CREATE_FROM_SNAPSHOT,
}




+ 16
- 10
manila/scheduler/drivers/filter.py View File

@@ -162,21 +162,27 @@ class FilterScheduler(base.Scheduler):

share_group = request_spec.get('share_group')

# NOTE(gouthamr): If 'active_replica_host' is present in the request
# spec, pass that host's 'replication_domain' to the
# ShareReplication filter.
# NOTE(gouthamr): If 'active_replica_host' or 'snapshot_host' is
# present in the request spec, pass that host's 'replication_domain' to
# the ShareReplication and CreateFromSnapshot filters.
active_replica_host = request_spec.get('active_replica_host')
replication_domain = None
snapshot_host = request_spec.get('snapshot_host')
allowed_hosts = []
if active_replica_host:
allowed_hosts.append(active_replica_host)
if snapshot_host:
allowed_hosts.append(snapshot_host)
replication_domain = None
if active_replica_host or snapshot_host:
temp_hosts = self.host_manager.get_all_host_states_share(elevated)
ar_host = next((host for host in temp_hosts
if host.host == active_replica_host), None)
if ar_host:
replication_domain = ar_host.replication_domain
matching_host = next((host for host in temp_hosts
if host.host in allowed_hosts), None)
if matching_host:
replication_domain = matching_host.replication_domain

# NOTE(zengyingzhe): remove the 'share_backend_name' extra spec,
# let scheduler choose the available host for this replica
# creation request.
# let scheduler choose the available host for this replica or
# snapshot clone creation request.
share_type.get('extra_specs', {}).pop('share_backend_name', None)

if filter_properties is None:


+ 71
- 0
manila/scheduler/filters/create_from_snapshot.py View File

@@ -0,0 +1,71 @@
# Copyright 2019 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from oslo_log import log

from manila.scheduler.filters import base_host
from manila.share import utils as share_utils

LOG = log.getLogger(__name__)


class CreateFromSnapshotFilter(base_host.BaseHostFilter):
    """CreateFromSnapshotFilter filters hosts based on replication_domain."""

    def host_passes(self, host_state, filter_properties):
        """Return True if new share's host is compatible with snapshot's host.

        A host is eligible to receive a share created from a snapshot when
        either of the following holds:

        - the candidate host lives on the same backend as the parent
          snapshot, or
        - the candidate host and the snapshot's host share the same
          replication_domain.

        Requests that are not "create from snapshot" (no snapshot_id or
        snapshot_host in the request spec) pass unconditionally.
        """
        request_spec = filter_properties.get('request_spec', {})
        snapshot_id = request_spec.get('snapshot_id')
        snapshot_host = request_spec.get('snapshot_host')

        # NOTE(silvacarlose): if the request does not contain a snapshot_id
        # or a snapshot_host, the user is not creating a share from a
        # snapshot and we don't need to filter out the host.
        if snapshot_id is None or snapshot_host is None:
            return True

        src_backend = share_utils.extract_host(snapshot_host, 'backend')
        src_rep_domain = filter_properties.get('replication_domain')
        dst_backend = share_utils.extract_host(host_state.host, 'backend')
        dst_rep_domain = host_state.replication_domain

        same_backend = dst_backend == src_backend
        same_rep_domain = (src_rep_domain is not None
                           and src_rep_domain == dst_rep_domain)
        if same_backend or same_rep_domain:
            return True

        LOG.debug("The parent's snapshot %(snapshot_id)s back end and "
                  "replication domain don't match with the back end and "
                  "replication domain of the Host %(host)s.",
                  {"snapshot_id": snapshot_id, "host": host_state.host})
        return False

+ 2
- 0
manila/scheduler/host_manager.py View File

@@ -48,6 +48,7 @@ host_manager_opts = [
'CapabilitiesFilter',
'DriverFilter',
'ShareReplicationFilter',
'CreateFromSnapshotFilter',
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
@@ -55,6 +56,7 @@ host_manager_opts = [
default=[
'CapacityWeigher',
'GoodnessWeigher',
'HostAffinityWeigher',
],
help='Which weigher class names to use for weighing hosts.'),
cfg.ListOpt(


+ 65
- 0
manila/scheduler/weighers/host_affinity.py View File

@@ -0,0 +1,65 @@
# Copyright 2019 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila import context
from manila.db import api as db_api
from manila.scheduler.weighers import base_host
from manila.share import utils as share_utils


class HostAffinityWeigher(base_host.BaseHostWeigher):
    """Weighs hosts by proximity to a parent snapshot's share pool."""

    def _weigh_object(self, obj, weight_properties):
        """Weigh hosts based on their proximity to the source's share pool.

        If no snapshot_id was provided will return 0, otherwise, if source
        and destination hosts are located on:
        1. same back ends and pools: host is a perfect choice (100)
        2. same back ends and different pools: host is a very good choice
           (75)
        3. different back ends with the same AZ: host is a good choice (50)
        4. different back ends and AZs: host isn't a good choice (25)

        :param obj: host state to weigh.
        :param weight_properties: dict carrying the 'request_spec' and the
            'availability_zone_id' of the request.
        :returns: an integer weight in {0, 25, 50, 75, 100}.
        """

        ctx = context.get_admin_context()
        request_spec = weight_properties.get('request_spec')
        snapshot_id = request_spec.get('snapshot_id')
        snapshot_host = request_spec.get('snapshot_host')

        if None in [snapshot_id, snapshot_host]:
            # NOTE(silvacarlose): if the request does not contain a
            # snapshot_id or a snapshot_host, the user is not creating a
            # share from a snapshot and we don't need to weigh the host.
            return 0

        snapshot_ref = db_api.share_snapshot_get(ctx, snapshot_id)
        # Source host info: pool, backend and availability zone
        src_pool = share_utils.extract_host(snapshot_host, 'pool')
        src_backend = share_utils.extract_host(snapshot_host, 'backend')
        src_az = snapshot_ref['share']['availability_zone']
        # Destination host info: pool, backend and availability zone
        dst_pool = share_utils.extract_host(obj.host, 'pool')
        dst_backend = share_utils.extract_host(obj.host, 'backend')
        # NOTE(dviroel): All hosts were already filtered by the availability
        # zone parameter.
        dst_az = None
        # Use .get() so a request spec without an availability_zone_id does
        # not raise KeyError; dst_az simply stays None in that case.
        if weight_properties.get('availability_zone_id'):
            dst_az = db_api.availability_zone_get(
                ctx, weight_properties['availability_zone_id']).name

        if src_backend == dst_backend:
            return 100 if (src_pool and src_pool == dst_pool) else 75
        else:
            return 50 if (src_az and src_az == dst_az) else 25

+ 34
- 12
manila/share/api.py View File

@@ -46,7 +46,11 @@ share_api_opts = [
default=False,
help='If set to False, then share creation from snapshot will '
'be performed on the same host. '
'If set to True, then scheduling step will be used.')
'If set to True, then the scheduler will be used. '
'When enabling this option, make sure that the '
'CreateFromSnapshotFilter filter is enabled and that hosts '
'are reporting the replication_domain capability.'
)
]

CONF = cfg.CONF
@@ -190,7 +194,18 @@ class API(base.Base):
share_type_id = share_type['id'] if share_type else None
else:
source_share = self.db.share_get(context, snapshot['share_id'])
availability_zone = source_share['instance']['availability_zone']
source_share_az = source_share['instance']['availability_zone']
if availability_zone is None:
availability_zone = source_share_az
elif (availability_zone != source_share_az
and not CONF.use_scheduler_creating_share_from_snapshot):
LOG.error("The specified availability zone must be the same "
"as parent share when you have the configuration "
"option 'use_scheduler_creating_share_from_snapshot'"
" set to False.")
msg = _("The specified availability zone must be the same "
"as the parent share when creating from snapshot.")
raise exception.InvalidInput(reason=msg)
if share_type is None:
# Grab the source share's share_type if no new share type
# has been provided.
@@ -327,19 +342,23 @@ class API(base.Base):
context, reservations, share_type_id=share_type_id)

host = None
if snapshot and not CONF.use_scheduler_creating_share_from_snapshot:
# Shares from snapshots with restriction - source host only.
# It is common situation for different types of backends.
host = snapshot['share']['instance']['host']

elif share_group:
snapshot_host = None
if snapshot:
snapshot_host = snapshot['share']['instance']['host']
if not CONF.use_scheduler_creating_share_from_snapshot:
# Shares from snapshots with restriction - source host only.
# It is common situation for different types of backends.
host = snapshot['share']['instance']['host']

if share_group and host is None:
host = share_group['host']

self.create_instance(
context, share, share_network_id=share_network_id, host=host,
availability_zone=availability_zone, share_group=share_group,
share_group_snapshot_member=share_group_snapshot_member,
share_type_id=share_type_id, availability_zones=availability_zones)
share_type_id=share_type_id, availability_zones=availability_zones,
snapshot_host=snapshot_host)

# Retrieve the share with instance details
share = self.db.share_get(context, share['id'])
@@ -415,14 +434,16 @@ class API(base.Base):
def create_instance(self, context, share, share_network_id=None,
host=None, availability_zone=None,
share_group=None, share_group_snapshot_member=None,
share_type_id=None, availability_zones=None):
share_type_id=None, availability_zones=None,
snapshot_host=None):
request_spec, share_instance = (
self.create_share_instance_and_get_request_spec(
context, share, availability_zone=availability_zone,
share_group=share_group, host=host,
share_network_id=share_network_id,
share_type_id=share_type_id,
availability_zones=availability_zones))
availability_zones=availability_zones,
snapshot_host=snapshot_host))

if share_group_snapshot_member:
# Inherit properties from the share_group_snapshot_member
@@ -461,7 +482,7 @@ class API(base.Base):
self, context, share, availability_zone=None,
share_group=None, host=None, share_network_id=None,
share_type_id=None, cast_rules_to_readonly=False,
availability_zones=None):
availability_zones=None, snapshot_host=None):

availability_zone_id = None
if availability_zone:
@@ -528,6 +549,7 @@ class API(base.Base):
'share_proto': share['share_proto'],
'share_id': share['id'],
'snapshot_id': share['snapshot_id'],
'snapshot_host': snapshot_host,
'share_type': share_type,
'share_group': share_group,
'availability_zone_id': availability_zone_id,


+ 118
- 7
manila/share/driver.py View File

@@ -24,6 +24,7 @@ import time
from oslo_config import cfg
from oslo_log import log

from manila.common import constants
from manila import exception
from manila.i18n import _
from manila import network
@@ -649,8 +650,46 @@ class ShareDriver(object):
raise NotImplementedError()

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Is called to create share from snapshot."""
share_server=None, parent_share=None):
"""Is called to create share from snapshot.

Creating a share from snapshot can take longer than a simple clone
operation if data copy is required from one host to another. For this
reason the driver is able to complete this creation asynchronously, by
providing a 'creating_from_snapshot' status in the model update.

When answering asynchronously, drivers must implement the call
'get_share_status' in order to provide updates for shares with
'creating_from_snapshot' status.

It is expected that the driver returns a model update to the share
manager that contains: share status and a list of export_locations.
A list of 'export_locations' is mandatory only for share in 'available'
status.
The current supported status are 'available' and
'creating_from_snapshot'.

:param context: Current context
:param share: Share instance model with share data.
:param snapshot: Snapshot instance model.
:param share_server: Share server model or None.
:param parent_share: Share model from parent snapshot with share data
and share server model.

:returns: a dictionary of updates containing current share status and
its export_location (if available).

Example::

{
'status': 'available',
'export_locations': [{...}, {...}],
}

:raises: ShareBackendException.
A ShareBackendException in this method will set the instance to
'error' and the operation will end.
"""
raise NotImplementedError()

def create_snapshot(self, context, snapshot, share_server=None):
@@ -1311,6 +1350,15 @@ class ShareDriver(object):
share_server=None):
"""Create a share group from a share group snapshot.

When creating a share from snapshot operation takes longer than a
simple clone operation, drivers will be able to complete this creation
asynchronously, by providing a 'creating_from_snapshot' status in the
returned model update. The current supported status are 'available' and
'creating_from_snapshot'.

In order to provide updates for shares with 'creating_from_snapshot'
status, drivers must implement the call 'get_share_status'.

:param context:
:param share_group_dict: The share group details
EXAMPLE:
@@ -1372,12 +1420,27 @@ class ShareDriver(object):

share_update_list - a list of dictionaries containing dicts for
every share created in the share group. Any share dicts should at a
minimum contain the 'id' key and 'export_locations'.
minimum contain the 'id' key and, for synchronous creation, the
'export_locations'. For asynchronous share creation this dict
must also contain the key 'status' with the value set to
'creating_from_snapshot'. The current supported status are
'available' and 'creating_from_snapshot'.
Export locations should be in the same format as returned by
a share_create. This list may be empty or None. EXAMPLE:
.. code::

[{'id': 'uuid', 'export_locations': [{...}, {...}]}]
[
{
'id': 'uuid',
'export_locations': [{...}, {...}],
},
{
'id': 'uuid',
'export_locations': [],
'status': 'creating_from_snapshot',
},
]

"""
# Ensure that the share group snapshot has members
if not share_group_snapshot_dict['share_group_snapshot_members']:
@@ -1389,18 +1452,38 @@ class ShareDriver(object):

LOG.debug('Creating share group from group snapshot %s.',
share_group_snapshot_dict['id'])

for clone in clone_list:
kwargs = {}
share_update_info = {}
if self.driver_handles_share_servers:
kwargs['share_server'] = share_server
export_locations = (
model_update = (
self.create_share_from_snapshot(
context, clone['share'], clone['snapshot'], **kwargs))
share_update_list.append({
if isinstance(model_update, dict):
status = model_update.get('status')
# NOTE(dviroel): share status is mandatory when answering
# a model update. If not provided, won't be possible to
# determine if was successfully created.
if status is None:
msg = _("Driver didn't provide a share status.")
raise exception.InvalidShareInstance(reason=msg)
if status not in [constants.STATUS_AVAILABLE,
constants.STATUS_CREATING_FROM_SNAPSHOT]:
msg = _('Driver returned an invalid status: %s') % status
raise exception.InvalidShareInstance(reason=msg)
share_update_info.update({'status': status})
export_locations = model_update.get('export_locations', [])
else:
# NOTE(dviroel): the driver that doesn't implement the new
# model_update will return only the export locations
export_locations = model_update

share_update_info.update({
'id': clone['share']['id'],
'export_locations': export_locations,
})
share_update_list.append(share_update_info)
return None, share_update_list

def delete_share_group(self, context, share_group_dict, share_server=None):
@@ -2742,3 +2825,31 @@ class ShareDriver(object):
this share server.
"""
raise NotImplementedError()

def get_share_status(self, share, share_server=None):
    """Report the current status of an asynchronously created share.

    The share manager polls this method (every 'periodic_interval'
    seconds) for shares whose creation is still pending, so drivers that
    answer 'create_share_from_snapshot' asynchronously can report
    completion here.

    :param share: share to get updated status from.
    :param share_server: share server model or None.
    :returns: a dict of updates carrying the current share status —
        one of 'available', 'creating_from_snapshot' or 'error' — plus,
        when available, a list of export locations and a 'progress'
        field indicating how far along the creation is.
        EXAMPLE::

            {
                'status': 'available',
                'export_locations': [{...}, {...}],
                'progress': '50%'
            }

    :raises: ShareBackendException.
        Raising it from this method moves the share instance to 'error'.
    """
    # No default implementation; concrete drivers must override.
    raise NotImplementedError()

+ 1
- 1
manila/share/drivers/dell_emc/driver.py View File

@@ -210,7 +210,7 @@ class EMCShareDriver(driver.ShareDriver):
return location

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
location = self.plugin.create_share_from_snapshot(
context, share, snapshot, share_server)


+ 1
- 1
manila/share/drivers/dell_emc/plugins/powermax/connection.py View File

@@ -214,7 +214,7 @@ class PowerMaxStorageConnection(driver.StorageConnection):
'share_name': share_name})

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from a snapshot - clone a snapshot."""
share_name = share['id']



+ 1
- 1
manila/share/drivers/dell_emc/plugins/unity/connection.py View File

@@ -375,7 +375,7 @@ class UnityStorageConnection(driver.StorageConnection):
return locations

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from a snapshot - clone a snapshot."""
share_name = share['id']



+ 1
- 1
manila/share/drivers/dell_emc/plugins/vnx/connection.py View File

@@ -211,7 +211,7 @@ class VNXStorageConnection(driver.StorageConnection):
'share_name': share_name})

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from a snapshot - clone a snapshot."""
share_name = share['id']



+ 1
- 1
manila/share/drivers/generic.py View File

@@ -640,7 +640,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):

@ensure_server
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
return self._create_share(
context, share,


+ 1
- 1
manila/share/drivers/glusterfs/layout.py View File

@@ -235,7 +235,7 @@ class GlusterfsShareLayoutBase(object):

@abc.abstractmethod
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""

@abc.abstractmethod


+ 1
- 1
manila/share/drivers/glusterfs/layout_directory.py View File

@@ -188,7 +188,7 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
pass

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
raise NotImplementedError

def create_snapshot(self, context, snapshot, share_server=None):


+ 1
- 1
manila/share/drivers/glusterfs/layout_volume.py View File

@@ -463,7 +463,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
return backend_snapshot_name

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
old_gmgr = self._share_manager(snapshot['share_instance'])

# Snapshot clone feature in GlusterFS server essential to support this


+ 1
- 1
manila/share/drivers/hdfs/hdfs_native.py View File

@@ -227,7 +227,7 @@ class HDFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
return self._get_share_path(share)

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Creates a snapshot."""
self._create_share(share)
share_path = '/' + share['name']


+ 1
- 1
manila/share/drivers/hitachi/hnas/driver.py View File

@@ -448,7 +448,7 @@ class HitachiHNASDriver(driver.ShareDriver):
{'id': snapshot['id']})

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
r"""Creates a new share from snapshot.

:param context: The `context.RequestContext` object for the request


+ 1
- 1
manila/share/drivers/hpe/hpe_3par_driver.py View File

@@ -501,7 +501,7 @@ class HPE3ParShareDriver(driver.ShareDriver):
return self._hpe3par.build_export_locations(protocol, ips, path)

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""

fpg, vfs, ips = self._get_pool_location(share, share_server)


+ 1
- 1
manila/share/drivers/huawei/base.py View File

@@ -66,7 +66,7 @@ class HuaweiBase(object):

@abc.abstractmethod
def create_share_from_snapshot(self, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create share from snapshot."""

@abc.abstractmethod


+ 1
- 1
manila/share/drivers/huawei/huawei_nas.py View File

@@ -119,7 +119,7 @@ class HuaweiNasDriver(driver.ShareDriver):
self.plugin.extend_share(share, new_size, share_server)

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from snapshot."""
LOG.debug("Create a share from snapshot %s.", snapshot['snapshot_id'])
location = self.plugin.create_share_from_snapshot(share, snapshot)


+ 1
- 1
manila/share/drivers/huawei/v3/connection.py View File

@@ -383,7 +383,7 @@ class V3StorageConnection(driver.HuaweiBase):
return share

def create_share_from_snapshot(self, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from snapshot."""
share_fs_id = self.helper.get_fsid_by_name(snapshot['share_name'])
if not share_fs_id:


+ 1
- 1
manila/share/drivers/ibm/gpfs.py View File

@@ -493,7 +493,7 @@ class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
return location

def create_share_from_snapshot(self, ctx, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from a snapshot."""
share_path = self._get_share_path(share)
self._create_share_from_snapshot(share, snapshot, share_path)


+ 1
- 1
manila/share/drivers/infinidat/infinibox.py View File

@@ -403,7 +403,7 @@ class InfiniboxShareDriver(driver.ShareDriver):

@infinisdk_to_manila_exceptions
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
name = self._make_share_name(share)
infinidat_snapshot = self._get_infinidat_snapshot(snapshot)
infinidat_new_share = infinidat_snapshot.create_snapshot(


+ 1
- 1
manila/share/drivers/inspur/as13000/as13000_nas.py View File

@@ -343,7 +343,7 @@ class AS13000ShareDriver(driver.ShareDriver):

@inspur_driver_debug_trace
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from snapshot."""
pool, name, size, proto = self._get_share_instance_pnsp(share)



+ 1
- 1
manila/share/drivers/lvm.py View File

@@ -242,7 +242,7 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
return location

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
self._allocate_container(share)
snapshot_device_name = self._get_local_path(snapshot)


+ 1
- 1
manila/share/drivers/maprfs/maprfs_native.py View File

@@ -206,7 +206,7 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
raise exception.ShareResourceNotFound(share_id=share['share_id'])

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Creates a share from snapshot."""
metadata = self.api.get_share_metadata(context,
{'id': share['share_id']})


+ 1
- 1
manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py View File

@@ -485,7 +485,7 @@ class NetAppCmodeFileStorageLibrary(object):

@na_utils.trace
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Creates new share from snapshot."""
vserver, vserver_client = self._get_vserver(share_server=share_server)
self._allocate_container_from_snapshot(


+ 1
- 1
manila/share/drivers/nexenta/ns4/nexenta_nas.py View File

@@ -77,7 +77,7 @@ class NexentaNasDriver(driver.ShareDriver):
return self.helper.create_filesystem(share)

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
LOG.debug('Creating share from snapshot %s.', snapshot['name'])
return self.helper.create_share_from_snapshot(share, snapshot)


+ 1
- 1
manila/share/drivers/nexenta/ns5/nexenta_nas.py View File

@@ -189,7 +189,7 @@ class NexentaNasDriver(driver.ShareDriver):
return '%s:%s' % (self.nas_host, filesystem['mountPoint'])

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
snapshot_path = self._get_snapshot_path(snapshot)
LOG.debug('Creating share from snapshot %s.', snapshot_path)


+ 1
- 1
manila/share/drivers/qnap/qnap.py View File

@@ -519,7 +519,7 @@ class QnapShareDriver(driver.ShareDriver):
retries=5)
@utils.synchronized('qnap-create_share_from_snapshot')
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from a snapshot."""
LOG.debug('Entering create_share_from_snapshot. The source '
'snapshot=%(snap)s. The created share=%(share)s',


+ 1
- 1
manila/share/drivers/tegile/tegile.py View File

@@ -281,7 +281,7 @@ class TegileShareDriver(driver.ShareDriver):

@debugger
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from a snapshot - clone a snapshot."""
pool, project, share_name = self._get_pool_project_share_name(share)



+ 1
- 1
manila/share/drivers/veritas/veritas_isa.py View File

@@ -524,7 +524,7 @@ class ACCESSShareDriver(driver.ExecuteMixin, driver.ShareDriver):
json.dumps(data2), 'DELETE')

def create_share_from_snapshot(self, ctx, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""create share from a snapshot."""
sharename = snapshot['share_name']
va_sharename = self._get_va_share_name(sharename)


+ 1
- 1
manila/share/drivers/zfsonlinux/driver.py View File

@@ -580,7 +580,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):

@ensure_share_server_not_provided
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Is called to create a share from snapshot."""
dataset_name = self._get_dataset_name(share)
ssh_cmd = '%(username)s@%(host)s' % {


+ 1
- 1
manila/share/drivers/zfssa/zfssashare.py View File

@@ -242,7 +242,7 @@ class ZFSSAShareDriver(driver.ShareDriver):
snapshot['id'])

def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
share_server=None, parent_share=None):
"""Create a share from a snapshot - clone a snapshot."""
lcfg = self.configuration
LOG.debug("ZFSSAShareDriver.create_share_from_snapshot: clone=%s",


+ 227
- 62
manila/share/manager.py View File

@@ -403,7 +403,7 @@ class ShareManager(manager.SchedulerDependentManager):
self._ensure_share_instance_has_pool(ctxt, share_instance)
share_instance = self.db.share_instance_get(
ctxt, share_instance['id'], with_share_data=True)
share_instance_dict = self._get_share_replica_dict(
share_instance_dict = self._get_share_instance_dict(
ctxt, share_instance)
update_share_instances.append(share_instance_dict)

@@ -541,6 +541,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_instance_update(context, share_instance['id'],
{'status': constants.STATUS_ERROR})
parent_share_server = None
parent_share_same_dest = False
if snapshot:
parent_share_server_id = (
snapshot['share']['instance']['share_server_id'])
@@ -562,6 +563,8 @@ class ShareManager(manager.SchedulerDependentManager):
raise exception.InvalidShareServer(
share_server_id=parent_share_server
)
parent_share_same_dest = (snapshot['share']['instance']['host']
== share_instance['host'])
share_network_subnet_id = None
if share_network_id:
share_network_subnet = (
@@ -578,7 +581,7 @@ class ShareManager(manager.SchedulerDependentManager):
parent_share_server['share_network_subnet_id'])

def get_available_share_servers():
if parent_share_server:
if parent_share_server and parent_share_same_dest:
return [parent_share_server]
else:
return (
@@ -929,9 +932,9 @@ class ShareManager(manager.SchedulerDependentManager):
"All snapshots must have '%(status)s' status to be "
"migrated by the driver along with share "
"%(share)s.") % {
'share': share_ref['id'],
'status': constants.STATUS_AVAILABLE
}
'share': share_ref['id'],
'status': constants.STATUS_AVAILABLE
}
raise exception.ShareMigrationFailed(reason=msg)

src_snap_instances = [x.instance for x in src_snapshots]
@@ -1376,7 +1379,7 @@ class ShareManager(manager.SchedulerDependentManager):

self.db.share_instance_update(
context, dest_share_instance['id'],
{'status': constants.STATUS_AVAILABLE})
{'status': constants.STATUS_AVAILABLE, 'progress': '100%'})

self.db.share_instance_update(context, src_share_instance['id'],
{'status': constants.STATUS_INACTIVE})
@@ -1557,7 +1560,7 @@ class ShareManager(manager.SchedulerDependentManager):

self.db.share_instance_update(
context, dest_share_instance['id'],
{'status': constants.STATUS_AVAILABLE})
{'status': constants.STATUS_AVAILABLE, 'progress': '100%'})

self.db.share_instance_update(context, src_instance_id,
{'status': constants.STATUS_INACTIVE})
@@ -1753,17 +1756,44 @@ class ShareManager(manager.SchedulerDependentManager):
else:
share_server = None

status = constants.STATUS_AVAILABLE
try:
if snapshot_ref:
export_locations = self.driver.create_share_from_snapshot(
# NOTE(dviroel): we need to provide the parent share info to
# assist drivers that create shares from snapshot in different
# pools or back ends
parent_share_instance = self.db.share_instance_get(
context, snapshot_ref['share']['instance']['id'],
with_share_data=True)
parent_share_dict = self._get_share_instance_dict(
context, parent_share_instance)
model_update = self.driver.create_share_from_snapshot(
context, share_instance, snapshot_ref.instance,
share_server=share_server)
share_server=share_server, parent_share=parent_share_dict)
if isinstance(model_update, list):
# NOTE(dviroel): the driver that doesn't implement the new
# model_update will return only the export locations
export_locations = model_update
else:
# NOTE(dviroel): share status is mandatory when answering
# a model update. If not provided, won't be possible to
# determine if was successfully created.
status = model_update.get('status')
if status is None:
msg = _("Driver didn't provide a share status.")
raise exception.InvalidShareInstance(reason=msg)
export_locations = model_update.get('export_locations')
else:
export_locations = self.driver.create_share(
context, share_instance, share_server=share_server)
if status not in [constants.STATUS_AVAILABLE,
constants.STATUS_CREATING_FROM_SNAPSHOT]:
msg = _('Driver returned an invalid status: %s') % status
raise exception.InvalidShareInstance(reason=msg)

self.db.share_export_locations_update(
context, share_instance['id'], export_locations)
if export_locations:
self.db.share_export_locations_update(
context, share_instance['id'], export_locations)

except Exception as e:
with excutils.save_and_reraise_exception():
@@ -1801,9 +1831,11 @@ class ShareManager(manager.SchedulerDependentManager):
else:
LOG.info("Share instance %s created successfully.",
share_instance_id)
progress = '100%' if status == constants.STATUS_AVAILABLE else '0%'
updates = {
'status': constants.STATUS_AVAILABLE,
'status': status,
'launched_at': timeutils.utcnow(),
'progress': progress
}
if share.get('replication_type'):
updates['replica_state'] = constants.REPLICA_STATE_ACTIVE
@@ -1957,9 +1989,9 @@ class ShareManager(manager.SchedulerDependentManager):
with_share_data=True, with_share_server=True)
)

replica_list = [self._get_share_replica_dict(context, r)
replica_list = [self._get_share_instance_dict(context, r)
for r in replica_list]
share_replica = self._get_share_replica_dict(context, share_replica)
share_replica = self._get_share_instance_dict(context, share_replica)

try:
replica_ref = self.driver.create_replica(
@@ -1999,7 +2031,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_replica_update(
context, share_replica['id'],
{'status': constants.STATUS_AVAILABLE,
'replica_state': replica_ref.get('replica_state')})
'replica_state': replica_ref.get('replica_state'),
'progress': '100%'})

if replica_ref.get('access_rules_status'):
self._update_share_replica_access_rules_state(
@@ -2037,12 +2070,12 @@ class ShareManager(manager.SchedulerDependentManager):
with_share_data=True, with_share_server=True)
)

replica_list = [self._get_share_replica_dict(context, r)
replica_list = [self._get_share_instance_dict(context, r)
for r in replica_list]
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
for s in replica_snapshots]
share_server = self._get_share_server(context, share_replica)
share_replica = self._get_share_replica_dict(context, share_replica)
share_replica = self._get_share_instance_dict(context, share_replica)

try:
self.access_helper.update_access_rules(
@@ -2157,9 +2190,9 @@ class ShareManager(manager.SchedulerDependentManager):
access_rules = self.db.share_access_get_all_for_share(
context, share_replica['share_id'])

replica_list = [self._get_share_replica_dict(context, r)
replica_list = [self._get_share_instance_dict(context, r)
for r in replica_list]
share_replica = self._get_share_replica_dict(context, share_replica)
share_replica = self._get_share_instance_dict(context, share_replica)

try:
updated_replica_list = (
@@ -2344,10 +2377,10 @@ class ShareManager(manager.SchedulerDependentManager):
for x in share_snapshots
if x['aggregate_status'] == constants.STATUS_AVAILABLE]

replica_list = [self._get_share_replica_dict(context, r)
replica_list = [self._get_share_instance_dict(context, r)
for r in replica_list]

share_replica = self._get_share_replica_dict(context, share_replica)
share_replica = self._get_share_instance_dict(context, share_replica)

try:
replica_state = self.driver.update_replica_state(
@@ -3258,7 +3291,7 @@ class ShareManager(manager.SchedulerDependentManager):

# Make primitives to pass the information to the driver.

replica_list = [self._get_share_replica_dict(context, r)
replica_list = [self._get_share_instance_dict(context, r)
for r in replica_list]
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
for s in replica_snapshots]
@@ -3317,9 +3350,9 @@ class ShareManager(manager.SchedulerDependentManager):
context, snapshot_instance_filters))[0]

# Make primitives to pass the information to the driver
replica_list = [self._get_share_replica_dict(context, replica)
replica_list = [self._get_share_instance_dict(context, replica)
for replica in replica_list]
active_replica = self._get_share_replica_dict(context, active_replica)
active_replica = self._get_share_instance_dict(context, active_replica)
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
for s in replica_snapshots]
active_replica_snapshot = self._get_snapshot_instance_dict(
@@ -3391,7 +3424,7 @@ class ShareManager(manager.SchedulerDependentManager):
with_share_server=True)
)

replica_list = [self._get_share_replica_dict(context, r)
replica_list = [self._get_share_instance_dict(context, r)
for r in replica_list]
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
for s in replica_snapshots]
@@ -3521,7 +3554,7 @@ class ShareManager(manager.SchedulerDependentManager):
with_share_data=True, with_share_server=True)
)

replica_list = [self._get_share_replica_dict(context, r)
replica_list = [self._get_share_instance_dict(context, r)
for r in replica_list]
replica_snapshots = replica_snapshots or []

@@ -3531,7 +3564,7 @@ class ShareManager(manager.SchedulerDependentManager):
for s in replica_snapshots]
replica_snapshot = self._get_snapshot_instance_dict(
context, replica_snapshot)
share_replica = self._get_share_replica_dict(context, share_replica)
share_replica = self._get_share_instance_dict(context, share_replica)
share_server = share_replica['share_server']
snapshot_update = None

@@ -4075,6 +4108,20 @@ class ShareManager(manager.SchedulerDependentManager):
if share_update_list:
for share in share_update_list:
values = copy.deepcopy(share)
# NOTE(dviroel): To keep backward compatibility we can't
# keep 'status' as a mandatory parameter. We'll set its
# value to 'available' as default.
i_status = values.get('status', constants.STATUS_AVAILABLE)
if i_status not in [
constants.STATUS_AVAILABLE,
constants.STATUS_CREATING_FROM_SNAPSHOT]:
msg = _(
'Driver returned an invalid status %s') % i_status
raise exception.InvalidShareInstance(reason=msg)
values['status'] = i_status
values['progress'] = (
'100%' if i_status == constants.STATUS_AVAILABLE
else '0%')
values.pop('id')
export_locations = values.pop('export_locations')
self.db.share_instance_update(context, share['id'], values)
@@ -4308,45 +4355,83 @@ class ShareManager(manager.SchedulerDependentManager):
LOG.info("Share group snapshot %s: deleted successfully",
share_group_snapshot_id)

def _get_share_replica_dict(self, context, share_replica):
def _get_share_server_dict(self, context, share_server):
share_server_ref = {
'id': share_server.get('id'),
'project_id': share_server.get('project_id'),
'updated_at': share_server.get('updated_at'),
'status': share_server.get('status'),
'host': share_server.get('host'),
'share_network_name': share_server.get('share_network_name'),
'share_network_id': share_server.get('share_network_id'),
'created_at': share_server.get('created_at'),
'backend_details': share_server.get('backend_details'),
'share_network_subnet_id':
share_server.get('share_network_subnet_id', None),
'is_auto_deletable': share_server.get('is_auto_deletable', None),
'identifier': share_server.get('identifier', None),
'network_allocations': share_server.get('network_allocations',
None)
}
return share_server_ref

def _get_export_location_dict(self, context, export_location):
export_location_ref = {
'id': export_location.get('uuid'),
'path': export_location.get('path'),
'created_at': export_location.get('created_at'),
'updated_at': export_location.get('updated_at'),
'share_instance_id':
export_location.get('share_instance_id', None),
'is_admin_only': export_location.get('is_admin_only', None),
}
return export_location_ref

def _get_share_instance_dict(self, context, share_instance):
    """Flatten a share instance model into a dict of primitives.

    Nested share server and export location models are flattened too,
    via _get_share_server_dict and _get_export_location_dict.
    """
    # TODO(gouthamr): remove method when the db layer returns primitives
    share_instance_ref = {
        'id': share_instance.get('id'),
        'name': share_instance.get('name'),
        'share_id': share_instance.get('share_id'),
        'host': share_instance.get('host'),
        'status': share_instance.get('status'),
        'replica_state': share_instance.get('replica_state'),
        'availability_zone_id': share_instance.get('availability_zone_id'),
        'share_network_id': share_instance.get('share_network_id'),
        'share_server_id': share_instance.get('share_server_id'),
        'deleted': share_instance.get('deleted'),
        'terminated_at': share_instance.get('terminated_at'),
        'launched_at': share_instance.get('launched_at'),
        'scheduled_at': share_instance.get('scheduled_at'),
        'updated_at': share_instance.get('updated_at'),
        'deleted_at': share_instance.get('deleted_at'),
        'created_at': share_instance.get('created_at'),
        'share_server': self._get_share_server(context, share_instance),
        'access_rules_status': share_instance.get('access_rules_status'),
        # Share details
        'user_id': share_instance.get('user_id'),
        'project_id': share_instance.get('project_id'),
        'size': share_instance.get('size'),
        'display_name': share_instance.get('display_name'),
        'display_description': share_instance.get('display_description'),
        'snapshot_id': share_instance.get('snapshot_id'),
        'share_proto': share_instance.get('share_proto'),
        'share_type_id': share_instance.get('share_type_id'),
        'is_public': share_instance.get('is_public'),
        'share_group_id': share_instance.get('share_group_id'),
        'source_share_group_snapshot_member_id': share_instance.get(
            'source_share_group_snapshot_member_id'),
        'availability_zone': share_instance.get('availability_zone'),
    }

    if share_instance_ref['share_server']:
        share_instance_ref['share_server'] = self._get_share_server_dict(
            context, share_instance_ref['share_server']
        )
    share_instance_ref['export_locations'] = [
        self._get_export_location_dict(context, el) for
        el in share_instance.get('export_locations') or []
    ]
    return share_instance_ref

def _get_snapshot_instance_dict(self, context, snapshot_instance,
snapshot=None):
@@ -4415,3 +4500,83 @@ class ShareManager(manager.SchedulerDependentManager):
context, share, share_instance, "consumed.size",
extra_usage_info={'used_size': si['used_size'],
'gathered_at': si['gathered_at']})

@periodic_task.periodic_task(spacing=CONF.periodic_interval)
@utils.require_driver_initialized
def periodic_share_status_update(self, context):
    """Invokes share driver to update shares status."""
    LOG.debug("Updating status of share instances.")
    # Only instances still waiting on an async create-from-snapshot
    # need a status refresh from the driver.
    pending_instances = self.db.share_instances_get_all_by_host(
        context, self.host, with_share_data=True,
        status=constants.STATUS_CREATING_FROM_SNAPSHOT)

    for instance in pending_instances:
        self._update_share_status(
            context, self._get_share_instance_dict(context, instance))

def _update_share_status(self, context, share_instance):
    """Ask the driver for fresh status of one share instance and persist it.

    Called periodically for instances stuck in
    ``creating_from_snapshot``. The driver may report an error (instance
    is marked ``error`` and a user message is recorded), a terminal
    ``available`` state (progress forced to ``100%``), or continued
    progress. Export locations, when reported, are stored regardless of
    the returned status.

    :param context: request context.
    :param share_instance: share instance dict (with share data).
    """
    server = self._get_share_server(context, share_instance)
    if server is not None:
        server = self._get_share_server_dict(context, server)

    try:
        updates = self.driver.get_share_status(share_instance, server)
    except Exception:
        LOG.exception(
            ("Unexpected driver error occurred while updating status for "
             "share instance %(id)s that belongs to share '%(share_id)s'"),
            {'id': share_instance['id'],
             'share_id': share_instance['share_id']}
        )
        # Treat a driver blow-up the same as an explicit error report.
        updates = {'status': constants.STATUS_ERROR}

    new_status = updates.get('status')

    if new_status == constants.STATUS_ERROR:
        err_msg = ("Status of share instance %(id)s that belongs to share "
                   "%(share_id)s was updated to '%(status)s'."
                   % {'id': share_instance['id'],
                      'share_id': share_instance['share_id'],
                      'status': new_status})
        LOG.error(err_msg)
        # Clear progress: it is meaningless once the create has failed.
        self.db.share_instance_update(
            context, share_instance['id'],
            {'status': constants.STATUS_ERROR,
             'progress': None})
        self.message_api.create(
            context,
            message_field.Action.UPDATE,
            share_instance['project_id'],
            resource_type=message_field.Resource.SHARE,
            resource_id=share_instance['share_id'],
            detail=message_field.Detail.DRIVER_FAILED_CREATING_FROM_SNAP)
        return

    if new_status in (constants.STATUS_AVAILABLE,
                      constants.STATUS_CREATING_FROM_SNAPSHOT):
        instance_updates = {'status': new_status}
        # A share that became available is by definition fully copied.
        if new_status == constants.STATUS_AVAILABLE:
            reported_progress = '100%'
        else:
            reported_progress = updates.get('progress')
        if reported_progress is not None:
            instance_updates['progress'] = reported_progress
        self.db.share_instance_update(
            context, share_instance['id'], instance_updates)
        LOG.debug("Status of share instance %(id)s that belongs to share "
                  "%(share_id)s was updated to '%(status)s'."
                  % {'id': share_instance['id'],
                     'share_id': share_instance['share_id'],
                     'status': new_status})

    # Export locations may arrive before the instance turns available,
    # so they are persisted independently of the status branch above.
    export_locations = updates.get('export_locations')
    if export_locations:
        self.db.share_export_locations_update(
            context, share_instance['id'], export_locations)

+ 9
- 2
manila/tests/api/v2/test_share_instances.py View File

@@ -122,13 +122,20 @@ class ShareInstancesAPITest(test.TestCase):
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'index')

def test_show(self):
@ddt.data('2.3', '2.54')
def test_show(self, version):
test_instance = db_utils.create_share(size=1).instance
id = test_instance['id']

actual_result = self.controller.show(self._get_request('fake'), id)
actual_result = self.controller.show(
self._get_request('fake', version=version), id)

self.assertEqual(id, actual_result['share_instance']['id'])
if (api_version_request.APIVersionRequest(version) >=
api_version_request.APIVersionRequest("2.54")):
self.assertIn("progress", actual_result['share_instance'])
else:
self.assertNotIn("progress", actual_result['share_instance'])
self.mock_policy_check.assert_called_once_with(
self.admin_context, self.resource_name, 'show')



+ 26
- 1
manila/tests/api/views/test_shares.py View File

@@ -13,9 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.

import copy
import ddt

from manila.api.views import shares
from manila.common import constants
from manila import test
from manila.tests.api.contrib import stubs
from manila.tests.api import fakes
@@ -44,6 +46,7 @@ class ViewBuilderTestCase(test.TestCase):
'name': 'fake_share_type_name',
},
'share_type_id': 'fake_share_type_id',
'progress': '100%',
},
'replication_type': 'fake_replication_type',
'has_replicas': False,
@@ -51,13 +54,14 @@ class ViewBuilderTestCase(test.TestCase):
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': True,
'progress': '100%',
}
return stubs.stub_share('fake_id', **fake_share)

def test__collection_name(self):
self.assertEqual('shares', self.builder._collection_name)

@ddt.data('2.6', '2.9', '2.10', '2.11', '2.16', '2.24', '2.27')
@ddt.data('2.6', '2.9', '2.10', '2.11', '2.16', '2.24', '2.27', '2.54')
def test_detail(self, microversion):
req = fakes.HTTPRequest.blank('/shares', version=microversion)

@@ -85,5 +89,26 @@ class ViewBuilderTestCase(test.TestCase):
expected['create_share_from_snapshot_support'] = True
if self.is_microversion_ge(microversion, '2.27'):
expected['revert_to_snapshot_support'] = True
if self.is_microversion_ge(microversion, '2.54'):
expected['progress'] = '100%'

self.assertSubDictMatch(expected, result['share'])

@ddt.data('1.0', '2.51', '2.54')
def test_detail_translate_creating_from_snapshot_status(self,
microversion):
req = fakes.HTTPRequest.blank('/shares', version=microversion)
new_share_status = constants.STATUS_CREATING_FROM_SNAPSHOT