Blackify openstack.block_storage
Black used with the '-l 79 -S' flags. A future change will ignore this commit in git-blame history by adding a 'git-blame-ignore-revs' file.

Change-Id: I502d2788eb75e674e8b399034513996c81407216
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
parent 542ddaa1ad
commit 34da09f312
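For readers unfamiliar with the flags: '-l 79' caps lines at 79 columns and '-S' keeps the existing single-quoted strings instead of normalising them to double quotes. A rough sketch of the effect, taken from the first hunk below (indentation restored here for readability, since the extracted hunks strip leading whitespace; bodies elided):

    # before
    def create_image(
            self, name, volume, allow_duplicates,
            container_format, disk_format, wait, timeout):
        ...

    # after black -l 79 -S
    def create_image(
        self,
        name,
        volume,
        allow_duplicates,
        container_format,
        disk_format,
        wait,
        timeout,
    ):
        ...

The 'git-blame-ignore-revs' file mentioned above lets git blame skip this purely mechanical commit when annotating lines.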
@@ -16,10 +16,16 @@ from openstack import proxy
class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta):
def create_image(
self, name, volume, allow_duplicates,
container_format, disk_format, wait, timeout):
self,
name,
volume,
allow_duplicates,
container_format,
disk_format,
wait,
timeout,
):
if not disk_format:
disk_format = self._connection.config.config['image_format']
if not container_format:

@@ -33,7 +39,8 @@ class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta):
if not volume_obj:
raise exceptions.SDKException(
"Volume {volume} given to create_image could"
" not be found".format(volume=volume))
" not be found".format(volume=volume)
)
volume_id = volume_obj['id']
data = self.post(
'/volumes/{id}/action'.format(id=volume_id),

@@ -42,7 +49,11 @@ class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta):
'force': allow_duplicates,
'image_name': name,
'container_format': container_format,
'disk_format': disk_format}})
'disk_format': disk_format,
}
},
)
response = self._connection._get_and_munchify(
'os-volume_upload_image', data)
'os-volume_upload_image', data
)
return self._connection.image._existing_image(id=response['image_id'])
@@ -132,8 +132,9 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:returns: ``None``
"""
self._delete(_snapshot.Snapshot, snapshot,
ignore_missing=ignore_missing)
self._delete(
_snapshot.Snapshot, snapshot, ignore_missing=ignore_missing
)
# ====== SNAPSHOT ACTIONS ======
def reset_snapshot(self, snapshot, status):

@@ -398,9 +399,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume = self._get_resource(_volume.Volume, volume)
volume.reset_status(self, status, attach_status, migration_status)
def attach_volume(
self, volume, mountpoint, instance=None, host_name=None
):
def attach_volume(self, volume, mountpoint, instance=None, host_name=None):
"""Attaches a volume to a server.
:param volume: The value can be either the ID of a volume or a

@@ -414,9 +413,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume = self._get_resource(_volume.Volume, volume)
volume.attach(self, mountpoint, instance, host_name)
def detach_volume(
self, volume, attachment, force=False, connector=None
):
def detach_volume(self, volume, attachment, force=False, connector=None):
"""Detaches a volume from a server.
:param volume: The value can be either the ID of a volume or a

@@ -444,8 +441,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume.unmanage(self)
def migrate_volume(
self, volume, host=None, force_host_copy=False,
lock_volume=False
self, volume, host=None, force_host_copy=False, lock_volume=False
):
"""Migrates a volume to the specified host.

@@ -466,9 +462,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume = self._get_resource(_volume.Volume, volume)
volume.migrate(self, host, force_host_copy, lock_volume)
def complete_volume_migration(
self, volume, new_volume, error=False
):
def complete_volume_migration(self, volume, new_volume, error=False):
"""Complete the migration of a volume.
:param volume: The value can be either the ID of a volume or a

@@ -584,8 +578,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:returns: ``None``
"""
if not force:
self._delete(
_backup.Backup, backup, ignore_missing=ignore_missing)
self._delete(_backup.Backup, backup, ignore_missing=ignore_missing)
else:
backup = self._get_resource(_backup.Backup, backup)
backup.force_delete(self)

@@ -617,8 +610,9 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
backup = self._get_resource(_backup.Backup, backup)
backup.reset(self, status)
def wait_for_status(self, res, status='available', failures=None,
interval=2, wait=120):
def wait_for_status(
self, res, status='available', failures=None, interval=2, wait=120
):
"""Wait for a resource to be in a particular status.
:param res: The resource to wait on to reach the specified status.

@@ -641,7 +635,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
failures = ['error'] if failures is None else failures
return resource.wait_for_status(
self, res, status, failures, interval, wait)
self, res, status, failures, interval, wait
)
def wait_for_delete(self, res, interval=2, wait=120):
"""Wait for a resource to be deleted.

@@ -674,9 +669,9 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
project = self._get_resource(_project.Project, project)
res = self._get_resource(
_quota_set.QuotaSet, None, project_id=project.id)
return res.fetch(
self, usage=usage, **query)
_quota_set.QuotaSet, None, project_id=project.id
)
return res.fetch(self, usage=usage, **query)
def get_quota_set_defaults(self, project):
"""Show QuotaSet defaults for the project

@@ -691,9 +686,9 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
project = self._get_resource(_project.Project, project)
res = self._get_resource(
_quota_set.QuotaSet, None, project_id=project.id)
return res.fetch(
self, base_path='/os-quota-sets/defaults')
_quota_set.QuotaSet, None, project_id=project.id
)
return res.fetch(self, base_path='/os-quota-sets/defaults')
def revert_quota_set(self, project, **query):
"""Reset Quota for the project/user.

@@ -707,7 +702,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
project = self._get_resource(_project.Project, project)
res = self._get_resource(
_quota_set.QuotaSet, None, project_id=project.id)
_quota_set.QuotaSet, None, project_id=project.id
)
if not query:
query = {}
@@ -16,14 +16,22 @@ from openstack import utils
class Backup(resource.Resource):
"""Volume Backup"""
resource_key = "backup"
resources_key = "backups"
base_path = "/backups"
_query_mapping = resource.QueryParameters(
'all_tenants', 'limit', 'marker', 'project_id',
'name', 'status', 'volume_id',
'sort_key', 'sort_dir')
'all_tenants',
'limit',
'marker',
'project_id',
'name',
'status',
'volume_id',
'sort_key',
'sort_dir',
)
# capabilities
allow_fetch = True

@@ -97,35 +105,48 @@ class Backup(resource.Resource):
session = self._get_session(session)
microversion = self._get_microversion(session, action='create')
requires_id = (self.create_requires_id
if self.create_requires_id is not None
else self.create_method == 'PUT')
requires_id = (
self.create_requires_id
if self.create_requires_id is not None
else self.create_method == 'PUT'
)
if self.create_exclude_id_from_body:
self._body._dirty.discard("id")
if self.create_method == 'POST':
request = self._prepare_request(requires_id=requires_id,
prepend_key=prepend_key,
base_path=base_path)
request = self._prepare_request(
requires_id=requires_id,
prepend_key=prepend_key,
base_path=base_path,
)
# NOTE(gtema) this is a funny example of when attribute
# is called "incremental" on create, "is_incremental" on get
# and use of "alias" or "aka" is not working for such conflict,
# since our preferred attr name is exactly "is_incremental"
body = request.body
if 'is_incremental' in body['backup']:
body['backup']['incremental'] = \
body['backup'].pop('is_incremental')
response = session.post(request.url,
json=request.body, headers=request.headers,
microversion=microversion, params=params)
body['backup']['incremental'] = body['backup'].pop(
'is_incremental'
)
response = session.post(
request.url,
json=request.body,
headers=request.headers,
microversion=microversion,
params=params,
)
else:
# Just for safety of the implementation (since PUT removed)
raise exceptions.ResourceFailure(
"Invalid create method: %s" % self.create_method)
"Invalid create method: %s" % self.create_method
)
has_body = (self.has_body if self.create_returns_body is None
else self.create_returns_body)
has_body = (
self.has_body
if self.create_returns_body is None
else self.create_returns_body
)
self.microversion = microversion
self._translate_response(response, has_body=has_body)
# direct comparision to False since we need to rule out None

@@ -137,8 +158,9 @@ class Backup(resource.Resource):
def _action(self, session, body, microversion=None):
"""Preform backup actions given the message body."""
url = utils.urljoin(self.base_path, self.id, 'action')
resp = session.post(url, json=body,
microversion=self._max_microversion)
resp = session.post(
url, json=body, microversion=self._max_microversion
)
exceptions.raise_from_response(resp)
return resp

@@ -157,22 +179,20 @@ class Backup(resource.Resource):
if name:
body['restore']['name'] = name
if not (volume_id or name):
raise exceptions.SDKException('Either of `name` or `volume_id`'
' must be specified.')
response = session.post(url,
json=body)
raise exceptions.SDKException(
'Either of `name` or `volume_id`' ' must be specified.'
)
response = session.post(url, json=body)
self._translate_response(response, has_body=False)
return self
def force_delete(self, session):
"""Force backup deletion
"""
"""Force backup deletion"""
body = {'os-force_delete': {}}
self._action(session, body)
def reset(self, session, status):
"""Reset the status of the backup
"""
"""Reset the status of the backup"""
body = {'os-reset_status': {'status': status}}
self._action(session, body)
@@ -23,7 +23,8 @@ class Snapshot(resource.Resource, metadata.MetadataMixin):
base_path = "/snapshots"
_query_mapping = resource.QueryParameters(
'name', 'status', 'volume_id', all_projects='all_tenants')
'name', 'status', 'volume_id', all_projects='all_tenants'
)
# capabilities
allow_fetch = True

@@ -53,14 +54,14 @@ class Snapshot(resource.Resource, metadata.MetadataMixin):
def _action(self, session, body, microversion=None):
"""Preform backup actions given the message body."""
url = utils.urljoin(self.base_path, self.id, 'action')
resp = session.post(url, json=body,
microversion=self._max_microversion)
resp = session.post(
url, json=body, microversion=self._max_microversion
)
exceptions.raise_from_response(resp)
return resp
def reset(self, session, status):
"""Reset the status of the snapshot.
"""
"""Reset the status of the snapshot."""
body = {'os-reset_status': {'status': status}}
self._action(session, body)
@@ -21,7 +21,8 @@ class Volume(resource.Resource, metadata.MetadataMixin):
base_path = "/volumes"
_query_mapping = resource.QueryParameters(
'name', 'status', 'project_id', all_projects='all_tenants')
'name', 'status', 'project_id', all_projects='all_tenants'
)
# capabilities
allow_fetch = True

@@ -45,7 +46,8 @@ class Volume(resource.Resource, metadata.MetadataMixin):
description = resource.Body("description")
#: Extended replication status on this volume.
extended_replication_status = resource.Body(
"os-volume-replication:extended_status")
"os-volume-replication:extended_status"
)
#: The volume's current back-end.
host = resource.Body("os-vol-host-attr:host")
#: The ID of the image from which you want to create the volume.

@@ -66,7 +68,8 @@ class Volume(resource.Resource, metadata.MetadataMixin):
project_id = resource.Body("os-vol-tenant-attr:tenant_id")
#: Data set by the replication driver
replication_driver_data = resource.Body(
"os-volume-replication:driver_data")
"os-volume-replication:driver_data"
)
#: Status of replication on this volume.
replication_status = resource.Body("replication_status")
#: Scheduler hints for the volume

@@ -111,24 +114,22 @@ class Volume(resource.Resource, metadata.MetadataMixin):
body = {'os-set_bootable': {'bootable': bootable}}
self._action(session, body)
def reset_status(
self, session, status, attach_status, migration_status
):
def reset_status(self, session, status, attach_status, migration_status):
"""Reset volume statuses (admin operation)"""
body = {'os-reset_status': {
'status': status,
'attach_status': attach_status,
'migration_status': migration_status
}}
body = {
'os-reset_status': {
'status': status,
'attach_status': attach_status,
'migration_status': migration_status,
}
}
self._action(session, body)
def attach(
self, session, mountpoint, instance
):
def attach(self, session, mountpoint, instance):
"""Attach volume to server"""
body = {'os-attach': {
'mountpoint': mountpoint,
'instance_uuid': instance}}
body = {
'os-attach': {'mountpoint': mountpoint, 'instance_uuid': instance}
}
self._action(session, body)

@@ -137,8 +138,7 @@ class Volume(resource.Resource, metadata.MetadataMixin):
if not force:
body = {'os-detach': {'attachment_id': attachment}}
if force:
body = {'os-force_detach': {
'attachment_id': attachment}}
body = {'os-force_detach': {'attachment_id': attachment}}
self._action(session, body)

@@ -150,16 +150,14 @@ class Volume(resource.Resource, metadata.MetadataMixin):
def retype(self, session, new_type, migration_policy=None):
"""Change volume type"""
body = {'os-retype': {
'new_type': new_type}}
body = {'os-retype': {'new_type': new_type}}
if migration_policy:
body['os-retype']['migration_policy'] = migration_policy
self._action(session, body)
def migrate(
self, session, host=None, force_host_copy=False,
lock_volume=False
self, session, host=None, force_host_copy=False, lock_volume=False
):
"""Migrate volume"""
req = dict()

@@ -175,9 +173,12 @@ class Volume(resource.Resource, metadata.MetadataMixin):
def complete_migration(self, session, new_volume_id, error=False):
"""Complete volume migration"""
body = {'os-migrate_volume_completion': {
'new_volume': new_volume_id,
'error': error}}
body = {
'os-migrate_volume_completion': {
'new_volume': new_volume_id,
'error': error,
}
}
self._action(session, body)
@@ -45,7 +45,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"snapshot": _snapshot.Snapshot,
"stats_pools": _stats.Pools,
"type": _type.Type,
"volume": _volume.Volume
"volume": _volume.Volume,
}
# ====== SNAPSHOTS ======

@@ -168,7 +168,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
if not force:
self._delete(
_snapshot.Snapshot, snapshot, ignore_missing=ignore_missing)
_snapshot.Snapshot, snapshot, ignore_missing=ignore_missing
)
else:
snapshot = self._get_resource(_snapshot.Snapshot, snapshot)
snapshot.force_delete(self)

@@ -405,9 +406,11 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
volume_type = self._get_resource(_type.Type, volume_type_id)
return self._get(_type.TypeEncryption,
volume_type_id=volume_type.id,
requires_id=False)
return self._get(
_type.TypeEncryption,
volume_type_id=volume_type.id,
requires_id=False,
)
def create_type_encryption(self, volume_type, **attrs):
"""Create new type encryption from attributes

@@ -425,11 +428,13 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
volume_type = self._get_resource(_type.Type, volume_type)
return self._create(_type.TypeEncryption,
volume_type_id=volume_type.id, **attrs)
return self._create(
_type.TypeEncryption, volume_type_id=volume_type.id, **attrs
)
def delete_type_encryption(self, encryption=None,
volume_type=None, ignore_missing=True):
def delete_type_encryption(
self, encryption=None, volume_type=None, ignore_missing=True
):
"""Delete type encryption attributes
:param encryption: The value can be None or a

@@ -452,12 +457,15 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
if volume_type:
volume_type = self._get_resource(_type.Type, volume_type)
encryption = self._get(_type.TypeEncryption,
volume_type=volume_type.id,
requires_id=False)
encryption = self._get(
_type.TypeEncryption,
volume_type=volume_type.id,
requires_id=False,
)
self._delete(_type.TypeEncryption, encryption,
ignore_missing=ignore_missing)
self._delete(
_type.TypeEncryption, encryption, ignore_missing=ignore_missing
)
def update_type_encryption(
self,
@@ -725,9 +733,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume = self._get_resource(_volume.Volume, volume)
volume.reset_status(self, status, attach_status, migration_status)
def revert_volume_to_snapshot(
self, volume, snapshot
):
def revert_volume_to_snapshot(self, volume, snapshot):
"""Revert a volume to its latest snapshot.
This method only support reverting a detached volume, and the

@@ -744,9 +750,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
snapshot = self._get_resource(_snapshot.Snapshot, snapshot)
volume.revert_to_snapshot(self, snapshot.id)
def attach_volume(
self, volume, mountpoint, instance=None, host_name=None
):
def attach_volume(self, volume, mountpoint, instance=None, host_name=None):
"""Attaches a volume to a server.
:param volume: The value can be either the ID of a volume or a

@@ -760,9 +764,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume = self._get_resource(_volume.Volume, volume)
volume.attach(self, mountpoint, instance, host_name)
def detach_volume(
self, volume, attachment, force=False, connector=None
):
def detach_volume(self, volume, attachment, force=False, connector=None):
"""Detaches a volume from a server.
:param volume: The value can be either the ID of a volume or a

@@ -784,13 +786,17 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:param volume: The value can be either the ID of a volume or a
:class:`~openstack.block_storage.v3.volume.Volume` instance.
:returns: None """
:returns: None"""
volume = self._get_resource(_volume.Volume, volume)
volume.unmanage(self)
def migrate_volume(
self, volume, host=None, force_host_copy=False,
lock_volume=False, cluster=None
self,
volume,
host=None,
force_host_copy=False,
lock_volume=False,
cluster=None,
):
"""Migrates a volume to the specified host.

@@ -816,9 +822,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume = self._get_resource(_volume.Volume, volume)
volume.migrate(self, host, force_host_copy, lock_volume, cluster)
def complete_volume_migration(
self, volume, new_volume, error=False
):
def complete_volume_migration(self, volume, new_volume, error=False):
"""Complete the migration of a volume.
:param volume: The value can be either the ID of a volume or a

@@ -833,8 +837,14 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
volume.complete_migration(self, new_volume, error)
def upload_volume_to_image(
self, volume, image_name, force=False, disk_format=None,
container_format=None, visibility=None, protected=None
self,
volume,
image_name,
force=False,
disk_format=None,
container_format=None,
visibility=None,
protected=None,
):
"""Uploads the specified volume to image service.
@@ -852,9 +862,13 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
volume = self._get_resource(_volume.Volume, volume)
volume.upload_to_image(
self, image_name, force=force, disk_format=disk_format,
container_format=container_format, visibility=visibility,
protected=protected
self,
image_name,
force=force,
disk_format=disk_format,
container_format=container_format,
visibility=visibility,
protected=protected,
)
def reserve_volume(self, volume):

@@ -863,7 +877,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:param volume: The value can be either the ID of a volume or a
:class:`~openstack.block_storage.v3.volume.Volume` instance.
:returns: None """
:returns: None"""
volume = self._get_resource(_volume.Volume, volume)
volume.reserve(self)

@@ -873,7 +887,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:param volume: The value can be either the ID of a volume or a
:class:`~openstack.block_storage.v3.volume.Volume` instance.
:returns: None """
:returns: None"""
volume = self._get_resource(_volume.Volume, volume)
volume.unreserve(self)

@@ -883,7 +897,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:param volume: The value can be either the ID of a volume or a
:class:`~openstack.block_storage.v3.volume.Volume` instance.
:returns: None """
:returns: None"""
volume = self._get_resource(_volume.Volume, volume)
volume.begin_detaching(self)

@@ -893,7 +907,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:param volume: The value can be either the ID of a volume or a
:class:`~openstack.block_storage.v3.volume.Volume` instance.
:returns: None """
:returns: None"""
volume = self._get_resource(_volume.Volume, volume)
volume.abort_detaching(self)

@@ -904,7 +918,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:class:`~openstack.block_storage.v3.volume.Volume` instance.
:param dict connector: The connector object.
:returns: None """
:returns: None"""
volume = self._get_resource(_volume.Volume, volume)
volume.init_attachment(self, connector)
@@ -1022,8 +1036,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:returns: ``None``
"""
if not force:
self._delete(
_backup.Backup, backup, ignore_missing=ignore_missing)
self._delete(_backup.Backup, backup, ignore_missing=ignore_missing)
else:
backup = self._get_resource(_backup.Backup, backup)
backup.force_delete(self)

@@ -1295,7 +1308,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:returns: None
"""
resource = self._get_resource(
_group_snapshot.GroupSnapshot, group_snapshot)
_group_snapshot.GroupSnapshot, group_snapshot
)
resource.reset_state(self, state)
def delete_group_snapshot(self, group_snapshot, ignore_missing=True):

@@ -1307,8 +1321,10 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:returns: None
"""
self._delete(
_group_snapshot.GroupSnapshot, group_snapshot,
ignore_missing=ignore_missing)
_group_snapshot.GroupSnapshot,
group_snapshot,
ignore_missing=ignore_missing,
)
# ====== GROUP TYPE ======
def get_group_type(self, group_type):

@@ -1395,7 +1411,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:returns: None
"""
self._delete(
_group_type.GroupType, group_type, ignore_missing=ignore_missing)
_group_type.GroupType, group_type, ignore_missing=ignore_missing
)
def update_group_type(self, group_type, **attrs):
"""Update a group_type

@@ -1408,8 +1425,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
:returns: The updated group type.
:rtype: :class:`~openstack.block_storage.v3.group_type.GroupType`
"""
return self._update(
_group_type.GroupType, group_type, **attrs)
return self._update(_group_type.GroupType, group_type, **attrs)
def fetch_group_type_group_specs(self, group_type):
"""Lists group specs of a group type.

@@ -1488,9 +1504,9 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
project = self._get_resource(_project.Project, project)
res = self._get_resource(
_quota_set.QuotaSet, None, project_id=project.id)
return res.fetch(
self, usage=usage, **query)
_quota_set.QuotaSet, None, project_id=project.id
)
return res.fetch(self, usage=usage, **query)
def get_quota_set_defaults(self, project):
"""Show QuotaSet defaults for the project

@@ -1505,9 +1521,9 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
project = self._get_resource(_project.Project, project)
res = self._get_resource(
_quota_set.QuotaSet, None, project_id=project.id)
return res.fetch(
self, base_path='/os-quota-sets/defaults')
_quota_set.QuotaSet, None, project_id=project.id
)
return res.fetch(self, base_path='/os-quota-sets/defaults')
def revert_quota_set(self, project, **query):
"""Reset Quota for the project/user.

@@ -1521,7 +1537,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
project = self._get_resource(_project.Project, project)
res = self._get_resource(
_quota_set.QuotaSet, None, project_id=project.id)
_quota_set.QuotaSet, None, project_id=project.id
)
return res.delete(self, **query)
@@ -1561,7 +1578,12 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
# ====== UTILS ======
def wait_for_status(
self, res, status='available', failures=None, interval=2, wait=120,
self,
res,
status='available',
failures=None,
interval=2,
wait=120,
):
"""Wait for a resource to be in a particular status.

@@ -1584,7 +1606,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
"""
failures = ['error'] if failures is None else failures
return resource.wait_for_status(
self, res, status, failures, interval, wait)
self, res, status, failures, interval, wait
)
def wait_for_delete(self, res, interval=2, wait=120):
"""Wait for a resource to be deleted.

@@ -1602,11 +1625,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
return resource.wait_for_delete(self, res, interval, wait)
def _get_cleanup_dependencies(self):
return {
'block_storage': {
'before': []
}
}
return {'block_storage': {'before': []}}
def _service_cleanup(
self,

@@ -1614,7 +1633,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
client_status_queue=None,
identified_resources=None,
filters=None,
resource_evaluation_fn=None
resource_evaluation_fn=None,
):
# It is not possible to delete backup if there are dependent backups.
# In order to be able to do cleanup those is required to have multiple

@@ -1634,7 +1653,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
client_status_queue=client_status_queue,
identified_resources=identified_resources,
filters=filters,
resource_evaluation_fn=resource_evaluation_fn)
resource_evaluation_fn=resource_evaluation_fn,
)
else:
# Set initial iterations conditions
need_backup_iteration = True

@@ -1647,7 +1667,7 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
# To increase success chance sort backups by age, dependent
# backups are logically younger.
for obj in self.backups(
details=True, sort_key='created_at', sort_dir='desc'
details=True, sort_key='created_at', sort_dir='desc'
):
if not obj.has_dependent_backups:
# If no dependent backups - go with it

@@ -1658,7 +1678,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
client_status_queue=client_status_queue,
identified_resources=identified_resources,
filters=filters,
resource_evaluation_fn=resource_evaluation_fn)
resource_evaluation_fn=resource_evaluation_fn,
)
if not dry_run and need_delete:
backups.append(obj)
else:

@@ -1682,7 +1703,8 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
client_status_queue=client_status_queue,
identified_resources=identified_resources,
filters=filters,
resource_evaluation_fn=resource_evaluation_fn)
resource_evaluation_fn=resource_evaluation_fn,
)
if not dry_run and need_delete:
snapshots.append(obj)

@@ -1702,4 +1724,5 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
client_status_queue=client_status_queue,
identified_resources=identified_resources,
filters=filters,
resource_evaluation_fn=resource_evaluation_fn)
resource_evaluation_fn=resource_evaluation_fn,
)
@@ -16,6 +16,7 @@ from openstack import utils
class Backup(resource.Resource):
"""Volume Backup"""
resource_key = "backup"
resources_key = "backups"
base_path = "/backups"

@@ -24,9 +25,16 @@ class Backup(resource.Resource):
# search (name~, status~, volume_id~). But this is not documented
# officially and seem to require microversion be set
_query_mapping = resource.QueryParameters(
'all_tenants', 'limit', 'marker', 'project_id',
'name', 'status', 'volume_id',
'sort_key', 'sort_dir')
'all_tenants',
'limit',
'marker',
'project_id',
'name',
'status',
'volume_id',
'sort_key',
'sort_dir',
)
# capabilities
allow_fetch = True

@@ -111,35 +119,48 @@ class Backup(resource.Resource):
session = self._get_session(session)
microversion = self._get_microversion(session, action='create')
requires_id = (self.create_requires_id
if self.create_requires_id is not None
else self.create_method == 'PUT')
requires_id = (
self.create_requires_id
if self.create_requires_id is not None
else self.create_method == 'PUT'
)
if self.create_exclude_id_from_body:
self._body._dirty.discard("id")
if self.create_method == 'POST':
request = self._prepare_request(requires_id=requires_id,
prepend_key=prepend_key,
base_path=base_path)
request = self._prepare_request(
requires_id=requires_id,
prepend_key=prepend_key,
base_path=base_path,
)
# NOTE(gtema) this is a funny example of when attribute
# is called "incremental" on create, "is_incremental" on get
# and use of "alias" or "aka" is not working for such conflict,
# since our preferred attr name is exactly "is_incremental"
body = request.body
if 'is_incremental' in body['backup']:
body['backup']['incremental'] = \
body['backup'].pop('is_incremental')
response = session.post(request.url,
json=request.body, headers=request.headers,
microversion=microversion, params=params)
body['backup']['incremental'] = body['backup'].pop(
'is_incremental'
)
response = session.post(
request.url,
json=request.body,
headers=request.headers,
microversion=microversion,
params=params,
)
else:
# Just for safety of the implementation (since PUT removed)
raise exceptions.ResourceFailure(
"Invalid create method: %s" % self.create_method)
"Invalid create method: %s" % self.create_method
)
has_body = (self.has_body if self.create_returns_body is None
else self.create_returns_body)
has_body = (
self.has_body
if self.create_returns_body is None
else self.create_returns_body
)
self.microversion = microversion
self._translate_response(response, has_body=has_body)
# direct comparision to False since we need to rule out None

@@ -151,8 +172,9 @@ class Backup(resource.Resource):
def _action(self, session, body, microversion=None):
"""Preform backup actions given the message body."""
url = utils.urljoin(self.base_path, self.id, 'action')
resp = session.post(url, json=body,
microversion=self._max_microversion)
resp = session.post(
url, json=body, microversion=self._max_microversion
)
exceptions.raise_from_response(resp)
return resp

@@ -171,21 +193,20 @@ class Backup(resource.Resource):
if name:
body['restore']['name'] = name
if not (volume_id or name):
raise exceptions.SDKException('Either of `name` or `volume_id`'
' must be specified.')
raise exceptions.SDKException(
'Either of `name` or `volume_id`' ' must be specified.'
)
response = session.post(url, json=body)
self._translate_response(response, has_body=False)
return self
def force_delete(self, session):
"""Force backup deletion
"""
"""Force backup deletion"""
body = {'os-force_delete': {}}
self._action(session, body)
def reset(self, session, status):
"""Reset the status of the backup
"""
"""Reset the status of the backup"""
body = {'os-reset_status': {'status': status}}
self._action(session, body)
@@ -15,6 +15,7 @@ from openstack import resource
class Extension(resource.Resource):
"""Extension"""
resources_key = "extensions"
base_path = "/extensions"

@@ -58,10 +58,14 @@ class GroupSnapshot(resource.Resource):
microversion = session.default_microversion
else:
microversion = utils.maximum_supported_microversion(
session, self._max_microversion,
session,
self._max_microversion,
)
response = session.post(
url, json=body, headers=headers, microversion=microversion,
url,
json=body,
headers=headers,
microversion=microversion,
)
exceptions.raise_from_response(response)
return response

@@ -71,7 +71,9 @@ class GroupType(resource.Resource):
url = utils.urljoin(GroupType.base_path, self.id, 'group_specs')
microversion = self._get_microversion(session, action='create')
response = session.post(
url, json={'group_specs': specs}, microversion=microversion,
url,
json={'group_specs': specs},
microversion=microversion,
)
exceptions.raise_from_response(response)
specs = response.json().get('group_specs', {})
@@ -17,19 +17,22 @@ class AbsoluteLimit(resource.Resource):
#: Properties
#: The maximum total amount of backups, in gibibytes (GiB).
max_total_backup_gigabytes = resource.Body(
"maxTotalBackupGigabytes", type=int)
"maxTotalBackupGigabytes", type=int
)
#: The maximum number of backups.
max_total_backups = resource.Body("maxTotalBackups", type=int)
#: The maximum number of snapshots.
max_total_snapshots = resource.Body("maxTotalSnapshots", type=int)
#: The maximum total amount of volumes, in gibibytes (GiB).
max_total_volume_gigabytes = resource.Body(
"maxTotalVolumeGigabytes", type=int)
"maxTotalVolumeGigabytes", type=int
)
#: The maximum number of volumes.
max_total_volumes = resource.Body("maxTotalVolumes", type=int)
#: The total number of backups gibibytes (GiB) used.
total_backup_gigabytes_used = resource.Body(
"totalBackupGigabytesUsed", type=int)
"totalBackupGigabytesUsed", type=int
)
#: The total number of backups used.
total_backups_used = resource.Body("totalBackupsUsed", type=int)
#: The total number of gibibytes (GiB) used.

@@ -15,6 +15,7 @@ from openstack import resource
class ResourceFilter(resource.Resource):
"""Resource Filter"""
resources_key = "resource_filters"
base_path = "/resource_filters"
@@ -23,8 +23,8 @@ class Snapshot(resource.Resource, metadata.MetadataMixin):
base_path = "/snapshots"
_query_mapping = resource.QueryParameters(
'name', 'status', 'volume_id',
'project_id', all_projects='all_tenants')
'name', 'status', 'volume_id', 'project_id', all_projects='all_tenants'
)
# capabilities
allow_fetch = True

@@ -58,28 +58,25 @@ class Snapshot(resource.Resource, metadata.MetadataMixin):
def _action(self, session, body, microversion=None):
"""Preform backup actions given the message body."""
url = utils.urljoin(self.base_path, self.id, 'action')
resp = session.post(url, json=body,
microversion=self._max_microversion)
resp = session.post(
url, json=body, microversion=self._max_microversion
)
exceptions.raise_from_response(resp)
return resp
def force_delete(self, session):
"""Force snapshot deletion.
"""
"""Force snapshot deletion."""
body = {'os-force_delete': {}}
self._action(session, body)
def reset(self, session, status):
"""Reset the status of the snapshot.
"""
"""Reset the status of the snapshot."""
body = {'os-reset_status': {'status': status}}
self._action(session, body)
def set_status(self, session, status, progress=None):
"""Update fields related to the status of a snapshot.
"""
body = {'os-update_snapshot_status': {
'status': status}}
"""Update fields related to the status of a snapshot."""
body = {'os-update_snapshot_status': {'status': status}}
if progress is not None:
body['os-update_snapshot_status']['progress'] = progress
self._action(session, body)

@@ -37,13 +37,13 @@ class Type(resource.Resource):
#: a private volume-type. *Type: bool*
is_public = resource.Body('os-volume-type-access:is_public', type=bool)
def _extra_specs(self, method, key=None, delete=False,
extra_specs=None):
def _extra_specs(self, method, key=None, delete=False, extra_specs=None):
extra_specs = extra_specs or {}
for k, v in extra_specs.items():
if not isinstance(v, str):
raise ValueError("The value for %s (%s) must be "
"a text string" % (k, v))
raise ValueError(
"The value for %s (%s) must be " "a text string" % (k, v)
)
if key is not None:
url = utils.urljoin(self.base_path, self.id, "extra_specs", key)
@@ -23,8 +23,13 @@ class Volume(resource.Resource, metadata.MetadataMixin):
base_path = "/volumes"
_query_mapping = resource.QueryParameters(
'name', 'status', 'project_id', 'created_at', 'updated_at',
all_projects='all_tenants')
'name',
'status',
'project_id',
'created_at',
'updated_at',
all_projects='all_tenants',
)
# capabilities
allow_fetch = True

@@ -48,7 +53,8 @@ class Volume(resource.Resource, metadata.MetadataMixin):
description = resource.Body("description")
#: Extended replication status on this volume.
extended_replication_status = resource.Body(
"os-volume-replication:extended_status")
"os-volume-replication:extended_status"
)
#: The ID of the group that the volume belongs to.
group_id = resource.Body("group_id")
#: The volume's current back-end.

@@ -73,7 +79,8 @@ class Volume(resource.Resource, metadata.MetadataMixin):
project_id = resource.Body("os-vol-tenant-attr:tenant_id")
#: Data set by the replication driver
replication_driver_data = resource.Body(
"os-volume-replication:driver_data")
"os-volume-replication:driver_data"
)
#: Status of replication on this volume.
replication_status = resource.Body("replication_status")
#: Scheduler hints for the volume

@@ -108,8 +115,9 @@ class Volume(resource.Resource, metadata.MetadataMixin):
# as both Volume and VolumeDetail instances can be acted on, but
# the URL used is sans any additional /detail/ part.
url = utils.urljoin(Volume.base_path, self.id, 'action')
resp = session.post(url, json=body,
microversion=self._max_microversion)
resp = session.post(
url, json=body, microversion=self._max_microversion
)
exceptions.raise_from_response(resp)
return resp

@@ -128,15 +136,15 @@ class Volume(resource.Resource, metadata.MetadataMixin):
body = {'os-update_readonly_flag': {'readonly': readonly}}
self._action(session, body)
def reset_status(
self, session, status, attach_status, migration_status
):
def reset_status(self, session, status, attach_status, migration_status):
"""Reset volume statuses (admin operation)"""
body = {'os-reset_status': {
'status': status,
'attach_status': attach_status,
'migration_status': migration_status
}}
body = {
'os-reset_status': {
'status': status,
'attach_status': attach_status,
'migration_status': migration_status,
}
}
self._action(session, body)
def revert_to_snapshot(self, session, snapshot_id):

@@ -145,12 +153,9 @@ class Volume(resource.Resource, metadata.MetadataMixin):
body = {'revert': {'snapshot_id': snapshot_id}}
self._action(session, body)
def attach(
self, session, mountpoint, instance=None, host_name=None
):
def attach(self, session, mountpoint, instance=None, host_name=None):
"""Attach volume to server"""
body = {'os-attach': {
'mountpoint': mountpoint}}
body = {'os-attach': {'mountpoint': mountpoint}}
if instance is not None:
body['os-attach']['instance_uuid'] = instance

@@ -158,7 +163,8 @@ class Volume(resource.Resource, metadata.MetadataMixin):
body['os-attach']['host_name'] = host_name
else:
raise ValueError(
'Either instance_uuid or host_name must be specified')
'Either instance_uuid or host_name must be specified'
)
self._action(session, body)