Blackify openstack.compute
The first step in black'ification. Black is used with the '-l 79 -S'
flags. A future change will ignore this commit in git-blame history by
adding a 'git-blame-ignore-revs' file.

Change-Id: Ic8e372a7ca999414ad93fb88e03b92798052cc3e
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
commit 395a77298e (parent d416746e5c)
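For reference, the formatting pass and the planned blame suppression can be reproduced roughly as follows. This is a sketch only: the '-l 79 -S' flags come from the message above, while the target path and the dot-prefixed ignore-file name are assumptions about the usual setup, not part of this change.

    # Re-run Black with the same options: 79-character lines (-l 79) and no
    # string quote normalization (-S). The path is assumed here.
    black -l 79 -S openstack/compute/

    # Follow-up step described above: list this commit's full 40-character SHA
    # in an ignore file so 'git blame' skips the mechanical reformat.
    echo '<full SHA of this commit>' >> .git-blame-ignore-revs
    git config blame.ignoreRevsFile .git-blame-ignore-revs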
@@ -18,5 +18,5 @@ class ComputeService(service_description.ServiceDescription):
     """The compute service."""
 
     supported_versions = {
-        '2': _proxy.Proxy
+        '2': _proxy.Proxy,
     }
@@ -64,7 +64,7 @@ class Proxy(proxy.Proxy):
         "server_remote_console": _src.ServerRemoteConsole,
         "service": _service.Service,
         "usage": _usage.Usage,
-        "volume_attachment": _volume_attachment.VolumeAttachment
+        "volume_attachment": _volume_attachment.VolumeAttachment,
     }
 
     # ========== Extensions ==========
@@ -400,8 +400,11 @@ class Proxy(proxy.Proxy):
 
         :returns: ``None``
         """
-        self._delete(_aggregate.Aggregate, aggregate,
-                     ignore_missing=ignore_missing)
+        self._delete(
+            _aggregate.Aggregate,
+            aggregate,
+            ignore_missing=ignore_missing,
+        )
 
     def add_host_to_aggregate(self, aggregate, host):
         """Adds a host to an aggregate
@@ -638,8 +641,12 @@ class Proxy(proxy.Proxy):
         :returns: ``None``
         """
         attrs = {'user_id': user_id} if user_id else {}
-        self._delete(_keypair.Keypair, keypair, ignore_missing=ignore_missing,
-                     **attrs)
+        self._delete(
+            _keypair.Keypair,
+            keypair,
+            ignore_missing=ignore_missing,
+            **attrs,
+        )
 
     def get_keypair(self, keypair, user_id=None):
         """Get a single keypair
@@ -934,8 +941,14 @@ class Proxy(proxy.Proxy):
         server = self._get_resource(_server.Server, server)
         server.revert_resize(self)
 
-    def create_server_image(self, server, name, metadata=None, wait=False,
-                            timeout=120):
+    def create_server_image(
+        self,
+        server,
+        name,
+        metadata=None,
+        wait=False,
+        timeout=120,
+    ):
         """Create an image from a server
 
         :param server: Either the ID of a server or a
@@ -1048,8 +1061,7 @@ class Proxy(proxy.Proxy):
         :returns: None
         """
         server = self._get_resource(_server.Server, server)
-        server.rescue(self, admin_pass=admin_pass,
-                      image_ref=image_ref)
+        server.rescue(self, admin_pass=admin_pass, image_ref=image_ref)
 
     def unrescue_server(self, server):
         """Unrescues a server and changes its status to ``ACTIVE``.
@@ -1076,8 +1088,7 @@ class Proxy(proxy.Proxy):
         :returns: None
         """
         server = self._get_resource(_server.Server, server)
-        server.evacuate(self, host=host, admin_pass=admin_pass,
-                        force=force)
+        server.evacuate(self, host=host, admin_pass=admin_pass, force=force)
 
     def start_server(self, server):
         """Starts a stopped server and changes its state to ``ACTIVE``.
@@ -1236,8 +1247,7 @@ class Proxy(proxy.Proxy):
         :returns: None
         """
         server = self._get_resource(_server.Server, server)
-        server.add_floating_ip(self, address,
-                               fixed_address=fixed_address)
+        server.add_floating_ip(self, address, fixed_address=fixed_address)
 
     def remove_floating_ip_from_server(self, server, address):
         """Removes a floating IP address from a server instance.
@@ -1267,13 +1277,20 @@ class Proxy(proxy.Proxy):
         :rtype: :class:`~openstack.compute.v2.server_interface.ServerInterface`
         """
         server_id = resource.Resource._get_id(server)
-        return self._create(_server_interface.ServerInterface,
-                            server_id=server_id, **attrs)
+        return self._create(
+            _server_interface.ServerInterface,
+            server_id=server_id,
+            **attrs,
+        )
 
     # TODO(stephenfin): Does this work? There's no 'value' parameter for the
     # call to '_delete'
-    def delete_server_interface(self, server_interface, server=None,
-                                ignore_missing=True):
+    def delete_server_interface(
+        self,
+        server_interface,
+        server=None,
+        ignore_missing=True,
+    ):
         """Delete a server interface
 
         :param server_interface:
@@ -1292,14 +1309,19 @@ class Proxy(proxy.Proxy):
 
         :returns: ``None``
         """
-        server_id = self._get_uri_attribute(server_interface, server,
-                                            "server_id")
+        server_id = self._get_uri_attribute(
+            server_interface,
+            server,
+            "server_id",
+        )
         server_interface = resource.Resource._get_id(server_interface)
 
-        self._delete(_server_interface.ServerInterface,
-                     server_interface,
-                     server_id=server_id,
-                     ignore_missing=ignore_missing)
+        self._delete(
+            _server_interface.ServerInterface,
+            server_interface,
+            server_id=server_id,
+            ignore_missing=ignore_missing,
+        )
 
     def get_server_interface(self, server_interface, server=None):
         """Get a single server interface
@@ -1318,12 +1340,18 @@ class Proxy(proxy.Proxy):
         :raises: :class:`~openstack.exceptions.ResourceNotFound`
             when no resource can be found.
         """
-        server_id = self._get_uri_attribute(server_interface, server,
-                                            "server_id")
+        server_id = self._get_uri_attribute(
+            server_interface,
+            server,
+            "server_id",
+        )
         server_interface = resource.Resource._get_id(server_interface)
 
-        return self._get(_server_interface.ServerInterface,
-                         server_id=server_id, port_id=server_interface)
+        return self._get(
+            _server_interface.ServerInterface,
+            server_id=server_id,
+            port_id=server_interface,
+        )
 
     def server_interfaces(self, server, **query):
         """Return a generator of server interfaces
@@ -1337,8 +1365,11 @@ class Proxy(proxy.Proxy):
         :rtype: :class:`~openstack.compute.v2.server_interface.ServerInterface`
         """
         server_id = resource.Resource._get_id(server)
-        return self._list(_server_interface.ServerInterface,
-                          server_id=server_id, **query)
+        return self._list(
+            _server_interface.ServerInterface,
+            server_id=server_id,
+            **query,
+        )
 
     def server_ips(self, server, network_label=None):
         """Return a generator of server IPs
@@ -1352,8 +1383,11 @@ class Proxy(proxy.Proxy):
         :rtype: :class:`~openstack.compute.v2.server_ip.ServerIP`
         """
         server_id = resource.Resource._get_id(server)
-        return self._list(server_ip.ServerIP,
-                          server_id=server_id, network_label=network_label)
+        return self._list(
+            server_ip.ServerIP,
+            server_id=server_id,
+            network_label=network_label,
+        )
 
     def availability_zones(self, details=False):
         """Return a generator of availability zones
@@ -1370,7 +1404,8 @@ class Proxy(proxy.Proxy):
 
         return self._list(
             availability_zone.AvailabilityZone,
-            base_path=base_path)
+            base_path=base_path,
+        )
 
     # ========== Server Metadata ==========
 
@@ -1455,8 +1490,11 @@ class Proxy(proxy.Proxy):
 
         :returns: ``None``
         """
-        self._delete(_server_group.ServerGroup, server_group,
-                     ignore_missing=ignore_missing)
+        self._delete(
+            _server_group.ServerGroup,
+            server_group,
+            ignore_missing=ignore_missing,
+        )
 
     def find_server_group(
         self,
@@ -1543,7 +1581,8 @@ class Proxy(proxy.Proxy):
         ):
             # Until 2.53 we need to use other API
             base_path = '/os-hypervisors/{pattern}/search'.format(
-                pattern=query.pop('hypervisor_hostname_pattern'))
+                pattern=query.pop('hypervisor_hostname_pattern')
+            )
         return self._list(_hypervisor.Hypervisor, base_path=base_path, **query)
 
     def find_hypervisor(
@@ -1611,7 +1650,11 @@ class Proxy(proxy.Proxy):
     # ========== Services ==========
 
     def update_service_forced_down(
-        self, service, host=None, binary=None, forced=True
+        self,
+        service,
+        host=None,
+        binary=None,
+        forced=True,
     ):
         """Update service forced_down information
 
@@ -1626,23 +1669,26 @@ class Proxy(proxy.Proxy):
         :rtype: class: `~openstack.compute.v2.service.Service`
         """
         if utils.supports_microversion(self, '2.53'):
-            return self.update_service(
-                service, forced_down=forced)
+            return self.update_service(service, forced_down=forced)
 
         service = self._get_resource(_service.Service, service)
-        if (
-            (not host or not binary)
-            and (not service.host or not service.binary)
+        if (not host or not binary) and (
+            not service.host or not service.binary
         ):
             raise ValueError(
                 'Either service instance should have host and binary '
-                'or they should be passed')
+                'or they should be passed'
+            )
         service.set_forced_down(self, host, binary, forced)
 
     force_service_down = update_service_forced_down
 
     def disable_service(
-        self, service, host=None, binary=None, disabled_reason=None
+        self,
+        service,
+        host=None,
+        binary=None,
+        disabled_reason=None,
     ):
         """Disable a service
 
@@ -1656,17 +1702,13 @@ class Proxy(proxy.Proxy):
         :rtype: class: `~openstack.compute.v2.service.Service`
         """
         if utils.supports_microversion(self, '2.53'):
-            attrs = {
-                'status': 'disabled'
-            }
+            attrs = {'status': 'disabled'}
             if disabled_reason:
                 attrs['disabled_reason'] = disabled_reason
-            return self.update_service(
-                service, **attrs)
+            return self.update_service(service, **attrs)
 
         service = self._get_resource(_service.Service, service)
-        return service.disable(
-            self, host, binary, disabled_reason)
+        return service.disable(self, host, binary, disabled_reason)
 
     def enable_service(self, service, host=None, binary=None):
         """Enable a service
@@ -1680,8 +1722,7 @@ class Proxy(proxy.Proxy):
         :rtype: class: `~openstack.compute.v2.service.Service`
         """
         if utils.supports_microversion(self, '2.53'):
-            return self.update_service(
-                service, status='enabled')
+            return self.update_service(service, status='enabled')
 
         service = self._get_resource(_service.Service, service)
         return service.enable(self, host, binary)
@@ -1732,8 +1773,7 @@ class Proxy(proxy.Proxy):
 
         :returns: ``None``
         """
-        self._delete(
-            _service.Service, service, ignore_missing=ignore_missing)
+        self._delete(_service.Service, service, ignore_missing=ignore_missing)
 
     def update_service(self, service, **attrs):
         """Update a service
@@ -1813,7 +1853,11 @@ class Proxy(proxy.Proxy):
         )
 
     def update_volume_attachment(
-        self, server, volume, volume_id=None, **attrs,
+        self,
+        server,
+        volume,
+        volume_id=None,
+        **attrs,
    ):
         """Update a volume attachment
 
@@ -1860,16 +1904,14 @@ class Proxy(proxy.Proxy):
 
         # if we have even partial type information and things look as they
        # should, we can assume the user did the right thing
-        if (
-            isinstance(server, _server.Server)
-            or isinstance(volume, _volume.Volume)
+        if isinstance(server, _server.Server) or isinstance(
+            volume, _volume.Volume
         ):
             return server, volume
 
         # conversely, if there's type info and things appear off, tell the user
-        if (
-            isinstance(server, _volume.Volume)
-            or isinstance(volume, _server.Server)
+        if isinstance(server, _volume.Volume) or isinstance(
+            volume, _server.Server
        ):
             warnings.warn(deprecation_msg, DeprecationWarning)
             return volume, server
@@ -1972,7 +2014,11 @@ class Proxy(proxy.Proxy):
         server.migrate(self)
 
     def live_migrate_server(
-        self, server, host=None, force=False, block_migration=None,
+        self,
+        server,
+        host=None,
+        force=False,
+        block_migration=None,
     ):
         """Live migrate a server from one host to target host
 
@@ -1995,13 +2041,17 @@ class Proxy(proxy.Proxy):
         """
         server = self._get_resource(_server.Server, server)
         server.live_migrate(
-            self, host,
+            self,
+            host,
             force=force,
             block_migration=block_migration,
        )
 
     def abort_server_migration(
-        self, server_migration, server, ignore_missing=True,
+        self,
+        server_migration,
+        server,
+        ignore_missing=True,
     ):
         """Abort an in-progress server migration
 
@@ -2022,7 +2072,9 @@ class Proxy(proxy.Proxy):
         :returns: ``None``
         """
         server_id = self._get_uri_attribute(
-            server_migration, server, 'server_id',
+            server_migration,
+            server,
+            'server_id',
        )
         server_migration = resource.Resource._get_id(server_migration)
 
@@ -2048,7 +2100,9 @@ class Proxy(proxy.Proxy):
         :returns: ``None``
         """
         server_id = self._get_uri_attribute(
-            server_migration, server, 'server_id',
+            server_migration,
+            server,
+            'server_id',
        )
         server_migration = self._get_resource(
             _server_migration.ServerMigration,
@@ -2085,7 +2139,9 @@ class Proxy(proxy.Proxy):
             when no resource can be found.
         """
         server_id = self._get_uri_attribute(
-            server_migration, server, 'server_id',
+            server_migration,
+            server,
+            'server_id',
        )
         server_migration = resource.Resource._get_id(server_migration)
 
@@ -2140,8 +2196,11 @@ class Proxy(proxy.Proxy):
             when no resource can be found.
         """
         server_id = self._get_resource(_server.Server, server).id
-        return self._get(_server_diagnostics.ServerDiagnostics,
-                         server_id=server_id, requires_id=False)
+        return self._get(
+            _server_diagnostics.ServerDiagnostics,
+            server_id=server_id,
+            requires_id=False,
+        )
 
     # ========== Project usage ============
 
@@ -2194,8 +2253,11 @@ class Proxy(proxy.Proxy):
            :class:`~openstack.compute.v2.server_remote_console.ServerRemoteConsole`
         """
         server_id = resource.Resource._get_id(server)
-        return self._create(_src.ServerRemoteConsole,
-                            server_id=server_id, **attrs)
+        return self._create(
+            _src.ServerRemoteConsole,
+            server_id=server_id,
+            **attrs,
+        )
 
     def get_server_console_url(self, server, console_type):
         """Create a remote console on the server.
@@ -2248,7 +2310,8 @@ class Proxy(proxy.Proxy):
                 _src.ServerRemoteConsole,
                 server_id=server.id,
                 type=console_type,
-                protocol=console_protocol)
+                protocol=console_protocol,
+            )
             return console.to_dict()
         else:
             return server.get_console_url(self, console_type)
@@ -2271,7 +2334,9 @@ class Proxy(proxy.Proxy):
         """
         project = self._get_resource(_project.Project, project)
         res = self._get_resource(
-            _quota_set.QuotaSet, None, project_id=project.id,
+            _quota_set.QuotaSet,
+            None,
+            project_id=project.id,
        )
         return res.fetch(self, usage=usage, **query)
 
@@ -2288,7 +2353,9 @@ class Proxy(proxy.Proxy):
         """
         project = self._get_resource(_project.Project, project)
         res = self._get_resource(
-            _quota_set.QuotaSet, None, project_id=project.id,
+            _quota_set.QuotaSet,
+            None,
+            project_id=project.id,
        )
         return res.fetch(self, base_path='/os-quota-sets/defaults')
 
@@ -2304,7 +2371,10 @@ class Proxy(proxy.Proxy):
         """
         project = self._get_resource(_project.Project, project)
         res = self._get_resource(
-            _quota_set.QuotaSet, None, project_id=project.id)
+            _quota_set.QuotaSet,
+            None,
+            project_id=project.id,
+        )
 
         if not query:
             query = {}
@@ -2373,7 +2443,12 @@ class Proxy(proxy.Proxy):
     # ========== Utilities ==========
 
     def wait_for_server(
-        self, server, status='ACTIVE', failures=None, interval=2, wait=120,
+        self,
+        server,
+        status='ACTIVE',
+        failures=None,
+        interval=2,
+        wait=120,
     ):
         """Wait for a server to be in a particular status.
 
@@ -2400,7 +2475,12 @@ class Proxy(proxy.Proxy):
         """
         failures = ['ERROR'] if failures is None else failures
         return resource.wait_for_status(
-            self, server, status, failures, interval, wait,
+            self,
+            server,
+            status,
+            failures,
+            interval,
+            wait,
        )
 
     def wait_for_delete(self, res, interval=2, wait=120):
@@ -2420,14 +2500,17 @@ class Proxy(proxy.Proxy):
 
     def _get_cleanup_dependencies(self):
         return {
-            'compute': {
-                'before': ['block_storage', 'network', 'identity']
-            }
+            'compute': {'before': ['block_storage', 'network', 'identity']}
         }
 
-    def _service_cleanup(self, dry_run=True, client_status_queue=None,
-                         identified_resources=None,
-                         filters=None, resource_evaluation_fn=None):
+    def _service_cleanup(
+        self,
+        dry_run=True,
+        client_status_queue=None,
+        identified_resources=None,
+        filters=None,
+        resource_evaluation_fn=None,
+    ):
         servers = []
         for obj in self.servers():
             need_delete = self._service_cleanup_del_res(
@@ -2437,7 +2520,8 @@ class Proxy(proxy.Proxy):
                 client_status_queue=client_status_queue,
                 identified_resources=identified_resources,
                 filters=filters,
-                resource_evaluation_fn=resource_evaluation_fn)
+                resource_evaluation_fn=resource_evaluation_fn,
+            )
             if not dry_run and need_delete:
                 # In the dry run we identified, that server will go. To propely
                 # identify consequences we need to tell others, that the port
@@ -52,8 +52,7 @@ class Aggregate(resource.Resource):
     def _action(self, session, body, microversion=None):
         """Preform aggregate actions given the message body."""
         url = utils.urljoin(self.base_path, self.id, 'action')
-        response = session.post(
-            url, json=body, microversion=microversion)
+        response = session.post(url, json=body, microversion=microversion)
         exceptions.raise_from_response(response)
         aggregate = Aggregate()
         aggregate._translate_response(response=response)
@@ -79,6 +78,7 @@ class Aggregate(resource.Resource):
         body = {'cache': images}
         url = utils.urljoin(self.base_path, self.id, 'images')
         response = session.post(
-            url, json=body, microversion=self._max_microversion)
+            url, json=body, microversion=self._max_microversion
+        )
         exceptions.raise_from_response(response)
         # This API has no result
@@ -28,9 +28,12 @@ class Flavor(resource.Resource):
     allow_commit = True
 
     _query_mapping = resource.QueryParameters(
-        "sort_key", "sort_dir", "is_public",
+        "sort_key",
+        "sort_dir",
+        "is_public",
         min_disk="minDisk",
-        min_ram="minRam")
+        min_ram="minRam",
+    )
 
     # extra_specs introduced in 2.61
     _max_microversion = '2.61'
@@ -47,7 +50,8 @@ class Flavor(resource.Resource):
     #: ``True`` if this is a publicly visible flavor. ``False`` if this is
     #: a private image. *Type: bool*
     is_public = resource.Body(
-        'os-flavor-access:is_public', type=bool, default=True)
+        'os-flavor-access:is_public', type=bool, default=True
+    )
     #: The amount of RAM (in MB) this flavor offers. *Type: int*
     ram = resource.Body('ram', type=int, default=0)
     #: The number of virtual CPUs this flavor offers. *Type: int*
@@ -55,8 +59,7 @@ class Flavor(resource.Resource):
     #: Size of the swap partitions.
     swap = resource.Body('swap', default=0)
     #: Size of the ephemeral data disk attached to this server. *Type: int*
-    ephemeral = resource.Body(
-        'OS-FLV-EXT-DATA:ephemeral', type=int, default=0)
+    ephemeral = resource.Body('OS-FLV-EXT-DATA:ephemeral', type=int, default=0)
     #: ``True`` if this flavor is disabled, ``False`` if not. *Type: bool*
     is_disabled = resource.Body('OS-FLV-DISABLED:disabled', type=bool)
     #: The bandwidth scaling factor this flavor receives on the network.
@@ -91,7 +94,7 @@ class Flavor(resource.Resource):
         session,
         paginated=True,
         base_path='/flavors/detail',
-        **params
+        **params,
     ):
         # Find will invoke list when name was passed. Since we want to return
         # flavor with details (same as direct get) we need to swap default here
@@ -101,9 +104,8 @@ class Flavor(resource.Resource):
             # Force it to string to avoid requests skipping it.
             params['is_public'] = 'None'
         return super(Flavor, cls).list(
-            session, paginated=paginated,
-            base_path=base_path,
-            **params)
+            session, paginated=paginated, base_path=base_path, **params
+        )
 
     def _action(self, session, body, microversion=None):
         """Preform flavor actions given the message body."""
@@ -113,8 +115,7 @@ class Flavor(resource.Resource):
         if microversion:
             # Do not reset microversion if it is set on a session level
             attrs['microversion'] = microversion
-        response = session.post(
-            url, json=body, headers=headers, **attrs)
+        response = session.post(url, json=body, headers=headers, **attrs)
         exceptions.raise_from_response(response)
         return response
 
@@ -161,9 +162,8 @@ class Flavor(resource.Resource):
         url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs')
         microversion = self._get_microversion(session, action='create')
         response = session.post(
-            url,
-            json={'extra_specs': specs},
-            microversion=microversion)
+            url, json={'extra_specs': specs}, microversion=microversion
+        )
         exceptions.raise_from_response(response)
         specs = response.json().get('extra_specs', {})
         self._update(extra_specs=specs)
@@ -171,8 +171,7 @@ class Flavor(resource.Resource):
 
     def get_extra_specs_property(self, session, prop):
         """Get individual extra_spec property"""
-        url = utils.urljoin(Flavor.base_path, self.id,
-                            'os-extra_specs', prop)
+        url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop)
         microversion = self._get_microversion(session, action='fetch')
         response = session.get(url, microversion=microversion)
         exceptions.raise_from_response(response)
@@ -181,25 +180,20 @@ class Flavor(resource.Resource):
 
     def update_extra_specs_property(self, session, prop, val):
         """Update An Extra Spec For A Flavor"""
-        url = utils.urljoin(Flavor.base_path, self.id,
-                            'os-extra_specs', prop)
+        url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop)
         microversion = self._get_microversion(session, action='commit')
         response = session.put(
-            url,
-            json={prop: val},
-            microversion=microversion)
+            url, json={prop: val}, microversion=microversion
+        )
         exceptions.raise_from_response(response)
         val = response.json().get(prop)
         return val
 
     def delete_extra_specs_property(self, session, prop):
         """Delete An Extra Spec For A Flavor"""
-        url = utils.urljoin(Flavor.base_path, self.id,
-                            'os-extra_specs', prop)
+        url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop)
         microversion = self._get_microversion(session, action='delete')
-        response = session.delete(
-            url,
-            microversion=microversion)
+        response = session.delete(url, microversion=microversion)
         exceptions.raise_from_response(response)
 
 
@@ -86,14 +86,15 @@ class Hypervisor(resource.Resource):
         Updates uptime attribute of the hypervisor object
         """
         warnings.warn(
-            "This call is deprecated and is only available until Nova 2.88")
+            "This call is deprecated and is only available until Nova 2.88"
+        )
         if utils.supports_microversion(session, '2.88'):
             raise exceptions.SDKException(
-                'Hypervisor.get_uptime is not supported anymore')
+                'Hypervisor.get_uptime is not supported anymore'
+            )
         url = utils.urljoin(self.base_path, self.id, 'uptime')
         microversion = self._get_microversion(session, action='fetch')
-        response = session.get(
-            url, microversion=microversion)
+        response = session.get(url, microversion=microversion)
         self._translate_response(response)
         return self
 
@@ -24,10 +24,14 @@ class Image(resource.Resource, metadata.MetadataMixin):
     allow_list = True
 
     _query_mapping = resource.QueryParameters(
-        "server", "name", "status", "type",
+        "server",
+        "name",
+        "status",
+        "type",
         min_disk="minDisk",
         min_ram="minRam",
-        changes_since="changes-since")
+        changes_since="changes-since",
+    )
 
     # Properties
     #: Links pertaining to this image. This is a list of dictionaries,
@@ -18,8 +18,7 @@ class Keypair(resource.Resource):
     resources_key = 'keypairs'
     base_path = '/os-keypairs'
 
-    _query_mapping = resource.QueryParameters(
-        'user_id')
+    _query_mapping = resource.QueryParameters('user_id')
 
     # capabilities
     allow_create = True
@@ -25,13 +25,16 @@ class AbsoluteLimits(resource.Resource):
     personality_size = resource.Body("maxPersonalitySize", deprecated=True)
     #: The maximum amount of security group rules allowed.
     security_group_rules = resource.Body(
-        "maxSecurityGroupRules", aka="max_security_group_rules")
+        "maxSecurityGroupRules", aka="max_security_group_rules"
+    )
     #: The maximum amount of security groups allowed.
     security_groups = resource.Body(
-        "maxSecurityGroups", aka="max_security_groups")
+        "maxSecurityGroups", aka="max_security_groups"
+    )
     #: The amount of security groups currently in use.
     security_groups_used = resource.Body(
-        "totalSecurityGroupsUsed", aka="total_security_groups_used")
+        "totalSecurityGroupsUsed", aka="total_security_groups_used"
+    )
     #: The number of key-value pairs that can be set as server metadata.
     server_meta = resource.Body("maxServerMeta", aka="max_server_meta")
     #: The maximum amount of cores.
@@ -40,15 +43,18 @@ class AbsoluteLimits(resource.Resource):
     total_cores_used = resource.Body("totalCoresUsed", aka="total_cores_used")
     #: The maximum amount of floating IPs.
     floating_ips = resource.Body(
-        "maxTotalFloatingIps", aka="max_total_floating_ips")
+        "maxTotalFloatingIps", aka="max_total_floating_ips"
+    )
     #: The amount of floating IPs currently in use.
     floating_ips_used = resource.Body(
-        "totalFloatingIpsUsed", aka="total_floating_ips_used")
+        "totalFloatingIpsUsed", aka="total_floating_ips_used"
+    )
     #: The maximum amount of instances.
     instances = resource.Body("maxTotalInstances", aka="max_total_instances")
     #: The amount of instances currently in use.
     instances_used = resource.Body(
-        "totalInstancesUsed", aka="total_instances_used")
+        "totalInstancesUsed", aka="total_instances_used"
+    )
     #: The maximum amount of keypairs.
     keypairs = resource.Body("maxTotalKeypairs", aka="max_total_keypairs")
     #: The maximum RAM size in megabytes.
@@ -59,10 +65,12 @@ class AbsoluteLimits(resource.Resource):
     server_groups = resource.Body("maxServerGroups", aka="max_server_groups")
     #: The amount of server groups currently in use.
     server_groups_used = resource.Body(
-        "totalServerGroupsUsed", aka="total_server_groups_used")
+        "totalServerGroupsUsed", aka="total_server_groups_used"
+    )
     #: The maximum number of members in a server group.
     server_group_members = resource.Body(
-        "maxServerGroupMembers", aka="max_server_group_members")
+        "maxServerGroupMembers", aka="max_server_group_members"
+    )
 
 
 class RateLimit(resource.Resource):
@@ -83,15 +91,20 @@ class Limits(resource.Resource):
 
     allow_fetch = True
 
-    _query_mapping = resource.QueryParameters(
-        'tenant_id'
-    )
+    _query_mapping = resource.QueryParameters('tenant_id')
 
     absolute = resource.Body("absolute", type=AbsoluteLimits)
     rate = resource.Body("rate", type=list, list_type=RateLimit)
 
-    def fetch(self, session, requires_id=False, error_message=None,
-              base_path=None, skip_cache=False, **params):
+    def fetch(
+        self,
+        session,
+        requires_id=False,
+        error_message=None,
+        base_path=None,
+        skip_cache=False,
+        **params
+    ):
         """Get the Limits resource.
 
         :param session: The session to use for making this request.
@@ -103,8 +116,10 @@ class Limits(resource.Resource):
         # TODO(mordred) We shouldn't have to subclass just to declare
         # requires_id = False.
         return super(Limits, self).fetch(
-            session=session, requires_id=requires_id,
+            session=session,
+            requires_id=requires_id,
             error_message=error_message,
             base_path=base_path,
             skip_cache=skip_cache,
-            **params)
+            **params
+        )
@@ -31,10 +31,12 @@ class QuotaSet(quota_set.QuotaSet):
     force = resource.Body('force', type=bool)
     #: The number of allowed bytes of content for each injected file.
     injected_file_content_bytes = resource.Body(
-        'injected_file_content_bytes', type=int)
+        'injected_file_content_bytes', type=int
+    )
     #: The number of allowed bytes for each injected file path.
     injected_file_path_bytes = resource.Body(
-        'injected_file_path_bytes', type=int)
+        'injected_file_path_bytes', type=int
+    )
     #: The number of allowed injected files for each tenant.
     injected_files = resource.Body('injected_files', type=int)
     #: The number of allowed servers for each tenant.
@@ -24,7 +24,7 @@ CONSOLE_TYPE_ACTION_MAPPING = {
     'xvpvnc': 'os-getVNCConsole',
     'spice-html5': 'os-getSPICEConsole',
     'rdp-html5': 'os-getRDPConsole',
-    'serial': 'os-getSerialConsole'
+    'serial': 'os-getSerialConsole',
 }
 
 
@@ -46,15 +46,33 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
     _sentinel = object()
 
     _query_mapping = resource.QueryParameters(
-        "auto_disk_config", "availability_zone",
-        "created_at", "description", "flavor",
-        "hostname", "image", "kernel_id", "key_name",
-        "launch_index", "launched_at", "locked_by", "name",
-        "node", "power_state", "progress", "project_id", "ramdisk_id",
-        "reservation_id", "root_device_name",
-        "status", "task_state", "terminated_at", "user_id",
+        "auto_disk_config",
+        "availability_zone",
+        "created_at",
+        "description",
+        "flavor",
+        "hostname",
+        "image",
+        "kernel_id",
+        "key_name",
+        "launch_index",
+        "launched_at",
+        "locked_by",
+        "name",
+        "node",
+        "power_state",
+        "progress",
+        "project_id",
+        "ramdisk_id",
+        "reservation_id",
+        "root_device_name",
+        "status",
+        "task_state",
+        "terminated_at",
+        "user_id",
         "vm_state",
-        "sort_key", "sort_dir",
+        "sort_key",
+        "sort_dir",
         access_ipv4="access_ip_v4",
         access_ipv6="access_ip_v6",
         has_config_drive="config_drive",
@@ -67,7 +85,7 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
         changes_before="changes-before",
         id="uuid",
         all_projects="all_tenants",
-        **tag.TagMixin._tag_query_parameters
+        **tag.TagMixin._tag_query_parameters,
     )
 
     _max_microversion = '2.91'
@@ -93,7 +111,8 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
         aka='volumes',
         type=list,
         list_type=volume_attachment.VolumeAttachment,
-        default=[])
+        default=[],
+    )
     #: The name of the availability zone this server is a part of.
     availability_zone = resource.Body('OS-EXT-AZ:availability_zone')
     #: Enables fine grained control of the block device mapping for an
@@ -202,8 +221,9 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
     scheduler_hints = resource.Body('OS-SCH-HNT:scheduler_hints', type=dict)
     #: A list of applicable security groups. Each group contains keys for
     #: description, name, id, and rules.
-    security_groups = resource.Body('security_groups',
-                                    type=list, list_type=dict)
+    security_groups = resource.Body(
+        'security_groups', type=list, list_type=dict
+    )
     #: The UUIDs of the server groups to which the server belongs.
     #: Currently this can contain at most one entry.
     server_groups = resource.Body('server_groups', type=list)
@@ -220,7 +240,8 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
     #: A list of trusted certificate IDs, that were used during image
     #: signature verification to verify the signing certificate.
     trusted_image_certificates = resource.Body(
-        'trusted_image_certificates', type=list)
+        'trusted_image_certificates', type=list
+    )
     #: Timestamp of when this server was last updated.
     updated_at = resource.Body('updated')
     #: Configuration information or scripts to use upon launch.
@@ -231,11 +252,18 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
     #: The VM state of this server.
     vm_state = resource.Body('OS-EXT-STS:vm_state')
 
-    def _prepare_request(self, requires_id=True, prepend_key=True,
-                         base_path=None, **kwargs):
-        request = super(Server, self)._prepare_request(requires_id=requires_id,
-                                                       prepend_key=prepend_key,
-                                                       base_path=base_path)
+    def _prepare_request(
+        self,
+        requires_id=True,
+        prepend_key=True,
+        base_path=None,
+        **kwargs,
+    ):
+        request = super(Server, self)._prepare_request(
+            requires_id=requires_id,
+            prepend_key=prepend_key,
+            base_path=base_path,
+        )
 
         server_body = request.body[self.resource_key]
 
@@ -311,14 +339,21 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
         body = {'forceDelete': None}
         self._action(session, body)
 
-    def rebuild(self, session, image, name=None, admin_password=None,
-                preserve_ephemeral=None,
-                access_ipv4=None, access_ipv6=None,
-                metadata=None, user_data=None, key_name=None):
+    def rebuild(
+        self,
+        session,
+        image,
+        name=None,
+        admin_password=None,
+        preserve_ephemeral=None,
+        access_ipv4=None,
+        access_ipv6=None,
+        metadata=None,
+        user_data=None,
+        key_name=None,
+    ):
         """Rebuild the server with the given arguments."""
-        action = {
-            'imageRef': resource.Resource._get_id(image)
-        }
+        action = {'imageRef': resource.Resource._get_id(image)}
         if preserve_ephemeral is not None:
             action['preserve_ephemeral'] = preserve_ephemeral
         if name is not None:
@@ -431,7 +466,7 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
             "createBackup": {
                 "name": name,
                 "backup_type": backup_type,
-                "rotation": rotation
+                "rotation": rotation,
             }
         }
         self._action(session, body)
@@ -547,24 +582,36 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
         resp = self._action(session, body)
         return resp.json().get('console')
 
-    def live_migrate(self, session, host, force, block_migration,
-                     disk_over_commit=False):
+    def live_migrate(
+        self,
+        session,
+        host,
+        force,
+        block_migration,
+        disk_over_commit=False,
+    ):
         if utils.supports_microversion(session, '2.30'):
             return self._live_migrate_30(
-                session, host,
-                force=force,
-                block_migration=block_migration)
-        elif utils.supports_microversion(session, '2.25'):
-            return self._live_migrate_25(
-                session, host,
-                force=force,
-                block_migration=block_migration)
-        else:
-            return self._live_migrate(
-                session, host,
+                session,
+                host,
                 force=force,
                 block_migration=block_migration,
-                disk_over_commit=disk_over_commit)
+            )
+        elif utils.supports_microversion(session, '2.25'):
+            return self._live_migrate_25(
+                session,
+                host,
+                force=force,
+                block_migration=block_migration,
+            )
+        else:
+            return self._live_migrate(
+                session,
+                host,
+                force=force,
+                block_migration=block_migration,
+                disk_over_commit=disk_over_commit,
+            )
 
     def _live_migrate_30(self, session, host, force, block_migration):
         microversion = '2.30'
@@ -577,7 +624,10 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
         if force:
             body['force'] = force
         self._action(
-            session, {'os-migrateLive': body}, microversion=microversion)
+            session,
+            {'os-migrateLive': body},
+            microversion=microversion,
+        )
 
     def _live_migrate_25(self, session, host, force, block_migration):
         microversion = '2.25'
@@ -594,12 +644,22 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
                     " possible to disable. It is recommended to not use 'host'"
                     " at all on this cloud as it is inherently unsafe, but if"
                     " it is unavoidable, please supply 'force=True' so that it"
-                    " is clear you understand the risks.")
+                    " is clear you understand the risks."
+                )
         self._action(
-            session, {'os-migrateLive': body}, microversion=microversion)
+            session,
+            {'os-migrateLive': body},
+            microversion=microversion,
+        )
 
-    def _live_migrate(self, session, host, force, block_migration,
-                      disk_over_commit):
+    def _live_migrate(
+        self,
+        session,
+        host,
+        force,
+        block_migration,
+        disk_over_commit,
+    ):
         microversion = None
         body = {
             'host': None,
@@ -607,7 +667,8 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
         if block_migration == 'auto':
             raise ValueError(
                 "Live migration on this cloud does not support 'auto' as"
-                " a parameter to block_migration, but only True and False.")
+                " a parameter to block_migration, but only True and False."
+            )
         body['block_migration'] = block_migration or False
         body['disk_over_commit'] = disk_over_commit or False
         if host:
@@ -619,9 +680,13 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
                     " possible to disable. It is recommended to not use 'host'"
                    " at all on this cloud as it is inherently unsafe, but if"
                    " it is unavoidable, please supply 'force=True' so that it"
-                    " is clear you understand the risks.")
+                    " is clear you understand the risks."
+                )
         self._action(
-            session, {'os-migrateLive': body}, microversion=microversion)
+            session,
+            {'os-migrateLive': body},
+            microversion=microversion,
+        )
 
     def fetch_topology(self, session):
         utils.require_microversion(session, 2.78)
@@ -32,8 +32,15 @@ class ServerIP(resource.Resource):
     version = resource.Body('version')
 
     @classmethod
-    def list(cls, session, paginated=False, server_id=None,
-             network_label=None, base_path=None, **params):
+    def list(
+        cls,
+        session,
+        paginated=False,
+        server_id=None,
+        network_label=None,
+        base_path=None,
+        **params
+    ):
 
         if base_path is None:
             base_path = cls.base_path
@@ -43,7 +50,7 @@ class ServerIP(resource.Resource):
         if network_label is not None:
             url = utils.urljoin(url, network_label)
 
-        resp = session.get(url,)
+        resp = session.get(url)
         resp = resp.json()
 
         if network_label is None:
@@ -51,6 +58,8 @@ class ServerIP(resource.Resource):
 
         for label, addresses in resp.items():
             for address in addresses:
-                yield cls.existing(network_label=label,
-                                   address=address["addr"],
-                                   version=address["version"])
+                yield cls.existing(
+                    network_label=label,
+                    address=address["addr"],
+                    version=address["version"],
+                )
@@ -19,7 +19,7 @@ CONSOLE_TYPE_PROTOCOL_MAPPING = {
     'spice-html5': 'spice',
     'rdp-html5': 'rdp',
     'serial': 'serial',
-    'webmks': 'mks'
+    'webmks': 'mks',
 }
 
 
@@ -47,16 +47,14 @@ class ServerRemoteConsole(resource.Resource):
 
     def create(self, session, prepend_key=True, base_path=None, **params):
         if not self.protocol:
-            self.protocol = \
-                CONSOLE_TYPE_PROTOCOL_MAPPING.get(self.type)
+            self.protocol = CONSOLE_TYPE_PROTOCOL_MAPPING.get(self.type)
         if (
             not utils.supports_microversion(session, '2.8')
            and self.type == 'webmks'
        ):
-            raise ValueError('Console type webmks is not supported on '
-                             'server side')
+            raise ValueError(
+                'Console type webmks is not supported on ' 'server side'
+            )
         return super(ServerRemoteConsole, self).create(
-            session,
-            prepend_key=prepend_key,
-            base_path=base_path,
-            **params)
+            session, prepend_key=prepend_key, base_path=base_path, **params
+        )
@@ -26,7 +26,9 @@ class Service(resource.Resource):
     allow_delete = True
 
     _query_mapping = resource.QueryParameters(
-        'name', 'binary', 'host',
+        'name',
+        'binary',
+        'host',
         name='binary',
     )
 
@@ -73,7 +75,7 @@ class Service(resource.Resource):
                     result = maybe_result
                 else:
                     msg = "More than one %s exists with the name '%s'."
-                    msg = (msg % (cls.__name__, name_or_id))
+                    msg = msg % (cls.__name__, name_or_id)
                     raise exceptions.DuplicateResource(msg)
 
         if result is not None:
@@ -82,12 +84,16 @@ class Service(resource.Resource):
         if ignore_missing:
             return None
         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id))
+            "No %s found for %s" % (cls.__name__, name_or_id)
+        )
 
     def commit(self, session, prepend_key=False, **kwargs):
         # we need to set prepend_key to false
         return super(Service, self).commit(
-            session, prepend_key=prepend_key, **kwargs)
+            session,
+            prepend_key=prepend_key,
+            **kwargs,
+        )
 
     def _action(self, session, action, body, microversion=None):
         if not microversion:
@@ -97,9 +103,7 @@ class Service(resource.Resource):
         self._translate_response(response)
         return self
 
-    def set_forced_down(
-        self, session, host=None, binary=None, forced=False
-    ):
+    def set_forced_down(self, session, host=None, binary=None, forced=False):
         """Update forced_down information of a service."""
         microversion = session.default_microversion
         body = {}