Blackify openstack.baremetal, openstack.baremetal_introspection
Black used with the '-l 79 -S' flags. A future change will ignore this commit in git-blame history by adding a 'git-blame-ignore-revs' file. Change-Id: I1effcaff4f4c931b46541f8db44ed50c10104cad Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
parent
4589e293e8
commit
f8e42017e7
openstack
baremetal
baremetal_introspection/v1
tests
@ -23,8 +23,13 @@ import tempfile
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def populate_directory(metadata, user_data=None, versions=None,
|
||||
network_data=None, vendor_data=None):
|
||||
def populate_directory(
|
||||
metadata,
|
||||
user_data=None,
|
||||
versions=None,
|
||||
network_data=None,
|
||||
vendor_data=None,
|
||||
):
|
||||
"""Populate a directory with configdrive files.
|
||||
|
||||
:param dict metadata: Metadata.
|
||||
@ -46,21 +51,24 @@ def populate_directory(metadata, user_data=None, versions=None,
|
||||
json.dump(metadata, fp)
|
||||
|
||||
if network_data:
|
||||
with open(os.path.join(subdir, 'network_data.json'),
|
||||
'w') as fp:
|
||||
with open(
|
||||
os.path.join(subdir, 'network_data.json'), 'w'
|
||||
) as fp:
|
||||
json.dump(network_data, fp)
|
||||
|
||||
if vendor_data:
|
||||
with open(os.path.join(subdir, 'vendor_data2.json'),
|
||||
'w') as fp:
|
||||
with open(
|
||||
os.path.join(subdir, 'vendor_data2.json'), 'w'
|
||||
) as fp:
|
||||
json.dump(vendor_data, fp)
|
||||
|
||||
if user_data:
|
||||
# Strictly speaking, user data is binary, but in many cases
|
||||
# it's actually a text (cloud-init, ignition, etc).
|
||||
flag = 't' if isinstance(user_data, str) else 'b'
|
||||
with open(os.path.join(subdir, 'user_data'),
|
||||
'w%s' % flag) as fp:
|
||||
with open(
|
||||
os.path.join(subdir, 'user_data'), 'w%s' % flag
|
||||
) as fp:
|
||||
fp.write(user_data)
|
||||
|
||||
yield d
|
||||
@ -68,8 +76,13 @@ def populate_directory(metadata, user_data=None, versions=None,
|
||||
shutil.rmtree(d)
|
||||
|
||||
|
||||
def build(metadata, user_data=None, versions=None, network_data=None,
|
||||
vendor_data=None):
|
||||
def build(
|
||||
metadata,
|
||||
user_data=None,
|
||||
versions=None,
|
||||
network_data=None,
|
||||
vendor_data=None,
|
||||
):
|
||||
"""Make a configdrive compatible with the Bare Metal service.
|
||||
|
||||
Requires the genisoimage utility to be available.
|
||||
@ -81,8 +94,9 @@ def build(metadata, user_data=None, versions=None, network_data=None,
|
||||
:param dict vendor_data: Extra supplied vendor data.
|
||||
:return: configdrive contents as a base64-encoded string.
|
||||
"""
|
||||
with populate_directory(metadata, user_data, versions,
|
||||
network_data, vendor_data) as path:
|
||||
with populate_directory(
|
||||
metadata, user_data, versions, network_data, vendor_data
|
||||
) as path:
|
||||
return pack(path)
|
||||
|
||||
|
||||
@ -100,16 +114,27 @@ def pack(path):
|
||||
cmds = ['genisoimage', 'mkisofs', 'xorrisofs']
|
||||
for c in cmds:
|
||||
try:
|
||||
p = subprocess.Popen([c,
|
||||
'-o', tmpfile.name,
|
||||
'-ldots', '-allow-lowercase',
|
||||
'-allow-multidot', '-l',
|
||||
'-publisher', 'metalsmith',
|
||||
'-quiet', '-J',
|
||||
'-r', '-V', 'config-2',
|
||||
path],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
p = subprocess.Popen(
|
||||
[
|
||||
c,
|
||||
'-o',
|
||||
tmpfile.name,
|
||||
'-ldots',
|
||||
'-allow-lowercase',
|
||||
'-allow-multidot',
|
||||
'-l',
|
||||
'-publisher',
|
||||
'metalsmith',
|
||||
'-quiet',
|
||||
'-J',
|
||||
'-r',
|
||||
'-V',
|
||||
'config-2',
|
||||
path,
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
except OSError as e:
|
||||
error = e
|
||||
else:
|
||||
@ -120,14 +145,16 @@ def pack(path):
|
||||
raise RuntimeError(
|
||||
'Error generating the configdrive. Make sure the '
|
||||
'"genisoimage", "mkisofs" or "xorrisofs" tool is installed. '
|
||||
'Error: %s' % error)
|
||||
'Error: %s' % error
|
||||
)
|
||||
|
||||
stdout, stderr = p.communicate()
|
||||
if p.returncode != 0:
|
||||
raise RuntimeError(
|
||||
'Error generating the configdrive.'
|
||||
'Stdout: "%(stdout)s". Stderr: "%(stderr)s"' %
|
||||
{'stdout': stdout, 'stderr': stderr})
|
||||
'Stdout: "%(stdout)s". Stderr: "%(stderr)s"'
|
||||
% {'stdout': stdout, 'stderr': stderr}
|
||||
)
|
||||
|
||||
tmpfile.seek(0)
|
||||
|
||||
|
@ -17,7 +17,7 @@ RETRIABLE_STATUS_CODES = [
|
||||
# HTTP Conflict - happens if a node is locked
|
||||
409,
|
||||
# HTTP Service Unavailable happens if there's no free conductor
|
||||
503
|
||||
503,
|
||||
]
|
||||
"""HTTP status codes that should be retried."""
|
||||
|
||||
@ -88,7 +88,6 @@ CHANGE_BOOT_MODE_VERSION = '1.76'
|
||||
|
||||
|
||||
class ListMixin:
|
||||
|
||||
@classmethod
|
||||
def list(cls, session, details=False, **params):
|
||||
"""This method is a generator which yields resource objects.
|
||||
@ -112,8 +111,9 @@ class ListMixin:
|
||||
base_path = cls.base_path
|
||||
if details:
|
||||
base_path += '/detail'
|
||||
return super(ListMixin, cls).list(session, paginated=True,
|
||||
base_path=base_path, **params)
|
||||
return super(ListMixin, cls).list(
|
||||
session, paginated=True, base_path=base_path, **params
|
||||
)
|
||||
|
||||
|
||||
def comma_separated_list(value):
|
||||
|
@ -63,8 +63,10 @@ class Proxy(proxy.Proxy):
|
||||
return res.fetch(
|
||||
self,
|
||||
error_message="No {resource_type} found for {value}".format(
|
||||
resource_type=resource_type.__name__, value=value),
|
||||
**kwargs)
|
||||
resource_type=resource_type.__name__, value=value
|
||||
),
|
||||
**kwargs
|
||||
)
|
||||
|
||||
def chassis(self, details=False, **query):
|
||||
"""Retrieve a generator of chassis.
|
||||
@ -123,8 +125,9 @@ class Proxy(proxy.Proxy):
|
||||
:returns: One :class:`~openstack.baremetal.v1.chassis.Chassis` object
|
||||
or None.
|
||||
"""
|
||||
return self._find(_chassis.Chassis, name_or_id,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._find(
|
||||
_chassis.Chassis, name_or_id, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def get_chassis(self, chassis, fields=None):
|
||||
"""Get a specific chassis.
|
||||
@ -178,8 +181,9 @@ class Proxy(proxy.Proxy):
|
||||
:returns: The instance of the chassis which was deleted.
|
||||
:rtype: :class:`~openstack.baremetal.v1.chassis.Chassis`.
|
||||
"""
|
||||
return self._delete(_chassis.Chassis, chassis,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._delete(
|
||||
_chassis.Chassis, chassis, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def drivers(self, details=False, **query):
|
||||
"""Retrieve a generator of drivers.
|
||||
@ -221,8 +225,9 @@ class Proxy(proxy.Proxy):
|
||||
driver = self.get_driver(driver)
|
||||
return driver.list_vendor_passthru(self)
|
||||
|
||||
def call_driver_vendor_passthru(self, driver,
|
||||
verb: str, method: str, body=None):
|
||||
def call_driver_vendor_passthru(
|
||||
self, driver, verb: str, method: str, body=None
|
||||
):
|
||||
"""Call driver's vendor_passthru method.
|
||||
|
||||
:param driver: The value can be the name of a driver or a
|
||||
@ -311,8 +316,9 @@ class Proxy(proxy.Proxy):
|
||||
:returns: One :class:`~openstack.baremetal.v1.node.Node` object
|
||||
or None.
|
||||
"""
|
||||
return self._find(_node.Node, name_or_id,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._find(
|
||||
_node.Node, name_or_id, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def get_node(self, node, fields=None):
|
||||
"""Get a specific node.
|
||||
@ -345,8 +351,9 @@ class Proxy(proxy.Proxy):
|
||||
res = self._get_resource(_node.Node, node, **attrs)
|
||||
return res.commit(self, retry_on_conflict=retry_on_conflict)
|
||||
|
||||
def patch_node(self, node, patch, reset_interfaces=None,
|
||||
retry_on_conflict=True):
|
||||
def patch_node(
|
||||
self, node, patch, reset_interfaces=None, retry_on_conflict=True
|
||||
):
|
||||
"""Apply a JSON patch to the node.
|
||||
|
||||
:param node: The value can be the name or ID of a node or a
|
||||
@ -368,12 +375,24 @@ class Proxy(proxy.Proxy):
|
||||
:rtype: :class:`~openstack.baremetal.v1.node.Node`
|
||||
"""
|
||||
res = self._get_resource(_node.Node, node)
|
||||
return res.patch(self, patch, retry_on_conflict=retry_on_conflict,
|
||||
reset_interfaces=reset_interfaces)
|
||||
return res.patch(
|
||||
self,
|
||||
patch,
|
||||
retry_on_conflict=retry_on_conflict,
|
||||
reset_interfaces=reset_interfaces,
|
||||
)
|
||||
|
||||
def set_node_provision_state(self, node, target, config_drive=None,
|
||||
clean_steps=None, rescue_password=None,
|
||||
wait=False, timeout=None, deploy_steps=None):
|
||||
def set_node_provision_state(
|
||||
self,
|
||||
node,
|
||||
target,
|
||||
config_drive=None,
|
||||
clean_steps=None,
|
||||
rescue_password=None,
|
||||
wait=False,
|
||||
timeout=None,
|
||||
deploy_steps=None,
|
||||
):
|
||||
"""Run an action modifying node's provision state.
|
||||
|
||||
This call is asynchronous, it will return success as soon as the Bare
|
||||
@ -405,11 +424,16 @@ class Proxy(proxy.Proxy):
|
||||
invalid ``target``.
|
||||
"""
|
||||
res = self._get_resource(_node.Node, node)
|
||||
return res.set_provision_state(self, target, config_drive=config_drive,
|
||||
clean_steps=clean_steps,
|
||||
rescue_password=rescue_password,
|
||||
wait=wait, timeout=timeout,
|
||||
deploy_steps=deploy_steps)
|
||||
return res.set_provision_state(
|
||||
self,
|
||||
target,
|
||||
config_drive=config_drive,
|
||||
clean_steps=clean_steps,
|
||||
rescue_password=rescue_password,
|
||||
wait=wait,
|
||||
timeout=timeout,
|
||||
deploy_steps=deploy_steps,
|
||||
)
|
||||
|
||||
def get_node_boot_device(self, node):
|
||||
"""Get node boot device
|
||||
@ -480,10 +504,14 @@ class Proxy(proxy.Proxy):
|
||||
res = self._get_resource(_node.Node, node)
|
||||
res.inject_nmi(self)
|
||||
|
||||
def wait_for_nodes_provision_state(self, nodes, expected_state,
|
||||
timeout=None,
|
||||
abort_on_failed_state=True,
|
||||
fail=True):
|
||||
def wait_for_nodes_provision_state(
|
||||
self,
|
||||
nodes,
|
||||
expected_state,
|
||||
timeout=None,
|
||||
abort_on_failed_state=True,
|
||||
fail=True,
|
||||
):
|
||||
"""Wait for the nodes to reach the expected state.
|
||||
|
||||
:param nodes: List of nodes - name, ID or
|
||||
@ -507,24 +535,27 @@ class Proxy(proxy.Proxy):
|
||||
reaches an error state and ``abort_on_failed_state`` is ``True``.
|
||||
:raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
|
||||
"""
|
||||
log_nodes = ', '.join(n.id if isinstance(n, _node.Node) else n
|
||||
for n in nodes)
|
||||
log_nodes = ', '.join(
|
||||
n.id if isinstance(n, _node.Node) else n for n in nodes
|
||||
)
|
||||
|
||||
finished = []
|
||||
failed = []
|
||||
remaining = nodes
|
||||
try:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for nodes %(nodes)s to reach "
|
||||
"target state '%(state)s'" % {'nodes': log_nodes,
|
||||
'state': expected_state}):
|
||||
timeout,
|
||||
"Timeout waiting for nodes %(nodes)s to reach "
|
||||
"target state '%(state)s'"
|
||||
% {'nodes': log_nodes, 'state': expected_state},
|
||||
):
|
||||
nodes = [self.get_node(n) for n in remaining]
|
||||
remaining = []
|
||||
for n in nodes:
|
||||
try:
|
||||
if n._check_state_reached(self, expected_state,
|
||||
abort_on_failed_state):
|
||||
if n._check_state_reached(
|
||||
self, expected_state, abort_on_failed_state
|
||||
):
|
||||
finished.append(n)
|
||||
else:
|
||||
remaining.append(n)
|
||||
@ -543,8 +574,11 @@ class Proxy(proxy.Proxy):
|
||||
self.log.debug(
|
||||
'Still waiting for nodes %(nodes)s to reach state '
|
||||
'"%(target)s"',
|
||||
{'nodes': ', '.join(n.id for n in remaining),
|
||||
'target': expected_state})
|
||||
{
|
||||
'nodes': ', '.join(n.id for n in remaining),
|
||||
'target': expected_state,
|
||||
},
|
||||
)
|
||||
except exceptions.ResourceTimeout:
|
||||
if fail:
|
||||
raise
|
||||
@ -568,7 +602,8 @@ class Proxy(proxy.Proxy):
|
||||
``None`` (the default) means no client-side timeout.
|
||||
"""
|
||||
self._get_resource(_node.Node, node).set_power_state(
|
||||
self, target, wait=wait, timeout=timeout)
|
||||
self, target, wait=wait, timeout=timeout
|
||||
)
|
||||
|
||||
def wait_for_node_power_state(self, node, expected_state, timeout=None):
|
||||
"""Wait for the node to reach the power state.
|
||||
@ -731,8 +766,9 @@ class Proxy(proxy.Proxy):
|
||||
:returns: One :class:`~openstack.baremetal.v1.port.Port` object
|
||||
or None.
|
||||
"""
|
||||
return self._find(_port.Port, name_or_id,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._find(
|
||||
_port.Port, name_or_id, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def get_port(self, port, fields=None):
|
||||
"""Get a specific port.
|
||||
@ -849,8 +885,9 @@ class Proxy(proxy.Proxy):
|
||||
:returns: One :class:`~openstack.baremetal.v1.port_group.PortGroup`
|
||||
object or None.
|
||||
"""
|
||||
return self._find(_portgroup.PortGroup, name_or_id,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._find(
|
||||
_portgroup.PortGroup, name_or_id, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def get_port_group(self, port_group, fields=None):
|
||||
"""Get a specific port group.
|
||||
@ -863,8 +900,9 @@ class Proxy(proxy.Proxy):
|
||||
:raises: :class:`~openstack.exceptions.ResourceNotFound` when no
|
||||
port group matching the name or ID could be found.
|
||||
"""
|
||||
return self._get_with_fields(_portgroup.PortGroup, port_group,
|
||||
fields=fields)
|
||||
return self._get_with_fields(
|
||||
_portgroup.PortGroup, port_group, fields=fields
|
||||
)
|
||||
|
||||
def update_port_group(self, port_group, **attrs):
|
||||
"""Update a port group.
|
||||
@ -909,8 +947,9 @@ class Proxy(proxy.Proxy):
|
||||
:returns: The instance of the port group which was deleted.
|
||||
:rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup`.
|
||||
"""
|
||||
return self._delete(_portgroup.PortGroup, port_group,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._delete(
|
||||
_portgroup.PortGroup, port_group, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def attach_vif_to_node(self, node, vif_id, retry_on_conflict=True):
|
||||
"""Attach a VIF to the node.
|
||||
@ -1026,8 +1065,9 @@ class Proxy(proxy.Proxy):
|
||||
:raises: :class:`~openstack.exceptions.ResourceNotFound` when no
|
||||
allocation matching the name or ID could be found.
|
||||
"""
|
||||
return self._get_with_fields(_allocation.Allocation, allocation,
|
||||
fields=fields)
|
||||
return self._get_with_fields(
|
||||
_allocation.Allocation, allocation, fields=fields
|
||||
)
|
||||
|
||||
def update_allocation(self, allocation, **attrs):
|
||||
"""Update an allocation.
|
||||
@ -1052,8 +1092,9 @@ class Proxy(proxy.Proxy):
|
||||
:returns: The updated allocation.
|
||||
:rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`
|
||||
"""
|
||||
return self._get_resource(_allocation.Allocation,
|
||||
allocation).patch(self, patch)
|
||||
return self._get_resource(_allocation.Allocation, allocation).patch(
|
||||
self, patch
|
||||
)
|
||||
|
||||
def delete_allocation(self, allocation, ignore_missing=True):
|
||||
"""Delete an allocation.
|
||||
@ -1069,11 +1110,13 @@ class Proxy(proxy.Proxy):
|
||||
:returns: The instance of the allocation which was deleted.
|
||||
:rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`.
|
||||
"""
|
||||
return self._delete(_allocation.Allocation, allocation,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._delete(
|
||||
_allocation.Allocation, allocation, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def wait_for_allocation(self, allocation, timeout=None,
|
||||
ignore_error=False):
|
||||
def wait_for_allocation(
|
||||
self, allocation, timeout=None, ignore_error=False
|
||||
):
|
||||
"""Wait for the allocation to become active.
|
||||
|
||||
:param allocation: The value can be the name or ID of an allocation or
|
||||
@ -1252,8 +1295,11 @@ class Proxy(proxy.Proxy):
|
||||
:class:`~openstack.baremetal.v1.volumeconnector.VolumeConnector`
|
||||
object or None.
|
||||
"""
|
||||
return self._find(_volumeconnector.VolumeConnector, vc_id,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._find(
|
||||
_volumeconnector.VolumeConnector,
|
||||
vc_id,
|
||||
ignore_missing=ignore_missing,
|
||||
)
|
||||
|
||||
def get_volume_connector(self, volume_connector, fields=None):
|
||||
"""Get a specific volume_connector.
|
||||
@ -1269,9 +1315,9 @@ class Proxy(proxy.Proxy):
|
||||
:raises: :class:`~openstack.exceptions.ResourceNotFound` when no
|
||||
volume_connector matching the name or ID could be found.`
|
||||
"""
|
||||
return self._get_with_fields(_volumeconnector.VolumeConnector,
|
||||
volume_connector,
|
||||
fields=fields)
|
||||
return self._get_with_fields(
|
||||
_volumeconnector.VolumeConnector, volume_connector, fields=fields
|
||||
)
|
||||
|
||||
def update_volume_connector(self, volume_connector, **attrs):
|
||||
"""Update a volume_connector.
|
||||
@ -1287,8 +1333,9 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`
|
||||
"""
|
||||
return self._update(_volumeconnector.VolumeConnector,
|
||||
volume_connector, **attrs)
|
||||
return self._update(
|
||||
_volumeconnector.VolumeConnector, volume_connector, **attrs
|
||||
)
|
||||
|
||||
def patch_volume_connector(self, volume_connector, patch):
|
||||
"""Apply a JSON patch to the volume_connector.
|
||||
@ -1303,11 +1350,11 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.volume_connector.VolumeConnector.`
|
||||
"""
|
||||
return self._get_resource(_volumeconnector.VolumeConnector,
|
||||
volume_connector).patch(self, patch)
|
||||
return self._get_resource(
|
||||
_volumeconnector.VolumeConnector, volume_connector
|
||||
).patch(self, patch)
|
||||
|
||||
def delete_volume_connector(self, volume_connector,
|
||||
ignore_missing=True):
|
||||
def delete_volume_connector(self, volume_connector, ignore_missing=True):
|
||||
"""Delete an volume_connector.
|
||||
|
||||
:param volume_connector: The value can be either the ID of a
|
||||
@ -1324,8 +1371,11 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`.
|
||||
"""
|
||||
return self._delete(_volumeconnector.VolumeConnector,
|
||||
volume_connector, ignore_missing=ignore_missing)
|
||||
return self._delete(
|
||||
_volumeconnector.VolumeConnector,
|
||||
volume_connector,
|
||||
ignore_missing=ignore_missing,
|
||||
)
|
||||
|
||||
def volume_targets(self, details=False, **query):
|
||||
"""Retrieve a generator of volume_target.
|
||||
@ -1392,8 +1442,9 @@ class Proxy(proxy.Proxy):
|
||||
:class:`~openstack.baremetal.v1.volumetarget.VolumeTarget`
|
||||
object or None.
|
||||
"""
|
||||
return self._find(_volumetarget.VolumeTarget, vt_id,
|
||||
ignore_missing=ignore_missing)
|
||||
return self._find(
|
||||
_volumetarget.VolumeTarget, vt_id, ignore_missing=ignore_missing
|
||||
)
|
||||
|
||||
def get_volume_target(self, volume_target, fields=None):
|
||||
"""Get a specific volume_target.
|
||||
@ -1409,9 +1460,9 @@ class Proxy(proxy.Proxy):
|
||||
:raises: :class:`~openstack.exceptions.ResourceNotFound` when no
|
||||
volume_target matching the name or ID could be found.`
|
||||
"""
|
||||
return self._get_with_fields(_volumetarget.VolumeTarget,
|
||||
volume_target,
|
||||
fields=fields)
|
||||
return self._get_with_fields(
|
||||
_volumetarget.VolumeTarget, volume_target, fields=fields
|
||||
)
|
||||
|
||||
def update_volume_target(self, volume_target, **attrs):
|
||||
"""Update a volume_target.
|
||||
@ -1426,8 +1477,7 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.volume_target.VolumeTarget`
|
||||
"""
|
||||
return self._update(_volumetarget.VolumeTarget,
|
||||
volume_target, **attrs)
|
||||
return self._update(_volumetarget.VolumeTarget, volume_target, **attrs)
|
||||
|
||||
def patch_volume_target(self, volume_target, patch):
|
||||
"""Apply a JSON patch to the volume_target.
|
||||
@ -1442,11 +1492,11 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.volume_target.VolumeTarget.`
|
||||
"""
|
||||
return self._get_resource(_volumetarget.VolumeTarget,
|
||||
volume_target).patch(self, patch)
|
||||
return self._get_resource(
|
||||
_volumetarget.VolumeTarget, volume_target
|
||||
).patch(self, patch)
|
||||
|
||||
def delete_volume_target(self, volume_target,
|
||||
ignore_missing=True):
|
||||
def delete_volume_target(self, volume_target, ignore_missing=True):
|
||||
"""Delete an volume_target.
|
||||
|
||||
:param volume_target: The value can be either the ID of a
|
||||
@ -1463,8 +1513,11 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.volume_target.VolumeTarget`.
|
||||
"""
|
||||
return self._delete(_volumetarget.VolumeTarget,
|
||||
volume_target, ignore_missing=ignore_missing)
|
||||
return self._delete(
|
||||
_volumetarget.VolumeTarget,
|
||||
volume_target,
|
||||
ignore_missing=ignore_missing,
|
||||
)
|
||||
|
||||
def deploy_templates(self, details=False, **query):
|
||||
"""Retrieve a generator of deploy_templates.
|
||||
@ -1506,11 +1559,11 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`
|
||||
"""
|
||||
return self._update(_deploytemplates.DeployTemplate,
|
||||
deploy_template, **attrs)
|
||||
return self._update(
|
||||
_deploytemplates.DeployTemplate, deploy_template, **attrs
|
||||
)
|
||||
|
||||
def delete_deploy_template(self, deploy_template,
|
||||
ignore_missing=True):
|
||||
def delete_deploy_template(self, deploy_template, ignore_missing=True):
|
||||
"""Delete a deploy_template.
|
||||
|
||||
:param deploy_template:The value can be
|
||||
@ -1532,8 +1585,11 @@ class Proxy(proxy.Proxy):
|
||||
:class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`.
|
||||
"""
|
||||
|
||||
return self._delete(_deploytemplates.DeployTemplate,
|
||||
deploy_template, ignore_missing=ignore_missing)
|
||||
return self._delete(
|
||||
_deploytemplates.DeployTemplate,
|
||||
deploy_template,
|
||||
ignore_missing=ignore_missing,
|
||||
)
|
||||
|
||||
def get_deploy_template(self, deploy_template, fields=None):
|
||||
"""Get a specific deployment template.
|
||||
@ -1551,8 +1607,9 @@ class Proxy(proxy.Proxy):
|
||||
when no deployment template matching the name or
|
||||
ID could be found.
|
||||
"""
|
||||
return self._get_with_fields(_deploytemplates.DeployTemplate,
|
||||
deploy_template, fields=fields)
|
||||
return self._get_with_fields(
|
||||
_deploytemplates.DeployTemplate, deploy_template, fields=fields
|
||||
)
|
||||
|
||||
def patch_deploy_template(self, deploy_template, patch):
|
||||
"""Apply a JSON patch to the deploy_templates.
|
||||
@ -1568,8 +1625,9 @@ class Proxy(proxy.Proxy):
|
||||
:rtype:
|
||||
:class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`
|
||||
"""
|
||||
return self._get_resource(_deploytemplates.DeployTemplate,
|
||||
deploy_template).patch(self, patch)
|
||||
return self._get_resource(
|
||||
_deploytemplates.DeployTemplate, deploy_template
|
||||
).patch(self, patch)
|
||||
|
||||
def conductors(self, details=False, **query):
|
||||
"""Retrieve a generator of conductors.
|
||||
@ -1595,5 +1653,6 @@ class Proxy(proxy.Proxy):
|
||||
:raises: :class:`~openstack.exceptions.ResourceNotFound` when no
|
||||
conductor matching the name could be found.
|
||||
"""
|
||||
return self._get_with_fields(_conductor.Conductor,
|
||||
conductor, fields=fields)
|
||||
return self._get_with_fields(
|
||||
_conductor.Conductor, conductor, fields=fields
|
||||
)
|
||||
|
@ -32,7 +32,9 @@ class Allocation(_common.ListMixin, resource.Resource):
|
||||
commit_jsonpatch = True
|
||||
|
||||
_query_mapping = resource.QueryParameters(
|
||||
'node', 'resource_class', 'state',
|
||||
'node',
|
||||
'resource_class',
|
||||
'state',
|
||||
fields={'type': _common.fields_type},
|
||||
)
|
||||
|
||||
@ -88,18 +90,20 @@ class Allocation(_common.ListMixin, resource.Resource):
|
||||
return self
|
||||
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the allocation %s" % self.id):
|
||||
timeout, "Timeout waiting for the allocation %s" % self.id
|
||||
):
|
||||
self.fetch(session)
|
||||
|
||||
if self.state == 'error' and not ignore_error:
|
||||
raise exceptions.ResourceFailure(
|
||||
"Allocation %(allocation)s failed: %(error)s" %
|
||||
{'allocation': self.id, 'error': self.last_error})
|
||||
"Allocation %(allocation)s failed: %(error)s"
|
||||
% {'allocation': self.id, 'error': self.last_error}
|
||||
)
|
||||
elif self.state != 'allocating':
|
||||
return self
|
||||
|
||||
session.log.debug(
|
||||
'Still waiting for the allocation %(allocation)s '
|
||||
'to become active, the current state is %(state)s',
|
||||
{'allocation': self.id, 'state': self.state})
|
||||
{'allocation': self.id, 'state': self.state},
|
||||
)
|
||||
|
@ -63,7 +63,8 @@ class Driver(resource.Resource):
|
||||
#: Default management interface implementation.
|
||||
#: Introduced in API microversion 1.30.
|
||||
default_management_interface = resource.Body(
|
||||
"default_management_interface")
|
||||
"default_management_interface"
|
||||
)
|
||||
#: Default network interface implementation.
|
||||
#: Introduced in API microversion 1.30.
|
||||
default_network_interface = resource.Body("default_network_interface")
|
||||
@ -101,7 +102,8 @@ class Driver(resource.Resource):
|
||||
#: Enabled management interface implementations.
|
||||
#: Introduced in API microversion 1.30.
|
||||
enabled_management_interfaces = resource.Body(
|
||||
"enabled_management_interfaces")
|
||||
"enabled_management_interfaces"
|
||||
)
|
||||
#: Enabled network interface implementations.
|
||||
#: Introduced in API microversion 1.30.
|
||||
enabled_network_interfaces = resource.Body("enabled_network_interfaces")
|
||||
@ -135,17 +137,18 @@ class Driver(resource.Resource):
|
||||
"""
|
||||
session = self._get_session(session)
|
||||
request = self._prepare_request()
|
||||
request.url = utils.urljoin(
|
||||
request.url, 'vendor_passthru', 'methods')
|
||||
request.url = utils.urljoin(request.url, 'vendor_passthru', 'methods')
|
||||
response = session.get(request.url, headers=request.headers)
|
||||
|
||||
msg = ("Failed to list list vendor_passthru methods for {driver_name}"
|
||||
.format(driver_name=self.name))
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
msg = "Failed to list list vendor_passthru methods for {driver_name}"
|
||||
exceptions.raise_from_response(
|
||||
response, error_message=msg.format(driver_name=self.name)
|
||||
)
|
||||
return response.json()
|
||||
|
||||
def call_vendor_passthru(self, session,
|
||||
verb: str, method: str, body: dict = None):
|
||||
def call_vendor_passthru(
|
||||
self, session, verb: str, method: str, body: dict = None
|
||||
):
|
||||
"""Call a vendor specific passthru method
|
||||
|
||||
Contents of body are params passed to the hardware driver
|
||||
@ -167,13 +170,18 @@ class Driver(resource.Resource):
|
||||
session = self._get_session(session)
|
||||
request = self._prepare_request()
|
||||
request.url = utils.urljoin(
|
||||
request.url, f'vendor_passthru?method={method}')
|
||||
request.url, f'vendor_passthru?method={method}'
|
||||
)
|
||||
call = getattr(session, verb.lower())
|
||||
response = call(
|
||||
request.url, json=body, headers=request.headers,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed call to method {method} on driver {driver_name}"
|
||||
.format(method=method, driver_name=self.name))
|
||||
msg = "Failed call to method {method} on driver {driver_name}".format(
|
||||
method=method, driver_name=self.name
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
return response
|
||||
|
@ -51,8 +51,9 @@ class PowerAction(enum.Enum):
|
||||
"""Reboot the node using soft power off."""
|
||||
|
||||
|
||||
class WaitResult(collections.namedtuple('WaitResult',
|
||||
['success', 'failure', 'timeout'])):
|
||||
class WaitResult(
|
||||
collections.namedtuple('WaitResult', ['success', 'failure', 'timeout'])
|
||||
):
|
||||
"""A named tuple representing a result of waiting for several nodes.
|
||||
|
||||
Each component is a list of :class:`~openstack.baremetal.v1.node.Node`
|
||||
@ -65,6 +66,7 @@ class WaitResult(collections.namedtuple('WaitResult',
|
||||
:ivar ~.failure: a list of :class:`~openstack.baremetal.v1.node.Node`
|
||||
objects that hit a failure.
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
|
||||
@ -84,8 +86,12 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
commit_jsonpatch = True
|
||||
|
||||
_query_mapping = resource.QueryParameters(
|
||||
'associated', 'conductor_group', 'driver', 'fault',
|
||||
'provision_state', 'resource_class',
|
||||
'associated',
|
||||
'conductor_group',
|
||||
'driver',
|
||||
'fault',
|
||||
'provision_state',
|
||||
'resource_class',
|
||||
fields={'type': _common.fields_type},
|
||||
instance_id='instance_uuid',
|
||||
is_maintenance='maintenance',
|
||||
@ -292,21 +298,30 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
# Verify that the requested provision state is reachable with
|
||||
# the API version we are going to use.
|
||||
try:
|
||||
microversion = _common.STATE_VERSIONS[
|
||||
expected_provision_state]
|
||||
microversion = _common.STATE_VERSIONS[expected_provision_state]
|
||||
except KeyError:
|
||||
raise ValueError(
|
||||
"Node's provision_state must be one of %s for creation, "
|
||||
"got %s" % (', '.join(_common.STATE_VERSIONS),
|
||||
expected_provision_state))
|
||||
"got %s"
|
||||
% (
|
||||
', '.join(_common.STATE_VERSIONS),
|
||||
expected_provision_state,
|
||||
)
|
||||
)
|
||||
else:
|
||||
error_message = ("Cannot create a node with initial provision "
|
||||
"state %s" % expected_provision_state)
|
||||
error_message = (
|
||||
"Cannot create a node with initial provision "
|
||||
"state %s" % expected_provision_state
|
||||
)
|
||||
# Nodes cannot be created as available using new API versions
|
||||
maximum = ('1.10' if expected_provision_state == 'available'
|
||||
else None)
|
||||
maximum = (
|
||||
'1.10' if expected_provision_state == 'available' else None
|
||||
)
|
||||
microversion = self._assert_microversion_for(
|
||||
session, 'create', microversion, maximum=maximum,
|
||||
session,
|
||||
'create',
|
||||
microversion,
|
||||
maximum=maximum,
|
||||
error_message=error_message,
|
||||
)
|
||||
else:
|
||||
@ -315,11 +330,14 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
# Ironic cannot set provision_state itself, so marking it as unchanged
|
||||
self._clean_body_attrs({'provision_state'})
|
||||
|
||||
super(Node, self).create(session, *args, microversion=microversion,
|
||||
**kwargs)
|
||||
super(Node, self).create(
|
||||
session, *args, microversion=microversion, **kwargs
|
||||
)
|
||||
|
||||
if (expected_provision_state == 'manageable'
|
||||
and self.provision_state != 'manageable'):
|
||||
if (
|
||||
expected_provision_state == 'manageable'
|
||||
and self.provision_state != 'manageable'
|
||||
):
|
||||
# Manageable is not reachable directly
|
||||
self.set_provision_state(session, 'manage', wait=True)
|
||||
|
||||
@ -334,17 +352,22 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
:return: This :class:`Node` instance.
|
||||
"""
|
||||
# These fields have to be set through separate API.
|
||||
if ('maintenance_reason' in self._body.dirty
|
||||
or 'maintenance' in self._body.dirty):
|
||||
if (
|
||||
'maintenance_reason' in self._body.dirty
|
||||
or 'maintenance' in self._body.dirty
|
||||
):
|
||||
if not self.is_maintenance and self.maintenance_reason:
|
||||
if 'maintenance' in self._body.dirty:
|
||||
self.maintenance_reason = None
|
||||
else:
|
||||
raise ValueError('Maintenance reason cannot be set when '
|
||||
'maintenance is False')
|
||||
raise ValueError(
|
||||
'Maintenance reason cannot be set when '
|
||||
'maintenance is False'
|
||||
)
|
||||
if self.is_maintenance:
|
||||
self._do_maintenance_action(
|
||||
session, 'put', {'reason': self.maintenance_reason})
|
||||
session, 'put', {'reason': self.maintenance_reason}
|
||||
)
|
||||
else:
|
||||
# This corresponds to setting maintenance=False and
|
||||
# maintenance_reason=None in the same request.
|
||||
@ -358,9 +381,17 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
|
||||
return super(Node, self).commit(session, *args, **kwargs)
|
||||
|
||||
def set_provision_state(self, session, target, config_drive=None,
|
||||
clean_steps=None, rescue_password=None,
|
||||
wait=False, timeout=None, deploy_steps=None):
|
||||
def set_provision_state(
|
||||
self,
|
||||
session,
|
||||
target,
|
||||
config_drive=None,
|
||||
clean_steps=None,
|
||||
rescue_password=None,
|
||||
wait=False,
|
||||
timeout=None,
|
||||
deploy_steps=None,
|
||||
):
|
||||
"""Run an action modifying this node's provision state.
|
||||
|
||||
This call is asynchronous, it will return success as soon as the Bare
|
||||
@ -413,51 +444,65 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
body = {'target': target}
|
||||
if config_drive:
|
||||
if target not in ('active', 'rebuild'):
|
||||
raise ValueError('Config drive can only be provided with '
|
||||
'"active" and "rebuild" targets')
|
||||
raise ValueError(
|
||||
'Config drive can only be provided with '
|
||||
'"active" and "rebuild" targets'
|
||||
)
|
||||
# Not a typo - ironic accepts "configdrive" (without underscore)
|
||||
body['configdrive'] = config_drive
|
||||
|
||||
if clean_steps is not None:
|
||||
if target != 'clean':
|
||||
raise ValueError('Clean steps can only be provided with '
|
||||
'"clean" target')
|
||||
raise ValueError(
|
||||
'Clean steps can only be provided with ' '"clean" target'
|
||||
)
|
||||
body['clean_steps'] = clean_steps
|
||||
|
||||
if deploy_steps is not None:
|
||||
if target not in ('active', 'rebuild'):
|
||||
raise ValueError('Deploy steps can only be provided with '
|
||||
'"deploy" and "rebuild" target')
|
||||
raise ValueError(
|
||||
'Deploy steps can only be provided with '
|
||||
'"deploy" and "rebuild" target'
|
||||
)
|
||||
body['deploy_steps'] = deploy_steps
|
||||
|
||||
if rescue_password is not None:
|
||||
if target != 'rescue':
|
||||
raise ValueError('Rescue password can only be provided with '
|
||||
'"rescue" target')
|
||||
raise ValueError(
|
||||
'Rescue password can only be provided with '
|
||||
'"rescue" target'
|
||||
)
|
||||
body['rescue_password'] = rescue_password
|
||||
|
||||
if wait:
|
||||
try:
|
||||
expected_state = _common.EXPECTED_STATES[target]
|
||||
except KeyError:
|
||||
raise ValueError('For target %s the expected state is not '
|
||||
'known, cannot wait for it' % target)
|
||||
raise ValueError(
|
||||
'For target %s the expected state is not '
|
||||
'known, cannot wait for it' % target
|
||||
)
|
||||
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'states', 'provision')
|
||||
response = session.put(
|
||||
request.url, json=body,
|
||||
headers=request.headers, microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed to set provision state for bare metal node {node} "
|
||||
"to {target}".format(node=self.id, target=target))
|
||||
msg = (
|
||||
"Failed to set provision state for bare metal node {node} "
|
||||
"to {target}".format(node=self.id, target=target)
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
if wait:
|
||||
return self.wait_for_provision_state(session,
|
||||
expected_state,
|
||||
timeout=timeout)
|
||||
return self.wait_for_provision_state(
|
||||
session, expected_state, timeout=timeout
|
||||
)
|
||||
else:
|
||||
return self.fetch(session)
|
||||
|
||||
@ -475,10 +520,11 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
:raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
|
||||
"""
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for node %(node)s to reach "
|
||||
"power state '%(state)s'" % {'node': self.id,
|
||||
'state': expected_state}):
|
||||
timeout,
|
||||
"Timeout waiting for node %(node)s to reach "
|
||||
"power state '%(state)s'"
|
||||
% {'node': self.id, 'state': expected_state},
|
||||
):
|
||||
self.fetch(session)
|
||||
if self.power_state == expected_state:
|
||||
return self
|
||||
@ -486,11 +532,16 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
session.log.debug(
|
||||
'Still waiting for node %(node)s to reach power state '
|
||||
'"%(target)s", the current state is "%(state)s"',
|
||||
{'node': self.id, 'target': expected_state,
|
||||
'state': self.power_state})
|
||||
{
|
||||
'node': self.id,
|
||||
'target': expected_state,
|
||||
'state': self.power_state,
|
||||
},
|
||||
)
|
||||
|
||||
def wait_for_provision_state(self, session, expected_state, timeout=None,
|
||||
abort_on_failed_state=True):
|
||||
def wait_for_provision_state(
|
||||
self, session, expected_state, timeout=None, abort_on_failed_state=True
|
||||
):
|
||||
"""Wait for the node to reach the expected state.
|
||||
|
||||
:param session: The session to use for making this request.
|
||||
@ -510,20 +561,26 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
:raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
|
||||
"""
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for node %(node)s to reach "
|
||||
"target state '%(state)s'" % {'node': self.id,
|
||||
'state': expected_state}):
|
||||
timeout,
|
||||
"Timeout waiting for node %(node)s to reach "
|
||||
"target state '%(state)s'"
|
||||
% {'node': self.id, 'state': expected_state},
|
||||
):
|
||||
self.fetch(session)
|
||||
if self._check_state_reached(session, expected_state,
|
||||
abort_on_failed_state):
|
||||
if self._check_state_reached(
|
||||
session, expected_state, abort_on_failed_state
|
||||
):
|
||||
return self
|
||||
|
||||
session.log.debug(
|
||||
'Still waiting for node %(node)s to reach state '
|
||||
'"%(target)s", the current state is "%(state)s"',
|
||||
{'node': self.id, 'target': expected_state,
|
||||
'state': self.provision_state})
|
||||
{
|
||||
'node': self.id,
|
||||
'target': expected_state,
|
||||
'state': self.provision_state,
|
||||
},
|
||||
)
|
||||
|
||||
def wait_for_reservation(self, session, timeout=None):
|
||||
"""Wait for a lock on the node to be released.
|
||||
@ -552,9 +609,9 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
return self
|
||||
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the lock to be released on node %s" %
|
||||
self.id):
|
||||
timeout,
|
||||
"Timeout waiting for the lock to be released on node %s" % self.id,
|
||||
):
|
||||
self.fetch(session)
|
||||
if self.reservation is None:
|
||||
return self
|
||||
@ -562,10 +619,12 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
session.log.debug(
|
||||
'Still waiting for the lock to be released on node '
|
||||
'%(node)s, currently locked by conductor %(host)s',
|
||||
{'node': self.id, 'host': self.reservation})
|
||||
{'node': self.id, 'host': self.reservation},
|
||||
)
|
||||
|
||||
def _check_state_reached(self, session, expected_state,
|
||||
abort_on_failed_state=True):
|
||||
def _check_state_reached(
|
||||
self, session, expected_state, abort_on_failed_state=True
|
||||
):
|
||||
"""Wait for the node to reach the expected state.
|
||||
|
||||
:param session: The session to use for making this request.
|
||||
@ -581,29 +640,39 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
reaches an error state and ``abort_on_failed_state`` is ``True``.
|
||||
"""
|
||||
# NOTE(dtantsur): microversion 1.2 changed None to available
|
||||
if (self.provision_state == expected_state
|
||||
or (expected_state == 'available'
|
||||
and self.provision_state is None)):
|
||||
if self.provision_state == expected_state or (
|
||||
expected_state == 'available' and self.provision_state is None
|
||||
):
|
||||
return True
|
||||
elif not abort_on_failed_state:
|
||||
return False
|
||||
|
||||
if (self.provision_state.endswith(' failed')
|
||||
or self.provision_state == 'error'):
|
||||
if (
|
||||
self.provision_state.endswith(' failed')
|
||||
or self.provision_state == 'error'
|
||||
):
|
||||
raise exceptions.ResourceFailure(
|
||||
"Node %(node)s reached failure state \"%(state)s\"; "
|
||||
"the last error is %(error)s" %
|
||||
{'node': self.id, 'state': self.provision_state,
|
||||
'error': self.last_error})
|
||||
"the last error is %(error)s"
|
||||
% {
|
||||
'node': self.id,
|
||||
'state': self.provision_state,
|
||||
'error': self.last_error,
|
||||
}
|
||||
)
|
||||
# Special case: a failure state for "manage" transition can be
|
||||
# "enroll"
|
||||
elif (expected_state == 'manageable'
|
||||
and self.provision_state == 'enroll' and self.last_error):
|
||||
elif (
|
||||
expected_state == 'manageable'
|
||||
and self.provision_state == 'enroll'
|
||||
and self.last_error
|
||||
):
|
||||
raise exceptions.ResourceFailure(
|
||||
"Node %(node)s could not reach state manageable: "
|
||||
"failed to verify management credentials; "
|
||||
"the last error is %(error)s" %
|
||||
{'node': self.id, 'error': self.last_error})
|
||||
"the last error is %(error)s"
|
||||
% {'node': self.id, 'error': self.last_error}
|
||||
)
|
||||
|
||||
def inject_nmi(self, session):
|
||||
"""Inject NMI.
|
||||
@ -630,7 +699,7 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed to inject NMI to node {node}".format(node=self.id))
|
||||
msg = "Failed to inject NMI to node {node}".format(node=self.id)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
def set_power_state(self, session, target, wait=False, timeout=None):
|
||||
@ -654,8 +723,10 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
try:
|
||||
expected = _common.EXPECTED_POWER_STATES[target]
|
||||
except KeyError:
|
||||
raise ValueError("Cannot use target power state %s with wait, "
|
||||
"the expected state is not known" % target)
|
||||
raise ValueError(
|
||||
"Cannot use target power state %s with wait, "
|
||||
"the expected state is not known" % target
|
||||
)
|
||||
|
||||
session = self._get_session(session)
|
||||
|
||||
@ -672,12 +743,17 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'states', 'power')
|
||||
response = session.put(
|
||||
request.url, json=body,
|
||||
headers=request.headers, microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed to set power state for bare metal node {node} "
|
||||
"to {target}".format(node=self.id, target=target))
|
||||
msg = (
|
||||
"Failed to set power state for bare metal node {node} "
|
||||
"to {target}".format(node=self.id, target=target)
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
if wait:
|
||||
@ -704,8 +780,11 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
"""
|
||||
session = self._get_session(session)
|
||||
version = self._assert_microversion_for(
|
||||
session, 'commit', _common.VIF_VERSION,
|
||||
error_message=("Cannot use VIF attachment API"))
|
||||
session,
|
||||
'commit',
|
||||
_common.VIF_VERSION,
|
||||
error_message=("Cannot use VIF attachment API"),
|
||||
)
|
||||
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'vifs')
|
||||
@ -714,12 +793,16 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
if not retry_on_conflict:
|
||||
retriable_status_codes = set(retriable_status_codes) - {409}
|
||||
response = session.post(
|
||||
request.url, json=body,
|
||||
headers=request.headers, microversion=version,
|
||||
retriable_status_codes=retriable_status_codes)
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=retriable_status_codes,
|
||||
)
|
||||
|
||||
msg = ("Failed to attach VIF {vif} to bare metal node {node}"
|
||||
.format(node=self.id, vif=vif_id))
|
||||
msg = "Failed to attach VIF {vif} to bare metal node {node}".format(
|
||||
node=self.id, vif=vif_id
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
def detach_vif(self, session, vif_id, ignore_missing=True):
|
||||
@ -742,23 +825,31 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
"""
|
||||
session = self._get_session(session)
|
||||
version = self._assert_microversion_for(
|
||||
session, 'commit', _common.VIF_VERSION,
|
||||
error_message=("Cannot use VIF attachment API"))
|
||||
session,
|
||||
'commit',
|
||||
_common.VIF_VERSION,
|
||||
error_message=("Cannot use VIF attachment API"),
|
||||
)
|
||||
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'vifs', vif_id)
|
||||
response = session.delete(
|
||||
request.url, headers=request.headers, microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
if ignore_missing and response.status_code == 400:
|
||||
session.log.debug(
|
||||
'VIF %(vif)s was already removed from node %(node)s',
|
||||
{'vif': vif_id, 'node': self.id})
|
||||
{'vif': vif_id, 'node': self.id},
|
||||
)
|
||||
return False
|
||||
|
||||
msg = ("Failed to detach VIF {vif} from bare metal node {node}"
|
||||
.format(node=self.id, vif=vif_id))
|
||||
msg = "Failed to detach VIF {vif} from bare metal node {node}".format(
|
||||
node=self.id, vif=vif_id
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
return True
|
||||
|
||||
@ -777,16 +868,21 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
"""
|
||||
session = self._get_session(session)
|
||||
version = self._assert_microversion_for(
|
||||
session, 'fetch', _common.VIF_VERSION,
|
||||
error_message=("Cannot use VIF attachment API"))
|
||||
session,
|
||||
'fetch',
|
||||
_common.VIF_VERSION,
|
||||
error_message=("Cannot use VIF attachment API"),
|
||||
)
|
||||
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'vifs')
|
||||
response = session.get(
|
||||
request.url, headers=request.headers, microversion=version)
|
||||
request.url, headers=request.headers, microversion=version
|
||||
)
|
||||
|
||||
msg = ("Failed to list VIFs attached to bare metal node {node}"
|
||||
.format(node=self.id))
|
||||
msg = "Failed to list VIFs attached to bare metal node {node}".format(
|
||||
node=self.id
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
return [vif['id'] for vif in response.json()['vifs']]
|
||||
|
||||
@ -809,10 +905,11 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'validate')
|
||||
response = session.get(request.url, headers=request.headers,
|
||||
microversion=version)
|
||||
response = session.get(
|
||||
request.url, headers=request.headers, microversion=version
|
||||
)
|
||||
|
||||
msg = ("Failed to validate node {node}".format(node=self.id))
|
||||
msg = "Failed to validate node {node}".format(node=self.id)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
result = response.json()
|
||||
|
||||
@ -826,11 +923,15 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
if failed:
|
||||
raise exceptions.ValidationException(
|
||||
'Validation failed for required interfaces of node {node}:'
|
||||
' {failures}'.format(node=self.id,
|
||||
failures=', '.join(failed)))
|
||||
' {failures}'.format(
|
||||
node=self.id, failures=', '.join(failed)
|
||||
)
|
||||
)
|
||||
|
||||
return {key: ValidationResult(value.get('result'), value.get('reason'))
|
||||
for key, value in result.items()}
|
||||
return {
|
||||
key: ValidationResult(value.get('result'), value.get('reason'))
|
||||
for key, value in result.items()
|
||||
}
|
||||
|
||||
def set_maintenance(self, session, reason=None):
|
||||
"""Enable maintenance mode on the node.
|
||||
@ -859,10 +960,14 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'maintenance')
|
||||
response = getattr(session, verb)(
|
||||
request.url, json=body,
|
||||
headers=request.headers, microversion=version)
|
||||
msg = ("Failed to change maintenance mode for node {node}"
|
||||
.format(node=self.id))
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
)
|
||||
msg = "Failed to change maintenance mode for node {node}".format(
|
||||
node=self.id
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
def get_boot_device(self, session):
|
||||
@ -901,12 +1006,14 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
body = {'boot_device': boot_device, 'persistent': persistent}
|
||||
|
||||
response = session.put(
|
||||
request.url, json=body,
|
||||
headers=request.headers, microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed to set boot device for node {node}"
|
||||
.format(node=self.id))
|
||||
msg = "Failed to set boot device for node {node}".format(node=self.id)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
def get_supported_boot_devices(self, session):
|
||||
@ -945,23 +1052,27 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
:raises: ValueError if ``target`` is not one of 'uefi or 'bios'.
|
||||
"""
|
||||
session = self._get_session(session)
|
||||
version = utils.pick_microversion(session,
|
||||
_common.CHANGE_BOOT_MODE_VERSION)
|
||||
version = utils.pick_microversion(
|
||||
session, _common.CHANGE_BOOT_MODE_VERSION
|
||||
)
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'states', 'boot_mode')
|
||||
if target not in ('uefi', 'bios'):
|
||||
raise ValueError("Unrecognized boot mode %s."
|
||||
"Boot mode should be one of 'uefi' or 'bios'."
|
||||
% target)
|
||||
raise ValueError(
|
||||
"Unrecognized boot mode %s."
|
||||
"Boot mode should be one of 'uefi' or 'bios'." % target
|
||||
)
|
||||
body = {'target': target}
|
||||
|
||||
response = session.put(
|
||||
request.url, json=body,
|
||||
headers=request.headers, microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed to change boot mode for node {node}"
|
||||
.format(node=self.id))
|
||||
msg = "Failed to change boot mode for node {node}".format(node=self.id)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
def set_secure_boot(self, session, target):
|
||||
@ -976,23 +1087,29 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
:raises: ValueError if ``target`` is not boolean.
|
||||
"""
|
||||
session = self._get_session(session)
|
||||
version = utils.pick_microversion(session,
|
||||
_common.CHANGE_BOOT_MODE_VERSION)
|
||||
version = utils.pick_microversion(
|
||||
session, _common.CHANGE_BOOT_MODE_VERSION
|
||||
)
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'states', 'secure_boot')
|
||||
if not isinstance(target, bool):
|
||||
raise ValueError("Invalid target %s. It should be True or False "
|
||||
"corresponding to secure boot state 'on' or 'off'"
|
||||
% target)
|
||||
raise ValueError(
|
||||
"Invalid target %s. It should be True or False "
|
||||
"corresponding to secure boot state 'on' or 'off'" % target
|
||||
)
|
||||
body = {'target': target}
|
||||
|
||||
response = session.put(
|
||||
request.url, json=body,
|
||||
headers=request.headers, microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
json=body,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed to change secure boot state for {node}"
|
||||
.format(node=self.id))
|
||||
msg = "Failed to change secure boot state for {node}".format(
|
||||
node=self.id
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
def add_trait(self, session, trait):
|
||||
@ -1006,12 +1123,16 @@ class Node(_common.ListMixin, resource.Resource):
|
||||
request = self._prepare_request(requires_id=True)
|
||||
request.url = utils.urljoin(request.url, 'traits', trait)
|
||||
response = session.put(
|
||||
request.url, json=None,
|
||||
headers=request.headers, microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
request.url,
|
||||
json=None,
|
||||
headers=request.headers,
|
||||
microversion=version,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
||||
msg = ("Failed to add trait {trait} for node {node}"
|
||||
.format(trait=trait, node=self.id))
|
||||
msg = "Failed to add trait {trait} for node {node}".format(
|
||||
trait=trait, node=self.id
|
||||
)
|
||||
exceptions.raise_from_response(response, error_message=msg)
|
||||
|
||||
self.traits = list(set(self.traits or ()) | {trait})
|
||||
@ -1034,18 +1155,24 @@ class Node(_common.ListMixin, resource.Resource):
request.url = utils.urljoin(request.url, 'traits', trait)

response = session.delete(
request.url, headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
request.url,
headers=request.headers,
microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)

if ignore_missing and response.status_code == 400:
session.log.debug(
'Trait %(trait)s was already removed from node %(node)s',
{'trait': trait, 'node': self.id})
{'trait': trait, 'node': self.id},
)
return False

msg = ("Failed to remove trait {trait} from bare metal node {node}"
.format(node=self.id, trait=trait))
exceptions.raise_from_response(response, error_message=msg)
msg = "Failed to remove trait {trait} from bare metal node {node}"
exceptions.raise_from_response(
response,
error_message=msg.format(node=self.id, trait=trait),
)

if self.traits:
self.traits = list(set(self.traits) - {trait})
@ -1069,12 +1196,14 @@ class Node(_common.ListMixin, resource.Resource):
body = {'traits': traits}

response = session.put(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
request.url,
json=body,
headers=request.headers,
microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)

msg = ("Failed to set traits for node {node}"
.format(node=self.id))
msg = "Failed to set traits for node {node}".format(node=self.id)
exceptions.raise_from_response(response, error_message=msg)

self.traits = traits
@ -1091,18 +1220,25 @@ class Node(_common.ListMixin, resource.Resource):
session = self._get_session(session)
version = self._get_microversion(session, action='commit')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'vendor_passthru?method={}'
.format(method))
request.url = utils.urljoin(
request.url, 'vendor_passthru?method={}'.format(method)
)

call = getattr(session, verb.lower())
response = call(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
request.url,
json=body,
headers=request.headers,
microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)

msg = ("Failed to call vendor_passthru for node {node}, verb {verb}"
" and method {method}"
.format(node=self.id, verb=verb, method=method))
msg = (
"Failed to call vendor_passthru for node {node}, verb {verb}"
" and method {method}".format(
node=self.id, verb=verb, method=method
)
)
exceptions.raise_from_response(response, error_message=msg)

return response
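The vendor_passthru helper above picks the session method to call from the verb string with getattr. A standalone sketch of that dispatch pattern, using a plain requests session and a hypothetical URL (neither is part of the SDK code above):

# Illustrative sketch of the verb-dispatch pattern; the URL and session are placeholders.
import requests


def call_passthru(session: requests.Session, verb: str, url: str, body: dict):
    # getattr(session, 'get'/'post'/'put'/...) picks the matching HTTP method.
    call = getattr(session, verb.lower())
    return call(url, json=body)


# Example (hypothetical endpoint):
# resp = call_passthru(
#     requests.Session(), 'POST',
#     'http://ironic.example/v1/nodes/<uuid>/vendor_passthru?method=bios_settings',
#     {'answer': 42},
# )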
@ -1119,11 +1255,15 @@ class Node(_common.ListMixin, resource.Resource):
request.url = utils.urljoin(request.url, 'vendor_passthru/methods')

response = session.get(
request.url, headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
request.url,
headers=request.headers,
microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)

msg = ("Failed to list vendor_passthru methods for node {node}"
.format(node=self.id))
msg = "Failed to list vendor_passthru methods for node {node}".format(
node=self.id
)
exceptions.raise_from_response(response, error_message=msg)

return response.json()
@ -1156,8 +1296,7 @@ class Node(_common.ListMixin, resource.Resource):
if not isinstance(enabled, bool):
raise ValueError(
"Invalid enabled %s. It should be True or False "
"corresponding to console enabled or disabled"
% enabled
"corresponding to console enabled or disabled" % enabled
)
body = {'enabled': enabled}

@ -1174,8 +1313,16 @@ class Node(_common.ListMixin, resource.Resource):
)
exceptions.raise_from_response(response, error_message=msg)

def patch(self, session, patch=None, prepend_key=True, has_body=True,
retry_on_conflict=None, base_path=None, reset_interfaces=None):
def patch(
self,
session,
patch=None,
prepend_key=True,
has_body=True,
retry_on_conflict=None,
base_path=None,
reset_interfaces=None,
):

if reset_interfaces is not None:
# The id cannot be dirty for an commit
@ -1190,24 +1337,34 @@ class Node(_common.ListMixin, resource.Resource):

session = self._get_session(session)
microversion = self._assert_microversion_for(
session, 'commit', _common.RESET_INTERFACES_VERSION)
session, 'commit', _common.RESET_INTERFACES_VERSION
)
params = [('reset_interfaces', reset_interfaces)]

request = self._prepare_request(requires_id=True,
prepend_key=prepend_key,
base_path=base_path, patch=True,
params=params)
request = self._prepare_request(
requires_id=True,
prepend_key=prepend_key,
base_path=base_path,
patch=True,
params=params,
)

if patch:
request.body += self._convert_patch(patch)

return self._commit(session, request, 'PATCH', microversion,
has_body=has_body,
retry_on_conflict=retry_on_conflict)
return self._commit(
session,
request,
'PATCH',
microversion,
has_body=has_body,
retry_on_conflict=retry_on_conflict,
)

else:
return super(Node, self).patch(session, patch=patch,
retry_on_conflict=retry_on_conflict)
return super(Node, self).patch(
session, patch=patch, retry_on_conflict=retry_on_conflict
)


NodeDetail = Node
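The reworked Node.patch() above still threads reset_interfaces into the request parameters and hands the JSON patch to _commit(); from the client side this is the patch helper exercised by the functional tests in this change. A minimal sketch (cloud name and node identifier are placeholders):

# Illustrative only: mirrors the JSON-patch format used by the functional tests below.
import openstack

conn = openstack.connect(cloud='mycloud')
conn.baremetal.patch_node(
    'my-node',
    [
        {'op': 'add', 'path': '/extra/answer', 'value': 42},
        {'op': 'replace', 'path': '/name', 'value': 'new-name'},
    ],
)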
@ -30,7 +30,9 @@ class Port(_common.ListMixin, resource.Resource):
commit_jsonpatch = True

_query_mapping = resource.QueryParameters(
'address', 'node', 'portgroup',
'address',
'node',
'portgroup',
fields={'type': _common.fields_type},
node_id='node_uuid',
)

@ -30,7 +30,8 @@ class PortGroup(_common.ListMixin, resource.Resource):
commit_jsonpatch = True

_query_mapping = resource.QueryParameters(
'node', 'address',
'node',
'address',
fields={'type': _common.fields_type},
)

@ -52,8 +53,9 @@ class PortGroup(_common.ListMixin, resource.Resource):
internal_info = resource.Body('internal_info')
#: Whether ports that are members of this portgroup can be used as
#: standalone ports. Added in API microversion 1.23.
is_standalone_ports_supported = resource.Body('standalone_ports_supported',
type=bool)
is_standalone_ports_supported = resource.Body(
'standalone_ports_supported', type=bool
)
#: A list of relative links, including the self and bookmark links.
links = resource.Body('links', type=list)
#: Port bonding mode. Added in API microversion 1.26.

@ -30,7 +30,8 @@ class VolumeConnector(_common.ListMixin, resource.Resource):
commit_jsonpatch = True

_query_mapping = resource.QueryParameters(
'node', 'detail',
'node',
'detail',
fields={'type': _common.fields_type},
)

@ -30,7 +30,8 @@ class VolumeTarget(_common.ListMixin, resource.Resource):
commit_jsonpatch = True

_query_mapping = resource.QueryParameters(
'node', 'detail',
'node',
'detail',
fields={'type': _common.fields_type},
)

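The QueryParameters declarations above are what allow server-side filters to be passed as keyword arguments to the listing calls. A brief sketch (cloud name, MAC address and node identifier are placeholders):

# Illustrative only: the keyword filters map onto the QueryParameters entries above.
import openstack

conn = openstack.connect(cloud='mycloud')
for port in conn.baremetal.ports(address='11:22:33:44:55:66', details=True):
    print(port.id, port.address)
for connector in conn.baremetal.volume_connectors(node='my-node'):
    print(connector.id)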
@ -70,8 +70,9 @@ class Proxy(proxy.Proxy):
:returns: :class:`~.introspection.Introspection` instance.
"""
node = self._get_resource(_node.Node, node)
res = _introspect.Introspection.new(connection=self._get_connection(),
id=node.id)
res = _introspect.Introspection.new(
connection=self._get_connection(), id=node.id
)
kwargs = {}
if manage_boot is not None:
kwargs['manage_boot'] = manage_boot
@ -126,8 +127,9 @@ class Proxy(proxy.Proxy):
if not ignore_missing:
raise

def wait_for_introspection(self, introspection, timeout=None,
ignore_error=False):
def wait_for_introspection(
self, introspection, timeout=None, ignore_error=False
):
"""Wait for the introspection to finish.

:param introspection: The value can be the name or ID of an
@ -67,10 +67,12 @@ class Introspection(resource.Resource):
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'abort')
response = session.post(
request.url, headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to abort introspection for node {id}"
.format(id=self.id))
request.url,
headers=request.headers,
microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to abort introspection for node {id}".format(id=self.id)
exceptions.raise_from_response(response, error_message=msg)

def get_data(self, session, processed=True):
@ -89,16 +91,21 @@ class Introspection(resource.Resource):
"""
session = self._get_session(session)

version = (self._get_microversion(session, action='fetch')
if processed else '1.17')
version = (
self._get_microversion(session, action='fetch')
if processed
else '1.17'
)
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'data')
if not processed:
request.url = utils.urljoin(request.url, 'unprocessed')
response = session.get(
request.url, headers=request.headers, microversion=version)
msg = ("Failed to fetch introspection data for node {id}"
.format(id=self.id))
request.url, headers=request.headers, microversion=version
)
msg = "Failed to fetch introspection data for node {id}".format(
id=self.id
)
exceptions.raise_from_response(response, error_message=msg)
return response.json()

@ -121,20 +128,23 @@ class Introspection(resource.Resource):
return self

for count in utils.iterate_timeout(
timeout,
"Timeout waiting for introspection on node %s" % self.id):
timeout, "Timeout waiting for introspection on node %s" % self.id
):
self.fetch(session)
if self._check_state(ignore_error):
return self

_logger.debug('Still waiting for introspection of node %(node)s, '
'the current state is "%(state)s"',
{'node': self.id, 'state': self.state})
_logger.debug(
'Still waiting for introspection of node %(node)s, '
'the current state is "%(state)s"',
{'node': self.id, 'state': self.state},
)

def _check_state(self, ignore_error):
if self.state == 'error' and not ignore_error:
raise exceptions.ResourceFailure(
"Introspection of node %(node)s failed: %(error)s" %
{'node': self.id, 'error': self.error})
"Introspection of node %(node)s failed: %(error)s"
% {'node': self.id, 'error': self.error}
)
else:
return self.is_finished
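Taken together, the proxy and resource changes above cover the usual introspection flow: start it, poll until is_finished, then fetch the stored data. A minimal sketch (cloud name and node identifier are placeholders; the proxy method names are assumed to match the openstacksdk introspection proxy):

# Illustrative only: assumes a configured cloud and a node ready for inspection.
import openstack

conn = openstack.connect(cloud='mycloud')
introspection = conn.baremetal_introspection.start_introspection('my-node')
conn.baremetal_introspection.wait_for_introspection(introspection, timeout=1800)
# get_introspection_data wraps the get_data() call reformatted above.
data = conn.baremetal_introspection.get_introspection_data(introspection)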
@ -20,29 +20,36 @@ class BaseBaremetalTest(base.BaseFunctionalTest):

def setUp(self):
super(BaseBaremetalTest, self).setUp()
self.require_service('baremetal',
min_microversion=self.min_microversion)
self.require_service(
'baremetal', min_microversion=self.min_microversion
)

def create_allocation(self, **kwargs):
allocation = self.conn.baremetal.create_allocation(**kwargs)
self.addCleanup(
lambda: self.conn.baremetal.delete_allocation(allocation.id,
ignore_missing=True))
lambda: self.conn.baremetal.delete_allocation(
allocation.id, ignore_missing=True
)
)
return allocation

def create_chassis(self, **kwargs):
chassis = self.conn.baremetal.create_chassis(**kwargs)
self.addCleanup(
lambda: self.conn.baremetal.delete_chassis(chassis.id,
ignore_missing=True))
lambda: self.conn.baremetal.delete_chassis(
chassis.id, ignore_missing=True
)
)
return chassis

def create_node(self, driver='fake-hardware', **kwargs):
node = self.conn.baremetal.create_node(driver=driver, **kwargs)
self.node_id = node.id
self.addCleanup(
lambda: self.conn.baremetal.delete_node(self.node_id,
ignore_missing=True))
lambda: self.conn.baremetal.delete_node(
self.node_id, ignore_missing=True
)
)
self.assertIsNotNone(self.node_id)
return node
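The helpers above are used by the tests that follow to drive freshly enrolled nodes into a usable state. The same manage/provide flow from a regular client looks roughly like this (cloud and node names are placeholders):

# Illustrative only: mirrors the provision-state calls used by the tests below.
import openstack

conn = openstack.connect(cloud='mycloud')
node = conn.baremetal.create_node(driver='fake-hardware', name='example-node')
conn.baremetal.set_node_provision_state(node, 'manage', wait=True)
conn.baremetal.set_node_provision_state(node, 'provide', wait=True)
conn.baremetal.set_node_power_state(node, 'power off')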
@ -50,50 +57,58 @@ class BaseBaremetalTest(base.BaseFunctionalTest):
|
||||
node_id = node_id or self.node_id
|
||||
port = self.conn.baremetal.create_port(node_uuid=node_id, **kwargs)
|
||||
self.addCleanup(
|
||||
lambda: self.conn.baremetal.delete_port(port.id,
|
||||
ignore_missing=True))
|
||||
lambda: self.conn.baremetal.delete_port(
|
||||
port.id, ignore_missing=True
|
||||
)
|
||||
)
|
||||
return port
|
||||
|
||||
def create_port_group(self, node_id=None, **kwargs):
|
||||
node_id = node_id or self.node_id
|
||||
port_group = self.conn.baremetal.create_port_group(node_uuid=node_id,
|
||||
**kwargs)
|
||||
port_group = self.conn.baremetal.create_port_group(
|
||||
node_uuid=node_id, **kwargs
|
||||
)
|
||||
self.addCleanup(
|
||||
lambda: self.conn.baremetal.delete_port_group(port_group.id,
|
||||
ignore_missing=True))
|
||||
lambda: self.conn.baremetal.delete_port_group(
|
||||
port_group.id, ignore_missing=True
|
||||
)
|
||||
)
|
||||
return port_group
|
||||
|
||||
def create_volume_connector(self, node_id=None, **kwargs):
|
||||
node_id = node_id or self.node_id
|
||||
volume_connector = self.conn.baremetal.create_volume_connector(
|
||||
node_uuid=node_id, **kwargs)
|
||||
node_uuid=node_id, **kwargs
|
||||
)
|
||||
|
||||
self.addCleanup(
|
||||
lambda:
|
||||
self.conn.baremetal.delete_volume_connector(volume_connector.id,
|
||||
ignore_missing=True))
|
||||
lambda: self.conn.baremetal.delete_volume_connector(
|
||||
volume_connector.id, ignore_missing=True
|
||||
)
|
||||
)
|
||||
return volume_connector
|
||||
|
||||
def create_volume_target(self, node_id=None, **kwargs):
|
||||
node_id = node_id or self.node_id
|
||||
volume_target = self.conn.baremetal.create_volume_target(
|
||||
node_uuid=node_id, **kwargs)
|
||||
node_uuid=node_id, **kwargs
|
||||
)
|
||||
|
||||
self.addCleanup(
|
||||
lambda:
|
||||
self.conn.baremetal.delete_volume_target(volume_target.id,
|
||||
ignore_missing=True))
|
||||
lambda: self.conn.baremetal.delete_volume_target(
|
||||
volume_target.id, ignore_missing=True
|
||||
)
|
||||
)
|
||||
return volume_target
|
||||
|
||||
def create_deploy_template(self, **kwargs):
|
||||
"""Create a new deploy_template from attributes.
|
||||
"""
|
||||
"""Create a new deploy_template from attributes."""
|
||||
|
||||
deploy_template = self.conn.baremetal.create_deploy_template(
|
||||
**kwargs)
|
||||
deploy_template = self.conn.baremetal.create_deploy_template(**kwargs)
|
||||
|
||||
self.addCleanup(
|
||||
lambda: self.conn.baremetal.delete_deploy_template(
|
||||
deploy_template.id,
|
||||
ignore_missing=True))
|
||||
deploy_template.id, ignore_missing=True
|
||||
)
|
||||
)
|
||||
return deploy_template
|
||||
|
@ -17,7 +17,6 @@ from openstack.tests.functional.baremetal import base
|
||||
|
||||
|
||||
class Base(base.BaseBaremetalTest):
|
||||
|
||||
def setUp(self):
|
||||
super(Base, self).setUp()
|
||||
# NOTE(dtantsur): generate a unique resource class to prevent parallel
|
||||
@ -27,15 +26,15 @@ class Base(base.BaseBaremetalTest):
|
||||
|
||||
def _create_available_node(self):
|
||||
node = self.create_node(resource_class=self.resource_class)
|
||||
self.conn.baremetal.set_node_provision_state(node, 'manage',
|
||||
wait=True)
|
||||
self.conn.baremetal.set_node_provision_state(node, 'provide',
|
||||
wait=True)
|
||||
self.conn.baremetal.set_node_provision_state(node, 'manage', wait=True)
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
node, 'provide', wait=True
|
||||
)
|
||||
# Make sure the node has non-empty power state by forcing power off.
|
||||
self.conn.baremetal.set_node_power_state(node, 'power off')
|
||||
self.addCleanup(
|
||||
lambda: self.conn.baremetal.update_node(node.id,
|
||||
instance_id=None))
|
||||
lambda: self.conn.baremetal.update_node(node.id, instance_id=None)
|
||||
)
|
||||
return node
|
||||
|
||||
|
||||
@ -56,7 +55,8 @@ class TestBareMetalAllocation(Base):
|
||||
self.assertIsNone(allocation.last_error)
|
||||
|
||||
with_fields = self.conn.baremetal.get_allocation(
|
||||
allocation.id, fields=['uuid', 'node_uuid'])
|
||||
allocation.id, fields=['uuid', 'node_uuid']
|
||||
)
|
||||
self.assertEqual(allocation.id, with_fields.id)
|
||||
self.assertIsNone(with_fields.state)
|
||||
|
||||
@ -64,21 +64,27 @@ class TestBareMetalAllocation(Base):
|
||||
self.assertEqual(allocation.id, node.allocation_id)
|
||||
|
||||
self.conn.baremetal.delete_allocation(allocation, ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation, allocation.id)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation,
|
||||
allocation.id,
|
||||
)
|
||||
|
||||
def test_allocation_list(self):
|
||||
allocation1 = self.create_allocation(
|
||||
resource_class=self.resource_class)
|
||||
resource_class=self.resource_class
|
||||
)
|
||||
allocation2 = self.create_allocation(
|
||||
resource_class=self.resource_class + '-fail')
|
||||
resource_class=self.resource_class + '-fail'
|
||||
)
|
||||
|
||||
self.conn.baremetal.wait_for_allocation(allocation1)
|
||||
self.conn.baremetal.wait_for_allocation(allocation2, ignore_error=True)
|
||||
|
||||
allocations = self.conn.baremetal.allocations()
|
||||
self.assertEqual({p.id for p in allocations},
|
||||
{allocation1.id, allocation2.id})
|
||||
self.assertEqual(
|
||||
{p.id for p in allocations}, {allocation1.id, allocation2.id}
|
||||
)
|
||||
|
||||
allocations = self.conn.baremetal.allocations(state='active')
|
||||
self.assertEqual([p.id for p in allocations], [allocation1.id])
|
||||
@ -87,15 +93,19 @@ class TestBareMetalAllocation(Base):
|
||||
self.assertEqual([p.id for p in allocations], [allocation1.id])
|
||||
|
||||
allocations = self.conn.baremetal.allocations(
|
||||
resource_class=self.resource_class + '-fail')
|
||||
resource_class=self.resource_class + '-fail'
|
||||
)
|
||||
self.assertEqual([p.id for p in allocations], [allocation2.id])
|
||||
|
||||
def test_allocation_negative_failure(self):
|
||||
allocation = self.create_allocation(
|
||||
resource_class=self.resource_class + '-fail')
|
||||
self.assertRaises(exceptions.SDKException,
|
||||
self.conn.baremetal.wait_for_allocation,
|
||||
allocation)
|
||||
resource_class=self.resource_class + '-fail'
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.SDKException,
|
||||
self.conn.baremetal.wait_for_allocation,
|
||||
allocation,
|
||||
)
|
||||
|
||||
allocation = self.conn.baremetal.get_allocation(allocation.id)
|
||||
self.assertEqual('error', allocation.state)
|
||||
@ -103,11 +113,17 @@ class TestBareMetalAllocation(Base):
|
||||
|
||||
def test_allocation_negative_non_existing(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_allocation, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation,
|
||||
uuid,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_allocation,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.delete_allocation(uuid))
|
||||
|
||||
def test_allocation_fields(self):
|
||||
@ -133,7 +149,8 @@ class TestBareMetalAllocationUpdate(Base):
|
||||
self.assertEqual({}, allocation.extra)
|
||||
|
||||
allocation = self.conn.baremetal.update_allocation(
|
||||
allocation, name=name, extra={'answer': 42})
|
||||
allocation, name=name, extra={'answer': 42}
|
||||
)
|
||||
self.assertEqual(name, allocation.name)
|
||||
self.assertEqual({'answer': 42}, allocation.extra)
|
||||
|
||||
@ -142,8 +159,11 @@ class TestBareMetalAllocationUpdate(Base):
|
||||
self.assertEqual({'answer': 42}, allocation.extra)
|
||||
|
||||
self.conn.baremetal.delete_allocation(allocation, ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation, allocation.id)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation,
|
||||
allocation.id,
|
||||
)
|
||||
|
||||
def test_allocation_patch(self):
|
||||
name = 'ossdk-name2'
|
||||
@ -156,8 +176,12 @@ class TestBareMetalAllocationUpdate(Base):
|
||||
self.assertEqual({}, allocation.extra)
|
||||
|
||||
allocation = self.conn.baremetal.patch_allocation(
|
||||
allocation, [{'op': 'replace', 'path': '/name', 'value': name},
|
||||
{'op': 'add', 'path': '/extra/answer', 'value': 42}])
|
||||
allocation,
|
||||
[
|
||||
{'op': 'replace', 'path': '/name', 'value': name},
|
||||
{'op': 'add', 'path': '/extra/answer', 'value': 42},
|
||||
],
|
||||
)
|
||||
self.assertEqual(name, allocation.name)
|
||||
self.assertEqual({'answer': 42}, allocation.extra)
|
||||
|
||||
@ -166,8 +190,12 @@ class TestBareMetalAllocationUpdate(Base):
|
||||
self.assertEqual({'answer': 42}, allocation.extra)
|
||||
|
||||
allocation = self.conn.baremetal.patch_allocation(
|
||||
allocation, [{'op': 'remove', 'path': '/name'},
|
||||
{'op': 'remove', 'path': '/extra/answer'}])
|
||||
allocation,
|
||||
[
|
||||
{'op': 'remove', 'path': '/name'},
|
||||
{'op': 'remove', 'path': '/extra/answer'},
|
||||
],
|
||||
)
|
||||
self.assertIsNone(allocation.name)
|
||||
self.assertEqual({}, allocation.extra)
|
||||
|
||||
@ -176,5 +204,8 @@ class TestBareMetalAllocationUpdate(Base):
|
||||
self.assertEqual({}, allocation.extra)
|
||||
|
||||
self.conn.baremetal.delete_allocation(allocation, ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation, allocation.id)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_allocation,
|
||||
allocation.id,
|
||||
)
|
||||
|
@ -16,7 +16,6 @@ from openstack.tests.functional.baremetal import base
|
||||
|
||||
|
||||
class TestBareMetalChassis(base.BaseBaremetalTest):
|
||||
|
||||
def test_chassis_create_get_delete(self):
|
||||
chassis = self.create_chassis()
|
||||
|
||||
@ -24,8 +23,11 @@ class TestBareMetalChassis(base.BaseBaremetalTest):
|
||||
self.assertEqual(loaded.id, chassis.id)
|
||||
|
||||
self.conn.baremetal.delete_chassis(chassis, ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_chassis, chassis.id)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_chassis,
|
||||
chassis.id,
|
||||
)
|
||||
|
||||
def test_chassis_update(self):
|
||||
chassis = self.create_chassis()
|
||||
@ -41,7 +43,8 @@ class TestBareMetalChassis(base.BaseBaremetalTest):
|
||||
chassis = self.create_chassis()
|
||||
|
||||
chassis = self.conn.baremetal.patch_chassis(
|
||||
chassis, dict(path='/extra/answer', op='add', value=42))
|
||||
chassis, dict(path='/extra/answer', op='add', value=42)
|
||||
)
|
||||
self.assertEqual({'answer': 42}, chassis.extra)
|
||||
|
||||
chassis = self.conn.baremetal.get_chassis(chassis.id)
|
||||
@ -49,14 +52,21 @@ class TestBareMetalChassis(base.BaseBaremetalTest):
|
||||
|
||||
def test_chassis_negative_non_existing(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_chassis, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_chassis, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_chassis, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound, self.conn.baremetal.get_chassis, uuid
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_chassis,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_chassis,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.find_chassis(uuid))
|
||||
self.assertIsNone(self.conn.baremetal.delete_chassis(uuid))
|
||||
|
||||
|
@ -27,27 +27,24 @@ class TestBareMetalDeployTemplate(base.BaseBaremetalTest):
|
||||
"interface": "bios",
|
||||
"step": "apply_configuration",
|
||||
"args": {
|
||||
"settings": [
|
||||
{
|
||||
"name": "LogicalProc",
|
||||
"value": "Enabled"
|
||||
}
|
||||
]
|
||||
"settings": [{"name": "LogicalProc", "value": "Enabled"}]
|
||||
},
|
||||
"priority": 150
|
||||
"priority": 150,
|
||||
}
|
||||
]
|
||||
deploy_template = self.create_deploy_template(
|
||||
name='CUSTOM_DEPLOY_TEMPLATE',
|
||||
steps=steps)
|
||||
loaded = self.conn.baremetal.get_deploy_template(
|
||||
deploy_template.id)
|
||||
name='CUSTOM_DEPLOY_TEMPLATE', steps=steps
|
||||
)
|
||||
loaded = self.conn.baremetal.get_deploy_template(deploy_template.id)
|
||||
self.assertEqual(loaded.id, deploy_template.id)
|
||||
self.conn.baremetal.delete_deploy_template(deploy_template,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_deploy_template,
|
||||
deploy_template.id)
|
||||
self.conn.baremetal.delete_deploy_template(
|
||||
deploy_template, ignore_missing=False
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_deploy_template,
|
||||
deploy_template.id,
|
||||
)
|
||||
|
||||
def test_baremetal_deploy_template_list(self):
|
||||
steps = [
|
||||
@ -55,36 +52,33 @@ class TestBareMetalDeployTemplate(base.BaseBaremetalTest):
|
||||
"interface": "bios",
|
||||
"step": "apply_configuration",
|
||||
"args": {
|
||||
"settings": [
|
||||
{
|
||||
"name": "LogicalProc",
|
||||
"value": "Enabled"
|
||||
}
|
||||
]
|
||||
"settings": [{"name": "LogicalProc", "value": "Enabled"}]
|
||||
},
|
||||
"priority": 150
|
||||
"priority": 150,
|
||||
}
|
||||
]
|
||||
|
||||
deploy_template1 = self.create_deploy_template(
|
||||
name='CUSTOM_DEPLOY_TEMPLATE1',
|
||||
steps=steps)
|
||||
name='CUSTOM_DEPLOY_TEMPLATE1', steps=steps
|
||||
)
|
||||
deploy_template2 = self.create_deploy_template(
|
||||
name='CUSTOM_DEPLOY_TEMPLATE2',
|
||||
steps=steps)
|
||||
name='CUSTOM_DEPLOY_TEMPLATE2', steps=steps
|
||||
)
|
||||
deploy_templates = self.conn.baremetal.deploy_templates()
|
||||
ids = [template.id for template in deploy_templates]
|
||||
self.assertIn(deploy_template1.id, ids)
|
||||
self.assertIn(deploy_template2.id, ids)
|
||||
|
||||
deploy_templates_with_details = self.conn.baremetal.deploy_templates(
|
||||
details=True)
|
||||
details=True
|
||||
)
|
||||
for dp in deploy_templates_with_details:
|
||||
self.assertIsNotNone(dp.id)
|
||||
self.assertIsNotNone(dp.name)
|
||||
|
||||
deploy_tempalte_with_fields = self.conn.baremetal.deploy_templates(
|
||||
fields=['uuid'])
|
||||
fields=['uuid']
|
||||
)
|
||||
for dp in deploy_tempalte_with_fields:
|
||||
self.assertIsNotNone(dp.id)
|
||||
self.assertIsNone(dp.name)
|
||||
@ -95,31 +89,29 @@ class TestBareMetalDeployTemplate(base.BaseBaremetalTest):
|
||||
"interface": "bios",
|
||||
"step": "apply_configuration",
|
||||
"args": {
|
||||
"settings": [
|
||||
{
|
||||
"name": "LogicalProc",
|
||||
"value": "Enabled"
|
||||
}
|
||||
]
|
||||
"settings": [{"name": "LogicalProc", "value": "Enabled"}]
|
||||
},
|
||||
"priority": 150
|
||||
"priority": 150,
|
||||
}
|
||||
]
|
||||
deploy_template = self.create_deploy_template(
|
||||
name='CUSTOM_DEPLOY_TEMPLATE4',
|
||||
steps=steps)
|
||||
name='CUSTOM_DEPLOY_TEMPLATE4', steps=steps
|
||||
)
|
||||
self.assertFalse(deploy_template.extra)
|
||||
deploy_template.extra = {'answer': 42}
|
||||
|
||||
deploy_template = self.conn.baremetal.update_deploy_template(
|
||||
deploy_template)
|
||||
deploy_template
|
||||
)
|
||||
self.assertEqual({'answer': 42}, deploy_template.extra)
|
||||
|
||||
deploy_template = self.conn.baremetal.get_deploy_template(
|
||||
deploy_template.id)
|
||||
deploy_template.id
|
||||
)
|
||||
|
||||
self.conn.baremetal.delete_deploy_template(deploy_template.id,
|
||||
ignore_missing=False)
|
||||
self.conn.baremetal.delete_deploy_template(
|
||||
deploy_template.id, ignore_missing=False
|
||||
)
|
||||
|
||||
def test_baremetal_deploy_update(self):
|
||||
steps = [
|
||||
@ -127,27 +119,24 @@ class TestBareMetalDeployTemplate(base.BaseBaremetalTest):
|
||||
"interface": "bios",
|
||||
"step": "apply_configuration",
|
||||
"args": {
|
||||
"settings": [
|
||||
{
|
||||
"name": "LogicalProc",
|
||||
"value": "Enabled"
|
||||
}
|
||||
]
|
||||
"settings": [{"name": "LogicalProc", "value": "Enabled"}]
|
||||
},
|
||||
"priority": 150
|
||||
"priority": 150,
|
||||
}
|
||||
]
|
||||
deploy_template = self.create_deploy_template(
|
||||
name='CUSTOM_DEPLOY_TEMPLATE4',
|
||||
steps=steps)
|
||||
name='CUSTOM_DEPLOY_TEMPLATE4', steps=steps
|
||||
)
|
||||
deploy_template.extra = {'answer': 42}
|
||||
|
||||
deploy_template = self.conn.baremetal.update_deploy_template(
|
||||
deploy_template)
|
||||
deploy_template
|
||||
)
|
||||
self.assertEqual({'answer': 42}, deploy_template.extra)
|
||||
|
||||
deploy_template = self.conn.baremetal.get_deploy_template(
|
||||
deploy_template.id)
|
||||
deploy_template.id
|
||||
)
|
||||
self.assertEqual({'answer': 42}, deploy_template.extra)
|
||||
|
||||
def test_deploy_template_patch(self):
|
||||
@ -157,34 +146,34 @@ class TestBareMetalDeployTemplate(base.BaseBaremetalTest):
|
||||
"interface": "bios",
|
||||
"step": "apply_configuration",
|
||||
"args": {
|
||||
"settings": [
|
||||
{
|
||||
"name": "LogicalProc",
|
||||
"value": "Enabled"
|
||||
}
|
||||
]
|
||||
"settings": [{"name": "LogicalProc", "value": "Enabled"}]
|
||||
},
|
||||
"priority": 150
|
||||
"priority": 150,
|
||||
}
|
||||
]
|
||||
deploy_template = self.create_deploy_template(
|
||||
name=name,
|
||||
steps=steps)
|
||||
deploy_template = self.create_deploy_template(name=name, steps=steps)
|
||||
deploy_template = self.conn.baremetal.patch_deploy_template(
|
||||
deploy_template, dict(path='/extra/answer', op='add', value=42))
|
||||
deploy_template, dict(path='/extra/answer', op='add', value=42)
|
||||
)
|
||||
self.assertEqual({'answer': 42}, deploy_template.extra)
|
||||
self.assertEqual(name,
|
||||
deploy_template.name)
|
||||
self.assertEqual(name, deploy_template.name)
|
||||
|
||||
deploy_template = self.conn.baremetal.get_deploy_template(
|
||||
deploy_template.id)
|
||||
deploy_template.id
|
||||
)
|
||||
self.assertEqual({'answer': 42}, deploy_template.extra)
|
||||
|
||||
def test_deploy_template_negative_non_existing(self):
|
||||
uuid = "bbb45f41-d4bc-4307-8d1d-32f95ce1e920"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_deploy_template, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_deploy_template, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_deploy_template,
|
||||
uuid,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_deploy_template,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.delete_deploy_template(uuid))
|
||||
|
@ -16,7 +16,6 @@ from openstack.tests.functional.baremetal import base
|
||||
|
||||
|
||||
class TestBareMetalDriver(base.BaseBaremetalTest):
|
||||
|
||||
def test_fake_hardware_get(self):
|
||||
driver = self.conn.baremetal.get_driver('fake-hardware')
|
||||
self.assertEqual('fake-hardware', driver.name)
|
||||
@ -27,8 +26,11 @@ class TestBareMetalDriver(base.BaseBaremetalTest):
|
||||
self.assertIn('fake-hardware', [d.name for d in drivers])
|
||||
|
||||
def test_driver_negative_non_existing(self):
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_driver, 'not-a-driver')
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_driver,
|
||||
'not-a-driver',
|
||||
)
|
||||
|
||||
|
||||
class TestBareMetalDriverDetails(base.BaseBaremetalTest):
|
||||
@ -39,17 +41,21 @@ class TestBareMetalDriverDetails(base.BaseBaremetalTest):
|
||||
driver = self.conn.baremetal.get_driver('fake-hardware')
|
||||
self.assertEqual('fake-hardware', driver.name)
|
||||
for iface in ('boot', 'deploy', 'management', 'power'):
|
||||
self.assertIn('fake',
|
||||
getattr(driver, 'enabled_%s_interfaces' % iface))
|
||||
self.assertEqual('fake',
|
||||
getattr(driver, 'default_%s_interface' % iface))
|
||||
self.assertIn(
|
||||
'fake', getattr(driver, 'enabled_%s_interfaces' % iface)
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake', getattr(driver, 'default_%s_interface' % iface)
|
||||
)
|
||||
self.assertNotEqual([], driver.hosts)
|
||||
|
||||
def test_fake_hardware_list_details(self):
|
||||
drivers = self.conn.baremetal.drivers(details=True)
|
||||
driver = [d for d in drivers if d.name == 'fake-hardware'][0]
|
||||
for iface in ('boot', 'deploy', 'management', 'power'):
|
||||
self.assertIn('fake',
|
||||
getattr(driver, 'enabled_%s_interfaces' % iface))
|
||||
self.assertEqual('fake',
|
||||
getattr(driver, 'default_%s_interface' % iface))
|
||||
self.assertIn(
|
||||
'fake', getattr(driver, 'enabled_%s_interfaces' % iface)
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake', getattr(driver, 'default_%s_interface' % iface)
|
||||
)
|
||||
|
@ -27,17 +27,19 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
|
||||
# NOTE(dtantsur): get_node and find_node only differ in handing missing
|
||||
# nodes, otherwise they are identical.
|
||||
for call, ident in [(self.conn.baremetal.get_node, self.node_id),
|
||||
(self.conn.baremetal.get_node, 'node-name'),
|
||||
(self.conn.baremetal.find_node, self.node_id),
|
||||
(self.conn.baremetal.find_node, 'node-name')]:
|
||||
for call, ident in [
|
||||
(self.conn.baremetal.get_node, self.node_id),
|
||||
(self.conn.baremetal.get_node, 'node-name'),
|
||||
(self.conn.baremetal.find_node, self.node_id),
|
||||
(self.conn.baremetal.find_node, 'node-name'),
|
||||
]:
|
||||
found = call(ident)
|
||||
self.assertEqual(node.id, found.id)
|
||||
self.assertEqual(node.name, found.name)
|
||||
|
||||
with_fields = self.conn.baremetal.get_node(
|
||||
'node-name',
|
||||
fields=['uuid', 'driver', 'instance_id'])
|
||||
'node-name', fields=['uuid', 'driver', 'instance_id']
|
||||
)
|
||||
self.assertEqual(node.id, with_fields.id)
|
||||
self.assertEqual(node.driver, with_fields.driver)
|
||||
self.assertIsNone(with_fields.name)
|
||||
@ -47,8 +49,11 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.assertIn(node.id, [n.id for n in nodes])
|
||||
|
||||
self.conn.baremetal.delete_node(node, ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_node, self.node_id)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_node,
|
||||
self.node_id,
|
||||
)
|
||||
|
||||
def test_node_create_in_available(self):
|
||||
node = self.create_node(name='node-name', provision_state='available')
|
||||
@ -57,8 +62,11 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.assertEqual(node.provision_state, 'available')
|
||||
|
||||
self.conn.baremetal.delete_node(node, ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_node, self.node_id)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_node,
|
||||
self.node_id,
|
||||
)
|
||||
|
||||
def test_node_update(self):
|
||||
node = self.create_node(name='node-name', extra={'foo': 'bar'})
|
||||
@ -66,8 +74,7 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
node.extra = {'answer': 42}
|
||||
instance_uuid = str(uuid.uuid4())
|
||||
|
||||
node = self.conn.baremetal.update_node(node,
|
||||
instance_id=instance_uuid)
|
||||
node = self.conn.baremetal.update_node(node, instance_id=instance_uuid)
|
||||
self.assertEqual('new-name', node.name)
|
||||
self.assertEqual({'answer': 42}, node.extra)
|
||||
self.assertEqual(instance_uuid, node.instance_id)
|
||||
@ -77,8 +84,7 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.assertEqual({'answer': 42}, node.extra)
|
||||
self.assertEqual(instance_uuid, node.instance_id)
|
||||
|
||||
node = self.conn.baremetal.update_node(node,
|
||||
instance_id=None)
|
||||
node = self.conn.baremetal.update_node(node, instance_id=None)
|
||||
self.assertIsNone(node.instance_id)
|
||||
|
||||
node = self.conn.baremetal.get_node('new-name')
|
||||
@ -88,9 +94,9 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.create_node(name='node-name', extra={'foo': 'bar'})
|
||||
instance_uuid = str(uuid.uuid4())
|
||||
|
||||
node = self.conn.baremetal.update_node('node-name',
|
||||
instance_id=instance_uuid,
|
||||
extra={'answer': 42})
|
||||
node = self.conn.baremetal.update_node(
|
||||
'node-name', instance_id=instance_uuid, extra={'answer': 42}
|
||||
)
|
||||
self.assertEqual({'answer': 42}, node.extra)
|
||||
self.assertEqual(instance_uuid, node.instance_id)
|
||||
|
||||
@ -98,8 +104,7 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.assertEqual({'answer': 42}, node.extra)
|
||||
self.assertEqual(instance_uuid, node.instance_id)
|
||||
|
||||
node = self.conn.baremetal.update_node('node-name',
|
||||
instance_id=None)
|
||||
node = self.conn.baremetal.update_node('node-name', instance_id=None)
|
||||
self.assertIsNone(node.instance_id)
|
||||
|
||||
node = self.conn.baremetal.get_node('node-name')
|
||||
@ -112,8 +117,11 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
|
||||
node = self.conn.baremetal.patch_node(
|
||||
node,
|
||||
[dict(path='/instance_id', op='replace', value=instance_uuid),
|
||||
dict(path='/extra/answer', op='add', value=42)])
|
||||
[
|
||||
dict(path='/instance_id', op='replace', value=instance_uuid),
|
||||
dict(path='/extra/answer', op='add', value=42),
|
||||
],
|
||||
)
|
||||
self.assertEqual('new-name', node.name)
|
||||
self.assertEqual({'foo': 'bar', 'answer': 42}, node.extra)
|
||||
self.assertEqual(instance_uuid, node.instance_id)
|
||||
@ -125,8 +133,11 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
|
||||
node = self.conn.baremetal.patch_node(
|
||||
node,
|
||||
[dict(path='/instance_id', op='remove'),
|
||||
dict(path='/extra/answer', op='remove')])
|
||||
[
|
||||
dict(path='/instance_id', op='remove'),
|
||||
dict(path='/extra/answer', op='remove'),
|
||||
],
|
||||
)
|
||||
self.assertIsNone(node.instance_id)
|
||||
self.assertNotIn('answer', node.extra)
|
||||
|
||||
@ -136,12 +147,16 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
|
||||
def test_node_list_update_delete(self):
|
||||
self.create_node(name='node-name', extra={'foo': 'bar'})
|
||||
node = next(n for n in
|
||||
self.conn.baremetal.nodes(details=True,
|
||||
provision_state='enroll',
|
||||
is_maintenance=False,
|
||||
associated=False)
|
||||
if n.name == 'node-name')
|
||||
node = next(
|
||||
n
|
||||
for n in self.conn.baremetal.nodes(
|
||||
details=True,
|
||||
provision_state='enroll',
|
||||
is_maintenance=False,
|
||||
associated=False,
|
||||
)
|
||||
if n.name == 'node-name'
|
||||
)
|
||||
self.assertEqual(node.extra, {'foo': 'bar'})
|
||||
|
||||
# This test checks that resources returned from listing are usable
|
||||
@ -157,12 +172,12 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.assertIsNone(node.power_state)
|
||||
self.assertFalse(node.is_maintenance)
|
||||
|
||||
self.conn.baremetal.set_node_provision_state(node, 'manage',
|
||||
wait=True)
|
||||
self.conn.baremetal.set_node_provision_state(node, 'manage', wait=True)
|
||||
self.assertEqual(node.provision_state, 'manageable')
|
||||
|
||||
self.conn.baremetal.set_node_provision_state(node, 'provide',
|
||||
wait=True)
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
node, 'provide', wait=True
|
||||
)
|
||||
self.assertEqual(node.provision_state, 'available')
|
||||
|
||||
def test_node_create_in_enroll_provide_by_name(self):
|
||||
@ -175,12 +190,14 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.assertIsNone(node.power_state)
|
||||
self.assertFalse(node.is_maintenance)
|
||||
|
||||
node = self.conn.baremetal.set_node_provision_state(name, 'manage',
|
||||
wait=True)
|
||||
node = self.conn.baremetal.set_node_provision_state(
|
||||
name, 'manage', wait=True
|
||||
)
|
||||
self.assertEqual(node.provision_state, 'manageable')
|
||||
|
||||
node = self.conn.baremetal.set_node_provision_state(name, 'provide',
|
||||
wait=True)
|
||||
node = self.conn.baremetal.set_node_provision_state(
|
||||
name, 'provide', wait=True
|
||||
)
|
||||
self.assertEqual(node.provision_state, 'available')
|
||||
|
||||
def test_node_power_state(self):
|
||||
@ -205,17 +222,27 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
|
||||
def test_node_negative_non_existing(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_node, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_node, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_node, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.update_node, uuid,
|
||||
name='new-name')
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound, self.conn.baremetal.get_node, uuid
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_node,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_node,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.update_node,
|
||||
uuid,
|
||||
name='new-name',
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.find_node(uuid))
|
||||
self.assertIsNone(self.conn.baremetal.delete_node(uuid))
|
||||
|
||||
@ -287,8 +314,9 @@ class TestBareMetalNode(base.BaseBaremetalTest):
|
||||
self.assertIsNone(node.maintenance_reason)
|
||||
|
||||
# Initial setting with the reason
|
||||
node = self.conn.baremetal.update_node(node, is_maintenance=True,
|
||||
maintenance_reason=reason)
|
||||
node = self.conn.baremetal.update_node(
|
||||
node, is_maintenance=True, maintenance_reason=reason
|
||||
)
|
||||
self.assertTrue(node.is_maintenance)
|
||||
self.assertEqual(reason, node.maintenance_reason)
|
||||
|
||||
@ -338,8 +366,9 @@ class TestNodeRetired(base.BaseBaremetalTest):
|
||||
self.assertIsNone(node.retired_reason)
|
||||
|
||||
# Set retired with reason
|
||||
node = self.conn.baremetal.update_node(node, is_retired=True,
|
||||
retired_reason=reason)
|
||||
node = self.conn.baremetal.update_node(
|
||||
node, is_retired=True, retired_reason=reason
|
||||
)
|
||||
self.assertTrue(node.is_retired)
|
||||
self.assertEqual(reason, node.retired_reason)
|
||||
|
||||
@ -354,7 +383,10 @@ class TestNodeRetired(base.BaseBaremetalTest):
|
||||
# Set retired when node state available should fail!
|
||||
self.assertRaises(
|
||||
exceptions.ConflictException,
|
||||
self.conn.baremetal.update_node, node, is_retired=True)
|
||||
self.conn.baremetal.update_node,
|
||||
node,
|
||||
is_retired=True,
|
||||
)
|
||||
|
||||
|
||||
class TestBareMetalNodeFields(base.BaseBaremetalTest):
|
||||
@ -364,7 +396,8 @@ class TestBareMetalNodeFields(base.BaseBaremetalTest):
|
||||
def test_node_fields(self):
|
||||
self.create_node()
|
||||
result = self.conn.baremetal.nodes(
|
||||
fields=['uuid', 'name', 'instance_id'])
|
||||
fields=['uuid', 'name', 'instance_id']
|
||||
)
|
||||
for item in result:
|
||||
self.assertIsNotNone(item.id)
|
||||
self.assertIsNone(item.driver)
|
||||
@ -384,21 +417,31 @@ class TestBareMetalVif(base.BaseBaremetalTest):
|
||||
# NOTE(dtantsur): The noop networking driver is completely noop - the
|
||||
# VIF list does not return anything of value.
|
||||
self.conn.baremetal.list_node_vifs(self.node)
|
||||
res = self.conn.baremetal.detach_vif_from_node(self.node, self.vif_id,
|
||||
ignore_missing=False)
|
||||
res = self.conn.baremetal.detach_vif_from_node(
|
||||
self.node, self.vif_id, ignore_missing=False
|
||||
)
|
||||
self.assertTrue(res)
|
||||
|
||||
def test_node_vif_negative(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.attach_vif_to_node,
|
||||
uuid, self.vif_id)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.list_node_vifs,
|
||||
uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.detach_vif_from_node,
|
||||
uuid, self.vif_id, ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.attach_vif_to_node,
|
||||
uuid,
|
||||
self.vif_id,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.list_node_vifs,
|
||||
uuid,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.detach_vif_from_node,
|
||||
uuid,
|
||||
self.vif_id,
|
||||
ignore_missing=False,
|
||||
)
|
||||
|
||||
|
||||
class TestTraits(base.BaseBaremetalTest):
|
||||
@ -419,14 +462,17 @@ class TestTraits(base.BaseBaremetalTest):
|
||||
self.assertEqual(['CUSTOM_FAKE'], node.traits)
|
||||
|
||||
self.conn.baremetal.add_node_trait(self.node, 'CUSTOM_REAL')
|
||||
self.assertEqual(sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']),
|
||||
sorted(self.node.traits))
|
||||
self.assertEqual(
|
||||
sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']), sorted(self.node.traits)
|
||||
)
|
||||
node = self.conn.baremetal.get_node(self.node)
|
||||
self.assertEqual(sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']),
|
||||
sorted(node.traits))
|
||||
self.assertEqual(
|
||||
sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']), sorted(node.traits)
|
||||
)
|
||||
|
||||
self.conn.baremetal.remove_node_trait(node, 'CUSTOM_FAKE',
|
||||
ignore_missing=False)
|
||||
self.conn.baremetal.remove_node_trait(
|
||||
node, 'CUSTOM_FAKE', ignore_missing=False
|
||||
)
|
||||
self.assertEqual(['CUSTOM_REAL'], self.node.traits)
|
||||
node = self.conn.baremetal.get_node(self.node)
|
||||
self.assertEqual(['CUSTOM_REAL'], node.traits)
|
||||
|
@ -16,7 +16,6 @@ from openstack.tests.functional.baremetal import base
|
||||
|
||||
|
||||
class TestBareMetalPort(base.BaseBaremetalTest):
|
||||
|
||||
def setUp(self):
|
||||
super(TestBareMetalPort, self).setUp()
|
||||
self.node = self.create_node()
|
||||
@ -34,21 +33,23 @@ class TestBareMetalPort(base.BaseBaremetalTest):
|
||||
self.assertIsNotNone(loaded.address)
|
||||
|
||||
with_fields = self.conn.baremetal.get_port(
|
||||
port.id, fields=['uuid', 'extra', 'node_id'])
|
||||
port.id, fields=['uuid', 'extra', 'node_id']
|
||||
)
|
||||
self.assertEqual(port.id, with_fields.id)
|
||||
self.assertIsNone(with_fields.address)
|
||||
|
||||
self.conn.baremetal.delete_port(port, ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_port, port.id)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound, self.conn.baremetal.get_port, port.id
|
||||
)
|
||||
|
||||
def test_port_list(self):
|
||||
node2 = self.create_node(name='test-node')
|
||||
|
||||
port1 = self.create_port(address='11:22:33:44:55:66',
|
||||
node_id=node2.id)
|
||||
port2 = self.create_port(address='11:22:33:44:55:77',
|
||||
node_id=self.node.id)
|
||||
port1 = self.create_port(address='11:22:33:44:55:66', node_id=node2.id)
|
||||
port2 = self.create_port(
|
||||
address='11:22:33:44:55:77', node_id=self.node.id
|
||||
)
|
||||
|
||||
ports = self.conn.baremetal.ports(address='11:22:33:44:55:77')
|
||||
self.assertEqual([p.id for p in ports], [port2.id])
|
||||
@ -60,10 +61,16 @@ class TestBareMetalPort(base.BaseBaremetalTest):
|
||||
self.assertEqual([p.id for p in ports], [port1.id])
|
||||
|
||||
def test_port_list_update_delete(self):
|
||||
self.create_port(address='11:22:33:44:55:66', node_id=self.node.id,
|
||||
extra={'foo': 'bar'})
|
||||
port = next(self.conn.baremetal.ports(details=True,
|
||||
address='11:22:33:44:55:66'))
|
||||
self.create_port(
|
||||
address='11:22:33:44:55:66',
|
||||
node_id=self.node.id,
|
||||
extra={'foo': 'bar'},
|
||||
)
|
||||
port = next(
|
||||
self.conn.baremetal.ports(
|
||||
details=True, address='11:22:33:44:55:66'
|
||||
)
|
||||
)
|
||||
self.assertEqual(port.extra, {'foo': 'bar'})
|
||||
|
||||
# This test checks that resources returned from listing are usable
|
||||
@ -88,7 +95,8 @@ class TestBareMetalPort(base.BaseBaremetalTest):
|
||||
port.address = '66:55:44:33:22:11'
|
||||
|
||||
port = self.conn.baremetal.patch_port(
|
||||
port, dict(path='/extra/answer', op='add', value=42))
|
||||
port, dict(path='/extra/answer', op='add', value=42)
|
||||
)
|
||||
self.assertEqual('66:55:44:33:22:11', port.address)
|
||||
self.assertEqual({'answer': 42}, port.extra)
|
||||
|
||||
@ -98,17 +106,27 @@ class TestBareMetalPort(base.BaseBaremetalTest):
|
||||
|
||||
def test_port_negative_non_existing(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_port, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_port, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_port, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.update_port, uuid,
|
||||
pxe_enabled=True)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound, self.conn.baremetal.get_port, uuid
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_port,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_port,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.update_port,
|
||||
uuid,
|
||||
pxe_enabled=True,
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.find_port(uuid))
|
||||
self.assertIsNone(self.conn.baremetal.delete_port(uuid))
|
||||
|
||||
|
@ -31,22 +31,27 @@ class TestBareMetalPortGroup(base.BaseBaremetalTest):
|
||||
self.assertIsNotNone(loaded.node_id)
|
||||
|
||||
with_fields = self.conn.baremetal.get_port_group(
|
||||
port_group.id, fields=['uuid', 'extra'])
|
||||
port_group.id, fields=['uuid', 'extra']
|
||||
)
|
||||
self.assertEqual(port_group.id, with_fields.id)
|
||||
self.assertIsNone(with_fields.node_id)
|
||||
|
||||
self.conn.baremetal.delete_port_group(port_group,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_port_group, port_group.id)
|
||||
self.conn.baremetal.delete_port_group(port_group, ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_port_group,
|
||||
port_group.id,
|
||||
)
|
||||
|
||||
def test_port_list(self):
|
||||
node2 = self.create_node(name='test-node')
|
||||
|
||||
pg1 = self.create_port_group(address='11:22:33:44:55:66',
|
||||
node_id=node2.id)
|
||||
pg2 = self.create_port_group(address='11:22:33:44:55:77',
|
||||
node_id=self.node.id)
|
||||
pg1 = self.create_port_group(
|
||||
address='11:22:33:44:55:66', node_id=node2.id
|
||||
)
|
||||
pg2 = self.create_port_group(
|
||||
address='11:22:33:44:55:77', node_id=self.node.id
|
||||
)
|
||||
|
||||
pgs = self.conn.baremetal.port_groups(address='11:22:33:44:55:77')
|
||||
self.assertEqual([p.id for p in pgs], [pg2.id])
|
||||
@ -58,10 +63,14 @@ class TestBareMetalPortGroup(base.BaseBaremetalTest):
|
||||
self.assertEqual([p.id for p in pgs], [pg1.id])
|
||||
|
||||
def test_port_list_update_delete(self):
|
||||
self.create_port_group(address='11:22:33:44:55:66',
|
||||
extra={'foo': 'bar'})
|
||||
port_group = next(self.conn.baremetal.port_groups(
|
||||
details=True, address='11:22:33:44:55:66'))
|
||||
self.create_port_group(
|
||||
address='11:22:33:44:55:66', extra={'foo': 'bar'}
|
||||
)
|
||||
port_group = next(
|
||||
self.conn.baremetal.port_groups(
|
||||
details=True, address='11:22:33:44:55:66'
|
||||
)
|
||||
)
|
||||
self.assertEqual(port_group.extra, {'foo': 'bar'})
|
||||
|
||||
# This test checks that resources returned from listing are usable
|
||||
@ -82,7 +91,8 @@ class TestBareMetalPortGroup(base.BaseBaremetalTest):
|
||||
port_group = self.create_port_group()
|
||||
|
||||
port_group = self.conn.baremetal.patch_port_group(
|
||||
port_group, dict(path='/extra/answer', op='add', value=42))
|
||||
port_group, dict(path='/extra/answer', op='add', value=42)
|
||||
)
|
||||
self.assertEqual({'answer': 42}, port_group.extra)
|
||||
|
||||
port_group = self.conn.baremetal.get_port_group(port_group.id)
|
||||
@ -90,14 +100,23 @@ class TestBareMetalPortGroup(base.BaseBaremetalTest):
|
||||
|
||||
def test_port_group_negative_non_existing(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_port_group, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_port_group, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_port_group, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_port_group,
|
||||
uuid,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_port_group,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_port_group,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.find_port_group(uuid))
|
||||
self.assertIsNone(self.conn.baremetal.delete_port_group(uuid))
|
||||
|
||||
|
@ -25,47 +25,54 @@ class TestBareMetalVolumeconnector(base.BaseBaremetalTest):

def test_volume_connector_create_get_delete(self):
self.conn.baremetal.set_node_provision_state(
self.node, 'manage', wait=True)
self.node, 'manage', wait=True
)
self.conn.baremetal.set_node_power_state(self.node, 'power off')
volume_connector = self.create_volume_connector(
connector_id='iqn.2017-07.org.openstack:01:d9a51732c3f',
type='iqn')
connector_id='iqn.2017-07.org.openstack:01:d9a51732c3f', type='iqn'
)

loaded = self.conn.baremetal.get_volume_connector(
volume_connector.id)
loaded = self.conn.baremetal.get_volume_connector(volume_connector.id)
self.assertEqual(loaded.id, volume_connector.id)
self.assertIsNotNone(loaded.node_id)

with_fields = self.conn.baremetal.get_volume_connector(
volume_connector.id, fields=['uuid', 'extra'])
volume_connector.id, fields=['uuid', 'extra']
)
self.assertEqual(volume_connector.id, with_fields.id)
self.assertIsNone(with_fields.node_id)

self.conn.baremetal.delete_volume_connector(volume_connector,
ignore_missing=False)
self.assertRaises(exceptions.ResourceNotFound,
self.conn.baremetal.get_volume_connector,
volume_connector.id)
self.conn.baremetal.delete_volume_connector(
volume_connector, ignore_missing=False
)
self.assertRaises(
exceptions.ResourceNotFound,
self.conn.baremetal.get_volume_connector,
volume_connector.id,
)

def test_volume_connector_list(self):
node2 = self.create_node(name='test-node')
self.conn.baremetal.set_node_provision_state(
node2, 'manage', wait=True)
node2, 'manage', wait=True
)
self.conn.baremetal.set_node_power_state(node2, 'power off')
self.conn.baremetal.set_node_provision_state(
self.node, 'manage', wait=True)
self.node, 'manage', wait=True
)
self.conn.baremetal.set_node_power_state(self.node, 'power off')
vc1 = self.create_volume_connector(
connector_id='iqn.2018-07.org.openstack:01:d9a514g2c32',
node_id=node2.id,
type='iqn')
type='iqn',
)
vc2 = self.create_volume_connector(
connector_id='iqn.2017-07.org.openstack:01:d9a51732c4g',
node_id=self.node.id,
type='iqn')
type='iqn',
)

vcs = self.conn.baremetal.volume_connectors(
node=self.node.id)
vcs = self.conn.baremetal.volume_connectors(node=self.node.id)
self.assertEqual([v.id for v in vcs], [vc2.id])

vcs = self.conn.baremetal.volume_connectors(node=node2.id)
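
The functional test above drives the volume-connector workflow through the SDK proxy: move the node to the manageable state, power it off, create a connector, read it back, delete it. As a rough illustration of the same flow outside the test harness, here is a hedged sketch; the cloud name, node name and IQN are placeholders, and the keyword arguments mirror what the tests above pass to the proxy.

```python
# Illustrative sketch only: 'mycloud', 'my-node' and the IQN are placeholders,
# and error handling is omitted.
import openstack

conn = openstack.connect(cloud='mycloud')

node = conn.baremetal.get_node('my-node')
conn.baremetal.set_node_provision_state(node, 'manage', wait=True)
conn.baremetal.set_node_power_state(node, 'power off')

connector = conn.baremetal.create_volume_connector(
    node_id=node.id,
    type='iqn',
    connector_id='iqn.2017-07.org.openstack:01:d9a51732c3f',
)

# Fetch the connector back through the proxy, as the test does.
loaded = conn.baremetal.get_volume_connector(connector.id)
print(loaded.node_id)

conn.baremetal.delete_volume_connector(connector, ignore_missing=False)
```
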
@ -76,86 +83,109 @@ class TestBareMetalVolumeconnector(base.BaseBaremetalTest):
|
||||
|
||||
def test_volume_connector_list_update_delete(self):
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
self.create_volume_connector(
|
||||
connector_id='iqn.2020-07.org.openstack:02:d9451472ce2',
|
||||
node_id=self.node.id,
|
||||
type='iqn',
|
||||
extra={'foo': 'bar'})
|
||||
volume_connector = next(self.conn.baremetal.volume_connectors(
|
||||
details=True,
|
||||
node=self.node.id))
|
||||
extra={'foo': 'bar'},
|
||||
)
|
||||
volume_connector = next(
|
||||
self.conn.baremetal.volume_connectors(
|
||||
details=True, node=self.node.id
|
||||
)
|
||||
)
|
||||
self.assertEqual(volume_connector.extra, {'foo': 'bar'})
|
||||
|
||||
# This test checks that resources returned from listing are usable
|
||||
self.conn.baremetal.update_volume_connector(volume_connector,
|
||||
extra={'foo': 42})
|
||||
self.conn.baremetal.delete_volume_connector(volume_connector,
|
||||
ignore_missing=False)
|
||||
self.conn.baremetal.update_volume_connector(
|
||||
volume_connector, extra={'foo': 42}
|
||||
)
|
||||
self.conn.baremetal.delete_volume_connector(
|
||||
volume_connector, ignore_missing=False
|
||||
)
|
||||
|
||||
def test_volume_connector_update(self):
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
volume_connector = self.create_volume_connector(
|
||||
connector_id='iqn.2019-07.org.openstack:03:de45b472c40',
|
||||
node_id=self.node.id,
|
||||
type='iqn')
|
||||
type='iqn',
|
||||
)
|
||||
volume_connector.extra = {'answer': 42}
|
||||
|
||||
volume_connector = self.conn.baremetal.update_volume_connector(
|
||||
volume_connector)
|
||||
volume_connector
|
||||
)
|
||||
self.assertEqual({'answer': 42}, volume_connector.extra)
|
||||
|
||||
volume_connector = self.conn.baremetal.get_volume_connector(
|
||||
volume_connector.id)
|
||||
volume_connector.id
|
||||
)
|
||||
self.assertEqual({'answer': 42}, volume_connector.extra)
|
||||
|
||||
def test_volume_connector_patch(self):
|
||||
vol_conn_id = 'iqn.2020-07.org.openstack:04:de45b472c40'
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
volume_connector = self.create_volume_connector(
|
||||
connector_id=vol_conn_id,
|
||||
node_id=self.node.id,
|
||||
type='iqn')
|
||||
connector_id=vol_conn_id, node_id=self.node.id, type='iqn'
|
||||
)
|
||||
|
||||
volume_connector = self.conn.baremetal.patch_volume_connector(
|
||||
volume_connector, dict(path='/extra/answer', op='add', value=42))
|
||||
volume_connector, dict(path='/extra/answer', op='add', value=42)
|
||||
)
|
||||
self.assertEqual({'answer': 42}, volume_connector.extra)
|
||||
self.assertEqual(vol_conn_id,
|
||||
volume_connector.connector_id)
|
||||
self.assertEqual(vol_conn_id, volume_connector.connector_id)
|
||||
|
||||
volume_connector = self.conn.baremetal.get_volume_connector(
|
||||
volume_connector.id)
|
||||
volume_connector.id
|
||||
)
|
||||
self.assertEqual({'answer': 42}, volume_connector.extra)
|
||||
|
||||
def test_volume_connector_negative_non_existing(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_volume_connector, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_volume_connector, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_volume_connector, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_volume_connector,
|
||||
uuid,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_volume_connector,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_volume_connector,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.find_volume_connector(uuid))
|
||||
self.assertIsNone(self.conn.baremetal.delete_volume_connector(uuid))
|
||||
|
||||
def test_volume_connector_fields(self):
|
||||
self.create_node()
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
self.create_volume_connector(
|
||||
connector_id='iqn.2018-08.org.openstack:04:de45f37c48',
|
||||
node_id=self.node.id,
|
||||
type='iqn')
|
||||
type='iqn',
|
||||
)
|
||||
result = self.conn.baremetal.volume_connectors(
|
||||
fields=['uuid', 'node_id'])
|
||||
fields=['uuid', 'node_id']
|
||||
)
|
||||
for item in result:
|
||||
self.assertIsNotNone(item.id)
|
||||
self.assertIsNone(item.connector_id)
|
||||
|
@ -25,50 +25,58 @@ class TestBareMetalVolumetarget(base.BaseBaremetalTest):
|
||||
|
||||
def test_volume_target_create_get_delete(self):
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
volume_target = self.create_volume_target(
|
||||
boot_index=0,
|
||||
volume_id='04452bed-5367-4202-8bf5-de4335ac56d2',
|
||||
volume_type='iscsi')
|
||||
volume_type='iscsi',
|
||||
)
|
||||
|
||||
loaded = self.conn.baremetal.get_volume_target(
|
||||
volume_target.id)
|
||||
loaded = self.conn.baremetal.get_volume_target(volume_target.id)
|
||||
self.assertEqual(loaded.id, volume_target.id)
|
||||
self.assertIsNotNone(loaded.node_id)
|
||||
|
||||
with_fields = self.conn.baremetal.get_volume_target(
|
||||
volume_target.id, fields=['uuid', 'extra'])
|
||||
volume_target.id, fields=['uuid', 'extra']
|
||||
)
|
||||
self.assertEqual(volume_target.id, with_fields.id)
|
||||
self.assertIsNone(with_fields.node_id)
|
||||
|
||||
self.conn.baremetal.delete_volume_target(volume_target,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_volume_target,
|
||||
volume_target.id)
|
||||
self.conn.baremetal.delete_volume_target(
|
||||
volume_target, ignore_missing=False
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_volume_target,
|
||||
volume_target.id,
|
||||
)
|
||||
|
||||
def test_volume_target_list(self):
|
||||
node2 = self.create_node(name='test-node')
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
node2, 'manage', wait=True)
|
||||
node2, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(node2, 'power off')
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
vt1 = self.create_volume_target(
|
||||
boot_index=0,
|
||||
volume_id='bd4d008c-7d31-463d-abf9-6c23d9d55f7f',
|
||||
node_id=node2.id,
|
||||
volume_type='iscsi')
|
||||
volume_type='iscsi',
|
||||
)
|
||||
vt2 = self.create_volume_target(
|
||||
boot_index=0,
|
||||
volume_id='04452bed-5367-4202-8bf5-de4335ac57c2',
|
||||
node_id=self.node.id,
|
||||
volume_type='iscsi')
|
||||
volume_type='iscsi',
|
||||
)
|
||||
|
||||
vts = self.conn.baremetal.volume_targets(
|
||||
node=self.node.id)
|
||||
vts = self.conn.baremetal.volume_targets(node=self.node.id)
|
||||
self.assertEqual([v.id for v in vts], [vt2.id])
|
||||
|
||||
vts = self.conn.baremetal.volume_targets(node=node2.id)
|
||||
@ -83,7 +91,8 @@ class TestBareMetalVolumetarget(base.BaseBaremetalTest):
|
||||
self.assertIsNotNone(i.volume_type)
|
||||
|
||||
vts_with_fields = self.conn.baremetal.volume_targets(
|
||||
fields=['uuid', 'node_uuid'])
|
||||
fields=['uuid', 'node_uuid']
|
||||
)
|
||||
for i in vts_with_fields:
|
||||
self.assertIsNotNone(i.id)
|
||||
self.assertIsNone(i.volume_type)
|
||||
@ -91,89 +100,104 @@ class TestBareMetalVolumetarget(base.BaseBaremetalTest):
|
||||
|
||||
def test_volume_target_list_update_delete(self):
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
self.create_volume_target(
|
||||
boot_index=0,
|
||||
volume_id='04452bed-5367-4202-8bf5-de4335ac57h3',
|
||||
node_id=self.node.id,
|
||||
volume_type='iscsi',
|
||||
extra={'foo': 'bar'})
|
||||
volume_target = next(self.conn.baremetal.volume_targets(
|
||||
details=True,
|
||||
node=self.node.id))
|
||||
extra={'foo': 'bar'},
|
||||
)
|
||||
volume_target = next(
|
||||
self.conn.baremetal.volume_targets(details=True, node=self.node.id)
|
||||
)
|
||||
self.assertEqual(volume_target.extra, {'foo': 'bar'})
|
||||
|
||||
# This test checks that resources returned from listing are usable
|
||||
self.conn.baremetal.update_volume_target(volume_target,
|
||||
extra={'foo': 42})
|
||||
self.conn.baremetal.delete_volume_target(volume_target,
|
||||
ignore_missing=False)
|
||||
self.conn.baremetal.update_volume_target(
|
||||
volume_target, extra={'foo': 42}
|
||||
)
|
||||
self.conn.baremetal.delete_volume_target(
|
||||
volume_target, ignore_missing=False
|
||||
)
|
||||
|
||||
def test_volume_target_update(self):
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
volume_target = self.create_volume_target(
|
||||
boot_index=0,
|
||||
volume_id='04452bed-5367-4202-8bf5-de4335ac53h7',
|
||||
node_id=self.node.id,
|
||||
volume_type='isci')
|
||||
volume_type='isci',
|
||||
)
|
||||
volume_target.extra = {'answer': 42}
|
||||
|
||||
volume_target = self.conn.baremetal.update_volume_target(
|
||||
volume_target)
|
||||
volume_target = self.conn.baremetal.update_volume_target(volume_target)
|
||||
self.assertEqual({'answer': 42}, volume_target.extra)
|
||||
|
||||
volume_target = self.conn.baremetal.get_volume_target(
|
||||
volume_target.id)
|
||||
volume_target = self.conn.baremetal.get_volume_target(volume_target.id)
|
||||
self.assertEqual({'answer': 42}, volume_target.extra)
|
||||
|
||||
def test_volume_target_patch(self):
|
||||
vol_targ_id = '04452bed-5367-4202-9cg6-de4335ac53h7'
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
volume_target = self.create_volume_target(
|
||||
boot_index=0,
|
||||
volume_id=vol_targ_id,
|
||||
node_id=self.node.id,
|
||||
volume_type='isci')
|
||||
volume_type='isci',
|
||||
)
|
||||
|
||||
volume_target = self.conn.baremetal.patch_volume_target(
|
||||
volume_target, dict(path='/extra/answer', op='add', value=42))
|
||||
volume_target, dict(path='/extra/answer', op='add', value=42)
|
||||
)
|
||||
self.assertEqual({'answer': 42}, volume_target.extra)
|
||||
self.assertEqual(vol_targ_id,
|
||||
volume_target.volume_id)
|
||||
self.assertEqual(vol_targ_id, volume_target.volume_id)
|
||||
|
||||
volume_target = self.conn.baremetal.get_volume_target(
|
||||
volume_target.id)
|
||||
volume_target = self.conn.baremetal.get_volume_target(volume_target.id)
|
||||
self.assertEqual({'answer': 42}, volume_target.extra)
|
||||
|
||||
def test_volume_target_negative_non_existing(self):
|
||||
uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971"
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_volume_target, uuid)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_volume_target, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_volume_target, uuid,
|
||||
ignore_missing=False)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.get_volume_target,
|
||||
uuid,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.find_volume_target,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceNotFound,
|
||||
self.conn.baremetal.delete_volume_target,
|
||||
uuid,
|
||||
ignore_missing=False,
|
||||
)
|
||||
self.assertIsNone(self.conn.baremetal.find_volume_target(uuid))
|
||||
self.assertIsNone(self.conn.baremetal.delete_volume_target(uuid))
|
||||
|
||||
def test_volume_target_fields(self):
|
||||
self.create_node()
|
||||
self.conn.baremetal.set_node_provision_state(
|
||||
self.node, 'manage', wait=True)
|
||||
self.node, 'manage', wait=True
|
||||
)
|
||||
self.conn.baremetal.set_node_power_state(self.node, 'power off')
|
||||
self.create_volume_target(
|
||||
boot_index=0,
|
||||
volume_id='04452bed-5367-4202-8bf5-99ae634d8971',
|
||||
node_id=self.node.id,
|
||||
volume_type='iscsi')
|
||||
result = self.conn.baremetal.volume_targets(
|
||||
fields=['uuid', 'node_id'])
|
||||
volume_type='iscsi',
|
||||
)
|
||||
result = self.conn.baremetal.volume_targets(fields=['uuid', 'node_id'])
|
||||
for item in result:
|
||||
self.assertIsNotNone(item.id)
|
||||
|
@ -23,24 +23,31 @@ from openstack.baremetal import configdrive


class TestPopulateDirectory(testtools.TestCase):
def _check(self, metadata, user_data=None, network_data=None,
vendor_data=None):
with configdrive.populate_directory(metadata,
user_data=user_data,
network_data=network_data,
vendor_data=vendor_data) as d:
def _check(
self, metadata, user_data=None, network_data=None, vendor_data=None
):
with configdrive.populate_directory(
metadata,
user_data=user_data,
network_data=network_data,
vendor_data=vendor_data,
) as d:
for version in ('2012-08-10', 'latest'):
with open(os.path.join(d, 'openstack', version,
'meta_data.json')) as fp:
with open(
os.path.join(d, 'openstack', version, 'meta_data.json')
) as fp:
actual_metadata = json.load(fp)

self.assertEqual(metadata, actual_metadata)
network_data_file = os.path.join(d, 'openstack', version,
'network_data.json')
user_data_file = os.path.join(d, 'openstack', version,
'user_data')
vendor_data_file = os.path.join(d, 'openstack', version,
'vendor_data2.json')
network_data_file = os.path.join(
d, 'openstack', version, 'network_data.json'
)
user_data_file = os.path.join(
d, 'openstack', version, 'user_data'
)
vendor_data_file = os.path.join(
d, 'openstack', version, 'vendor_data2.json'
)

if network_data is None:
self.assertFalse(os.path.exists(network_data_file))
@ -83,17 +90,16 @@ class TestPopulateDirectory(testtools.TestCase):

@mock.patch('subprocess.Popen', autospec=True)
class TestPack(testtools.TestCase):

def test_no_genisoimage(self, mock_popen):
mock_popen.side_effect = OSError
self.assertRaisesRegex(RuntimeError, "genisoimage",
configdrive.pack, "/fake")
self.assertRaisesRegex(
RuntimeError, "genisoimage", configdrive.pack, "/fake"
)

def test_genisoimage_fails(self, mock_popen):
mock_popen.return_value.communicate.return_value = "", "BOOM"
mock_popen.return_value.returncode = 1
self.assertRaisesRegex(RuntimeError, "BOOM",
configdrive.pack, "/fake")
self.assertRaisesRegex(RuntimeError, "BOOM", configdrive.pack, "/fake")

def test_success(self, mock_popen):
mock_popen.return_value.communicate.return_value = "", ""
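
The TestPack hunk above patches subprocess.Popen so that no ISO-building tool ever runs and then asserts on the error path. The following is a standalone, hedged restatement of that pattern; the path and the function name are illustrative, not part of this change.

```python
# Standalone sketch of the mocking pattern used above; '/fake' and the
# function name are illustrative only.
from unittest import mock

from openstack.baremetal import configdrive


@mock.patch('subprocess.Popen', autospec=True)
def check_pack_without_iso_tools(mock_popen):
    # Every attempted ISO builder fails to start, so pack() is expected to
    # raise RuntimeError mentioning genisoimage.
    mock_popen.side_effect = OSError
    try:
        configdrive.pack('/fake')
    except RuntimeError as exc:
        assert 'genisoimage' in str(exc)
    else:
        raise AssertionError('RuntimeError was expected')
```
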
@ -24,7 +24,6 @@ EXAMPLE = {


class TestVersion(base.TestCase):

def test_basic(self):
sot = version.Version()
self.assertEqual('version', sot.resource_key)
@ -26,12 +26,12 @@ FAKE = {
"links": [
{
"href": "http://127.0.0.1:6385/v1/allocations/<PG_ID>",
"rel": "self"
"rel": "self",
},
{
"href": "http://127.0.0.1:6385/allocations/<PG_ID>",
"rel": "bookmark"
}
"rel": "bookmark",
},
],
"name": "test_allocation",
"node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d",
@ -44,7 +44,6 @@ FAKE = {


class TestAllocation(base.TestCase):

def test_basic(self):
sot = allocation.Allocation()
self.assertIsNone(sot.resource_key)
@ -75,7 +74,6 @@ class TestAllocation(base.TestCase):
@mock.patch('time.sleep', lambda _t: None)
@mock.patch.object(allocation.Allocation, 'fetch', autospec=True)
class TestWaitForAllocation(base.TestCase):

def setUp(self):
super(TestWaitForAllocation, self).setUp()
self.session = mock.Mock(spec=adapter.Adapter)
@ -116,8 +114,9 @@ class TestWaitForAllocation(base.TestCase):
marker[0] = True

mock_fetch.side_effect = _side_effect
self.assertRaises(exceptions.ResourceFailure,
self.allocation.wait, self.session)
self.assertRaises(
exceptions.ResourceFailure, self.allocation.wait, self.session
)
self.assertEqual(2, mock_fetch.call_count)

def test_failure_ignored(self, mock_fetch):
@ -136,6 +135,10 @@ class TestWaitForAllocation(base.TestCase):
self.assertEqual(2, mock_fetch.call_count)

def test_timeout(self, mock_fetch):
self.assertRaises(exceptions.ResourceTimeout,
self.allocation.wait, self.session, timeout=0.001)
self.assertRaises(
exceptions.ResourceTimeout,
self.allocation.wait,
self.session,
timeout=0.001,
)
mock_fetch.assert_called_with(self.allocation, self.session)
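
The timeout test above exercises Allocation.wait directly against a mocked session. A hedged usage sketch follows; the cloud name, resource class and timeout are placeholders, and it assumes an allocation has just been requested through the proxy.

```python
# Illustrative sketch only; cloud name, resource_class and timeout are
# placeholders.
import openstack
from openstack import exceptions

conn = openstack.connect(cloud='mycloud')

allocation = conn.baremetal.create_allocation(resource_class='baremetal')
try:
    # wait() keeps re-fetching the allocation until it becomes active,
    # raising ResourceFailure on an error state or ResourceTimeout on timeout.
    allocation = allocation.wait(conn.baremetal, timeout=300)
except exceptions.ResourceTimeout:
    print('allocation did not become active within 300 seconds')
except exceptions.ResourceFailure as exc:
    print('allocation failed: %s' % exc)
```
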
@ -19,32 +19,19 @@ FAKE = {
"description": "Sample chassis",
"extra": {},
"links": [
{
"href": "http://127.0.0.1:6385/v1/chassis/ID",
"rel": "self"
},
{
"href": "http://127.0.0.1:6385/chassis/ID",
"rel": "bookmark"
}
{"href": "http://127.0.0.1:6385/v1/chassis/ID", "rel": "self"},
{"href": "http://127.0.0.1:6385/chassis/ID", "rel": "bookmark"},
],
"nodes": [
{
"href": "http://127.0.0.1:6385/v1/chassis/ID/nodes",
"rel": "self"
},
{
"href": "http://127.0.0.1:6385/chassis/ID/nodes",
"rel": "bookmark"
}
{"href": "http://127.0.0.1:6385/v1/chassis/ID/nodes", "rel": "self"},
{"href": "http://127.0.0.1:6385/chassis/ID/nodes", "rel": "bookmark"},
],
"updated_at": None,
"uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1"
"uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1",
}


class TestChassis(base.TestCase):

def test_basic(self):
sot = chassis.Chassis()
self.assertIsNone(sot.resource_key)
@ -18,26 +18,23 @@ FAKE = {
"links": [
{
"href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain",
"rel": "self"
"rel": "self",
},
{
"href": "http://127.0.0.1:6385/conductors/compute2.localdomain",
"rel": "bookmark"
}
"rel": "bookmark",
},
],
"created_at": "2018-12-05T07:03:19+00:00",
"hostname": "compute2.localdomain",
"conductor_group": "",
"updated_at": "2018-12-05T07:03:21+00:00",
"alive": True,
"drivers": [
"ipmi"
]
"drivers": ["ipmi"],
}


class TestContainer(base.TestCase):

def test_basic(self):
sot = conductor.Conductor()
self.assertIsNone(sot.resource_key)
@ -18,40 +18,34 @@ FAKE = {
|
||||
"created_at": "2016-08-18T22:28:48.643434+11:11",
|
||||
"extra": {},
|
||||
"links": [
|
||||
{
|
||||
"href": """http://10.60.253.180:6385/v1/deploy_templates
|
||||
/bbb45f41-d4bc-4307-8d1d-32f95ce1e920""",
|
||||
"rel": "self"
|
||||
},
|
||||
{
|
||||
"href": """http://10.60.253.180:6385/deploy_templates
|
||||
"href": """http://10.60.253.180:6385/v1/deploy_templates
|
||||
/bbb45f41-d4bc-4307-8d1d-32f95ce1e920""",
|
||||
"rel": "self",
|
||||
},
|
||||
{
|
||||
"href": """http://10.60.253.180:6385/deploy_templates
|
||||
/bbb45f41-d4bc-4307-8d1d-32f95ce1e920""",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
"rel": "bookmark",
|
||||
},
|
||||
],
|
||||
"name": "CUSTOM_HYPERTHREADING_ON",
|
||||
"steps": [
|
||||
{
|
||||
"args": {
|
||||
"settings": [
|
||||
{
|
||||
"name": "LogicalProc",
|
||||
"value": "Enabled"
|
||||
}
|
||||
]
|
||||
},
|
||||
"interface": "bios",
|
||||
"priority": 150,
|
||||
"step": "apply_configuration"
|
||||
}
|
||||
{
|
||||
"args": {
|
||||
"settings": [{"name": "LogicalProc", "value": "Enabled"}]
|
||||
},
|
||||
"interface": "bios",
|
||||
"priority": 150,
|
||||
"step": "apply_configuration",
|
||||
}
|
||||
],
|
||||
"updated_at": None,
|
||||
"uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920"
|
||||
"uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920",
|
||||
}
|
||||
|
||||
|
||||
class DeployTemplates(base.TestCase):
|
||||
|
||||
def test_basic(self):
|
||||
sot = deploy_templates.DeployTemplate()
|
||||
self.assertIsNone(sot.resource_key)
|
||||
|
@ -21,36 +21,32 @@ from openstack.tests.unit import base
|
||||
|
||||
|
||||
FAKE = {
|
||||
"hosts": [
|
||||
"897ab1dad809"
|
||||
],
|
||||
"hosts": ["897ab1dad809"],
|
||||
"links": [
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool",
|
||||
"rel": "self"
|
||||
"rel": "self",
|
||||
},
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/drivers/agent_ipmitool",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
"rel": "bookmark",
|
||||
},
|
||||
],
|
||||
"name": "agent_ipmitool",
|
||||
"properties": [
|
||||
{
|
||||
"href":
|
||||
"http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties",
|
||||
"rel": "self"
|
||||
"href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", # noqa: E501
|
||||
"rel": "self",
|
||||
},
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
]
|
||||
"rel": "bookmark",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
class TestDriver(base.TestCase):
|
||||
|
||||
def test_basic(self):
|
||||
sot = driver.Driver()
|
||||
self.assertIsNone(sot.resource_key)
|
||||
@ -79,16 +75,19 @@ class TestDriver(base.TestCase):
|
||||
'async': True,
|
||||
'attach': False,
|
||||
'description': "Fake function that does nothing in background",
|
||||
'http_methods': ['GET', 'PUT', 'POST', 'DELETE']
|
||||
'http_methods': ['GET', 'PUT', 'POST', 'DELETE'],
|
||||
}
|
||||
}
|
||||
self.session.get.return_value.json.return_value = (
|
||||
fake_vendor_passthru_info)
|
||||
fake_vendor_passthru_info
|
||||
)
|
||||
result = sot.list_vendor_passthru(self.session)
|
||||
self.session.get.assert_called_once_with(
|
||||
'drivers/{driver_name}/vendor_passthru/methods'.format(
|
||||
driver_name=FAKE["name"]),
|
||||
headers=mock.ANY)
|
||||
driver_name=FAKE["name"]
|
||||
),
|
||||
headers=mock.ANY,
|
||||
)
|
||||
self.assertEqual(result, fake_vendor_passthru_info)
|
||||
|
||||
@mock.patch.object(exceptions, 'raise_from_response', mock.Mock())
|
||||
@ -99,33 +98,49 @@ class TestDriver(base.TestCase):
|
||||
sot.call_vendor_passthru(self.session, 'GET', 'fake_vendor_method')
|
||||
self.session.get.assert_called_once_with(
|
||||
'drivers/{}/vendor_passthru?method={}'.format(
|
||||
FAKE["name"], 'fake_vendor_method'),
|
||||
FAKE["name"], 'fake_vendor_method'
|
||||
),
|
||||
json=None,
|
||||
headers=mock.ANY,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
# PUT
|
||||
sot.call_vendor_passthru(self.session, 'PUT', 'fake_vendor_method',
|
||||
body={"fake_param_key": "fake_param_value"})
|
||||
sot.call_vendor_passthru(
|
||||
self.session,
|
||||
'PUT',
|
||||
'fake_vendor_method',
|
||||
body={"fake_param_key": "fake_param_value"},
|
||||
)
|
||||
self.session.put.assert_called_once_with(
|
||||
'drivers/{}/vendor_passthru?method={}'.format(
|
||||
FAKE["name"], 'fake_vendor_method'),
|
||||
FAKE["name"], 'fake_vendor_method'
|
||||
),
|
||||
json={"fake_param_key": "fake_param_value"},
|
||||
headers=mock.ANY,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
# POST
|
||||
sot.call_vendor_passthru(self.session, 'POST', 'fake_vendor_method',
|
||||
body={"fake_param_key": "fake_param_value"})
|
||||
sot.call_vendor_passthru(
|
||||
self.session,
|
||||
'POST',
|
||||
'fake_vendor_method',
|
||||
body={"fake_param_key": "fake_param_value"},
|
||||
)
|
||||
self.session.post.assert_called_once_with(
|
||||
'drivers/{}/vendor_passthru?method={}'.format(
|
||||
FAKE["name"], 'fake_vendor_method'),
|
||||
FAKE["name"], 'fake_vendor_method'
|
||||
),
|
||||
json={"fake_param_key": "fake_param_value"},
|
||||
headers=mock.ANY,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
# DELETE
|
||||
sot.call_vendor_passthru(self.session, 'DELETE', 'fake_vendor_method')
|
||||
self.session.delete.assert_called_once_with(
|
||||
'drivers/{}/vendor_passthru?method={}'.format(
|
||||
FAKE["name"], 'fake_vendor_method'),
|
||||
FAKE["name"], 'fake_vendor_method'
|
||||
),
|
||||
json=None,
|
||||
headers=mock.ANY,
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
|
||||
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
|
||||
)
|
||||
|
File diff suppressed because it is too large
@ -20,30 +20,23 @@ FAKE = {
|
||||
"extra": {},
|
||||
"internal_info": {},
|
||||
"links": [
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/v1/ports/<PORT_ID>",
|
||||
"rel": "self"
|
||||
},
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/ports/<PORT_ID>",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
{"href": "http://127.0.0.1:6385/v1/ports/<PORT_ID>", "rel": "self"},
|
||||
{"href": "http://127.0.0.1:6385/ports/<PORT_ID>", "rel": "bookmark"},
|
||||
],
|
||||
"local_link_connection": {
|
||||
"port_id": "Ethernet3/1",
|
||||
"switch_id": "0a:1b:2c:3d:4e:5f",
|
||||
"switch_info": "switch1"
|
||||
"switch_info": "switch1",
|
||||
},
|
||||
"node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d",
|
||||
"portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a",
|
||||
"pxe_enabled": True,
|
||||
"updated_at": None,
|
||||
"uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1"
|
||||
"uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1",
|
||||
}
|
||||
|
||||
|
||||
class TestPort(base.TestCase):
|
||||
|
||||
def test_basic(self):
|
||||
sot = port.Port()
|
||||
self.assertIsNone(sot.resource_key)
|
||||
@ -64,8 +57,9 @@ class TestPort(base.TestCase):
|
||||
self.assertEqual(FAKE['extra'], sot.extra)
|
||||
self.assertEqual(FAKE['internal_info'], sot.internal_info)
|
||||
self.assertEqual(FAKE['links'], sot.links)
|
||||
self.assertEqual(FAKE['local_link_connection'],
|
||||
sot.local_link_connection)
|
||||
self.assertEqual(
|
||||
FAKE['local_link_connection'], sot.local_link_connection
|
||||
)
|
||||
self.assertEqual(FAKE['node_uuid'], sot.node_id)
|
||||
self.assertEqual(FAKE['portgroup_uuid'], sot.port_group_id)
|
||||
self.assertEqual(FAKE['pxe_enabled'], sot.is_pxe_enabled)
|
||||
|
@ -20,26 +20,23 @@ FAKE = {
|
||||
"extra": {},
|
||||
"internal_info": {},
|
||||
"links": [
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/v1/portgroups/<PG_ID>",
|
||||
"rel": "self"
|
||||
},
|
||||
{"href": "http://127.0.0.1:6385/v1/portgroups/<PG_ID>", "rel": "self"},
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/portgroups/<PG_ID>",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
"rel": "bookmark",
|
||||
},
|
||||
],
|
||||
"name": "test_portgroup",
|
||||
"node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d",
|
||||
"ports": [
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/v1/portgroups/<PG_ID>/ports",
|
||||
"rel": "self"
|
||||
"rel": "self",
|
||||
},
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/portgroups/<PG_ID>/ports",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
"rel": "bookmark",
|
||||
},
|
||||
],
|
||||
"standalone_ports_supported": True,
|
||||
"updated_at": None,
|
||||
@ -48,7 +45,6 @@ FAKE = {
|
||||
|
||||
|
||||
class TestPortGroup(base.TestCase):
|
||||
|
||||
def test_basic(self):
|
||||
sot = port_group.PortGroup()
|
||||
self.assertIsNone(sot.resource_key)
|
||||
@ -72,6 +68,8 @@ class TestPortGroup(base.TestCase):
|
||||
self.assertEqual(FAKE['name'], sot.name)
|
||||
self.assertEqual(FAKE['node_uuid'], sot.node_id)
|
||||
self.assertEqual(FAKE['ports'], sot.ports)
|
||||
self.assertEqual(FAKE['standalone_ports_supported'],
|
||||
sot.is_standalone_ports_supported)
|
||||
self.assertEqual(
|
||||
FAKE['standalone_ports_supported'],
|
||||
sot.is_standalone_ports_supported,
|
||||
)
|
||||
self.assertEqual(FAKE['updated_at'], sot.updated_at)
|
||||
|
@ -63,9 +63,12 @@ class TestChassis(TestBaremetalProxy):
|
||||
self.verify_find(self.proxy.find_chassis, chassis.Chassis)
|
||||
|
||||
def test_get_chassis(self):
|
||||
self.verify_get(self.proxy.get_chassis, chassis.Chassis,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None})
|
||||
self.verify_get(
|
||||
self.proxy.get_chassis,
|
||||
chassis.Chassis,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None},
|
||||
)
|
||||
|
||||
def test_update_chassis(self):
|
||||
self.verify_update(self.proxy.update_chassis, chassis.Chassis)
|
||||
@ -97,23 +100,29 @@ class TestNode(TestBaremetalProxy):
|
||||
self.verify_find(self.proxy.find_node, node.Node)
|
||||
|
||||
def test_get_node(self):
|
||||
self.verify_get(self.proxy.get_node, node.Node,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None})
|
||||
self.verify_get(
|
||||
self.proxy.get_node,
|
||||
node.Node,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None},
|
||||
)
|
||||
|
||||
@mock.patch.object(node.Node, 'commit', autospec=True)
|
||||
def test_update_node(self, mock_commit):
|
||||
self.proxy.update_node('uuid', instance_id='new value')
|
||||
mock_commit.assert_called_once_with(mock.ANY, self.proxy,
|
||||
retry_on_conflict=True)
|
||||
mock_commit.assert_called_once_with(
|
||||
mock.ANY, self.proxy, retry_on_conflict=True
|
||||
)
|
||||
self.assertEqual('new value', mock_commit.call_args[0][0].instance_id)
|
||||
|
||||
@mock.patch.object(node.Node, 'commit', autospec=True)
|
||||
def test_update_node_no_retries(self, mock_commit):
|
||||
self.proxy.update_node('uuid', instance_id='new value',
|
||||
retry_on_conflict=False)
|
||||
mock_commit.assert_called_once_with(mock.ANY, self.proxy,
|
||||
retry_on_conflict=False)
|
||||
self.proxy.update_node(
|
||||
'uuid', instance_id='new value', retry_on_conflict=False
|
||||
)
|
||||
mock_commit.assert_called_once_with(
|
||||
mock.ANY, self.proxy, retry_on_conflict=False
|
||||
)
|
||||
self.assertEqual('new value', mock_commit.call_args[0][0].instance_id)
|
||||
|
||||
def test_delete_node(self):
|
||||
@ -143,9 +152,12 @@ class TestPort(TestBaremetalProxy):
|
||||
self.verify_find(self.proxy.find_port, port.Port)
|
||||
|
||||
def test_get_port(self):
|
||||
self.verify_get(self.proxy.get_port, port.Port,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None})
|
||||
self.verify_get(
|
||||
self.proxy.get_port,
|
||||
port.Port,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None},
|
||||
)
|
||||
|
||||
def test_update_port(self):
|
||||
self.verify_update(self.proxy.update_port, port.Port)
|
||||
@ -171,9 +183,12 @@ class TestPortGroups(TestBaremetalProxy):
|
||||
mock_list.assert_called_once_with(self.proxy, details=False, query=1)
|
||||
|
||||
def test_get_port_group(self):
|
||||
self.verify_get(self.proxy.get_port_group, port_group.PortGroup,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None})
|
||||
self.verify_get(
|
||||
self.proxy.get_port_group,
|
||||
port_group.PortGroup,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None},
|
||||
)
|
||||
|
||||
|
||||
class TestAllocation(TestBaremetalProxy):
|
||||
@ -181,43 +196,57 @@ class TestAllocation(TestBaremetalProxy):
|
||||
self.verify_create(self.proxy.create_allocation, allocation.Allocation)
|
||||
|
||||
def test_get_allocation(self):
|
||||
self.verify_get(self.proxy.get_allocation, allocation.Allocation,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None})
|
||||
self.verify_get(
|
||||
self.proxy.get_allocation,
|
||||
allocation.Allocation,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None},
|
||||
)
|
||||
|
||||
def test_delete_allocation(self):
|
||||
self.verify_delete(self.proxy.delete_allocation, allocation.Allocation,
|
||||
False)
|
||||
self.verify_delete(
|
||||
self.proxy.delete_allocation, allocation.Allocation, False
|
||||
)
|
||||
|
||||
def test_delete_allocation_ignore(self):
|
||||
self.verify_delete(self.proxy.delete_allocation, allocation.Allocation,
|
||||
True)
|
||||
self.verify_delete(
|
||||
self.proxy.delete_allocation, allocation.Allocation, True
|
||||
)
|
||||
|
||||
|
||||
class TestVolumeConnector(TestBaremetalProxy):
|
||||
def test_create_volume_connector(self):
|
||||
self.verify_create(self.proxy.create_volume_connector,
|
||||
volume_connector.VolumeConnector)
|
||||
self.verify_create(
|
||||
self.proxy.create_volume_connector,
|
||||
volume_connector.VolumeConnector,
|
||||
)
|
||||
|
||||
def test_find_volume_connector(self):
|
||||
self.verify_find(self.proxy.find_volume_connector,
|
||||
volume_connector.VolumeConnector)
|
||||
self.verify_find(
|
||||
self.proxy.find_volume_connector, volume_connector.VolumeConnector
|
||||
)
|
||||
|
||||
def test_get_volume_connector(self):
|
||||
self.verify_get(self.proxy.get_volume_connector,
|
||||
volume_connector.VolumeConnector,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None})
|
||||
self.verify_get(
|
||||
self.proxy.get_volume_connector,
|
||||
volume_connector.VolumeConnector,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None},
|
||||
)
|
||||
|
||||
def test_delete_volume_connector(self):
|
||||
self.verify_delete(self.proxy.delete_volume_connector,
|
||||
volume_connector.VolumeConnector,
|
||||
False)
|
||||
self.verify_delete(
|
||||
self.proxy.delete_volume_connector,
|
||||
volume_connector.VolumeConnector,
|
||||
False,
|
||||
)
|
||||
|
||||
def test_delete_volume_connector_ignore(self):
|
||||
self.verify_delete(self.proxy.delete_volume_connector,
|
||||
volume_connector.VolumeConnector,
|
||||
True)
|
||||
self.verify_delete(
|
||||
self.proxy.delete_volume_connector,
|
||||
volume_connector.VolumeConnector,
|
||||
True,
|
||||
)
|
||||
|
||||
|
||||
class TestVolumeTarget(TestBaremetalProxy):
|
||||
@ -234,28 +263,32 @@ class TestVolumeTarget(TestBaremetalProxy):
|
||||
mock_list.assert_called_once_with(self.proxy, query=1)
|
||||
|
||||
def test_create_volume_target(self):
|
||||
self.verify_create(self.proxy.create_volume_target,
|
||||
volume_target.VolumeTarget)
|
||||
self.verify_create(
|
||||
self.proxy.create_volume_target, volume_target.VolumeTarget
|
||||
)
|
||||
|
||||
def test_find_volume_target(self):
|
||||
self.verify_find(self.proxy.find_volume_target,
|
||||
volume_target.VolumeTarget)
|
||||
self.verify_find(
|
||||
self.proxy.find_volume_target, volume_target.VolumeTarget
|
||||
)
|
||||
|
||||
def test_get_volume_target(self):
|
||||
self.verify_get(self.proxy.get_volume_target,
|
||||
volume_target.VolumeTarget,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None})
|
||||
self.verify_get(
|
||||
self.proxy.get_volume_target,
|
||||
volume_target.VolumeTarget,
|
||||
mock_method=_MOCK_METHOD,
|
||||
expected_kwargs={'fields': None},
|
||||
)
|
||||
|
||||
def test_delete_volume_target(self):
|
||||
self.verify_delete(self.proxy.delete_volume_target,
|
||||
volume_target.VolumeTarget,
|
||||
False)
|
||||
self.verify_delete(
|
||||
self.proxy.delete_volume_target, volume_target.VolumeTarget, False
|
||||
)
|
||||
|
||||
def test_delete_volume_target_ignore(self):
|
||||
self.verify_delete(self.proxy.delete_volume_target,
|
||||
volume_target.VolumeTarget,
|
||||
True)
|
||||
self.verify_delete(
|
||||
self.proxy.delete_volume_target, volume_target.VolumeTarget, True
|
||||
)
|
||||
|
||||
|
||||
class TestMisc(TestBaremetalProxy):
|
||||
@ -263,35 +296,45 @@ class TestMisc(TestBaremetalProxy):
|
||||
def test__get_with_fields_none(self, mock_fetch):
|
||||
result = self.proxy._get_with_fields(node.Node, 'value')
|
||||
self.assertIs(result, mock_fetch.return_value)
|
||||
mock_fetch.assert_called_once_with(mock.ANY, self.proxy,
|
||||
error_message=mock.ANY)
|
||||
mock_fetch.assert_called_once_with(
|
||||
mock.ANY, self.proxy, error_message=mock.ANY
|
||||
)
|
||||
|
||||
@mock.patch.object(node.Node, 'fetch', autospec=True)
|
||||
def test__get_with_fields_node(self, mock_fetch):
|
||||
result = self.proxy._get_with_fields(
|
||||
# Mix of server-side and client-side fields
|
||||
node.Node, 'value', fields=['maintenance', 'id', 'instance_id'])
|
||||
node.Node,
|
||||
'value',
|
||||
fields=['maintenance', 'id', 'instance_id'],
|
||||
)
|
||||
self.assertIs(result, mock_fetch.return_value)
|
||||
mock_fetch.assert_called_once_with(
|
||||
mock.ANY, self.proxy, error_message=mock.ANY,
|
||||
mock.ANY,
|
||||
self.proxy,
|
||||
error_message=mock.ANY,
|
||||
# instance_id converted to server-side instance_uuid
|
||||
fields='maintenance,uuid,instance_uuid')
|
||||
fields='maintenance,uuid,instance_uuid',
|
||||
)
|
||||
|
||||
@mock.patch.object(port.Port, 'fetch', autospec=True)
|
||||
def test__get_with_fields_port(self, mock_fetch):
|
||||
result = self.proxy._get_with_fields(
|
||||
port.Port, 'value', fields=['address', 'id', 'node_id'])
|
||||
port.Port, 'value', fields=['address', 'id', 'node_id']
|
||||
)
|
||||
self.assertIs(result, mock_fetch.return_value)
|
||||
mock_fetch.assert_called_once_with(
|
||||
mock.ANY, self.proxy, error_message=mock.ANY,
|
||||
mock.ANY,
|
||||
self.proxy,
|
||||
error_message=mock.ANY,
|
||||
# node_id converted to server-side node_uuid
|
||||
fields='address,uuid,node_uuid')
|
||||
fields='address,uuid,node_uuid',
|
||||
)
|
||||
|
||||
|
||||
@mock.patch('time.sleep', lambda _sec: None)
|
||||
@mock.patch.object(_proxy.Proxy, 'get_node', autospec=True)
|
||||
class TestWaitForNodesProvisionState(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestWaitForNodesProvisionState, self).setUp()
|
||||
self.session = mock.Mock()
|
||||
@ -299,59 +342,67 @@ class TestWaitForNodesProvisionState(base.TestCase):
|
||||
|
||||
def test_success(self, mock_get):
|
||||
# two attempts, one node succeeds after the 1st
|
||||
nodes = [mock.Mock(spec=node.Node, id=str(i))
|
||||
for i in range(3)]
|
||||
nodes = [mock.Mock(spec=node.Node, id=str(i)) for i in range(3)]
|
||||
for i, n in enumerate(nodes):
|
||||
# 1st attempt on 1st node, 2nd attempt on 2nd node
|
||||
n._check_state_reached.return_value = not (i % 2)
|
||||
mock_get.side_effect = nodes
|
||||
|
||||
result = self.proxy.wait_for_nodes_provision_state(
|
||||
['abcd', node.Node(id='1234')], 'fake state')
|
||||
['abcd', node.Node(id='1234')], 'fake state'
|
||||
)
|
||||
self.assertEqual([nodes[0], nodes[2]], result)
|
||||
|
||||
for n in nodes:
|
||||
n._check_state_reached.assert_called_once_with(
|
||||
self.proxy, 'fake state', True)
|
||||
self.proxy, 'fake state', True
|
||||
)
|
||||
|
||||
def test_success_no_fail(self, mock_get):
|
||||
# two attempts, one node succeeds after the 1st
|
||||
nodes = [mock.Mock(spec=node.Node, id=str(i))
|
||||
for i in range(3)]
|
||||
nodes = [mock.Mock(spec=node.Node, id=str(i)) for i in range(3)]
|
||||
for i, n in enumerate(nodes):
|
||||
# 1st attempt on 1st node, 2nd attempt on 2nd node
|
||||
n._check_state_reached.return_value = not (i % 2)
|
||||
mock_get.side_effect = nodes
|
||||
|
||||
result = self.proxy.wait_for_nodes_provision_state(
|
||||
['abcd', node.Node(id='1234')], 'fake state', fail=False)
|
||||
['abcd', node.Node(id='1234')], 'fake state', fail=False
|
||||
)
|
||||
self.assertEqual([nodes[0], nodes[2]], result.success)
|
||||
self.assertEqual([], result.failure)
|
||||
self.assertEqual([], result.timeout)
|
||||
|
||||
for n in nodes:
|
||||
n._check_state_reached.assert_called_once_with(
|
||||
self.proxy, 'fake state', True)
|
||||
self.proxy, 'fake state', True
|
||||
)
|
||||
|
||||
def test_timeout(self, mock_get):
|
||||
mock_get.return_value._check_state_reached.return_value = False
|
||||
mock_get.return_value.id = '1234'
|
||||
|
||||
self.assertRaises(exceptions.ResourceTimeout,
|
||||
self.proxy.wait_for_nodes_provision_state,
|
||||
['abcd', node.Node(id='1234')], 'fake state',
|
||||
timeout=0.001)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceTimeout,
|
||||
self.proxy.wait_for_nodes_provision_state,
|
||||
['abcd', node.Node(id='1234')],
|
||||
'fake state',
|
||||
timeout=0.001,
|
||||
)
|
||||
mock_get.return_value._check_state_reached.assert_called_with(
|
||||
self.proxy, 'fake state', True)
|
||||
self.proxy, 'fake state', True
|
||||
)
|
||||
|
||||
def test_timeout_no_fail(self, mock_get):
|
||||
mock_get.return_value._check_state_reached.return_value = False
|
||||
mock_get.return_value.id = '1234'
|
||||
|
||||
result = self.proxy.wait_for_nodes_provision_state(
|
||||
['abcd'], 'fake state', timeout=0.001, fail=False)
|
||||
['abcd'], 'fake state', timeout=0.001, fail=False
|
||||
)
|
||||
mock_get.return_value._check_state_reached.assert_called_with(
|
||||
self.proxy, 'fake state', True)
|
||||
self.proxy, 'fake state', True
|
||||
)
|
||||
|
||||
self.assertEqual([], result.success)
|
||||
self.assertEqual([mock_get.return_value], result.timeout)
|
||||
@ -364,8 +415,9 @@ class TestWaitForNodesProvisionState(base.TestCase):
|
||||
if result.id == '1':
|
||||
result._check_state_reached.return_value = True
|
||||
elif result.id == '2':
|
||||
result._check_state_reached.side_effect = \
|
||||
result._check_state_reached.side_effect = (
|
||||
exceptions.ResourceFailure("boom")
|
||||
)
|
||||
else:
|
||||
result._check_state_reached.return_value = False
|
||||
return result
|
||||
@ -373,7 +425,8 @@ class TestWaitForNodesProvisionState(base.TestCase):
|
||||
mock_get.side_effect = _fake_get
|
||||
|
||||
result = self.proxy.wait_for_nodes_provision_state(
|
||||
['1', '2', '3'], 'fake state', timeout=0.001, fail=False)
|
||||
['1', '2', '3'], 'fake state', timeout=0.001, fail=False
|
||||
)
|
||||
|
||||
self.assertEqual(['1'], [x.id for x in result.success])
|
||||
self.assertEqual(['3'], [x.id for x in result.timeout])
|
||||
|
@ -21,22 +21,21 @@ FAKE = {
|
||||
"links": [
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/v1/volume/connector/<ID>",
|
||||
"rel": "self"
|
||||
"rel": "self",
|
||||
},
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/volume/connector/<ID>",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
"rel": "bookmark",
|
||||
},
|
||||
],
|
||||
"node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d",
|
||||
"type": "iqn",
|
||||
"updated_at": None,
|
||||
"uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c"
|
||||
"uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c",
|
||||
}
|
||||
|
||||
|
||||
class TestVolumeconnector(base.TestCase):
|
||||
|
||||
def test_basic(self):
|
||||
sot = volume_connector.VolumeConnector()
|
||||
self.assertIsNone(sot.resource_key)
|
||||
|
@ -21,24 +21,23 @@ FAKE = {
|
||||
"links": [
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/v1/volume/targets/<ID>",
|
||||
"rel": "self"
|
||||
"rel": "self",
|
||||
},
|
||||
{
|
||||
"href": "http://127.0.0.1:6385/volume/targets/<ID>",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
"rel": "bookmark",
|
||||
},
|
||||
],
|
||||
"node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d",
|
||||
"properties": {},
|
||||
"updated_at": None,
|
||||
"uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f",
|
||||
"volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2",
|
||||
"volume_type": "iscsi"
|
||||
"volume_type": "iscsi",
|
||||
}
|
||||
|
||||
|
||||
class TestVolumeTarget(base.TestCase):
|
||||
|
||||
def test_basic(self):
|
||||
sot = volume_target.VolumeTarget()
|
||||
self.assertIsNone(sot.resource_key)
|
||||
|
@ -24,7 +24,6 @@ from openstack.tests.unit import test_proxy_base
|
||||
|
||||
@mock.patch.object(introspection.Introspection, 'create', autospec=True)
|
||||
class TestStartIntrospection(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestStartIntrospection, self).setUp()
|
||||
self.session = mock.Mock(spec=adapter.Adapter)
|
||||
@ -44,27 +43,27 @@ class TestStartIntrospection(base.TestCase):
|
||||
|
||||
def test_create_introspection_manage_boot(self, mock_create):
|
||||
self.proxy.start_introspection('abcd', manage_boot=False)
|
||||
mock_create.assert_called_once_with(mock.ANY, self.proxy,
|
||||
manage_boot=False)
|
||||
mock_create.assert_called_once_with(
|
||||
mock.ANY, self.proxy, manage_boot=False
|
||||
)
|
||||
introspect = mock_create.call_args[0][0]
|
||||
self.assertEqual('abcd', introspect.id)
|
||||
|
||||
|
||||
class TestBaremetalIntrospectionProxy(test_proxy_base.TestProxyBase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestBaremetalIntrospectionProxy, self).setUp()
|
||||
self.proxy = _proxy.Proxy(self.session)
|
||||
|
||||
def test_get_introspection(self):
|
||||
self.verify_get(self.proxy.get_introspection,
|
||||
introspection.Introspection)
|
||||
self.verify_get(
|
||||
self.proxy.get_introspection, introspection.Introspection
|
||||
)
|
||||
|
||||
|
||||
@mock.patch('time.sleep', lambda _sec: None)
|
||||
@mock.patch.object(introspection.Introspection, 'fetch', autospec=True)
|
||||
class TestWaitForIntrospection(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestWaitForIntrospection, self).setUp()
|
||||
self.session = mock.Mock(spec=adapter.Adapter)
|
||||
@ -96,10 +95,12 @@ class TestWaitForIntrospection(base.TestCase):
|
||||
self.assertEqual(2, mock_fetch.call_count)
|
||||
|
||||
def test_timeout(self, mock_fetch):
|
||||
self.assertRaises(exceptions.ResourceTimeout,
|
||||
self.proxy.wait_for_introspection,
|
||||
self.introspection,
|
||||
timeout=0.001)
|
||||
self.assertRaises(
|
||||
exceptions.ResourceTimeout,
|
||||
self.proxy.wait_for_introspection,
|
||||
self.introspection,
|
||||
timeout=0.001,
|
||||
)
|
||||
mock_fetch.assert_called_with(self.introspection, self.proxy)
|
||||
|
||||
def test_failure(self, mock_fetch):
|
||||
@ -109,9 +110,12 @@ class TestWaitForIntrospection(base.TestCase):
|
||||
self.introspection.error = 'boom'
|
||||
|
||||
mock_fetch.side_effect = _side_effect
|
||||
self.assertRaisesRegex(exceptions.ResourceFailure, 'boom',
|
||||
self.proxy.wait_for_introspection,
|
||||
self.introspection)
|
||||
self.assertRaisesRegex(
|
||||
exceptions.ResourceFailure,
|
||||
'boom',
|
||||
self.proxy.wait_for_introspection,
|
||||
self.introspection,
|
||||
)
|
||||
mock_fetch.assert_called_once_with(self.introspection, self.proxy)
|
||||
|
||||
def test_failure_ignored(self, mock_fetch):
|
||||
@ -121,15 +125,15 @@ class TestWaitForIntrospection(base.TestCase):
|
||||
self.introspection.error = 'boom'
|
||||
|
||||
mock_fetch.side_effect = _side_effect
|
||||
result = self.proxy.wait_for_introspection(self.introspection,
|
||||
ignore_error=True)
|
||||
result = self.proxy.wait_for_introspection(
|
||||
self.introspection, ignore_error=True
|
||||
)
|
||||
self.assertIs(result, self.introspection)
|
||||
mock_fetch.assert_called_once_with(self.introspection, self.proxy)
|
||||
|
||||
|
||||
@mock.patch.object(_proxy.Proxy, 'request', autospec=True)
|
||||
class TestAbortIntrospection(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestAbortIntrospection, self).setUp()
|
||||
self.session = mock.Mock(spec=adapter.Adapter)
|
||||
@ -141,14 +145,17 @@ class TestAbortIntrospection(base.TestCase):
|
||||
mock_request.return_value.status_code = 202
|
||||
self.proxy.abort_introspection(self.introspection)
|
||||
mock_request.assert_called_once_with(
|
||||
self.proxy, 'introspection/1234/abort', 'POST',
|
||||
headers=mock.ANY, microversion=mock.ANY,
|
||||
retriable_status_codes=[409, 503])
|
||||
self.proxy,
|
||||
'introspection/1234/abort',
|
||||
'POST',
|
||||
headers=mock.ANY,
|
||||
microversion=mock.ANY,
|
||||
retriable_status_codes=[409, 503],
|
||||
)
|
||||
|
||||
|
||||
@mock.patch.object(_proxy.Proxy, 'request', autospec=True)
|
||||
class TestGetData(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestGetData, self).setUp()
|
||||
self.session = mock.Mock(spec=adapter.Adapter)
|
||||
@ -160,15 +167,24 @@ class TestGetData(base.TestCase):
|
||||
mock_request.return_value.status_code = 200
|
||||
data = self.proxy.get_introspection_data(self.introspection)
|
||||
mock_request.assert_called_once_with(
|
||||
self.proxy, 'introspection/1234/data', 'GET',
|
||||
headers=mock.ANY, microversion=mock.ANY)
|
||||
self.proxy,
|
||||
'introspection/1234/data',
|
||||
'GET',
|
||||
headers=mock.ANY,
|
||||
microversion=mock.ANY,
|
||||
)
|
||||
self.assertIs(data, mock_request.return_value.json.return_value)
|
||||
|
||||
def test_get_unprocessed_data(self, mock_request):
|
||||
mock_request.return_value.status_code = 200
|
||||
data = self.proxy.get_introspection_data(self.introspection,
|
||||
processed=False)
|
||||
data = self.proxy.get_introspection_data(
|
||||
self.introspection, processed=False
|
||||
)
|
||||
mock_request.assert_called_once_with(
|
||||
self.proxy, 'introspection/1234/data/unprocessed', 'GET',
|
||||
headers=mock.ANY, microversion='1.17')
|
||||
self.proxy,
|
||||
'introspection/1234/data/unprocessed',
|
||||
'GET',
|
||||
headers=mock.ANY,
|
||||
microversion='1.17',
|
||||
)
|
||||
self.assertIs(data, mock_request.return_value.json.return_value)
|
||||
|