Merge remote-tracking branch 'shade/master' into merge-shade

Incorporate the latest changes in from shade repo.

Change-Id: I823f731b542cce5a3f25e4740f55f18628e2eacd
This commit is contained in:
Monty Taylor 2017-10-13 06:34:34 -05:00
commit cc8c7cb079
No known key found for this signature in database
GPG Key ID: 7BAE94BC7141A594
14 changed files with 343 additions and 75 deletions

View File

@ -65,6 +65,9 @@ If all of the project information is None, then
domain_name=str() or None))
Resources
=========
Flavor
------
@ -324,34 +327,6 @@ A Floating IP from Neutron or Nova
revision_number=int() or None,
properties=dict())
Project
-------
A Project from Keystone (or a tenant if Keystone v2)
Location information for Project has some specific semantics.
If the project has a parent project, that will be in location.project.id,
and if it doesn't that should be None. If the Project is associated with
a domain that will be in location.project.domain_id regardless of the current
user's token scope. location.project.name and location.project.domain_name
will always be None. Finally, location.region_name will always be None as
Projects are global to a cloud. If a deployer happens to deploy OpenStack
in such a way that users and projects are not shared amongst regions, that
necessitates treating each of those regions as separate clouds from shade's
POV.
.. code-block:: python
Project = dict(
location=Location(),
id=str(),
name=str(),
description=str(),
is_enabled=bool(),
is_domain=bool(),
properties=dict())
Volume
------
@ -502,3 +477,56 @@ A Stack from Heat
tempate_description=str(),
timeout_mins=int(),
properties=dict())
Identity Resources
==================
Identity Resources are slightly different.
They are global to a cloud, so location.availability_zone and
location.region_name will always be None. If a deployer happens to deploy
OpenStack in such a way that users and projects are not shared amongst regions,
that necessitates treating each of those regions as separate clouds from
shade's POV.
The Identity Resources that are not Project do not exist within a Project,
so all of the values in ``location.project`` will be None.
Project
-------
A Project from Keystone (or a tenant if Keystone v2)
Location information for Project has some additional specific semantics.
If the project has a parent project, that will be in ``location.project.id``,
and if it doesn't that should be ``None``.
If the Project is associated with a domain that will be in
``location.project.domain_id`` in addition to the normal ``domain_id``
regardless of the current user's token scope.
.. code-block:: python
Project = dict(
location=Location(),
id=str(),
name=str(),
description=str(),
is_enabled=bool(),
is_domain=bool(),
domain_id=str(),
properties=dict())
Role
----
A Role from Keystone
.. code-block:: python
Role = dict(
location=Location(),
id=str(),
name=str(),
domain_id=str(),
properties=dict())

View File

@ -647,19 +647,14 @@ class Normalizer(object):
description = project.pop('description', '')
is_enabled = project.pop('enabled', True)
# Projects are global - strip region
location = self._get_current_location(project_id=project_id)
location['region_name'] = None
# v3 additions
domain_id = project.pop('domain_id', 'default')
parent_id = project.pop('parent_id', None)
is_domain = project.pop('is_domain', False)
# Projects have a special relationship with location
location = self._get_identity_location()
location['project']['domain_id'] = domain_id
location['project']['domain_name'] = None
location['project']['name'] = None
location['project']['id'] = parent_id
ret = munch.Munch(
@ -669,13 +664,13 @@ class Normalizer(object):
description=description,
is_enabled=is_enabled,
is_domain=is_domain,
domain_id=domain_id,
properties=project.copy()
)
# Backwards compat
if not self.strict_mode:
ret['enabled'] = is_enabled
ret['domain_id'] = domain_id
ret['parent_id'] = parent_id
for key, val in ret['properties'].items():
ret.setdefault(key, val)
@ -1093,3 +1088,21 @@ class Normalizer(object):
# TODO(mordred) Normalize this resource
return machine
def _normalize_roles(self, roles):
"""Normalize Keystone roles"""
ret = []
for role in roles:
ret.append(self._normalize_role(role))
return ret
def _normalize_role(self, role):
    """Normalize a single Identity (Keystone) role.

    :param role: Raw role dict as returned by the Identity API.
    :returns: A ``munch.Munch`` carrying ``id``, ``name``, ``domain_id``,
        an identity-scoped ``location`` and an empty ``properties`` dict.
    """
    normalized = dict(
        id=role.get('id'),
        name=role.get('name'),
        domain_id=role.get('domain_id'),
        # Roles are global identity resources; location has no project.
        location=self._get_identity_location(),
        properties={},
    )
    return munch.Munch(**normalized)

View File

@ -374,17 +374,6 @@ def normalize_role_assignments(assignments):
return new_assignments
def normalize_roles(roles):
    """Normalize Identity roles.

    :param roles: Iterable of raw role dicts.
    :returns: Munch-ified list retaining only ``id`` and ``name``.
    """
    trimmed = [{'id': role.get('id'), 'name': role.get('name')}
               for role in roles]
    return meta.obj_list_to_munch(trimmed)
def normalize_flavor_accesses(flavor_accesses):
"""Normalize Flavor access list."""
return [munch.Munch(

View File

@ -681,6 +681,18 @@ class OpenStackCloud(_normalize.Normalizer):
project=self._get_project_info(project_id),
)
def _get_identity_location(self):
    """Build a Location for identity resources.

    Identity resources do not exist inside of projects, so every
    project field is None; they are also region-agnostic.
    """
    empty_project = munch.Munch(
        id=None,
        name=None,
        domain_id=None,
        domain_name=None,
    )
    return munch.Munch(
        cloud=self.name,
        region_name=None,
        zone=None,
        project=empty_project,
    )
def _get_project_id_param_dict(self, name_or_id):
if name_or_id:
project = self.get_project(name_or_id)
@ -4715,9 +4727,8 @@ class OpenStackCloud(_normalize.Normalizer):
"Timeout waiting for the image to import."):
try:
if image_id is None:
data = self._image_client.get(
status = self._image_client.get(
'/tasks/{id}'.format(id=glance_task.id))
status = self._get_and_munchify('images', data=data)
except OpenStackCloudHTTPError as e:
if e.response.status_code == 503:
# Clear the exception so that it doesn't linger
@ -4727,8 +4738,8 @@ class OpenStackCloud(_normalize.Normalizer):
continue
raise
if status.status == 'success':
image_id = status.result['image_id']
if status['status'] == 'success':
image_id = status['result']['image_id']
try:
image = self.get_image(image_id)
except OpenStackCloudHTTPError as e:
@ -4747,15 +4758,15 @@ class OpenStackCloud(_normalize.Normalizer):
"Image Task %s imported %s in %s",
glance_task.id, image_id, (time.time() - start))
return self.get_image(image_id)
if status.status == 'failure':
if status.message == IMAGE_ERROR_396:
elif status['status'] == 'failure':
if status['message'] == IMAGE_ERROR_396:
glance_task = self._image_client.post(
'/tasks', data=task_args)
self.list_images.invalidate(self)
else:
raise OpenStackCloudException(
"Image creation failed: {message}".format(
message=status.message),
message=status['message']),
extra_data=status)
else:
return glance_task
@ -4818,7 +4829,7 @@ class OpenStackCloud(_normalize.Normalizer):
def create_volume(
self, size,
wait=True, timeout=None, image=None, **kwargs):
wait=True, timeout=None, image=None, bootable=None, **kwargs):
"""Create a volume.
:param size: Size, in GB of the volume to create.
@ -4828,6 +4839,8 @@ class OpenStackCloud(_normalize.Normalizer):
:param timeout: Seconds to wait for volume creation. None is forever.
:param image: (optional) Image name, ID or object from which to create
the volume
:param bootable: (optional) Make this volume bootable. If set, wait
will also be set to true.
:param kwargs: Keyword arguments as expected for cinder client.
:returns: The created volume object.
@ -4835,6 +4848,9 @@ class OpenStackCloud(_normalize.Normalizer):
:raises: OpenStackCloudTimeout if wait time exceeded.
:raises: OpenStackCloudException on operation error.
"""
if bootable is not None:
wait = True
if image:
image_obj = self.get_image(image)
if not image_obj:
@ -4870,6 +4886,10 @@ class OpenStackCloud(_normalize.Normalizer):
continue
if volume['status'] == 'available':
if bootable is not None:
self.set_volume_bootable(volume, bootable=bootable)
# no need to re-fetch to update the flag, just set it.
volume['bootable'] = bootable
return volume
if volume['status'] == 'error':
@ -4877,6 +4897,31 @@ class OpenStackCloud(_normalize.Normalizer):
return self._normalize_volume(volume)
def set_volume_bootable(self, name_or_id, bootable=True):
    """Set a volume's bootable flag.

    :param name_or_id: Name, unique ID of the volume or a volume dict.
    :param bool bootable: Whether the volume should be bootable.
        (Defaults to True)

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    vol = self.get_volume(name_or_id)
    if not vol:
        raise OpenStackCloudException(
            "Volume {name_or_id} does not exist".format(
                name_or_id=name_or_id))
    # The bootable flag is toggled via the volume action API.
    action_url = 'volumes/{id}/action'.format(id=vol['id'])
    error_message = "Error setting bootable on volume {volume}".format(
        volume=vol['id'])
    self._volume_client.post(
        action_url,
        json={'os-set_bootable': {'bootable': bootable}},
        error_message=error_message,
    )
def delete_volume(self, name_or_id=None, wait=True, timeout=None,
force=False):
"""Delete a volume.
@ -6396,17 +6441,19 @@ class OpenStackCloud(_normalize.Normalizer):
'block_device_mapping_v2', 'nics', 'scheduler_hints',
'config_drive', 'admin_pass', 'disk_config')
def create_server(
self, name, image, flavor,
self, name, image=None, flavor=None,
auto_ip=True, ips=None, ip_pool=None,
root_volume=None, terminate_volume=False,
wait=False, timeout=180, reuse_ips=True,
network=None, boot_from_volume=False, volume_size='50',
boot_volume=None, volumes=None, nat_destination=None,
group=None,
**kwargs):
"""Create a virtual server instance.
:param name: Something to name the server.
:param image: Image dict, name or ID to boot with.
:param image: Image dict, name or ID to boot with. image is required
unless boot_volume is given.
:param flavor: Flavor dict, name or ID to boot onto.
:param auto_ip: Whether to take actions to find a routable IP for
the server. (defaults to True)
@ -6480,24 +6527,35 @@ class OpenStackCloud(_normalize.Normalizer):
be attached to, if it's not possible to
infer from the cloud's configuration.
(Optional, defaults to None)
:param group: ServerGroup dict, name or id to boot the server in.
If a group is provided in both scheduler_hints and in
the group param, the group param will win.
(Optional, defaults to None)
:returns: A ``munch.Munch`` representing the created server.
:raises: OpenStackCloudException on operation error.
"""
# TODO(shade) Image is optional but flavor is not - yet flavor comes
# after image in the argument list. Doh.
if not flavor:
raise TypeError(
"create_server() missing 1 required argument: 'flavor'")
if not image and not boot_volume:
raise TypeError(
"create_server() requires either 'image' or 'boot_volume'")
# TODO(mordred) Add support for description starting in 2.19
security_groups = kwargs.get('security_groups', [])
if security_groups and not isinstance(kwargs['security_groups'], list):
security_groups = [security_groups]
if security_groups:
kwargs['security_groups'] = []
for group in security_groups:
kwargs['security_groups'].append(dict(name=group))
for sec_group in security_groups:
kwargs['security_groups'].append(dict(name=sec_group))
if 'userdata' in kwargs:
user_data = kwargs.pop('userdata')
if user_data:
kwargs['user_data'] = self._encode_server_userdata(user_data)
for (desired, given) in (
('OS-DCF:diskConfig', 'disk_config'),
('os:scheduler_hints', 'scheduler_hints'),
('config_drive', 'config_drive'),
('key_name', 'key_name'),
('metadata', 'meta'),
@ -6506,6 +6564,16 @@ class OpenStackCloud(_normalize.Normalizer):
if value:
kwargs[desired] = value
hints = kwargs.pop('scheduler_hints', {})
if group:
group_obj = self.get_server_group(group)
if not group_obj:
raise OpenStackCloudException(
"Server Group {group} was requested but was not found"
" on the cloud".format(group=group))
hints['group'] = group_obj['id']
if hints:
kwargs['os:scheduler_hints'] = hints
kwargs.setdefault('max_count', kwargs.get('max_count', 1))
kwargs.setdefault('min_count', kwargs.get('min_count', 1))
@ -6576,7 +6644,7 @@ class OpenStackCloud(_normalize.Normalizer):
kwargs['imageRef'] = image['id']
else:
kwargs['imageRef'] = self.get_image(image).id
if flavor and isinstance(flavor, dict):
if isinstance(flavor, dict):
kwargs['flavorRef'] = flavor['id']
else:
kwargs['flavorRef'] = self.get_flavor(flavor, get_extra=False).id

View File

@ -1375,9 +1375,12 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
self.list_groups.invalidate(self)
return True
def list_roles(self):
@_utils.valid_kwargs('domain_id')
def list_roles(self, **kwargs):
"""List Keystone roles.
:param domain_id: domain id for listing roles (v3)
:returns: a list of ``munch.Munch`` containing the role description.
:raises: ``OpenStackCloudException``: if something goes wrong during
@ -1386,14 +1389,16 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
v2 = self._is_client_version('identity', 2)
url = '/OS-KSADM/roles' if v2 else '/roles'
data = self._identity_client.get(
url, error_message="Failed to list roles")
return _utils.normalize_roles(self._get_and_munchify('roles', data))
url, params=kwargs, error_message="Failed to list roles")
return self._normalize_roles(self._get_and_munchify('roles', data))
def search_roles(self, name_or_id=None, filters=None):
@_utils.valid_kwargs('domain_id')
def search_roles(self, name_or_id=None, filters=None, **kwargs):
"""Search Keystone roles.
:param string name: role name or id.
:param dict filters: a dict containing additional filters to use.
:param domain_id: domain id (v3)
:returns: a list of ``munch.Munch`` containing the role description.
Each ``munch.Munch`` contains the following attributes::
@ -1405,14 +1410,16 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
:raises: ``OpenStackCloudException``: if something goes wrong during
the openstack API call.
"""
roles = self.list_roles()
roles = self.list_roles(**kwargs)
return _utils._filter_list(roles, name_or_id, filters)
def get_role(self, name_or_id, filters=None):
@_utils.valid_kwargs('domain_id')
def get_role(self, name_or_id, filters=None, **kwargs):
"""Get exactly one Keystone role.
:param id: role name or id.
:param filters: a dict containing additional filters to use.
:param domain_id: domain id (v3)
:returns: a single ``munch.Munch`` containing the role description.
Each ``munch.Munch`` contains the following attributes::
@ -1424,7 +1431,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
:raises: ``OpenStackCloudException``: if something goes wrong during
the openstack API call.
"""
return _utils._get_entity(self, 'role', name_or_id, filters)
return _utils._get_entity(self, 'role', name_or_id, filters, **kwargs)
def _keystone_v2_role_assignments(self, user, project=None,
role=None, **kwargs):
@ -1686,10 +1693,12 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
return _utils.normalize_flavor_accesses(
self._get_and_munchify('flavor_access', data))
def create_role(self, name):
@_utils.valid_kwargs('domain_id')
def create_role(self, name, **kwargs):
"""Create a Keystone role.
:param string name: The name of the role.
:param domain_id: domain id (v3)
:returns: a ``munch.Munch`` containing the role description
@ -1697,23 +1706,55 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
"""
v2 = self._is_client_version('identity', 2)
url = '/OS-KSADM/roles' if v2 else '/roles'
kwargs['name'] = name
msg = 'Failed to create role {name}'.format(name=name)
data = self._identity_client.post(
url, json={'role': {'name': name}}, error_message=msg)
url, json={'role': kwargs}, error_message=msg)
role = self._get_and_munchify('role', data)
return _utils.normalize_roles([role])[0]
return self._normalize_role(role)
def delete_role(self, name_or_id):
@_utils.valid_kwargs('domain_id')
def update_role(self, name_or_id, name, **kwargs):
    """Update a Keystone role.

    :param name_or_id: Name or id of the role to update
    :param string name: The new role name
    :param domain_id: domain id

    :returns: a ``munch.Munch`` containing the role description, or
        False if no matching role was found.

    :raise OpenStackCloudException: if the role cannot be updated
    """
    # Role update is only available with Identity v3.
    if self._is_client_version('identity', 2):
        raise OpenStackCloudUnavailableFeature(
            'Unavailable Feature: Role update requires Identity v3'
        )
    # get_role takes name_or_id (plus optional domain_id) via kwargs.
    kwargs['name_or_id'] = name_or_id
    role = self.get_role(**kwargs)
    if role is None:
        self.log.debug(
            "Role %s not found for updating", name_or_id)
        return False
    msg = 'Failed to update role {name}'.format(name=name_or_id)
    json_kwargs = {'role_id': role.id, 'role': {'name': name}}
    data = self._identity_client.patch('/roles', error_message=msg,
                                       json=json_kwargs)
    role = self._get_and_munchify('role', data)
    return self._normalize_role(role)
@_utils.valid_kwargs('domain_id')
def delete_role(self, name_or_id, **kwargs):
"""Delete a Keystone role.
:param string name_or_id: Name or id of the role to delete.
:param domain_id: domain id (v3)
:returns: True if delete succeeded, False otherwise.
:raises: ``OpenStackCloudException`` if something goes wrong during
the openstack API call.
"""
role = self.get_role(name_or_id)
role = self.get_role(name_or_id, **kwargs)
if role is None:
self.log.debug(
"Role %s not found for deleting", name_or_id)

View File

@ -7,7 +7,9 @@
- { role: auth, tags: auth }
- { role: client_config, tags: client_config }
- { role: group, tags: group }
- { role: image, tags: image }
# TODO(mordred) Reenable this once the fixed os_image winds up in an
# upstream ansible release.
# - { role: image, tags: image }
- { role: keypair, tags: keypair }
- { role: keystone_domain, tags: keystone_domain }
- { role: keystone_role, tags: keystone_role }

View File

@ -99,6 +99,7 @@ class TestCompute(base.BaseFunctionalTestCase):
self.assertIsNone(self.user_cloud.get_server(self.server_name))
def test_attach_detach_volume(self):
self.skipTest('Volume functional tests temporarily disabled')
server_name = self.getUniqueString()
self.addCleanup(self._cleanup_servers_and_volumes, server_name)
server = self.user_cloud.create_server(
@ -263,6 +264,7 @@ class TestCompute(base.BaseFunctionalTestCase):
return volume_id
def test_create_boot_from_volume_image(self):
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -300,6 +302,7 @@ class TestCompute(base.BaseFunctionalTestCase):
return
def test_create_terminate_volume_image(self):
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -322,6 +325,7 @@ class TestCompute(base.BaseFunctionalTestCase):
self.assertIsNone(self.user_cloud.get_server(self.server_name))
def test_create_boot_from_volume_preexisting(self):
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -349,6 +353,7 @@ class TestCompute(base.BaseFunctionalTestCase):
self.assertIsNone(self.user_cloud.get_volume(volume_id))
def test_create_boot_attach_volume(self):
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -376,6 +381,7 @@ class TestCompute(base.BaseFunctionalTestCase):
self.assertIsNone(self.user_cloud.get_volume(volume_id))
def test_create_boot_from_volume_preexisting_terminate(self):
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)

View File

@ -32,6 +32,7 @@ class TestVolume(base.BaseFunctionalTestCase):
def setUp(self):
super(TestVolume, self).setUp()
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')

View File

@ -18,6 +18,7 @@ class TestVolume(base.BaseFunctionalTestCase):
def setUp(self):
super(TestVolume, self).setUp()
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')

View File

@ -101,6 +101,30 @@ class TestIdentityRoles(base.RequestsMockTestCase):
self.assertThat(role.id, matchers.Equals(role_data.role_id))
self.assert_calls()
def test_update_role(self):
    # update_role first GETs the existing role, then PATCHes /roles
    # with the new name; register both calls in order and verify the
    # PATCH request body plus the normalized result.
    role_data = self._get_role_data()
    req = {'role_id': role_data.role_id,
           'role': {'name': role_data.role_name}}
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(),
             status_code=200,
             json={'roles': [role_data.json_response['role']]}),
        dict(method='PATCH',
             uri=self.get_mock_url(),
             status_code=200,
             json=role_data.json_response,
             validate=dict(json=req))
    ])
    role = self.op_cloud.update_role(role_data.role_id,
                                     role_data.role_name)
    self.assertIsNotNone(role)
    self.assertThat(role.name, matchers.Equals(role_data.role_name))
    self.assertThat(role.id, matchers.Equals(role_data.role_id))
    self.assert_calls()
def test_delete_role_by_id(self):
role_data = self._get_role_data()
self.register_uris([

View File

@ -341,7 +341,7 @@ class TestImage(BaseTestImage):
dict(method='GET',
uri='https://image.example.com/v2/tasks/{id}'.format(
id=task_id),
json={'images': args}),
json=args),
dict(method='GET', uri='https://image.example.com/v2/images',
json={'images': [image_no_checksums]}),
dict(method='PATCH',

View File

@ -292,7 +292,8 @@ class TestVolume(base.RequestsMockTestCase):
uri=self.get_mock_url(
'volumev2', 'public',
append=['volumes', volume.id, 'action']),
json={'os-force_delete': None}),
validate=dict(
json={'os-force_delete': None})),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
@ -300,6 +301,42 @@ class TestVolume(base.RequestsMockTestCase):
self.assertTrue(self.cloud.delete_volume(volume['id'], force=True))
self.assert_calls()
def test_set_volume_bootable(self):
    """set_volume_bootable POSTs os-set_bootable with bootable=True."""
    vol = {'id': 'volume001', 'status': 'attached',
           'name': '', 'attachments': []}
    volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'volumev2', 'public', append=['volumes', 'detail']),
             json={'volumes': [volume]}),
        dict(method='POST',
             uri=self.get_mock_url(
                 'volumev2', 'public',
                 append=['volumes', volume.id, 'action']),
             # Validate the request body rather than echoing it as a
             # mocked response; the action API returns no body.
             validate=dict(
                 json={'os-set_bootable': {'bootable': True}})),
    ])
    self.cloud.set_volume_bootable(volume['id'])
    self.assert_calls()
def test_set_volume_bootable_false(self):
    """set_volume_bootable(..., bootable=False) POSTs bootable=False."""
    vol = {'id': 'volume001', 'status': 'attached',
           'name': '', 'attachments': []}
    volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'volumev2', 'public', append=['volumes', 'detail']),
             json={'volumes': [volume]}),
        dict(method='POST',
             uri=self.get_mock_url(
                 'volumev2', 'public',
                 append=['volumes', volume.id, 'action']),
             # Validate the request body rather than echoing it as a
             # mocked response; the action API returns no body.
             validate=dict(
                 json={'os-set_bootable': {'bootable': False}})),
    ])
    self.cloud.set_volume_bootable(volume['id'], bootable=False)
    self.assert_calls()
def test_list_volumes_with_pagination(self):
vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
vol2 = meta.obj_to_munch(fakes.FakeVolume('02', 'available', 'vol2'))
@ -448,3 +485,53 @@ class TestVolume(base.RequestsMockTestCase):
self.cloud._normalize_volume(vol1),
self.cloud.get_volume_by_id('01'))
self.assert_calls()
def test_create_volume(self):
    # create_volume POSTs the size/name payload, then (with wait=True,
    # the default) polls the volume list until the volume is available.
    vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
    self.register_uris([
        dict(method='POST',
             uri=self.get_mock_url(
                 'volumev2', 'public', append=['volumes']),
             json={'volume': vol1},
             validate=dict(json={
                 'volume': {
                     'size': 50,
                     'name': 'vol1',
                 }})),
        dict(method='GET',
             uri=self.get_mock_url(
                 'volumev2', 'public',
                 append=['volumes', 'detail']),
             json={'volumes': [vol1]}),
    ])
    self.cloud.create_volume(50, name='vol1')
    self.assert_calls()
def test_create_bootable_volume(self):
    # With bootable=True, create_volume must additionally POST the
    # os-set_bootable action once the volume becomes available.
    vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
    self.register_uris([
        dict(method='POST',
             uri=self.get_mock_url(
                 'volumev2', 'public', append=['volumes']),
             json={'volume': vol1},
             validate=dict(json={
                 'volume': {
                     'size': 50,
                     'name': 'vol1',
                 }})),
        dict(method='GET',
             uri=self.get_mock_url(
                 'volumev2', 'public',
                 append=['volumes', 'detail']),
             json={'volumes': [vol1]}),
        dict(method='POST',
             uri=self.get_mock_url(
                 'volumev2', 'public',
                 append=['volumes', '01', 'action']),
             validate=dict(
                 json={'os-set_bootable': {'bootable': True}})),
    ])
    self.cloud.create_volume(50, name='vol1', bootable=True)
    self.assert_calls()

View File

@ -0,0 +1,4 @@
---
features:
- Added ``group`` parameter to create_server to allow
booting a server into a specific server group.

View File

@ -0,0 +1,4 @@
---
features:
- Added a ``set_volume_bootable`` call to allow toggling the bootable state
of a volume.