Get rid of Nova DB access
Important features:
- A Nova client with microversion support is required.
- A Nova API server that doesn't support microversion 2.3 is not tested.
- Attaching volumes are not displayed in an instance bdm, because at that
  stage a Cinder volume doesn't yet contain attachment info (which carries
  the device name), and a Nova instance's volumes_attached doesn't contain
  a device name (mountpoint), but a bdm must contain one.

Other features:
- v2.3 is always requested.
- A RunInstances result is formatted without the extra v2.3 info, so it
  doesn't contain rootDeviceName and similar attributes.
- If the rootDeviceName property is empty, it is omitted instead of
  reporting a fake /dev/sda1 as Nova EC2 does. In this case the
  rootDeviceType property is omitted as well. The same is done for the
  corresponding image properties.
- The deleteOnTermination property is omitted for volumes entirely, and for
  an instance bdm if Nova doesn't report it. Previously this volume property
  always contained 'False'.
- DescribeVolumes isn't reused in DescribeInstances (as opposed to
  DescribeNetworkInterfaces), because both operations require the actual
  state of the corresponding OS objects, so this avoids duplicate OS
  requests.
- A not yet merged Nova client change is used:
  https://review.openstack.org/#/c/152569/

Also:
- fix multi-run instances for EC2 Classic mode
- get OS instance security groups safely
- rename the ec2context module alias to ec2_context, as in the rest of the
  code
- rename fakes.CinderVolume to the local standard OSVolume
- change fakes.OSInstance to be initialized from a dictionary, as is done
  for the other fakes.OSXxx objects
- fix code style

Depends-On: Icf2b9739aaf87b4c9af13ad64a310081a68f776e
Change-Id: Id65ea0f56ffd889286d5ca082e1daf2643205c52
parent 5480e9b850
commit 8282371da4
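
The bdm note above amounts to joining two partial views of the same attachment: the Cinder attachment record supplies the device name (mountpoint), while Nova's 2.3 os-extended-volumes:volumes_attached supplies delete_on_termination. The following is a minimal illustrative sketch of that join, not the literal ec2-api code (see _cloud_format_instance_bdm in the diff below); the helper name build_block_device_mapping is hypothetical, os_instance stands for a novaclient Server fetched with microversion 2.3, and os_volumes for the Cinder volumes attached to it.

    # Sketch only: Cinder attachments carry the device name, Nova's
    # os-extended-volumes:volumes_attached carries delete_on_termination.
    def build_block_device_mapping(os_instance, os_volumes):
        volumes_attached = getattr(
            os_instance, 'os-extended-volumes:volumes_attached', [])
        mapping = []
        for os_volume in os_volumes:
            os_attachment = next(iter(os_volume.attachments), {})
            device_name = os_attachment.get('device')
            if not device_name:
                # the volume is still attaching: no device name yet, skip it
                continue
            ebs = {'volumeId': os_volume.id, 'status': os_volume.status}
            attached = next((va for va in volumes_attached
                             if va['id'] == os_volume.id), None)
            if attached and 'delete_on_termination' in attached:
                # omitted entirely when Nova doesn't report it
                ebs['deleteOnTermination'] = attached['delete_on_termination']
            mapping.append({'deviceName': device_name, 'ebs': ebs})
        return mapping
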
@@ -137,6 +137,8 @@ Instance related:
 - spotInstanceRequestId Instance property
 - stateReason Instance property
 - virtualizationType Instance property
+- instanceInitiatedShutdownBehavior Instance attribute
+- disableApiTermination Instance attribute
 - attachTime EbsInstanceBlockDevice property
 
 Network interface related:
@@ -190,7 +190,6 @@ function configure_ec2api {
 
     # configure the database.
     iniset $EC2API_CONF_FILE database connection `database_connection_url ec2api`
-    iniset $EC2API_CONF_FILE database connection_nova `database_connection_url nova`
 
     configure_ec2api_networking
 
@@ -2,3 +2,15 @@
 
 # we have to add ec2-api to enabled services for screen_it to work
 enable_service ec2-api
+
+# we have to use Nova client supported Nova microversions,
+# but related changes are not done in the client release.
+# So we temporary use a not commited patch
+# https://review.openstack.org/#/c/152569/
+LIBS_FROM_GIT=python-novaclient
+# Since legal way to set git repository do not work for a plugin,
+# we set internal DevStack's variables directly
+# NOVACLIENT_REPO=https://review.openstack.org/openstack/python-novaclient
+# NOVACLIENT_BRANCH=refs/changes/69/152569/14
+GITREPO["python-novaclient"]=https://review.openstack.org/openstack/python-novaclient
+GITBRANCH["python-novaclient"]=refs/changes/69/152569/14
@@ -44,7 +44,7 @@ except ImportError:
     logger.info(_('glanceclient not available'))


-def nova(context, microversion=None):
+def nova(context):
     args = {
         'project_id': context.project_id,
         'auth_url': CONF.keystone_url,
@@ -53,7 +53,9 @@ def nova(context, microversion=None):
         'auth_token': context.auth_token,
         'bypass_url': _url_for(context, service_type='computev21'),
     }
-    return novaclient.Client(microversion or 2, **args)
+    # Nova API's 2.3 microversion provides additional EC2 complient instance
+    # attributes
+    return novaclient.Client(2.3, **args)


 def neutron(context):
@@ -100,16 +100,16 @@ IMAGE_TYPES = {'aki': 'kernel',
 def create_image(context, instance_id, name=None, description=None,
                  no_reboot=False, block_device_mapping=None):
     instance = ec2utils.get_db_item(context, instance_id)
-    nova = clients.nova(context)
-    os_instance = nova.servers.get(instance['os_id'])
 
-    if not instance_api._is_ebs_instance(context, os_instance):
+    if not instance_api._is_ebs_instance(context, instance['os_id']):
         # TODO(ft): Change the error code and message with the real AWS ones
         msg = _('The instance is not an EBS-backed instance.')
         raise exception.InvalidParameterValue(value=instance_id,
                                               parameter='InstanceId',
                                               reason=msg)
 
+    nova = clients.nova(context)
+    os_instance = nova.servers.get(instance['os_id'])
     restart_instance = False
     if not no_reboot and os_instance.status != 'SHUTOFF':
         if os_instance.status != 'ACTIVE':
@@ -333,11 +333,8 @@ def describe_image_attribute(context, image_id, attribute):
 
 # NOTE(ft): Openstack extension, AWS-incompability
 def _root_device_name_attribute(os_image, result):
-    _prop_root_dev_name = _block_device_properties_root_device_name
-    result['rootDeviceName'] = _prop_root_dev_name(os_image.properties)
-    if result['rootDeviceName'] is None:
-        result['rootDeviceName'] = (
-            instance_api._block_device_DEFAULT_ROOT_DEV_NAME)
+    result['rootDeviceName'] = (
+        _block_device_properties_root_device_name(os_image.properties))
 
 supported_attributes = {
     'blockDeviceMapping': _block_device_mapping_attribute,
@@ -439,18 +436,18 @@ def _format_image(context, image, os_image, images_dict, ids_dict,
 
     _prepare_mappings(os_image)
     properties = os_image.properties
-    ec2_image['rootDeviceName'] = (
-        _block_device_properties_root_device_name(properties) or
-        instance_api._block_device_DEFAULT_ROOT_DEV_NAME)
+    root_device_name = _block_device_properties_root_device_name(properties)
+    if root_device_name:
+        ec2_image['rootDeviceName'] = root_device_name
 
     root_device_type = 'instance-store'
-    root_device_name = instance_api._block_device_strip_dev(
-        ec2_image['rootDeviceName'])
+    short_root_device_name = instance_api._block_device_strip_dev(
+        root_device_name)
     for bdm in properties.get('block_device_mapping', []):
         if (('snapshot_id' in bdm or 'volume_id' in bdm) and
                 not bdm.get('no_device') and
                 (bdm.get('boot_index') == 0 or
-                 root_device_name ==
+                 short_root_device_name ==
                  instance_api._block_device_strip_dev(
                      bdm.get('device_name')))):
             root_device_type = 'ebs'
@@ -458,7 +455,7 @@ def _format_image(context, image, os_image, images_dict, ids_dict,
     ec2_image['rootDeviceType'] = root_device_type
 
     _cloud_format_mappings(context, properties, ec2_image,
-                           ec2_image['rootDeviceName'], snapshot_ids)
+                           root_device_name, snapshot_ids)
 
     return ec2_image
 
@@ -29,10 +29,10 @@ from ec2api.api import common
 from ec2api.api import ec2utils
 from ec2api.api import network_interface as network_interface_api
 from ec2api.api import security_group as security_group_api
+from ec2api import context as ec2_context
 from ec2api.db import api as db_api
 from ec2api import exception
 from ec2api.i18n import _
-from ec2api import novadb
 
 
 ec2_opts = [
@ -169,11 +169,10 @@ class InstanceDescriber(common.TaggableItemsDescriber):
|
|||||||
self.obsolete_instances = []
|
self.obsolete_instances = []
|
||||||
|
|
||||||
def format(self, instance, os_instance):
|
def format(self, instance, os_instance):
|
||||||
novadb_instance = self.novadb_instances[os_instance.id]
|
|
||||||
formatted_instance = _format_instance(
|
formatted_instance = _format_instance(
|
||||||
self.context, instance, os_instance, novadb_instance,
|
self.context, instance, os_instance,
|
||||||
self.ec2_network_interfaces.get(instance['id']),
|
self.ec2_network_interfaces.get(instance['id']),
|
||||||
self.image_ids, self.volumes)
|
self.image_ids, self.volumes, self.os_volumes)
|
||||||
|
|
||||||
reservation_id = instance['reservation_id']
|
reservation_id = instance['reservation_id']
|
||||||
if reservation_id in self.reservations:
|
if reservation_id in self.reservations:
|
||||||
@ -184,8 +183,7 @@ class InstanceDescriber(common.TaggableItemsDescriber):
|
|||||||
self.reservations[reservation_id] = reservation
|
self.reservations[reservation_id] = reservation
|
||||||
if not instance['vpc_id']:
|
if not instance['vpc_id']:
|
||||||
self.reservation_os_groups[reservation_id] = (
|
self.reservation_os_groups[reservation_id] = (
|
||||||
os_instance.security_groups
|
getattr(os_instance, 'security_groups', []))
|
||||||
if hasattr(os_instance, 'security_groups') else [])
|
|
||||||
|
|
||||||
self.reservation_instances[
|
self.reservation_instances[
|
||||||
reservation['id']].append(formatted_instance)
|
reservation['id']].append(formatted_instance)
|
||||||
@@ -207,22 +205,17 @@ class InstanceDescriber(common.TaggableItemsDescriber):
         return instances
 
     def get_os_items(self):
-        self.novadb_instances = {}
-        return clients.nova(self.context).servers.list(
-            # NOTE(ft): these filters are needed for metadata server
-            # which calls describe_instances with an admin account
-            # (but project_id is substituted to an instance's one).
-            search_opts={'all_tenants': self.context.is_os_admin,
+        self.os_volumes = _get_os_volumes(self.context)
+        nova = clients.nova(ec2_context.get_os_admin_context())
+        return nova.servers.list(
+            search_opts={'all_tenants': True,
                          'project_id': self.context.project_id})
 
     def auto_update_db(self, instance, os_instance):
-        novadb_instance = novadb.instance_get_by_uuid(self.context,
-                                                      os_instance.id)
-        self.novadb_instances[os_instance.id] = novadb_instance
         if not instance:
             instance = ec2utils.get_db_item_by_os_id(
                 self.context, 'i', os_instance.id,
-                novadb_instance=novadb_instance)
+                os_instance=os_instance)
         return instance
 
     def get_name(self, os_item):
@ -339,59 +332,47 @@ def get_console_output(context, instance_id):
|
|||||||
|
|
||||||
def describe_instance_attribute(context, instance_id, attribute):
|
def describe_instance_attribute(context, instance_id, attribute):
|
||||||
instance = ec2utils.get_db_item(context, instance_id)
|
instance = ec2utils.get_db_item(context, instance_id)
|
||||||
nova = clients.nova(context)
|
nova = clients.nova(ec2_context.get_os_admin_context())
|
||||||
os_instance = nova.servers.get(instance['os_id'])
|
os_instance = nova.servers.get(instance['os_id'])
|
||||||
novadb_instance = novadb.instance_get_by_uuid(context, os_instance.id)
|
|
||||||
|
|
||||||
def _format_attr_block_device_mapping(result):
|
def _format_attr_block_device_mapping(result):
|
||||||
root_device_name = _cloud_format_instance_root_device_name(
|
|
||||||
novadb_instance)
|
|
||||||
# TODO(ft): next call add 'rootDeviceType' to result,
|
# TODO(ft): next call add 'rootDeviceType' to result,
|
||||||
# but AWS doesn't. This is legacy behavior of Nova EC2
|
# but AWS doesn't. This is legacy behavior of Nova EC2
|
||||||
_cloud_format_instance_bdm(context, os_instance.id,
|
_cloud_format_instance_bdm(context, os_instance, result)
|
||||||
root_device_name, result)
|
|
||||||
|
|
||||||
def _format_attr_disable_api_termination(result):
|
|
||||||
result['disableApiTermination'] = {
|
|
||||||
'value': novadb_instance.get('disable_terminate', False)}
|
|
||||||
|
|
||||||
def _format_attr_group_set(result):
|
def _format_attr_group_set(result):
|
||||||
result['groupSet'] = _format_group_set(context,
|
result['groupSet'] = _format_group_set(
|
||||||
os_instance.security_groups)
|
context, getattr(os_instance, 'security_groups', []))
|
||||||
|
|
||||||
def _format_attr_instance_initiated_shutdown_behavior(result):
|
|
||||||
value = ('terminate' if novadb_instance.get('shutdown_terminate')
|
|
||||||
else 'stop')
|
|
||||||
result['instanceInitiatedShutdownBehavior'] = {'value': value}
|
|
||||||
|
|
||||||
def _format_attr_instance_type(result):
|
def _format_attr_instance_type(result):
|
||||||
result['instanceType'] = {'value': _cloud_format_instance_type(
|
result['instanceType'] = {'value': _cloud_format_instance_type(
|
||||||
context, os_instance)}
|
context, os_instance)}
|
||||||
|
|
||||||
def _format_attr_kernel(result):
|
def _format_attr_kernel(result):
|
||||||
value = _cloud_format_kernel_id(context, novadb_instance)
|
value = _cloud_format_kernel_id(context, os_instance)
|
||||||
result['kernel'] = {'value': value}
|
result['kernel'] = {'value': value}
|
||||||
|
|
||||||
def _format_attr_ramdisk(result):
|
def _format_attr_ramdisk(result):
|
||||||
value = _cloud_format_ramdisk_id(context, novadb_instance)
|
value = _cloud_format_ramdisk_id(context, os_instance)
|
||||||
result['ramdisk'] = {'value': value}
|
result['ramdisk'] = {'value': value}
|
||||||
|
|
||||||
def _format_attr_root_device_name(result):
|
def _format_attr_root_device_name(result):
|
||||||
result['rootDeviceName'] = {
|
result['rootDeviceName'] = {
|
||||||
'value': _cloud_format_instance_root_device_name(
|
'value': getattr(os_instance,
|
||||||
novadb_instance)}
|
'OS-EXT-SRV-ATTR:root_device_name', None)}
|
||||||
|
|
||||||
def _format_attr_user_data(result):
|
def _format_attr_user_data(result):
|
||||||
if novadb_instance['user_data']:
|
if not hasattr(os_instance, 'OS-EXT-SRV-ATTR:user_data'):
|
||||||
value = base64.b64decode(novadb_instance['user_data'])
|
# NOTE(ft): partial compatibility with pre Kilo OS releases
|
||||||
|
raise exception.InvalidAttribute(attr=attribute)
|
||||||
|
user_data = getattr(os_instance, 'OS-EXT-SRV-ATTR:user_data')
|
||||||
|
if user_data:
|
||||||
|
value = base64.b64decode(user_data)
|
||||||
result['userData'] = {'value': value}
|
result['userData'] = {'value': value}
|
||||||
|
|
||||||
attribute_formatter = {
|
attribute_formatter = {
|
||||||
'blockDeviceMapping': _format_attr_block_device_mapping,
|
'blockDeviceMapping': _format_attr_block_device_mapping,
|
||||||
'disableApiTermination': _format_attr_disable_api_termination,
|
|
||||||
'groupSet': _format_attr_group_set,
|
'groupSet': _format_attr_group_set,
|
||||||
'instanceInitiatedShutdownBehavior': (
|
|
||||||
_format_attr_instance_initiated_shutdown_behavior),
|
|
||||||
'instanceType': _format_attr_instance_type,
|
'instanceType': _format_attr_instance_type,
|
||||||
'kernel': _format_attr_kernel,
|
'kernel': _format_attr_kernel,
|
||||||
'ramdisk': _format_attr_ramdisk,
|
'ramdisk': _format_attr_ramdisk,
|
||||||
@ -415,13 +396,14 @@ def _get_idempotent_run(context, client_token):
|
|||||||
if i.get('client_token') == client_token)
|
if i.get('client_token') == client_token)
|
||||||
if not instances:
|
if not instances:
|
||||||
return
|
return
|
||||||
os_instances = _get_os_instances_by_instances(context, instances.values())
|
nova = clients.nova(ec2_context.get_os_admin_context())
|
||||||
|
os_instances = _get_os_instances_by_instances(context, instances.values(),
|
||||||
|
nova=nova)
|
||||||
instances_info = []
|
instances_info = []
|
||||||
instance_ids = []
|
instance_ids = []
|
||||||
for os_instance in os_instances:
|
for os_instance in os_instances:
|
||||||
instance = instances[os_instance.id]
|
instance = instances[os_instance.id]
|
||||||
novadb_instance = novadb.instance_get_by_uuid(context, os_instance.id)
|
instances_info.append((instance, os_instance,))
|
||||||
instances_info.append((instance, os_instance, novadb_instance,))
|
|
||||||
instance_ids.append(instance['id'])
|
instance_ids.append(instance['id'])
|
||||||
if not instances_info:
|
if not instances_info:
|
||||||
return
|
return
|
||||||
@ -446,9 +428,9 @@ def _format_reservation_body(context, reservation, formatted_instances,
|
|||||||
def _format_reservation(context, reservation_id, instances_info,
|
def _format_reservation(context, reservation_id, instances_info,
|
||||||
ec2_network_interfaces, image_ids={}):
|
ec2_network_interfaces, image_ids={}):
|
||||||
formatted_instances = []
|
formatted_instances = []
|
||||||
for (instance, os_instance, novadb_instance) in instances_info:
|
for (instance, os_instance) in instances_info:
|
||||||
ec2_instance = _format_instance(
|
ec2_instance = _format_instance(
|
||||||
context, instance, os_instance, novadb_instance,
|
context, instance, os_instance,
|
||||||
ec2_network_interfaces.get(instance['id']), image_ids)
|
ec2_network_interfaces.get(instance['id']), image_ids)
|
||||||
formatted_instances.append(ec2_instance)
|
formatted_instances.append(ec2_instance)
|
||||||
|
|
||||||
@ -456,11 +438,12 @@ def _format_reservation(context, reservation_id, instances_info,
|
|||||||
'owner_id': os_instance.tenant_id}
|
'owner_id': os_instance.tenant_id}
|
||||||
return _format_reservation_body(
|
return _format_reservation_body(
|
||||||
context, reservation, formatted_instances,
|
context, reservation, formatted_instances,
|
||||||
None if instance['vpc_id'] else os_instance.security_groups)
|
(None if instance['vpc_id'] else
|
||||||
|
getattr(os_instance, 'security_groups', [])))
|
||||||
|
|
||||||
|
|
||||||
def _format_instance(context, instance, os_instance, novadb_instance,
|
def _format_instance(context, instance, os_instance, ec2_network_interfaces,
|
||||||
ec2_network_interfaces, image_ids, volumes=None):
|
image_ids, volumes=None, os_volumes=None):
|
||||||
ec2_instance = {
|
ec2_instance = {
|
||||||
'amiLaunchIndex': instance['launch_index'],
|
'amiLaunchIndex': instance['launch_index'],
|
||||||
'imageId': (ec2utils.os_id_to_ec2_id(context, 'ami',
|
'imageId': (ec2utils.os_id_to_ec2_id(context, 'ami',
|
||||||
@ -477,16 +460,17 @@ def _format_instance(context, instance, os_instance, novadb_instance,
|
|||||||
'productCodesSet': None,
|
'productCodesSet': None,
|
||||||
'instanceState': _cloud_state_description(
|
'instanceState': _cloud_state_description(
|
||||||
getattr(os_instance, 'OS-EXT-STS:vm_state')),
|
getattr(os_instance, 'OS-EXT-STS:vm_state')),
|
||||||
'rootDeviceName': _cloud_format_instance_root_device_name(
|
|
||||||
novadb_instance),
|
|
||||||
}
|
}
|
||||||
_cloud_format_instance_bdm(context, instance['os_id'],
|
root_device_name = getattr(os_instance,
|
||||||
ec2_instance['rootDeviceName'], ec2_instance,
|
'OS-EXT-SRV-ATTR:root_device_name', None)
|
||||||
volumes)
|
if root_device_name:
|
||||||
kernel_id = _cloud_format_kernel_id(context, novadb_instance, image_ids)
|
ec2_instance['rootDeviceName'] = root_device_name
|
||||||
|
_cloud_format_instance_bdm(context, os_instance, ec2_instance,
|
||||||
|
volumes, os_volumes)
|
||||||
|
kernel_id = _cloud_format_kernel_id(context, os_instance, image_ids)
|
||||||
if kernel_id:
|
if kernel_id:
|
||||||
ec2_instance['kernelId'] = kernel_id
|
ec2_instance['kernelId'] = kernel_id
|
||||||
ramdisk_id = _cloud_format_ramdisk_id(context, novadb_instance, image_ids)
|
ramdisk_id = _cloud_format_ramdisk_id(context, os_instance, image_ids)
|
||||||
if ramdisk_id:
|
if ramdisk_id:
|
||||||
ec2_instance['ramdiskId'] = ramdisk_id
|
ec2_instance['ramdiskId'] = ramdisk_id
|
||||||
|
|
||||||
@ -526,7 +510,8 @@ def _format_instance(context, instance, os_instance, novadb_instance,
|
|||||||
ec2_instance.update({
|
ec2_instance.update({
|
||||||
'privateIpAddress': fixed_ip,
|
'privateIpAddress': fixed_ip,
|
||||||
'privateDnsName': (fixed_ip if CONF.ec2_private_dns_show_ip else
|
'privateDnsName': (fixed_ip if CONF.ec2_private_dns_show_ip else
|
||||||
novadb_instance['hostname']),
|
getattr(os_instance, 'OS-EXT-SRV-ATTR:hostname',
|
||||||
|
None)),
|
||||||
'dnsName': dns_name,
|
'dnsName': dns_name,
|
||||||
})
|
})
|
||||||
if floating_ip is not None:
|
if floating_ip is not None:
|
||||||
@@ -666,8 +651,9 @@ def _foreach_instance(context, instance_ids, valid_states, func):
     return True
 
 
-def _get_os_instances_by_instances(context, instances, exactly=False):
-    nova = clients.nova(context)
+def _get_os_instances_by_instances(context, instances, exactly=False,
+                                   nova=None):
+    nova = nova or clients.nova(context)
     os_instances = []
     obsolete_instances = []
     for instance in instances:
@@ -684,21 +670,35 @@ def _get_os_instances_by_instances(context, instances, exactly=False):
     return os_instances
 
 
-def _is_ebs_instance(context, os_instance):
-    novadb_instance = novadb.instance_get_by_uuid(context, os_instance.id)
-    root_device_name = _cloud_format_instance_root_device_name(novadb_instance)
+def _get_os_volumes(context):
+    search_opts = ({'all_tenants': True,
+                    'project_id': context.project_id}
+                   if context.is_os_admin else None)
+    os_volumes = collections.defaultdict(list)
+    cinder = clients.cinder(context)
+    for os_volume in cinder.volumes.list(search_opts=search_opts):
+        os_attachment = next(iter(os_volume.attachments), {})
+        os_instance_id = os_attachment.get('server_id')
+        if os_instance_id:
+            os_volumes[os_instance_id].append(os_volume)
+    return os_volumes
+
+
+def _is_ebs_instance(context, os_instance_id):
+    nova = clients.nova(ec2_context.get_os_admin_context())
+    os_instance = nova.servers.get(os_instance_id)
+    root_device_name = getattr(os_instance,
+                               'OS-EXT-SRV-ATTR:root_device_name', None)
+    if not root_device_name:
+        return False
     root_device_short_name = _block_device_strip_dev(root_device_name)
     if root_device_name == root_device_short_name:
         root_device_name = _block_device_prepend_dev(root_device_name)
-    for bdm in novadb.block_device_mapping_get_all_by_instance(context,
-                                                               os_instance.id):
-        volume_id = bdm['volume_id']
-        if (volume_id is None or bdm['no_device']):
-            continue
-
-        if ((bdm['snapshot_id'] or bdm['volume_id']) and
-                (bdm['device_name'] == root_device_name or
-                 bdm['device_name'] == root_device_short_name)):
+    for os_volume in _get_os_volumes(context)[os_instance_id]:
+        os_attachment = next(iter(os_volume.attachments), {})
+        device_name = os_attachment.get('device')
+        if (device_name == root_device_name or
+                device_name == root_device_short_name):
             return True
     return False
 
@ -829,14 +829,16 @@ class InstanceEngineNeutron(object):
|
|||||||
network_interface_api._detach_network_interface_item,
|
network_interface_api._detach_network_interface_item,
|
||||||
context, data['network_interface'])
|
context, data['network_interface'])
|
||||||
|
|
||||||
novadb_instance = novadb.instance_get_by_uuid(context,
|
instances_info.append((instance, os_instance))
|
||||||
os_instance.id)
|
|
||||||
instances_info.append((instance, os_instance, novadb_instance))
|
|
||||||
|
|
||||||
# NOTE(ft): we don't reuse network interface objects received from
|
# NOTE(ft): we don't reuse network interface objects received from
|
||||||
# create_network_interfaces because they don't contain attachment info
|
# create_network_interfaces because they don't contain attachment info
|
||||||
ec2_network_interfaces = (self.get_ec2_network_interfaces(
|
ec2_network_interfaces = (self.get_ec2_network_interfaces(
|
||||||
context, instance_ids=instance_ids))
|
context, instance_ids=instance_ids))
|
||||||
|
# NOTE(ft): since os_instance is created with regular Nova client,
|
||||||
|
# it doesn't contain enough info to get an instance in EC2 format
|
||||||
|
# completely, nevertheless we use it to get rid of additional requests
|
||||||
|
# and reduce code complexity
|
||||||
return _format_reservation(context, ec2_reservation_id, instances_info,
|
return _format_reservation(context, ec2_reservation_id, instances_info,
|
||||||
ec2_network_interfaces,
|
ec2_network_interfaces,
|
||||||
image_ids={os_image.id: image_id})
|
image_ids={os_image.id: image_id})
|
||||||
@@ -1104,7 +1106,7 @@ class InstanceEngineNova(object):
             os_instance = nova.servers.create(
                 '%s-%s' % (ec2_reservation_id, index),
                 os_image.id, os_flavor,
-                min_count=min_count, max_count=max_count,
+                min_count=1, max_count=1,
                 kernel_id=os_kernel_id, ramdisk_id=os_ramdisk_id,
                 availability_zone=(
                     placement or {}).get('availability_zone'),
@ -1122,11 +1124,12 @@ class InstanceEngineNova(object):
|
|||||||
cleaner.addCleanup(db_api.delete_item, context, instance['id'])
|
cleaner.addCleanup(db_api.delete_item, context, instance['id'])
|
||||||
|
|
||||||
nova.servers.update(os_instance, name=instance['id'])
|
nova.servers.update(os_instance, name=instance['id'])
|
||||||
|
instances_info.append((instance, os_instance))
|
||||||
|
|
||||||
novadb_instance = novadb.instance_get_by_uuid(context,
|
# NOTE(ft): since os_instance is created with regular Nova client,
|
||||||
os_instance.id)
|
# it doesn't contain enough info to get an instance in EC2 format
|
||||||
instances_info.append((instance, os_instance, novadb_instance))
|
# completely, nevertheless we use it to get rid of additional requests
|
||||||
|
# and reduce code complexity
|
||||||
return _format_reservation(context, ec2_reservation_id, instances_info,
|
return _format_reservation(context, ec2_reservation_id, instances_info,
|
||||||
{}, image_ids={os_image.id: image_id})
|
{}, image_ids={os_image.id: image_id})
|
||||||
|
|
||||||
@@ -1137,12 +1140,19 @@ class InstanceEngineNova(object):
 instance_engine = get_instance_engine()
 
 
-def _auto_create_instance_extension(context, instance, novadb_instance=None):
-    if not novadb_instance:
-        novadb_instance = novadb.instance_get_by_uuid(context,
-                                                      instance['os_id'])
-    instance['reservation_id'] = novadb_instance['reservation_id']
-    instance['launch_index'] = novadb_instance['launch_index']
+def _auto_create_instance_extension(context, instance, os_instance=None):
+    if not os_instance:
+        nova = clients.nova(ec2_context.get_os_admin_context())
+        os_instance = nova.servers.get(instance['os_id'])
+    if hasattr(os_instance, 'OS-EXT-SRV-ATTR:reservation_id'):
+        instance['reservation_id'] = getattr(os_instance,
+                                             'OS-EXT-SRV-ATTR:reservation_id')
+        instance['launch_index'] = getattr(os_instance,
+                                           'OS-EXT-SRV-ATTR:launch_index')
+    else:
+        # NOTE(ft): partial compatibility with pre Kilo OS releases
+        instance['reservation_id'] = _generate_reservation_id()
+        instance['launch_index'] = 0
 
 
 ec2utils.register_auto_create_db_item_extension(
@@ -1188,7 +1198,7 @@ def _cloud_get_image_state(image):
 
 
 def _cloud_format_kernel_id(context, os_instance, image_ids=None):
-    os_kernel_id = os_instance['kernel_id']
+    os_kernel_id = getattr(os_instance, 'OS-EXT-SRV-ATTR:kernel_id', None)
     if os_kernel_id is None or os_kernel_id == '':
         return
     return ec2utils.os_id_to_ec2_id(context, 'aki', os_kernel_id,
@@ -1196,7 +1206,7 @@ def _cloud_format_kernel_id(context, os_instance, image_ids=None):
 
 
 def _cloud_format_ramdisk_id(context, os_instance, image_ids=None):
-    os_ramdisk_id = os_instance['ramdisk_id']
+    os_ramdisk_id = getattr(os_instance, 'OS-EXT-SRV-ATTR:ramdisk_id', None)
    if os_ramdisk_id is None or os_ramdisk_id == '':
         return
     return ec2utils.os_id_to_ec2_id(context, 'ari', os_ramdisk_id,
@@ -1208,11 +1218,6 @@ def _cloud_format_instance_type(context, os_instance):
     return clients.nova(context).flavors.get(os_instance.flavor['id']).name
 
 
-def _cloud_format_instance_root_device_name(novadb_instance):
-    return (novadb_instance.get('root_device_name') or
-            _block_device_DEFAULT_ROOT_DEV_NAME)
-
-
 def _cloud_state_description(vm_state):
     """Map the vm state to the server status string."""
     # Note(maoy): We do not provide EC2 compatibility
@@ -1224,39 +1229,52 @@ def _cloud_state_description(vm_state):
             'name': name}
 
 
-def _cloud_format_instance_bdm(context, instance_uuid, root_device_name,
-                               result, volumes=None):
+def _cloud_format_instance_bdm(context, os_instance, result,
+                               volumes=None, os_volumes=None):
     """Format InstanceBlockDeviceMappingResponseItemType."""
-    root_device_type = 'instance-store'
-    root_device_short_name = _block_device_strip_dev(root_device_name)
-    if root_device_name == root_device_short_name:
-        root_device_name = _block_device_prepend_dev(root_device_name)
-    cinder = clients.cinder(context)
+    root_device_name = getattr(os_instance,
+                               'OS-EXT-SRV-ATTR:root_device_name', None)
+    if not root_device_name:
+        root_device_short_name = root_device_type = None
+    else:
+        root_device_type = 'instance-store'
+        root_device_short_name = _block_device_strip_dev(root_device_name)
+        if root_device_name == root_device_short_name:
+            root_device_name = _block_device_prepend_dev(root_device_name)
     mapping = []
-    for bdm in novadb.block_device_mapping_get_all_by_instance(context,
-                                                               instance_uuid):
-        volume_id = bdm['volume_id']
-        if (volume_id is None or bdm['no_device']):
+    if os_volumes is None:
+        os_volumes = _get_os_volumes(context)
+    # NOTE(ft): Attaching volumes are not reported, because Cinder
+    # volume doesn't yet contain attachment info at this stage, but Nova v2.3
+    # instance volumes_attached doesn't contain a device name.
+    # But a bdm must contain the last one.
+    volumes_attached = getattr(os_instance,
+                               'os-extended-volumes:volumes_attached', [])
+    for os_volume in os_volumes[os_instance.id]:
+        os_attachment = next(iter(os_volume.attachments), {})
+        device_name = os_attachment.get('device')
+        if not device_name:
             continue
-
-        if ((bdm['snapshot_id'] or bdm['volume_id']) and
-                (bdm['device_name'] == root_device_name or
-                 bdm['device_name'] == root_device_short_name)):
+        if (device_name == root_device_name or
+                device_name == root_device_short_name):
             root_device_type = 'ebs'
 
-        vol = cinder.volumes.get(volume_id)
-        volume = ec2utils.get_db_item_by_os_id(context, 'vol', volume_id,
+        volume = ec2utils.get_db_item_by_os_id(context, 'vol', os_volume.id,
                                                volumes)
         # TODO(yamahata): volume attach time
         ebs = {'volumeId': volume['id'],
-               'deleteOnTermination': bdm['delete_on_termination'],
-               'status': _cloud_get_volume_attach_status(vol), }
-        res = {'deviceName': bdm['device_name'],
-               'ebs': ebs, }
-        mapping.append(res)
+               'status': _cloud_get_volume_attach_status(os_volume)}
+        volume_attached = next((va for va in volumes_attached
+                                if va['id'] == os_volume.id), None)
+        if volume_attached:
+            ebs['deleteOnTermination'] = (
+                volume_attached['delete_on_termination'])
+        mapping.append({'deviceName': device_name,
+                        'ebs': ebs})
 
     if mapping:
         result['blockDeviceMapping'] = mapping
-    result['rootDeviceType'] = root_device_type
+    if root_device_type:
+        result['rootDeviceType'] = root_device_type
 
 
@@ -1282,9 +1300,6 @@ def _block_device_prepend_dev(device_name):
     return device_name and '/dev/' + _block_device_strip_dev(device_name)
 
 
-_block_device_DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
-
-
 def _utils_generate_uid(topic, size=8):
     characters = '01234567890abcdefghijklmnopqrstuvwxyz'
     choices = [random.choice(characters) for _x in xrange(size)]
@@ -67,7 +67,7 @@ def attach_volume(context, volume_id, instance_id, device):
     cinder = clients.cinder(context)
     os_volume = cinder.volumes.get(volume['os_id'])
     return _format_attachment(context, volume, os_volume,
-                              instance_id=instance_id, short=True)
+                              instance_id=instance_id)
 
 
 def detach_volume(context, volume_id, instance_id=None, device=None,
@@ -88,7 +88,7 @@ def detach_volume(context, volume_id, instance_id=None, device=None,
     instance_id = next((i['id'] for i in db_api.get_items(context, 'i')
                         if i['os_id'] == os_instance_id), None)
     return _format_attachment(context, volume, os_volume,
-                              instance_id=instance_id, short=True)
+                              instance_id=instance_id)
 
 
 def delete_volume(context, volume_id):
@@ -172,7 +172,7 @@ def _format_volume(context, volume, os_volume, instances={},
 
 
 def _format_attachment(context, volume, os_volume, instances={},
-                       instance_id=None, short=False):
+                       instance_id=None):
     os_attachment = next(iter(os_volume.attachments), {})
     os_instance_id = os_attachment.get('server_id')
     if not instance_id and os_instance_id:
@@ -186,6 +186,4 @@ def _format_attachment(context, volume, os_volume, instances={},
                                  if os_volume.status in ('attaching', 'detaching') else
                                  'attached' if os_attachment else 'detached'),
                       'volumeId': volume['id']}
-    if not short:
-        ec2_attachment['deleteOnTermination'] = False
     return ec2_attachment
@@ -158,6 +158,9 @@ def is_user_context(context):
 
 def get_os_admin_context():
     """Create a context to interact with OpenStack as an administrator."""
+    if (getattr(local.store, 'context', None) and
+            local.store.context.is_os_admin):
+        return local.store.context
     # TODO(ft): make an authentification token reusable
     keystone = keystone_client.Client(
         username=CONF.admin_user,
@@ -408,7 +408,3 @@ class InvalidFilter(Invalid):
 class RulesPerSecurityGroupLimitExceeded(Overlimit):
     msg_fmt = _("You've reached the limit on the number of rules that "
                 "you can add to a security group.")
-
-
-class NovaDbInstanceNotFound(EC2Exception):
-    code = 500
@@ -23,7 +23,7 @@ from oslo_log import log as logging
 import six
 import webob
 
-from ec2api import context as ec2context
+from ec2api import context as ec2_context
 from ec2api import exception
 from ec2api.i18n import _, _LE, _LW
 from ec2api.metadata import api
@@ -154,7 +154,7 @@ class MetadataRequestHandler(wsgi.Application):
             return req.headers
 
         remote_ip = self._get_remote_ip(req)
-        context = ec2context.get_os_admin_context()
+        context = ec2_context.get_os_admin_context()
         instance_id, project_id = (
             api.get_os_instance_and_project_id(context, remote_ip))
         return {
@@ -178,7 +178,7 @@ class MetadataRequestHandler(wsgi.Application):
                                hashlib.sha256).hexdigest()
 
     def _get_metadata(self, req, path_tokens):
-        context = ec2context.get_os_admin_context()
+        context = ec2_context.get_os_admin_context()
         if req.headers.get('X-Instance-ID'):
             os_instance_id, project_id, remote_ip = (
                 self._unpack_request_attributes(req))
@@ -22,7 +22,6 @@ from ec2api.api import ec2utils
 from ec2api.api import instance as instance_api
 from ec2api import exception
 from ec2api.i18n import _
-from ec2api.novadb import api as novadb
 
 LOG = logging.getLogger(__name__)
 
@@ -180,10 +179,10 @@ def _build_metadata(context, ec2_instance, ec2_reservation,
     # meta-data/public-keys/0/ : 'openssh-key'
     # meta-data/public-keys/0/openssh-key : '%s' % publickey
     if ec2_instance['keyName']:
-        novadb_instance = novadb.instance_get_by_uuid(context, os_instance_id)
+        keypair = clients.nova(context).keypairs.get(ec2_instance['keyName'])
         metadata['public-keys'] = {
-            '0': {'_name': "0=" + ec2_instance['keyName'],
-                  'openssh-key': novadb_instance['key_data']}}
+            '0': {'_name': "0=" + keypair.name,
+                  'openssh-key': keypair.public_key}}
 
     full_metadata = {'meta-data': metadata}
 
@@ -210,21 +209,7 @@ def _build_block_device_mappings(context, ec2_instance, os_instance_id):
                        for num, ebs in enumerate(ebs_devices))
     mappings.update(ebs_devices)
 
-    bdms = novadb.block_device_mapping_get_all_by_instance(context,
-                                                           os_instance_id)
-    ephemerals = dict(('ephemeral%d' % num, eph['device_name'])
-                      for num, eph in enumerate(
-                          eph for eph in bdms
-                          if (eph['source_type'] == 'blank' and
-                              eph['guest_format'] != 'swap')))
-    mappings.update(ephemerals)
-
-    swap = next((swap['device_name'] for swap in bdms
-                 if (swap['source_type'] == 'blank' and
-                     swap['guest_format'] == 'swap')), None)
-    if swap:
-        mappings['swap'] = swap
-
+    # TODO(ft): extend Nova API to get ephemerals and swap
     return mappings
 
 
@ -1,19 +0,0 @@
|
|||||||
# Copyright 2014
|
|
||||||
# The Cloudscaling Group, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
DB abstraction for Nova
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ec2api.novadb.api import * # noqa
|
|
@ -1,88 +0,0 @@
|
|||||||
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Defines interface for DB access.
|
|
||||||
|
|
||||||
Functions in this module are imported into the ec2api.novadb namespace.
|
|
||||||
Call these functions from c2api.novadb namespace, not the c2api.novadb.api
|
|
||||||
namespace.
|
|
||||||
|
|
||||||
All functions in this module return objects that implement a dictionary-like
|
|
||||||
interface. Currently, many of these objects are sqlalchemy objects that
|
|
||||||
implement a dictionary interface. However, a future goal is to have all of
|
|
||||||
these objects be simple dictionaries.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from eventlet import tpool
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_db import api as db_api
|
|
||||||
from oslo_log import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.import_opt('use_tpool', 'ec2api.db.api',
|
|
||||||
group='database')
|
|
||||||
|
|
||||||
_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.novadb.sqlalchemy.api'}
|
|
||||||
|
|
||||||
|
|
||||||
class NovaDBAPI(object):
|
|
||||||
"""Nova's DB API wrapper class.
|
|
||||||
|
|
||||||
This wraps the oslo DB API with an option to be able to use eventlet's
|
|
||||||
thread pooling. Since the CONF variable may not be loaded at the time
|
|
||||||
this class is instantiated, we must look at it on the first DB API call.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.__db_api = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _db_api(self):
|
|
||||||
if not self.__db_api:
|
|
||||||
nova_db_api = db_api.DBAPI(CONF.database.backend,
|
|
||||||
backend_mapping=_BACKEND_MAPPING)
|
|
||||||
if CONF.database.use_tpool:
|
|
||||||
self.__db_api = tpool.Proxy(nova_db_api)
|
|
||||||
else:
|
|
||||||
self.__db_api = nova_db_api
|
|
||||||
return self.__db_api
|
|
||||||
|
|
||||||
def __getattr__(self, key):
|
|
||||||
return getattr(self._db_api, key)
|
|
||||||
|
|
||||||
|
|
||||||
IMPL = NovaDBAPI()
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
# The maximum value a signed INT type may have
|
|
||||||
MAX_INT = 0x7FFFFFFF
|
|
||||||
|
|
||||||
####################
|
|
||||||
|
|
||||||
|
|
||||||
def instance_get_by_uuid(context, uuid, columns_to_join=None):
|
|
||||||
"""Get an instance or raise if it does not exist."""
|
|
||||||
return IMPL.instance_get_by_uuid(context, uuid, columns_to_join)
|
|
||||||
|
|
||||||
|
|
||||||
def block_device_mapping_get_all_by_instance(context, instance_uuid):
|
|
||||||
"""Get all block device mapping belonging to an instance."""
|
|
||||||
return IMPL.block_device_mapping_get_all_by_instance(context,
|
|
||||||
instance_uuid)
|
|
@ -1,23 +0,0 @@
|
|||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from sqlalchemy import BigInteger
|
|
||||||
from sqlalchemy.ext.compiler import compiles
|
|
||||||
|
|
||||||
|
|
||||||
@compiles(BigInteger, 'sqlite')
|
|
||||||
def compile_big_int_sqlite(type_, compiler, **kw):
|
|
||||||
return 'INTEGER'
|
|
@ -1,191 +0,0 @@
|
|||||||
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Implementation of SQLAlchemy backend."""
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_db.sqlalchemy import session as db_session
|
|
||||||
from oslo_log import log as logging
|
|
||||||
from sqlalchemy import or_
|
|
||||||
|
|
||||||
import ec2api.context
|
|
||||||
from ec2api import exception
|
|
||||||
from ec2api.i18n import _
|
|
||||||
from ec2api.novadb.sqlalchemy import models
|
|
||||||
|
|
||||||
connection_opts = [
|
|
||||||
cfg.StrOpt('connection_nova',
|
|
||||||
secret=True,
|
|
||||||
help='The SQLAlchemy connection string used to connect to the '
|
|
||||||
'nova database'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(connection_opts, group='database')
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
_MASTER_FACADE = None
|
|
||||||
|
|
||||||
|
|
||||||
def _create_facade_lazily():
|
|
||||||
global _MASTER_FACADE
|
|
||||||
|
|
||||||
if _MASTER_FACADE is None:
|
|
||||||
_MASTER_FACADE = db_session.EngineFacade(
|
|
||||||
CONF.database.connection_nova,
|
|
||||||
**dict(CONF.database.iteritems())
|
|
||||||
)
|
|
||||||
return _MASTER_FACADE
|
|
||||||
|
|
||||||
|
|
||||||
def get_engine():
|
|
||||||
facade = _create_facade_lazily()
|
|
||||||
return facade.get_engine()
|
|
||||||
|
|
||||||
|
|
||||||
def get_session(**kwargs):
|
|
||||||
facade = _create_facade_lazily()
|
|
||||||
return facade.get_session(**kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def get_backend():
|
|
||||||
"""The backend is this module itself."""
|
|
||||||
return sys.modules[__name__]
|
|
||||||
|
|
||||||
|
|
||||||
def require_context(f):
|
|
||||||
"""Decorator to require *any* user or admin context.
|
|
||||||
|
|
||||||
This does no authorization for user or project access matching, see
|
|
||||||
:py:func:`ec2api.context.authorize_project_context` and
|
|
||||||
:py:func:`ec2api.context.authorize_user_context`.
|
|
||||||
|
|
||||||
The first argument to the wrapped function must be the context.
|
|
||||||
|
|
||||||
"""
|
|
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        ec2api.context.require_context(args[0])
        return f(*args, **kwargs)
    return wrapper


def model_query(context, model, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id. If set to 'allow_none',
            restriction includes project_id = None.
    :param base_model: Where model_query is passed a "model" parameter which is
            not a subclass of NovaBase, we should pass an extra base_model
            parameter that is a subclass of NovaBase and corresponds to the
            model parameter.
    """

    session = kwargs.get('session') or get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    project_only = kwargs.get('project_only', False)

    def issubclassof_nova_base(obj):
        return isinstance(obj, type) and issubclass(obj, models.NovaBase)

    base_model = model
    if not issubclassof_nova_base(base_model):
        base_model = kwargs.get('base_model', None)
        if not issubclassof_nova_base(base_model):
            raise Exception(_("model or base_model parameter should be "
                              "subclass of NovaBase"))

    query = session.query(model, *args)

    default_deleted_value = base_model.__mapper__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(base_model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(base_model.deleted != default_deleted_value)
    else:
        raise Exception(_("Unrecognized read_deleted value '%s'")
                        % read_deleted)

    if ec2api.context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            query = (query.
                     filter(or_(base_model.project_id == context.project_id,
                                base_model.project_id == None)))
        else:
            query = query.filter_by(project_id=context.project_id)

    return query


####################


@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None):
    return _instance_get_by_uuid(context, uuid,
                                 columns_to_join=columns_to_join)


def _instance_get_by_uuid(context, uuid, session=None,
                          columns_to_join=None):
    result = (_build_instance_get(context, session=session,
                                  columns_to_join=columns_to_join).
              filter_by(uuid=uuid).
              first())

    if not result:
        LOG.error("Instance %s could not be found in nova DB" % str(uuid))
        raise exception.NovaDbInstanceNotFound()

    return result


def _build_instance_get(context, session=None,
                        columns_to_join=None):
    query = model_query(context, models.Instance, session=session,
                        project_only=True, read_deleted="no")
    return query


def _block_device_mapping_get_query(context, session=None,
                                    columns_to_join=None):
    if columns_to_join is None:
        columns_to_join = []

    query = model_query(context, models.BlockDeviceMapping,
                        session=session, read_deleted="no")

    return query


@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid):
    return (_block_device_mapping_get_query(context).
            filter_by(instance_uuid=instance_uuid).
            all())
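For reference, the removed query helpers above were thin wrappers around model_query(); a direct call, assuming an ec2api request context and the models defined in this package, looked roughly like this:

    # roughly equivalent to block_device_mapping_get_all_by_instance()
    bdms = (model_query(context, models.BlockDeviceMapping, read_deleted='no').
            filter_by(instance_uuid=instance_uuid).
            all())

This is the read path that the change replaces with Nova API requests.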
@@ -1,244 +0,0 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
SQLAlchemy models for nova data.
"""

from oslo_config import cfg
from oslo_db.sqlalchemy import models
from sqlalchemy import Column, Index, Integer, Enum, String
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import DateTime, Boolean, Text
from sqlalchemy.orm import object_mapper

from ec2api.novadb.sqlalchemy import types

CONF = cfg.CONF
BASE = declarative_base()


def MediumText():
    return Text().with_variant(MEDIUMTEXT(), 'mysql')


class NovaBase(models.SoftDeleteMixin,
               models.TimestampMixin,
               models.ModelBase):
    metadata = None

    def save(self, session=None):
        from ec2api.novadb.sqlalchemy import api

        if session is None:
            session = api.get_session()

        super(NovaBase, self).save(session=session)


class Instance(BASE, NovaBase):
    """Represents a guest VM."""
    __tablename__ = 'instances'
    __table_args__ = (
        Index('uuid', 'uuid', unique=True),
        Index('project_id', 'project_id'),
        Index('instances_host_deleted_idx',
              'host', 'deleted'),
        Index('instances_reservation_id_idx',
              'reservation_id'),
        Index('instances_terminated_at_launched_at_idx',
              'terminated_at', 'launched_at'),
        Index('instances_uuid_deleted_idx',
              'uuid', 'deleted'),
        Index('instances_task_state_updated_at_idx',
              'task_state', 'updated_at'),
        Index('instances_host_node_deleted_idx',
              'host', 'node', 'deleted'),
        Index('instances_host_deleted_cleaned_idx',
              'host', 'deleted', 'cleaned'),
    )
    injected_files = []

    id = Column(Integer, primary_key=True, autoincrement=True)

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for column in iter(object_mapper(self).columns):
                key = column.name
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    @property
    def _extra_keys(self):
        return ['name']

    user_id = Column(String(255))
    project_id = Column(String(255))

    image_ref = Column(String(255))
    kernel_id = Column(String(255))
    ramdisk_id = Column(String(255))
    hostname = Column(String(255))

    launch_index = Column(Integer)
    key_name = Column(String(255))
    key_data = Column(MediumText())

    power_state = Column(Integer)
    vm_state = Column(String(255))
    task_state = Column(String(255))

    memory_mb = Column(Integer)
    vcpus = Column(Integer)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)
    ephemeral_key_uuid = Column(String(36))

    # This is not related to hostname, above. It refers
    # to the nova node.
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    # To identify the "ComputeNode" which the instance resides in.
    # This equals to ComputeNode.hypervisor_hostname.
    node = Column(String(255))

    # *not* flavorid, this is the internal primary_key
    instance_type_id = Column(Integer)

    user_data = Column(MediumText())

    reservation_id = Column(String(255))

    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)

    availability_zone = Column(String(255))

    # User editable field for display in user-facing UIs
    display_name = Column(String(255))
    display_description = Column(String(255))

    # To remember on which host an instance booted.
    # An instance may have moved to another host by live migration.
    launched_on = Column(MediumText())

    # NOTE(jdillaman): locked deprecated in favor of locked_by,
    # to be removed in Icehouse
    locked = Column(Boolean)
    locked_by = Column(Enum('owner', 'admin'))

    os_type = Column(String(255))
    architecture = Column(String(255))
    vm_mode = Column(String(255))
    uuid = Column(String(36))

    root_device_name = Column(String(255))
    default_ephemeral_device = Column(String(255))
    default_swap_device = Column(String(255))
    config_drive = Column(String(255))

    # User editable field meant to represent what ip should be used
    # to connect to the instance
    access_ip_v4 = Column(types.IPAddress())
    access_ip_v6 = Column(types.IPAddress())

    auto_disk_config = Column(Boolean())
    progress = Column(Integer)

    # EC2 instance_initiated_shutdown_terminate
    # True: -> 'terminate'
    # False: -> 'stop'
    # Note(maoy): currently Nova will always stop instead of terminate
    # no matter what the flag says. So we set the default to False.
    shutdown_terminate = Column(Boolean(), default=False)

    # EC2 disable_api_termination
    disable_terminate = Column(Boolean(), default=False)

    # OpenStack compute cell name. This will only be set at the top of
    # the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
    cell_name = Column(String(255))
    internal_id = Column(Integer)

    # Records whether an instance has been deleted from disk
    cleaned = Column(Integer, default=0)


class BlockDeviceMapping(BASE, NovaBase):
    """Represents block device mapping that is defined by EC2."""
    __tablename__ = "block_device_mapping"
    __table_args__ = (
        Index('snapshot_id', 'snapshot_id'),
        Index('volume_id', 'volume_id'),
        Index('block_device_mapping_instance_uuid_device_name_idx',
              'instance_uuid', 'device_name'),
        Index('block_device_mapping_instance_uuid_volume_id_idx',
              'instance_uuid', 'volume_id'),
        Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
        # TODO(sshturm) Should be dropped. `virtual_name` was dropped
        # in 186 migration,
        # Duplicates `block_device_mapping_instance_uuid_device_name_idx` index.
        Index("block_device_mapping_instance_uuid_virtual_name"
              "_device_name_idx", 'instance_uuid', 'device_name'),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)

    instance_uuid = Column(String(36))
    source_type = Column(String(255))
    destination_type = Column(String(255))
    guest_format = Column(String(255))
    device_type = Column(String(255))
    disk_bus = Column(String(255))

    boot_index = Column(Integer)

    device_name = Column(String(255))

    # default=False for compatibility of the existing code.
    # With EC2 API,
    # default True for ami specified device.
    # default False for created with other timing.
    # TODO(sshturm) add default in db
    delete_on_termination = Column(Boolean, default=False)

    snapshot_id = Column(String(36))

    volume_id = Column(String(36))
    volume_size = Column(Integer)

    image_id = Column(String(36))

    # for no device to suppress devices.
    no_device = Column(Boolean)

    connection_info = Column(MediumText())
@@ -1,31 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Custom SQLAlchemy types."""

from sqlalchemy.dialects import postgresql
from sqlalchemy import types


class IPAddress(types.TypeDecorator):
    """An SQLAlchemy type representing an IP-address."""

    impl = types.String

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.INET())
        else:
            return dialect.type_descriptor(types.String(39))
@@ -96,7 +96,8 @@ class SubnetTest(base.EC2TestCase):
         self.client.DeleteVpc(VpcId=vpc_id)
         self.cancelResourceCleanUp(vpc_clean)

-    @testtools.skipUnless(CONF.aws.run_incompatible_tests,
-                          "bug with overlapped subnets")
+    @testtools.skipUnless(
+        CONF.aws.run_incompatible_tests,
+        "bug with overlapped subnets")
     def test_create_overlapped_subnet(self):
         cidr = '10.2.0.0/24'
@@ -205,6 +205,7 @@ class VolumeTest(base.EC2TestCase):
         self.assertEqual('in-use', volume['State'])
         self.assertEqual(1, len(volume['Attachments']))
         attachment = volume['Attachments'][0]
-        self.assertFalse(attachment['DeleteOnTermination'])
+        if CONF.aws.run_incompatible_tests:
+            self.assertFalse(attachment['DeleteOnTermination'])
         self.assertIsNotNone(attachment['Device'])
         self.assertEqual(instance_id, attachment['InstanceId'])
@@ -272,6 +273,7 @@ class VolumeTest(base.EC2TestCase):
                                        VolumeId=volume_id)
         self.assertEqual('attaching', data['State'])

-        bdt = self.get_instance_bdm(instance_id, '/dev/vdh')
-        self.assertIsNotNone(bdt)
-        self.assertEqual('attaching', bdt['Ebs']['Status'])
+        if CONF.aws.run_incompatible_tests:
+            bdt = self.get_instance_bdm(instance_id, '/dev/vdh')
+            self.assertIsNotNone(bdt)
+            self.assertEqual('attaching', bdt['Ebs']['Status'])
@@ -375,12 +375,12 @@ class EC2_EBSInstanceSnapshot(base.EC2TestCase):
             ImageId=self.image_id, InstanceType=instance_type,
             Placement={'AvailabilityZone': self.zone}, MinCount=1, MaxCount=1)
         self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
-        instance = data['Instances'][0]
         instance_id = data['Instances'][0]['InstanceId']
         res_clean = self.addResourceCleanUp(self.client.TerminateInstances,
                                             InstanceIds=[instance_id])
         self.get_instance_waiter().wait_available(instance_id,
                                                   final_set=('running'))
+        instance = self.get_instance(instance_id)

         bdt = self.get_instance_bdm(instance_id, None)
         self.assertIsNotNone(bdt)
@@ -40,6 +40,7 @@ def skip_not_implemented(test_item):
 class ApiTestCase(test_base.BaseTestCase):

     ANY_EXECUTE_ERROR = object()
+    NOVACLIENT_SPEC_OBJ = novaclient.Client('2')

     def setUp(self):
         super(ApiTestCase, self).setUp()
@@ -50,8 +51,9 @@ class ApiTestCase(test_base.BaseTestCase):
         self.addCleanup(neutron_patcher.stop)

         nova_patcher = mock.patch('novaclient.client.Client')
-        self.nova = mock.create_autospec(novaclient.Client('2'))
-        nova_patcher.start().return_value = self.nova
+        self.nova = mock.create_autospec(self.NOVACLIENT_SPEC_OBJ)
+        self.novaclient_getter = nova_patcher.start()
+        self.novaclient_getter.return_value = self.nova
         self.addCleanup(nova_patcher.stop)

         glance_patcher = mock.patch('glanceclient.client.Client')
@@ -79,7 +81,8 @@ class ApiTestCase(test_base.BaseTestCase):

     def execute(self, action, args):
         status_code, response = self._execute(action, args)
-        self.assertEqual(200, status_code)
+        self.assertEqual(200, status_code,
+                         self._format_error_message(status_code, response))
         return response

     def assert_execution_error(self, error_code, action, args):
@@ -88,7 +91,8 @@ class ApiTestCase(test_base.BaseTestCase):
             self.assertLessEqual(400, status_code)
         else:
             self.assertEqual(400, status_code)
-        self.assertEqual(error_code, response['Error']['Code'])
+        self.assertEqual(error_code, response['Error']['Code'],
+                         self._format_error_message(status_code, response))

     def assert_any_call(self, func, *args, **kwargs):
         calls = func.mock_calls
@@ -181,9 +185,10 @@ class ApiTestCase(test_base.BaseTestCase):
                            ('tag-value', 'fake_value'),
                            ('tag:fake_key', 'fake_value')])

-    def _create_context(self):
+    def _create_context(self, auth_token=None):
         return ec2api.context.RequestContext(
             fakes.ID_OS_USER, fakes.ID_OS_PROJECT,
+            auth_token=auth_token,
             service_catalog=[{'type': 'network',
                               'endpoints': [{'publicUrl': 'fake_url'}]}])

@@ -218,3 +223,10 @@ class ApiTestCase(test_base.BaseTestCase):
         self.assertIn('Error', body)
         self.assertEqual(2, len(body['Error']))
         return body
+
+    def _format_error_message(self, status_code, response):
+        if status_code >= 400:
+            return '%s: %s' % (response['Error']['Code'],
+                               response['Error']['Message'])
+        else:
+            return ''
@@ -238,8 +238,10 @@ FINGERPRINT_KEY_PAIR = (
 # [<subtype>]<object_name>
 # where
 #     subtype - type of object storage, is not used for DB objects
+#         DB - object is stored in ec2api DB
 #         EC2 - object representation to end user
 #         OS - object is stored in OpenStack
+#         NOVA - object is stored in Nova (for EC2 Classic mode only)
 #     object_name - identifies the object

 # vpc objects
@@ -456,54 +458,6 @@ DB_INSTANCE_2 = {
     'client_token': CLIENT_TOKEN_INSTANCE_2,
 }

-NOVADB_INSTANCE_1 = {
-    'reservation_id': random_ec2_id('r'),
-    'launch_index': 0,
-    'kernel_id': ID_OS_IMAGE_AKI_1,
-    'ramdisk_id': ID_OS_IMAGE_ARI_1,
-    'root_device_name': ROOT_DEVICE_NAME_INSTANCE_1,
-    'hostname': '%s-%s' % (ID_EC2_RESERVATION_1, 0),
-    'key_data': PUBLIC_KEY_KEY_PAIR,
-    'user_data': None,
-}
-NOVADB_INSTANCE_2 = {
-    'reservation_id': ID_EC2_RESERVATION_2,
-    'launch_index': 0,
-    'kernel_id': None,
-    'ramdisk_id': None,
-    'root_device_name': ROOT_DEVICE_NAME_INSTANCE_2,
-    'hostname': 'Server %s' % ID_OS_INSTANCE_2,
-    'key_data': None,
-    'user_data': base64.b64encode(USER_DATA_INSTANCE_2),
-}
-
-NOVADB_BDM_INSTANCE_1 = []
-NOVADB_BDM_INSTANCE_2 = [
-    {'device_name': ROOT_DEVICE_NAME_INSTANCE_2,
-     'delete_on_termination': False,
-     'snapshot_id': None,
-     'volume_id': ID_OS_VOLUME_2,
-     'no_device': False,
-     'source_type': 'volume',
-     },
-    {'device_name': '/dev/sdc',
-     'snapshot_id': None,
-     'volume_id': None,
-     'virtual_name': 'swap',
-     'no_device': False,
-     'source_type': 'blank',
-     'guest_format': 'swap',
-     },
-    {'device_name': '/dev/sdd',
-     'snapshot_id': None,
-     'volume_id': None,
-     'virtual_name': 'ephemeral3',
-     'no_device': False,
-     'source_type': 'blank',
-     'guest_format': None,
-     },
-]
-
 EC2_INSTANCE_1 = {
     'instanceId': ID_EC2_INSTANCE_1,
     'privateIpAddress': IP_NETWORK_INTERFACE_2,
@@ -605,27 +559,28 @@ EC2_RESERVATION_2 = {
 EC2_BDM_METADATA_INSTANCE_1 = {}
 EC2_BDM_METADATA_INSTANCE_2 = {
     'ebs0': ROOT_DEVICE_NAME_INSTANCE_2,
-    'ephemeral0': '/dev/sdd',
-    'swap': '/dev/sdc',
 }


+# fake class for a instance received from Nova API with v2.3 microversion
+# support
 class OSInstance(object):
-    def __init__(self, instance_id, flavor=None, image=None, key_name=None,
-                 created=None, tenant_id=ID_OS_PROJECT, addresses={},
-                 security_groups=[], vm_state=None, host=None,
-                 availability_zone=None):
-        self.id = instance_id
-        self.flavor = flavor
-        self.image = image
-        self.key_name = key_name
-        self.created = created
-        self.tenant_id = tenant_id
-        self.addresses = addresses
-        self.security_groups = security_groups
-        setattr(self, 'OS-EXT-STS:vm_state', vm_state)
-        setattr(self, 'OS-EXT-SRV-ATTR:host', host)
-        setattr(self, 'OS-EXT-AZ:availability_zone', availability_zone)
+    def __init__(self, instance_dict):
+        self.id = instance_dict['id']
+        self.flavor = instance_dict.get('flavor')
+        self.image = instance_dict.get('image')
+        self.key_name = instance_dict.get('key_name')
+        self.created = instance_dict.get('created')
+        self.tenant_id = instance_dict.get('tenant_id', ID_OS_PROJECT)
+        self.addresses = copy.deepcopy(instance_dict.get('addresses', {}))
+        self.security_groups = copy.deepcopy(
+            instance_dict.get('security_groups', []))
+        setattr(self, 'OS-EXT-STS:vm_state', instance_dict.get('vm_state'))
+        setattr(self, 'OS-EXT-SRV-ATTR:host', instance_dict.get('host'))
+        setattr(self, 'OS-EXT-AZ:availability_zone',
+                instance_dict.get('availability_zone'))
+        setattr(self, 'os-extended-volumes:volumes_attached',
+                copy.deepcopy(instance_dict.get('volumes_attached', [])))

     def get(self):
         pass
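A quick sketch of how the dict-initialized fake behaves (the constants are the ones defined in this fakes module):

    os_instance = OSInstance({'id': ID_OS_INSTANCE_1,
                              'flavor': {'id': 'fakeFlavorId'},
                              'vm_state': 'active'})
    # extension attributes are exposed under the names novaclient uses
    assert getattr(os_instance, 'OS-EXT-STS:vm_state') == 'active'
    assert getattr(os_instance, 'os-extended-volumes:volumes_attached') == []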
@@ -648,10 +603,29 @@ class OSInstance(object):
     def get_console_output(self):
         return None

-OS_INSTANCE_1 = OSInstance(
-    ID_OS_INSTANCE_1, {'id': 'fakeFlavorId'},
-    image={'id': ID_OS_IMAGE_1},
-    addresses={
+
+# fake class for a instance received with an admin account from Nova API
+# with v2.3 microversion support
+class OSInstance_full(OSInstance):
+    def __init__(self, instance_dict):
+        super(OSInstance_full, self).__init__(instance_dict)
+        setattr(self, 'OS-EXT-SRV-ATTR:root_device_name',
+                instance_dict.get('root_device_name'))
+        setattr(self, 'OS-EXT-SRV-ATTR:kernel_id',
+                instance_dict.get('kernel_id'))
+        setattr(self, 'OS-EXT-SRV-ATTR:ramdisk_id',
+                instance_dict.get('ramdisk_id'))
+        setattr(self, 'OS-EXT-SRV-ATTR:user_data',
+                instance_dict.get('user_data'))
+        setattr(self, 'OS-EXT-SRV-ATTR:hostname',
+                instance_dict.get('hostname'))
+
+
+OS_INSTANCE_1 = {
+    'id': ID_OS_INSTANCE_1,
+    'flavor': {'id': 'fakeFlavorId'},
+    'image': {'id': ID_OS_IMAGE_1},
+    'addresses': {
         ID_EC2_SUBNET_2: [{'addr': IP_NETWORK_INTERFACE_2,
                            'version': 4,
                            'OS-EXT-IPS:type': 'fixed'},
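The OS-EXT-SRV-ATTR:* attributes set by OSInstance_full mirror what Nova returns for an admin request with microversion 2.3; a minimal sketch of reading them off a server object (the helper itself is illustrative, not part of the change):

    def instance_details(server):
        # admin-only extension attributes, as exposed by novaclient server objects
        return {
            'root_device_name': getattr(
                server, 'OS-EXT-SRV-ATTR:root_device_name', None),
            'kernel_id': getattr(server, 'OS-EXT-SRV-ATTR:kernel_id', None),
            'ramdisk_id': getattr(server, 'OS-EXT-SRV-ATTR:ramdisk_id', None),
            'user_data': getattr(server, 'OS-EXT-SRV-ATTR:user_data', None),
            'hostname': getattr(server, 'OS-EXT-SRV-ATTR:hostname', None),
        }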
@@ -664,21 +638,31 @@ OS_INSTANCE_1 = OSInstance(
                           {'addr': IP_ADDRESS_2,
                            'version': 4,
                            'OS-EXT-IPS:type': 'floating'}]},
-    key_name=NAME_KEY_PAIR,
-)
-OS_INSTANCE_2 = OSInstance(
-    ID_OS_INSTANCE_2, {'id': 'fakeFlavorId'},
-    security_groups=[{'name': NAME_DEFAULT_OS_SECURITY_GROUP},
-                     {'name': NAME_OTHER_OS_SECURITY_GROUP}],
-    availability_zone=NAME_AVAILABILITY_ZONE,
-    addresses={
+    'key_name': NAME_KEY_PAIR,
+    'root_device_name': ROOT_DEVICE_NAME_INSTANCE_1,
+    'kernel_id': ID_OS_IMAGE_AKI_1,
+    'ramdisk_id': ID_OS_IMAGE_ARI_1,
+    'hostname': '%s-%s' % (ID_EC2_RESERVATION_1, 0),
+}
+OS_INSTANCE_2 = {
+    'id': ID_OS_INSTANCE_2,
+    'flavor': {'id': 'fakeFlavorId'},
+    'security_groups': [{'name': NAME_DEFAULT_OS_SECURITY_GROUP},
+                        {'name': NAME_OTHER_OS_SECURITY_GROUP}],
+    'availability_zone': NAME_AVAILABILITY_ZONE,
+    'addresses': {
         ID_EC2_SUBNET_1: [{'addr': IPV6_INSTANCE_2,
                            'version': 6,
                            'OS-EXT-IPS:type': 'fixed'},
                           {'addr': IP_ADDRESS_NOVA_1,
                            'version': 4,
                            'OS-EXT-IPS:type': 'floating'}]},
-)
+    'root_device_name': ROOT_DEVICE_NAME_INSTANCE_2,
+    'volumes_attached': [{'id': ID_OS_VOLUME_2,
+                          'delete_on_termination': False}],
+    'user_data': base64.b64encode(USER_DATA_INSTANCE_2),
+    'hostname': 'Server %s' % ID_OS_INSTANCE_2,
+}


 # DHCP options objects
@@ -1309,18 +1293,18 @@ OS_SNAPSHOT_2 = {


 # volume objects
-class CinderVolume(object):
+class OSVolume(object):

     def __init__(self, volume):
         self.id = volume['id']
         self.status = volume['status']
-        self.availability_zone = volume['availability_zone']
-        self.size = volume['size']
-        self.created_at = volume['created_at']
-        self.display_name = volume['display_name']
-        self.display_description = volume['display_description']
-        self.snapshot_id = volume['snapshot_id']
-        self.attachments = volume['attachments']
+        self.availability_zone = volume.get('availability_zone')
+        self.size = volume.get('size')
+        self.created_at = volume.get('created_at')
+        self.display_name = volume.get('display_name')
+        self.display_description = volume.get('display_description')
+        self.snapshot_id = volume.get('snapshot_id')
+        self.attachments = copy.deepcopy(volume.get('attachments'))
         self.volume_type = None
         self.encrypted = False

@@ -1359,7 +1343,6 @@ EC2_VOLUME_2 = {
     'attachmentSet': [{'status': 'attached',
                        'instanceId': ID_EC2_INSTANCE_2,
                        'volumeId': ID_EC2_VOLUME_2,
-                       'deleteOnTermination': False,
                        'device': ROOT_DEVICE_NAME_INSTANCE_2}],
     'encrypted': False,
     'volumeType': None,
@@ -1431,8 +1414,8 @@ class NovaAvailabilityZone(object):

     def __init__(self, nova_availability_zone_dict):
         self.zoneName = nova_availability_zone_dict['zoneName']
-        self.zoneState = {'available':
-            nova_availability_zone_dict['zoneState'] == 'available'}
+        self.zoneState = {'available': (
+            nova_availability_zone_dict['zoneState'] == 'available')}
         self.hosts = nova_availability_zone_dict['hosts']

 OS_AVAILABILITY_ZONE = {'zoneName': NAME_AVAILABILITY_ZONE,
@@ -17,7 +17,7 @@ from oslo_config import cfg
 from oslo_config import fixture as config_fixture
 from oslotest import base as test_base

-from ec2api import context as ec2context
+from ec2api import context as ec2_context

 cfg.CONF.import_opt('keystone_url', 'ec2api.api')

@@ -33,21 +33,26 @@ class ContextTestCase(test_base.BaseTestCase):

     @mock.patch('keystoneclient.v2_0.client.Client')
     def test_get_os_admin_context(self, keystone):
-        service_catalog = mock.MagicMock()
+        service_catalog = mock.Mock()
         service_catalog.get_data.return_value = 'fake_service_catalog'
         keystone.return_value = mock.Mock(auth_user_id='fake_user_id',
                                           auth_tenant_id='fake_project_id',
                                           auth_token='fake_token',
                                           service_catalog=service_catalog)
-        context = ec2context.get_os_admin_context()
+        context = ec2_context.get_os_admin_context()
         self.assertEqual('fake_user_id', context.user_id)
         self.assertEqual('fake_project_id', context.project_id)
         self.assertEqual('fake_token', context.auth_token)
         self.assertEqual('fake_service_catalog', context.service_catalog)
         self.assertTrue(context.is_os_admin)
         conf = cfg.CONF
-        keystone.assert_called_with(
+        keystone.assert_called_once_with(
             username=conf.admin_user,
             password=conf.admin_password,
             tenant_name=conf.admin_tenant_name,
             auth_url=conf.keystone_url)
+        service_catalog.get_data.assert_called_once_with()
+
+        keystone.reset_mock()
+        self.assertEqual(context, ec2_context.get_os_admin_context())
+        self.assertFalse(keystone.called)
@@ -120,7 +120,7 @@ class ImageTestCase(base.ApiTestCase):
         self.db_api.get_item_by_id.assert_called_once_with(
             mock.ANY, fakes.ID_EC2_INSTANCE_2)
         self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_2)
-        is_ebs_instance.assert_called_once_with(mock.ANY, os_instance)
+        is_ebs_instance.assert_called_once_with(mock.ANY, os_instance.id)
         self.db_api.add_item.assert_called_once_with(
             mock.ANY, 'ami', {'os_id': os_image.id,
                               'is_public': False})
@@ -21,6 +21,7 @@ import mock
 from novaclient import exceptions as nova_exception
 from oslotest import base as test_base

+import ec2api.api.clients
 from ec2api.api import instance as instance_api
 from ec2api import exception
 from ec2api.tests.unit import base
@@ -47,9 +48,26 @@ class InstanceTestCase(base.ApiTestCase):
             mock.patch('ec2api.api.instance._utils_generate_uid'))
         self.utils_generate_uid = utils_generate_uid_patcher.start()
         self.addCleanup(utils_generate_uid_patcher.stop)
-        novadb_patcher = (mock.patch('ec2api.api.instance.novadb'))
-        self.novadb = novadb_patcher.start()
-        self.addCleanup(novadb_patcher.stop)
+        get_os_admin_context_patcher = (
+            mock.patch('ec2api.context.get_os_admin_context'))
+        self.get_os_admin_context = get_os_admin_context_patcher.start()
+        self.addCleanup(get_os_admin_context_patcher.stop)
+        self.get_os_admin_context.return_value = (
+            self._create_context(auth_token='admin_token'))
+
+        # NOTE(ft): create a special mock for Nova calls with admin account.
+        # Also make sure that an admin account is used only for this calls.
+        # The special mock is needed to validate tested function to retrieve
+        # appropriate data, as long as only calls with admin account return
+        # some specific data.
+        self.nova_admin = mock.create_autospec(self.NOVACLIENT_SPEC_OBJ)
+        self.novaclient_getter.side_effect = (
+            lambda *args, **kwargs: (
+                self.nova_admin
+                if (kwargs.get('auth_token') == 'admin_token') else
+                self.nova
+                if (kwargs.get('auth_token') != 'admin_token') else
+                None))

         format_security_groups_ids_names = (
             self.security_group_api.format_security_groups_ids_names)
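With this side_effect, the patched novaclient factory returns the admin mock only when called with the admin token; a sketch of what a test body can then rely on (the token values follow this setUp, and positional arguments are ignored by the dispatcher):

    # inside a test method, after setUp() has patched novaclient.client.Client
    admin_client = novaclient.client.Client('2.3', auth_token='admin_token')
    self.assertIs(self.nova_admin, admin_client)
    user_client = novaclient.client.Client('2.3', auth_token='fake_token')
    self.assertIs(self.nova, user_client)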
@@ -78,11 +96,10 @@ class InstanceTestCase(base.ApiTestCase):

         self.db_api.add_item.return_value = fakes.DB_INSTANCE_1
         self.nova.servers.create.return_value = (
-            fakes.OSInstance(
-                fakes.ID_OS_INSTANCE_1, {'id': 'fakeFlavorId'},
-                image={'id': fakes.ID_OS_IMAGE_1}))
-        self.novadb.instance_get_by_uuid.return_value = fakes.NOVADB_INSTANCE_1
-        self.novadb.block_device_mapping_get_all_by_instance.return_value = []
+            fakes.OSInstance({
+                'id': fakes.ID_OS_INSTANCE_1,
+                'flavor': {'id': 'fakeFlavorId'},
+                'image': {'id': fakes.ID_OS_IMAGE_1}}))
         self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1

         get_vpc_default_security_group_id.return_value = None
@@ -103,14 +120,15 @@ class InstanceTestCase(base.ApiTestCase):
             delete_on_termination=delete_port_on_termination)
         expected_reservation = fakes.gen_ec2_reservation(
             fakes.ID_EC2_RESERVATION_1,
-            [fakes.gen_ec2_instance(
-                fakes.ID_EC2_INSTANCE_1,
-                private_ip_address=fakes.IP_NETWORK_INTERFACE_1,
-                ec2_network_interfaces=[eni],
-                image_id=fakes.ID_EC2_IMAGE_1,
-                kernel_id=fakes.ID_EC2_IMAGE_AKI_1,
-                ramdisk_id=fakes.ID_EC2_IMAGE_ARI_1,
-                reservation_id=fakes.ID_EC2_RESERVATION_1)])
+            [tools.patch_dict(
+                fakes.gen_ec2_instance(
+                    fakes.ID_EC2_INSTANCE_1,
+                    private_ip_address=fakes.IP_NETWORK_INTERFACE_1,
+                    ec2_network_interfaces=[eni],
+                    image_id=fakes.ID_EC2_IMAGE_1,
+                    reservation_id=fakes.ID_EC2_RESERVATION_1),
+                {'privateDnsName': None},
+                ['rootDeviceType', 'rootDeviceName'])])
         get_ec2_network_interfaces.return_value = {
             fakes.ID_EC2_INSTANCE_1: [eni]}

@@ -126,7 +144,7 @@ class InstanceTestCase(base.ApiTestCase):
                 mock.ANY, fakes.ID_EC2_SUBNET_1,
                 **create_network_interface_kwargs))
         self.nova.servers.create.assert_called_once_with(
-            '%s-%s' % (fakes.ID_EC2_RESERVATION_1, 0),
+            fakes.EC2_INSTANCE_1['privateDnsName'],
             fakes.ID_OS_IMAGE_1, self.fake_flavor,
             min_count=1, max_count=1,
             kernel_id=None, ramdisk_id=None,
@@ -142,20 +160,12 @@ class InstanceTestCase(base.ApiTestCase):
             mock.ANY, fakes.DB_NETWORK_INTERFACE_1,
             fakes.ID_EC2_INSTANCE_1, 0,
             delete_on_termination=delete_port_on_termination))
-        self.novadb.instance_get_by_uuid.assert_called_once_with(
-            mock.ANY, fakes.ID_OS_INSTANCE_1)
         get_ec2_network_interfaces.assert_called_once_with(
             mock.ANY, instance_ids=[fakes.ID_EC2_INSTANCE_1])
-        self.assertEqual(2, self.db_api.get_item_ids.call_count)
-        self.db_api.get_item_ids.assert_any_call(
-            mock.ANY, 'aki', (fakes.ID_OS_IMAGE_AKI_1,))
-        self.db_api.get_item_ids.assert_any_call(
-            mock.ANY, 'ari', (fakes.ID_OS_IMAGE_ARI_1,))

         self.network_interface_api.reset_mock()
         self.nova.servers.reset_mock()
         self.db_api.reset_mock()
-        self.novadb.reset_mock()
         get_ec2_network_interfaces.reset_mock()

         do_check({'SubnetId': fakes.ID_EC2_SUBNET_1},
@@ -220,9 +230,13 @@ class InstanceTestCase(base.ApiTestCase):
             self.IDS_EC2_INSTANCE,
             zip(*[iter(self.EC2_ATTACHED_ENIS)] * 2)))
         ec2_instances = [
-            fakes.gen_ec2_instance(ec2_instance_id, launch_index=l_i,
-                                   ec2_network_interfaces=eni_pair,
-                                   reservation_id=fakes.ID_EC2_RESERVATION_1)
+            tools.patch_dict(
+                fakes.gen_ec2_instance(
+                    ec2_instance_id, launch_index=l_i,
+                    ec2_network_interfaces=eni_pair,
+                    reservation_id=fakes.ID_EC2_RESERVATION_1),
+                {'privateDnsName': None},
+                ['rootDeviceType', 'rootDeviceName'])
             for l_i, (ec2_instance_id, eni_pair) in enumerate(zip(
                 self.IDS_EC2_INSTANCE,
                 zip(*[iter(self.EC2_ATTACHED_ENIS)] * 2)))]
@@ -236,9 +250,10 @@ class InstanceTestCase(base.ApiTestCase):
             [{'networkInterface': eni}
              for eni in self.EC2_DETACHED_ENIS])
         self.nova.servers.create.side_effect = [
-            fakes.OSInstance(os_instance_id, {'id': 'fakeFlavorId'})
+            fakes.OSInstance({
+                'id': os_instance_id,
+                'flavor': {'id': 'fakeFlavorId'}})
             for os_instance_id in self.IDS_OS_INSTANCE]
-        self.novadb.instance_get_by_uuid.side_effect = self.NOVADB_INSTANCES
         self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1
         self.db_api.add_item.side_effect = self.DB_INSTANCES

@@ -370,14 +385,13 @@ class InstanceTestCase(base.ApiTestCase):
                       'reservation_id': fakes.random_ec2_id('r'),
                       'client_token': 'client-token-%s' % ind}
                      for ind in range(3)]
-        os_instances = [fakes.OSInstance(inst['os_id'])
+        os_instances = [fakes.OSInstance_full({'id': inst['os_id']})
                         for inst in instances]
         format_reservation.return_value = {'key': 'value'}

         # NOTE(ft): check select corresponding instance by client_token
         self.set_mock_db_items(instances[0], instances[1])
         get_os_instances_by_instances.return_value = [os_instances[1]]
-        self.novadb.instance_get_by_uuid.return_value = 'novadb_instance'
         get_ec2_network_interfaces.return_value = 'ec2_network_interfaces'

         resp = self.execute('RunInstances',
@@ -388,12 +402,10 @@ class InstanceTestCase(base.ApiTestCase):
         self.assertEqual({'key': 'value'}, resp)
         format_reservation.assert_called_once_with(
             mock.ANY, instances[1]['reservation_id'],
-            [(instances[1], os_instances[1], 'novadb_instance')],
+            [(instances[1], os_instances[1])],
             'ec2_network_interfaces')
         get_os_instances_by_instances.assert_called_once_with(
-            mock.ANY, instances[1:2])
-        self.novadb.instance_get_by_uuid.assert_called_once_with(
-            mock.ANY, os_instances[1].id)
+            mock.ANY, instances[1:2], nova=self.nova_admin)
         get_ec2_network_interfaces.assert_called_once_with(
             mock.ANY, [instances[1]['id']])

@@ -423,15 +435,12 @@ class InstanceTestCase(base.ApiTestCase):
         format_reservation.reset_mock()
         get_os_instances_by_instances.reset_mock()
         instance_engine.reset_mock()
-        self.novadb.reset_mock()
         for inst in instances:
             inst['reservation_id'] = instances[0]['reservation_id']
             inst['client_token'] = 'client-token'
         self.set_mock_db_items(*instances)
         get_os_instances_by_instances.return_value = [os_instances[0],
                                                       os_instances[2]]
-        self.novadb.instance_get_by_uuid.side_effect = ['novadb-instance-0',
-                                                        'novadb-instance-2']
         get_ec2_network_interfaces.return_value = 'ec2_network_interfaces'

         resp = self.execute('RunInstances',
@@ -442,14 +451,11 @@ class InstanceTestCase(base.ApiTestCase):
         self.assertEqual({'key': 'value'}, resp)
         format_reservation.assert_called_once_with(
             mock.ANY, instances[0]['reservation_id'],
-            [(instances[0], os_instances[0], 'novadb-instance-0'),
-             (instances[2], os_instances[2], 'novadb-instance-2')],
+            [(instances[0], os_instances[0]),
+             (instances[2], os_instances[2])],
             'ec2_network_interfaces')
         self.assert_any_call(get_os_instances_by_instances, mock.ANY,
-                             instances)
-        self.assertEqual([mock.call(mock.ANY, os_instances[0].id),
-                          mock.call(mock.ANY, os_instances[2].id)],
-                         self.novadb.instance_get_by_uuid.mock_calls)
+                             instances, nova=self.nova_admin)
         get_ec2_network_interfaces.assert_called_once_with(
             mock.ANY, [instances[0]['id'], instances[2]['id']])

@@ -465,9 +471,11 @@ class InstanceTestCase(base.ApiTestCase):
         self.db_api.add_item.return_value = fakes.DB_INSTANCE_1
         self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1
         self.nova.servers.create.return_value = (
-            fakes.OSInstance(fakes.ID_OS_INSTANCE_1, {'id': 'fakeFlavorId'},
-                             image={'id': fakes.ID_OS_IMAGE_1}))
-        self.novadb.instance_get_by_uuid.side_effect = Exception()
+            fakes.OSInstance({'id': fakes.ID_OS_INSTANCE_1,
+                              'flavor': {'id': 'fakeFlavorId'},
+                              'image': {'id': fakes.ID_OS_IMAGE_1}}))
+        (self.network_interface_api.
+         _attach_network_interface_item.side_effect) = Exception()

         @tools.screen_unexpected_exception_logs
         def do_check(params, new_port=True, delete_on_termination=None):
@@ -484,9 +492,6 @@ class InstanceTestCase(base.ApiTestCase):
                 self.ANY_EXECUTE_ERROR, 'RunInstances', params)

             calls = []
-            calls.append(
-                mock.call.network_interface_api._detach_network_interface_item(
-                    mock.ANY, fakes.DB_NETWORK_INTERFACE_1))
             if not new_port:
                 calls.append(
                     mock.call.neutron.update_port(
@@ -532,8 +537,9 @@ class InstanceTestCase(base.ApiTestCase):
         instances = [{'id': fakes.random_ec2_id('i'),
                       'os_id': fakes.random_os_id()}
                      for dummy in range(3)]
-        os_instances = [fakes.OSInstance(inst['os_id'])
+        os_instances = [fakes.OSInstance({'id': inst['os_id']})
                         for inst in instances]
+        self.nova_admin.servers.list.return_value = os_instances[:2]
         network_interfaces = [{'id': fakes.random_ec2_id('eni'),
                                'os_id': fakes.random_os_id()}
                               for dummy in range(3)]
@@ -553,14 +559,12 @@ class InstanceTestCase(base.ApiTestCase):
                          for eni in network_interfaces]
         self.db_api.add_item.side_effect = instances
         self.nova.servers.create.side_effect = os_instances
-        self.novadb.instance_get_by_uuid.side_effect = [
-            {}, {}, Exception()]
         format_reservation.side_effect = (
             lambda _context, r_id, instance_info, *args, **kwargs: (
                 {'reservationId': r_id,
                  'instancesSet': [
                      {'instanceId': inst['id']}
-                     for inst, _os_inst, _novadb_inst in instance_info]}))
+                     for inst, _os_inst in instance_info]}))

         resp = self.execute('RunInstances',
                             {'ImageId': fakes.ID_EC2_IMAGE_1,
@@ -582,14 +586,16 @@ class InstanceTestCase(base.ApiTestCase):
         self.nova.servers.reset_mock()
         self.db_api.reset_mock()

+        (self.network_interface_api.
+         _attach_network_interface_item.side_effect) = [
+            None, None, Exception()]
         with tools.ScreeningLogger(log_name='ec2api.api'):
             do_check(instance_api.InstanceEngineNeutron())
-            (self.network_interface_api._detach_network_interface_item.
-             assert_called_once_with(mock.ANY, network_interfaces[2]))
             (self.network_interface_api.delete_network_interface.
              assert_called_once_with(
                  mock.ANY, network_interface_id=network_interfaces[2]['id']))

+        self.nova.servers.update.side_effect = [None, None, Exception()]
         with tools.ScreeningLogger(log_name='ec2api.api'):
             do_check(instance_api.InstanceEngineNova())

@@ -620,8 +626,9 @@ class InstanceTestCase(base.ApiTestCase):
             fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
             fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2,
             fakes.DB_ADDRESS_1, fakes.DB_ADDRESS_2)
-        self.nova.servers.get.side_effect = [fakes.OS_INSTANCE_1,
-                                             fakes.OS_INSTANCE_2]
+        os_instances = [fakes.OSInstance(fakes.OS_INSTANCE_1),
+                        fakes.OSInstance(fakes.OS_INSTANCE_2)]
+        self.nova.servers.get.side_effect = os_instances

         resp = self.execute('TerminateInstances',
                             {'InstanceId.1': fakes.ID_EC2_INSTANCE_1,
@@ -652,8 +659,7 @@ class InstanceTestCase(base.ApiTestCase):
         self.assertFalse(self.db_api.delete_item.called)
         self.assertEqual(2, os_instance_delete.call_count)
         self.assertEqual(2, os_instance_get.call_count)
-        for call_num, inst_id in enumerate([fakes.OS_INSTANCE_1,
-                                            fakes.OS_INSTANCE_2]):
+        for call_num, inst_id in enumerate(os_instances):
             self.assertEqual(mock.call(inst_id),
                              os_instance_delete.call_args_list[call_num])
             self.assertEqual(mock.call(inst_id),
@@ -674,7 +680,8 @@ class InstanceTestCase(base.ApiTestCase):
                  tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_2},
                                    fake_state_change)]}
         self.nova.servers.get.side_effect = (
-            lambda ec2_id: fakes.OSInstance(ec2_id, vm_state='active'))
+            lambda ec2_id: fakes.OSInstance({'id': ec2_id,
+                                             'vm_state': 'active'}))

         def do_check(mock_eni_list=[], detached_enis=[], deleted_enis=[]):
             self.set_mock_db_items(self.DB_FAKE_ENI,
@@ -729,8 +736,8 @@ class InstanceTestCase(base.ApiTestCase):
     def _test_instances_operation(self, operation, os_instance_operation,
                                   valid_state, invalid_state,
                                   get_os_instances_by_instances):
-        os_instance_1 = copy.deepcopy(fakes.OS_INSTANCE_1)
-        os_instance_2 = copy.deepcopy(fakes.OS_INSTANCE_2)
+        os_instance_1 = fakes.OSInstance(fakes.OS_INSTANCE_1)
+        os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2)
         for inst in (os_instance_1, os_instance_2):
             setattr(inst, 'OS-EXT-STS:vm_state', valid_state)

@@ -777,7 +784,8 @@ class InstanceTestCase(base.ApiTestCase):
     @mock.patch('oslo_utils.timeutils.utcnow')
     def _test_instance_get_operation(self, operation, getter, key, utcnow):
         self.set_mock_db_items(fakes.DB_INSTANCE_2)
-        self.nova.servers.get.return_value = fakes.OS_INSTANCE_2
+        os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2)
+        self.nova.servers.get.return_value = os_instance_2
         getter.return_value = 'fake_data'
         utcnow.return_value = datetime.datetime(2015, 1, 19, 23, 34, 45, 123)
         resp = self.execute(operation,
@@ -789,7 +797,7 @@ class InstanceTestCase(base.ApiTestCase):
         self.db_api.get_item_by_id.assert_called_once_with(
             mock.ANY, fakes.ID_EC2_INSTANCE_2)
         self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_2)
-        getter.assert_called_once_with(fakes.OS_INSTANCE_2)
+        getter.assert_called_once_with(os_instance_2)

     @mock.patch.object(fakes.OSInstance, 'get_password', autospec=True)
     def test_get_password_data(self, get_password):
@@ -811,16 +819,13 @@ class InstanceTestCase(base.ApiTestCase):
             fakes.DB_IMAGE_1, fakes.DB_IMAGE_2,
             fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1,
             fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3)
-        self.nova.servers.list.return_value = [fakes.OS_INSTANCE_1,
-                                               fakes.OS_INSTANCE_2]
-        self.novadb.instance_get_by_uuid.side_effect = (
-            tools.get_by_2nd_arg_getter({
-                fakes.ID_OS_INSTANCE_1: fakes.NOVADB_INSTANCE_1,
-                fakes.ID_OS_INSTANCE_2: fakes.NOVADB_INSTANCE_2}))
-        self.novadb.block_device_mapping_get_all_by_instance.side_effect = (
-            tools.get_by_2nd_arg_getter({
-                fakes.ID_OS_INSTANCE_1: fakes.NOVADB_BDM_INSTANCE_1,
-                fakes.ID_OS_INSTANCE_2: fakes.NOVADB_BDM_INSTANCE_2}))
+        self.nova_admin.servers.list.return_value = [
+            fakes.OSInstance_full(fakes.OS_INSTANCE_1),
+            fakes.OSInstance_full(fakes.OS_INSTANCE_2)]
+        self.cinder.volumes.list.return_value = [
+            fakes.OSVolume(fakes.OS_VOLUME_1),
+            fakes.OSVolume(fakes.OS_VOLUME_2),
+            fakes.OSVolume(fakes.OS_VOLUME_3)]
         self.network_interface_api.describe_network_interfaces.side_effect = (
             lambda *args, **kwargs: copy.deepcopy({
                 'networkInterfaceSet': [fakes.EC2_NETWORK_INTERFACE_1,
@@ -832,6 +837,10 @@ class InstanceTestCase(base.ApiTestCase):
                 {'reservationSet': [fakes.EC2_RESERVATION_1,
                                     fakes.EC2_RESERVATION_2]},
                 orderless_lists=True))
+        self.nova_admin.servers.list.assert_called_once_with(
+            search_opts={'all_tenants': True,
+                         'project_id': fakes.ID_OS_PROJECT})
+        self.cinder.volumes.list.assert_called_once_with(search_opts=None)

         self.db_api.get_items_by_ids = tools.CopyingMock(
             return_value=[fakes.DB_INSTANCE_1])
@@ -895,11 +904,12 @@ class InstanceTestCase(base.ApiTestCase):
         self.set_mock_db_items(
             fakes.DB_INSTANCE_2, fakes.DB_IMAGE_1, fakes.DB_IMAGE_2,
             fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3)
-        self.nova.servers.list.return_value = [fakes.OS_INSTANCE_2]
-        self.novadb.instance_get_by_uuid.return_value = (
-            fakes.NOVADB_INSTANCE_2)
-        self.novadb.block_device_mapping_get_all_by_instance.return_value = (
-            fakes.NOVADB_BDM_INSTANCE_2)
+        self.nova_admin.servers.list.return_value = [
+            fakes.OSInstance_full(fakes.OS_INSTANCE_2)]
+        self.cinder.volumes.list.return_value = [
+            fakes.OSVolume(fakes.OS_VOLUME_1),
+            fakes.OSVolume(fakes.OS_VOLUME_2),
+            fakes.OSVolume(fakes.OS_VOLUME_3)]

         resp = self.execute('DescribeInstances', {})

@@ -914,12 +924,6 @@ class InstanceTestCase(base.ApiTestCase):
         self._build_multiple_data_model()

         self.set_mock_db_items(*self.DB_INSTANCES)
-        self.novadb.instance_get_by_uuid.side_effect = (
-            tools.get_by_2nd_arg_getter(
-                dict((os_id, novadb_instance)
-                     for os_id, novadb_instance in zip(
-                         self.IDS_OS_INSTANCE,
-                         self.NOVADB_INSTANCES))))
         describe_network_interfaces = (
             self.network_interface_api.describe_network_interfaces)

@@ -928,17 +932,20 @@ class InstanceTestCase(base.ApiTestCase):
         describe_network_interfaces.return_value = copy.deepcopy(
             {'networkInterfaceSet': list(
                 itertools.chain(*ec2_enis_by_instance))})
-        self.nova.servers.list.return_value = [
-            fakes.OSInstance(
-                os_id, {'id': 'fakeFlavorId'},
-                addresses=dict((subnet_name,
+        self.nova_admin.servers.list.return_value = [
+            fakes.OSInstance_full({
+                'id': os_id,
+                'flavor': {'id': 'fakeFlavorId'},
+                'addresses': dict((subnet_name,
                                 [{'addr': addr,
                                   'version': 4,
                                   'OS-EXT-IPS:type': 'fixed'}])
-                               for subnet_name, addr in ips))
-            for os_id, ips in zip(
+                                  for subnet_name, addr in ips),
+                'root_device_name': '/dev/vda',
+                'hostname': '%s-%s' % (fakes.ID_EC2_RESERVATION_1, l_i)})
+            for l_i, (os_id, ips) in enumerate(zip(
                 self.IDS_OS_INSTANCE,
-                ips_by_instance)]
+                ips_by_instance))]

         resp = self.execute('DescribeInstances', {})

@@ -984,11 +991,10 @@ class InstanceTestCase(base.ApiTestCase):
     def test_describe_instances_auto_remove(self, remove_instances):
         self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
                                fakes.DB_VOLUME_2)
-        self.nova.servers.list.return_value = [fakes.OS_INSTANCE_2]
-        self.novadb.instance_get_by_uuid.return_value = (
-            fakes.NOVADB_INSTANCE_2)
-        self.novadb.block_device_mapping_get_all_by_instance.return_value = (
-            fakes.NOVADB_BDM_INSTANCE_2)
+        self.nova_admin.servers.list.return_value = [
+            fakes.OSInstance_full(fakes.OS_INSTANCE_2)]
+        self.cinder.volumes.list.return_value = [
+            fakes.OSVolume(fakes.OS_VOLUME_2)]

         resp = self.execute('DescribeInstances', {})

@@ -1011,9 +1017,9 @@ class InstanceTestCase(base.ApiTestCase):
         random.shuffle(db_instances)
         self.set_mock_db_items(*db_instances)
         os_instances = [
-            fakes.OSInstance(inst['os_id'])
+            fakes.OSInstance_full({'id': inst['os_id']})
             for inst in db_instances]
-        self.nova.servers.list.return_value = os_instances
+        self.nova_admin.servers.list.return_value = os_instances
         format_instance.side_effect = (
             lambda context, instance, *args: (
                 {'instanceId': instance['id'],
@@ -1040,20 +1046,14 @@ class InstanceTestCase(base.ApiTestCase):
         self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
                                fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1,
                                fakes.DB_VOLUME_2)
-        self.nova.servers.get.side_effect = (
+        self.nova_admin.servers.get.side_effect = (
             tools.get_by_1st_arg_getter({
-                fakes.ID_OS_INSTANCE_1: fakes.OS_INSTANCE_1,
-                fakes.ID_OS_INSTANCE_2: fakes.OS_INSTANCE_2}))
-        self.novadb.instance_get_by_uuid.side_effect = (
-            tools.get_by_2nd_arg_getter({
-                fakes.ID_OS_INSTANCE_1: fakes.NOVADB_INSTANCE_1,
-                fakes.ID_OS_INSTANCE_2: fakes.NOVADB_INSTANCE_2}))
-        self.novadb.block_device_mapping_get_all_by_instance.side_effect = (
-            tools.get_by_2nd_arg_getter({
-                fakes.ID_OS_INSTANCE_1: fakes.NOVADB_BDM_INSTANCE_1,
-                fakes.ID_OS_INSTANCE_2: fakes.NOVADB_BDM_INSTANCE_2}))
-        self.cinder.volumes.get.return_value = (
-            fakes.CinderVolume(fakes.OS_VOLUME_2))
+                fakes.ID_OS_INSTANCE_1: (
+                    fakes.OSInstance_full(fakes.OS_INSTANCE_1)),
+                fakes.ID_OS_INSTANCE_2: (
+                    fakes.OSInstance_full(fakes.OS_INSTANCE_2))}))
+        self.cinder.volumes.list.return_value = [
+            fakes.OSVolume(fakes.OS_VOLUME_2)]

         def do_check(instance_id, attribute, expected):
             resp = self.execute('DescribeInstanceAttribute',
@@ -1066,12 +1066,8 @@ class InstanceTestCase(base.ApiTestCase):
                  {'rootDeviceType': 'ebs',
                   'blockDeviceMapping': (
                       fakes.EC2_INSTANCE_2['blockDeviceMapping'])})
-        do_check(fakes.ID_EC2_INSTANCE_2, 'disableApiTermination',
-                 {'disableApiTermination': {'value': False}})
         do_check(fakes.ID_EC2_INSTANCE_2, 'groupSet',
                  {'groupSet': fakes.EC2_RESERVATION_2['groupSet']})
-        do_check(fakes.ID_EC2_INSTANCE_2, 'instanceInitiatedShutdownBehavior',
-                 {'instanceInitiatedShutdownBehavior': {'value': 'stop'}})
         do_check(fakes.ID_EC2_INSTANCE_2, 'instanceType',
                  {'instanceType': {'value': 'fake_flavor'}})
         do_check(fakes.ID_EC2_INSTANCE_1, 'kernel',
@@ -1169,12 +1165,6 @@ class InstanceTestCase(base.ApiTestCase):
             for l_i, (db_id, os_id) in enumerate(zip(
                 ids_ec2_instance,
                 ids_os_instance))]
-        novadb_instances = [
-            {'kernel_id': None,
-             'ramdisk_id': None,
-             'root_device_name': '/dev/vda',
-             'hostname': '%s-%s' % (fakes.ID_EC2_RESERVATION_1, l_i)}
-            for l_i, ec2_id in enumerate(ids_ec2_instance)]

         self.IDS_EC2_SUBNET = ids_ec2_subnet
         self.IDS_OS_PORT = ids_os_port
@@ -1186,7 +1176,6 @@ class InstanceTestCase(base.ApiTestCase):
         self.EC2_ATTACHED_ENIS = ec2_attached_enis
         self.EC2_DETACHED_ENIS = ec2_detached_enis
         self.DB_INSTANCES = db_instances
-        self.NOVADB_INSTANCES = novadb_instances

         # NOTE(ft): additional fake data to check filtering, etc
         self.DB_FAKE_ENI = fakes.gen_db_network_interface(
@@ -1539,10 +1528,10 @@ class InstancePrivateTestCase(test_base.BaseTestCase):
                  '/dev/sdb1': '::55:'},
                 orderless_lists=True))

-    @mock.patch('ec2api.api.instance.novadb')
-    @mock.patch('novaclient.v1_1.client.Client')
+    @mock.patch('cinderclient.client.Client')
+    @mock.patch('novaclient.client.Client')
     @mock.patch('ec2api.db.api.IMPL')
-    def test_format_instance(self, db_api, nova, novadb):
+    def test_format_instance(self, db_api, nova, cinder):
         nova = nova.return_value
         fake_context = mock.Mock(service_catalog=[{'type': 'fake'}])
         fake_flavor = mock.Mock()
@@ -1552,112 +1541,76 @@ class InstancePrivateTestCase(test_base.BaseTestCase):
         instance = {'id': fakes.random_ec2_id('i'),
                     'os_id': fakes.random_os_id(),
                     'launch_index': 0}
-        os_instance = fakes.OSInstance(instance['os_id'],
-                                       flavor={'id': 'fakeFlavorId'})
-        novadb_instance = {'kernel_id': None,
-                           'ramdisk_id': None,
-                           'hostname': instance['id']}
+        os_instance = fakes.OSInstance_full({'id': instance['os_id'],
+                                             'flavor': {'id': 'fakeFlavorId'}})

         # NOTE(ft): check instance state formatting
         setattr(os_instance, 'OS-EXT-STS:vm_state', 'active')
         formatted_instance = instance_api._format_instance(
-            fake_context, instance, os_instance, novadb_instance, [], {})
+            fake_context, instance, os_instance, [], {})
         self.assertEqual({'name': 'running', 'code': 16},
                          formatted_instance['instanceState'])

         setattr(os_instance, 'OS-EXT-STS:vm_state', 'stopped')
         formatted_instance = instance_api._format_instance(
-            fake_context, instance, os_instance, novadb_instance, [], {})
+            fake_context, instance, os_instance, [], {})
         self.assertEqual({'name': 'stopped', 'code': 80},
                          formatted_instance['instanceState'])

         # NOTE(ft): check auto creating of DB item for unknown OS images
         os_instance.image = {'id': fakes.random_os_id()}
-        novadb_instance['kernel_id'] = fakes.random_os_id()
-        novadb_instance['ramdisk_id'] = fakes.random_os_id()
+        kernel_id = fakes.random_os_id()
+        ramdisk_id = fakes.random_os_id()
+        setattr(os_instance, 'OS-EXT-SRV-ATTR:kernel_id', kernel_id)
+        setattr(os_instance, 'OS-EXT-SRV-ATTR:ramdisk_id', ramdisk_id)
         formatted_instance = instance_api._format_instance(
-            fake_context, instance, os_instance, novadb_instance, [], {})
+            fake_context, instance, os_instance, [], {})
         db_api.add_item_id.assert_has_calls(
             [mock.call(mock.ANY, 'ami', os_instance.image['id']),
-             mock.call(mock.ANY, 'aki', novadb_instance['kernel_id']),
-             mock.call(mock.ANY, 'ari', novadb_instance['ramdisk_id'])],
+             mock.call(mock.ANY, 'aki', kernel_id),
+             mock.call(mock.ANY, 'ari', ramdisk_id)],
            any_order=True)

-    @mock.patch('cinderclient.v1.client.Client')
-    @mock.patch('ec2api.api.instance.novadb')
-    def test_format_instance_bdm(self, novadb, cinder):
-        cinder = cinder.return_value
-        cinder.volumes.get.return_value = (
-            mock.Mock(status='attached', attachments={'device': 'fake'}))
+    @mock.patch('cinderclient.client.Client')
+    def test_format_instance_bdm(self, cinder):
         id_os_instance_1 = fakes.random_os_id()
         id_os_instance_2 = fakes.random_os_id()
-        novadb.block_device_mapping_get_all_by_instance.side_effect = (
-            tools.get_by_2nd_arg_getter({
-                id_os_instance_1: [{'device_name': '/dev/sdb1',
-                                    'delete_on_termination': False,
-                                    'snapshot_id': '1',
-                                    'volume_id': '2',
-                                    'no_device': False},
-                                   {'device_name': '/dev/sdb2',
-                                    'delete_on_termination': False,
-                                    'snapshot_id': None,
-                                    'volume_id': '3',
-                                    'volume_size': 1,
-                                    'no_device': False},
-                                   {'device_name': '/dev/sdb3',
-                                    'delete_on_termination': True,
-                                    'snapshot_id': '4',
-                                    'volume_id': '5',
-                                    'no_device': False},
-                                   {'device_name': '/dev/sdb4',
-                                    'delete_on_termination': False,
-                                    'snapshot_id': '6',
-                                    'volume_id': '7',
-                                    'no_device': False},
-                                   {'device_name': '/dev/sdb5',
-                                    'delete_on_termination': False,
-                                    'snapshot_id': '8',
-                                    'volume_id': '9',
-                                    'volume_size': 0,
-                                    'no_device': False},
-                                   {'device_name': '/dev/sdb6',
-                                    'delete_on_termination': False,
-                                    'snapshot_id': '10',
-                                    'volume_id': '11',
-                                    'volume_size': 1,
-                                    'no_device': False},
-                                   {'device_name': '/dev/sdb7',
-                                    'snapshot_id': None,
-                                    'volume_id': None,
-                                    'no_device': True},
-                                   {'device_name': '/dev/sdb8',
-                                    'snapshot_id': None,
-                                    'volume_id': None,
-                                    'virtual_name': 'swap',
-                                    'no_device': False},
-                                   {'device_name': '/dev/sdb9',
-                                    'snapshot_id': None,
-                                    'volume_id': None,
-                                    'virtual_name': 'ephemeral3',
-                                    'no_device': False}],
-                id_os_instance_2: [{'device_name': 'vda',
-                                    'delete_on_termination': False,
-                                    'snapshot_id': '1',
-                                    'volume_id': '21',
-                                    'no_device': False}]}))
+        cinder = cinder.return_value
+        cinder.volumes.list.return_value = [
+            fakes.OSVolume({'id': '2',
+                            'status': 'attached',
+                            'attachments': [{'device': '/dev/sdb1',
+                                             'server_id': id_os_instance_1}]}),
+            fakes.OSVolume({'id': '5',
+                            'status': 'attached',
+                            'attachments': [{'device': '/dev/sdb3',
+                                             'server_id': id_os_instance_1}]}),
+            fakes.OSVolume({'id': '21',
+                            'status': 'attached',
+                            'attachments': [{'device': 'vda',
+                                             'server_id': id_os_instance_2}]}),
+        ]
+        os_instance_1 = fakes.OSInstance_full({
+            'id': id_os_instance_1,
+            'volumes_attached': [{'id': '2',
+                                  'delete_on_termination': False},
+                                 {'id': '5',
+                                  'delete_on_termination': True}],
+            'root_device_name': '/dev/sdb1'})
+        os_instance_2 = fakes.OSInstance_full({
+            'id': id_os_instance_2,
+            'volumes_attached': [{'id': '21',
+                                  'delete_on_termination': False}],
+            'root_device_name': '/dev/sdc1'})

         db_volumes_1 = {'2': {'id': 'vol-00000002'},
-                        '3': {'id': 'vol-00000003'},
-                        '5': {'id': 'vol-00000005'},
-                        '7': {'id': 'vol-00000007'},
-                        '9': {'id': 'vol-00000009'},
-                        '11': {'id': 'vol-0000000b'}}
+                        '5': {'id': 'vol-00000005'}}

         fake_context = mock.Mock(service_catalog=[{'type': 'fake'}])

         result = {}
         instance_api._cloud_format_instance_bdm(
-            fake_context, id_os_instance_1, '/dev/sdb1', result, db_volumes_1)
+            fake_context, os_instance_1, result, db_volumes_1)
         self.assertThat(
             result,
             matchers.DictMatches({
@@ -1668,39 +1621,19 @@ class InstancePrivateTestCase(test_base.BaseTestCase):
                          'deleteOnTermination': False,
                          'volumeId': 'vol-00000002',
                          }},
-                    {'deviceName': '/dev/sdb2',
-                     'ebs': {'status': 'attached',
-                             'deleteOnTermination': False,
-                             'volumeId': 'vol-00000003',
-                             }},
                     {'deviceName': '/dev/sdb3',
                      'ebs': {'status': 'attached',
                              'deleteOnTermination': True,
                              'volumeId': 'vol-00000005',
-                             }},
-                    {'deviceName': '/dev/sdb4',
-                     'ebs': {'status': 'attached',
-                             'deleteOnTermination': False,
-                             'volumeId': 'vol-00000007',
-                             }},
-                    {'deviceName': '/dev/sdb5',
-                     'ebs': {'status': 'attached',
-                             'deleteOnTermination': False,
-                             'volumeId': 'vol-00000009',
-                             }},
-                    {'deviceName': '/dev/sdb6',
-                     'ebs': {'status': 'attached',
-                             'deleteOnTermination': False,
-                             'volumeId': 'vol-0000000b',
                              }}]},
-                orderless_lists=True))
+                orderless_lists=True), verbose=True)

         result = {}
         with mock.patch('ec2api.db.api.IMPL') as db_api:
             db_api.get_items.return_value = [{'id': 'vol-00000015',
                                               'os_id': '21'}]
             instance_api._cloud_format_instance_bdm(
-                fake_context, id_os_instance_2, '/dev/sdc1', result)
+                fake_context, os_instance_2, result)
         self.assertThat(
             result,
             matchers.DictMatches({
@@ -1712,24 +1645,25 @@ class InstancePrivateTestCase(test_base.BaseTestCase):
                          'volumeId': 'vol-00000015',
                          }}]}))

-    @mock.patch('cinderclient.v1.client.Client')
-    @mock.patch('ec2api.api.instance.novadb')
-    def test_format_instance_bdm_while_attaching_volume(self, novadb, cinder):
-        cinder = cinder.return_value
-        cinder.volumes.get.return_value = (
-            mock.Mock(status='attaching'))
+    @mock.patch('cinderclient.client.Client')
+    def test_format_instance_bdm_while_attaching_volume(self, cinder):
         id_os_instance = fakes.random_os_id()
-        novadb.block_device_mapping_get_all_by_instance.return_value = (
-            [{'device_name': '/dev/sdb1',
-              'delete_on_termination': False,
-              'snapshot_id': '1',
-              'volume_id': '2',
-              'no_device': False}])
+        cinder = cinder.return_value
+        cinder.volumes.list.return_value = [
+            fakes.OSVolume({'id': '2',
+                            'status': 'attaching',
+                            'attachments': [{'device': '/dev/sdb1',
+                                             'server_id': id_os_instance}]})]
+        os_instance = fakes.OSInstance_full({
+            'id': id_os_instance,
+            'volumes_attached': [{'id': '2',
+                                  'delete_on_termination': False}],
+            'root_device_name': '/dev/vda'})
         fake_context = mock.Mock(service_catalog=[{'type': 'fake'}])

         result = {}
         instance_api._cloud_format_instance_bdm(
-            fake_context, id_os_instance, '/dev/vda', result,
+            fake_context, os_instance, result,
             {'2': {'id': 'vol-00000002'}})
         self.assertThat(
             result,
@@ -1742,36 +1676,65 @@ class InstancePrivateTestCase(test_base.BaseTestCase):
                          'volumeId': 'vol-00000002',
                          }}]}))

+    def test_format_instance_bdm_no_bdm(self):
+        context = mock.Mock()
+        os_instance_id = fakes.random_os_id()
+        os_instance = fakes.OSInstance_full({'id': os_instance_id})
+
+        res = {}
+        setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', None)
+        instance_api._cloud_format_instance_bdm(
+            context, os_instance, res, {}, {os_instance_id: []})
+        self.assertEqual({}, res)
+
+        res = {}
+        setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '')
+        instance_api._cloud_format_instance_bdm(
+            context, os_instance, res, {}, {os_instance_id: []})
+        self.assertEqual({}, res)
+
+        res = {}
+        setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '/dev/vdd')
+        instance_api._cloud_format_instance_bdm(
+            context, os_instance, res, {}, {os_instance_id: []})
+        self.assertEqual({'rootDeviceType': 'instance-store'}, res)
+
     @mock.patch('ec2api.api.instance._remove_instances')
-    @mock.patch('novaclient.v1_1.client.Client')
+    @mock.patch('novaclient.client.Client')
     def test_get_os_instances_by_instances(self, nova, remove_instances):
         nova = nova.return_value
         fake_context = mock.Mock(service_catalog=[{'type': 'fake'}])
+        os_instance_1 = fakes.OSInstance(fakes.OS_INSTANCE_1)
+        os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2)

-        def do_check(exactly_flag):
-            nova.servers.get.side_effect = [fakes.OS_INSTANCE_1,
+        def do_check(exactly_flag=None, specify_nova_client=False):
+            nova.servers.get.side_effect = [os_instance_1,
                                             nova_exception.NotFound(404),
-                                            fakes.OS_INSTANCE_2]
+                                            os_instance_2]
             absent_instance = {'id': fakes.random_ec2_id('i'),
                                'os_id': fakes.random_os_id()}

             params = (fake_context, [fakes.DB_INSTANCE_1, absent_instance,
                                      fakes.DB_INSTANCE_2],
-                      exactly_flag)
+                      exactly_flag, nova if specify_nova_client else False)
             if exactly_flag:
                 self.assertRaises(exception.InvalidInstanceIDNotFound,
                                   instance_api._get_os_instances_by_instances,
                                   *params)
             else:
                 res = instance_api._get_os_instances_by_instances(*params)
-                self.assertEqual([fakes.OS_INSTANCE_1, fakes.OS_INSTANCE_2],
+                self.assertEqual([os_instance_1, os_instance_2],
                                  res)
             remove_instances.assert_called_once_with(fake_context,
                                                      [absent_instance])
             remove_instances.reset_mock()

-        do_check(True)
-        do_check(False)
+        do_check(exactly_flag=True)
+        # NOTE(ft): stop to return fake data by the mocked client and create
+        # a new one to pass it into the function
+        nova.servers.side_effect = None
+        nova = mock.Mock()
+        do_check(specify_nova_client=True)

     @mock.patch('ec2api.api.network_interface._detach_network_interface_item')
     @mock.patch('ec2api.api.address._disassociate_address_item')
@@ -1830,60 +1793,120 @@ class InstancePrivateTestCase(test_base.BaseTestCase):
             network_interfaces_of_removed_instances)
         check_calls()

-    @mock.patch('ec2api.api.instance.novadb')
-    def test_is_ebs_instance(self, novadb):
-        context = mock.Mock(service_catalog=[{'type': 'fake'}])
-        os_instance = fakes.OSInstance(fakes.random_os_id())
+    @mock.patch('cinderclient.client.Client')
+    def test_get_os_volumes(self, cinder):
+        cinder = cinder.return_value
+        context = mock.Mock(service_catalog=[{'type': 'fake'}],
+                            is_os_admin=False)
+        os_volume_ids = [fakes.random_os_id() for _i in range(5)]
+        os_instance_ids = [fakes.random_os_id() for _i in range(2)]
+        os_volumes = [
+            fakes.OSVolume(
+                {'id': os_volume_ids[0],
+                 'status': 'attached',
+                 'attachments': [{'server_id': os_instance_ids[0]}]}),
+            fakes.OSVolume(
+                {'id': os_volume_ids[1],
+                 'status': 'attaching',
+                 'attachments': []}),
+            fakes.OSVolume(
+                {'id': os_volume_ids[2],
+                 'status': 'detaching',
+                 'attachments': [{'server_id': os_instance_ids[0]}]}),
+            fakes.OSVolume(
+                {'id': os_volume_ids[3],
+                 'status': 'attached',
+                 'attachments': [{'server_id': os_instance_ids[1]}]}),
+            fakes.OSVolume(
+                {'id': os_volume_ids[4],
+                 'status': 'available',
+                 'attachments': []}),
+        ]
+        cinder.volumes.list.return_value = os_volumes
+        res = instance_api._get_os_volumes(context)
+        self.assertIn(os_instance_ids[0], res)
+        self.assertIn(os_instance_ids[1], res)
+        self.assertEqual([os_volumes[0], os_volumes[2]],
+                         res[os_instance_ids[0]])
+        self.assertEqual([os_volumes[3]], res[os_instance_ids[1]])
+        cinder.volumes.list.assert_called_once_with(search_opts=None)

-        novadb.instance_get_by_uuid.return_value = {}
-        novadb.block_device_mapping_get_all_by_instance.return_value = []
-        self.assertFalse(instance_api._is_ebs_instance(context, os_instance))
+        context.is_os_admin = True
+        instance_api._get_os_volumes(context)
+        cinder.volumes.list.assert_called_with(
+            search_opts={'all_tenants': True,
+                         'project_id': context.project_id})

-        novadb.instance_get_by_uuid.return_value = {
-            'root_device_name': '/dev/vda'}
-        self.assertFalse(instance_api._is_ebs_instance(context, os_instance))
+    @mock.patch('ec2api.api.clients.nova', wraps=ec2api.api.clients.nova)
+    @mock.patch('ec2api.context.get_os_admin_context')
+    @mock.patch('cinderclient.client.Client')
+    @mock.patch('novaclient.client.Client')
+    def test_is_ebs_instance(self, nova, cinder, get_os_admin_context,
+                             nova_client_getter):
+        nova = nova.return_value
+        cinder = cinder.return_value
+        context = mock.Mock(service_catalog=[{'type': 'fake'}],
+                            is_os_admin=False)
+        os_instance = fakes.OSInstance_full({'id': fakes.random_os_id()})

-        novadb.block_device_mapping_get_all_by_instance.return_value = [
-            {'device_name': '/dev/vda',
-             'volume_id': None,
-             'snapshot_id': None,
-             'no_device': True}]
-        self.assertFalse(instance_api._is_ebs_instance(context, os_instance))
+        nova.servers.get.return_value = os_instance
+        cinder.volumes.list.return_value = []
+        self.assertFalse(instance_api._is_ebs_instance(context,
+                                                       os_instance.id))

-        novadb.block_device_mapping_get_all_by_instance.return_value = [
-            {'device_name': '/dev/vda',
-             'volume_id': fakes.random_ec2_id('vol'),
-             'snapshot_id': None,
-             'no_device': True}]
-        self.assertFalse(instance_api._is_ebs_instance(context, os_instance))
+        cinder.volumes.list.return_value = [
+            fakes.OSVolume(
+                {'id': fakes.random_os_id(),
+                 'status': 'attached',
+                 'attachments': [{'device': '/dev/vda',
+                                  'server_id': os_instance.id}]})]
+        setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '')
+        self.assertFalse(instance_api._is_ebs_instance(context,
+                                                       os_instance.id))

-        novadb.block_device_mapping_get_all_by_instance.return_value = [
-            {'device_name': '/dev/vda',
-             'volume_id': '',
-             'snapshot_id': '',
-             'no_device': False}]
-        self.assertFalse(instance_api._is_ebs_instance(context, os_instance))
+        setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '/dev/vda')
+        cinder.volumes.list.return_value = []
+        self.assertFalse(instance_api._is_ebs_instance(context,
+                                                       os_instance.id))

-        novadb.block_device_mapping_get_all_by_instance.return_value = [
-            {'device_name': '/dev/vdb',
-             'volume_id': fakes.random_ec2_id('vol'),
-             'snapshot_id': '',
-             'no_device': False}]
-        self.assertFalse(instance_api._is_ebs_instance(context, os_instance))
+        cinder.volumes.list.return_value = [
+            fakes.OSVolume(
+                {'id': fakes.random_os_id(),
+                 'status': 'attached',
+                 'attachments': [{'device': '/dev/vda',
+                                  'server_id': fakes.random_os_id()}]})]
+        self.assertFalse(instance_api._is_ebs_instance(context,
+                                                       os_instance.id))

-        novadb.block_device_mapping_get_all_by_instance.return_value = [
-            {'device_name': '/dev/vda',
-             'volume_id': fakes.random_ec2_id('vol'),
-             'snapshot_id': '',
-             'no_device': False}]
-        self.assertTrue(instance_api._is_ebs_instance(context, os_instance))
+        cinder.volumes.list.return_value = [
+            fakes.OSVolume(
+                {'id': fakes.random_os_id(),
+                 'status': 'attached',
+                 'attachments': [{'device': '/dev/vdb',
+                                  'server_id': os_instance.id}]})]
+        self.assertFalse(instance_api._is_ebs_instance(context,
+                                                       os_instance.id))

-        novadb.block_device_mapping_get_all_by_instance.return_value = [
-            {'device_name': 'vda',
-             'volume_id': fakes.random_ec2_id('vol'),
-             'snapshot_id': '',
-             'no_device': False}]
-        self.assertTrue(instance_api._is_ebs_instance(context, os_instance))
+        cinder.volumes.list.return_value = [
+            fakes.OSVolume(
+                {'id': fakes.random_os_id(),
+                 'status': 'attached',
+                 'attachments': [{'device': '/dev/vda',
+                                  'server_id': os_instance.id}]})]
+        self.assertTrue(instance_api._is_ebs_instance(context,
+                                                      os_instance.id))
+        nova_client_getter.assert_called_with(
+            get_os_admin_context.return_value)
+        cinder.volumes.list.assert_called_with(search_opts=None)
+
+        cinder.volumes.list.return_value = [
+            fakes.OSVolume(
+                {'id': fakes.random_os_id(),
+                 'status': 'attached',
+                 'attachments': [{'device': 'vda',
+                                  'server_id': os_instance.id}]})]
+        self.assertTrue(instance_api._is_ebs_instance(context,
+                                                      os_instance.id))

     def test_block_device_strip_dev(self):
         self.assertEqual(
@@ -333,11 +333,10 @@ class ProxyTestCase(test_base.BaseTestCase):
         self.assertEqual(1, constant_time_compare.call_count)

     @mock.patch('keystoneclient.v2_0.client.Client')
-    @mock.patch('novaclient.v1_1.client.Client')
+    @mock.patch('novaclient.client.Client')
     @mock.patch('ec2api.db.api.IMPL')
     @mock.patch('ec2api.metadata.api.instance_api')
-    @mock.patch('ec2api.metadata.api.novadb')
-    def test_get_metadata(self, novadb, instance_api, db_api, nova, keystone):
+    def test_get_metadata(self, instance_api, db_api, nova, keystone):
         service_catalog = mock.MagicMock()
         service_catalog.get_data.return_value = []
         keystone.return_value = mock.Mock(auth_user_id='fake_user_id',
@@ -346,7 +345,11 @@ class ProxyTestCase(test_base.BaseTestCase):
                                           service_catalog=service_catalog)
         nova.return_value.fixed_ips.get.return_value = (
             mock.Mock(hostname='fake_name'))
-        nova.return_value.servers.list.return_value = [fakes.OS_INSTANCE_1]
+        nova.return_value.servers.list.return_value = [
+            fakes.OSInstance(fakes.OS_INSTANCE_1)]
+        keypair = mock.Mock(public_key=fakes.PUBLIC_KEY_KEY_PAIR)
+        keypair.configure_mock(name=fakes.NAME_KEY_PAIR)
+        nova.return_value.keypairs.get.return_value = keypair
         db_api.get_item_ids.return_value = [
             (fakes.ID_EC2_INSTANCE_1, fakes.ID_OS_INSTANCE_1)]
         instance_api.describe_instances.return_value = {
@@ -354,9 +357,6 @@ class ProxyTestCase(test_base.BaseTestCase):
         instance_api.describe_instance_attribute.return_value = {
             'instanceId': fakes.ID_EC2_INSTANCE_1,
             'userData': {'value': 'fake_user_data'}}
-        novadb.instance_get_by_uuid.return_value = fakes.NOVADB_INSTANCE_1
-        novadb.block_device_mapping_get_all_by_instance.return_value = []
-        novadb.instance_get_by_uuid.return_value = fakes.NOVADB_INSTANCE_1

         def _test_metadata_path(relpath):
             # recursively confirm a http 200 from all meta-data elements
@@ -364,6 +364,7 @@ class ProxyTestCase(test_base.BaseTestCase):
             request = webob.Request.blank(
                 relpath, remote_addr=fakes.IP_NETWORK_INTERFACE_2)
             response = request.get_response(self.handler)
+            self.assertEqual(200, response.status_int)
             for item in response.body.split('\n'):
                 if 'public-keys' in relpath:
                     # meta-data/public-keys/0=keyname refers to
@@ -30,10 +30,6 @@ class MetadataApiTestCase(base.ApiTestCase):
     def setUp(self):
         super(MetadataApiTestCase, self).setUp()

-        novadb_patcher = mock.patch('ec2api.metadata.api.novadb')
-        self.novadb = novadb_patcher.start()
-        self.addCleanup(novadb_patcher.stop)
-
         instance_api_patcher = mock.patch('ec2api.metadata.api.instance_api')
         self.instance_api = instance_api_patcher.start()
         self.addCleanup(instance_api_patcher.stop)
@@ -44,9 +40,6 @@ class MetadataApiTestCase(base.ApiTestCase):
         self.instance_api.describe_instance_attribute.return_value = {
             'instanceId': fakes.ID_EC2_INSTANCE_1,
             'userData': {'value': 'fake_user_data'}}
-        self.novadb.instance_get_by_uuid.return_value = fakes.NOVADB_INSTANCE_1
-        self.novadb.block_device_mapping_get_all_by_instance.return_value = (
-            fakes.NOVADB_BDM_INSTANCE_1)

         self.fake_context = self._create_context()

@@ -55,8 +48,9 @@ class MetadataApiTestCase(base.ApiTestCase):
         self.assertEqual('\n'.join(api.VERSIONS + ['latest']), retval)

     def test_get_instance_and_project_id(self):
-        self.nova.servers.list.return_value = [fakes.OS_INSTANCE_1,
-                                               fakes.OS_INSTANCE_2]
+        self.nova.servers.list.return_value = [
+            fakes.OSInstance(fakes.OS_INSTANCE_1),
+            fakes.OSInstance(fakes.OS_INSTANCE_2)]
         self.nova.fixed_ips.get.return_value = mock.Mock(hostname='fake_name')
         self.assertEqual(
             (fakes.ID_OS_INSTANCE_1, fakes.ID_OS_PROJECT),
@@ -74,12 +68,14 @@ class MetadataApiTestCase(base.ApiTestCase):
                           self.fake_context,
                           fakes.IP_NETWORK_INTERFACE_2)

-        self.nova.servers.list.return_value = [fakes.OS_INSTANCE_2]
+        self.nova.servers.list.return_value = [
+            fakes.OSInstance(fakes.OS_INSTANCE_2)]
         check_raise()

         self.nova.fixed_ips.get.side_effect = nova_exception.NotFound('fake')
-        self.nova.servers.list.return_value = [fakes.OS_INSTANCE_1,
-                                               fakes.OS_INSTANCE_2]
+        self.nova.servers.list.return_value = [
+            fakes.OSInstance(fakes.OS_INSTANCE_1),
+            fakes.OSInstance(fakes.OS_INSTANCE_2)]
         check_raise()

     def test_get_version_root(self):
@@ -99,10 +95,6 @@ class MetadataApiTestCase(base.ApiTestCase):
             self.fake_context, [fakes.ID_EC2_INSTANCE_1])
         self.instance_api.describe_instance_attribute.assert_called_with(
             self.fake_context, fakes.ID_EC2_INSTANCE_1, 'userData')
-        self.novadb.instance_get_by_uuid.assert_called_with(
-            self.fake_context, fakes.ID_OS_INSTANCE_1)
-        (self.novadb.block_device_mapping_get_all_by_instance.
-         assert_called_with(self.fake_context, fakes.ID_OS_INSTANCE_1))

     def test_invalid_path(self):
         self.assertRaises(exception.EC2MetadataNotFound,
@@ -174,7 +166,11 @@ class MetadataApiTestCase(base.ApiTestCase):
             fakes.ID_OS_INSTANCE_2, fakes.IP_NETWORK_INTERFACE_1)
         self.assertEqual(fakes.IP_NETWORK_INTERFACE_1, retval)

-    def test_pubkey(self):
+    @mock.patch('novaclient.client.Client')
+    def test_pubkey(self, nova):
+        keypair = mock.Mock(public_key=fakes.PUBLIC_KEY_KEY_PAIR)
+        keypair.configure_mock(name=fakes.NAME_KEY_PAIR)
+        nova.return_value.keypairs.get.return_value = keypair
         retval = api.get_metadata_item(
             self.fake_context,
             ['2009-04-04', 'meta-data', 'public-keys'],
@@ -225,8 +221,6 @@ class MetadataApiTestCase(base.ApiTestCase):
         self.instance_api._block_device_strip_dev.assert_called_with(
             fakes.EC2_INSTANCE_1['rootDeviceName'])

-        self.novadb.block_device_mapping_get_all_by_instance.return_value = (
-            fakes.NOVADB_BDM_INSTANCE_2)
         self.instance_api._block_device_strip_dev.return_value = 'sdb1'
         retval = api._build_block_device_mappings(
             'fake_context', fakes.EC2_INSTANCE_2, fakes.ID_OS_INSTANCE_2)
@@ -235,7 +229,5 @@ class MetadataApiTestCase(base.ApiTestCase):
         expected.update(fakes.EC2_BDM_METADATA_INSTANCE_2)
         self.assertThat(retval,
                         matchers.DictMatches(expected))
-        (self.novadb.block_device_mapping_get_all_by_instance.
-         assert_called_with('fake_context', fakes.ID_OS_INSTANCE_2))
         self.instance_api._block_device_strip_dev.assert_called_with(
             fakes.EC2_INSTANCE_2['rootDeviceName'])
@@ -104,7 +104,7 @@ class SnapshotTestCase(base.ApiTestCase):
         self.set_mock_db_items(fakes.DB_VOLUME_2)
         self.cinder.volumes.get.side_effect = (
             lambda vol_id: (
-                fakes.CinderVolume(fakes.OS_VOLUME_2)
+                fakes.OSVolume(fakes.OS_VOLUME_2)
                 if vol_id == fakes.ID_OS_VOLUME_2
                 else None))

@@ -24,9 +24,9 @@ class VolumeTestCase(base.ApiTestCase):

     def test_describe_volumes(self):
         self.cinder.volumes.list.return_value = [
-            fakes.CinderVolume(fakes.OS_VOLUME_1),
-            fakes.CinderVolume(fakes.OS_VOLUME_2),
-            fakes.CinderVolume(fakes.OS_VOLUME_3)]
+            fakes.OSVolume(fakes.OS_VOLUME_1),
+            fakes.OSVolume(fakes.OS_VOLUME_2),
+            fakes.OSVolume(fakes.OS_VOLUME_3)]

         self.set_mock_db_items(fakes.DB_VOLUME_1, fakes.DB_VOLUME_2,
                                fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
@@ -80,8 +80,8 @@ class VolumeTestCase(base.ApiTestCase):

     def test_describe_volumes_invalid_parameters(self):
         self.cinder.volumes.list.return_value = [
-            fakes.CinderVolume(fakes.OS_VOLUME_1),
-            fakes.CinderVolume(fakes.OS_VOLUME_2)]
+            fakes.OSVolume(fakes.OS_VOLUME_1),
+            fakes.OSVolume(fakes.OS_VOLUME_2)]

         self.assert_execution_error(
             'InvalidVolume.NotFound', 'DescribeVolumes',
@@ -95,7 +95,7 @@ class VolumeTestCase(base.ApiTestCase):

     def test_create_volume(self):
         self.cinder.volumes.create.return_value = (
-            fakes.CinderVolume(fakes.OS_VOLUME_1))
+            fakes.OSVolume(fakes.OS_VOLUME_1))
         self.db_api.add_item.side_effect = (
             tools.get_db_api_add_item(fakes.ID_EC2_VOLUME_1))

@@ -113,7 +113,7 @@ class VolumeTestCase(base.ApiTestCase):

     def test_create_volume_from_snapshot(self):
         self.cinder.volumes.create.return_value = (
-            fakes.CinderVolume(fakes.OS_VOLUME_3))
+            fakes.OSVolume(fakes.OS_VOLUME_3))
         self.db_api.add_item.side_effect = (
             tools.get_db_api_add_item(fakes.ID_EC2_VOLUME_3))
         self.set_mock_db_items(fakes.DB_SNAPSHOT_1)
@@ -141,7 +141,7 @@ class VolumeTestCase(base.ApiTestCase):
         self.assertFalse(self.db_api.delete_item.called)

     def test_format_volume_maps_status(self):
-        fake_volume = fakes.CinderVolume(fakes.OS_VOLUME_1)
+        fake_volume = fakes.OSVolume(fakes.OS_VOLUME_1)
         self.cinder.volumes.list.return_value = [fake_volume]
         self.set_mock_db_items(fakes.DB_VOLUME_1)

@@ -163,7 +163,7 @@ class VolumeTestCase(base.ApiTestCase):

     def test_attach_volume(self):
         self.set_mock_db_items(fakes.DB_INSTANCE_2, fakes.DB_VOLUME_3)
-        os_volume = fakes.CinderVolume(fakes.OS_VOLUME_3)
+        os_volume = fakes.OSVolume(fakes.OS_VOLUME_3)
         os_volume.attachments.append({'device': '/dev/vdf',
                                       'server_id': fakes.ID_OS_INSTANCE_2})
         os_volume.status = 'attaching'
@@ -181,11 +181,11 @@ class VolumeTestCase(base.ApiTestCase):
         self.nova.volumes.create_server_volume.assert_called_once_with(
             fakes.ID_OS_INSTANCE_2, fakes.ID_OS_VOLUME_3, '/dev/vdf')

-    @mock.patch.object(fakes.CinderVolume, 'get', autospec=True)
+    @mock.patch.object(fakes.OSVolume, 'get', autospec=True)
     def test_detach_volume(self, os_volume_get):
         self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
                                fakes.DB_VOLUME_2)
-        os_volume = fakes.CinderVolume(fakes.OS_VOLUME_2)
+        os_volume = fakes.OSVolume(fakes.OS_VOLUME_2)
         self.cinder.volumes.get.return_value = os_volume
         os_volume_get.side_effect = (
             lambda vol: setattr(vol, 'status', 'detaching'))
@@ -204,7 +204,7 @@ class VolumeTestCase(base.ApiTestCase):
     def test_detach_volume_invalid_parameters(self):
        self.set_mock_db_items(fakes.DB_VOLUME_1)
        self.cinder.volumes.get.return_value = (
-            fakes.CinderVolume(fakes.OS_VOLUME_1))
+            fakes.OSVolume(fakes.OS_VOLUME_1))

        self.assert_execution_error('IncorrectState', 'DetachVolume',
                                    {'VolumeId': fakes.ID_EC2_VOLUME_1})
@@ -256,15 +256,6 @@
 #use_tpool=false


-#
-# Options defined in ec2api.novadb.sqlalchemy.api
-#
-
-# The SQLAlchemy connection string used to connect to the nova
-# database (string value)
-#connection_nova=<None>
-
-
 [keystone_authtoken]

 #
install.sh
@@ -202,25 +202,6 @@ function copynovaopt() {
   iniset $CONF_FILE DEFAULT $option_name $option
 }

-#get nova settings
-if [[ -z "$NOVA_CONNECTION" ]]; then
-    if [[ ! -f "$NOVA_CONF" ]]; then
-        reason="$NOVA_CONF isn't found"
-    else
-        reason="Connection string isn't found in $NOVA_CONF"
-        NOVA_CONNECTION=$(iniget $NOVA_CONF database connection)
-        if [[ -z "$NOVA_CONNECTION" ]]; then
-            NOVA_CONNECTION=$(iniget $NOVA_CONF DEFAULT sql_connection)
-        fi
-        if [[ -z "$NOVA_CONNECTION" ]]; then
-            NOVA_CONNECTION=$(iniget $NOVA_CONF DATABASE sql_connection)
-        fi
-        if [[ -z "$NOVA_CONNECTION" ]]; then
-            NOVA_CONNECTION=$(iniget $NOVA_CONF sql connection)
-        fi
-    fi
-    die_if_not_set $LINENO NOVA_CONNECTION "$reason. Please set NOVA_CONNECTION environment variable to the connection string to Nova DB"
-fi
 if [[ -n $(keystone catalog --service network) ]]; then
     VPC_SUPPORT="True"
 else
@@ -284,7 +265,6 @@ iniset $CONF_FILE DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d
 iniset $CONF_FILE DEFAULT verbose True
 iniset $CONF_FILE DEFAULT keystone_url "$OS_AUTH_URL"
 iniset $CONF_FILE database connection "$CONNECTION"
-iniset $CONF_FILE database connection_nova "$NOVA_CONNECTION"
 iniset $CONF_FILE DEFAULT full_vpc_support "$VPC_SUPPORT"
 iniset $CONF_FILE DEFAULT external_network "$EXTERNAL_NETWORK"
