Simplify usage of ec2utils.get_db_item
At the moment we can't completely get rid of validation of a kind of an object ID, because the common parameter validator doesn't validate complex parameters. So this validation is still used in several cases. Change-Id: Ibbe73a74306f2359f6554fcc8ef7bc31d356c167
This commit is contained in:
parent
71ef91bdc2
commit
2de2b74152
@ -241,7 +241,7 @@ class AddressEngineNeutron(object):
|
||||
return AddressEngineNova().release_address(context,
|
||||
public_ip, None)
|
||||
|
||||
address = ec2utils.get_db_item(context, 'eipalloc', allocation_id)
|
||||
address = ec2utils.get_db_item(context, allocation_id)
|
||||
if not _is_address_valid(context, neutron, address):
|
||||
raise exception.InvalidAllocationIDNotFound(
|
||||
id=allocation_id)
|
||||
@ -295,7 +295,7 @@ class AddressEngineNeutron(object):
|
||||
if instance_id:
|
||||
if not instance_network_interfaces:
|
||||
# NOTE(ft): check the instance exists
|
||||
ec2utils.get_db_item(context, 'i', instance_id)
|
||||
ec2utils.get_db_item(context, instance_id)
|
||||
msg = _('You must specify an IP address when mapping '
|
||||
'to a non-VPC instance')
|
||||
raise exception.InvalidParameterCombination(msg)
|
||||
@ -303,12 +303,12 @@ class AddressEngineNeutron(object):
|
||||
raise exception.InvalidInstanceId(instance_id=instance_id)
|
||||
network_interface = instance_network_interfaces[0]
|
||||
else:
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface = ec2utils.get_db_item(context,
|
||||
network_interface_id)
|
||||
if not private_ip_address:
|
||||
private_ip_address = network_interface['private_ip_address']
|
||||
|
||||
address = ec2utils.get_db_item(context, 'eipalloc', allocation_id)
|
||||
address = ec2utils.get_db_item(context, allocation_id)
|
||||
if not _is_address_valid(context, neutron, address):
|
||||
raise exception.InvalidAllocationIDNotFound(
|
||||
id=allocation_id)
|
||||
@ -414,8 +414,7 @@ class AddressEngineNova(object):
|
||||
def associate_address(self, context, public_ip=None, instance_id=None,
|
||||
allocation_id=None, network_interface_id=None,
|
||||
private_ip_address=None, allow_reassociation=False):
|
||||
os_instance_id = ec2utils.get_db_item(context, 'i',
|
||||
instance_id)['os_id']
|
||||
os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
|
||||
# NOTE(ft): check the public IP exists to raise AWS exception otherwise
|
||||
self.get_nova_ip_by_public_ip(context, public_ip)
|
||||
nova = clients.nova(context)
|
||||
|
@ -425,7 +425,7 @@ class TaggableItemsDescriber(UniversalDescriber):
|
||||
return True
|
||||
return False
|
||||
return super(TaggableItemsDescriber,
|
||||
self).is_filtering_value_found(filter_value, value)
|
||||
self).is_filtering_value_found(filter_value, value)
|
||||
|
||||
|
||||
class NonOpenstackItemsDescriber(UniversalDescriber):
|
||||
|
@ -94,8 +94,7 @@ def delete_dhcp_options(context, dhcp_options_id):
|
||||
if not dhcp_options_id:
|
||||
raise exception.MissingParameter(
|
||||
_('DHCP options ID must be specified'))
|
||||
dhcp_options = ec2utils.get_db_item(context, 'dopt',
|
||||
dhcp_options_id)
|
||||
dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
|
||||
vpcs = db_api.get_items(context, 'vpc')
|
||||
for vpc in vpcs:
|
||||
if dhcp_options['id'] == vpc.get('dhcp_options_id'):
|
||||
@ -125,13 +124,13 @@ def describe_dhcp_options(context, dhcp_options_id=None,
|
||||
|
||||
|
||||
def associate_dhcp_options(context, dhcp_options_id, vpc_id):
|
||||
vpc = ec2utils.get_db_item(context, 'vpc', vpc_id)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
rollback_dhcp_options_id = vpc.get('dhcp_options_id')
|
||||
if dhcp_options_id == 'default':
|
||||
dhcp_options_id = None
|
||||
dhcp_options = None
|
||||
else:
|
||||
dhcp_options = ec2utils.get_db_item(context, 'dopt', dhcp_options_id)
|
||||
dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
|
||||
dhcp_options_id = dhcp_options['id']
|
||||
neutron = clients.neutron(context)
|
||||
os_ports = neutron.list_ports()['ports']
|
||||
|
@ -182,7 +182,20 @@ NOT_FOUND_EXCEPTION_MAP = {
|
||||
}
|
||||
|
||||
|
||||
def get_db_item(context, kind, ec2_id):
|
||||
def get_db_item(context, ec2_id, expected_kind=None):
|
||||
"""Get a DB item, raise AWS compliant exception if it's not found.
|
||||
|
||||
Args:
|
||||
context (RequestContext): The request context.
|
||||
ec2_id (str): The ID of the requested item.
|
||||
expected_kind (str): The expected kind of the requested item.
|
||||
It should be specified for a kind of ec2_id to be validated,
|
||||
if you need it.
|
||||
|
||||
Returns:
|
||||
The DB item.
|
||||
"""
|
||||
kind = expected_kind or get_ec2_id_kind(ec2_id)
|
||||
item = db_api.get_item_by_id(context, kind, ec2_id)
|
||||
if item is None:
|
||||
params = {'id': ec2_id}
|
||||
@ -282,7 +295,7 @@ def get_os_image(context, ec2_image_id):
|
||||
kind = get_ec2_id_kind(ec2_image_id)
|
||||
images = db_api.get_public_items(context, kind, (ec2_image_id,))
|
||||
image = (images[0] if len(images) else
|
||||
get_db_item(context, kind, ec2_image_id))
|
||||
get_db_item(context, ec2_image_id))
|
||||
glance = clients.glance(context)
|
||||
try:
|
||||
return glance.images.get(image['os_id'])
|
||||
|
@ -99,7 +99,7 @@ IMAGE_TYPES = {'aki': 'kernel',
|
||||
# care of it for now. Ostrich algorithm
|
||||
def create_image(context, instance_id, name=None, description=None,
|
||||
no_reboot=False, block_device_mapping=None):
|
||||
instance = ec2utils.get_db_item(context, 'i', instance_id)
|
||||
instance = ec2utils.get_db_item(context, instance_id)
|
||||
nova = clients.nova(context)
|
||||
os_instance = nova.servers.get(instance['os_id'])
|
||||
|
||||
@ -213,8 +213,7 @@ def register_image(context, name=None, image_location=None,
|
||||
def deregister_image(context, image_id):
|
||||
# TODO(ft): AWS returns AuthFailure for public images,
|
||||
# but we return NotFound due searching for local images only
|
||||
kind = ec2utils.get_ec2_id_kind(image_id)
|
||||
image = ec2utils.get_db_item(context, kind, image_id)
|
||||
image = ec2utils.get_db_item(context, image_id)
|
||||
glance = clients.glance(context)
|
||||
try:
|
||||
glance.images.delete(image['os_id'])
|
||||
@ -346,8 +345,7 @@ def describe_image_attribute(context, image_id, attribute):
|
||||
# TODO(ft): AWS returns AuthFailure for not own public images,
|
||||
# but we return NotFound for this case because we search for local images
|
||||
# only
|
||||
kind = ec2utils.get_ec2_id_kind(image_id)
|
||||
image = ec2utils.get_db_item(context, kind, image_id)
|
||||
image = ec2utils.get_db_item(context, image_id)
|
||||
fn = supported_attributes.get(attribute)
|
||||
if fn is None:
|
||||
# TODO(ft): Change the error code and message with the real AWS ones
|
||||
@ -385,8 +383,7 @@ def modify_image_attribute(context, image_id, attribute,
|
||||
|
||||
# TODO(ft): AWS returns AuthFailure for public images,
|
||||
# but we return NotFound due searching for local images only
|
||||
kind = ec2utils.get_ec2_id_kind(image_id)
|
||||
image = ec2utils.get_db_item(context, kind, image_id)
|
||||
image = ec2utils.get_db_item(context, image_id)
|
||||
glance = clients.glance(context)
|
||||
image = glance.images.get(image['os_id'])
|
||||
|
||||
@ -686,10 +683,8 @@ def _s3_parse_manifest(context, manifest):
|
||||
if image_id == 'true':
|
||||
image_format = kind
|
||||
else:
|
||||
images = db_api.get_public_items(context, kind, (image_id,))
|
||||
image = (images[0] if len(images) else
|
||||
ec2utils.get_db_item(context, kind, image_id))
|
||||
properties[image_key] = image['os_id']
|
||||
os_image = ec2utils.get_os_image(context, image_id)
|
||||
properties[image_key] = os_image.id
|
||||
|
||||
set_dependent_image_id('kernel_id', 'aki')
|
||||
set_dependent_image_id('ramdisk_id', 'ari')
|
||||
|
@ -314,8 +314,7 @@ def start_instances(context, instance_id):
|
||||
|
||||
|
||||
def get_password_data(context, instance_id):
|
||||
# NOTE(Alex): AWS supports one and only one instance_id here
|
||||
instance = ec2utils.get_db_item(context, 'i', instance_id)
|
||||
instance = ec2utils.get_db_item(context, instance_id)
|
||||
nova = clients.nova(context)
|
||||
os_instance = nova.servers.get(instance['os_id'])
|
||||
password = os_instance.get_password()
|
||||
@ -328,8 +327,7 @@ def get_password_data(context, instance_id):
|
||||
|
||||
|
||||
def get_console_output(context, instance_id):
|
||||
# NOTE(Alex): AWS supports one and only one instance_id here
|
||||
instance = ec2utils.get_db_item(context, 'i', instance_id)
|
||||
instance = ec2utils.get_db_item(context, instance_id)
|
||||
nova = clients.nova(context)
|
||||
os_instance = nova.servers.get(instance['os_id'])
|
||||
console_output = os_instance.get_console_output()
|
||||
@ -340,7 +338,7 @@ def get_console_output(context, instance_id):
|
||||
|
||||
|
||||
def describe_instance_attribute(context, instance_id, attribute):
|
||||
instance = ec2utils.get_db_item(context, 'i', instance_id)
|
||||
instance = ec2utils.get_db_item(context, instance_id)
|
||||
nova = clients.nova(context)
|
||||
os_instance = nova.servers.get(instance['os_id'])
|
||||
novadb_instance = novadb.instance_get_by_uuid(context, os_instance.id)
|
||||
@ -980,8 +978,8 @@ class InstanceEngineNeutron(object):
|
||||
"may not be specified on multiple interfaces.")
|
||||
msg = msg % {'id': ec2_eni_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
ec2_eni_id)
|
||||
network_interface = ec2utils.get_db_item(context, ec2_eni_id,
|
||||
'eni')
|
||||
if 'instance_id' in network_interface:
|
||||
busy_network_interfaces.append(ec2_eni_id)
|
||||
vpc_ids.add(network_interface['vpc_id'])
|
||||
@ -991,8 +989,8 @@ class InstanceEngineNeutron(object):
|
||||
'detach_on_crash': True,
|
||||
'delete_on_termination': False})
|
||||
else:
|
||||
subnet = ec2utils.get_db_item(context, 'subnet',
|
||||
param['subnet_id'])
|
||||
subnet = ec2utils.get_db_item(context, param['subnet_id'],
|
||||
'subnet')
|
||||
vpc_ids.add(subnet['vpc_id'])
|
||||
args = copy.deepcopy(param)
|
||||
delete_on_termination = args.pop('delete_on_termination', True)
|
||||
@ -1043,9 +1041,8 @@ class InstanceEngineNeutron(object):
|
||||
filter=[{'name': 'vpc-id', 'value': [vpc_id]},
|
||||
{'name': 'group-name', 'value': ['default']}]
|
||||
)['securityGroupInfo']
|
||||
security_groups = [ec2utils.get_db_item(context, 'sg',
|
||||
default_group['groupId'])
|
||||
for default_group in default_groups]
|
||||
security_groups = db_api.get_items_by_ids(
|
||||
context, 'sg', [sg['groupId'] for sg in default_groups])
|
||||
return [sg['os_id'] for sg in security_groups]
|
||||
|
||||
def get_ec2_classic_os_network(self, context, neutron):
|
||||
@ -1175,10 +1172,10 @@ def _cloud_parse_block_device_mapping(context, bdm):
|
||||
ec2_id = ebs.pop('snapshot_id', None)
|
||||
if ec2_id:
|
||||
if ec2_id.startswith('snap-'):
|
||||
snapshot = ec2utils.get_db_item(context, 'snap', ec2_id)
|
||||
snapshot = ec2utils.get_db_item(context, ec2_id)
|
||||
bdm['snapshot_id'] = snapshot['os_id']
|
||||
elif ec2_id.startswith('vol-'):
|
||||
volume = ec2utils.get_db_item(context, 'vol', ec2_id)
|
||||
volume = ec2utils.get_db_item(context, ec2_id)
|
||||
bdm['volume_id'] = volume['os_id']
|
||||
else:
|
||||
# NOTE(ft): AWS returns undocumented InvalidSnapshotID.NotFound
|
||||
|
@ -55,14 +55,14 @@ def create_internet_gateway(context):
|
||||
|
||||
|
||||
def attach_internet_gateway(context, internet_gateway_id, vpc_id):
|
||||
igw = ec2utils.get_db_item(context, 'igw', internet_gateway_id)
|
||||
igw = ec2utils.get_db_item(context, internet_gateway_id)
|
||||
if igw.get('vpc_id'):
|
||||
msg_params = {'igw_id': igw['id'],
|
||||
'vpc_id': igw['vpc_id']}
|
||||
msg = _("resource %(igw_id)s is already attached to "
|
||||
"network %(vpc_id)s") % msg_params
|
||||
raise exception.ResourceAlreadyAssociated(msg)
|
||||
vpc = ec2utils.get_db_item(context, 'vpc', vpc_id)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
# TODO(ft): move search by vpc_id to DB api
|
||||
for gw in db_api.get_items(context, 'igw'):
|
||||
if gw.get('vpc_id') == vpc['id']:
|
||||
@ -86,8 +86,8 @@ def attach_internet_gateway(context, internet_gateway_id, vpc_id):
|
||||
|
||||
|
||||
def detach_internet_gateway(context, internet_gateway_id, vpc_id):
|
||||
igw = ec2utils.get_db_item(context, 'igw', internet_gateway_id)
|
||||
vpc = ec2utils.get_db_item(context, 'vpc', vpc_id)
|
||||
igw = ec2utils.get_db_item(context, internet_gateway_id)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
if igw.get('vpc_id') != vpc['id']:
|
||||
raise exception.GatewayNotAttached(igw_id=igw['id'],
|
||||
vpc_id=vpc['id'])
|
||||
@ -106,7 +106,7 @@ def detach_internet_gateway(context, internet_gateway_id, vpc_id):
|
||||
|
||||
|
||||
def delete_internet_gateway(context, internet_gateway_id):
|
||||
igw = ec2utils.get_db_item(context, 'igw', internet_gateway_id)
|
||||
igw = ec2utils.get_db_item(context, internet_gateway_id)
|
||||
if igw.get('vpc_id'):
|
||||
msg = _("The internetGateway '%(igw_id)s' has dependencies and "
|
||||
"cannot be deleted.") % {'igw_id': igw['id']}
|
||||
|
@ -49,7 +49,7 @@ def create_network_interface(context, subnet_id,
|
||||
secondary_private_ip_address_count=None,
|
||||
description=None,
|
||||
security_group_id=None):
|
||||
subnet = ec2utils.get_db_item(context, 'subnet', subnet_id)
|
||||
subnet = ec2utils.get_db_item(context, subnet_id)
|
||||
if subnet is None:
|
||||
raise exception.InvalidSubnetIDNotFound(id=subnet_id)
|
||||
neutron = clients.neutron(context)
|
||||
@ -101,8 +101,7 @@ def create_network_interface(context, subnet_id,
|
||||
)['securityGroupInfo']
|
||||
security_group_id = [default_group['groupId']
|
||||
for default_group in default_groups]
|
||||
security_groups = [ec2utils.get_db_item(context, 'sg', ec2_id)
|
||||
for ec2_id in security_group_id]
|
||||
security_groups = db_api.get_items_by_ids(context, 'sg', security_group_id)
|
||||
if any(security_group['vpc_id'] != vpc['id']
|
||||
for security_group in security_groups):
|
||||
msg = _('You have specified two resources that belong to '
|
||||
@ -159,8 +158,7 @@ def create_network_interface(context, subnet_id,
|
||||
|
||||
|
||||
def delete_network_interface(context, network_interface_id):
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface_id)
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if 'instance_id' in network_interface:
|
||||
msg = _("Network interface '%(eni_id)s' is currently in use.")
|
||||
msg = msg % {'eni_id': network_interface_id}
|
||||
@ -237,10 +235,9 @@ def assign_private_ip_addresses(context, network_interface_id,
|
||||
secondary_private_ip_address_count=None,
|
||||
allow_reassignment=False):
|
||||
# TODO(Alex): allow_reassignment is not supported at the moment
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface_id)
|
||||
subnet = ec2utils.get_db_item(context, 'subnet',
|
||||
network_interface['subnet_id'])
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
subnet = db_api.get_item_by_id(context, 'subnet',
|
||||
network_interface['subnet_id'])
|
||||
neutron = clients.neutron(context)
|
||||
os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
|
||||
os_port = neutron.show_port(network_interface['os_id'])['port']
|
||||
@ -278,8 +275,7 @@ def assign_private_ip_addresses(context, network_interface_id,
|
||||
|
||||
def unassign_private_ip_addresses(context, network_interface_id,
|
||||
private_ip_address):
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface_id)
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if network_interface['private_ip_address'] in private_ip_address:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=str(network_interface['private_ip_address']),
|
||||
@ -301,8 +297,7 @@ def unassign_private_ip_addresses(context, network_interface_id,
|
||||
|
||||
def describe_network_interface_attribute(context, network_interface_id,
|
||||
attribute):
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface_id)
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
# TODO(Alex): Implement attachments, groupSet
|
||||
|
||||
db_key = attribute if attribute == 'description' else 'source_dest_check'
|
||||
@ -323,16 +318,16 @@ def modify_network_interface_attribute(context, network_interface_id,
|
||||
if params_count != 1:
|
||||
raise exception.InvalidParameterCombination(
|
||||
'Multiple attributes specified')
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface_id)
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
# TODO(Alex): Implement attachments
|
||||
if description is not None:
|
||||
network_interface['description'] = description
|
||||
db_api.update_item(context, network_interface)
|
||||
neutron = clients.neutron(context)
|
||||
if security_group_id is not None:
|
||||
os_groups = [ec2utils.get_db_item(context, 'sg', ec2_id)['os_id']
|
||||
for ec2_id in security_group_id]
|
||||
os_groups = [sg['os_id']
|
||||
for sg in ec2utils.get_db_items(context, 'sg',
|
||||
security_group_id)]
|
||||
neutron.update_port(network_interface['os_id'],
|
||||
{'port': {'security_groups': os_groups}})
|
||||
if source_dest_check is not None:
|
||||
@ -361,13 +356,12 @@ def reset_network_interface_attribute(context, network_interface_id,
|
||||
|
||||
def attach_network_interface(context, network_interface_id,
|
||||
instance_id, device_index):
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface_id)
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if 'instance_id' in network_interface:
|
||||
raise exception.InvalidParameterValue(
|
||||
_("Network interface '%(id)s' is currently in use.") %
|
||||
{'id': network_interface_id})
|
||||
os_instance_id = ec2utils.get_db_item(context, 'i', instance_id)['os_id']
|
||||
os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
|
||||
# TODO(Alex) Check that the instance is not yet attached to another VPC
|
||||
# TODO(Alex) Check that the instance is "our", not created via nova
|
||||
# (which means that it doesn't belong to any VPC and can't be attached)
|
||||
|
@ -30,7 +30,7 @@ Validator = common.Validator
|
||||
|
||||
|
||||
def create_route_table(context, vpc_id):
|
||||
vpc = ec2utils.get_db_item(context, 'vpc', vpc_id)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
route_table = _create_route_table(context, vpc)
|
||||
return {'routeTable': _format_route_table(context, route_table,
|
||||
is_main=False)}
|
||||
@ -55,7 +55,7 @@ def replace_route(context, route_table_id, destination_cidr_block,
|
||||
|
||||
|
||||
def delete_route(context, route_table_id, destination_cidr_block):
|
||||
route_table = ec2utils.get_db_item(context, 'rtb', route_table_id)
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
for route_index, route in enumerate(route_table['routes']):
|
||||
if route['destination_cidr_block'] != destination_cidr_block:
|
||||
continue
|
||||
@ -84,8 +84,8 @@ def delete_route(context, route_table_id, destination_cidr_block):
|
||||
|
||||
|
||||
def associate_route_table(context, route_table_id, subnet_id):
|
||||
route_table = ec2utils.get_db_item(context, 'rtb', route_table_id)
|
||||
subnet = ec2utils.get_db_item(context, 'subnet', subnet_id)
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
subnet = ec2utils.get_db_item(context, subnet_id)
|
||||
if route_table['vpc_id'] != subnet['vpc_id']:
|
||||
msg = _('Route table %(rtb_id)s and subnet %(subnet_id)s belong to '
|
||||
'different networks')
|
||||
@ -114,7 +114,7 @@ def associate_route_table(context, route_table_id, subnet_id):
|
||||
|
||||
|
||||
def replace_route_table_association(context, association_id, route_table_id):
|
||||
route_table = ec2utils.get_db_item(context, 'rtb', route_table_id)
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
if route_table['vpc_id'] == ec2utils.change_ec2_id_kind(association_id,
|
||||
'vpc'):
|
||||
vpc = db_api.get_item_by_id(context, 'vpc',
|
||||
@ -200,7 +200,7 @@ def disassociate_route_table(context, association_id):
|
||||
|
||||
|
||||
def delete_route_table(context, route_table_id):
|
||||
route_table = ec2utils.get_db_item(context, 'rtb', route_table_id)
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
vpc = db_api.get_item_by_id(context, 'vpc', route_table['vpc_id'])
|
||||
_delete_route_table(context, route_table['id'], vpc)
|
||||
return True
|
||||
@ -286,7 +286,7 @@ def _delete_route_table(context, route_table_id, vpc=None, cleaner=None):
|
||||
def _set_route(context, route_table_id, destination_cidr_block,
|
||||
gateway_id, instance_id, network_interface_id,
|
||||
vpc_peering_connection_id, do_replace):
|
||||
route_table = ec2utils.get_db_item(context, 'rtb', route_table_id)
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
vpc = db_api.get_item_by_id(context, 'vpc', route_table['vpc_id'])
|
||||
vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
|
||||
route_ipnet = netaddr.IPNetwork(destination_cidr_block)
|
||||
@ -324,7 +324,7 @@ def _set_route(context, route_table_id, destination_cidr_block,
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
if gateway_id:
|
||||
gateway = ec2utils.get_db_item(context, 'igw', gateway_id)
|
||||
gateway = ec2utils.get_db_item(context, gateway_id)
|
||||
if gateway.get('vpc_id') != route_table['vpc_id']:
|
||||
msg = _('Route table %(rtb_id)s and network gateway %(igw_id)s '
|
||||
'belong to different networks')
|
||||
@ -333,8 +333,7 @@ def _set_route(context, route_table_id, destination_cidr_block,
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
route = {'gateway_id': gateway['id']}
|
||||
elif network_interface_id:
|
||||
network_interface = ec2utils.get_db_item(context, 'eni',
|
||||
network_interface_id)
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if network_interface['vpc_id'] != route_table['vpc_id']:
|
||||
msg = _('Route table %(rtb_id)s and interface %(eni_id)s '
|
||||
'belong to different networks')
|
||||
|
@ -71,7 +71,7 @@ def create_security_group(context, group_name, group_description,
|
||||
os_security_group.id)
|
||||
if vpc_id:
|
||||
# NOTE(Alex) Check if such vpc exists
|
||||
ec2utils.get_db_item(context, 'vpc', vpc_id)
|
||||
ec2utils.get_db_item(context, vpc_id)
|
||||
security_group = db_api.add_item(context, 'sg',
|
||||
{'vpc_id': vpc_id,
|
||||
'os_id': os_security_group.id})
|
||||
@ -110,7 +110,7 @@ class SecurityGroupDescriber(common.TaggableItemsDescriber):
|
||||
self.all_db_items, self.os_items)
|
||||
|
||||
def get_os_items(self):
|
||||
if self.all_db_items == None:
|
||||
if self.all_db_items is None:
|
||||
self.all_db_items = ec2utils.get_db_items(self.context, 'sg', None)
|
||||
os_groups = security_group_engine.get_os_groups(self.context)
|
||||
for os_group in os_groups:
|
||||
@ -385,7 +385,7 @@ class SecurityGroupEngineNeutron(object):
|
||||
return SecurityGroupEngineNova().delete_group(context,
|
||||
group_name,
|
||||
group_id)
|
||||
security_group = ec2utils.get_db_item(context, 'sg', group_id)
|
||||
security_group = ec2utils.get_db_item(context, group_id)
|
||||
try:
|
||||
neutron.delete_security_group(security_group['os_id'])
|
||||
except neutron_exception.Conflict as ex:
|
||||
@ -430,7 +430,7 @@ class SecurityGroupEngineNeutron(object):
|
||||
return SecurityGroupEngineNova().get_group_os_id(context,
|
||||
group_id,
|
||||
group_name)
|
||||
return ec2utils.get_db_item(context, 'sg', group_id)['os_id']
|
||||
return ec2utils.get_db_item(context, group_id, 'sg')['os_id']
|
||||
|
||||
|
||||
class SecurityGroupEngineNova(object):
|
||||
@ -500,7 +500,7 @@ class SecurityGroupEngineNova(object):
|
||||
return neutron_security_groups
|
||||
|
||||
def convert_rule_to_neutron(self, context, nova_rule,
|
||||
nova_security_groups=None):
|
||||
nova_security_groups=None):
|
||||
neutron_rule = {'id': nova_rule['id'],
|
||||
'protocol': nova_rule['ip_protocol'],
|
||||
'port_range_min': nova_rule['from_port'],
|
||||
@ -519,7 +519,7 @@ class SecurityGroupEngineNova(object):
|
||||
return neutron_rule
|
||||
|
||||
def get_group_os_id(self, context, group_id, group_name,
|
||||
nova_security_groups=None):
|
||||
nova_security_groups=None):
|
||||
if group_id:
|
||||
return group_id
|
||||
nova_group = self.get_nova_group_by_name(context, group_name,
|
||||
@ -527,7 +527,7 @@ class SecurityGroupEngineNova(object):
|
||||
return nova_group.id
|
||||
|
||||
def get_nova_group_by_name(self, context, group_name,
|
||||
nova_security_groups=None):
|
||||
nova_security_groups=None):
|
||||
if nova_security_groups is None:
|
||||
nova = clients.nova(context)
|
||||
nova_security_groups = nova.security_groups.list()
|
||||
|
@ -30,7 +30,7 @@ Validator = common.Validator
|
||||
|
||||
|
||||
def create_snapshot(context, volume_id, description=None):
|
||||
volume = ec2utils.get_db_item(context, 'vol', volume_id)
|
||||
volume = ec2utils.get_db_item(context, volume_id)
|
||||
cinder = clients.cinder(context)
|
||||
os_volume = cinder.volumes.get(volume['os_id'])
|
||||
# NOTE(ft): Easy fix to allow snapshot creation in statuses other than
|
||||
@ -55,7 +55,7 @@ def create_snapshot(context, volume_id, description=None):
|
||||
|
||||
|
||||
def delete_snapshot(context, snapshot_id):
|
||||
snapshot = ec2utils.get_db_item(context, 'snap', snapshot_id)
|
||||
snapshot = ec2utils.get_db_item(context, snapshot_id)
|
||||
cinder = clients.cinder(context)
|
||||
try:
|
||||
cinder.volume_snapshots.delete(snapshot['os_id'])
|
||||
|
@ -41,7 +41,7 @@ Validator = common.Validator
|
||||
|
||||
def create_subnet(context, vpc_id, cidr_block,
|
||||
availability_zone=None):
|
||||
vpc = ec2utils.get_db_item(context, 'vpc', vpc_id)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
|
||||
subnet_ipnet = netaddr.IPNetwork(cidr_block)
|
||||
if subnet_ipnet not in vpc_ipnet:
|
||||
@ -89,7 +89,7 @@ def create_subnet(context, vpc_id, cidr_block,
|
||||
|
||||
|
||||
def delete_subnet(context, subnet_id):
|
||||
subnet = ec2utils.get_db_item(context, 'subnet', subnet_id)
|
||||
subnet = ec2utils.get_db_item(context, subnet_id)
|
||||
vpc = db_api.get_item_by_id(context, 'vpc', subnet['vpc_id'])
|
||||
network_interfaces = network_interface_api.describe_network_interfaces(
|
||||
context,
|
||||
|
@ -69,7 +69,7 @@ def create_tags(context, resource_id, tag):
|
||||
# NOTE(ft): check items exist (excluding images because AWS allows to
|
||||
# create a tag with any image id)
|
||||
if kind not in ('ami', 'ari', 'aki'):
|
||||
ec2utils.get_db_item(context, kind, item_id)
|
||||
ec2utils.get_db_item(context, item_id)
|
||||
|
||||
tags = [dict(item_id=item_id,
|
||||
key=tag_pair['key'],
|
||||
|
@ -34,7 +34,7 @@ def create_volume(context, availability_zone=None, size=None,
|
||||
snapshot_id=None, volume_type=None, iops=None,
|
||||
encrypted=None, kms_key_id=None):
|
||||
if snapshot_id is not None:
|
||||
snapshot = ec2utils.get_db_item(context, 'snap', snapshot_id)
|
||||
snapshot = ec2utils.get_db_item(context, snapshot_id)
|
||||
os_snapshot_id = snapshot['os_id']
|
||||
else:
|
||||
os_snapshot_id = None
|
||||
@ -54,8 +54,8 @@ def create_volume(context, availability_zone=None, size=None,
|
||||
|
||||
|
||||
def attach_volume(context, volume_id, instance_id, device):
|
||||
volume = ec2utils.get_db_item(context, 'vol', volume_id)
|
||||
instance = ec2utils.get_db_item(context, 'i', instance_id)
|
||||
volume = ec2utils.get_db_item(context, volume_id)
|
||||
instance = ec2utils.get_db_item(context, instance_id)
|
||||
|
||||
nova = clients.nova(context)
|
||||
try:
|
||||
@ -72,7 +72,7 @@ def attach_volume(context, volume_id, instance_id, device):
|
||||
|
||||
def detach_volume(context, volume_id, instance_id=None, device=None,
|
||||
force=None):
|
||||
volume = ec2utils.get_db_item(context, 'vol', volume_id)
|
||||
volume = ec2utils.get_db_item(context, volume_id)
|
||||
|
||||
cinder = clients.cinder(context)
|
||||
os_volume = cinder.volumes.get(volume['os_id'])
|
||||
@ -92,7 +92,7 @@ def detach_volume(context, volume_id, instance_id=None, device=None,
|
||||
|
||||
|
||||
def delete_volume(context, volume_id):
|
||||
volume = ec2utils.get_db_item(context, 'vol', volume_id)
|
||||
volume = ec2utils.get_db_item(context, volume_id)
|
||||
cinder = clients.cinder(context)
|
||||
try:
|
||||
cinder.volumes.delete(volume['os_id'])
|
||||
|
@ -64,7 +64,7 @@ def create_vpc(context, cidr_block, instance_tenancy='default'):
|
||||
|
||||
|
||||
def delete_vpc(context, vpc_id):
|
||||
vpc = ec2utils.get_db_item(context, 'vpc', vpc_id)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
subnets = subnet_api.describe_subnets(
|
||||
context,
|
||||
filter=[{'name': 'vpc-id', 'value': [vpc_id]}])['subnetSet']
|
||||
|
@ -66,7 +66,7 @@ class EC2DBAPI(object):
|
||||
def _db_api(self):
|
||||
if not self.__db_api:
|
||||
ec2_db_api = db_api.DBAPI(CONF.database.backend,
|
||||
backend_mapping=_BACKEND_MAPPING)
|
||||
backend_mapping=_BACKEND_MAPPING)
|
||||
if CONF.database.use_tpool:
|
||||
self.__db_api = tpool.Proxy(ec2_db_api)
|
||||
else:
|
||||
|
@ -32,7 +32,7 @@ class EC2UtilsTestCase(testtools.TestCase):
|
||||
|
||||
def check_normal_flow(kind, ec2_id):
|
||||
item['id'] = ec2_id
|
||||
res = ec2utils.get_db_item('fake_context', kind, ec2_id)
|
||||
res = ec2utils.get_db_item('fake_context', ec2_id)
|
||||
self.assertThat(res, matchers.DictMatches(item))
|
||||
db_api.get_item_by_id.assert_called_once_with('fake_context',
|
||||
kind, ec2_id)
|
||||
@ -45,7 +45,7 @@ class EC2UtilsTestCase(testtools.TestCase):
|
||||
ec2_id = fakes.random_ec2_id(kind)
|
||||
self.assertRaises(ex_class,
|
||||
ec2utils.get_db_item,
|
||||
'fake_context', kind, ec2_id)
|
||||
'fake_context', ec2_id)
|
||||
db_api.get_item_by_id.assert_called_once_with('fake_context',
|
||||
kind, ec2_id)
|
||||
db_api.reset_mock()
|
||||
|
@ -521,6 +521,10 @@ class S3TestCase(base.ApiTestCase):
|
||||
'ari': ({'id': fakes.ID_EC2_IMAGE_ARI_1,
|
||||
'os_id': fakes.ID_OS_IMAGE_ARI_1},)}))
|
||||
self.db_api.get_item_by_id.return_value = None
|
||||
self.glance.images.get.side_effect = (
|
||||
fakes.get_by_1st_arg_getter({
|
||||
fakes.ID_OS_IMAGE_AKI_1: fakes.OSImage(fakes.OS_IMAGE_AKI_1),
|
||||
fakes.ID_OS_IMAGE_ARI_1: fakes.OSImage(fakes.OS_IMAGE_ARI_1)}))
|
||||
|
||||
fake_context = self._create_context()
|
||||
metadata, image_parts, key, iv = image_api._s3_parse_manifest(
|
||||
|
Loading…
Reference in New Issue
Block a user