Extract describe images

It was necessary to:
- extend the DB-layer API
- add ec2utils functions to auto-insert DB items
- fix and improve the Describer class
- use the Describer class for volumes and snapshots

Change-Id: Ief0c3507fd1c7d7e3262a34cbd1d8bfa1d4a9ace
Feodor Tersin 2014-12-19 02:45:02 +04:00
parent e4c4463ab1
commit daf47b16ef
13 changed files with 558 additions and 111 deletions

View File

@ -24,6 +24,7 @@ from oslo.config import cfg
from ec2api.api import address
from ec2api.api import availability_zone
from ec2api.api import dhcp_options
from ec2api.api import image
from ec2api.api import instance
from ec2api.api import internet_gateway
from ec2api.api import key_pair
@ -1418,3 +1419,23 @@ class CloudController(object):
"""
return snapshot.describe_snapshots(context, snapshot_id, owner,
restorable_by, filter)
def describe_images(self, context, executable_by=None, image_id=None,
owner=None, filter=None):
"""Describes one or more of the images available to you.
Args:
context (RequestContext): The request context.
executable_by (list of str): Filters the images by users with
explicit launch permissions.
Not used now.
image_id (list of str): One or more image IDs.
owner (list of str): Filters the images by the owner.
filter (list of filter dict): You can specify filters so that the
response includes information for only certain images.
Returns:
A list of images.
"""
return image.describe_images(context, executable_by, image_id,
owner, filter)
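
For reference, the call is delegated to image.describe_images() (added below), which returns a dict with an 'imagesSet' list built by _format_image(). A hypothetical single entry, with made-up identifiers and values:

# Hypothetical DescribeImages entry (all ids and values are invented):
{'imageId': 'ami-00000001',
 'imageOwnerId': '6f2b4b0f6e6e4c3a9d7b2a1c0e9f8d7c',
 'name': 'cirros-0.3.2-x86_64',
 'imageState': 'available',
 'description': '',
 'imageType': 'machine',
 'isPublic': True,
 'architecture': 'x86_64',
 'imageLocation': 'cirros/cirros-0.3.2-x86_64.img',
 'rootDeviceName': '/dev/sda1',
 'rootDeviceType': 'instance-store'}
# kernelId, ramdiskId and blockDeviceMapping appear only when the Glance
# image carries the corresponding properties.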

View File

@ -36,10 +36,11 @@ class UniversalDescriber(object):
def get_os_items(self):
return []
def auto_update_db(self, os_item_id):
if self.KIND not in VPC_KINDS:
item = ec2utils.get_db_item_by_os_id(self.context, self.KIND,
self.os_item_id)
def auto_update_db(self, item, os_item):
if item is None and self.KIND not in VPC_KINDS:
item = ec2utils.auto_create_db_item(self.context, self.KIND,
os_item.id)
return item
def get_id(self, os_item):
return os_item['id'] if isinstance(os_item, dict) else os_item.id
@ -47,20 +48,24 @@ class UniversalDescriber(object):
def get_name(self, os_item):
return os_item['name']
def delete_obsolete_item(self, item):
db_api.delete_item(self.context, item['id'])
def describe(self, context, ids=None, names=None, filter=None):
self.context = context
selective_describe = ids is not None or names is not None
self.ids = ids or []
self.names = names or []
self.ids = set(ids or [])
self.names = set(names or [])
self.items = self.get_db_items()
self.os_items = self.get_os_items()
formatted_items = []
items_dict = dict((i['os_id'], i) for i in (self.items or []))
self.items_dict = dict((i['os_id'], i) for i in (self.items or []))
paired_items_ids = set()
for os_item in self.os_items:
os_item_name = self.get_name(os_item)
os_item_id = self.get_id(os_item)
item = items_dict.get(os_item_id, None)
item = self.items_dict.get(os_item_id, None)
# NOTE(Alex): Filter out items not requested in names or ids
if selective_describe:
if os_item_name in self.names:
@ -70,17 +75,21 @@ class UniversalDescriber(object):
else:
continue
# NOTE(Alex): Autoupdate DB for autoupdatable items
elif not item:
item = self.auto_update_db(os_item_id)
item = self.auto_update_db(item, os_item)
if item:
paired_items_ids.add(item['id'])
formatted_item = self.format(item, os_item)
if not utils.filtered_out(formatted_item, filter, self.FILTER_MAP):
if (formatted_item and
not utils.filtered_out(formatted_item, filter,
self.FILTER_MAP)):
formatted_items.append(formatted_item)
# NOTE(Alex): delete obsolete items
for id in self.ids:
db_api.delete_item(context, id)
for item in self.items:
if item['id'] not in paired_items_ids:
self.delete_obsolete_item(item)
# NOTE(Alex): some requested items are not found
if self.ids or self.names:
params = {'id': (self.ids or self.names)[0]}
params = {'id': next(iter(self.ids or self.names))}
raise ec2utils._NOT_FOUND_EXCEPTION_MAP[self.KIND](**params)
return formatted_items
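
To summarize the reworked flow, a minimal sketch (assuming context is a RequestContext; the EC2 id is made up) of what describe() now does for a non-VPC kind; VolumeDescriber is defined further down in this change:

# Sketch only; 'vol-0000000a' is an invented EC2 id.
formatted = VolumeDescriber().describe(context, ids=['vol-0000000a'])
# For each OS item returned by get_os_items():
#   - a DB item matched by os_id is rendered via format();
#   - an OS item without a DB record gets one through auto_update_db(),
#     which calls ec2utils.auto_create_db_item() for non-VPC kinds.
# DB items left unpaired after the loop are removed via
# delete_obsolete_item(), and requested ids that stay unresolved raise the
# kind-specific exception from ec2utils._NOT_FOUND_EXCEPTION_MAP.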

View File

@ -26,6 +26,23 @@ from ec2api.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
def image_type(image_type):
"""Converts to a three letter image type.
aki, kernel => aki
ari, ramdisk => ari
anything else => ami
"""
if image_type == 'kernel':
return 'aki'
if image_type == 'ramdisk':
return 'ari'
if image_type not in ['aki', 'ari']:
return 'ami'
return image_type
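
A quick illustration of the mapping described in the docstring:

# image_type() normalizes Glance container formats to EC2 image kinds:
assert image_type('kernel') == 'aki'
assert image_type('ramdisk') == 'ari'
assert image_type('aki') == 'aki'
assert image_type('bare') == 'ami'  # anything else falls back to 'ami'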
def resource_type_from_id(context, resource_id):
"""Get resource type by ID
@ -337,6 +354,7 @@ _NOT_FOUND_EXCEPTION_MAP = {
'az': exception.InvalidAvailabilityZoneNotFound,
'vol': exception.InvalidVolumeNotFound,
'snap': exception.InvalidSnapshotNotFound,
'ami': exception.InvalidAMIIDNotFound,
}
@ -369,6 +387,14 @@ def register_auto_create_db_item_extension(kind, extension):
_auto_create_db_item_extensions[kind] = extension
def auto_create_db_item(context, kind, os_id, **extension_kwargs):
item = {'os_id': os_id}
extension = _auto_create_db_item_extensions.get(kind)
if extension:
extension(context, item, **extension_kwargs)
return db_api.add_item(context, kind, item)
def get_db_item_by_os_id(context, kind, os_id, items_by_os_id=None,
**extension_kwargs):
"""Get DB item by OS id (create if it doesn't exist).
@ -396,19 +422,40 @@ def get_db_item_by_os_id(context, kind, os_id, items_by_os_id=None,
item = items_by_os_id.get(os_id)
if item:
return item
item = next((i for i in db_api.get_items(context, kind)
if i['os_id'] == os_id), None)
else:
item = next((i for i in db_api.get_items(context, kind)
if i['os_id'] == os_id), None)
if not item:
item = {'os_id': os_id}
extension = _auto_create_db_item_extensions.get(kind)
if extension:
extension(context, item, **extension_kwargs)
item = db_api.add_item(context, kind, item)
item = auto_create_db_item(context, kind, os_id, **extension_kwargs)
else:
pass
if items_by_os_id is not None:
items_by_os_id[os_id] = item
return item
def os_id_to_ec2_id(context, kind, os_id, items_by_os_id=None,
ids_by_os_id=None):
if os_id is None:
return None
if ids_by_os_id is not None:
item_id = ids_by_os_id.get(os_id)
if item_id:
return item_id
if items_by_os_id is not None:
item = items_by_os_id.get(os_id)
if item:
return item['id']
ids = db_api.get_item_ids(context, kind, (os_id,))
if len(ids):
item_id, _os_id = ids[0]
else:
item_id = db_api.add_item_id(context, kind, os_id)
if ids_by_os_id is not None:
ids_by_os_id[os_id] = item_id
return item_id
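
The optional dict arguments act as per-request caches so that callers such as _format_image() avoid repeated DB lookups. A usage sketch, assuming context is a RequestContext and the Glance uuid is made up:

ids_cache = {}
kernel_ec2_id = os_id_to_ec2_id(context, 'aki',
                                '11111111-2222-3333-4444-555555555555',
                                ids_by_os_id=ids_cache)
# The first call goes to db_api.get_item_ids() (or add_item_id() for an
# unknown os_id); later calls for the same os_id are answered from ids_cache.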
_cidr_re = re.compile("^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$")

ec2api/api/image.py (new file, 290 lines)
View File

@ -0,0 +1,290 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import re
from ec2api.api import clients
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import context as ec2_context
from ec2api.db import api as db_api
from ec2api import exception
class ImageDescriber(common.UniversalDescriber):
KIND = 'ami'
FILTER_MAP = {'architecture': 'architecture',
'description': 'description',
'image-id': 'imageId',
'image-type': 'imageType',
'is-public': 'isPublic',
'kernel-id': 'kernelId',
'name': 'name',
'owner-id': 'ownerId',
'ramdisk-id': 'ramdiskId',
'state': 'state',
}
def format(self, image, os_image):
return _format_image(self.context, image, os_image, self.items_dict,
self.ids_dict, self.snapshot_ids)
def get_db_items(self):
local_images = [db_api.get_items_by_ids(self.context, kind, self.ids)
for kind in ('ami', 'ari', 'aki')]
public_images = [db_api.get_public_items(self.context, kind, self.ids)
for kind in ('ami', 'ari', 'aki')]
images = list(itertools.chain(*itertools.chain(local_images,
public_images)))
if len(images) < len(self.ids):
missed_ids = set(self.ids) - set(i['id'] for i in images)
raise exception.InvalidAMIIDNotFound(
id=next(iter(missed_ids)))
self.images = images
self.snapshot_ids = dict((s['os_id'], s['id'])
for s in db_api.get_items(self.context, 'snap'))
self.local_images_os_ids = set(i['os_id']
for i in itertools.chain(*local_images))
self.ids_dict = {}
return images
def get_os_items(self):
return clients.glance(self.context).images.list()
def auto_update_db(self, image, os_image):
if not image:
kind = ec2utils.image_type(os_image.container_format)
ctx = (self.context if os_image.owner == self.context.project_id
else ec2_context.get_admin_context(
project_id=os_image.owner))
image = ec2utils.auto_create_db_item(ctx, kind, os_image.id,
os_image=os_image)
self.items_dict[os_image.id] = image
elif (image['os_id'] in self.local_images_os_ids and
image['is_public'] != os_image.is_public):
image['is_public'] = os_image.is_public
db_api.update_item(self.context, image)
return image
def get_name(self, os_item):
return ''
def delete_obsolete_item(self, image):
if image['os_id'] in self.local_images_os_ids:
db_api.delete_item(self.context, image['id'])
def describe_images(context, executable_by=None, image_id=None,
owner=None, filter=None):
formatted_images = ImageDescriber().describe(
context, ids=image_id, filter=filter)
return {'imagesSet': formatted_images}
def _format_image(context, image, os_image, images_dict, ids_dict,
snapshot_ids=None):
image_type = ec2utils.image_type(os_image.container_format)
name = os_image.name
display_mapping = {'aki': 'kernel',
'ari': 'ramdisk',
'ami': 'machine'}
ec2_image = {'imageId': image['id'],
'imageOwnerId': os_image.owner,
'name': name,
'imageState': _cloud_get_image_state(os_image),
'description': '',
'imageType': display_mapping.get(image_type),
'isPublic': not not os_image.is_public,
'architecture': os_image.properties.get('architecture'),
}
kernel_id = os_image.properties.get('kernel_id')
if kernel_id:
ec2_image['kernelId'] = ec2utils.os_id_to_ec2_id(
context, 'aki', kernel_id,
items_by_os_id=images_dict, ids_by_os_id=ids_dict)
ramdisk_id = os_image.properties.get('ramdisk_id')
if ramdisk_id:
ec2_image['ramdiskId'] = ec2utils.os_id_to_ec2_id(
context, 'ari', ramdisk_id,
items_by_os_id=images_dict, ids_by_os_id=ids_dict)
img_loc = os_image.properties.get('image_location')
if img_loc:
ec2_image['imageLocation'] = img_loc
else:
ec2_image['imageLocation'] = "%s (%s)" % (img_loc, name)
if not name and img_loc:
# This should only occur for images registered with ec2 api
# prior to that api populating the glance name
ec2_image['name'] = img_loc
properties = os_image.properties
root_device_name = _block_device_properties_root_device_name(properties)
root_device_type = 'instance-store'
for bdm in json.loads(properties.get('block_device_mapping', '[]')):
if (bdm.get('boot_index') == 0 and
('snapshot_id' in bdm or 'volume_id' in bdm) and
not bdm.get('no_device')):
root_device_type = 'ebs'
ec2_image['rootDeviceName'] = (root_device_name or
_block_device_DEFAULT_ROOT_DEV_NAME)
ec2_image['rootDeviceType'] = root_device_type
_cloud_format_mappings(context, properties, ec2_image,
ec2_image['rootDeviceName'], snapshot_ids)
return ec2_image
def _auto_create_image_extension(context, image, os_image):
image['is_public'] = os_image.is_public
ec2utils.register_auto_create_db_item_extension(
'ami', _auto_create_image_extension)
ec2utils.register_auto_create_db_item_extension(
'ari', _auto_create_image_extension)
ec2utils.register_auto_create_db_item_extension(
'aki', _auto_create_image_extension)
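
The extension hook lets a kind enrich the DB record at auto-creation time. A sketch of how the pieces fit together, assuming os_image is a Glance image object and the uuid is made up:

# ec2utils.auto_create_db_item() builds {'os_id': <glance uuid>}, runs the
# registered extension, then stores the item:
image = ec2utils.auto_create_db_item(
    context, 'ami', '66666666-7777-8888-9999-000000000000',
    os_image=os_image)
# _auto_create_image_extension() ran in between, so the stored item also
# carries 'is_public' copied from the Glance image.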
# NOTE(ft): following functions are copied from various parts of Nova
def _cloud_get_image_state(os_image):
# NOTE(vish): fallback status if image_state isn't set
state = os_image.status
if state == 'active':
state = 'available'
return os_image.properties.get('image_state', state)
_block_device_DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
def _block_device_properties_root_device_name(properties):
"""get root device name from image meta data.
If it isn't specified, return None.
"""
root_device_name = None
# NOTE(yamahata): see image_service.s3.s3create()
for bdm in properties.get('mappings', []):
if bdm['virtual'] == 'root':
root_device_name = bdm['device']
# NOTE(yamahata): register_image's command line can override
# <machine>.manifest.xml
if 'root_device_name' in properties:
root_device_name = properties['root_device_name']
return root_device_name
def _cloud_properties_get_mappings(properties):
return _block_device_mappings_prepend_dev(properties.get('mappings', []))
def _cloud_format_mappings(context, properties, result, root_device_name=None,
snapshot_ids=None):
"""Format multiple BlockDeviceMappingItemType."""
mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
for m in _cloud_properties_get_mappings(properties)
if _block_device_is_swap_or_ephemeral(m['virtual'])]
block_device_mapping = [
_cloud_format_block_device_mapping(context, bdm, root_device_name,
snapshot_ids)
for bdm in json.loads(properties.get('block_device_mapping', '[]'))]
# NOTE(yamahata): overwrite mappings with block_device_mapping
for bdm in block_device_mapping:
for i in range(len(mappings)):
if bdm['deviceName'] == mappings[i]['deviceName']:
del mappings[i]
break
mappings.append(bdm)
# NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]
if mappings:
result['blockDeviceMapping'] = mappings
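
In other words, swap and ephemeral entries from the image's 'mappings' property are merged with the formatted 'block_device_mapping' entries, the latter winning on device-name clashes. A small sketch with made-up properties (the ebs case is shown separately below):

properties = {'mappings': [{'virtual': 'ephemeral0', 'device': 'sdb'},
                           {'virtual': 'root', 'device': '/dev/sda1'}]}
result = {}
_cloud_format_mappings(context, properties, result)
# result == {'blockDeviceMapping': [{'virtualName': 'ephemeral0',
#                                    'deviceName': '/dev/sdb'}]}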
def _cloud_format_block_device_mapping(context, bdm, root_device_name=None,
snapshot_ids=None):
"""Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if bdm.get('boot_index') == 0 and root_device_name:
item['deviceName'] = root_device_name
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if bdm.get(k) is not None:
if k == 'snapshot_id':
ebs[name] = ec2utils.os_id_to_ec2_id(
context, 'snap', bdm[k], ids_by_os_id=snapshot_ids)
elif k == 'volume_id':
ebs[name] = ec2utils.os_id_to_ec2_id(context, 'vol',
bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
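
A worked input/output pair for the transformation above, with made-up identifiers (the snapshot uuid is mapped to an EC2 id through os_id_to_ec2_id()):

bdm = {'device_name': '/dev/sdb1',
       'snapshot_id': '2f2b6b72-aaaa-bbbb-cccc-0123456789ab',
       'volume_size': 10,
       'delete_on_termination': False}
# _cloud_format_block_device_mapping(context, bdm) would return:
# {'deviceName': '/dev/sdb1',
#  'ebs': {'snapshotId': 'snap-00000001',
#          'volumeSize': 10,
#          'deleteOnTermination': False}}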
def _block_device_mappings_prepend_dev(mappings):
"""Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
for m in mappings:
virtual = m['virtual']
if (_block_device_is_swap_or_ephemeral(virtual) and
(not m['device'].startswith('/'))):
m['device'] = '/dev/' + m['device']
return mappings
def _block_device_is_swap_or_ephemeral(device_name):
return (device_name and
(device_name == 'swap' or _block_device_is_ephemeral(device_name)))
_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$')
def _block_device_is_ephemeral(device_name):
return _ephemeral.match(device_name) is not None
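
A few quick checks of the helpers above:

mappings = [{'virtual': 'swap', 'device': 'sdc'}]
assert _block_device_mappings_prepend_dev(mappings)[0]['device'] == '/dev/sdc'
assert _block_device_is_swap_or_ephemeral('swap')
assert _block_device_is_ephemeral('ephemeral1')
assert not _block_device_is_ephemeral('ephemeral01')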

View File

@ -439,7 +439,7 @@ def _get_idempotent_run(context, client_token):
def _format_reservation(context, reservation_id, instances_info,
ec2_network_interfaces, filters=None, volumes={}):
ec2_network_interfaces, filters=None, volumes=None):
formatted_instances = []
for (instance, os_instance, novadb_instance) in instances_info:
ec2_instance = _format_instance(
@ -1065,10 +1065,10 @@ def _cloud_format_instance_type(context, os_instance):
def _cloud_format_instance_root_device_name(novadb_instance):
return (novadb_instance.get('root_device_name') or
block_device_DEFAULT_ROOT_DEV_NAME)
_block_device_DEFAULT_ROOT_DEV_NAME)
block_device_DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_block_device_DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
def _cloud_format_instance_bdm(context, instance_uuid, root_device_name,

View File

@ -15,6 +15,7 @@
from cinderclient import exceptions as cinder_exception
from ec2api.api import clients
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import utils
from ec2api.db import api as db_api
@ -22,16 +23,6 @@ from ec2api import exception
from ec2api.openstack.common.gettextutils import _
FILTER_MAP = {'description': 'description',
'owner-id': 'ownerId',
'progress': 'progress',
'snapshot-id': 'snapshotId',
'start-time': 'startTime',
'status': 'status',
'volume-id': 'volumeId',
'volume-size': 'volumeSize'}
def create_snapshot(context, volume_id, description=None):
volume = ec2utils.get_db_item(context, 'vol', volume_id)
cinder = clients.cinder(context)
@ -50,7 +41,7 @@ def create_snapshot(context, volume_id, description=None):
display_description=description)
cleaner.addCleanup(os_snapshot.delete)
snapshot = db_api.add_item(context, 'snap', {'os_id': os_snapshot.id})
cleaner.addCleanup(db_api.delete_item(context, snapshot['id']))
cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
os_snapshot.update(display_name=snapshot['id'])
return _format_snapshot(context, snapshot, os_snapshot,
@ -68,40 +59,38 @@ def delete_snapshot(context, snapshot_id):
return True
class SnapshotDescriber(common.UniversalDescriber):
KIND = 'snap'
FILTER_MAP = {'description': 'description',
'owner-id': 'ownerId',
'progress': 'progress',
'snapshot-id': 'snapshotId',
'start-time': 'startTime',
'status': 'status',
'volume-id': 'volumeId',
'volume-size': 'volumeSize'}
def format(self, snapshot, os_snapshot):
return _format_snapshot(self.context, snapshot, os_snapshot,
self.volumes)
def get_db_items(self):
self.volumes = dict((vol['os_id'], vol)
for vol in db_api.get_items(self.context, 'vol'))
return super(SnapshotDescriber, self).get_db_items()
def get_os_items(self):
return clients.cinder(self.context).volume_snapshots.list()
def get_name(self, os_item):
return ''
def describe_snapshots(context, snapshot_id=None, owner=None,
restorable_by=None, filter=None):
snapshots = ec2utils.get_db_items(context, 'snap', snapshot_id)
snapshots = dict((snap['os_id'], snap) for snap in snapshots)
volumes = dict((vol['os_id'], vol)
for vol in db_api.get_items(context, 'vol'))
formatted_snapshots = []
cinder = clients.cinder(context)
os_snapshots = cinder.volume_snapshots.list()
for os_snapshot in os_snapshots:
snapshot = snapshots.pop(os_snapshot.id, None)
if not snapshot:
if snapshot_id:
# NOTE(ft): os_snapshot is not requested by
# 'snapshot_id' filter
continue
else:
snapshot = ec2utils.get_db_item_by_os_id(context, 'snap',
os_snapshot.id)
formatted_snapshot = _format_snapshot(context, snapshot, os_snapshot,
volumes)
if (formatted_snapshot and
not utils.filtered_out(formatted_snapshot, filter,
FILTER_MAP)):
formatted_snapshots.append(formatted_snapshot)
# NOTE(ft): delete obsolete snapshots
for snap in snapshots.itervalues():
db_api.delete_item(context, snap['id'])
# NOTE(ft): some requested snapshots are obsolete
if snapshot_id and snapshots:
raise exception.InvalidSnapshotNotFound(id=snap['id'])
formatted_snapshots = SnapshotDescriber().describe(
context, ids=snapshot_id, filter=filter)
return {'snapshotSet': formatted_snapshots}

View File

@ -15,6 +15,7 @@
from cinderclient import exceptions as cinder_exception
from ec2api.api import clients
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import utils
from ec2api.db import api as db_api
@ -48,7 +49,7 @@ def create_volume(context, availability_zone=None, size=None,
cleaner.addCleanup(os_volume.delete)
volume = db_api.add_item(context, 'vol', {'os_id': os_volume.id})
cleaner.addCleanup(db_api.delete_item(context, volume['id']))
cleaner.addCleanup(db_api.delete_item, context, volume['id'])
if not name:
os_volume.update(display_name=volume['id'])
@ -100,38 +101,38 @@ def delete_volume(context, volume_id):
return True
class VolumeDescriber(common.UniversalDescriber):
KIND = 'vol'
FILTER_MAP = {'availability-zone': 'availabilityZone',
'create-time': 'createTime',
'size': 'size',
'snapshot-id': 'snapshotId',
'status': 'status',
'volume-id': 'volumeId'}
def format(self, volume, os_volume):
return _format_volume(self.context, volume, os_volume,
self.instances, self.snapshots)
def get_db_items(self):
self.instances = dict((i['os_id'], i)
for i in db_api.get_items(self.context, 'i'))
self.snapshots = dict((s['os_id'], s)
for s in db_api.get_items(self.context, 'snap'))
return super(VolumeDescriber, self).get_db_items()
def get_os_items(self):
return clients.cinder(self.context).volumes.list()
def get_name(self, os_item):
return ''
def describe_volumes(context, volume_id=None, filter=None,
max_results=None, next_token=None):
volumes = ec2utils.get_db_items(context, 'vol', volume_id)
volumes = dict((vol['os_id'], vol) for vol in volumes)
instances = dict((i['os_id'], i) for i in db_api.get_items(context, 'i'))
snapshots = dict((s['os_id'], s)
for s in db_api.get_items(context, 'snap'))
formatted_volumes = []
cinder = clients.cinder(context)
os_volumes = cinder.volumes.list()
for os_volume in os_volumes:
volume = volumes.pop(os_volume.id, None)
if not volume:
if volume_id:
# NOTE(ft): os_volume is not requested by 'volume_id' filter
continue
else:
volume = ec2utils.get_db_item_by_os_id(context, 'vol',
os_volume.id)
formatted_volume = _format_volume(context, volume, os_volume,
instances, snapshots)
if not utils.filtered_out(formatted_volume, filter, FILTER_MAP):
formatted_volumes.append(formatted_volume)
# NOTE(ft): delete obsolete volumes
for vol in volumes.itervalues():
db_api.delete_item(context, vol['id'])
# NOTE(ft): some requested volumes are obsolete
if volume_id and volumes:
raise exception.InvalidVolumeNotFound(id=vol['id'])
formatted_volumes = VolumeDescriber().describe(
context, ids=volume_id, filter=filter)
return {'volumeSet': formatted_volumes}

View File

@ -121,9 +121,9 @@ class RequestContext(object):
return self.user_id
def get_admin_context(read_deleted="no"):
def get_admin_context(project_id=None, read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
project_id=project_id,
access_key=None,
secret_key=None,
is_admin=True,

View File

@ -86,6 +86,10 @@ def add_item(context, kind, data):
return IMPL.add_item(context, kind, data)
def add_item_id(context, kind, os_id):
return IMPL.add_item_id(context, kind, os_id)
def update_item(context, item):
IMPL.update_item(context, item)
@ -108,3 +112,11 @@ def get_item_by_id(context, kind, item_id):
def get_items_by_ids(context, kind, item_ids):
return IMPL.get_items_by_ids(context, kind, item_ids)
def get_public_items(context, kind, item_ids):
return IMPL.get_public_items(context, kind, item_ids)
def get_item_ids(context, kind, os_ids):
return IMPL.get_item_ids(context, kind, os_ids)

View File

@ -21,10 +21,12 @@ import random
import sys
from oslo.config import cfg
from sqlalchemy import or_
from ec2api.api import ec2utils
import ec2api.context
from ec2api.db.sqlalchemy import models
from ec2api.openstack.common.db import exception as db_exception
from ec2api.openstack.common.db.sqlalchemy import session as db_session
CONF = cfg.CONF
@ -86,29 +88,76 @@ def model_query(context, model, *args, **kwargs):
return session.query(model, *args)
@require_context
def add_item(context, kind, data):
def _new_id(kind, os_id):
# NOTE(ft): obtaining a new id from the Nova DB is a temporary solution
# while we don't implement all Nova EC2 methods
if kind == 'i':
obj_id = ec2utils.id_to_ec2_inst_id(data['os_id'])
obj_id = ec2utils.id_to_ec2_inst_id(os_id)
elif kind == 'vol':
obj_id = ec2utils.id_to_ec2_vol_id(data['os_id'])
obj_id = ec2utils.id_to_ec2_vol_id(os_id)
elif kind == 'snap':
obj_id = ec2utils.id_to_ec2_snap_id(data['os_id'])
obj_id = ec2utils.id_to_ec2_snap_id(os_id)
elif kind in ('ami', 'ari', 'aki'):
obj_id = ec2utils.glance_id_to_ec2_id(
ec2api.context.get_admin_context(), os_id, kind)
else:
obj_id = "%(kind)s-%(id)08x" % {"kind": kind,
"id": random.randint(1, 0xffffffff)}
return obj_id
@require_context
def add_item(context, kind, data):
item_ref = models.Item()
item_ref.update({
"project_id": context.project_id,
"id": obj_id,
"id": _new_id(kind, data["os_id"]),
})
item_ref.update(_pack_item_data(data))
item_ref.save()
try:
item_ref.save()
except db_exception.DBDuplicateEntry as ex:
if len(ex.columns) == 1 and 'PRIMARY' in ex.columns:
# NOTE(ft): temporary workaround for using Nova to generate
# new EC2 identifiers
pass
elif models.ITEMS_OS_ID_INDEX_NAME not in ex.columns:
raise
item_ref = (model_query(context, models.Item).
filter_by(os_id=data["os_id"]).
filter(or_(models.Item.project_id == context.project_id,
models.Item.project_id.is_(None))).
one())
item_data = _unpack_item_data(item_ref)
item_data.update(data)
item_ref.update(_pack_item_data(item_data))
item_ref.project_id = context.project_id
item_ref.save()
return _unpack_item_data(item_ref)
@require_context
def add_item_id(context, kind, os_id):
item_ref = models.Item()
item_ref.update({
"id": _new_id(kind, os_id),
"os_id": os_id,
})
try:
item_ref.save()
except db_exception.DBDuplicateEntry as ex:
if len(ex.columns) == 1 and 'PRIMARY' in ex.columns:
# NOTE(ft): temporary workaround for using Nova to generate
# new EC2 identifiers
pass
elif models.ITEMS_OS_ID_INDEX_NAME not in ex.columns:
raise
item_ref = (model_query(context, models.Item).
filter_by(os_id=os_id).
one())
return item_ref.id
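
add_item_id() backs os_id_to_ec2_id() when only an id mapping is needed, without a full item. A sketch of the duplicate handling, with a made-up uuid:

# The first call stores a bare row with a freshly generated EC2-style id;
# a repeated call for the same os_id hits the unique index on os_id and
# falls through to returning the already stored id.
ec2_id = add_item_id(context, 'snap', 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')
same_id = add_item_id(context, 'snap', 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')
assert ec2_id == same_id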
@require_context
def update_item(context, item):
item_ref = (model_query(context, models.Item).
@ -160,7 +209,7 @@ def get_item_by_id(context, kind, item_id):
@require_context
def get_items_by_ids(context, kind, item_ids):
if item_ids is None or item_ids == []:
if not item_ids:
return get_items(context, kind)
return [_unpack_item_data(item)
for item in (model_query(context, models.Item).
@ -170,6 +219,27 @@ def get_items_by_ids(context, kind, item_ids):
all()]
@require_context
def get_public_items(context, kind, item_ids):
query = (model_query(context, models.Item).
filter(models.Item.id.like('%s-%%' % kind)).
filter(models.Item.data.like('%\'is_public\': True%')))
if item_ids:
query = query.filter(models.Item.id.in_(item_ids))
return [_unpack_item_data(item)
for item in query.all()]
@require_context
def get_item_ids(context, kind, os_ids):
query = (model_query(context, models.Item).
filter(models.Item.id.like('%s-%%' % kind)))
if os_ids:
query = query.filter(models.Item.os_id.in_(os_ids))
return [(item['id'], item['os_id'])
for item in query.all()]
def _pack_item_data(item_data):
data = copy.deepcopy(item_data)
data.pop("id", None)
@ -183,7 +253,8 @@ def _pack_item_data(item_data):
def _unpack_item_data(item_ref):
if item_ref is None:
return None
data = ast.literal_eval(item_ref.data)
data = item_ref.data
data = ast.literal_eval(data) if data is not None else {}
data["id"] = item_ref.id
data["os_id"] = item_ref.os_id
data["vpc_id"] = item_ref.vpc_id

View File

@ -28,7 +28,7 @@ def upgrade(migrate_engine):
Column("os_id", String(length=36)),
Column("data", Text()),
PrimaryKeyConstraint('id'),
UniqueConstraint('id', name='items_os_id_idx'),
UniqueConstraint('os_id', name='items_os_id_idx'),
mysql_engine="InnoDB",
mysql_charset="utf8"
)

View File

@ -24,6 +24,8 @@ from ec2api.openstack.common.db.sqlalchemy import models
BASE = declarative_base()
ITEMS_OS_ID_INDEX_NAME = 'items_os_id_idx'
class EC2Base(models.ModelBase):
metadata = None
@ -41,7 +43,7 @@ class Item(BASE, EC2Base):
__tablename__ = 'items'
__table_args__ = (
PrimaryKeyConstraint('id'),
UniqueConstraint('id', name='items_os_id_idx'),
UniqueConstraint('os_id', name=ITEMS_OS_ID_INDEX_NAME),
)
id = Column(String(length=30))
project_id = Column(String(length=64))

View File

@ -254,6 +254,11 @@ class InvalidSnapshotNotFound(EC2NotFound):
msg_fmt = _("Snapshot %(id)s could not be found.")
class InvalidAMIIDNotFound(EC2NotFound):
ec2_code = 'InvalidAMIID.NotFound'
msg_fmt = _("The image id '[%(id)s]' does not exist")
class IncorrectState(EC2Exception):
ec2_code = 'IncorrectState'
code = 400