Modifies S3ImageService to wrap LocalImageService or GlanceImageService. It now pulls the image parts out of S3, decrypts them locally, and sends them to the underlying service. It also includes various fixes for image/glance.py, image/local.py, and the tests.
I also uncovered a bug in Glance, so for the Glance backend to work properly this change requires the Glance patch at lp:~vishvananda/glance/fix-update or Glance's Cactus trunk r80.
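For reference, a minimal sketch of how an operator might select the wrapped backend using the flags touched by this change (the flagfile path and the values shown are illustrative, not part of the commit):

    # /etc/nova/nova.conf (illustrative flagfile)
    --image_service=nova.image.glance.GlanceImageService
    --image_decryption_dir=/var/lib/nova/tmp

S3ImageService instantiates whatever FLAGS.image_service points to, so the same flag that previously chose the whole image service now chooses the backend that S3ImageService delegates to.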
commit 144fa50821
bin/nova-manage | 157
@@ -55,6 +55,8 @@

import datetime
import gettext
import glob
import json
import os
import re
import sys

@@ -81,7 +83,7 @@ from nova import log as logging

from nova import quota
from nova import rpc
from nova import utils
from nova.api.ec2.cloud import ec2_id_to_id
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.compute import instance_types

@@ -94,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager')

flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())

@@ -104,7 +107,7 @@ def param2id(object_id):

    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
    """
    if '-' in object_id:
        return ec2_id_to_id(object_id)
        return ec2utils.ec2_id_to_id(object_id)
    else:
        return int(object_id)

@@ -744,6 +747,155 @@ class InstanceTypeCommands(object):

        self._print_instance_types(name, inst_types)


class ImageCommands(object):
    """Methods for dealing with a cloud in an odd state"""

    def __init__(self, *args, **kwargs):
        self.image_service = utils.import_object(FLAGS.image_service)

    def _register(self, image_type, disk_format, container_format,
                  path, owner, name=None, is_public='T',
                  architecture='x86_64', kernel_id=None, ramdisk_id=None):
        meta = {'is_public': True,
                'name': name,
                'disk_format': disk_format,
                'container_format': container_format,
                'properties': {'image_state': 'available',
                               'owner': owner,
                               'type': image_type,
                               'architecture': architecture,
                               'image_location': 'local',
                               'is_public': (is_public == 'T')}}
        print image_type, meta
        if kernel_id:
            meta['properties']['kernel_id'] = int(kernel_id)
        if ramdisk_id:
            meta['properties']['ramdisk_id'] = int(ramdisk_id)
        elevated = context.get_admin_context()
        try:
            with open(path) as ifile:
                image = self.image_service.create(elevated, meta, ifile)
            new = image['id']
            print _("Image registered to %(new)s (%(new)08x).") % locals()
            return new
        except Exception as exc:
            print _("Failed to register %(path)s: %(exc)s") % locals()

    def all_register(self, image, kernel, ramdisk, owner, name=None,
                     is_public='T', architecture='x86_64'):
        """Uploads an image, kernel, and ramdisk into the image_service
        arguments: image kernel ramdisk owner [name] [is_public='T']
                   [architecture='x86_64']"""
        kernel_id = self.kernel_register(kernel, owner, None,
                                         is_public, architecture)
        ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
                                           is_public, architecture)
        self.image_register(image, owner, name, is_public,
                            architecture, kernel_id, ramdisk_id)

    def image_register(self, path, owner, name=None, is_public='T',
                       architecture='x86_64', kernel_id=None, ramdisk_id=None,
                       disk_format='ami', container_format='ami'):
        """Uploads an image into the image_service
        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
                   [kernel_id=None] [ramdisk_id=None]
                   [disk_format='ami'] [container_format='ami']"""
        return self._register('machine', disk_format, container_format, path,
                              owner, name, is_public, architecture,
                              kernel_id, ramdisk_id)

    def kernel_register(self, path, owner, name=None, is_public='T',
                        architecture='x86_64'):
        """Uploads a kernel into the image_service
        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
        """
        return self._register('kernel', 'aki', 'aki', path, owner, name,
                              is_public, architecture)

    def ramdisk_register(self, path, owner, name=None, is_public='T',
                         architecture='x86_64'):
        """Uploads a ramdisk into the image_service
        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
        """
        return self._register('ramdisk', 'ari', 'ari', path, owner, name,
                              is_public, architecture)

    def _lookup(self, old_image_id):
        try:
            internal_id = ec2utils.ec2_id_to_id(old_image_id)
            image = self.image_service.show(context, internal_id)
        except exception.NotFound:
            image = self.image_service.show_by_name(context, old_image_id)
        return image['id']

    def _old_to_new(self, old):
        mapping = {'machine': 'ami',
                   'kernel': 'aki',
                   'ramdisk': 'ari'}
        container_format = mapping[old['type']]
        disk_format = container_format
        new = {'disk_format': disk_format,
               'container_format': container_format,
               'is_public': True,
               'name': old['imageId'],
               'properties': {'image_state': old['imageState'],
                              'owner': old['imageOwnerId'],
                              'architecture': old['architecture'],
                              'type': old['type'],
                              'image_location': old['imageLocation'],
                              'is_public': old['isPublic']}}
        if old.get('kernelId'):
            new['properties']['kernel_id'] = self._lookup(old['kernelId'])
        if old.get('ramdiskId'):
            new['properties']['ramdisk_id'] = self._lookup(old['ramdiskId'])
        return new

    def _convert_images(self, images):
        elevated = context.get_admin_context()
        for image_path, image_metadata in images.iteritems():
            meta = self._old_to_new(image_metadata)
            old = meta['name']
            try:
                with open(image_path) as ifile:
                    image = self.image_service.create(elevated, meta, ifile)
                new = image['id']
                print _("Image %(old)s converted to "
                        "%(new)s (%(new)08x).") % locals()
            except Exception as exc:
                print _("Failed to convert %(old)s: %(exc)s") % locals()

    def convert(self, directory):
        """Uploads old objectstore images in directory to new service
        arguments: directory"""
        machine_images = {}
        other_images = {}
        directory = os.path.abspath(directory)
        # NOTE(vish): If we're importing from the images path dir, attempt
        #             to move the files out of the way before importing
        #             so we aren't writing to the same directory. This
        #             may fail if the dir was a mountpoint.
        if (FLAGS.image_service == 'nova.image.local.LocalImageService'
                and directory == os.path.abspath(FLAGS.images_path)):
            new_dir = "%s_bak" % directory
            os.rename(directory, new_dir)
            os.mkdir(directory)
            directory = new_dir
        for fn in glob.glob("%s/*/info.json" % directory):
            try:
                image_path = os.path.join(fn.rpartition('/')[0], 'image')
                with open(fn) as metadata_file:
                    image_metadata = json.load(metadata_file)
                if image_metadata['type'] == 'machine':
                    machine_images[image_path] = image_metadata
                else:
                    other_images[image_path] = image_metadata
            except Exception as exc:
                print _("Failed to load %(fn)s.") % locals()
        # NOTE(vish): do kernels and ramdisks first so images can
        #             reference them once they are registered
        self._convert_images(other_images)
        self._convert_images(machine_images)


CATEGORIES = [
    ('user', UserCommands),
    ('project', ProjectCommands),

@@ -758,6 +910,7 @@ CATEGORIES = [

    ('db', DbCommands),
    ('volume', VolumeCommands),
    ('instance_type', InstanceTypeCommands),
    ('image', ImageCommands),
    ('flavor', InstanceTypeCommands)]
@@ -173,7 +173,10 @@ Nova Floating IPs

``nova-manage floating create <host> <ip_range>``

    Creates floating IP addresses for the named host by the given range.
floating delete <ip_range> Deletes floating IP addresses in the range given.

``nova-manage floating delete <ip_range>``

    Deletes floating IP addresses in the range given.

``nova-manage floating list``

@@ -193,7 +196,7 @@ Nova Flavor

``nova-manage flavor create <name> <memory> <vCPU> <local_storage> <flavorID> <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>``

    creates a flavor with the following positional arguments:
        * memory (expressed in megabytes)
        * memory (expressed in megabytes)
        * vcpu(s) (integer)
        * local storage (expressed in gigabytes)
        * flavorid (unique integer)

@@ -209,12 +212,33 @@ Nova Flavor

    Purges the flavor with the name <name>. This removes this flavor from the database.

Nova Instance_type
~~~~~~~~~~~~~~~~~~

The instance_type command is provided as an alias for the flavor command. All the same subcommands and arguments from nova-manage flavor can be used.

Nova Images
~~~~~~~~~~~

``nova-manage image image_register <path> <owner>``

    Registers an image with the image service.

``nova-manage image kernel_register <path> <owner>``

    Registers a kernel with the image service.

``nova-manage image ramdisk_register <path> <owner>``

    Registers a ramdisk with the image service.

``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``

    Registers an image, kernel, and ramdisk with the image service. An example invocation is shown below.

``nova-manage image convert <directory>``

    Converts all images in directory from the old (Bexar) format to the new format.
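A typical session, assuming an unpacked bundle already sits in local files (the paths and the project name here are only illustrative):

    # register kernel, ramdisk, and machine image in one step
    nova-manage image all_register /tmp/bundle/image /tmp/bundle/kernel /tmp/bundle/ramdisk myproject myimage

    # migrate images left over from the Bexar objectstore layout
    nova-manage image convert /var/lib/nova/images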
FILES
========
@@ -182,6 +182,29 @@ Nova Floating IPs

    Displays a list of all floating IP addresses.

Nova Images
~~~~~~~~~~~

``nova-manage image image_register <path> <owner>``

    Registers an image with the image service.

``nova-manage image kernel_register <path> <owner>``

    Registers a kernel with the image service.

``nova-manage image ramdisk_register <path> <owner>``

    Registers a ramdisk with the image service.

``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``

    Registers an image, kernel, and ramdisk with the image service.

``nova-manage image convert <directory>``

    Converts all images in directory from the old (Bexar) format to the new format.

Concept: Flags
--------------
@@ -39,7 +39,9 @@ from nova import log as logging

from nova import network
from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.image import s3


FLAGS = flags.FLAGS

@@ -73,30 +75,19 @@ def _gen_key(context, user_id, key_name):

    return {'private_key': private_key, 'fingerprint': fingerprint}


def ec2_id_to_id(ec2_id):
    """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
    return int(ec2_id.split('-')[-1], 16)


def id_to_ec2_id(instance_id, template='i-%08x'):
    """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
    return template % instance_id


class CloudController(object):
    """ CloudController provides the critical dispatch between
    inbound API calls through the endpoint and messages
    sent to the other nodes.
    """
    def __init__(self):
        self.image_service = utils.import_object(FLAGS.image_service)
        self.image_service = s3.S3ImageService()
        self.network_api = network.API()
        self.volume_api = volume.API()
        self.compute_api = compute.API(
            network_api=self.network_api,
            image_service=self.image_service,
            volume_api=self.volume_api,
            hostname_factory=id_to_ec2_id)
            hostname_factory=ec2utils.id_to_ec2_id)
        self.setup()

    def __str__(self):

@@ -154,11 +145,14 @@ class CloudController(object):

        availability_zone = self._get_availability_zone_by_host(ctxt, host)
        floating_ip = db.instance_get_floating_address(ctxt,
                                                       instance_ref['id'])
        ec2_id = id_to_ec2_id(instance_ref['id'])
        ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
        image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine')
        k_ec2_id = self._image_ec2_id(instance_ref['kernel_id'], 'kernel')
        r_ec2_id = self._image_ec2_id(instance_ref['ramdisk_id'], 'ramdisk')
        data = {
            'user-data': base64.b64decode(instance_ref['user_data']),
            'meta-data': {
                'ami-id': instance_ref['image_id'],
                'ami-id': image_ec2_id,
                'ami-launch-index': instance_ref['launch_index'],
                'ami-manifest-path': 'FIXME',
                'block-device-mapping': {

@@ -173,12 +167,12 @@ class CloudController(object):

                'instance-type': instance_ref['instance_type'],
                'local-hostname': hostname,
                'local-ipv4': address,
                'kernel-id': instance_ref['kernel_id'],
                'kernel-id': k_ec2_id,
                'ramdisk-id': r_ec2_id,
                'placement': {'availability-zone': availability_zone},
                'public-hostname': hostname,
                'public-ipv4': floating_ip or '',
                'public-keys': keys,
                'ramdisk-id': instance_ref['ramdisk_id'],
                'reservation-id': instance_ref['reservation_id'],
                'security-groups': '',
                'mpi': mpi}}

@@ -525,7 +519,7 @@ class CloudController(object):

            ec2_id = instance_id[0]
        else:
            ec2_id = instance_id
        instance_id = ec2_id_to_id(ec2_id)
        instance_id = ec2utils.ec2_id_to_id(ec2_id)
        output = self.compute_api.get_console_output(
            context, instance_id=instance_id)
        now = datetime.datetime.utcnow()

@@ -535,7 +529,7 @@ class CloudController(object):

    def get_ajax_console(self, context, instance_id, **kwargs):
        ec2_id = instance_id[0]
        instance_id = ec2_id_to_id(ec2_id)
        instance_id = ec2utils.ec2_id_to_id(ec2_id)
        return self.compute_api.get_ajax_console(context,
                                                 instance_id=instance_id)

@@ -543,7 +537,7 @@ class CloudController(object):

        if volume_id:
            volumes = []
            for ec2_id in volume_id:
                internal_id = ec2_id_to_id(ec2_id)
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                volume = self.volume_api.get(context, internal_id)
                volumes.append(volume)
        else:

@@ -556,11 +550,11 @@ class CloudController(object):

        instance_data = None
        if volume.get('instance', None):
            instance_id = volume['instance']['id']
            instance_ec2_id = id_to_ec2_id(instance_id)
            instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
            instance_data = '%s[%s]' % (instance_ec2_id,
                                        volume['instance']['host'])
        v = {}
        v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x')
        v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x')
        v['status'] = volume['status']
        v['size'] = volume['size']
        v['availabilityZone'] = volume['availability_zone']

@@ -578,8 +572,7 @@ class CloudController(object):

                                 'device': volume['mountpoint'],
                                 'instanceId': instance_ec2_id,
                                 'status': 'attached',
                                 'volumeId': id_to_ec2_id(volume['id'],
                                                          'vol-%08x')}]
                                 'volumeId': v['volumeId']}]
        else:
            v['attachmentSet'] = [{}]

@@ -598,12 +591,12 @@ class CloudController(object):

        return {'volumeSet': [self._format_volume(context, dict(volume))]}

    def delete_volume(self, context, volume_id, **kwargs):
        volume_id = ec2_id_to_id(volume_id)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        self.volume_api.delete(context, volume_id=volume_id)
        return True

    def update_volume(self, context, volume_id, **kwargs):
        volume_id = ec2_id_to_id(volume_id)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        updatable_fields = ['display_name', 'display_description']
        changes = {}
        for field in updatable_fields:

@@ -614,8 +607,8 @@ class CloudController(object):

        return True

    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
        volume_id = ec2_id_to_id(volume_id)
        instance_id = ec2_id_to_id(instance_id)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
                " at %(device)s") % locals()
        LOG.audit(msg, context=context)

@@ -626,22 +619,22 @@ class CloudController(object):

        volume = self.volume_api.get(context, volume_id)
        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': id_to_ec2_id(instance_id),
                'instanceId': ec2utils.id_to_ec2_id(instance_id),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
                'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}

    def detach_volume(self, context, volume_id, **kwargs):
        volume_id = ec2_id_to_id(volume_id)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        LOG.audit(_("Detach volume %s"), volume_id, context=context)
        volume = self.volume_api.get(context, volume_id)
        instance = self.compute_api.detach_volume(context, volume_id=volume_id)
        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': id_to_ec2_id(instance['id']),
                'instanceId': ec2utils.id_to_ec2_id(instance['id']),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
                'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}

    def _convert_to_set(self, lst, label):
        if lst == None or lst == []:

@@ -675,7 +668,7 @@ class CloudController(object):

        if instance_id:
            instances = []
            for ec2_id in instance_id:
                internal_id = ec2_id_to_id(ec2_id)
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                instance = self.compute_api.get(context,
                                                instance_id=internal_id)
                instances.append(instance)

@@ -687,9 +680,9 @@ class CloudController(object):

                continue
            i = {}
            instance_id = instance['id']
            ec2_id = id_to_ec2_id(instance_id)
            ec2_id = ec2utils.id_to_ec2_id(instance_id)
            i['instanceId'] = ec2_id
            i['imageId'] = instance['image_id']
            i['imageId'] = self._image_ec2_id(instance['image_id'])
            i['instanceState'] = {
                'code': instance['state'],
                'name': instance['state_description']}

@@ -755,7 +748,7 @@ class CloudController(object):

            if (floating_ip_ref['fixed_ip']
                    and floating_ip_ref['fixed_ip']['instance']):
                instance_id = floating_ip_ref['fixed_ip']['instance']['id']
                ec2_id = id_to_ec2_id(instance_id)
                ec2_id = ec2utils.id_to_ec2_id(instance_id)
                address_rv = {'public_ip': address,
                              'instance_id': ec2_id}
            if context.is_admin:

@@ -778,7 +771,7 @@ class CloudController(object):

    def associate_address(self, context, instance_id, public_ip, **kwargs):
        LOG.audit(_("Associate address %(public_ip)s to"
                    " instance %(instance_id)s") % locals(), context=context)
        instance_id = ec2_id_to_id(instance_id)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        self.compute_api.associate_floating_ip(context,
                                               instance_id=instance_id,
                                               address=public_ip)

@@ -791,13 +784,19 @@ class CloudController(object):

    def run_instances(self, context, **kwargs):
        max_count = int(kwargs.get('max_count', 1))
        if kwargs.get('kernel_id'):
            kernel = self._get_image(context, kwargs['kernel_id'])
            kwargs['kernel_id'] = kernel['id']
        if kwargs.get('ramdisk_id'):
            ramdisk = self._get_image(context, kwargs['ramdisk_id'])
            kwargs['ramdisk_id'] = ramdisk['id']
        instances = self.compute_api.create(context,
            instance_type=instance_types.get_by_type(
                kwargs.get('instance_type', None)),
            image_id=kwargs['image_id'],
            image_id=self._get_image(context, kwargs['image_id'])['id'],
            min_count=int(kwargs.get('min_count', max_count)),
            max_count=max_count,
            kernel_id=kwargs.get('kernel_id', None),
            kernel_id=kwargs.get('kernel_id'),
            ramdisk_id=kwargs.get('ramdisk_id'),
            display_name=kwargs.get('display_name'),
            display_description=kwargs.get('display_description'),

@@ -814,7 +813,7 @@ class CloudController(object):

        instance_id is a kwarg so its name cannot be modified."""
        LOG.debug(_("Going to start terminating instances"))
        for ec2_id in instance_id:
            instance_id = ec2_id_to_id(ec2_id)
            instance_id = ec2utils.ec2_id_to_id(ec2_id)
            self.compute_api.delete(context, instance_id=instance_id)
        return True

@@ -822,19 +821,19 @@ class CloudController(object):

        """instance_id is a list of instance ids"""
        LOG.audit(_("Reboot instance %r"), instance_id, context=context)
        for ec2_id in instance_id:
            instance_id = ec2_id_to_id(ec2_id)
            instance_id = ec2utils.ec2_id_to_id(ec2_id)
            self.compute_api.reboot(context, instance_id=instance_id)
        return True

    def rescue_instance(self, context, instance_id, **kwargs):
        """This is an extension to the normal ec2_api"""
        instance_id = ec2_id_to_id(instance_id)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        self.compute_api.rescue(context, instance_id=instance_id)
        return True

    def unrescue_instance(self, context, instance_id, **kwargs):
        """This is an extension to the normal ec2_api"""
        instance_id = ec2_id_to_id(instance_id)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        self.compute_api.unrescue(context, instance_id=instance_id)
        return True

@@ -845,41 +844,80 @@ class CloudController(object):

            if field in kwargs:
                changes[field] = kwargs[field]
        if changes:
            instance_id = ec2_id_to_id(instance_id)
            instance_id = ec2utils.ec2_id_to_id(instance_id)
            self.compute_api.update(context, instance_id=instance_id, **kwargs)
        return True

    def _format_image(self, context, image):
    _type_prefix_map = {'machine': 'ami',
                        'kernel': 'aki',
                        'ramdisk': 'ari'}

    def _image_ec2_id(self, image_id, image_type='machine'):
        prefix = self._type_prefix_map[image_type]
        template = prefix + '-%08x'
        return ec2utils.id_to_ec2_id(int(image_id), template=template)

    def _get_image(self, context, ec2_id):
        try:
            internal_id = ec2utils.ec2_id_to_id(ec2_id)
            return self.image_service.show(context, internal_id)
        except exception.NotFound:
            return self.image_service.show_by_name(context, ec2_id)

    def _format_image(self, image):
        """Convert from format defined by BaseImageService to S3 format."""
        i = {}
        i['imageId'] = image.get('id')
        i['kernelId'] = image.get('kernel_id')
        i['ramdiskId'] = image.get('ramdisk_id')
        i['imageOwnerId'] = image.get('owner_id')
        i['imageLocation'] = image.get('location')
        i['imageState'] = image.get('status')
        i['type'] = image.get('type')
        i['isPublic'] = image.get('is_public')
        i['architecture'] = image.get('architecture')
        image_type = image['properties'].get('type')
        ec2_id = self._image_ec2_id(image.get('id'), image_type)
        name = image.get('name')
        if name:
            i['imageId'] = "%s (%s)" % (ec2_id, name)
        else:
            i['imageId'] = ec2_id
        kernel_id = image['properties'].get('kernel_id')
        if kernel_id:
            i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel')
        ramdisk_id = image['properties'].get('ramdisk_id')
        if ramdisk_id:
            i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk')
        i['imageOwnerId'] = image['properties'].get('owner_id')
        i['imageLocation'] = image['properties'].get('image_location')
        i['imageState'] = image['properties'].get('image_state')
        i['type'] = image_type
        i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True'
        i['architecture'] = image['properties'].get('architecture')
        return i

    def describe_images(self, context, image_id=None, **kwargs):
        # NOTE: image_id is a list!
        images = self.image_service.index(context)
        if image_id:
            images = filter(lambda x: x['id'] in image_id, images)
        images = [self._format_image(context, i) for i in images]
            images = []
            for ec2_id in image_id:
                try:
                    image = self._get_image(context, ec2_id)
                except exception.NotFound:
                    raise exception.NotFound(_('Image %s not found') %
                                             ec2_id)
                images.append(image)
        else:
            images = self.image_service.detail(context)
        images = [self._format_image(i) for i in images]
        return {'imagesSet': images}

    def deregister_image(self, context, image_id, **kwargs):
        LOG.audit(_("De-registering image %s"), image_id, context=context)
        self.image_service.deregister(context, image_id)
        image = self._get_image(context, image_id)
        internal_id = image['id']
        self.image_service.delete(context, internal_id)
        return {'imageId': image_id}

    def register_image(self, context, image_location=None, **kwargs):
        if image_location is None and 'name' in kwargs:
            image_location = kwargs['name']
        image_id = self.image_service.register(context, image_location)
        metadata = {'properties': {'image_location': image_location}}
        image = self.image_service.create(context, metadata)
        image_id = self._image_ec2_id(image['id'],
                                      image['properties']['type'])
        msg = _("Registered image %(image_location)s with"
                " id %(image_id)s") % locals()
        LOG.audit(msg, context=context)

@@ -890,13 +928,11 @@ class CloudController(object):

            raise exception.ApiError(_('attribute not supported: %s')
                                     % attribute)
        try:
            image = self._format_image(context,
                                       self.image_service.show(context,
                                                               image_id))
        except IndexError:
            raise exception.ApiError(_('invalid id: %s') % image_id)
        result = {'image_id': image_id, 'launchPermission': []}
        if image['isPublic']:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.NotFound(_('Image %s not found') % image_id)
        result = {'imageId': image_id, 'launchPermission': []}
        if image['properties']['is_public']:
            result['launchPermission'].append({'group': 'all'})
        return result

@@ -913,8 +949,18 @@ class CloudController(object):

        if not operation_type in ['add', 'remove']:
            raise exception.ApiError(_('operation_type must be add or remove'))
        LOG.audit(_("Updating image %s publicity"), image_id, context=context)
        return self.image_service.modify(context, image_id, operation_type)

        try:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.NotFound(_('Image %s not found') % image_id)
        internal_id = image['id']
        del(image['id'])
        image['properties']['is_public'] = (operation_type == 'add')
        return self.image_service.update(context, internal_id, image)

    def update_image(self, context, image_id, **kwargs):
        result = self.image_service.update(context, image_id, dict(kwargs))
        internal_id = ec2utils.ec2_id_to_id(image_id)
        result = self.image_service.update(context, internal_id, dict(kwargs))
        return result
nova/api/ec2/ec2utils.py | 32 (new file)
@@ -0,0 +1,32 @@

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import exception


def ec2_id_to_id(ec2_id):
    """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
    try:
        return int(ec2_id.split('-')[-1], 16)
    except ValueError:
        raise exception.NotFound(_("Id %s Not Found") % ec2_id)


def id_to_ec2_id(instance_id, template='i-%08x'):
    """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
    return template % instance_id
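A quick illustration of the round trip these helpers perform (the id values are chosen purely for the example):

    >>> from nova.api.ec2 import ec2utils
    >>> ec2utils.ec2_id_to_id('i-0000001e')
    30
    >>> ec2utils.id_to_ec2_id(30)
    'i-0000001e'
    >>> ec2utils.id_to_ec2_id(30, 'vol-%08x')
    'vol-0000001e'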
@@ -126,9 +126,9 @@ class API(base.Base):

        image = self.image_service.show(context, image_id)
        if kernel_id is None:
            kernel_id = image.get('kernel_id', None)
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image.get('ramdisk_id', None)
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
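For context, the metadata dict that image service backends return after this change looks roughly like the following (all values are illustrative; the nested 'properties' mapping is where kernel_id and ramdisk_id now live):

    image = {'id': 42,
             'name': 'my-machine-image',
             'disk_format': 'ami',
             'container_format': 'ami',
             'status': 'active',
             'is_public': True,
             'properties': {'type': 'machine',
                            'image_state': 'available',
                            'image_location': 'mybucket/image.manifest.xml',
                            'owner_id': 'myproject',
                            'architecture': 'x86_64',
                            'kernel_id': 7,
                            'ramdisk_id': 8,
                            'is_public': False}}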
@@ -348,7 +348,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',

              'Manager for scheduler')

# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
DEFINE_string('image_service', 'nova.image.local.LocalImageService',
              'The service to use for retrieving and searching for images.')

DEFINE_string('host', socket.gethostname(),
@@ -17,9 +17,8 @@

"""Implementation of an image service that uses Glance as the backend"""

from __future__ import absolute_import
import httplib
import json
import urlparse

from glance.common import exception as glance_exception

from nova import exception
from nova import flags

@@ -53,31 +52,64 @@ class GlanceImageService(service.BaseImageService):

        """
        return self.client.get_images_detailed()

    def show(self, context, id):
    def show(self, context, image_id):
        """
        Returns a dict containing image data for the given opaque image id.
        """
        image = self.client.get_image_meta(id)
        if image:
            return image
        raise exception.NotFound
        try:
            image = self.client.get_image_meta(image_id)
        except glance_exception.NotFound:
            raise exception.NotFound
        return image

    def create(self, context, data):
    def show_by_name(self, context, name):
        """
        Returns a dict containing image data for the given name.
        """
        # TODO(vish): replace this with more efficient call when glance
        #             supports it.
        images = self.detail(context)
        image = None
        for candidate in images:
            if name == candidate.get('name'):
                image = candidate
                break
        if image is None:
            raise exception.NotFound
        return image

    def get(self, context, image_id, data):
        """
        Calls out to Glance for metadata and data and writes data.
        """
        try:
            metadata, image_chunks = self.client.get_image(image_id)
        except glance_exception.NotFound:
            raise exception.NotFound
        for chunk in image_chunks:
            data.write(chunk)
        return metadata

    def create(self, context, metadata, data=None):
        """
        Store the image data and return the new image id.

        :raises AlreadyExists if the image already exists.

        """
        return self.client.add_image(image_meta=data)
        return self.client.add_image(metadata, data)

    def update(self, context, image_id, data):
    def update(self, context, image_id, metadata, data=None):
        """Replace the contents of the given image with the new data.

        :raises NotFound if the image does not exist.

        """
        return self.client.update_image(image_id, data)
        try:
            result = self.client.update_image(image_id, metadata, data)
        except glance_exception.NotFound:
            raise exception.NotFound
        return result

    def delete(self, context, image_id):
        """

@@ -86,7 +118,11 @@ class GlanceImageService(service.BaseImageService):

        :raises NotFound if the image does not exist.

        """
        return self.client.delete_image(image_id)
        try:
            result = self.client.delete_image(image_id)
        except glance_exception.NotFound:
            raise exception.NotFound
        return result

    def delete_all(self):
        """
@@ -15,57 +15,110 @@

# License for the specific language governing permissions and limitations
# under the License.

import cPickle as pickle
import json
import os.path
import random
import tempfile
import shutil

from nova import flags
from nova import exception
from nova.image import service


class LocalImageService(service.BaseImageService):

FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', '$state_path/images',
                    'path to decrypted images')


class LocalImageService(service.BaseImageService):
    """Image service storing images to local disk.

    It assumes that image_ids are integers.

    """

    def __init__(self):
        self._path = tempfile.mkdtemp()
        self._path = FLAGS.images_path

    def _path_to(self, image_id):
        return os.path.join(self._path, str(image_id))
    def _path_to(self, image_id, fname='info.json'):
        if fname:
            return os.path.join(self._path, '%08x' % int(image_id), fname)
        return os.path.join(self._path, '%08x' % int(image_id))

    def _ids(self):
        """The list of all image ids."""
        return [int(i) for i in os.listdir(self._path)]
        return [int(i, 16) for i in os.listdir(self._path)]

    def index(self, context):
        return [dict(id=i['id'], name=i['name']) for i in self.detail(context)]
        return [dict(image_id=i['id'], name=i.get('name'))
                for i in self.detail(context)]

    def detail(self, context):
        return [self.show(context, id) for id in self._ids()]
        images = []
        for image_id in self._ids():
            try:
                image = self.show(context, image_id)
                images.append(image)
            except exception.NotFound:
                continue
        return images

    def show(self, context, id):
    def show(self, context, image_id):
        try:
            return pickle.load(open(self._path_to(id)))
        except IOError:
            with open(self._path_to(image_id)) as metadata_file:
                return json.load(metadata_file)
        except (IOError, ValueError):
            raise exception.NotFound

    def create(self, context, data):
        """Store the image data and return the new image id."""
        id = random.randint(0, 2 ** 31 - 1)
        data['id'] = id
        self.update(context, id, data)
        return id
    def show_by_name(self, context, name):
        """Returns a dict containing image data for the given name."""
        # NOTE(vish): Not very efficient, but the local image service
        #             is for testing so it should be fine.
        images = self.detail(context)
        image = None
        for candidate in images:
            if name == candidate.get('name'):
                image = candidate
                break
        if image == None:
            raise exception.NotFound
        return image

    def update(self, context, image_id, data):
    def get(self, context, image_id, data):
        """Get image and metadata."""
        try:
            with open(self._path_to(image_id)) as metadata_file:
                metadata = json.load(metadata_file)
            with open(self._path_to(image_id, 'image')) as image_file:
                shutil.copyfileobj(image_file, data)
        except (IOError, ValueError):
            raise exception.NotFound
        return metadata

    def create(self, context, metadata, data=None):
        """Store the image data and return the new image."""
        image_id = random.randint(0, 2 ** 31 - 1)
        image_path = self._path_to(image_id, None)
        if not os.path.exists(image_path):
            os.mkdir(image_path)
        return self.update(context, image_id, metadata, data)

    def update(self, context, image_id, metadata, data=None):
        """Replace the contents of the given image with the new data."""
        metadata['id'] = image_id
        try:
            pickle.dump(data, open(self._path_to(image_id), 'w'))
        except IOError:
            if data:
                location = self._path_to(image_id, 'image')
                with open(location, 'w') as image_file:
                    shutil.copyfileobj(data, image_file)
                # NOTE(vish): update metadata similarly to glance
                metadata['status'] = 'active'
                metadata['location'] = location
            with open(self._path_to(image_id), 'w') as metadata_file:
                json.dump(metadata, metadata_file)
        except (IOError, ValueError):
            raise exception.NotFound
        return metadata

    def delete(self, context, image_id):
        """Delete the given image.

@@ -73,18 +126,11 @@ class LocalImageService(service.BaseImageService):

        """
        try:
            os.unlink(self._path_to(image_id))
        except IOError:
            shutil.rmtree(self._path_to(image_id, None))
        except (IOError, ValueError):
            raise exception.NotFound

    def delete_all(self):
        """Clears out all images in local directory."""
        for id in self._ids():
            os.unlink(self._path_to(id))

    def delete_imagedir(self):
        """Deletes the local directory.
        Raises OSError if directory is not empty.

        """
        os.rmdir(self._path)
        for image_id in self._ids():
            shutil.rmtree(self._path_to(image_id, None))
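With these changes, LocalImageService lays images out on disk roughly as follows (the id shown is an example; directory names are the integer image id rendered with '%08x'):

    $state_path/images/
        0000002a/              # image id 42
            info.json          # JSON metadata, including the 'properties' dict
            image              # raw image bytes written by update()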
nova/image/s3.py | 293
@@ -21,8 +21,13 @@ Proxy AMI-related calls from the cloud controller, to the running

objectstore service.
"""

import json
import urllib
import binascii
import eventlet
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree

import boto.s3.connection

@@ -31,84 +36,78 @@ from nova import flags

from nova import utils
from nova.auth import manager
from nova.image import service
from nova.api.ec2 import ec2utils


FLAGS = flags.FLAGS


def map_s3_to_base(image):
    """Convert from S3 format to format defined by BaseImageService."""
    i = {}
    i['id'] = image.get('imageId')
    i['name'] = image.get('imageId')
    i['kernel_id'] = image.get('kernelId')
    i['ramdisk_id'] = image.get('ramdiskId')
    i['location'] = image.get('imageLocation')
    i['owner_id'] = image.get('imageOwnerId')
    i['status'] = image.get('imageState')
    i['type'] = image.get('type')
    i['is_public'] = image.get('isPublic')
    i['architecture'] = image.get('architecture')
    return i
flags.DEFINE_string('image_decryption_dir', '/tmp',
                    'parent dir for tempdir used for image decryption')


class S3ImageService(service.BaseImageService):
    def __init__(self, service=None, *args, **kwargs):
        if service == None:
            service = utils.import_object(FLAGS.image_service)
        self.service = service
        self.service.__init__(*args, **kwargs)

    def modify(self, context, image_id, operation):
        self._conn(context).make_request(
            method='POST',
            bucket='_images',
            query_args=self._qs({'image_id': image_id,
                                 'operation': operation}))
        return True

    def update(self, context, image_id, attributes):
        """update an image's attributes / info.json"""
        attributes.update({"image_id": image_id})
        self._conn(context).make_request(
            method='POST',
            bucket='_images',
            query_args=self._qs(attributes))
        return True

    def register(self, context, image_location):
        """ rpc call to register a new image based from a manifest """
        image_id = utils.generate_uid('ami')
        self._conn(context).make_request(
            method='PUT',
            bucket='_images',
            query_args=self._qs({'image_location': image_location,
                                 'image_id': image_id}))
        return image_id

    def index(self, context):
        """Return a list of all images that a user can see."""
        response = self._conn(context).make_request(
            method='GET',
            bucket='_images')
        images = json.loads(response.read())
        return [map_s3_to_base(i) for i in images]

    def show(self, context, image_id):
        """return a image object if the context has permissions"""
        if FLAGS.connection_type == 'fake':
            return {'imageId': 'bar'}
        result = self.index(context)
        result = [i for i in result if i['id'] == image_id]
        if not result:
            raise exception.NotFound(_('Image %s could not be found')
                                     % image_id)
        image = result[0]
    def create(self, context, metadata, data=None):
        """metadata['properties'] should contain image_location"""
        image = self._s3_create(context, metadata)
        return image

    def deregister(self, context, image_id):
        """ unregister an image """
        self._conn(context).make_request(
            method='DELETE',
            bucket='_images',
            query_args=self._qs({'image_id': image_id}))
    def delete(self, context, image_id):
        # FIXME(vish): call to show is to check filter
        self.show(context, image_id)
        self.service.delete(context, image_id)

    def _conn(self, context):
    def update(self, context, image_id, metadata, data=None):
        # FIXME(vish): call to show is to check filter
        self.show(context, image_id)
        image = self.service.update(context, image_id, metadata, data)
        return image

    def index(self, context):
        images = self.service.index(context)
        # FIXME(vish): index doesn't filter so we do it manually
        return self._filter(context, images)

    def detail(self, context):
        images = self.service.detail(context)
        # FIXME(vish): detail doesn't filter so we do it manually
        return self._filter(context, images)

    @classmethod
    def _is_visible(cls, context, image):
        return (context.is_admin
                or context.project_id == image['properties']['owner_id']
                or image['properties']['is_public'] == 'True')

    @classmethod
    def _filter(cls, context, images):
        filtered = []
        for image in images:
            if not cls._is_visible(context, image):
                continue
            filtered.append(image)
        return filtered

    def show(self, context, image_id):
        image = self.service.show(context, image_id)
        if not self._is_visible(context, image):
            raise exception.NotFound
        return image

    def show_by_name(self, context, name):
        image = self.service.show_by_name(context, name)
        if not self._is_visible(context, image):
            raise exception.NotFound
        return image

    @staticmethod
    def _conn(context):
        # TODO(vish): is there a better way to get creds to sign
        #             for the user?
        access = manager.AuthManager().get_access_key(context.user,
                                                      context.project)
        secret = str(context.user.secret)

@@ -120,8 +119,152 @@ class S3ImageService(service.BaseImageService):

                                            port=FLAGS.s3_port,
                                            host=FLAGS.s3_host)

    def _qs(self, params):
        pairs = []
        for key in params.keys():
            pairs.append(key + '=' + urllib.quote(params[key]))
        return '&'.join(pairs)
    @staticmethod
    def _download_file(bucket, filename, local_dir):
        key = bucket.get_key(filename)
        local_filename = os.path.join(local_dir, filename)
        key.get_contents_to_filename(local_filename)
        return local_filename

    def _s3_create(self, context, metadata):
        """Gets a manifest from s3 and makes an image"""

        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)

        image_location = metadata['properties']['image_location']
        bucket_name = image_location.split("/")[0]
        manifest_path = image_location[len(bucket_name) + 1:]
        bucket = self._conn(context).get_bucket(bucket_name)
        key = bucket.get_key(manifest_path)
        manifest = key.get_contents_as_string()

        manifest = ElementTree.fromstring(manifest)
        image_format = 'ami'
        image_type = 'machine'

        try:
            kernel_id = manifest.find("machine_configuration/kernel_id").text
            if kernel_id == 'true':
                image_format = 'aki'
                image_type = 'kernel'
                kernel_id = None
        except Exception:
            kernel_id = None

        try:
            ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
            if ramdisk_id == 'true':
                image_format = 'ari'
                image_type = 'ramdisk'
                ramdisk_id = None
        except Exception:
            ramdisk_id = None

        try:
            arch = manifest.find("machine_configuration/architecture").text
        except Exception:
            arch = 'x86_64'

        properties = metadata['properties']
        properties['owner_id'] = context.project_id
        properties['architecture'] = arch

        if kernel_id:
            properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)

        if ramdisk_id:
            properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)

        properties['is_public'] = False
        properties['type'] = image_type
        metadata.update({'disk_format': image_format,
                         'container_format': image_format,
                         'status': 'queued',
                         'is_public': True,
                         'properties': properties})
        metadata['properties']['image_state'] = 'pending'
        image = self.service.create(context, metadata)
        image_id = image['id']

        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            parts = []
            for fn_element in manifest.find("image").getiterator("filename"):
                part = self._download_file(bucket, fn_element.text, image_path)
                parts.append(part)

            # NOTE(vish): this may be suboptimal, should we use cat?
            encrypted_filename = os.path.join(image_path, 'image.encrypted')
            with open(encrypted_filename, 'w') as combined:
                for filename in parts:
                    with open(filename) as part:
                        shutil.copyfileobj(part, combined)

            metadata['properties']['image_state'] = 'decrypting'
            self.service.update(context, image_id, metadata)

            hex_key = manifest.find("image/ec2_encrypted_key").text
            encrypted_key = binascii.a2b_hex(hex_key)
            hex_iv = manifest.find("image/ec2_encrypted_iv").text
            encrypted_iv = binascii.a2b_hex(hex_iv)

            # FIXME(vish): grab key from common service so this can run on
            #              any host.
            cloud_pk = os.path.join(FLAGS.ca_path, "private/cakey.pem")

            decrypted_filename = os.path.join(image_path, 'image.tar.gz')
            self._decrypt_image(encrypted_filename, encrypted_key,
                                encrypted_iv, cloud_pk, decrypted_filename)

            metadata['properties']['image_state'] = 'untarring'
            self.service.update(context, image_id, metadata)

            unz_filename = self._untarzip_image(image_path, decrypted_filename)

            metadata['properties']['image_state'] = 'uploading'
            with open(unz_filename) as image_file:
                self.service.update(context, image_id, metadata, image_file)
            metadata['properties']['image_state'] = 'available'
            self.service.update(context, image_id, metadata)

            shutil.rmtree(image_path)

        eventlet.spawn_n(delayed_create)

        return image

    @staticmethod
    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
        key, err = utils.execute(
            'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
            process_input=encrypted_key,
            check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt private key: %s")
                                  % err)
        iv, err = utils.execute(
            'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
            process_input=encrypted_iv,
            check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt initialization "
                                    "vector: %s") % err)

        _out, err = utils.execute(
            'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
            % (encrypted_filename, key, iv, decrypted_filename),
            check_exit_code=False)
        if err:
            raise exception.Error(_("Failed to decrypt image file "
                                    "%(image_file)s: %(err)s") %
                                  {'image_file': encrypted_filename,
                                   'err': err})

    @staticmethod
    def _untarzip_image(path, filename):
        tar_file = tarfile.open(filename, "r|gz")
        tar_file.extractall(path)
        image_file = tar_file.getnames()[0]
        tar_file.close()
        return os.path.join(path, image_file)
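For orientation, an abridged sketch of the bundle manifest fields that _s3_create reads from S3 (the structure is simplified from a typical EC2-style bundle manifest and the values are placeholders; only the elements queried above are shown, other elements are omitted):

    <manifest>
      <machine_configuration>
        <architecture>x86_64</architecture>
        <kernel_id>aki-00000007</kernel_id>
        <ramdisk_id>ari-00000008</ramdisk_id>
      </machine_configuration>
      <image>
        <ec2_encrypted_key>(hex-encoded, RSA-encrypted AES key)</ec2_encrypted_key>
        <ec2_encrypted_iv>(hex-encoded, RSA-encrypted IV)</ec2_encrypted_iv>
        <parts>
          <part index="0"><filename>image.part.00</filename></part>
          <part index="1"><filename>image.part.01</filename></part>
        </parts>
      </image>
    </manifest>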
@@ -56,9 +56,9 @@ class BaseImageService(object):

        """
        raise NotImplementedError

    def show(self, context, id):
    def show(self, context, image_id):
        """
        Returns a dict containing image data for the given opaque image id.
        Returns a dict containing image metadata for the given opaque image id.

        :retval a mapping with the following signature:

@@ -76,17 +76,27 @@ class BaseImageService(object):

        """
        raise NotImplementedError

    def create(self, context, data):
    def get(self, context, data):
        """
        Store the image data and return the new image id.
        Returns a dict containing image metadata and writes image data to data.

        :param data: a file-like object to hold binary image data

        :raises NotFound if the image does not exist
        """
        raise NotImplementedError

    def create(self, context, metadata, data=None):
        """
        Store the image metadata and data and return the new image id.

        :raises AlreadyExists if the image already exists.

        """
        raise NotImplementedError

    def update(self, context, image_id, data):
        """Replace the contents of the given image with the new data.
    def update(self, context, image_id, metadata, data=None):
        """Update the given image with the new metadata and data.

        :raises NotFound if the image does not exist.
@@ -37,8 +37,7 @@ from nova.objectstore import bucket


FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', '$state_path/images',
                    'path to decrypted images')
flags.DECLARE('images_path', 'nova.image.local')


class Image(object):
@@ -25,6 +25,7 @@ import webob.dec

from paste import urlmap

from glance import client as glance_client
from glance.common import exception as glance_exc

from nova import auth
from nova import context

@@ -149,25 +150,26 @@ def stub_out_glance(stubs, initial_fixtures=None):

        for f in self.fixtures:
            if f['id'] == image_id:
                return f
        return None
        raise glance_exc.NotFound

    def fake_add_image(self, image_meta):
    def fake_add_image(self, image_meta, data=None):
        id = ''.join(random.choice(string.letters) for _ in range(20))
        image_meta['id'] = id
        self.fixtures.append(image_meta)
        return id
        return image_meta

    def fake_update_image(self, image_id, image_meta):
    def fake_update_image(self, image_id, image_meta, data=None):
        f = self.fake_get_image_meta(image_id)
        if not f:
            raise exc.NotFound
            raise glance_exc.NotFound

        f.update(image_meta)
        return f

    def fake_delete_image(self, image_id):
        f = self.fake_get_image_meta(image_id)
        if not f:
            raise exc.NotFound
            raise glance_exc.NotFound

        self.fixtures.remove(f)
@@ -22,6 +22,8 @@ and as a WSGI layer

import json
import datetime
import shutil
import tempfile

import stubout
import webob

@@ -54,7 +56,7 @@ class BaseImageServiceTests(object):

        num_images = len(self.service.index(self.context))

        id = self.service.create(self.context, fixture)
        id = self.service.create(self.context, fixture)['id']

        self.assertNotEquals(None, id)
        self.assertEquals(num_images + 1,

@@ -71,7 +73,7 @@ class BaseImageServiceTests(object):

        num_images = len(self.service.index(self.context))

        id = self.service.create(self.context, fixture)
        id = self.service.create(self.context, fixture)['id']

        self.assertNotEquals(None, id)

@@ -89,7 +91,7 @@ class BaseImageServiceTests(object):

                   'instance_id': None,
                   'progress': None}

        id = self.service.create(self.context, fixture)
        id = self.service.create(self.context, fixture)['id']

        fixture['status'] = 'in progress'

@@ -118,7 +120,7 @@ class BaseImageServiceTests(object):

        ids = []
        for fixture in fixtures:
            new_id = self.service.create(self.context, fixture)
            new_id = self.service.create(self.context, fixture)['id']
            ids.append(new_id)

        num_images = len(self.service.index(self.context))

@@ -137,14 +139,15 @@ class LocalImageServiceTest(test.TestCase,

    def setUp(self):
        super(LocalImageServiceTest, self).setUp()
        self.tempdir = tempfile.mkdtemp()
        self.flags(images_path=self.tempdir)
        self.stubs = stubout.StubOutForTesting()
        service_class = 'nova.image.local.LocalImageService'
        self.service = utils.import_object(service_class)
        self.context = context.RequestContext(None, None)

    def tearDown(self):
        self.service.delete_all()
        self.service.delete_imagedir()
        shutil.rmtree(self.tempdir)
        self.stubs.UnsetAll()
        super(LocalImageServiceTest, self).tearDown()
@@ -32,6 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')

FLAGS.network_size = 8
FLAGS.num_networks = 2
FLAGS.fake_network = True
FLAGS.image_service = 'nova.image.local.LocalImageService'
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
@ -38,6 +38,8 @@ from nova import test
|
||||
from nova.auth import manager
|
||||
from nova.compute import power_state
|
||||
from nova.api.ec2 import cloud
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova.image import local
|
||||
from nova.objectstore import image
|
||||
|
||||
|
||||
@ -76,6 +78,12 @@ class CloudTestCase(test.TestCase):
|
||||
project=self.project)
|
||||
host = self.network.get_network_host(self.context.elevated())
|
||||
|
||||
def fake_show(meh, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show)
|
||||
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
|
||||
|
||||
def tearDown(self):
|
||||
network_ref = db.project_get_network(self.context,
|
||||
self.project.id)
|
||||
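Note (illustrative, not part of this commit): stubbing LocalImageService.show and show_by_name with fake_show lets run_instances resolve an image, and its kernel_id/ramdisk_id properties, without touching a real image backend. The same idea as a tiny standalone fake:

# Sketch only: a stand-in image service a test could substitute for the real one.
class FakeImageService(object):
    def show(self, context, image_id):
        # Just enough metadata for the EC2 layer to find a kernel and ramdisk.
        return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

    # The tests above stub show_by_name with the same function.
    show_by_name = show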
@ -122,7 +130,7 @@ class CloudTestCase(test.TestCase):
self.cloud.allocate_address(self.context)
inst = db.instance_create(self.context, {'host': self.compute.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
ec2_id = cloud.id_to_ec2_id(inst['id'])
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)

@ -158,12 +166,12 @@ class CloudTestCase(test.TestCase):
vol2 = db.volume_create(self.context, {})
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
result = self.cloud.describe_volumes(self.context,
volume_id=[volume_id])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(
cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])

@ -188,8 +196,10 @@ class CloudTestCase(test.TestCase):
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,
'host': 'host1'})
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,
'host': 'host2'})
comp1 = db.service_create(self.context, {'host': 'host1',
'availability_zone': 'zone1',

@ -200,7 +210,7 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
instance_id = cloud.id_to_ec2_id(inst2['id'])
instance_id = ec2utils.id_to_ec2_id(inst2['id'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]

@ -215,10 +225,9 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])

def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': image_id,
kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)

@ -234,8 +243,7 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)

def test_ajax_console(self):
image_id = FLAGS.default_image
kwargs = {'image_id': image_id}
kwargs = {'image_id': 'ami-1'}
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
greenthread.sleep(0.3)

@ -347,7 +355,7 @@ class CloudTestCase(test.TestCase):

def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
ec2_id = cloud.id_to_ec2_id(inst['id'])
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])

@ -365,7 +373,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_display_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
display_name='c00l v0lum3')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual('c00l v0lum3', vol['display_name'])

@ -374,7 +382,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_wont_update_private_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
mountpoint='/not/here')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
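Note (illustrative, not part of this commit): the id conversion helpers referenced throughout these tests now live in nova.api.ec2.ec2utils instead of nova.api.ec2.cloud; only the callers change here. Roughly, they map between integer database ids and EC2-style ids using a zero-padded hex template:

# Sketch of the assumed behavior, not nova's actual implementation.
def id_to_ec2_id(instance_id, template='i-%08x'):
    """Convert an internal integer id to an EC2-style id, e.g. 10 -> 'i-0000000a'."""
    return template % int(instance_id)

def ec2_id_to_id(ec2_id):
    """Convert an EC2-style id back to an integer, e.g. 'vol-0000000a' -> 10."""
    return int(ec2_id.split('-')[-1], 16)

assert id_to_ec2_id(10, 'vol-%08x') == 'vol-0000000a'
assert ec2_id_to_id('vol-0000000a') == 10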
@ -31,7 +31,7 @@ from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types

from nova.image import local

LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS

@ -52,6 +52,11 @@ class ComputeTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)

def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

self.stubs.Set(local.LocalImageService, 'show', fake_show)

def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)

@ -60,7 +65,7 @@ class ComputeTestCase(test.TestCase):
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id

@ -57,7 +57,7 @@ class ConsoleTestCase(test.TestCase):
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id

@ -93,8 +93,7 @@ class DirectTestCase(test.TestCase):
class DirectCloudTestCase(test_cloud.CloudTestCase):
def setUp(self):
super(DirectCloudTestCase, self).setUp()
compute_handle = compute.API(image_service=self.cloud.image_service,
network_api=self.cloud.network_api,
compute_handle = compute.API(network_api=self.cloud.network_api,
volume_api=self.cloud.volume_api)
direct.register_service('compute', compute_handle)
self.router = direct.JsonParamsMiddleware(direct.Router())
@ -20,11 +20,12 @@ from nova import compute
from nova import context
from nova import db
from nova import flags
from nova import network
from nova import quota
from nova import test
from nova import utils
from nova import volume
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.compute import instance_types

@ -41,7 +42,6 @@ class QuotaTestCase(test.TestCase):
quota_gigabytes=20,
quota_floating_ips=1)

self.cloud = cloud.CloudController()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')

@ -57,7 +57,7 @@ class QuotaTestCase(test.TestCase):
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id

@ -118,12 +118,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
image_id='fake')
image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)

@ -131,12 +131,12 @@ class QuotaTestCase(test.TestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
image_id='fake')
image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)

@ -145,9 +145,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError, self.cloud.create_volume,
self.context,
size=10)
self.assertRaises(quota.QuotaError,
volume.API().create,
self.context,
size=10,
name='',
description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)

@ -156,9 +159,11 @@ class QuotaTestCase(test.TestCase):
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError,
self.cloud.create_volume,
volume.API().create,
self.context,
size=10)
size=10,
name='',
description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)

@ -172,7 +177,8 @@ class QuotaTestCase(test.TestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.assertRaises(quota.QuotaError,
network.API().allocate_floating_ip,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
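Note (illustrative, not part of this commit): the quota tests now exercise the compute, volume, and network API layers directly rather than the EC2 CloudController, and the fixtures seed instances with integer image ids, the image services' native key. EC2-style ids such as 'ami-1' are translated back to integers at the API boundary, roughly like this (assumed behavior, not nova's actual code):

# Assumed translation, for illustration only.
def ec2_image_id_to_internal(ami_id):
    """e.g. 'ami-1' -> 1, 'ami-0000000a' -> 10; bare integers pass through."""
    if str(ami_id).startswith('ami-'):
        return int(str(ami_id)[len('ami-'):], 16)
    return int(ami_id)

assert ec2_image_id_to_internal('ami-1') == 1
assert ec2_image_id_to_internal(10) == 10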
@ -155,7 +155,7 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id

@ -169,8 +169,6 @@ class SimpleDriverTestCase(test.TestCase):
def _create_volume(self):
"""Create a test volume"""
vol = {}
vol['image_id'] = 'ami-test'
vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']

@ -99,7 +99,7 @@ class VolumeTestCase(test.TestCase):
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
@ -28,29 +28,32 @@ import time
import urllib2
import urlparse

from nova import context
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
from nova.objectstore import image


FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')

LOG = logging.getLogger('nova.virt.images')


def fetch(image, path, user, project):
if FLAGS.use_s3:
f = _fetch_s3_image
else:
f = _fetch_local_image
return f(image, path, user, project)
def fetch(image_id, path, _user, _project):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
image_service = utils.import_object(FLAGS.image_service)
with open(path, "wb") as image_file:
elevated = context.get_admin_context()
metadata = image_service.get(elevated, image_id, image_file)
return metadata


# NOTE(vish): The methods below should be unnecessary, but I'm leaving
# them in case the glance client does not work on windows.
def _fetch_image_no_curl(url, path, headers):
request = urllib2.Request(url)
for (k, v) in headers.iteritems():

@ -109,6 +112,8 @@ def _image_path(path):
return os.path.join(FLAGS.images_path, path)


# TODO(vish): xenapi should use the glance client code directly instead
# of retrieving the image using this method.
def image_url(image):
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
return "http://%s:%s/images/%s" % (FLAGS.glance_host,
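Note (illustrative, not part of this commit): fetch() now delegates to whichever backend FLAGS.image_service names and streams the image bytes straight into the destination file, returning the image metadata to the caller. A usage sketch with a made-up id and path:

# Sketch only; the id and path are made up.
metadata = fetch(1, '/tmp/instance-00000001/disk', None, None)
# The bytes are now on disk at the given path; metadata may carry
# properties such as kernel_id and ramdisk_id for the caller to inspect.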
@ -591,21 +591,23 @@ class LibvirtConnection(object):
'ramdisk_id': inst['ramdisk_id']}

if disk_images['kernel_id']:
fname = '%08x' % int(disk_images['kernel_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('kernel'),
fname=disk_images['kernel_id'],
fname=fname,
image_id=disk_images['kernel_id'],
user=user,
project=project)
if disk_images['ramdisk_id']:
fname = '%08x' % int(disk_images['ramdisk_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('ramdisk'),
fname=disk_images['ramdisk_id'],
fname=fname,
image_id=disk_images['ramdisk_id'],
user=user,
project=project)

root_fname = disk_images['image_id']
root_fname = '%08x' % int(disk_images['image_id'])
size = FLAGS.minimum_root_size
if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
size = None
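Note (illustrative, not part of this commit): cached kernel, ramdisk, and root images are now named with the zero-padded hexadecimal form of their integer image ids, so ids from either image backend map to stable cache filenames:

# Quick check of the '%08x' naming used above.
assert '%08x' % int(1) == '00000001'
assert '%08x' % int('10') == '0000000a'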