Merged with trunk

Justin Santa Barbara 2011-03-24 01:05:13 -07:00
commit 143a8387fc
39 changed files with 1022 additions and 331 deletions


@@ -67,11 +67,14 @@ paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.f
 [composite:osapi]
 use = egg:Paste#urlmap
 /: osversions
-/v1.0: openstackapi
-/v1.1: openstackapi
+/v1.0: openstackapi10
+/v1.1: openstackapi11
 
-[pipeline:openstackapi]
-pipeline = faultwrap auth ratelimit osapiapp
+[pipeline:openstackapi10]
+pipeline = faultwrap auth ratelimit osapiapp10
+
+[pipeline:openstackapi11]
+pipeline = faultwrap auth ratelimit osapiapp11
 
 [filter:faultwrap]
 paste.filter_factory = nova.api.openstack:FaultWrapper.factory

@@ -82,8 +85,11 @@ paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
 [filter:ratelimit]
 paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
 
-[app:osapiapp]
-paste.app_factory = nova.api.openstack:APIRouter.factory
+[app:osapiapp10]
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
+
+[app:osapiapp11]
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
 
 [pipeline:osversions]
 pipeline = faultwrap osversionapp
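
The two versioned pipelines can be exercised directly with PasteDeploy. A minimal sketch, assuming the file is installed at /etc/nova/api-paste.ini (the path is an assumption, not part of this diff):

    # Load the 'osapi' composite and note that /v1.0 and /v1.1 now resolve
    # to different apps instead of sharing a single APIRouter.
    from paste.deploy import loadapp

    osapi = loadapp('config:/etc/nova/api-paste.ini', name='osapi')
    # urlmap dispatches /v1.0 -> pipeline openstackapi10 -> app osapiapp10,
    # and /v1.1 -> pipeline openstackapi11 -> app osapiapp11.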


@@ -120,7 +120,8 @@ class AdminController(object):
 
     def describe_instance_types(self, context, **_kwargs):
         """Returns all active instance types data (vcpus, memory, etc.)"""
-        return {'instanceTypeSet': [db.instance_type_get_all(context)]}
+        return {'instanceTypeSet': [instance_dict(v) for v in
+                                    db.instance_type_get_all(context).values()]}
 
     def describe_user(self, _context, name, **_kwargs):
         """Returns user data, including access and secret keys."""


@@ -74,9 +74,14 @@ class APIRouter(wsgi.Router):
         return cls()
 
     def __init__(self):
+        self.server_members = {}
         mapper = routes.Mapper()
+        self._setup_routes(mapper)
+        super(APIRouter, self).__init__(mapper)
 
-        server_members = {'action': 'POST'}
+    def _setup_routes(self, mapper):
+        server_members = self.server_members
+        server_members['action'] = 'POST'
         if FLAGS.allow_admin_api:
             LOG.debug(_("Including admin operations in API."))

@@ -101,20 +106,11 @@ class APIRouter(wsgi.Router):
                         controller=accounts.Controller(),
                         collection={'detail': 'GET'})
 
-        mapper.resource("server", "servers", controller=servers.Controller(),
-                        collection={'detail': 'GET'},
-                        member=server_members)
-
         mapper.resource("backup_schedule", "backup_schedule",
                         controller=backup_schedules.Controller(),
                         parent_resource=dict(member_name='server',
                         collection_name='servers'))
 
-        mapper.resource("volume_attachment", "volume_attachment",
-                        controller=volume_attachments.Controller(),
-                        parent_resource=dict(member_name='server',
-                        collection_name='servers'))
-
         mapper.resource("console", "consoles",
                         controller=consoles.Controller(),
                         parent_resource=dict(member_name='server',

@@ -138,8 +134,34 @@ class APIRouter(wsgi.Router):
                         controller=volumes.Controller(),
                         collection={'detail': 'GET'})
 
+        mapper.resource("volume_attachment", "volume_attachment",
+                        controller=volume_attachments.Controller(),
+                        parent_resource=dict(member_name='server',
+                        collection_name='servers'))
+
         super(APIRouter, self).__init__(mapper)
 
 
+class APIRouterV10(APIRouter):
+    """Define routes specific to OpenStack API V1.0."""
+
+    def _setup_routes(self, mapper):
+        super(APIRouterV10, self)._setup_routes(mapper)
+        mapper.resource("server", "servers",
+                        controller=servers.ControllerV10(),
+                        collection={'detail': 'GET'},
+                        member=self.server_members)
+
+
+class APIRouterV11(APIRouter):
+    """Define routes specific to OpenStack API V1.1."""
+
+    def _setup_routes(self, mapper):
+        super(APIRouterV11, self)._setup_routes(mapper)
+        mapper.resource("server", "servers",
+                        controller=servers.ControllerV11(),
+                        collection={'detail': 'GET'},
+                        member=self.server_members)
+
+
 class Versions(wsgi.Application):
     @webob.dec.wsgify(RequestClass=wsgi.Request)
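
The router split follows a template-method pattern: the base class builds the mapper once and defers version-specific routes to _setup_routes, which each subclass extends. A standalone sketch (the names mirror the diff, but the bodies here are illustrative and omit the real nova.wsgi.Router plumbing):

    import routes

    class BaseRouter(object):
        def __init__(self):
            self.server_members = {}
            mapper = routes.Mapper()
            self._setup_routes(mapper)     # hook overridden per API version
            self.map = mapper

        def _setup_routes(self, mapper):
            self.server_members['action'] = 'POST'   # routes shared by both versions

    class RouterV10(BaseRouter):
        def _setup_routes(self, mapper):
            super(RouterV10, self)._setup_routes(mapper)
            mapper.resource("server", "servers", member=self.server_members)

    class RouterV11(BaseRouter):
        def _setup_routes(self, mapper):
            super(RouterV11, self)._setup_routes(mapper)
            mapper.resource("server", "servers", member=self.server_members)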


@@ -69,8 +69,6 @@ class AuthMiddleware(wsgi.Middleware):
             return faults.Fault(webob.exc.HTTPUnauthorized())
 
         req.environ['nova.context'] = context.RequestContext(user, account)
-        version = req.path.split('/')[1].replace('v', '')
-        req.environ['api.version'] = version
         return self.application
 
     def has_authentication(self, req):


@@ -15,7 +15,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import webob.exc
+from urlparse import urlparse
+
+import webob
 
 from nova import exception

@@ -76,5 +78,14 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
     raise exception.NotFound(image_hash)
 
 
-def get_api_version(req):
-    return req.environ.get('api.version')
+def get_id_from_href(href):
+    """Return the id portion of a url as an int.
+
+    Given: 'http://www.foo.com/bar/123?q=4'
+    Returns: 123
+
+    """
+    try:
+        return int(urlparse(href).path.split('/')[-1])
+    except:
+        raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
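
A quick usage check of the new helper (standalone copy of the same logic; urlparse is the Python 2 module used in the diff):

    from urlparse import urlparse

    def get_id_from_href(href):
        return int(urlparse(href).path.split('/')[-1])

    assert get_id_from_href('http://www.foo.com/bar/123?q=4') == 123
    assert get_id_from_href('http://localhost/v1.1/flavors/3') == 3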


@@ -143,6 +143,7 @@
         image = self._service.show(req.environ['nova.context'], image_id)
         _convert_image_id_to_hash(image)
+        self._format_image_dates(image)
         return dict(image=image)
 
     def delete(self, req, id):

@@ -164,3 +165,8 @@
         # Users may not modify public images, and that's all that
         # we support for now.
         raise faults.Fault(exc.HTTPNotFound())
+
+    def _format_image_dates(self, image):
+        for attr in ['created_at', 'updated_at', 'deleted_at']:
+            if image.get(attr) is not None:
+                image[attr] = image[attr].strftime('%Y-%m-%dT%H:%M:%SZ')
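
The format string yields Zulu-style timestamps; the trailing Z is a literal character, so this assumes the datetimes are naive UTC values, which is what the Glance service layer in this commit hands back. For example:

    import datetime

    created_at = datetime.datetime(2011, 3, 24, 1, 5, 13)
    assert created_at.strftime('%Y-%m-%dT%H:%M:%SZ') == '2011-03-24T01:05:13Z'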


@@ -22,6 +22,7 @@ from xml.dom import minidom
 from webob import exc
 
 from nova import compute
+from nova import context
 from nova import exception
 from nova import flags
 from nova import log as logging

@@ -29,8 +30,9 @@ from nova import wsgi
 from nova import utils
 from nova.api.openstack import common
 from nova.api.openstack import faults
-from nova.api.openstack.views import servers as servers_views
-from nova.api.openstack.views import addresses as addresses_views
+import nova.api.openstack.views.addresses
+import nova.api.openstack.views.flavors
+import nova.api.openstack.views.servers
 from nova.auth import manager as auth_manager
 from nova.compute import instance_types
 from nova.compute import power_state

@@ -63,7 +65,7 @@ class Controller(wsgi.Controller):
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
 
-        builder = addresses_views.get_view_builder(req)
+        builder = self._get_addresses_view_builder(req)
         return builder.build(instance)
 
     def index(self, req):

@@ -81,7 +83,7 @@ class Controller(wsgi.Controller):
         """
         instance_list = self.compute_api.get_all(req.environ['nova.context'])
         limited_list = common.limited(instance_list, req)
-        builder = servers_views.get_view_builder(req)
+        builder = self._get_view_builder(req)
         servers = [builder.build(inst, is_detail)['server']
                    for inst in limited_list]
         return dict(servers=servers)

@@ -90,7 +92,7 @@ class Controller(wsgi.Controller):
         """ Returns server details by server id """
         try:
             instance = self.compute_api.get(req.environ['nova.context'], id)
-            builder = servers_views.get_view_builder(req)
+            builder = self._get_view_builder(req)
             return builder.build(instance, is_detail=True)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())

@@ -119,8 +121,9 @@ class Controller(wsgi.Controller):
         key_name = key_pair['name']
         key_data = key_pair['public_key']
 
+        requested_image_id = self._image_id_from_req_data(env)
         image_id = common.get_image_id_from_image_hash(self._image_service,
-            context, env['server']['imageId'])
+            context, requested_image_id)
 
         kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
             req, image_id)

@@ -139,10 +142,11 @@ class Controller(wsgi.Controller):
         if personality:
             injected_files = self._get_injected_files(personality)
 
+        flavor_id = self._flavor_id_from_req_data(env)
         try:
-            instances = self.compute_api.create(
+            (inst,) = self.compute_api.create(
                 context,
-                instance_types.get_by_flavor_id(env['server']['flavorId']),
+                instance_types.get_by_flavor_id(flavor_id),
                 image_id,
                 kernel_id=kernel_id,
                 ramdisk_id=ramdisk_id,

@@ -155,8 +159,11 @@ class Controller(wsgi.Controller):
         except QuotaError as error:
             self._handle_quota_errors(error)
 
-        builder = servers_views.get_view_builder(req)
-        server = builder.build(instances[0], is_detail=False)
+        inst['instance_type'] = flavor_id
+        inst['image_id'] = requested_image_id
+
+        builder = self._get_view_builder(req)
+        server = builder.build(inst, is_detail=True)
         password = "%s%s" % (server['server']['name'][:4],
                              utils.generate_password(12))
         server['server']['adminPass'] = password

@@ -511,6 +518,45 @@ class Controller(wsgi.Controller):
         return kernel_id, ramdisk_id
 
 
+class ControllerV10(Controller):
+    def _image_id_from_req_data(self, data):
+        return data['server']['imageId']
+
+    def _flavor_id_from_req_data(self, data):
+        return data['server']['flavorId']
+
+    def _get_view_builder(self, req):
+        addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV10()
+        return nova.api.openstack.views.servers.ViewBuilderV10(
+            addresses_builder)
+
+    def _get_addresses_view_builder(self, req):
+        return nova.api.openstack.views.addresses.ViewBuilderV10(req)
+
+
+class ControllerV11(Controller):
+    def _image_id_from_req_data(self, data):
+        href = data['server']['imageRef']
+        return common.get_id_from_href(href)
+
+    def _flavor_id_from_req_data(self, data):
+        href = data['server']['flavorRef']
+        return common.get_id_from_href(href)
+
+    def _get_view_builder(self, req):
+        base_url = req.application_url
+        flavor_builder = nova.api.openstack.views.flavors.ViewBuilderV11(
+            base_url)
+        image_builder = nova.api.openstack.views.images.ViewBuilderV11(
+            base_url)
+        addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV11()
+        return nova.api.openstack.views.servers.ViewBuilderV11(
+            addresses_builder, flavor_builder, image_builder)
+
+    def _get_addresses_view_builder(self, req):
+        return nova.api.openstack.views.addresses.ViewBuilderV11(req)
+
+
 class ServerCreateRequestXMLDeserializer(object):
     """
     Deserializer to handle xml-formatted server create requests.
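
The two controllers above differ only in how they pull image and flavor identifiers out of the request body; the shapes below are taken from the tests added later in this commit:

    # v1.0: bare integer ids
    body_v10 = {'server': {'name': 'server_test', 'imageId': 3, 'flavorId': 2}}

    # v1.1: full hrefs, reduced to integer ids via common.get_id_from_href()
    body_v11 = {'server': {'name': 'server_test',
                           'imageRef': 'http://localhost/v1.1/images/2',
                           'flavorRef': 'http://localhost/v1.1/flavors/3'}}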


@@ -19,18 +19,6 @@ from nova import utils
 from nova.api.openstack import common
 
 
-def get_view_builder(req):
-    '''
-    A factory method that returns the correct builder based on the version of
-    the api requested.
-    '''
-    version = common.get_api_version(req)
-    if version == '1.1':
-        return ViewBuilder_1_1()
-    else:
-        return ViewBuilder_1_0()
-
-
 class ViewBuilder(object):
     ''' Models a server addresses response as a python dictionary.'''

@@ -38,14 +26,14 @@ class ViewBuilder(object):
         raise NotImplementedError()
 
 
-class ViewBuilder_1_0(ViewBuilder):
+class ViewBuilderV10(ViewBuilder):
     def build(self, inst):
         private_ips = utils.get_from_path(inst, 'fixed_ip/address')
         public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
         return dict(public=public_ips, private=private_ips)
 
 
-class ViewBuilder_1_1(ViewBuilder):
+class ViewBuilderV11(ViewBuilder):
     def build(self, inst):
         private_ips = utils.get_from_path(inst, 'fixed_ip/address')
         private_ips = [dict(version=4, addr=a) for a in private_ips]


@@ -18,19 +18,6 @@
 
 from nova.api.openstack import common
 
 
-def get_view_builder(req):
-    '''
-    A factory method that returns the correct builder based on the version of
-    the api requested.
-    '''
-    version = common.get_api_version(req)
-    base_url = req.application_url
-    if version == '1.1':
-        return ViewBuilder_1_1(base_url)
-    else:
-        return ViewBuilder_1_0()
-
-
 class ViewBuilder(object):
     def __init__(self):
         pass

@@ -39,13 +26,9 @@ class ViewBuilder(object):
         raise NotImplementedError()
 
 
-class ViewBuilder_1_1(ViewBuilder):
+class ViewBuilderV11(ViewBuilder):
     def __init__(self, base_url):
         self.base_url = base_url
 
     def generate_href(self, flavor_id):
         return "%s/flavors/%s" % (self.base_url, flavor_id)
-
-
-class ViewBuilder_1_0(ViewBuilder):
-    pass


@@ -18,19 +18,6 @@
 
 from nova.api.openstack import common
 
 
-def get_view_builder(req):
-    '''
-    A factory method that returns the correct builder based on the version of
-    the api requested.
-    '''
-    version = common.get_api_version(req)
-    base_url = req.application_url
-    if version == '1.1':
-        return ViewBuilder_1_1(base_url)
-    else:
-        return ViewBuilder_1_0()
-
-
 class ViewBuilder(object):
     def __init__(self):
         pass

@@ -39,13 +26,9 @@ class ViewBuilder(object):
         raise NotImplementedError()
 
 
-class ViewBuilder_1_1(ViewBuilder):
+class ViewBuilderV11(ViewBuilder):
     def __init__(self, base_url):
         self.base_url = base_url
 
     def generate_href(self, image_id):
         return "%s/images/%s" % (self.base_url, image_id)
-
-
-class ViewBuilder_1_0(ViewBuilder):
-    pass


@@ -16,7 +16,10 @@
 # under the License.
 
 import hashlib
 
+from nova.compute import power_state
+import nova.compute
+import nova.context
 from nova.api.openstack import common
 from nova.api.openstack.views import addresses as addresses_view
 from nova.api.openstack.views import flavors as flavors_view

@@ -24,45 +27,30 @@ from nova.api.openstack.views import images as images_view
 from nova import utils
 
 
-def get_view_builder(req):
-    '''
-    A factory method that returns the correct builder based on the version of
-    the api requested.
-    '''
-    version = common.get_api_version(req)
-    addresses_builder = addresses_view.get_view_builder(req)
-    if version == '1.1':
-        flavor_builder = flavors_view.get_view_builder(req)
-        image_builder = images_view.get_view_builder(req)
-        return ViewBuilder_1_1(addresses_builder, flavor_builder,
-                               image_builder)
-    else:
-        return ViewBuilder_1_0(addresses_builder)
-
-
 class ViewBuilder(object):
-    '''
-    Models a server response as a python dictionary.
+    """Model a server response as a python dictionary.
 
+    Public methods: build
     Abstract methods: _build_image, _build_flavor
-    '''
+
+    """
 
     def __init__(self, addresses_builder):
         self.addresses_builder = addresses_builder
 
     def build(self, inst, is_detail):
-        """
-        Coerces into dictionary format, mapping everything to
-        Rackspace-like attributes for return
-        """
+        """Return a dict that represenst a server."""
         if is_detail:
             return self._build_detail(inst)
         else:
             return self._build_simple(inst)
 
     def _build_simple(self, inst):
+        """Return a simple model of a server."""
         return dict(server=dict(id=inst['id'], name=inst['display_name']))
 
     def _build_detail(self, inst):
+        """Returns a detailed model of a server."""
         power_mapping = {
             None: 'build',
             power_state.NOSTATE: 'build',

@@ -74,27 +62,26 @@ class ViewBuilder(object):
             power_state.SHUTOFF: 'active',
             power_state.CRASHED: 'error',
             power_state.FAILED: 'error'}
-        inst_dict = {}
 
-        #mapped_keys = dict(status='state', imageId='image_id',
-        #        flavorId='instance_type', name='display_name', id='id')
+        inst_dict = {
+            'id': int(inst['id']),
+            'name': inst['display_name'],
+            'addresses': self.addresses_builder.build(inst),
+            'status': power_mapping[inst.get('state')]}
 
-        mapped_keys = dict(status='state', name='display_name', id='id')
+        ctxt = nova.context.get_admin_context()
+        compute_api = nova.compute.API()
 
-        for k, v in mapped_keys.iteritems():
-            inst_dict[k] = inst[v]
-
-        inst_dict['status'] = power_mapping[inst_dict['status']]
-        inst_dict['addresses'] = self.addresses_builder.build(inst)
+        if compute_api.has_finished_migration(ctxt, inst['id']):
+            inst_dict['status'] = 'resize-confirm'
 
         # Return the metadata as a dictionary
         metadata = {}
-        for item in inst['metadata']:
+        for item in inst.get('metadata', []):
             metadata[item['key']] = item['value']
         inst_dict['metadata'] = metadata
 
         inst_dict['hostId'] = ''
-        if inst['host']:
+        if inst.get('host'):
             inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest()
 
         self._build_image(inst_dict, inst)

@@ -103,21 +90,27 @@ class ViewBuilder(object):
         return dict(server=inst_dict)
 
     def _build_image(self, response, inst):
+        """Return the image sub-resource of a server."""
         raise NotImplementedError()
 
     def _build_flavor(self, response, inst):
+        """Return the flavor sub-resource of a server."""
         raise NotImplementedError()
 
 
-class ViewBuilder_1_0(ViewBuilder):
+class ViewBuilderV10(ViewBuilder):
+    """Model an Openstack API V1.0 server response."""
+
     def _build_image(self, response, inst):
-        response["imageId"] = inst["image_id"]
+        response['imageId'] = inst['image_id']
 
     def _build_flavor(self, response, inst):
-        response["flavorId"] = inst["instance_type"]
+        response['flavorId'] = inst['instance_type']
 
 
-class ViewBuilder_1_1(ViewBuilder):
+class ViewBuilderV11(ViewBuilder):
+    """Model an Openstack API V1.0 server response."""
+
     def __init__(self, addresses_builder, flavor_builder, image_builder):
         ViewBuilder.__init__(self, addresses_builder)
         self.flavor_builder = flavor_builder
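
For reference, the practical difference between the two builders is the server body each one emits. A rough sketch with made-up values; the V1.1 _build_image/_build_flavor bodies fall outside this hunk, but the tests added later in this commit assert the imageRef/flavorRef keys:

    # ViewBuilderV10 emits bare ids:
    server_v10 = {'server': {'id': 1, 'name': 'server1',
                             'imageId': 2, 'flavorId': 1}}

    # ViewBuilderV11 composes the flavors/images builders, whose
    # generate_href() returns "%s/flavors/%s" % (base_url, flavor_id):
    server_v11 = {'server': {'id': 1, 'name': 'server1',
                             'imageRef': 'http://localhost/v1.1/images/2',
                             'flavorRef': 'http://localhost/v1.1/flavors/1'}}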


@@ -253,6 +253,16 @@ class API(base.Base):
 
         return [dict(x.iteritems()) for x in instances]
 
+    def has_finished_migration(self, context, instance_id):
+        """Retrieves whether or not a finished migration exists for
+        an instance"""
+        try:
+            db.migration_get_by_instance_and_status(context, instance_id,
+                    'finished')
+            return True
+        except exception.NotFound:
+            return False
+
     def ensure_default_security_group(self, context):
         """ Create security group for the security context if it
         does not already exist

@@ -464,6 +474,8 @@ class API(base.Base):
         params = {'migration_id': migration_ref['id']}
         self._cast_compute_message('revert_resize', context, instance_id,
                 migration_ref['dest_compute'], params=params)
+        self.db.migration_update(context, migration_ref['id'],
+                {'status': 'reverted'})
 
     def confirm_resize(self, context, instance_id):
         """Confirms a migration/resize, deleting the 'old' instance in the

@@ -479,17 +491,41 @@ class API(base.Base):
         self._cast_compute_message('confirm_resize', context, instance_id,
                 migration_ref['source_compute'], params=params)
 
-        self.db.migration_update(context, migration_id,
+        self.db.migration_update(context, migration_ref['id'],
                 {'status': 'confirmed'})
         self.db.instance_update(context, instance_id,
                 {'host': migration_ref['dest_compute'], })
 
-    def resize(self, context, instance_id, flavor):
+    def resize(self, context, instance_id, flavor_id):
         """Resize a running instance."""
+        instance = self.db.instance_get(context, instance_id)
+        current_instance_type = self.db.instance_type_get_by_name(
+            context, instance['instance_type'])
+
+        new_instance_type = self.db.instance_type_get_by_flavor_id(
+            context, flavor_id)
+        current_instance_type_name = current_instance_type['name']
+        new_instance_type_name = new_instance_type['name']
+        LOG.debug(_("Old instance type %(current_instance_type_name)s, "
+                " new instance type %(new_instance_type_name)s") % locals())
+        if not new_instance_type:
+            raise exception.ApiError(_("Requested flavor %(flavor_id)d "
+                    "does not exist") % locals())
+
+        current_memory_mb = current_instance_type['memory_mb']
+        new_memory_mb = new_instance_type['memory_mb']
+        if current_memory_mb > new_memory_mb:
+            raise exception.ApiError(_("Invalid flavor: cannot downsize"
+                    "instances"))
+        if current_memory_mb == new_memory_mb:
+            raise exception.ApiError(_("Invalid flavor: cannot use"
+                    "the same flavor. "))
+
         self._cast_scheduler_message(context,
                     {"method": "prep_resize",
                      "args": {"topic": FLAGS.compute_topic,
-                              "instance_id": instance_id, }},)
+                              "instance_id": instance_id,
+                              "flavor_id": flavor_id}})
 
     def pause(self, context, instance_id):
         """Pause the given instance."""


@@ -65,8 +65,11 @@ flags.DEFINE_string('console_host', socket.gethostname(),
                     'Console proxy host to use to connect to instances on'
                     'this host.')
 flags.DEFINE_integer('live_migration_retry_count', 30,
-                     ("Retry count needed in live_migration."
-                      " sleep 1 sec for each count"))
+                     "Retry count needed in live_migration."
+                     " sleep 1 sec for each count")
+flags.DEFINE_integer("rescue_timeout", 0,
+                     "Automatically unrescue an instance after N seconds."
+                     " Set to 0 to disable.")
 
 LOG = logging.getLogger('nova.compute.manager')

@@ -118,8 +121,8 @@ class ComputeManager(manager.Manager):
         try:
             self.driver = utils.import_object(compute_driver)
-        except ImportError:
-            LOG.error("Unable to load the virtualization driver.")
+        except ImportError as e:
+            LOG.error(_("Unable to load the virtualization driver: %s") % (e))
             sys.exit(1)
 
         self.network_manager = utils.import_object(FLAGS.network_manager)

@@ -132,6 +135,12 @@ class ComputeManager(manager.Manager):
         """
         self.driver.init_host(host=self.host)
 
+    def periodic_tasks(self, context=None):
+        """Tasks to be run at a periodic interval."""
+        super(ComputeManager, self).periodic_tasks(context)
+        if FLAGS.rescue_timeout > 0:
+            self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+
     def _update_state(self, context, instance_id):
         """Update the state of an instance from the driver info."""
         # FIXME(ja): include other fields from state?

@@ -437,17 +446,11 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
         migration_ref = self.db.migration_get(context, migration_id)
 
-        #TODO(mdietz): we may want to split these into separate methods.
-        if migration_ref['source_compute'] == FLAGS.host:
-            self.driver._start(instance_ref)
-            self.db.migration_update(context, migration_id,
-                    {'status': 'reverted'})
-        else:
-            self.driver.destroy(instance_ref)
-            topic = self.db.queue_get_for(context, FLAGS.compute_topic,
-                    instance_ref['host'])
-            rpc.cast(context, topic,
-                    {'method': 'revert_resize',
-                     'args': {
-                           'migration_id': migration_ref['id'],
-                           'instance_id': instance_id, },
+        self.driver.destroy(instance_ref)
+        topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+                instance_ref['host'])
+        rpc.cast(context, topic,
+                {'method': 'finish_revert_resize',
+                 'args': {
+                       'migration_id': migration_ref['id'],
+                       'instance_id': instance_id, },

@@ -455,7 +458,29 @@ class ComputeManager(manager.Manager):
 
     @exception.wrap_exception
     @checks_instance_lock
-    def prep_resize(self, context, instance_id):
+    def finish_revert_resize(self, context, instance_id, migration_id):
+        """Finishes the second half of reverting a resize, powering back on
+        the source instance and reverting the resized attributes in the
+        database"""
+        instance_ref = self.db.instance_get(context, instance_id)
+        migration_ref = self.db.migration_get(context, migration_id)
+        instance_type = self.db.instance_type_get_by_flavor_id(context,
+                migration_ref['old_flavor_id'])
+
+        # Just roll back the record. There's no need to resize down since
+        # the 'old' VM already has the preferred attributes
+        self.db.instance_update(context, instance_id,
+           dict(memory_mb=instance_type['memory_mb'],
+                vcpus=instance_type['vcpus'],
+                local_gb=instance_type['local_gb']))
+
+        self.driver.revert_resize(instance_ref)
+        self.db.migration_update(context, migration_id,
+                {'status': 'reverted'})
+
+    @exception.wrap_exception
+    @checks_instance_lock
+    def prep_resize(self, context, instance_id, flavor_id):
         """Initiates the process of moving a running instance to another
         host, possibly changing the RAM and disk size in the process"""
         context = context.elevated()

@@ -464,12 +489,17 @@ class ComputeManager(manager.Manager):
             raise exception.Error(_(
                     'Migration error: destination same as source!'))
 
+        instance_type = self.db.instance_type_get_by_flavor_id(context,
+                flavor_id)
         migration_ref = self.db.migration_create(context,
                 {'instance_id': instance_id,
                  'source_compute': instance_ref['host'],
                  'dest_compute': FLAGS.host,
                  'dest_host': self.driver.get_host_ip_addr(),
+                 'old_flavor_id': instance_type['flavorid'],
+                 'new_flavor_id': flavor_id,
                  'status': 'pre-migrating'})
         LOG.audit(_('instance %s: migrating to '), instance_id,
                 context=context)
         topic = self.db.queue_get_for(context, FLAGS.compute_topic,

@@ -495,8 +525,6 @@ class ComputeManager(manager.Manager):
         self.db.migration_update(context, migration_id,
                 {'status': 'post-migrating', })
 
-        #TODO(mdietz): This is where we would update the VM record
-        #after resizing
         service = self.db.service_get_by_host_and_topic(context,
                 migration_ref['dest_compute'], FLAGS.compute_topic)
         topic = self.db.queue_get_for(context, FLAGS.compute_topic,

@@ -517,7 +545,19 @@ class ComputeManager(manager.Manager):
         migration_ref = self.db.migration_get(context, migration_id)
         instance_ref = self.db.instance_get(context,
                 migration_ref['instance_id'])
+        # TODO(mdietz): apply the rest of the instance_type attributes going
+        # after they're supported
+        instance_type = self.db.instance_type_get_by_flavor_id(context,
+                migration_ref['new_flavor_id'])
+        self.db.instance_update(context, instance_id,
+              dict(instance_type=instance_type['name'],
+                   memory_mb=instance_type['memory_mb'],
+                   vcpus=instance_type['vcpus'],
+                   local_gb=instance_type['local_gb']))
+
+        # reload the updated instance ref
+        # FIXME(mdietz): is there reload functionality?
+        instance_ref = self.db.instance_get(context, instance_id)
 
         self.driver.finish_resize(instance_ref, disk_info)
         self.db.migration_update(context, migration_id,
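
Taken together with compute/api.py above, the migration record now carries old_flavor_id/new_flavor_id and walks through a small set of statuses. A summary listing only the values visible in this diff:

    MIGRATION_STATUSES = [
        'pre-migrating',   # prep_resize creates the record with both flavor ids
        'post-migrating',  # set before finish_resize is cast to the destination
        'finished',        # what API.has_finished_migration() looks for
        'confirmed',       # confirm_resize
        'reverted',        # revert_resize / finish_revert_resize
    ]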


@@ -26,6 +26,7 @@ import gettext
 import hashlib
 import os
 import shutil
+import string
 import struct
 import tempfile
 import time

@@ -267,7 +268,7 @@ def _sign_csr(csr_text, ca_folder):
                   './openssl.cnf', '-infiles', inbound)
     out, _err = utils.execute('openssl', 'x509', '-in', outbound,
                               '-serial', '-noout')
-    serial = out.rpartition("=")[2]
+    serial = string.strip(out.rpartition("=")[2])
     os.chdir(start)
     with open(outbound, "r") as crtfile:
         return (serial, crtfile.read())


@@ -214,7 +214,7 @@ def certificate_update(context, certificate_id, values):
 
     Raises NotFound if service does not exist.
 
     """
-    return IMPL.service_update(context, certificate_id, values)
+    return IMPL.certificate_update(context, certificate_id, values)
 
 
 ###################


@@ -2220,8 +2220,8 @@ def migration_get_by_instance_and_status(context, instance_id, status):
                      filter_by(instance_id=instance_id).\
                      filter_by(status=status).first()
     if not result:
-        raise exception.NotFound(_("No migration found with instance id %s")
-                % migration_id)
+        raise exception.NotFound(_("No migration found for instance "
+                "%(instance_id)s with status %(status)s") % locals())
 
     return result

@@ -2336,8 +2336,8 @@ def instance_type_create(_context, values):
         instance_type_ref = models.InstanceTypes()
         instance_type_ref.update(values)
         instance_type_ref.save()
-    except:
-        raise exception.DBError
+    except Exception, e:
+        raise exception.DBError(e)
     return instance_type_ref


@@ -0,0 +1,50 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.from sqlalchemy import *
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+migrations = Table('migrations', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+#
+# Tables to alter
+#
+#
+
+old_flavor_id = Column('old_flavor_id', Integer())
+new_flavor_id = Column('new_flavor_id', Integer())
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    migrations.create_column(old_flavor_id)
+    migrations.create_column(new_flavor_id)
+
+
+def downgrade(migrate_engine):
+    meta.bind = migrate_engine
+    migrations.drop_column(old_flavor_id)
+    migrations.drop_column(new_flavor_id)
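
A hedged way to exercise the new migration against a throwaway SQLite database with sqlalchemy-migrate; the repository path and database URL below are assumptions for illustration, not taken from this commit:

    from migrate.versioning import api as versioning_api

    REPO = 'nova/db/sqlalchemy/migrate_repo'          # assumed repository path
    URL = 'sqlite:////tmp/nova-migrate-test.db'       # throwaway database

    versioning_api.version_control(URL, REPO)         # start tracking versions
    versioning_api.upgrade(URL, REPO)                 # adds old/new_flavor_id
    versioning_api.downgrade(URL, REPO, 0)            # drops them again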


@@ -436,6 +436,8 @@ class Migration(BASE, NovaBase):
     source_compute = Column(String(255))
     dest_compute = Column(String(255))
     dest_host = Column(String(255))
+    old_flavor_id = Column(Integer())
+    new_flavor_id = Column(Integer())
     instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
     #TODO(_cerberus_): enum
     status = Column(String(255))


@@ -18,6 +18,8 @@
 
 from __future__ import absolute_import
 
+import datetime
+
 from glance.common import exception as glance_exception
 
 from nova import exception

@@ -37,8 +39,11 @@ GlanceClient = utils.import_class('glance.client.Client')
 class GlanceImageService(service.BaseImageService):
     """Provides storage and retrieval of disk image objects within Glance."""
 
-    def __init__(self):
-        self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
+    def __init__(self, client=None):
+        if client is None:
+            self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
+        else:
+            self.client = client
 
     def index(self, context):
         """

@@ -50,7 +55,8 @@ class GlanceImageService(service.BaseImageService):
         """
         Calls out to Glance for a list of detailed image information
         """
-        return self.client.get_images_detailed()
+        return [self._convert_timestamps_to_datetimes(image)
+                for image in self.client.get_images_detailed()]
 
     def show(self, context, image_id):
         """

@@ -60,8 +66,23 @@ class GlanceImageService(service.BaseImageService):
             image = self.client.get_image_meta(image_id)
         except glance_exception.NotFound:
             raise exception.NotFound
+        return self._convert_timestamps_to_datetimes(image)
+
+    def _convert_timestamps_to_datetimes(self, image):
+        """
+        Returns image with known timestamp fields converted to datetime objects
+        """
+        for attr in ['created_at', 'updated_at', 'deleted_at']:
+            if image.get(attr) is not None:
+                image[attr] = self._parse_glance_iso8601_timestamp(image[attr])
         return image
 
+    def _parse_glance_iso8601_timestamp(self, timestamp):
+        """
+        Parse a subset of iso8601 timestamps into datetime objects
+        """
+        return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f")
+
     def show_by_name(self, context, name):
         """
         Returns a dict containing image data for the given name.

@@ -88,7 +109,7 @@ class GlanceImageService(service.BaseImageService):
             raise exception.NotFound
         for chunk in image_chunks:
             data.write(chunk)
-        return metadata
+        return self._convert_timestamps_to_datetimes(metadata)
 
     def create(self, context, metadata, data=None):
         """

@@ -97,7 +118,8 @@ class GlanceImageService(service.BaseImageService):
 
         :raises AlreadyExists if the image already exist.
 
         """
-        return self.client.add_image(metadata, data)
+        return self._convert_timestamps_to_datetimes(
+            self.client.add_image(metadata, data))
 
     def update(self, context, image_id, metadata, data=None):
         """Replace the contents of the given image with the new data.

@@ -106,10 +128,10 @@ class GlanceImageService(service.BaseImageService):
 
         """
         try:
-            result = self.client.update_image(image_id, metadata, data)
+            metadata = self.client.update_image(image_id, metadata, data)
         except glance_exception.NotFound:
             raise exception.NotFound
-        return result
+        return self._convert_timestamps_to_datetimes(metadata)
 
     def delete(self, context, image_id):
         """


@@ -40,9 +40,9 @@ class BaseImageService(object):
         :retval: a sequence of mappings with the following signature
                     {'id': opaque id of image,
                      'name': name of image,
-                     'created_at': creation timestamp,
-                     'updated_at': modification timestamp,
-                     'deleted_at': deletion timestamp or None,
+                     'created_at': creation datetime object,
+                     'updated_at': modification datetime object,
+                     'deleted_at': deletion datetime object or None,
                      'deleted': boolean indicating if image has been deleted,
                      'status': string description of image status,
                      'is_public': boolean indicating if image is public

@@ -64,9 +64,9 @@ class BaseImageService(object):
                     {'id': opaque id of image,
                      'name': name of image,
-                     'created_at': creation timestamp,
-                     'updated_at': modification timestamp,
-                     'deleted_at': deletion timestamp or None,
+                     'created_at': creation datetime object,
+                     'updated_at': modification datetime object,
+                     'deleted_at': deletion datetime object or None,
                      'deleted': boolean indicating if image has been deleted,
                      'status': string description of image status,
                      'is_public': boolean indicating if image is public

@@ -88,7 +88,7 @@ class BaseImageService(object):
 
     def create(self, context, metadata, data=None):
         """
-        Store the image metadata and data and return the new image id.
+        Store the image metadata and data and return the new image metadata.
 
         :raises AlreadyExists if the image already exist.
 

@@ -96,7 +96,7 @@ class BaseImageService(object):
         raise NotImplementedError
 
     def update(self, context, image_id, metadata, data=None):
-        """Update the given image with the new metadata and data.
+        """Update the given image metadata and data and return the metadata
 
         :raises NotFound if the image does not exist.
 


@@ -21,8 +21,6 @@ import inspect
 import os
 import calendar
 
-from eventlet import semaphore
-
 from nova import db
 from nova import exception
 from nova import flags

@@ -272,21 +270,14 @@ class IptablesManager(object):
         self.ipv4['nat'].add_chain('floating-snat')
         self.ipv4['nat'].add_rule('snat', '-j $floating-snat')
 
-        self.semaphore = semaphore.Semaphore()
-
-    @utils.synchronized('iptables')
+    @utils.synchronized('iptables', external=True)
     def apply(self):
         """Apply the current in-memory set of iptables rules
 
         This will blow away any rules left over from previous runs of the
         same component of Nova, and replace them with our current set of
         rules. This happens atomically, thanks to iptables-restore.
-
-        We wrap the call in a semaphore lock, so that we don't race with
-        ourselves. In the event of a race with another component running
-        an iptables-* command at the same time, we retry up to 5 times.
         """
-        with self.semaphore:
-            s = [('iptables', self.ipv4)]
+        s = [('iptables', self.ipv4)]
 
-            if FLAGS.use_ipv6:
-                s += [('ip6tables', self.ipv6)]
+        if FLAGS.use_ipv6:
+            s += [('ip6tables', self.ipv6)]

@@ -595,6 +586,7 @@ def update_dhcp(context, network_id):
     _execute(*command, addl_env=env)
 
 
+@utils.synchronized('radvd_start')
 def update_ra(context, network_id):
     network_ref = db.network_get(context, network_id)
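
The in-process Semaphore is replaced by utils.synchronized(..., external=True), i.e. a lock that also serializes separate nova processes touching iptables. The nova.utils implementation itself is not part of this diff; a minimal sketch of the idea using a file lock (the lock path is an assumption):

    import fcntl
    import functools
    import os

    def synchronized(name, external=False, lock_dir='/tmp'):
        def wrap(f):
            @functools.wraps(f)
            def inner(*args, **kwargs):
                if not external:
                    return f(*args, **kwargs)
                path = os.path.join(lock_dir, 'nova-%s.lock' % name)
                with open(path, 'w') as lock_file:
                    fcntl.flock(lock_file, fcntl.LOCK_EX)   # blocks other processes
                    try:
                        return f(*args, **kwargs)
                    finally:
                        fcntl.flock(lock_file, fcntl.LOCK_UN)
            return inner
        return wrap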


@@ -15,6 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import copy
 import datetime
 import json
 import random

@@ -72,14 +73,18 @@ def fake_wsgi(self, req):
     return self.application
 
 
-def wsgi_app(inner_application=None):
-    if not inner_application:
-        inner_application = openstack.APIRouter()
+def wsgi_app(inner_app10=None, inner_app11=None):
+    if not inner_app10:
+        inner_app10 = openstack.APIRouterV10()
+    if not inner_app11:
+        inner_app11 = openstack.APIRouterV11()
     mapper = urlmap.URLMap()
-    api = openstack.FaultWrapper(auth.AuthMiddleware(
-        limits.RateLimitingMiddleware(inner_application)))
-    mapper['/v1.0'] = api
-    mapper['/v1.1'] = api
+    api10 = openstack.FaultWrapper(auth.AuthMiddleware(
+        limits.RateLimitingMiddleware(inner_app10)))
+    api11 = openstack.FaultWrapper(auth.AuthMiddleware(
+        limits.RateLimitingMiddleware(inner_app11)))
+    mapper['/v1.0'] = api10
+    mapper['/v1.1'] = api11
     mapper['/'] = openstack.FaultWrapper(openstack.Versions())
     return mapper

@@ -150,22 +155,23 @@ def stub_out_glance(stubs, initial_fixtures=None):
                     for f in self.fixtures]
 
         def fake_get_images_detailed(self):
-            return self.fixtures
+            return copy.deepcopy(self.fixtures)
 
         def fake_get_image_meta(self, image_id):
-            for f in self.fixtures:
-                if f['id'] == image_id:
-                    return f
+            image = self._find_image(image_id)
+            if image:
+                return copy.deepcopy(image)
             raise glance_exc.NotFound
 
         def fake_add_image(self, image_meta, data=None):
+            image_meta = copy.deepcopy(image_meta)
             id = ''.join(random.choice(string.letters) for _ in range(20))
             image_meta['id'] = id
             self.fixtures.append(image_meta)
             return image_meta
 
         def fake_update_image(self, image_id, image_meta, data=None):
-            f = self.fake_get_image_meta(image_id)
+            f = self._find_image(image_id)
             if not f:
                 raise glance_exc.NotFound

@@ -173,7 +179,7 @@ def stub_out_glance(stubs, initial_fixtures=None):
             return f
 
         def fake_delete_image(self, image_id):
-            f = self.fake_get_image_meta(image_id)
+            f = self._find_image(image_id)
             if not f:
                 raise glance_exc.NotFound

@@ -182,6 +188,12 @@ def stub_out_glance(stubs, initial_fixtures=None):
         ##def fake_delete_all(self):
         ##    self.fixtures = []
 
+        def _find_image(self, image_id):
+            for f in self.fixtures:
+                if f['id'] == image_id:
+                    return f
+            return None
+
     GlanceClient = glance_client.Client
     fake = FakeGlanceClient(initial_fixtures)
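
The deep copies in the fake Glance client matter because returning the stored fixture dict would let a test mutate shared state behind the stub's back. Plain-Python illustration of the aliasing problem being avoided:

    import copy

    fixtures = [{'id': 1, 'status': 'active'}]

    aliased = fixtures[0]              # same object as the fixture
    aliased['status'] = 'deleted'      # silently corrupts the fixture list
    assert fixtures[0]['status'] == 'deleted'

    fixtures[0]['status'] = 'active'
    safe = copy.deepcopy(fixtures[0])
    safe['status'] = 'deleted'         # fixture stays intact
    assert fixtures[0]['status'] == 'active'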


@@ -83,8 +83,7 @@ class Test(test.TestCase):
         self.assertEqual(result.headers['X-Storage-Url'], "")
 
         token = result.headers['X-Auth-Token']
-        self.stubs.Set(nova.api.openstack, 'APIRouter',
-                       fakes.FakeRouter)
+        self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
         req = webob.Request.blank('/v1.0/fake')
         req.headers['X-Auth-Token'] = token
         result = req.get_response(fakes.wsgi_app())

@@ -201,8 +200,7 @@ class TestLimiter(test.TestCase):
         self.assertEqual(len(result.headers['X-Auth-Token']), 40)
 
         token = result.headers['X-Auth-Token']
-        self.stubs.Set(nova.api.openstack, 'APIRouter',
-                       fakes.FakeRouter)
+        self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
         req = webob.Request.blank('/v1.0/fake')
         req.method = 'POST'
         req.headers['X-Auth-Token'] = token


@@ -189,13 +189,13 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
     """Test of the OpenStack API /images application controller"""
 
     # Registered images at start of each test.
+    now = datetime.datetime.utcnow()
     IMAGE_FIXTURES = [
         {'id': '23g2ogk23k4hhkk4k42l',
          'imageId': '23g2ogk23k4hhkk4k42l',
          'name': 'public image #1',
-         'created_at': str(datetime.datetime.utcnow()),
-         'updated_at': str(datetime.datetime.utcnow()),
+         'created_at': now.isoformat(),
+         'updated_at': now.isoformat(),
          'deleted_at': None,
          'deleted': False,
          'is_public': True,

@@ -204,8 +204,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         {'id': 'slkduhfas73kkaskgdas',
          'imageId': 'slkduhfas73kkaskgdas',
          'name': 'public image #2',
-         'created_at': str(datetime.datetime.utcnow()),
-         'updated_at': str(datetime.datetime.utcnow()),
+         'created_at': now.isoformat(),
+         'updated_at': now.isoformat(),
          'deleted_at': None,
          'deleted': False,
          'is_public': True,

@@ -247,20 +247,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         res = req.get_response(fakes.wsgi_app())
         res_dict = json.loads(res.body)
 
-        def _is_equivalent_subset(x, y):
-            if set(x) <= set(y):
-                for k, v in x.iteritems():
-                    if x[k] != y[k]:
-                        if x[k] == 'active' and y[k] == 'available':
-                            continue
-                        return False
-                return True
-            return False
+        for image in self.IMAGE_FIXTURES:
+            expected = {
+                'id': abs(hash(image['imageId'])),
+                'name': image['name'],
+                'status': 'active',
+            }
+            self.assertTrue(expected in res_dict['images'])
 
-        for image in res_dict['images']:
-            for image_fixture in self.IMAGE_FIXTURES:
-                if _is_equivalent_subset(image, image_fixture):
-                    break
-            else:
-                self.assertEquals(1, 2, "image %s not in fixtures!" %
-                                  str(image))
+    def test_show_image(self):
+        expected = self.IMAGE_FIXTURES[0]
+        id = abs(hash(expected['id']))
+        expected_time = self.now.strftime('%Y-%m-%dT%H:%M:%SZ')
+        req = webob.Request.blank('/v1.0/images/%s' % id)
+        res = req.get_response(fakes.wsgi_app())
+        actual = json.loads(res.body)['image']
+        self.assertEqual(expected_time, actual['created_at'])
+        self.assertEqual(expected_time, actual['updated_at'])


@@ -161,7 +161,7 @@ class ServersTest(test.TestCase):
         req = webob.Request.blank('/v1.0/servers/1')
         res = req.get_response(fakes.wsgi_app())
         res_dict = json.loads(res.body)
-        self.assertEqual(res_dict['server']['id'], '1')
+        self.assertEqual(res_dict['server']['id'], 1)
         self.assertEqual(res_dict['server']['name'], 'server1')
 
     def test_get_server_by_id_with_addresses(self):

@@ -172,7 +172,7 @@ class ServersTest(test.TestCase):
         req = webob.Request.blank('/v1.0/servers/1')
         res = req.get_response(fakes.wsgi_app())
         res_dict = json.loads(res.body)
-        self.assertEqual(res_dict['server']['id'], '1')
+        self.assertEqual(res_dict['server']['id'], 1)
         self.assertEqual(res_dict['server']['name'], 'server1')
         addresses = res_dict['server']['addresses']
         self.assertEqual(len(addresses["public"]), len(public))

@@ -180,7 +180,7 @@ class ServersTest(test.TestCase):
         self.assertEqual(len(addresses["private"]), 1)
         self.assertEqual(addresses["private"][0], private)
 
-    def test_get_server_by_id_with_addresses_v1_1(self):
+    def test_get_server_by_id_with_addresses_v11(self):
         private = "192.168.0.3"
         public = ["1.2.3.4"]
         new_return_server = return_server_with_addresses(private, public)

@@ -189,7 +189,7 @@ class ServersTest(test.TestCase):
         req.environ['api.version'] = '1.1'
         res = req.get_response(fakes.wsgi_app())
         res_dict = json.loads(res.body)
-        self.assertEqual(res_dict['server']['id'], '1')
+        self.assertEqual(res_dict['server']['id'], 1)
         self.assertEqual(res_dict['server']['name'], 'server1')
         addresses = res_dict['server']['addresses']
         self.assertEqual(len(addresses["public"]), len(public))

@@ -239,7 +239,7 @@ class ServersTest(test.TestCase):
         servers = json.loads(res.body)['servers']
         self.assertEqual([s['id'] for s in servers], [1, 2])
 
-    def _test_create_instance_helper(self):
+    def _setup_for_create_instance(self):
         """Shared implementation for tests below that create instance"""
 
         def instance_create(context, inst):
             return {'id': '1', 'display_name': 'server_test'}

@@ -276,14 +276,17 @@ class ServersTest(test.TestCase):
         self.stubs.Set(nova.api.openstack.common,
             "get_image_id_from_image_hash", image_id_from_hash)
 
+    def _test_create_instance_helper(self):
+        self._setup_for_create_instance()
+
         body = dict(server=dict(
-            name='server_test', imageId=2, flavorId=2,
+            name='server_test', imageId=3, flavorId=2,
             metadata={'hello': 'world', 'open': 'stack'},
             personality={}))
         req = webob.Request.blank('/v1.0/servers')
         req.method = 'POST'
         req.body = json.dumps(body)
-        req.headers["Content-Type"] = "application/json"
+        req.headers["content-type"] = "application/json"
 
         res = req.get_response(fakes.wsgi_app())

@@ -291,8 +294,9 @@ class ServersTest(test.TestCase):
         self.assertEqual('serv', server['adminPass'][:4])
         self.assertEqual(16, len(server['adminPass']))
         self.assertEqual('server_test', server['name'])
-        self.assertEqual('1', server['id'])
+        self.assertEqual(1, server['id'])
+        self.assertEqual(2, server['flavorId'])
+        self.assertEqual(3, server['imageId'])
         self.assertEqual(res.status_int, 200)
 
     def test_create_instance(self):

@@ -302,6 +306,56 @@ class ServersTest(test.TestCase):
         fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
         self._test_create_instance_helper()
 
+    def test_create_instance_v11(self):
+        self._setup_for_create_instance()
+
+        imageRef = 'http://localhost/v1.1/images/2'
+        flavorRef = 'http://localhost/v1.1/flavors/3'
+        body = {
+            'server': {
+                'name': 'server_test',
+                'imageRef': imageRef,
+                'flavorRef': flavorRef,
+                'metadata': {
+                    'hello': 'world',
+                    'open': 'stack',
+                },
+                'personality': {},
+            },
+        }
+
+        req = webob.Request.blank('/v1.1/servers')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app())
+
+        server = json.loads(res.body)['server']
+        self.assertEqual('serv', server['adminPass'][:4])
+        self.assertEqual(16, len(server['adminPass']))
+        self.assertEqual('server_test', server['name'])
+        self.assertEqual(1, server['id'])
+        self.assertEqual(flavorRef, server['flavorRef'])
+        self.assertEqual(imageRef, server['imageRef'])
+        self.assertEqual(res.status_int, 200)
+
+    def test_create_instance_v11_bad_href(self):
+        self._setup_for_create_instance()
+
+        imageRef = 'http://localhost/v1.1/images/asdf'
+        flavorRef = 'http://localhost/v1.1/flavors/3'
+        body = dict(server=dict(
+            name='server_test', imageRef=imageRef, flavorRef=flavorRef,
+            metadata={'hello': 'world', 'open': 'stack'},
+            personality={}))
+        req = webob.Request.blank('/v1.1/servers')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 400)
+
     def test_update_no_body(self):
         req = webob.Request.blank('/v1.0/servers/1')
         req.method = 'PUT'

@@ -524,16 +578,6 @@ class ServersTest(test.TestCase):
         req.body = json.dumps(body)
         res = req.get_response(fakes.wsgi_app())
 
-    def test_server_resize(self):
-        body = dict(server=dict(
-            name='server_test', imageId=2, flavorId=2, metadata={},
-            personality={}))
-        req = webob.Request.blank('/v1.0/servers/1/action')
-        req.method = 'POST'
-        req.content_type = 'application/json'
-        req.body = json.dumps(body)
-        res = req.get_response(fakes.wsgi_app())
-
     def test_delete_server_instance(self):
         req = webob.Request.blank('/v1.0/servers/1')
         req.method = 'DELETE'

@@ -589,6 +633,18 @@ class ServersTest(test.TestCase):
         res = req.get_response(fakes.wsgi_app())
         self.assertEqual(res.status_int, 400)
 
+    def test_resized_server_has_correct_status(self):
+        req = self.webreq('/1', 'GET', dict(resize=dict(flavorId=3)))
+
+        def fake_migration_get(*args):
+            return {}
+
+        self.stubs.Set(nova.db, 'migration_get_by_instance_and_status',
+                fake_migration_get)
+        res = req.get_response(fakes.wsgi_app())
+        body = json.loads(res.body)
+        self.assertEqual(body['server']['status'], 'resize-confirm')
+
     def test_confirm_resize_server(self):
         req = self.webreq('/1/action', 'POST', dict(confirmResize=None))

@@ -943,7 +999,7 @@ class TestServerInstanceCreation(test.TestCase):
 
     def _setup_mock_compute_api_for_personality(self):
 
-        class MockComputeAPI(object):
+        class MockComputeAPI(nova.compute.API):
             def __init__(self):
                 self.injected_files = None

View File

@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Openstack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -0,0 +1,188 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Openstack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from nova.image import glance
class StubGlanceClient(object):
def __init__(self, images, add_response=None, update_response=None):
self.images = images
self.add_response = add_response
self.update_response = update_response
def get_image_meta(self, id):
return self.images[id]
def get_images_detailed(self):
return self.images.itervalues()
def get_image(self, id):
return self.images[id], []
def add_image(self, metadata, data):
return self.add_response
def update_image(self, image_id, metadata, data):
return self.update_response
class NullWriter(object):
def write(self, *arg, **kwargs):
pass
class TestGlanceImageServiceDatetimes(unittest.TestCase):
def setUp(self):
self.client = StubGlanceClient(None)
self.service = glance.GlanceImageService(self.client)
def test_show_passes_through_to_client(self):
self.client.images = {'xyz': {'foo': 'bar'}}
self.assertEqual(self.service.show({}, 'xyz'), {'foo': 'bar'})
def test_detail_passes_through_to_client(self):
self.client.images = {1: {'foo': 'bar'}}
self.assertEqual(list(self.service.detail({})), [{'foo': 'bar'}])
def test_show_makes_create_datetimes(self):
create_time = datetime.datetime.utcnow()
self.client.images = {'xyz': {
'id': "id",
'name': "my awesome image",
'created_at': create_time.isoformat(),
}}
actual = self.service.show({}, 'xyz')
self.assertEqual(actual['created_at'], create_time)
def test_show_makes_update_datetimes(self):
update_time = datetime.datetime.utcnow()
self.client.images = {'abc': {
'id': "id",
'name': "my okay image",
'updated_at': update_time.isoformat(),
}}
actual = self.service.show({}, 'abc')
self.assertEqual(actual['updated_at'], update_time)
def test_show_makes_delete_datetimes(self):
delete_time = datetime.datetime.utcnow()
self.client.images = {'123': {
'id': "123",
'name': "my lame image",
'deleted_at': delete_time.isoformat(),
}}
actual = self.service.show({}, '123')
self.assertEqual(actual['deleted_at'], delete_time)
def test_show_handles_deleted_at_none(self):
self.client.images = {'747': {
'id': "747",
'name': "not deleted",
'deleted_at': None,
}}
actual = self.service.show({}, '747')
self.assertEqual(actual['deleted_at'], None)
def test_detail_handles_timestamps(self):
now = datetime.datetime.utcnow()
image1 = {
'id': 1,
'name': 'image 1',
'created_at': now.isoformat(),
'updated_at': now.isoformat(),
'deleted_at': None,
}
image2 = {
'id': 2,
'name': 'image 2',
'deleted_at': now.isoformat(),
}
self.client.images = {1: image1, 2: image2}
i1, i2 = self.service.detail({})
self.assertEqual(i1['created_at'], now)
self.assertEqual(i1['updated_at'], now)
self.assertEqual(i1['deleted_at'], None)
self.assertEqual(i2['deleted_at'], now)
def test_get_handles_timestamps(self):
now = datetime.datetime.utcnow()
self.client.images = {'abcd': {
'id': 'abcd',
'name': 'nifty image',
'created_at': now.isoformat(),
'updated_at': now.isoformat(),
'deleted_at': now.isoformat(),
}}
actual = self.service.get({}, 'abcd', NullWriter())
for attr in ('created_at', 'updated_at', 'deleted_at'):
self.assertEqual(actual[attr], now)
def test_get_handles_deleted_at_none(self):
self.client.images = {'abcd': {'deleted_at': None}}
actual = self.service.get({}, 'abcd', NullWriter())
self.assertEqual(actual['deleted_at'], None)
def test_create_handles_timestamps(self):
now = datetime.datetime.utcnow()
self.client.add_response = {
'id': 'abcd',
'name': 'blah',
'created_at': now.isoformat(),
'updated_at': now.isoformat(),
'deleted_at': now.isoformat(),
}
actual = self.service.create({}, {})
for attr in ('created_at', 'updated_at', 'deleted_at'):
self.assertEqual(actual[attr], now)
def test_create_handles_deleted_at_none(self):
self.client.add_response = {
'id': 'abcd',
'name': 'blah',
'deleted_at': None,
}
actual = self.service.create({}, {})
self.assertEqual(actual['deleted_at'], None)
def test_update_handles_timestamps(self):
now = datetime.datetime.utcnow()
self.client.update_response = {
'id': 'abcd',
'name': 'blah',
'created_at': now.isoformat(),
'updated_at': now.isoformat(),
'deleted_at': now.isoformat(),
}
actual = self.service.update({}, 'dummy_id', {})
for attr in ('created_at', 'updated_at', 'deleted_at'):
self.assertEqual(actual[attr], now)
def test_update_handles_deleted_at_none(self):
self.client.update_response = {
'id': 'abcd',
'name': 'blah',
'deleted_at': None,
}
actual = self.service.update({}, 'dummy_id', {})
self.assertEqual(actual['deleted_at'], None)

View File

@ -82,6 +82,21 @@ class ComputeTestCase(test.TestCase):
inst.update(params) inst.update(params)
return db.instance_create(self.context, inst)['id'] return db.instance_create(self.context, inst)['id']
def _create_instance_type(self, params={}):
"""Create a test instance"""
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = '1024'
inst['vcpus'] = '1'
inst['local_gb'] = '20'
inst['flavorid'] = '1'
inst['swap'] = '2048'
inst['rxtx_quota'] = 100
inst['rxtx_cap'] = 200
inst.update(params)
return db.instance_type_create(context, inst)['id']
def _create_group(self): def _create_group(self):
values = {'name': 'testgroup', values = {'name': 'testgroup',
'description': 'testgroup', 'description': 'testgroup',
@ -299,15 +314,53 @@ class ComputeTestCase(test.TestCase):
"""Ensure instance can be migrated/resized""" """Ensure instance can be migrated/resized"""
instance_id = self._create_instance() instance_id = self._create_instance()
context = self.context.elevated() context = self.context.elevated()
self.compute.run_instance(self.context, instance_id) self.compute.run_instance(self.context, instance_id)
db.instance_update(self.context, instance_id, {'host': 'foo'}) db.instance_update(self.context, instance_id, {'host': 'foo'})
self.compute.prep_resize(context, instance_id) self.compute.prep_resize(context, instance_id, 1)
migration_ref = db.migration_get_by_instance_and_status(context, migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating') instance_id, 'pre-migrating')
self.compute.resize_instance(context, instance_id, self.compute.resize_instance(context, instance_id,
migration_ref['id']) migration_ref['id'])
self.compute.terminate_instance(context, instance_id) self.compute.terminate_instance(context, instance_id)
def test_resize_invalid_flavor_fails(self):
"""Ensure invalid flavors raise"""
instance_id = self._create_instance()
context = self.context.elevated()
self.compute.run_instance(self.context, instance_id)
self.assertRaises(exception.NotFound, self.compute_api.resize,
context, instance_id, 200)
self.compute.terminate_instance(context, instance_id)
def test_resize_down_fails(self):
"""Ensure resizing down raises and fails"""
context = self.context.elevated()
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
db.instance_update(self.context, instance_id,
{'instance_type': 'm1.xlarge'})
self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
def test_resize_same_size_fails(self):
"""Ensure invalid flavors raise"""
context = self.context.elevated()
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
def test_get_by_flavor_id(self): def test_get_by_flavor_id(self):
type = instance_types.get_by_flavor_id(1) type = instance_types.get_by_flavor_id(1)
self.assertEqual(type, 'm1.tiny') self.assertEqual(type, 'm1.tiny')
@ -318,10 +371,8 @@ class ComputeTestCase(test.TestCase):
instance_id = self._create_instance() instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id) self.compute.run_instance(self.context, instance_id)
self.assertRaises(exception.Error, self.compute.prep_resize, self.assertRaises(exception.Error, self.compute.prep_resize,
self.context, instance_id) self.context, instance_id, 1)
self.compute.terminate_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id)
type = instance_types.get_by_flavor_id("1")
self.assertEqual(type, 'm1.tiny')
def _setup_other_managers(self): def _setup_other_managers(self):
self.volume_manager = utils.import_object(FLAGS.volume_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager)

View File

@ -21,9 +21,10 @@ import sys
import unittest import unittest
import nova import nova
from nova import test
class LocalizationTestCase(unittest.TestCase): class LocalizationTestCase(test.TestCase):
def test_multiple_positional_format_placeholders(self): def test_multiple_positional_format_placeholders(self):
pat = re.compile("\W_\(") pat = re.compile("\W_\(")
single_pat = re.compile("\W%\W") single_pat = re.compile("\W%\W")

View File

@ -18,8 +18,12 @@ import errno
import os import os
import select import select
from eventlet import greenpool
from eventlet import greenthread
from nova import test from nova import test
from nova.utils import parse_mailmap, str_dict_replace, synchronized from nova import utils
from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase): class ProjectTestCase(test.TestCase):
@ -63,7 +67,7 @@ class ProjectTestCase(test.TestCase):
class LockTestCase(test.TestCase): class LockTestCase(test.TestCase):
def test_synchronized_wrapped_function_metadata(self): def test_synchronized_wrapped_function_metadata(self):
@synchronized('whatever') @utils.synchronized('whatever')
def foo(): def foo():
"""Bar""" """Bar"""
pass pass
@ -72,11 +76,42 @@ class LockTestCase(test.TestCase):
self.assertEquals(foo.__name__, 'foo', "Wrapped function's name " self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
"got mangled") "got mangled")
def test_synchronized(self): def test_synchronized_internally(self):
"""We can lock across multiple green threads"""
saved_sem_num = len(utils._semaphores)
seen_threads = list()
@utils.synchronized('testlock2', external=False)
def f(id):
for x in range(10):
seen_threads.append(id)
greenthread.sleep(0)
threads = []
pool = greenpool.GreenPool(10)
for i in range(10):
threads.append(pool.spawn(f, i))
for thread in threads:
thread.wait()
self.assertEquals(len(seen_threads), 100)
# Looking at the seen threads, split it into chunks of 10, and verify
# that the last 9 match the first in each chunk.
for i in range(10):
for j in range(9):
self.assertEquals(seen_threads[i * 10],
seen_threads[i * 10 + 1 + j])
self.assertEqual(saved_sem_num, len(utils._semaphores),
"Semaphore leak detected")
def test_synchronized_externally(self):
"""We can lock across multiple processes"""
rpipe1, wpipe1 = os.pipe() rpipe1, wpipe1 = os.pipe()
rpipe2, wpipe2 = os.pipe() rpipe2, wpipe2 = os.pipe()
@synchronized('testlock') @utils.synchronized('testlock1', external=True)
def f(rpipe, wpipe): def f(rpipe, wpipe):
try: try:
os.write(wpipe, "foo") os.write(wpipe, "foo")

View File

@ -77,13 +77,11 @@ class CacheConcurrencyTestCase(test.TestCase):
eventlet.sleep(0) eventlet.sleep(0)
try: try:
self.assertFalse(done2.ready()) self.assertFalse(done2.ready())
self.assertTrue('fname' in conn._image_sems)
finally: finally:
wait1.send() wait1.send()
done1.wait() done1.wait()
eventlet.sleep(0) eventlet.sleep(0)
self.assertTrue(done2.ready()) self.assertTrue(done2.ready())
self.assertFalse('fname' in conn._image_sems)
def test_different_fname_concurrency(self): def test_different_fname_concurrency(self):
"""Ensures that two different fname caches are concurrent""" """Ensures that two different fname caches are concurrent"""

View File

@ -228,6 +228,9 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def VDI_get_by_uuid(*args): def VDI_get_by_uuid(*args):
return 'hurr' return 'hurr'
def VDI_resize_online(*args):
pass
def VM_start(self, _1, ref, _2, _3): def VM_start(self, _1, ref, _2, _3):
vm = fake.get_record('VM', ref) vm = fake.get_record('VM', ref)
if vm['power_state'] != 'Halted': if vm['power_state'] != 'Halted':
@ -240,7 +243,7 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def stub_out_migration_methods(stubs): def stub_out_migration_methods(stubs):
def fake_get_snapshot(self, instance): def fake_get_snapshot(self, instance):
return 'foo', 'bar' return 'vm_ref', dict(image='foo', snap='bar')
@classmethod @classmethod
def fake_get_vdi(cls, session, vm_ref): def fake_get_vdi(cls, session, vm_ref):
@ -249,7 +252,7 @@ def stub_out_migration_methods(stubs):
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, {'uuid': vdi_rec['uuid'], } return vdi_ref, {'uuid': vdi_rec['uuid'], }
def fake_shutdown(self, inst, vm, method='clean'): def fake_shutdown(self, inst, vm, hard=True):
pass pass
@classmethod @classmethod

View File

@ -41,6 +41,7 @@ from xml.sax import saxutils
from eventlet import event from eventlet import event
from eventlet import greenthread from eventlet import greenthread
from eventlet import semaphore
from eventlet.green import subprocess from eventlet.green import subprocess
from nova import exception from nova import exception
@ -334,6 +335,14 @@ def utcnow():
utcnow.override_time = None utcnow.override_time = None
def is_older_than(before, seconds):
"""Return True if before is older than 'seconds'"""
if utcnow() - before > datetime.timedelta(seconds=seconds):
return True
else:
return False
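# Illustrative sketch (not part of this commit): is_older_than() simply
# compares a datetime against utcnow(), so a timestamp captured 30 seconds
# ago is "older than" a 10 second window but not a 60 second one:
#
#     last_ran = utils.utcnow() - datetime.timedelta(seconds=30)
#     utils.is_older_than(last_ran, 10)   # True
#     utils.is_older_than(last_ran, 60)   # False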
def utcnow_ts(): def utcnow_ts():
"""Timestamp version of our utcnow function.""" """Timestamp version of our utcnow function."""
return time.mktime(utcnow().timetuple()) return time.mktime(utcnow().timetuple())
@ -531,17 +540,76 @@ def loads(s):
return json.loads(s) return json.loads(s)
def synchronized(name): _semaphores = {}
class _NoopContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def synchronized(name, external=False):
"""Synchronization decorator
Decorating a method like so:
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock:
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
"""
def wrap(f): def wrap(f):
@functools.wraps(f) @functools.wraps(f)
def inner(*args, **kwargs): def inner(*args, **kwargs):
LOG.debug(_("Attempting to grab %(lock)s for method " # NOTE(soren): If we ever go natively threaded, this will be racy.
"%(method)s..." % {"lock": name, # See http://stackoverflow.com/questions/5390569/dyn\
# amically-allocating-and-destroying-mutexes
if name not in _semaphores:
_semaphores[name] = semaphore.Semaphore()
sem = _semaphores[name]
LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
'"%(method)s"...' % {"lock": name,
"method": f.__name__})) "method": f.__name__}))
lock = lockfile.FileLock(os.path.join(FLAGS.lock_path, with sem:
'nova-%s.lock' % name)) if external:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...' %
{"lock": name, "method": f.__name__}))
lock_file_path = os.path.join(FLAGS.lock_path,
'nova-%s.lock' % name)
lock = lockfile.FileLock(lock_file_path)
else:
lock = _NoopContextManager()
with lock: with lock:
return f(*args, **kwargs) retval = f(*args, **kwargs)
# If no-one else is waiting for it, delete it.
# See note about possible raciness above.
if not sem.balance < 1:
del _semaphores[name]
return retval
return inner return inner
return wrap return wrap
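# Illustrative usage sketch (not part of this commit): the reworked decorator
# above serializes callers on a per-name eventlet semaphore, and when
# external=True it additionally takes a 'nova-<name>.lock' file lock under
# FLAGS.lock_path so that separate worker processes exclude each other too.
# The function names below are hypothetical.
from nova import utils

@utils.synchronized('example-lock')
def update_in_process_state():
    # Only one green thread in this process runs this body at a time.
    pass

@utils.synchronized('example-lock', external=True)
def update_on_disk_state():
    # Only one nova process on this host runs this body at a time.
    pass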

View File

@ -467,3 +467,6 @@ class HyperVConnection(object):
if vm is None: if vm is None:
raise exception.NotFound('Cannot detach volume from missing %s ' raise exception.NotFound('Cannot detach volume from missing %s '
% instance_name) % instance_name)
def poll_rescued_instances(self, timeout):
pass

View File

@ -48,7 +48,7 @@ from xml.dom import minidom
from eventlet import greenthread from eventlet import greenthread
from eventlet import tpool from eventlet import tpool
from eventlet import semaphore
import IPy import IPy
from nova import context from nova import context
@ -416,6 +416,10 @@ class LibvirtConnection(object):
# the normal xml file, we can just call reboot here # the normal xml file, we can just call reboot here
self.reboot(instance) self.reboot(instance)
@exception.wrap_exception
def poll_rescued_instances(self, timeout):
pass
@exception.wrap_exception @exception.wrap_exception
def spawn(self, instance): def spawn(self, instance):
xml = self.to_xml(instance) xml = self.to_xml(instance)
@ -556,13 +560,12 @@ class LibvirtConnection(object):
os.mkdir(base_dir) os.mkdir(base_dir)
base = os.path.join(base_dir, fname) base = os.path.join(base_dir, fname)
if fname not in LibvirtConnection._image_sems: @utils.synchronized(fname)
LibvirtConnection._image_sems[fname] = semaphore.Semaphore() def call_if_not_exists(base, fn, *args, **kwargs):
with LibvirtConnection._image_sems[fname]:
if not os.path.exists(base): if not os.path.exists(base):
fn(target=base, *args, **kwargs) fn(target=base, *args, **kwargs)
if not LibvirtConnection._image_sems[fname].locked():
del LibvirtConnection._image_sems[fname] call_if_not_exists(base, fn, *args, **kwargs)
if cow: if cow:
utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o', utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
@ -1780,14 +1783,14 @@ class IptablesFirewallDriver(FirewallDriver):
pass pass
def refresh_security_group_rules(self, security_group): def refresh_security_group_rules(self, security_group):
# We use the semaphore to make sure noone applies the rule set self.do_refresh_security_group_rules(security_group)
# after we've yanked the existing rules but before we've put in self.iptables.apply()
# the new ones.
with self.iptables.semaphore: @utils.synchronized('iptables', external=True)
def do_refresh_security_group_rules(self, security_group):
for instance in self.instances.values(): for instance in self.instances.values():
self.remove_filters_for_instance(instance) self.remove_filters_for_instance(instance)
self.add_filters_for_instance(instance) self.add_filters_for_instance(instance)
self.iptables.apply()
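# Illustrative note (not part of this commit): with external=True the
# decorator above guards the rule rewrite with a 'nova-iptables.lock' file
# lock under FLAGS.lock_path, so callers in other nova processes on the same
# host also wait, replacing the in-process semaphore the old code held while
# it yanked and re-added the per-instance filters.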
def _security_group_chain_name(self, security_group_id): def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,) return 'nova-sg-%s' % (security_group_id,)

View File

@ -51,6 +51,7 @@ class VMOps(object):
def __init__(self, session): def __init__(self, session):
self.XenAPI = session.get_imported_xenapi() self.XenAPI = session.get_imported_xenapi()
self._session = session self._session = session
self.poll_rescue_last_ran = None
VMHelper.XenAPI = self.XenAPI VMHelper.XenAPI = self.XenAPI
@ -63,6 +64,17 @@ class VMOps(object):
vm_refs.append(vm_rec["name_label"]) vm_refs.append(vm_rec["name_label"])
return vm_refs return vm_refs
def revert_resize(self, instance):
vm_ref = VMHelper.lookup(self._session, instance.name)
self._start(instance, vm_ref)
def finish_resize(self, instance, disk_info):
vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
disk_info['cow'])
vm_ref = self._create_vm(instance, vdi_uuid)
self.resize_instance(instance, vdi_uuid)
self._spawn(instance, vm_ref)
def _start(self, instance, vm_ref=None): def _start(self, instance, vm_ref=None):
"""Power on a VM instance""" """Power on a VM instance"""
if not vm_ref: if not vm_ref:
@ -73,7 +85,7 @@ class VMOps(object):
LOG.debug(_("Starting instance %s"), instance.name) LOG.debug(_("Starting instance %s"), instance.name)
self._session.call_xenapi('VM.start', vm_ref, False, False) self._session.call_xenapi('VM.start', vm_ref, False, False)
def create_disk(self, instance): def _create_disk(self, instance):
user = AuthManager().get_user(instance.user_id) user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id) project = AuthManager().get_project(instance.project_id)
disk_image_type = VMHelper.determine_disk_image_type(instance) disk_image_type = VMHelper.determine_disk_image_type(instance)
@ -82,10 +94,11 @@ class VMOps(object):
return vdi_uuid return vdi_uuid
def spawn(self, instance, network_info=None): def spawn(self, instance, network_info=None):
vdi_uuid = self.create_disk(instance) vdi_uuid = self._create_disk(instance)
self._spawn_with_disk(instance, vdi_uuid, network_info) vm_ref = self._create_vm(instance, vdi_uuid, network_info)
self._spawn(instance, vm_ref)
def _spawn_with_disk(self, instance, vdi_uuid, network_info=None): def _create_vm(self, instance, vdi_uuid, network_info=None):
"""Create VM instance""" """Create VM instance"""
instance_name = instance.name instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name) vm_ref = VMHelper.lookup(self._session, instance_name)
@ -128,16 +141,19 @@ class VMOps(object):
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=vdi_ref, userdevice=0, bootable=True) vdi_ref=vdi_ref, userdevice=0, bootable=True)
# inject_network_info and create vifs
# TODO(tr3buchet) - check to make sure we have network info, otherwise # TODO(tr3buchet) - check to make sure we have network info, otherwise
# create it now. This goes away once nova-multi-nic hits. # create it now. This goes away once nova-multi-nic hits.
if network_info is None: if network_info is None:
network_info = self._get_network_info(instance) network_info = self._get_network_info(instance)
self.create_vifs(vm_ref, network_info) self.create_vifs(vm_ref, network_info)
self.inject_network_info(instance, vm_ref, network_info) self.inject_network_info(instance, vm_ref, network_info)
return vm_ref
def _spawn(self, instance, vm_ref):
"""Spawn a new instance"""
LOG.debug(_('Starting VM %s...'), vm_ref) LOG.debug(_('Starting VM %s...'), vm_ref)
self._start(instance, vm_ref) self._start(instance, vm_ref)
instance_name = instance.name
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.') LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals()) % locals())
@ -310,7 +326,7 @@ class VMOps(object):
try: try:
# transfer the base copy # transfer the base copy
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
base_copy_uuid = template_vdi_uuids[1] base_copy_uuid = template_vdi_uuids['image']
vdi_ref, vm_vdi_rec = \ vdi_ref, vm_vdi_rec = \
VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid'] cow_uuid = vm_vdi_rec['uuid']
@ -325,7 +341,7 @@ class VMOps(object):
self._session.wait_for_task(task, instance.id) self._session.wait_for_task(task, instance.id)
# Now power down the instance and transfer the COW VHD # Now power down the instance and transfer the COW VHD
self._shutdown(instance, vm_ref, method='clean') self._shutdown(instance, vm_ref, hard=False)
params = {'host': dest, params = {'host': dest,
'vdi_uuid': cow_uuid, 'vdi_uuid': cow_uuid,
@ -345,7 +361,7 @@ class VMOps(object):
# sensible so we don't need to blindly pass around dictionaries # sensible so we don't need to blindly pass around dictionaries
return {'base_copy': base_copy_uuid, 'cow': cow_uuid} return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
def attach_disk(self, instance, base_copy_uuid, cow_uuid): def link_disks(self, instance, base_copy_uuid, cow_uuid):
"""Links the base copy VHD to the COW via the XAPI plugin""" """Links the base copy VHD to the COW via the XAPI plugin"""
vm_ref = VMHelper.lookup(self._session, instance.name) vm_ref = VMHelper.lookup(self._session, instance.name)
new_base_copy_uuid = str(uuid.uuid4()) new_base_copy_uuid = str(uuid.uuid4())
@ -366,9 +382,19 @@ class VMOps(object):
return new_cow_uuid return new_cow_uuid
def resize(self, instance, flavor): def resize_instance(self, instance, vdi_uuid):
"""Resize a running instance by changing it's RAM and disk size """ """Resize a running instance by changing it's RAM and disk size """
raise NotImplementedError() # TODO(mdietz): this will need to be adjusted for swap later
# The new disk size must be in bytes
new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024)
instance_name = instance.name
instance_local_gb = instance.local_gb
LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s."
" Expanding to %(instance_local_gb)d GB") % locals())
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size)
LOG.debug(_("Resize instance %s complete") % (instance.name))
def reboot(self, instance): def reboot(self, instance):
"""Reboot VM instance""" """Reboot VM instance"""
@ -443,8 +469,9 @@ class VMOps(object):
"""Shutdown an instance""" """Shutdown an instance"""
state = self.get_info(instance['name'])['state'] state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN: if state == power_state.SHUTDOWN:
LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") % instance_name = instance.name
locals()) LOG.warn(_("VM %(instance_name)s already halted,"
"skipping shutdown...") % locals())
return return
instance_id = instance.id instance_id = instance.id
@ -462,6 +489,10 @@ class VMOps(object):
except self.XenAPI.Failure, exc: except self.XenAPI.Failure, exc:
LOG.exception(exc) LOG.exception(exc)
def _shutdown_rescue(self, rescue_vm_ref):
"""Shutdown a rescue instance"""
self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm_ref)
def _destroy_vdis(self, instance, vm_ref): def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM""" """Destroys all VDIs associated with a VM"""
instance_id = instance.id instance_id = instance.id
@ -479,6 +510,24 @@ class VMOps(object):
except self.XenAPI.Failure, exc: except self.XenAPI.Failure, exc:
LOG.exception(exc) LOG.exception(exc)
def _destroy_rescue_vdis(self, rescue_vm_ref):
"""Destroys all VDIs associated with a rescued VM"""
vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
for vdi_ref in vdi_refs:
try:
self._session.call_xenapi("Async.VDI.destroy", vdi_ref)
except self.XenAPI.Failure:
continue
def _destroy_rescue_vbds(self, rescue_vm_ref):
"""Destroys all VBDs tied to a rescue VM"""
vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref)
if vbd_rec["userdevice"] == "1": # primary VBD is always 1
VMHelper.unplug_vbd(self._session, vbd_ref)
VMHelper.destroy_vbd(self._session, vbd_ref)
def _destroy_kernel_ramdisk(self, instance, vm_ref): def _destroy_kernel_ramdisk(self, instance, vm_ref):
""" """
Three situations can occur: Three situations can occur:
@ -529,6 +578,14 @@ class VMOps(object):
LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals()) LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
def _destroy_rescue_instance(self, rescue_vm_ref):
"""Destroy a rescue instance"""
self._destroy_rescue_vbds(rescue_vm_ref)
self._shutdown_rescue(rescue_vm_ref)
self._destroy_rescue_vdis(rescue_vm_ref)
self._session.call_xenapi("Async.VM.destroy", rescue_vm_ref)
def destroy(self, instance): def destroy(self, instance):
""" """
Destroy VM instance Destroy VM instance
@ -639,34 +696,50 @@ class VMOps(object):
"Instance is not in Rescue Mode: %s" % instance.name)) "Instance is not in Rescue Mode: %s" % instance.name))
original_vm_ref = self._get_vm_opaque_ref(instance) original_vm_ref = self._get_vm_opaque_ref(instance)
vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
instance._rescue = False instance._rescue = False
for vbd_ref in vbd_refs: self._destroy_rescue_instance(rescue_vm_ref)
_vbd_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)
if _vbd_ref["userdevice"] == "1":
VMHelper.unplug_vbd(self._session, vbd_ref)
VMHelper.destroy_vbd(self._session, vbd_ref)
task1 = self._session.call_xenapi("Async.VM.hard_shutdown",
rescue_vm_ref)
self._session.wait_for_task(task1, instance.id)
vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
for vdi_ref in vdi_refs:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure:
continue
task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm_ref)
self._session.wait_for_task(task2, instance.id)
self._release_bootlock(original_vm_ref) self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref) self._start(instance, original_vm_ref)
def poll_rescued_instances(self, timeout):
"""Look for expirable rescued instances
- forcibly exit rescue mode for any instances that have been
in rescue mode for >= the provided timeout
"""
last_ran = self.poll_rescue_last_ran
if last_ran:
if not utils.is_older_than(last_ran, timeout):
# Do not run. Let's bail.
return
else:
# Update the time tracker and proceed.
self.poll_rescue_last_ran = utils.utcnow()
else:
# We need a base time to start tracking.
self.poll_rescue_last_ran = utils.utcnow()
return
rescue_vms = []
for instance in self.list_instances():
if instance.endswith("-rescue"):
rescue_vms.append(dict(name=instance,
vm_ref=VMHelper.lookup(self._session,
instance)))
for vm in rescue_vms:
rescue_name = vm["name"]
rescue_vm_ref = vm["vm_ref"]
self._destroy_rescue_instance(rescue_vm_ref)
original_name = vm["name"].split("-rescue", 1)[0]
original_vm_ref = VMHelper.lookup(self._session, original_name)
self._release_bootlock(original_vm_ref)
self._session.call_xenapi("VM.start", original_vm_ref, False,
False)
def get_info(self, instance): def get_info(self, instance):
"""Return data about VM instance""" """Return data about VM instance"""
vm_ref = self._get_vm_opaque_ref(instance) vm_ref = self._get_vm_opaque_ref(instance)
@ -723,8 +796,9 @@ class VMOps(object):
'mac': instance.mac_address, 'mac': instance.mac_address,
'rxtx_cap': flavor['rxtx_cap'], 'rxtx_cap': flavor['rxtx_cap'],
'dns': [network['dns']], 'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_IPs], 'ips': [ip_dict(ip) for ip in network_IPs]}
'ip6s': [ip6_dict(ip) for ip in network_IPs]} if network['cidr_v6']:
info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
network_info.append((network, info)) network_info.append((network, info))
return network_info return network_info

View File

@ -164,20 +164,18 @@ class XenAPIConnection(object):
"""Create VM instance""" """Create VM instance"""
self._vmops.spawn(instance) self._vmops.spawn(instance)
def revert_resize(self, instance):
"""Reverts a resize, powering back on the instance"""
self._vmops.revert_resize(instance)
def finish_resize(self, instance, disk_info): def finish_resize(self, instance, disk_info):
"""Completes a resize, turning on the migrated instance""" """Completes a resize, turning on the migrated instance"""
vdi_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'], self._vmops.finish_resize(instance, disk_info)
disk_info['cow'])
self._vmops._spawn_with_disk(instance, vdi_uuid)
def snapshot(self, instance, image_id): def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance """ """ Create snapshot from a running VM instance """
self._vmops.snapshot(instance, image_id) self._vmops.snapshot(instance, image_id)
def resize(self, instance, flavor):
"""Resize a VM instance"""
raise NotImplementedError()
def reboot(self, instance): def reboot(self, instance):
"""Reboot VM instance""" """Reboot VM instance"""
self._vmops.reboot(instance) self._vmops.reboot(instance)
@ -225,6 +223,10 @@ class XenAPIConnection(object):
"""Unrescue the specified instance""" """Unrescue the specified instance"""
self._vmops.unrescue(instance, callback) self._vmops.unrescue(instance, callback)
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
self._vmops.poll_rescued_instances(timeout)
def reset_network(self, instance): def reset_network(self, instance):
"""reset networking for specified instance""" """reset networking for specified instance"""
self._vmops.reset_network(instance) self._vmops.reset_network(instance)

View File

@ -22,6 +22,7 @@ XenAPI Plugin for transfering data between host nodes
import os import os
import os.path import os.path
import pickle import pickle
import shlex
import shutil import shutil
import subprocess import subprocess
@ -97,7 +98,7 @@ def transfer_vhd(session, args):
logging.debug("Preparing to transmit %s to %s" % (source_path, logging.debug("Preparing to transmit %s to %s" % (source_path,
dest_path)) dest_path))
ssh_cmd = 'ssh -o StrictHostKeyChecking=no' ssh_cmd = '\"ssh -o StrictHostKeyChecking=no\"'
rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s' rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s'
% (ssh_cmd, source_path, dest_path)) % (ssh_cmd, source_path, dest_path))
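# Illustrative sketch (not part of this commit): quoting the ssh command is
# what keeps shlex.split() from scattering it across separate rsync
# arguments; the -e option then receives the whole command as one argv entry.
#
#     >>> import shlex
#     >>> shlex.split('nohup /usr/bin/rsync -av --progress -e '
#     ...             '"ssh -o StrictHostKeyChecking=no" /src host:/dst')
#     ['nohup', '/usr/bin/rsync', '-av', '--progress', '-e',
#      'ssh -o StrictHostKeyChecking=no', '/src', 'host:/dst']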

View File

@ -32,7 +32,6 @@ SUITE_NAMES = '[image, instance, volume]'
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES) flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
flags.DEFINE_integer('ssh_tries', 3, 'Number of times to try ssh') flags.DEFINE_integer('ssh_tries', 3, 'Number of times to try ssh')
boto_v6 = None
class SmokeTestCase(unittest.TestCase): class SmokeTestCase(unittest.TestCase):
@ -183,6 +182,9 @@ class SmokeTestCase(unittest.TestCase):
TEST_DATA = {} TEST_DATA = {}
if FLAGS.use_ipv6:
global boto_v6
boto_v6 = __import__('boto_v6')
class UserSmokeTestCase(SmokeTestCase): class UserSmokeTestCase(SmokeTestCase):