phew ... working
@@ -617,7 +617,7 @@ class VmCommands(object):
        :param host: show all instance on specified host.
        :param instance: show specificed instance.
        """
-       print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+       print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
               " %-10s %-10s %-10s %-5s" % (
                   _('instance'),
                   _('node'),
@@ -639,14 +639,14 @@ class VmCommands(object):
                context.get_admin_context(), host)

        for instance in instances:
-           print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+           print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
                   " %-10s %-10s %-10s %-5d" % (
                       instance['hostname'],
                       instance['host'],
-                      instance['instance_type'],
+                      instance['instance_type'].name,
                       instance['state_description'],
                       instance['launched_at'],
-                      instance['image_id'],
+                      instance['image_ref'],
                       instance['kernel_id'],
                       instance['ramdisk_id'],
                       instance['project_id'],
@@ -878,7 +878,7 @@ class InstanceTypeCommands(object):
        try:
            instance_types.create(name, memory, vcpus, local_gb,
                                  flavorid, swap, rxtx_quota, rxtx_cap)
-       except exception.InvalidInput:
+       except exception.InvalidInput, e:
            print "Must supply valid parameters to create instance_type"
            print e
            sys.exit(1)
@@ -90,31 +90,67 @@ class Controller(object):
        return webob.exc.HTTPNoContent()

    def create(self, req, body):
-       """Snapshot a server instance and save the image.
+       """Snapshot or backup a server instance and save the image.

+       Images now have an `image_type` associated with them, which can be
+       'snapshot' or the backup type, like 'daily' or 'weekly'.
+
+       If the image_type is backup-like, then the rotation factor can be
+       included and that will cause the oldest backups that exceed the
+       rotation factor to be deleted.
+
        :param req: `wsgi.Request` object
        """
+       def get_param(param):
+           try:
+               return body["image"][param]
+           except KeyError:
+               raise webob.exc.HTTPBadRequest(explanation="Missing required "
+                       "param: %s" % param)
+
        context = req.environ['nova.context']
        content_type = req.get_content_type()

        if not body:
            raise webob.exc.HTTPBadRequest()

+       image_type = body["image"].get("image_type", "snapshot")
+
        try:
            server_id = self._server_id_from_req(req, body)
-           image_name = body["image"]["name"]
        except KeyError:
            raise webob.exc.HTTPBadRequest()

+       image_name = get_param("name")
        props = self._get_extra_properties(req, body)

-       image = self._compute_service.snapshot(context, server_id,
-                                              image_name, props)
+       if image_type == "snapshot":
+           image = self._compute_service.snapshot(
+                       context, server_id, image_name,
+                       extra_properties=props)
+       elif image_type == "backup":
+           # NOTE(sirp): Unlike snapshot, backup is not a customer facing
+           # API call; rather, it's used by the internal backup scheduler
+           if not FLAGS.allow_admin_api:
+               raise webob.exc.HTTPBadRequest(
+                       explanation="Admin API Required")
+
+           backup_type = get_param("backup_type")
+           rotation = int(get_param("rotation"))
+
+           image = self._compute_service.backup(
+                       context, server_id, image_name,
+                       backup_type, rotation, extra_properties=props)
+       else:
+           LOG.error(_("Invalid image_type '%s' passed") % image_type)
+           raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: "
+                                          "%s" % image_type)

        return dict(image=self.get_builder(req).build(image, detail=True))

    def get_builder(self, request):
        """Indicates that you must use a Controller subclass."""
-       raise NotImplementedError
+       raise NotImplementedError()

    def _server_id_from_req(self, req, data):
        raise NotImplementedError()
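For reference, the new backup branch of create() above reads backup_type and rotation out of the request body via get_param(). A rough, hypothetical request body exercising that branch might look like this (illustrative values only, not taken from this commit; the server must have FLAGS.allow_admin_api enabled):

    # Hypothetical request body for the backup path (sketch, not from this commit).
    import json

    body = {"image": {"serverId": "123",        # instance to back up
                      "name": "nightly",        # required; read via get_param("name")
                      "image_type": "backup",   # defaults to "snapshot" when omitted
                      "backup_type": "daily",   # required for backups
                      "rotation": 7}}           # keep at most 7 backups of this type
    print json.dumps(body)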
@@ -394,4 +394,5 @@ class Resource(wsgi.Application):
        """Find action-spefic method on controller and call it."""

        controller_method = getattr(self.controller, action)
+       print "DISPATCHING", self.controller, action
        return controller_method(req=request, **action_args)
@@ -711,19 +711,60 @@ class API(base.Base):
            raise exception.Error(_("Unable to find host for Instance %s")
                                  % instance_id)

+   def backup(self, context, instance_id, name, backup_type, rotation,
+              extra_properties=None):
+       """Backup the given instance
+
+       :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+       :param name: name of the backup or snapshot
+           name = backup_type # daily backups are called 'daily'
+       :param rotation: int representing how many backups to keep around;
+           None if rotation shouldn't be used (as in the case of snapshots)
+       :param extra_properties: dict of extra image properties to include
+       """
+       recv_meta = self._create_image(context, instance_id, name, 'backup',
+               backup_type=backup_type, rotation=rotation,
+               extra_properties=extra_properties)
+       return recv_meta
+
    def snapshot(self, context, instance_id, name, extra_properties=None):
        """Snapshot the given instance.

+       :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+       :param name: name of the backup or snapshot
+       :param extra_properties: dict of extra image properties to include
+
        :returns: A dict containing image metadata
        """
-       properties = {'instance_id': str(instance_id),
+       return self._create_image(context, instance_id, name, 'snapshot',
+               extra_properties=extra_properties)
+
+   def _create_image(self, context, instance_id, name, image_type,
+                     backup_type=None, rotation=None, extra_properties=None):
+       """Create snapshot or backup for an instance on this host.
+
+       :param context: security context
+       :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+       :param name: string for name of the snapshot
+       :param image_type: snapshot | backup
+       :param backup_type: daily | weekly
+       :param rotation: int representing how many backups to keep around;
+           None if rotation shouldn't be used (as in the case of snapshots)
+       :param extra_properties: dict of extra image properties to include
+
+       """
+       instance = db.api.instance_get(context, instance_id)
+       properties = {'instance_uuid': instance['uuid'],
                      'user_id': str(context.user_id),
-                     'image_state': 'creating'}
+                     'image_state': 'creating',
+                     'image_type': image_type,
+                     'backup_type': backup_type}
        properties.update(extra_properties or {})
        sent_meta = {'name': name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        recv_meta = self.image_service.create(context, sent_meta)
-       params = {'image_id': recv_meta['id']}
+       params = {'image_id': recv_meta['id'], 'image_type': image_type,
+                 'backup_type': backup_type, 'rotation': rotation}
        self._cast_compute_message('snapshot_instance', context, instance_id,
                                   params=params)
        return recv_meta
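A minimal sketch of how a caller might drive the two public entry points added above (the compute_api, ctxt and instance_id names are assumed placeholders, not part of this change):

    # Sketch only: both calls funnel into _create_image() as shown above.
    snap_meta = compute_api.snapshot(ctxt, instance_id, 'pre-upgrade',
                                     extra_properties={'reason': 'upgrade'})
    backup_meta = compute_api.backup(ctxt, instance_id, 'daily',
                                     backup_type='daily', rotation=7)
    print snap_meta['id'], backup_meta['id']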
@@ -46,6 +46,7 @@ from eventlet import greenthread

from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import manager
from nova import network
@@ -53,7 +54,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
-from nova.notifier import api as notifier_api
+from nova.notifier import api as notifier
from nova.compute.utils import terminate_volumes
from nova.virt import driver

@@ -84,6 +85,10 @@ flags.DEFINE_integer('host_state_interval', 120,
LOG = logging.getLogger('nova.compute.manager')


+def publisher_id(host=None):
+    return notifier.publisher_id("compute", host)
+
+
def checks_instance_lock(function):
    """Decorator to prevent action against locked instances for non-admins."""
    @functools.wraps(function)
@@ -112,10 +117,6 @@ def checks_instance_lock(function):
    return decorated_function


-def publisher_id(host=None):
-    return notifier_api.publisher_id("compute", host)
-
-
class ComputeManager(manager.SchedulerDependentManager):
    """Manages the running instances from creation to destruction."""

@@ -200,7 +201,7 @@ class ComputeManager(manager.SchedulerDependentManager):
    def get_console_pool_info(self, context, console_type):
        return self.driver.get_console_pool_info(console_type)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def refresh_security_group_rules(self, context, security_group_id,
                                     **kwargs):
        """Tell the virtualization driver to refresh security group rules.
@@ -210,7 +211,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        """
        return self.driver.refresh_security_group_rules(security_group_id)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def refresh_security_group_members(self, context,
                                       security_group_id, **kwargs):
        """Tell the virtualization driver to refresh security group members.
@@ -220,7 +221,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        """
        return self.driver.refresh_security_group_members(security_group_id)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def refresh_provider_fw_rules(self, context, **_kwargs):
        """This call passes straight through to the virtualization driver."""
        return self.driver.refresh_provider_fw_rules()
@@ -349,9 +350,9 @@ class ComputeManager(manager.SchedulerDependentManager):
            self._update_launched_at(context, instance_id)
            self._update_state(context, instance_id)
            usage_info = utils.usage_from_instance(instance_ref)
-           notifier_api.notify('compute.%s' % self.host,
+           notifier.notify('compute.%s' % self.host,
                                'compute.instance.create',
-                               notifier_api.INFO,
+                               notifier.INFO,
                                usage_info)
        except exception.InstanceNotFound:
            # FIXME(wwolf): We are just ignoring InstanceNotFound
@@ -360,11 +361,11 @@ class ComputeManager(manager.SchedulerDependentManager):
            # be fixed once we have no-db-messaging
            pass

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def run_instance(self, context, instance_id, **kwargs):
        self._run_instance(context, instance_id, **kwargs)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def start_instance(self, context, instance_id):
        """Starting an instance on this host."""
@@ -426,7 +427,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        if action_str == 'Terminating':
            terminate_volumes(self.db, context, instance_id)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def terminate_instance(self, context, instance_id):
        """Terminate an instance on this host."""
@@ -436,19 +437,19 @@ class ComputeManager(manager.SchedulerDependentManager):
        # TODO(ja): should we keep it in a terminated state for a bit?
        self.db.instance_destroy(context, instance_id)
        usage_info = utils.usage_from_instance(instance_ref)
-       notifier_api.notify('compute.%s' % self.host,
+       notifier.notify('compute.%s' % self.host,
                            'compute.instance.delete',
-                           notifier_api.INFO,
+                           notifier.INFO,
                            usage_info)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def stop_instance(self, context, instance_id):
        """Stopping an instance on this host."""
        self._shutdown_instance(context, instance_id, 'Stopping')
        # instance state will be updated to stopped by _poll_instance_states()

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def rebuild_instance(self, context, instance_id, **kwargs):
        """Destroy and re-make this instance.
@@ -478,12 +479,12 @@ class ComputeManager(manager.SchedulerDependentManager):
        self._update_state(context, instance_id)
        usage_info = utils.usage_from_instance(instance_ref,
                                               image_ref=image_ref)
-       notifier_api.notify('compute.%s' % self.host,
+       notifier.notify('compute.%s' % self.host,
                            'compute.instance.rebuild',
-                           notifier_api.INFO,
+                           notifier.INFO,
                            usage_info)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def reboot_instance(self, context, instance_id):
        """Reboot an instance on this host."""
@@ -508,9 +509,20 @@ class ComputeManager(manager.SchedulerDependentManager):
        self.driver.reboot(instance_ref)
        self._update_state(context, instance_id)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
-   def snapshot_instance(self, context, instance_id, image_id):
-       """Snapshot an instance on this host."""
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+   def snapshot_instance(self, context, instance_id, image_id,
+                         image_type='snapshot', backup_type=None,
+                         rotation=None):
+       """Snapshot an instance on this host.
+
+       :param context: security context
+       :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+       :param image_id: glance.db.sqlalchemy.models.Image.Id
+       :param image_type: snapshot | backup
+       :param backup_type: daily | weekly
+       :param rotation: int representing how many backups to keep around;
+           None if rotation shouldn't be used (as in the case of snapshots)
+       """
        context = context.elevated()
        instance_ref = self.db.instance_get(context, instance_id)

@@ -530,7 +542,66 @@ class ComputeManager(manager.SchedulerDependentManager):

        self.driver.snapshot(instance_ref, image_id)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+       if image_type == 'snapshot':
+           if rotation:
+               raise exception.ImageRotationNotAllowed()
+       elif image_type == 'backup':
+           if rotation:
+               instance_uuid = instance_ref['uuid']
+               self.rotate_backups(context, instance_uuid, backup_type,
+                                   rotation)
+           else:
+               raise exception.RotationRequiredForBackup()
+       else:
+           raise Exception(_('Image type not recognized %s') % image_type)
+
+   def rotate_backups(self, context, instance_uuid, backup_type, rotation):
+       """Delete excess backups associated to an instance.
+
+       Instances are allowed a fixed number of backups (the rotation number);
+       this method deletes the oldest backups that exceed the rotation
+       threshold.
+
+       :param context: security context
+       :param instance_uuid: string representing uuid of instance
+       :param backup_type: daily | weekly
+       :param rotation: int representing how many backups to keep around;
+           None if rotation shouldn't be used (as in the case of snapshots)
+       """
+       # NOTE(jk0): Eventually extract this out to the ImageService?
+       def fetch_images():
+           images = []
+           marker = None
+           while True:
+               batch = image_service.detail(context, filters=filters,
+                       marker=marker, sort_key='created_at', sort_dir='desc')
+               if not batch:
+                   break
+               images += batch
+               marker = batch[-1]['id']
+           return images
+
+       image_service = nova.image.get_default_image_service()
+       filters = {'property-image_type': 'backup',
+                  'property-backup_type': backup_type,
+                  'property-instance_uuid': instance_uuid}
+
+       images = fetch_images()
+       num_images = len(images)
+       LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
+                   % locals()))
+       if num_images > rotation:
+           # NOTE(sirp): this deletes all backups that exceed the rotation
+           # limit
+           excess = len(images) - rotation
+           LOG.debug(_("Rotating out %d backups" % excess))
+           for i in xrange(excess):
+               image = images.pop()
+               image_id = image['id']
+               LOG.debug(_("Deleting image %d" % image_id))
+               image_service.delete(context, image_id)
+
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def set_admin_password(self, context, instance_id, new_pass=None):
        """Set the root/admin password for an instance on this host.
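To make the rotation arithmetic above concrete: the image service returns images newest-first (sort_dir='desc'), so popping from the end of the list removes the oldest backups. A standalone sketch of the same pruning step, with made-up image ids:

    # Standalone sketch of the pruning loop above (illustrative ids only).
    images = ['img-5', 'img-4', 'img-3', 'img-2', 'img-1']  # newest ... oldest
    rotation = 2
    excess = len(images) - rotation        # 3 backups over the limit
    for _ in xrange(excess):
        oldest = images.pop()              # removes 'img-1', then 'img-2', ...
        print "would delete", oldest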
@@ -578,7 +649,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                time.sleep(1)
                continue

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def inject_file(self, context, instance_id, path, file_contents):
        """Write a file to the specified path in an instance on this host."""
@@ -596,7 +667,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        LOG.audit(msg)
        self.driver.inject_file(instance_ref, path, file_contents)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def agent_update(self, context, instance_id, url, md5hash):
        """Update agent running on an instance on this host."""
@@ -614,7 +685,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        LOG.audit(msg)
        self.driver.agent_update(instance_ref, url, md5hash)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def rescue_instance(self, context, instance_id):
        """Rescue an instance on this host."""
@@ -631,7 +702,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        self.driver.rescue(instance_ref, _update_state)
        self._update_state(context, instance_id)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def unrescue_instance(self, context, instance_id):
        """Rescue an instance on this host."""
@@ -652,7 +723,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        """Update instance state when async task completes."""
        self._update_state(context, instance_id)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def confirm_resize(self, context, instance_id, migration_id):
        """Destroys the source instance."""
@@ -660,12 +731,12 @@ class ComputeManager(manager.SchedulerDependentManager):
        instance_ref = self.db.instance_get(context, instance_id)
        self.driver.destroy(instance_ref)
        usage_info = utils.usage_from_instance(instance_ref)
-       notifier_api.notify('compute.%s' % self.host,
+       notifier.notify('compute.%s' % self.host,
                            'compute.instance.resize.confirm',
-                           notifier_api.INFO,
+                           notifier.INFO,
                            usage_info)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def revert_resize(self, context, instance_id, migration_id):
        """Destroys the new instance on the destination machine.
@@ -687,7 +758,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                             'instance_id': instance_id, },
                    })

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def finish_revert_resize(self, context, instance_id, migration_id):
        """Finishes the second half of reverting a resize.
@@ -712,12 +783,12 @@ class ComputeManager(manager.SchedulerDependentManager):
        self.db.migration_update(context, migration_id,
                {'status': 'reverted'})
        usage_info = utils.usage_from_instance(instance_ref)
-       notifier_api.notify('compute.%s' % self.host,
+       notifier.notify('compute.%s' % self.host,
                            'compute.instance.resize.revert',
-                           notifier_api.INFO,
+                           notifier.INFO,
                            usage_info)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def prep_resize(self, context, instance_id, flavor_id):
        """Initiates the process of moving a running instance to another host.
@@ -755,12 +826,12 @@ class ComputeManager(manager.SchedulerDependentManager):
        usage_info = utils.usage_from_instance(instance_ref,
                new_instance_type=instance_type['name'],
                new_instance_type_id=instance_type['id'])
-       notifier_api.notify('compute.%s' % self.host,
+       notifier.notify('compute.%s' % self.host,
                            'compute.instance.resize.prep',
-                           notifier_api.INFO,
+                           notifier.INFO,
                            usage_info)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def resize_instance(self, context, instance_id, migration_id):
        """Starts the migration of a running instance to another host."""
@@ -786,7 +857,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                             'instance_id': instance_id,
                             'disk_info': disk_info}})

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def finish_resize(self, context, instance_id, migration_id, disk_info):
        """Completes the migration process.
@@ -816,7 +887,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        self.db.migration_update(context, migration_id,
                {'status': 'finished', })

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def pause_instance(self, context, instance_id):
        """Pause an instance on this host."""
@@ -833,7 +904,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                    instance_id,
                                                    result))

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def unpause_instance(self, context, instance_id):
        """Unpause a paused instance on this host."""
@@ -850,7 +921,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                    instance_id,
                                                    result))

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def get_diagnostics(self, context, instance_id):
        """Retrieve diagnostics for an instance on this host."""
        instance_ref = self.db.instance_get(context, instance_id)
@@ -859,7 +930,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                  context=context)
        return self.driver.get_diagnostics(instance_ref)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def suspend_instance(self, context, instance_id):
        """Suspend the given instance."""
@@ -875,7 +946,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                    instance_id,
                                                    result))

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def resume_instance(self, context, instance_id):
        """Resume the given suspended instance."""
@@ -891,7 +962,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                    instance_id,
                                                    result))

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def lock_instance(self, context, instance_id):
        """Lock the given instance."""
        context = context.elevated()
@@ -899,7 +970,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        LOG.debug(_('instance %s: locking'), instance_id, context=context)
        self.db.instance_update(context, instance_id, {'locked': True})

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def unlock_instance(self, context, instance_id):
        """Unlock the given instance."""
        context = context.elevated()
@@ -907,7 +978,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
        self.db.instance_update(context, instance_id, {'locked': False})

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def get_lock(self, context, instance_id):
        """Return the boolean state of the given instance's lock."""
        context = context.elevated()
@@ -934,7 +1005,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                  context=context)
        self.driver.inject_network_info(instance_ref)

-   @exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def get_console_output(self, context, instance_id):
        """Send the console output for the given instance."""
        context = context.elevated()
@@ -944,7 +1015,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        output = self.driver.get_console_output(instance_ref)
        return output.decode('utf-8', 'replace').encode('ascii', 'replace')

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def get_ajax_console(self, context, instance_id):
        """Return connection information for an ajax console."""
        context = context.elevated()
@@ -952,7 +1023,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        instance_ref = self.db.instance_get(context, instance_id)
        return self.driver.get_ajax_console(instance_ref)

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def get_vnc_console(self, context, instance_id):
        """Return connection information for a vnc console."""
        context = context.elevated()
@@ -1015,7 +1086,7 @@ class ComputeManager(manager.SchedulerDependentManager):

        return True

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    @checks_instance_lock
    def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
        """Detach a volume from an instance."""
@@ -1050,7 +1121,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        """
        self.volume_manager.remove_compute_volume(context, volume_id)

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def compare_cpu(self, context, cpu_info):
        """Checks that the host cpu is compatible with a cpu given by xml.

@@ -1061,7 +1132,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        """
        return self.driver.compare_cpu(cpu_info)

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def create_shared_storage_test_file(self, context):
        """Makes tmpfile under FLAGS.instance_path.

@@ -1081,7 +1152,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        os.close(fd)
        return os.path.basename(tmp_file)

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def check_shared_storage_test_file(self, context, filename):
        """Confirms existence of the tmpfile under FLAGS.instances_path.

@@ -1093,7 +1164,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        if not os.path.exists(tmp_file):
            raise exception.FileNotFound(file_path=tmp_file)

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def cleanup_shared_storage_test_file(self, context, filename):
        """Removes existence of the tmpfile under FLAGS.instances_path.

@@ -1104,7 +1175,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        tmp_file = os.path.join(FLAGS.instances_path, filename)
        os.remove(tmp_file)

-   @exception.wrap_exception
+   @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def update_available_resource(self, context):
        """See comments update_resource_info.

@@ -1112,6 +1183,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        :returns: See driver.update_available_resource()

        """
+       print "UPDATE AVAILABLE"
        return self.driver.update_available_resource(context, self.host)

    def pre_live_migration(self, context, instance_id, time=None):
@@ -1197,7 +1269,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                         {"method": "pre_live_migration",
                          "args": {'instance_id': instance_id}})

-       except Exception, e:
+       except Exception:
            msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
            LOG.error(msg % locals())
            self.recover_live_migration(context, instance_ref)
@@ -56,7 +56,7 @@ class ConsoleProxyManager(manager.Manager):
    def init_host(self):
        self.driver.init_host()

-   @exception.wrap_exception
+   @exception.wrap_exception()
    def add_console(self, context, instance_id, password=None,
                    port=None, **kwargs):
        instance = self.db.instance_get(context, instance_id)
@@ -83,7 +83,7 @@ class ConsoleProxyManager(manager.Manager):
        self.driver.setup_console(context, console)
        return console['id']

-   @exception.wrap_exception
+   @exception.wrap_exception()
    def remove_console(self, context, console_id, **_kwargs):
        try:
            console = self.db.console_get(context, console_id)
@@ -77,7 +77,7 @@ class ConsoleVMRCManager(manager.Manager):
        self.driver.setup_console(context, console)
        return console

-   @exception.wrap_exception
+   @exception.wrap_exception()
    def add_console(self, context, instance_id, password=None,
                    port=None, **kwargs):
        """Adds a console for the instance.
@@ -107,7 +107,7 @@ class ConsoleVMRCManager(manager.Manager):
                                            instance)
        return console['id']

-   @exception.wrap_exception
+   @exception.wrap_exception()
    def remove_console(self, context, console_id, **_kwargs):
        """Removes a console entry."""
        try:
@@ -24,8 +24,9 @@ SHOULD include dedicated exception logging.

"""

-from nova import log as logging
+from functools import wraps
+
+from nova import log as logging

LOG = logging.getLogger('nova.exception')

@@ -81,8 +82,16 @@ def wrap_db_error(f):
    _wrap.func_name = f.func_name


-def wrap_exception(f, notifier=None, publisher_id=None, event_type=None, level=None):
-   def _wrap(*args, **kw):
+def wrap_exception(notifier=None, publisher_id=None, event_type=None, level=None):
+   """This decorator wraps a method to catch any exceptions that may
+   get thrown. It logs the exception as well as optionally sending
+   it to the notification system.
+   """
+   # TODO(sandy): Find a way to import nova.notifier.api so we don't have
+   # to pass it in as a parameter. Otherwise we get a cyclic import of
+   # nova.notifier.api -> nova.utils -> nova.exception :(
+   def inner(f):
+       def wrapped(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception, e:
@@ -90,10 +99,21 @@ def wrap_exception(f, notifier=None, publisher_id=None, event_type=None, level=N
                payload = dict(args=args, exception=e)
                payload.update(kw)

-               if not level:
-                   level = notifier.ERROR
-               notifier.safe_notify(publisher_id, event_type, level, payload)
+               # Use a temp vars so we don't shadow
+               # our outer definitions.
+               temp_level = level
+               if not temp_level:
+                   temp_level = notifier.ERROR
+
+               temp_type = event_type
+               if not temp_type:
+                   # If f has multiple decorators, they must use
+                   # functools.wraps to ensure the name is
+                   # propagated.
+                   temp_type = f.__name__
+
+               notifier.safe_notify(publisher_id, temp_type, temp_level,
+                                    payload)
+
                if not isinstance(e, Error):
                    #exc_type, exc_value, exc_traceback = sys.exc_info()
@@ -101,8 +121,9 @@ def wrap_exception(f, notifier=None, publisher_id=None, event_type=None, level=N
                    #logging.error(traceback.extract_stack(exc_traceback))
                    raise Error(str(e))
                raise
-   _wrap.func_name = f.func_name
-   return _wrap
+       return wraps(f)(wrapped)
+
+   return inner


class NovaException(Exception):
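The practical effect of this refactor is that wrap_exception is now a decorator factory, so every call site has to add parentheses and may pass a notifier. Both forms used elsewhere in this commit look like this (a sketch; the surrounding class and the notifier/publisher_id names are the ones defined in the other files of this change):

    # Bare form: just log unexpected exceptions.
    @exception.wrap_exception()
    def remove_console(self, context, console_id, **_kwargs):
        pass

    # Notifier form: also publish the failure through the notification system.
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def terminate_instance(self, context, instance_id):
        pass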
@@ -567,6 +588,14 @@ class GlobalRoleNotAllowed(NotAllowed):
    message = _("Unable to use global role %(role_id)s")


+class ImageRotationNotAllowed(NovaException):
+    message = _("Rotation is not allowed for snapshots")
+
+
+class RotationRequiredForBackup(NovaException):
+    message = _("Rotation param is required for backup image_type")
+
+
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
    pass
@@ -17,7 +17,9 @@ import uuid

from nova import flags
from nova import utils
+from nova import log as logging

+LOG = logging.getLogger('nova.exception')

FLAGS = flags.FLAGS

@@ -47,9 +49,10 @@ def safe_notify(publisher_id, event_type, priority, payload):
    try:
        notify(publisher_id, event_type, notification_level, payload)
    except Exception, e:
-       LOG.exception(_("Problem '%(e)' attempting to "
+       LOG.exception(_("Problem '%(e)s' attempting to "
                "send to notification system." % locals()))
+

def notify(publisher_id, event_type, priority, payload):
    """
    Sends a notification using the specified driver
@@ -219,7 +219,7 @@ class AdapterConsumer(Consumer):
            return
        self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)

-   @exception.wrap_exception
+   @exception.wrap_exception()
    def _process_data(self, msg_id, ctxt, method, args):
        """Thread that maigcally looks for a method on the proxy
        object and calls it.
@@ -147,6 +147,16 @@ def stub_out_compute_api_snapshot(stubs):
    stubs.Set(nova.compute.API, 'snapshot', snapshot)


+def stub_out_compute_api_backup(stubs):
+    def backup(self, context, instance_id, name, backup_type, rotation,
+               extra_properties=None):
+        props = dict(instance_id=instance_id, instance_ref=instance_id,
+                     backup_type=backup_type, rotation=rotation)
+        props.update(extra_properties or {})
+        return dict(id='123', status='ACTIVE', name=name, properties=props)
+    stubs.Set(nova.compute.API, 'backup', backup)
+
+
def stub_out_glance_add_image(stubs, sent_to_glance):
    """
    We return the metadata sent to glance by modifying the sent_to_glance dict
@@ -340,6 +340,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         self.fixtures = self._make_image_fixtures()
         fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
         fakes.stub_out_compute_api_snapshot(self.stubs)
+        fakes.stub_out_compute_api_backup(self.stubs)

     def tearDown(self):
         """Run after each test."""
@@ -364,10 +365,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         response_list = response_dict["images"]

         expected = [{'id': 123, 'name': 'public image'},
-                    {'id': 124, 'name': 'queued backup'},
-                    {'id': 125, 'name': 'saving backup'},
-                    {'id': 126, 'name': 'active backup'},
-                    {'id': 127, 'name': 'killed backup'},
+                    {'id': 124, 'name': 'queued snapshot'},
+                    {'id': 125, 'name': 'saving snapshot'},
+                    {'id': 126, 'name': 'active snapshot'},
+                    {'id': 127, 'name': 'killed snapshot'},
                     {'id': 129, 'name': None}]

         self.assertDictListMatch(response_list, expected)
@@ -617,14 +618,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         },
         {
             'id': 124,
-            'name': 'queued backup',
+            'name': 'queued snapshot',
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
             'status': 'QUEUED',
         },
         {
             'id': 125,
-            'name': 'saving backup',
+            'name': 'saving snapshot',
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
             'status': 'SAVING',
@@ -632,14 +633,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         },
         {
             'id': 126,
-            'name': 'active backup',
+            'name': 'active snapshot',
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
             'status': 'ACTIVE'
         },
         {
             'id': 127,
-            'name': 'killed backup',
+            'name': 'killed snapshot',
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
             'status': 'FAILED',
@@ -684,7 +685,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         },
         {
             'id': 124,
-            'name': 'queued backup',
+            'name': 'queued snapshot',
             'serverRef': "http://localhost:8774/v1.1/servers/42",
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
@@ -706,7 +707,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         },
         {
             'id': 125,
-            'name': 'saving backup',
+            'name': 'saving snapshot',
             'serverRef': "http://localhost:8774/v1.1/servers/42",
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
@@ -729,7 +730,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         },
         {
             'id': 126,
-            'name': 'active backup',
+            'name': 'active snapshot',
             'serverRef': "http://localhost:8774/v1.1/servers/42",
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
@@ -751,7 +752,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         },
         {
             'id': 127,
-            'name': 'killed backup',
+            'name': 'killed snapshot',
             'serverRef': "http://localhost:8774/v1.1/servers/42",
             'updated': self.NOW_API_FORMAT,
             'created': self.NOW_API_FORMAT,
@@ -969,8 +970,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         self.assertEqual(res.status_int, 404)

     def test_create_image(self):
-        body = dict(image=dict(serverId='123', name='Backup 1'))
+        body = dict(image=dict(serverId='123', name='Snapshot 1'))
         req = webob.Request.blank('/v1.0/images')
         req.method = 'POST'
         req.body = json.dumps(body)
@@ -978,9 +978,95 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         response = req.get_response(fakes.wsgi_app())
         self.assertEqual(200, response.status_int)

+    def test_create_snapshot_no_name(self):
+        """Name is required for snapshots"""
+        body = dict(image=dict(serverId='123'))
+        req = webob.Request.blank('/v1.0/images')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEqual(400, response.status_int)
+
+    def test_create_backup_no_name(self):
+        """Name is also required for backups"""
+        body = dict(image=dict(serverId='123', image_type='backup',
+                               backup_type='daily', rotation=1))
+        req = webob.Request.blank('/v1.0/images')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEqual(400, response.status_int)
+
+    def test_create_backup_with_rotation_and_backup_type(self):
+        """The happy path for creating backups
+
+        Creating a backup is an admin-only operation, as opposed to snapshots
+        which are available to anybody.
+        """
+        # FIXME(sirp): teardown needed?
+        FLAGS.allow_admin_api = True
+
+        # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+        body = dict(image=dict(serverId='123', image_type='backup',
+                               name='Backup 1',
+                               backup_type='daily', rotation=1))
+        req = webob.Request.blank('/v1.0/images')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEqual(200, response.status_int)
+
+    def test_create_backup_no_rotation(self):
+        """Rotation is required for backup requests"""
+        # FIXME(sirp): teardown needed?
+        FLAGS.allow_admin_api = True
+
+        # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+        body = dict(image=dict(serverId='123', name='daily',
+                               image_type='backup', backup_type='daily'))
+        req = webob.Request.blank('/v1.0/images')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEqual(400, response.status_int)
+
+    def test_create_backup_no_backup_type(self):
+        """Backup Type (daily or weekly) is required for backup requests"""
+        # FIXME(sirp): teardown needed?
+        FLAGS.allow_admin_api = True
+
+        # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+        body = dict(image=dict(serverId='123', name='daily',
+                               image_type='backup', rotation=1))
+        req = webob.Request.blank('/v1.0/images')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEqual(400, response.status_int)
+
+    def test_create_image_with_invalid_image_type(self):
+        """Valid image_types are snapshot | daily | weekly"""
+        # FIXME(sirp): teardown needed?
+        FLAGS.allow_admin_api = True
+
+        # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+        body = dict(image=dict(serverId='123', image_type='monthly',
+                               rotation=1))
+        req = webob.Request.blank('/v1.0/images')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEqual(400, response.status_int)
+
     def test_create_image_no_server_id(self):

-        body = dict(image=dict(name='Backup 1'))
+        body = dict(image=dict(name='Snapshot 1'))
         req = webob.Request.blank('/v1.0/images')
         req.method = 'POST'
         req.body = json.dumps(body)
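Taken together, the new tests above pin down the request validation for the create call: a name is always required, a backup request must also carry a backup_type and a rotation, and an unrecognised image_type is rejected with a 400. A minimal sketch of that validation, assuming a hypothetical helper name and error wording (this is illustrative, not the controller's actual code):

import webob.exc


def _validate_image_create(image):
    # Hypothetical helper mirroring the rules the tests above assert.
    def require(param):
        if param not in image:
            raise webob.exc.HTTPBadRequest(
                explanation="Missing required param: %s" % param)

    require("name")
    image_type = image.get("image_type", "snapshot")
    if image_type == "snapshot":
        pass                      # a name (plus a server ref) is enough
    elif image_type == "backup":
        require("backup_type")    # 'daily' or 'weekly'
        require("rotation")       # how many backups to keep
    else:
        raise webob.exc.HTTPBadRequest(
            explanation="Invalid image_type: %s" % image_type)

test_create_backup_with_rotation_and_backup_type additionally flips FLAGS.allow_admin_api, since backups are meant to be an admin-only operation.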
@@ -990,7 +1076,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):

     def test_create_image_v1_1(self):

-        body = dict(image=dict(serverRef='123', name='Backup 1'))
+        body = dict(image=dict(serverRef='123', name='Snapshot 1'))
         req = webob.Request.blank('/v1.1/images')
         req.method = 'POST'
         req.body = json.dumps(body)
@@ -1024,7 +1110,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):

     def test_create_image_v1_1_xml_serialization(self):

-        body = dict(image=dict(serverRef='123', name='Backup 1'))
+        body = dict(image=dict(serverRef='123', name='Snapshot 1'))
         req = webob.Request.blank('/v1.1/images')
         req.method = 'POST'
         req.body = json.dumps(body)
@@ -1038,7 +1124,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         <image
             created="None"
             id="123"
-            name="Backup 1"
+            name="Snapshot 1"
             serverRef="http://localhost/v1.1/servers/123"
             status="ACTIVE"
             updated="None"
@@ -1057,7 +1143,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):

     def test_create_image_v1_1_no_server_ref(self):

-        body = dict(image=dict(name='Backup 1'))
+        body = dict(image=dict(name='Snapshot 1'))
         req = webob.Request.blank('/v1.1/images')
         req.method = 'POST'
         req.body = json.dumps(body)
@@ -1084,19 +1170,21 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
                     status='active', properties={})
         image_id += 1

-        # Backup for User 1
+        # Snapshot for User 1
         server_ref = 'http://localhost:8774/v1.1/servers/42'
-        backup_properties = {'instance_ref': server_ref, 'user_id': '1'}
+        snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'}
         for status in ('queued', 'saving', 'active', 'killed'):
-            add_fixture(id=image_id, name='%s backup' % status,
+            add_fixture(id=image_id, name='%s snapshot' % status,
                         is_public=False, status=status,
-                        properties=backup_properties)
+                        properties=snapshot_properties)
             image_id += 1

-        # Backup for User 2
-        other_backup_properties = {'instance_id': '43', 'user_id': '2'}
-        add_fixture(id=image_id, name='someone elses backup', is_public=False,
-                    status='active', properties=other_backup_properties)
+        # Snapshot for User 2
+        other_snapshot_properties = {'instance_id': '43', 'user_id': '2'}
+        add_fixture(id=image_id, name='someone elses snapshot',
+                    is_public=False, status='active',
+                    properties=other_snapshot_properties)
+
         image_id += 1

         # Image without a name
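The fixture hunk above only renames the pre-existing per-user images from 'backup' to 'snapshot'; real backups are instead distinguished by the image_type and backup_type values introduced with this change, and the rotation factor controls how many of them are kept. As a rough sketch of how a rotation pass could use that metadata (the property names and the created_at field are assumptions for illustration, not code from this commit):

def backups_to_rotate(images, server_ref, backup_type, rotation):
    # Illustrative only: pick the backups of one server and backup type,
    # oldest first, and return those that fall outside the rotation window.
    backups = [img for img in images
               if img.get('properties', {}).get('image_type') == 'backup'
               and img['properties'].get('backup_type') == backup_type
               and img['properties'].get('instance_ref') == server_ref]
    backups.sort(key=lambda img: img['created_at'])
    return backups[:-rotation]    # assumes rotation >= 1, as in the tests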
@@ -331,7 +331,7 @@ class LibvirtConnection(driver.ComputeDriver):
         if os.path.exists(target):
             shutil.rmtree(target)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def attach_volume(self, instance_name, device_path, mountpoint):
         virt_dom = self._lookup_by_name(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
@@ -375,7 +375,7 @@ class LibvirtConnection(driver.ComputeDriver):
         if doc is not None:
             doc.freeDoc()

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def detach_volume(self, instance_name, mountpoint):
         virt_dom = self._lookup_by_name(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
@@ -384,7 +384,7 @@ class LibvirtConnection(driver.ComputeDriver):
             raise exception.DiskNotFound(location=mount_device)
         virt_dom.detachDevice(xml)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def snapshot(self, instance, image_href):
         """Create snapshot from a running VM instance.

@@ -460,7 +460,7 @@ class LibvirtConnection(driver.ComputeDriver):
         # Clean up
         shutil.rmtree(temp_dir)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def reboot(self, instance):
         """Reboot a virtual machine, given an instance reference.

@@ -501,31 +501,31 @@ class LibvirtConnection(driver.ComputeDriver):
         timer = utils.LoopingCall(_wait_for_reboot)
         return timer.start(interval=0.5, now=True)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def pause(self, instance, callback):
         """Pause VM instance"""
         dom = self._lookup_by_name(instance.name)
         dom.suspend()

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def unpause(self, instance, callback):
         """Unpause paused VM instance"""
         dom = self._lookup_by_name(instance.name)
         dom.resume()

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def suspend(self, instance, callback):
         """Suspend the specified instance"""
         dom = self._lookup_by_name(instance.name)
         dom.managedSave(0)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def resume(self, instance, callback):
         """resume the specified instance"""
         dom = self._lookup_by_name(instance.name)
         dom.create()

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def rescue(self, instance):
         """Loads a VM using rescue images.

@@ -563,7 +563,7 @@ class LibvirtConnection(driver.ComputeDriver):
         timer = utils.LoopingCall(_wait_for_rescue)
         return timer.start(interval=0.5, now=True)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def unrescue(self, instance):
         """Reboot the VM which is being rescued back into primary images.

@@ -573,13 +573,13 @@ class LibvirtConnection(driver.ComputeDriver):
         """
         self.reboot(instance)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def poll_rescued_instances(self, timeout):
         pass

     # NOTE(ilyaalekseyev): Implementation like in multinics
     # for xenapi(tr3buchet)
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def spawn(self, instance, network_info=None, block_device_mapping=None):
         xml = self.to_xml(instance, False, network_info=network_info,
                           block_device_mapping=block_device_mapping)
@@ -642,7 +642,7 @@ class LibvirtConnection(driver.ComputeDriver):
         LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
         return contents

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def get_console_output(self, instance):
         console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                    'console.log')
@@ -663,7 +663,7 @@ class LibvirtConnection(driver.ComputeDriver):

         return self._dump_file(fpath)

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def get_ajax_console(self, instance):
         def get_open_port():
             start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
@@ -704,7 +704,7 @@ class LibvirtConnection(driver.ComputeDriver):
     def get_host_ip_addr(self):
         return FLAGS.my_ip

-    @exception.wrap_exception
+    @exception.wrap_exception()
     def get_vnc_console(self, instance):
         def get_vnc_port_for_instance(instance_name):
             virt_dom = self._lookup_by_name(instance_name)
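Every libvirt driver method above switches from @exception.wrap_exception to @exception.wrap_exception(): the name is now a factory that must be called, even with no arguments, and the call returns the real decorator. A minimal sketch of that shape, assuming a logging-only wrapper and a hypothetical notifier hook (not nova's actual implementation):

import functools
import logging

LOG = logging.getLogger(__name__)


def wrap_exception(notifier=None):
    # Factory: calling wrap_exception() yields the decorator itself.
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                LOG.exception("%s failed", func.__name__)
                if notifier is not None:
                    notifier(func.__name__)   # hypothetical hook
                raise
        return wrapped
    return decorator


@wrap_exception()   # parentheses are now required
def reboot(instance):
    pass

In the sketch above, writing @wrap_exception without the call would pass the method itself as the notifier argument and bind the inner decorator in its place, which is why every call site gains the parentheses.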
@@ -6,7 +6,8 @@ function usage {
   echo ""
   echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
   echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
-  echo " -r, --recreate-db Recreate the test database."
+  echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
+  echo " -n, --no-recreate-db Don't recreate the test database."
   echo " -x, --stop Stop running tests after the first error or failure."
   echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
   echo " -p, --pep8 Just run pep8"
@@ -25,6 +26,7 @@ function process_option {
     -V|--virtual-env) let always_venv=1; let never_venv=0;;
     -N|--no-virtual-env) let always_venv=0; let never_venv=1;;
     -r|--recreate-db) let recreate_db=1;;
+    -n|--no-recreate-db) let recreate_db=0;;
     -f|--force) let force=1;;
     -p|--pep8) let just_pep8=1;;
     -*) noseopts="$noseopts $1";;
@@ -41,7 +43,7 @@ noseargs=
 noseopts=
 wrapper=""
 just_pep8=0
-recreate_db=0
+recreate_db=1

 for arg in "$@"; do
   process_option $arg