Unbreak start instance and fix bug 905270

This patch fixes bug 905270:
https://bugs.launchpad.net/nova/+bug/905270

According to the EC2 documentation, an EBS-backed instance that initiates
 its own shutdown ends up in the stopped state and can then be started
 again.  (A non-EBS-backed instance, by contrast, ends up terminated when
 it initiates its own shutdown.)
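
For illustration, here is the rule described above as a tiny standalone
sketch (hedged; this is not nova code and the names are hypothetical):

    def state_after_guest_shutdown(ebs_backed, shutdown_behavior='stop'):
        # Instance-store (non-EBS) instances always terminate on an
        # instance-initiated shutdown.
        if not ebs_backed:
            return 'terminated'
        # EBS-backed instances honor instanceInitiatedShutdownBehavior,
        # which defaults to 'stop'.
        return 'terminated' if shutdown_behavior == 'terminate' else 'stopped'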

In current nova, however, an instance that shuts itself down always ends
 up in the terminated state.  Related issues:
 - the describe-instance-attribute attribute
   instance_initiated_shutdown_behavior doesn't work correctly
 - the instance attribute disable_api_termination isn't supported
 - stop instance was broken by the following change set and needs to be
   unbroken:

  > commit eb03d47fec
  >  Author: Vishvananda Ishaya <vishvananda@gmail.com>
  >  Date: Fri Sep 23 09:22:32 2011 -0700
  >
  >    Remove AoE, Clean up volume code
  >
  >     * Removes Ata Over Ethernet
  >     * Adds drivers to libvirt for volumes
  >     * Adds initialize_connection and terminate_connection to volume api
  >     * Passes connection info back through volume api
  >
  >  Change-Id: I1b1626f40bebe8466ab410fb174683293c7c474f

This patch:
- unbreaks start instance
- implements instance_initiated_shutdown_behavior and makes it EC2-compatible
- implements disable_api_termination

---
Changes 5 -> 6:
- fixes to catch up with 26b7b9457a

Changes 4 -> 5:
- HACKING compliance

Changes 3 -> 4:
- rebased onto 4c5586a28f
  (sqlalchemy migrate version renumbered accordingly)

Changes 2 -> 3:
- rename long name to shorter one
  s/instance_initiated_shutdown_behavior/shutdown_terminate/g
  s/disable_api_termination/disable_terminate/g
  as suggested by Kevin L. Mitchell
- improved nova.api.ec2.cloud._state_description
- pep8
- broken-out patches are available for easy review at
  git://github.com/yamahata/nova.git lp905270-2

Changes 1 -> 2:
- fixed a unit test failure pointed out by Mark.
  (I think the ebtables failure strongly suggests an installation problem)
- introduced vm_states.SHUTOFF and put instances whose power state is
  power_state.{NOSTATE, SHUTOFF} into vm_states.SHUTOFF.
- simplified the logic a bit by using vm_states.SHUTOFF, as suggested by
  Vish.
- instance_initiated_shutdown_behavior:String(255)
   =>
  instance_initiated_shutdown_terminate:Boolean()
  as suggested by Vish.
- Added Johannes Erdfelt to the reviewers, as they wrote the vm_states
  state machine checker.
  I would have liked to add David Subiros as well, but he doesn't seem to
  be a registered gerrit user.

Change-Id: Ibeb94f65137feadad2c343913b39195e3f96a35e
Author: Isaku Yamahata
Date:   2011-12-14 16:15:52 +09:00
Parent: 6ece432be0
Commit: 932f3aafd1
17 changed files with 401 additions and 87 deletions


@@ -32,8 +32,10 @@ import urllib
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.api.ec2 import inst_state
from nova import block_device
from nova import compute
from nova.compute import power_state
from nova.compute import vm_states
from nova import crypto
from nova import db
@@ -79,26 +81,35 @@ def _gen_key(context, user_id, key_name):
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending | running | shutting-down | terminated | stopping | stopped
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
_STATE_DESCRIPTION_MAP = {
None: 'pending',
vm_states.ACTIVE: 'running',
vm_states.BUILDING: 'pending',
vm_states.REBUILDING: 'pending',
vm_states.DELETED: 'terminated',
vm_states.SOFT_DELETE: 'terminated',
vm_states.STOPPED: 'stopped',
vm_states.MIGRATING: 'migrate',
vm_states.RESIZING: 'resize',
vm_states.PAUSED: 'pause',
vm_states.SUSPENDED: 'suspend',
vm_states.RESCUED: 'rescue',
None: inst_state.PENDING,
vm_states.ACTIVE: inst_state.RUNNING,
vm_states.BUILDING: inst_state.PENDING,
vm_states.REBUILDING: inst_state.PENDING,
vm_states.DELETED: inst_state.TERMINATED,
vm_states.SOFT_DELETE: inst_state.TERMINATED,
vm_states.STOPPED: inst_state.STOPPED,
vm_states.SHUTOFF: inst_state.SHUTOFF,
vm_states.MIGRATING: inst_state.MIGRATE,
vm_states.RESIZING: inst_state.RESIZE,
vm_states.PAUSED: inst_state.PAUSE,
vm_states.SUSPENDED: inst_state.SUSPEND,
vm_states.RESCUED: inst_state.RESCUE,
}
def state_description_from_vm_state(vm_state):
def _state_description(vm_state, shutdown_terminate):
"""Map the vm state to the server status string"""
return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
if (vm_state == vm_states.SHUTOFF and
not shutdown_terminate):
name = inst_state.STOPPED
else:
name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name),
'name': name}
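# For illustration (hedged; not part of the patch), expected results given
# the map above and the codes defined in nova.api.ec2.inst_state:
#   _state_description(vm_states.SHUTOFF, False)
#     => {'code': 80, 'name': 'stopped'}     # STOPPED_CODE
#   _state_description(vm_states.SHUTOFF, True)
#     => {'code': 48, 'name': 'shutoff'}     # TERMINATED_CODE approximation
#   _state_description(vm_states.ACTIVE, True)
#     => {'code': 16, 'name': 'running'}     # RUNNING_CODE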
def _parse_block_device_mapping(bdm):
@@ -987,21 +998,17 @@ class CloudController(object):
tmp['rootDeviceName'], result)
def _format_attr_disable_api_termination(instance, result):
_unsupported_attribute(instance, result)
result['disableApiTermination'] = instance['disable_terminate']
def _format_attr_group_set(instance, result):
CloudController._format_group_set(instance, result)
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
vm_state = instance['vm_state']
state_to_value = {
vm_states.STOPPED: 'stopped',
vm_states.DELETED: 'terminated',
}
value = state_to_value.get(vm_state)
if value:
result['instanceInitiatedShutdownBehavior'] = value
if instance['shutdown_terminate']:
result['instanceInitiatedShutdownBehavior'] = 'terminate'
else:
result['instanceInitiatedShutdownBehavior'] = 'stop'
def _format_attr_instance_type(instance, result):
self._format_instance_type(instance, result)
@@ -1157,9 +1164,8 @@ class CloudController(object):
i['imageId'] = ec2utils.image_ec2_id(image_id)
self._format_kernel_id(context, instance, i, 'kernelId')
self._format_ramdisk_id(context, instance, i, 'ramdiskId')
i['instanceState'] = {
'code': instance['power_state'],
'name': state_description_from_vm_state(instance['vm_state'])}
i['instanceState'] = _state_description(
instance['vm_state'], instance['shutdown_terminate'])
fixed_ip = None
floating_ip = None
@@ -1575,10 +1581,11 @@ class CloudController(object):
vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
if vm_state == vm_states.ACTIVE:
if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF):
restart_instance = True
self.compute_api.stop(context, instance_id=instance_id)


@@ -0,0 +1,60 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Isaku Yamahata <yamahata at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
PENDING_CODE = 0
RUNNING_CODE = 16
SHUTTING_DOWN_CODE = 32
TERMINATED_CODE = 48
STOPPING_CODE = 64
STOPPED_CODE = 80
PENDING = 'pending'
RUNNING = 'running'
SHUTTING_DOWN = 'shutting-down'
TERMINATED = 'terminated'
STOPPING = 'stopping'
STOPPED = 'stopped'
# non-ec2 value
SHUTOFF = 'shutoff'
MIGRATE = 'migrate'
RESIZE = 'resize'
PAUSE = 'pause'
SUSPEND = 'suspend'
RESCUE = 'rescue'
# EC2 API instance status code
_NAME_TO_CODE = {
PENDING: PENDING_CODE,
RUNNING: RUNNING_CODE,
SHUTTING_DOWN: SHUTTING_DOWN_CODE,
TERMINATED: TERMINATED_CODE,
STOPPING: STOPPING_CODE,
STOPPED: STOPPED_CODE,
# approximation
SHUTOFF: TERMINATED_CODE,
MIGRATE: RUNNING_CODE,
RESIZE: RUNNING_CODE,
PAUSE: STOPPED_CODE,
SUSPEND: STOPPED_CODE,
RESCUE: RUNNING_CODE,
}
def name_to_code(name):
return _NAME_TO_CODE.get(name, PENDING_CODE)
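
A quick usage sketch (hedged; assuming the import path nova.api.ec2.inst_state
used by cloud.py above):

    from nova.api.ec2 import inst_state

    inst_state.name_to_code(inst_state.RUNNING)   # 16
    inst_state.name_to_code(inst_state.SHUTOFF)   # 48, approximated as
                                                  # terminated
    inst_state.name_to_code('no-such-state')      # unknown names fall back
                                                  # to PENDING_CODE (0)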


@@ -57,6 +57,9 @@ _STATE_MAP = {
vm_states.STOPPED: {
'default': 'STOPPED',
},
vm_states.SHUTOFF: {
'default': 'SHUTOFF',
},
vm_states.MIGRATING: {
'default': 'MIGRATING',
},


@@ -483,6 +483,11 @@ class API(base.Base):
updates['vm_state'] = vm_states.BUILDING
updates['task_state'] = task_states.SCHEDULING
if (image['properties'].get('mappings', []) or
image['properties'].get('block_device_mapping', []) or
block_device_mapping):
updates['shutdown_terminate'] = False
instance = self.update(context, instance, **updates)
return instance
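
In other words, an instance whose image carries EC2-style mappings, or that
is launched with an explicit block device mapping, is treated as EBS-backed
and defaults to stop-on-shutdown.  A minimal sketch of the predicate (not
nova code):

    def default_shutdown_terminate(image_properties, block_device_mapping):
        ebs_backed = bool(image_properties.get('mappings')
                          or image_properties.get('block_device_mapping')
                          or block_device_mapping)
        # shutdown_terminate stays True (terminate on guest shutdown) only
        # for instance-store style instances
        return not ebs_backed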
@@ -771,13 +776,17 @@ class API(base.Base):
rv = self.db.instance_update(context, instance["id"], kwargs)
return dict(rv.iteritems())
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.ERROR])
@scheduler_api.reroute_compute("soft_delete")
def soft_delete(self, context, instance):
"""Terminate an instance."""
instance_uuid = instance["uuid"]
LOG.debug(_("Going to try to soft delete %s"), instance_uuid)
if instance['disable_terminate']:
return
# NOTE(jerdfelt): The compute daemon handles reclaiming instances
# that are in soft delete. If there is no host assigned, there is
# no daemon to reclaim, so delete it immediately.
@@ -812,13 +821,17 @@ class API(base.Base):
# NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are
# allowed but the EC2 API appears to allow from RESCUED and STOPPED
# too
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR,
vm_states.RESCUED, vm_states.STOPPED])
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.ERROR, vm_states.RESCUED,
vm_states.STOPPED])
@scheduler_api.reroute_compute("delete")
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate %s"), instance["uuid"])
if instance['disable_terminate']:
return
self._delete(context, instance)
@check_instance_state(vm_state=[vm_states.SOFT_DELETE])
@@ -845,10 +858,11 @@ class API(base.Base):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete(context, instance)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("stop")
def stop(self, context, instance):
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
instance_uuid = instance["uuid"]
LOG.debug(_("Going to try to stop %s"), instance_uuid)
@@ -861,21 +875,31 @@ class API(base.Base):
progress=0)
host = instance['host']
if host:
if not host:
return
if do_cast:
self._cast_compute_message('stop_instance', context,
instance_uuid, host)
else:
self._call_compute_message('stop_instance', context, instance)
@check_instance_state(vm_state=[vm_states.STOPPED])
@check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF])
def start(self, context, instance):
"""Start an instance."""
vm_state = instance["vm_state"]
instance_uuid = instance["uuid"]
LOG.debug(_("Going to try to start %s"), instance_uuid)
if vm_state != vm_states.STOPPED:
LOG.warning(_("Instance %(instance_uuid)s is not "
"stopped. (%(vm_state)s)") % locals())
return
if vm_state == vm_states.SHUTOFF:
if instance['shutdown_terminate']:
LOG.warning(_("Instance %(instance_uuid)s is not "
"stopped. (%(vm_state)s") % locals())
return
# NOTE(yamahata): nova compute doesn't reap instances
# which initiated shutdown itself. So reap it here.
self.stop(context, instance, do_cast=False)
self.update(context,
instance,
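
A condensed sketch of the resulting start() flow (hedged; not a verbatim
copy of the patch):

    if instance['vm_state'] == vm_states.SHUTOFF:
        if instance['shutdown_terminate']:
            return  # EC2 semantics: this instance terminates, not restarts
        # nova compute doesn't reap instances that shut themselves down,
        # so reap here with a synchronous stop before scheduling the start
        self.stop(context, instance, do_cast=False)
    # ... then update to vm_states.STOPPED / task_states.STARTING and cast
    # 'start_instance' to the instance's host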
@@ -1077,7 +1101,7 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_uuid)
@check_instance_state(vm_state=[vm_states.ACTIVE],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("backup")
def backup(self, context, instance, name, backup_type, rotation,
@@ -1096,7 +1120,7 @@ class API(base.Base):
extra_properties=extra_properties)
return recv_meta
@check_instance_state(vm_state=[vm_states.ACTIVE],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("snapshot")
def snapshot(self, context, instance, name, extra_properties=None):
@@ -1175,7 +1199,8 @@ class API(base.Base):
return min_ram, min_disk
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("reboot")
def reboot(self, context, instance, reboot_type):
@@ -1191,7 +1216,7 @@ class API(base.Base):
instance['uuid'],
params={'reboot_type': reboot_type})
@check_instance_state(vm_state=[vm_states.ACTIVE],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("rebuild")
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
@@ -1221,7 +1246,7 @@ class API(base.Base):
instance["uuid"],
params=rebuild_params)
@check_instance_state(vm_state=[vm_states.ACTIVE],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("revert_resize")
def revert_resize(self, context, instance):
@@ -1247,7 +1272,7 @@ class API(base.Base):
self.db.migration_update(context, migration_ref['id'],
{'status': 'reverted'})
@check_instance_state(vm_state=[vm_states.ACTIVE],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("confirm_resize")
def confirm_resize(self, context, instance):
@@ -1275,7 +1300,7 @@ class API(base.Base):
self.db.instance_update(context, instance['uuid'],
{'host': migration_ref['dest_compute'], })
@check_instance_state(vm_state=[vm_states.ACTIVE],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None])
@scheduler_api.reroute_compute("resize")
def resize(self, context, instance, flavor_id=None):
@@ -1358,7 +1383,8 @@ class API(base.Base):
# didn't raise so this is the correct zone
self.network_api.add_network_to_project(context, project_id)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("pause")
def pause(self, context, instance):
@@ -1408,7 +1434,8 @@ class API(base.Base):
"""Retrieve actions for the given instance."""
return self.db.instance_get_actions(context, instance['uuid'])
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("suspend")
def suspend(self, context, instance):
@@ -1431,7 +1458,8 @@ class API(base.Base):
task_state=task_states.RESUMING)
self._cast_compute_message('resume_instance', context, instance_uuid)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.STOPPED],
task_state=[None, task_states.RESIZE_VERIFY])
@scheduler_api.reroute_compute("rescue")
def rescue(self, context, instance, rescue_password=None):


@@ -571,7 +571,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# I think start will fail due to the files still
self._run_instance(context, instance_uuid)
def _shutdown_instance(self, context, instance, action_str, cleanup):
def _shutdown_instance(self, context, instance, action_str):
"""Shutdown an instance on this host."""
context = context.elevated()
instance_id = instance['id']
@@ -592,7 +592,7 @@ class ComputeManager(manager.SchedulerDependentManager):
bdms = self._get_instance_volume_bdms(context, instance_id)
block_device_info = self._get_instance_volume_block_device_info(
context, instance_id)
self.driver.destroy(instance, network_info, block_device_info, cleanup)
self.driver.destroy(instance, network_info, block_device_info)
for bdm in bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
@@ -616,7 +616,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _delete_instance(self, context, instance):
"""Delete an instance on this host."""
instance_id = instance['id']
self._shutdown_instance(context, instance, 'Terminating', True)
self._shutdown_instance(context, instance, 'Terminating')
self._cleanup_volumes(context, instance_id)
self._instance_update(context,
instance_id,
@@ -646,12 +646,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@wrap_instance_fault
def stop_instance(self, context, instance_uuid):
"""Stopping an instance on this host."""
# FIXME(vish): I've kept the files during stop instance, but
# I think start will fail due to the files still
# existing. I don't really know what the purpose of
# stop and start are when compared to pause and unpause
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._shutdown_instance(context, instance, 'Stopping', False)
self._shutdown_instance(context, instance, 'Stopping')
self._instance_update(context,
instance_uuid,
vm_state=vm_states.STOPPED,
@@ -2030,9 +2026,16 @@ class ComputeManager(manager.SchedulerDependentManager):
if vm_power_state == db_power_state:
continue
self._instance_update(context,
db_instance["id"],
power_state=vm_power_state)
if (vm_power_state in (power_state.NOSTATE, power_state.SHUTOFF)
and db_instance['vm_state'] == vm_states.ACTIVE):
self._instance_update(context,
db_instance["id"],
power_state=vm_power_state,
vm_state=vm_states.SHUTOFF)
else:
self._instance_update(context,
db_instance["id"],
power_state=vm_power_state)
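# Hedged illustration of the rule above (not part of the patch): a
# hypervisor report of NOSTATE or SHUTOFF for an instance the database
# still considers ACTIVE means the guest shut itself down, so vm_state is
# moved to SHUTOFF as well; any other mismatch only refreshes power_state.
#
#     def _sync_vm_state(db_vm_state, vm_power_state):
#         if (vm_power_state in (power_state.NOSTATE, power_state.SHUTOFF)
#                 and db_vm_state == vm_states.ACTIVE):
#             return vm_states.SHUTOFF
#         return db_vm_state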
@manager.periodic_task
def _reclaim_queued_deletes(self, context):


@@ -29,6 +29,7 @@ REBUILDING = 'rebuilding'
PAUSED = 'paused'
SUSPENDED = 'suspended'
SHUTOFF = 'shutoff'
RESCUED = 'rescued'
DELETED = 'deleted'
STOPPED = 'stopped'


@@ -0,0 +1,38 @@
# Copyright 2011 Isaku Yamahata
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Boolean, String
from sqlalchemy import Column, Table
meta = MetaData()
shutdown_terminate = Column(
'shutdown_terminate', Boolean(), default=True)
disable_terminate = Column(
'disable_terminate', Boolean(), default=False)
def upgrade(migrate_engine):
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances.create_column(shutdown_terminate)
instances.create_column(disable_terminate)
def downgrade(migrate_engine):
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances.drop_column(shutdown_terminate)
instances.drop_column(disable_terminate)
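
For what it's worth, the migration can be exercised standalone against a
scratch database (a hedged sketch; the URL is hypothetical and an
'instances' table at the prior schema version must already exist):

    from sqlalchemy import create_engine

    engine = create_engine('sqlite:////tmp/scratch-nova.sqlite')
    upgrade(engine)    # adds shutdown_terminate and disable_terminate
    downgrade(engine)  # drops them again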


@@ -257,6 +257,14 @@ class Instance(BASE, NovaBase):
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
shutdown_terminate = Column(Boolean(), default=True, nullable=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False, nullable=False)
class InstanceInfoCache(BASE, NovaBase):
"""


@@ -27,6 +27,8 @@ from M2Crypto import RSA
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import crypto
@@ -597,6 +599,38 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
"""Makes sure describe_instances for instanceState works."""
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
values = values or {}
values.update({'image_ref': image_uuid, 'instance_type_id': 1,
'power_state': power_state_, 'vm_state': vm_state_})
inst = db.instance_create(self.context, values)
instance_id = ec2utils.id_to_ec2_id(inst['id'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
result = result['instancesSet'][0]['instanceState']
name = result['name']
code = result['code']
self.assertEqual(code, expected_code)
self.assertEqual(name, expected_name)
db.instance_destroy(self.context, inst['id'])
test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
power_state.RUNNING, vm_states.ACTIVE)
test_instance_state(inst_state.TERMINATED_CODE, inst_state.SHUTOFF,
power_state.NOSTATE, vm_states.SHUTOFF)
test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
power_state.NOSTATE, vm_states.SHUTOFF,
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
"""Makes sure describe_instances w/ no ipv6 works."""
self.flags(use_ipv6=False)
@@ -1794,6 +1828,8 @@ class CloudTestCase(test.TestCase):
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'user_data': 'fake-user data',
'shutdown_terminate': False,
'disable_terminate': False,
}
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
@@ -1822,8 +1858,6 @@ class CloudTestCase(test.TestCase):
'attachTime': '13:56:24'}}]}
expected_bdm['blockDeviceMapping'].sort()
self.assertEqual(bdm, expected_bdm)
# NOTE(yamahata): this isn't supported
# get_attribute('disableApiTermination')
groupSet = get_attribute('groupSet')
groupSet['groupSet'].sort()
expected_groupSet = {'instance_id': 'i-12345678',
@@ -1833,7 +1867,10 @@ class CloudTestCase(test.TestCase):
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
'instanceInitiatedShutdownBehavior': 'stopped'})
'instanceInitiatedShutdownBehavior': 'stop'})
self.assertEqual(get_attribute('disableApiTermination'),
{'instance_id': 'i-12345678',
'disableApiTermination': False})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
@@ -1851,3 +1888,72 @@ class CloudTestCase(test.TestCase):
self.assertEqual(get_attribute('userData'),
{'instance_id': 'i-12345678',
'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
def test_instance_initiated_shutdown_behavior(self):
def test_dia_iisb(expected_result, **kwargs):
"""test describe_instance_attribute
attribute instance_initiated_shutdown_behavior"""
kwargs.update({'instance_type': FLAGS.default_instance_type,
'max_count': 1})
instance_id = self._run_instance(**kwargs)
result = self.cloud.describe_instance_attribute(self.context,
instance_id, 'instanceInitiatedShutdownBehavior')
self.assertEqual(result['instanceInitiatedShutdownBehavior'],
expected_result)
result = self.cloud.terminate_instances(self.context,
[instance_id])
self.assertTrue(result)
self._restart_compute_service()
test_dia_iisb('terminate', image_id='ami-1')
block_device_mapping = [{'device_name': '/dev/vdb',
'virtual_name': 'ephemeral0'}]
test_dia_iisb('stop', image_id='ami-2',
block_device_mapping=block_device_mapping)
def fake_show(self, context, id_):
LOG.debug("id_ %s", id_)
prop = {}
if id_ == 'ami-3':
pass
elif id_ == 'ami-4':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}]}
elif id_ == 'ami-5':
prop = {'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
elif id_ == 'ami-6':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}],
'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}
prop_base.update(prop)
return {
'id': id_,
'properties': prop_base,
'container_format': 'ami',
'status': 'active'}
# NOTE(yamahata): create ami-3 ... ami-6
# ami-1 and ami-2 are already created by setUp()
for i in range(3, 7):
db.api.s3_image_create(self.context, 'ami-%d' % i)
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
test_dia_iisb('terminate', image_id='ami-3')
test_dia_iisb('stop', image_id='ami-4')
test_dia_iisb('stop', image_id='ami-5')
test_dia_iisb('stop', image_id='ami-6')


@@ -560,6 +560,8 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
"progress": progress,
"auto_disk_config": auto_disk_config,
"name": "instance-%s" % id,
"fixed_ips": fixed_ips}
"fixed_ips": fixed_ips,
"shutdown_terminate": True,
"disable_terminate": False}
return instance


@@ -1358,6 +1358,15 @@ class ComputeAPITestCase(BaseTestCase):
'properties': {'kernel_id': 1, 'ramdisk_id': 1},
}
def _run_instance(self):
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance_uuid)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
return instance, instance_uuid
def test_create_with_too_little_ram(self):
"""Test an instance type with too little memory"""
@@ -1554,13 +1563,45 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['id'])
def test_delete(self):
def test_start_shutdown(self):
def check_state(instance_uuid, power_state_, vm_state_, task_state_):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['power_state'], power_state_)
self.assertEqual(instance['vm_state'], vm_state_)
self.assertEqual(instance['task_state'], task_state_)
def start_check_state(instance_uuid,
power_state_, vm_state_, task_state_):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.start(self.context, instance)
check_state(instance_uuid, power_state_, vm_state_, task_state_)
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance_uuid)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
check_state(instance_uuid, power_state.RUNNING, vm_states.ACTIVE, None)
# NOTE(yamahata): emulate compute.manager._sync_power_state() that
# the instance is shutdown by itself
db.instance_update(self.context, instance_uuid,
{'power_state': power_state.NOSTATE,
'vm_state': vm_states.SHUTOFF})
check_state(instance_uuid, power_state.NOSTATE, vm_states.SHUTOFF,
None)
start_check_state(instance_uuid,
power_state.NOSTATE, vm_states.SHUTOFF, None)
db.instance_update(self.context, instance_uuid,
{'shutdown_terminate': False})
start_check_state(instance_uuid, power_state.NOSTATE,
vm_states.STOPPED, task_states.STARTING)
db.instance_destroy(self.context, instance['id'])
def test_delete(self):
instance, instance_uuid = self._run_instance()
self.compute_api.delete(self.context, instance)
@@ -1569,14 +1610,21 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['id'])
def test_delete_soft(self):
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance['uuid'])
def test_delete_fail(self):
instance, instance_uuid = self._run_instance()
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
self.compute_api.delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
db.instance_destroy(self.context, instance['id'])
def test_delete_soft(self):
instance, instance_uuid = self._run_instance()
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@@ -1584,6 +1632,18 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['id'])
def test_delete_soft_fail(self):
instance, instance_uuid = self._run_instance()
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
db.instance_destroy(self.context, instance['id'])
def test_force_delete(self):
"""Ensure instance can be deleted after a soft delete"""
instance = self._create_fake_instance()
@@ -1621,7 +1681,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
instance_id = instance['id']
self.compute.run_instance(self.context, instance_uuid )
self.compute.run_instance(self.context, instance_uuid)
db.instance_update(self.context, instance_id,
{'vm_state': vm_states.SUSPENDED})
instance = db.instance_get(self.context, instance_id)


@@ -152,8 +152,7 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None,
cleanup=True):
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
@@ -165,7 +164,6 @@
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param cleanup:
"""
# TODO(Vek): Need to pass context in for access to auth_token


@@ -162,8 +162,7 @@ class FakeConnection(driver.ComputeDriver):
def resume(self, instance):
pass
def destroy(self, instance, network_info, block_device_info=None,
cleanup=True):
def destroy(self, instance, network_info, block_device_info=None):
key = instance['name']
if key in self.instances:
del self.instances[key]


@@ -374,8 +374,7 @@ class HyperVConnection(driver.ComputeDriver):
raise exception.InstanceNotFound(instance_id=instance.id)
self._set_vm_state(instance.name, 'Reboot')
def destroy(self, instance, network_info, block_device_info=None,
cleanup=True):
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy the VM. Also destroy the associated VHD disk files"""
LOG.debug(_("Got request to destroy vm %s"), instance.name)
vm = self._lookup(instance.name)


@@ -304,8 +304,8 @@ class LibvirtConnection(driver.ComputeDriver):
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, network, mapping)
def destroy(self, instance, network_info, block_device_info=None,
cleanup=True):
def _destroy(self, instance, network_info, block_device_info=None,
cleanup=True):
instance_name = instance['name']
try:
@@ -393,6 +393,10 @@ class LibvirtConnection(driver.ComputeDriver):
return True
def destroy(self, instance, network_info, block_device_info=None):
return self._destroy(instance, network_info, block_device_info,
cleanup=True)
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
instance_name = instance['name']
@@ -554,7 +558,7 @@
# NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
# better because we cannot ensure flushing dirty buffers
# in the guest OS. But, in case of KVM, shutdown() does not work...
self.destroy(instance, network_info, cleanup=False)
self._destroy(instance, network_info, cleanup=False)
self.unplug_vifs(instance, network_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)


@@ -137,8 +137,7 @@ class VMWareESXConnection(driver.ComputeDriver):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, block_device_info=None,
cleanup=True):
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info)


@@ -232,8 +232,7 @@ class XenAPIConnection(driver.ComputeDriver):
"""
self._vmops.inject_file(instance, b64_path, b64_contents)
def destroy(self, instance, network_info, block_device_info=None,
cleanup=True):
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy VM instance"""
self._vmops.destroy(instance, network_info)