Do not detach/re-attach volumes in AWS Instance
Currently, during resource suspend/resume/delete we manually detach and re-attach volumes on the AWS Instance resource. This is not needed: Nova already keeps volumes attached to suspended instances, so the attachments are readily available on resume, and it automatically detaches volumes when an instance is terminated. Besides, since AWS CloudFormation has no support for stack/resource suspend/resume, we do not have to stay compatible with any CFN behaviour in this regard.

Change-Id: I38234d68467b5a4036e1a474a1e89d10686e1c3e
Related-Bug: #1393268
parent 216190ce48
commit 5781b740a3
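Reviewer note: the premise above, that Nova keeps volume attachments across suspend/resume and cleans them up on termination, can be checked against a running cloud. The sketch below is illustrative only and is not part of this change; it assumes a python-novaclient of roughly this era that still exposes volumes.get_server_volumes, and the auth URL, credentials and SERVER_ID are placeholders.

# Illustrative sketch, not part of this commit. Assumes an older
# python-novaclient that still provides volumes.get_server_volumes
# (newer clients query GET /servers/{id}/os-volume_attachments instead).
import time

from keystoneauth1 import identity, session
from novaclient import client as nova_client

auth = identity.Password(auth_url='http://controller:5000/v3',  # placeholder
                         username='demo', password='secret',
                         project_name='demo',
                         user_domain_id='default',
                         project_domain_id='default')
nova = nova_client.Client('2.1', session=session.Session(auth=auth))

SERVER_ID = 'replace-with-a-server-uuid'  # placeholder

before = nova.volumes.get_server_volumes(SERVER_ID)  # attachments before suspend
nova.servers.suspend(SERVER_ID)
while nova.servers.get(SERVER_ID).status != 'SUSPENDED':
    time.sleep(2)
after = nova.volumes.get_server_volumes(SERVER_ID)   # still attached while suspended
assert {a.id for a in before} == {a.id for a in after}

nova.servers.resume(SERVER_ID)  # the same attachments are usable again on resume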
@@ -779,16 +779,6 @@ class Instance(resource.Resource):
                         "when specifying BlockDeviceMappings.")
                 raise exception.StackValidationFailed(message=msg)

-    def _detach_volumes_task(self):
-        '''
-        Detach volumes from the instance
-        '''
-        detach_tasks = (vol_task.VolumeDetachTask(self.stack,
-                                                  self.resource_id,
-                                                  volume_id)
-                        for volume_id, device in self.volumes())
-        return scheduler.PollingTaskGroup(detach_tasks)
-
     def handle_delete(self):
         # make sure to delete the port which implicit-created by heat
         self._port_data_delete()
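For context: the helper removed above built a scheduler.PollingTaskGroup containing one VolumeDetachTask per attached volume, and callers (see the delete and suspend hunks below) drove that group through a scheduler.TaskRunner. The condensed sketch below uses only the names and call signature visible in the deleted lines; the import path for the volume tasks is an assumption, and the snippet is an illustration, not Heat API documentation.

# Sketch of the pattern this commit removes. VolumeDetachTask(stack, server_id,
# volume_id) is the signature shown in the deleted code; the module path below
# is an assumption for illustration only.
from heat.engine import scheduler
from heat.engine.resources import volume as vol_task  # assumed location at the time


def detach_all_volumes(stack, server_id, volumes):
    """Build one detach task per (volume_id, device) pair and poll them as a group."""
    detach_tasks = (vol_task.VolumeDetachTask(stack, server_id, volume_id)
                    for volume_id, device in volumes)
    return scheduler.PollingTaskGroup(detach_tasks)


# A caller would wrap the group in a TaskRunner and step it to completion,
# exactly as the old handle_delete/handle_suspend code did:
#     runner = scheduler.TaskRunner(detach_all_volumes(stack, server_id, volumes))
#     runner.start()
#     while not runner.step():
#         pass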
@@ -800,21 +790,16 @@ class Instance(resource.Resource):
         except Exception as e:
             self.client_plugin().ignore_not_found(e)
             return
-        deleters = (
-            scheduler.TaskRunner(self._detach_volumes_task()),
-            scheduler.TaskRunner(self.client_plugin().delete_server,
-                                 server))
-        deleters[0].start()
-        return deleters
+        deleter = scheduler.TaskRunner(self.client_plugin().delete_server,
+                                       server)
+        deleter.start()
+        return deleter

-    def check_delete_complete(self, deleters):
+    def check_delete_complete(self, deleter):
         # if the resource was already deleted, deleters will be None
-        if deleters:
-            for deleter in deleters:
-                if not deleter.started():
-                    deleter.start()
-                if not deleter.step():
-                    return False
+        if deleter:
+            if not deleter.step():
+                return False
         return True

     def handle_suspend(self):
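Why a single TaskRunner is enough here: the engine calls handle_delete() once, then keeps calling check_delete_complete() with whatever handle_delete() returned until it reports True. The stand-in below illustrates that contract with the new single-deleter shape; it is a simplified sketch, not the actual Heat engine loop, and run_delete/FakeResource are invented names.

# Simplified illustration of the handle_X/check_X_complete polling contract
# that the new delete path relies on. Not real Heat engine code.
import time


def run_delete(resource, poll_interval=1.0):
    cookie = resource.handle_delete()           # e.g. a started TaskRunner, or None
    while not resource.check_delete_complete(cookie):
        time.sleep(poll_interval)               # the real engine yields to its scheduler instead


class FakeResource(object):
    """Stand-in mirroring the new delete path: one deleter, stepped until done."""

    def __init__(self, deleter):
        self._deleter = deleter

    def handle_delete(self):
        self._deleter.start()
        return self._deleter

    def check_delete_complete(self, deleter):
        # if the resource was already deleted, deleter will be None
        if deleter:
            return deleter.step()               # step() is True once the task has finished
        return True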
@@ -835,44 +820,27 @@ class Instance(resource.Resource):
                                          self.resource_id)
         else:
             LOG.debug("suspending instance %s" % self.resource_id)
-            # We want the server.suspend to happen after the volume
-            # detachement has finished, so pass both tasks and the server
-            suspend_runner = scheduler.TaskRunner(server.suspend)
-            volumes_runner = scheduler.TaskRunner(self._detach_volumes_task())
-            return server, suspend_runner, volumes_runner
+            server.suspend()
+            return server

-    def check_suspend_complete(self, cookie):
-        server, suspend_runner, volumes_runner = cookie
-
-        if not volumes_runner.started():
-            volumes_runner.start()
-
-        if volumes_runner.done():
-            if not suspend_runner.started():
-                suspend_runner.start()
-
-            if suspend_runner.done():
-                if server.status == 'SUSPENDED':
-                    return True
-
-                cp = self.client_plugin()
-                cp.refresh_server(server)
-                LOG.debug("%(name)s check_suspend_complete "
-                          "status = %(status)s",
-                          {'name': self.name, 'status': server.status})
-                if server.status in list(cp.deferred_server_statuses +
-                                         ['ACTIVE']):
-                    return server.status == 'SUSPENDED'
-                else:
-                    raise exception.Error(_(' nova reported unexpected '
-                                            'instance[%(instance)s] '
-                                            'status[%(status)s]') %
-                                          {'instance': self.name,
-                                           'status': server.status})
-            else:
-                suspend_runner.step()
-        else:
-            volumes_runner.step()
+    def check_suspend_complete(self, server):
+        if server.status == 'SUSPENDED':
+            return True
+
+        cp = self.client_plugin()
+        cp.refresh_server(server)
+        LOG.debug("%(name)s check_suspend_complete "
+                  "status = %(status)s",
+                  {'name': self.name, 'status': server.status})
+        if server.status in list(cp.deferred_server_statuses + ['ACTIVE']):
+            return server.status == 'SUSPENDED'
+        else:
+            raise exception.Error(_(' nova reported unexpected '
+                                    'instance[%(instance)s] '
+                                    'status[%(status)s]') %
+                                  {'instance': self.name,
+                                   'status': server.status})

     def handle_resume(self):
         '''
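The net effect of this hunk is that check_suspend_complete no longer coordinates two task runners; it simply refreshes the server and inspects its status until Nova reports SUSPENDED, the same shape the resume path below uses via _check_active. A minimal, generic illustration of that poll-on-status pattern follows; FakeServer and FakeClientPlugin are invented for the sketch, and the deferred status list is an illustrative subset.

# Generic illustration of the simplified status polling; not Heat code.
class FakeServer(object):
    def __init__(self, statuses):
        self._statuses = iter(statuses)
        self.status = next(self._statuses)


class FakeClientPlugin(object):
    deferred_server_statuses = ['SUSPENDED']  # illustrative subset of the real list

    def refresh_server(self, server):
        # Pretend to re-fetch the server from Nova and pick up its next status.
        server.status = next(server._statuses, server.status)


def check_suspend_complete(cp, server):
    if server.status == 'SUSPENDED':
        return True
    cp.refresh_server(server)
    if server.status in cp.deferred_server_statuses + ['ACTIVE']:
        return server.status == 'SUSPENDED'
    raise RuntimeError('unexpected status %s' % server.status)


server = FakeServer(['ACTIVE', 'ACTIVE', 'SUSPENDED'])
cp = FakeClientPlugin()
while not check_suspend_complete(cp, server):  # False on the first pass, True on the second
    pass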
@@ -893,12 +861,10 @@ class Instance(resource.Resource):
         else:
             LOG.debug("resuming instance %s" % self.resource_id)
             server.resume()
-            return server, scheduler.TaskRunner(self._attach_volumes_task())
+            return server

-    def check_resume_complete(self, cookie):
-        server, volume_attach_task = cookie
-        return (self._check_active(server) and
-                self._check_volume_attached(server, volume_attach_task))
+    def check_resume_complete(self, server):
+        return self._check_active(server)


 def resource_mapping():
@@ -1090,101 +1090,6 @@ class InstancesTest(common.HeatTestCase):

         self.m.VerifyAll()

-    def test_instance_suspend_volumes_step(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_suspend_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to SUSPENDED
-        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d['server']['status'] = 'SUSPENDED'
-
-        # Return a dummy PollingTaskGroup to make check_suspend_complete step
-        def dummy_detach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
-        self.m.StubOutWithMock(instance, '_detach_volumes_task')
-        instance._detach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d))
-        self.m.ReplayAll()
-
-        scheduler.TaskRunner(instance.suspend)()
-        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
-    def test_instance_resume_volumes_step(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_resume_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to ACTIVE
-        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d['server']['status'] = 'ACTIVE'
-
-        # Return a dummy PollingTaskGroup to make check_resume_complete step
-        def dummy_attach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_attach, dummy_attach])
-        self.m.StubOutWithMock(instance, '_attach_volumes_task')
-        instance._attach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d))
-
-        self.m.ReplayAll()
-
-        instance.state_set(instance.SUSPEND, instance.COMPLETE)
-
-        scheduler.TaskRunner(instance.resume)()
-        self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
-    def test_instance_suspend_volumes_wait(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_suspend_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to SUSPENDED, but keep
-        # it ACTIVE for the first two iterations of check_suspend_complete.
-        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d2 = copy.deepcopy(d1)
-        d1['server']['status'] = 'ACTIVE'
-        d2['server']['status'] = 'SUSPENDED'
-
-        # Return a dummy PollingTaskGroup to make check_suspend_complete step
-        def dummy_detach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
-        self.m.StubOutWithMock(instance, '_detach_volumes_task')
-        instance._detach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d1))
-        get().AndReturn((200, d1))
-        get().AndReturn((200, d2))
-        self.m.ReplayAll()
-
-        scheduler.TaskRunner(instance.suspend)()
-        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
     def test_instance_status_build_spawning(self):
         self._test_instance_status_not_build_active('BUILD(SPAWNING)')

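This change removes the volume-stubbing tests without adding new ones: suspend/resume completion is now exercised by the existing status-driven tests, which only need to control the status Nova reports and never stub _detach_volumes_task or _attach_volumes_task. A generic sketch of that style of assertion using unittest.mock (not the mox/self.fc fixtures used in this test class) follows.

# Generic sketch, not a Heat test case: suspend completion can be verified
# purely by controlling the reported server status.
import unittest
from unittest import mock


class SuspendPollSketch(unittest.TestCase):
    def test_polls_until_suspended(self):
        server = mock.Mock()
        server.status = 'ACTIVE'

        cp = mock.Mock()
        cp.deferred_server_statuses = ['SUSPENDED']  # illustrative subset

        statuses = iter(['ACTIVE', 'SUSPENDED'])
        cp.refresh_server.side_effect = lambda srv: setattr(srv, 'status', next(statuses))

        def check_suspend_complete(srv):
            # Mirrors the simplified checker shape from the diff above.
            if srv.status == 'SUSPENDED':
                return True
            cp.refresh_server(srv)
            return (srv.status in cp.deferred_server_statuses + ['ACTIVE'] and
                    srv.status == 'SUSPENDED')

        self.assertEqual([False, True],
                         [check_suspend_complete(server) for _ in range(2)])


if __name__ == '__main__':
    unittest.main()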