Do not detach/re-attach volumes in AWS Instance
Currently, during resource suspend/resume/delete we manually detach and re-attach volumes on the AWS Instance resource. This is not needed: Nova already keeps volumes attached to suspended instances, so the attachments are readily available on resume, and it automatically detaches volumes on instance termination. Besides, since AWS CFN has no support for stack/resource suspend/resume, we do not have to stay compatible with any CFN behavior in this regard.

Change-Id: I38234d68467b5a4036e1a474a1e89d10686e1c3e
Related-Bug: #1393268
commit 5781b740a3
parent 216190ce48
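The claim above, that Nova keeps volume attachments across suspend/resume, can be sanity-checked by hand against a novaclient Server object (the same kind of object the handlers below work with). A minimal sketch follows; suspend()/resume() are the calls the diff itself uses, while volumes.get_server_volumes() and the volumeId attribute are assumptions about the python-novaclient volumes API of that era.

def attachments_survive_suspend(nova, server, poll):
    """Rough manual check of the Nova behaviour the commit relies on.

    `nova` is a novaclient client, `server` a Server object and `poll` a
    helper that refreshes the server until it reaches the given status
    (left abstract here -- in Heat this polling is what the
    check_*_complete() methods do).
    """
    before = {va.volumeId for va in nova.volumes.get_server_volumes(server.id)}

    server.suspend()                    # same call handle_suspend() now makes
    poll(server, 'SUSPENDED')

    server.resume()                     # same call handle_resume() makes
    poll(server, 'ACTIVE')

    after = {va.volumeId for va in nova.volumes.get_server_volumes(server.id)}
    return before == after              # attachments are unchanged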
@@ -779,16 +779,6 @@ class Instance(resource.Resource):
                         "when specifying BlockDeviceMappings.")
                 raise exception.StackValidationFailed(message=msg)
 
-    def _detach_volumes_task(self):
-        '''
-        Detach volumes from the instance
-        '''
-        detach_tasks = (vol_task.VolumeDetachTask(self.stack,
-                                                  self.resource_id,
-                                                  volume_id)
-                        for volume_id, device in self.volumes())
-        return scheduler.PollingTaskGroup(detach_tasks)
-
     def handle_delete(self):
         # make sure to delete the port which implicit-created by heat
         self._port_data_delete()
@@ -800,19 +790,14 @@ class Instance(resource.Resource):
         except Exception as e:
             self.client_plugin().ignore_not_found(e)
             return
-        deleters = (
-            scheduler.TaskRunner(self._detach_volumes_task()),
-            scheduler.TaskRunner(self.client_plugin().delete_server,
-                                 server))
-        deleters[0].start()
-        return deleters
+        deleter = scheduler.TaskRunner(self.client_plugin().delete_server,
+                                       server)
+        deleter.start()
+        return deleter
 
-    def check_delete_complete(self, deleters):
-        # if the resource was already deleted, deleters will be None
-        if deleters:
-            for deleter in deleters:
-                if not deleter.started():
-                    deleter.start()
-                if not deleter.step():
-                    return False
+    def check_delete_complete(self, deleter):
+        # if the resource was already deleted, deleters will be None
+        if deleter:
+            if not deleter.step():
+                return False
         return True
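The simplified delete path above leans entirely on the scheduler.TaskRunner contract: handle_delete() starts a single runner and returns it, and check_delete_complete() keeps calling step() until the task reports completion. The sketch below is a rough stand-in for that contract, not Heat's actual scheduler code (which also handles timeouts, cancellation and exception wrapping); it only makes the polling shape explicit.

class ToyTaskRunner(object):
    """Minimal stand-in for heat.engine.scheduler.TaskRunner.

    Only the parts of the contract used by handle_delete() /
    check_delete_complete() are sketched: start() creates the underlying
    generator and step() advances it, returning True once it is exhausted.
    """

    def __init__(self, task, *args, **kwargs):
        self._make = lambda: task(*args, **kwargs)
        self._gen = None
        self._done = False

    def started(self):
        return self._gen is not None

    def start(self):
        self._gen = self._make()        # task is a generator function

    def step(self):
        if not self._done:
            try:
                next(self._gen)
            except StopIteration:
                self._done = True
        return self._done


def fake_delete_server(server):
    yield   # pretend: issue the DELETE request
    yield   # pretend: poll Nova until the server is gone


# Mirrors the new handle_delete()/check_delete_complete() shape.
deleter = ToyTaskRunner(fake_delete_server, 'server-1234')
deleter.start()
while not deleter.step():
    pass    # in Heat, each iteration is one check_delete_complete() call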
@@ -835,23 +820,11 @@ class Instance(resource.Resource):
                                          self.resource_id)
         else:
             LOG.debug("suspending instance %s" % self.resource_id)
-            # We want the server.suspend to happen after the volume
-            # detachement has finished, so pass both tasks and the server
-            suspend_runner = scheduler.TaskRunner(server.suspend)
-            volumes_runner = scheduler.TaskRunner(self._detach_volumes_task())
-            return server, suspend_runner, volumes_runner
-
-    def check_suspend_complete(self, cookie):
-        server, suspend_runner, volumes_runner = cookie
-
-        if not volumes_runner.started():
-            volumes_runner.start()
-
-        if volumes_runner.done():
-            if not suspend_runner.started():
-                suspend_runner.start()
-
-            if suspend_runner.done():
-                if server.status == 'SUSPENDED':
-                    return True
+            server.suspend()
+            return server
+
+    def check_suspend_complete(self, server):
+        if server.status == 'SUSPENDED':
+            return True
@@ -860,8 +833,7 @@ class Instance(resource.Resource):
         LOG.debug("%(name)s check_suspend_complete "
                   "status = %(status)s",
                   {'name': self.name, 'status': server.status})
-        if server.status in list(cp.deferred_server_statuses +
-                                 ['ACTIVE']):
+        if server.status in list(cp.deferred_server_statuses + ['ACTIVE']):
             return server.status == 'SUSPENDED'
         else:
             raise exception.Error(_(' nova reported unexpected '
@@ -869,10 +841,6 @@ class Instance(resource.Resource):
                                     'status[%(status)s]') %
                                   {'instance': self.name,
                                    'status': server.status})
-            else:
-                suspend_runner.step()
-        else:
-            volumes_runner.step()
 
     def handle_resume(self):
         '''
@@ -893,12 +861,10 @@ class Instance(resource.Resource):
         else:
             LOG.debug("resuming instance %s" % self.resource_id)
             server.resume()
-            return server, scheduler.TaskRunner(self._attach_volumes_task())
+            return server
 
-    def check_resume_complete(self, cookie):
-        server, volume_attach_task = cookie
-        return (self._check_active(server) and
-                self._check_volume_attached(server, volume_attach_task))
+    def check_resume_complete(self, server):
+        return self._check_active(server)
 
 
 def resource_mapping():
@@ -1090,101 +1090,6 @@ class InstancesTest(common.HeatTestCase):
 
         self.m.VerifyAll()
 
-    def test_instance_suspend_volumes_step(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_suspend_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to SUSPENDED
-        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d['server']['status'] = 'SUSPENDED'
-
-        # Return a dummy PollingTaskGroup to make check_suspend_complete step
-        def dummy_detach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
-        self.m.StubOutWithMock(instance, '_detach_volumes_task')
-        instance._detach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d))
-        self.m.ReplayAll()
-
-        scheduler.TaskRunner(instance.suspend)()
-        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
-    def test_instance_resume_volumes_step(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_resume_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to ACTIVE
-        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d['server']['status'] = 'ACTIVE'
-
-        # Return a dummy PollingTaskGroup to make check_resume_complete step
-        def dummy_attach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_attach, dummy_attach])
-        self.m.StubOutWithMock(instance, '_attach_volumes_task')
-        instance._attach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d))
-
-        self.m.ReplayAll()
-
-        instance.state_set(instance.SUSPEND, instance.COMPLETE)
-
-        scheduler.TaskRunner(instance.resume)()
-        self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
-    def test_instance_suspend_volumes_wait(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_suspend_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to SUSPENDED, but keep
-        # it ACTIVE for the first two iterations of check_suspend_complete.
-        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d2 = copy.deepcopy(d1)
-        d1['server']['status'] = 'ACTIVE'
-        d2['server']['status'] = 'SUSPENDED'
-
-        # Return a dummy PollingTaskGroup to make check_suspend_complete step
-        def dummy_detach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
-        self.m.StubOutWithMock(instance, '_detach_volumes_task')
-        instance._detach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d1))
-        get().AndReturn((200, d1))
-        get().AndReturn((200, d2))
-        self.m.ReplayAll()
-
-        scheduler.TaskRunner(instance.suspend)()
-        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
     def test_instance_status_build_spawning(self):
         self._test_instance_status_not_build_active('BUILD(SPAWNING)')
 