Merge "Do not detach/re-attach volumes in AWS Instance"
commit 05caec63b4
@@ -779,16 +779,6 @@ class Instance(resource.Resource):
                             "when specifying BlockDeviceMappings.")
                     raise exception.StackValidationFailed(message=msg)
 
-    def _detach_volumes_task(self):
-        '''
-        Detach volumes from the instance
-        '''
-        detach_tasks = (vol_task.VolumeDetachTask(self.stack,
-                                                  self.resource_id,
-                                                  volume_id)
-                        for volume_id, device in self.volumes())
-        return scheduler.PollingTaskGroup(detach_tasks)
-
     def handle_delete(self):
         # make sure to delete the port which implicit-created by heat
         self._port_data_delete()
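For context on what is being removed: _detach_volumes_task built a scheduler.PollingTaskGroup out of one VolumeDetachTask per attached volume, and callers stepped that group to completion. A rough stand-alone sketch of that pattern (illustrative names only, not Heat's scheduler module) looks like this:

# Stand-alone sketch (illustrative names only) of the PollingTaskGroup
# pattern the removed helper relied on: each detach task is a generator
# that yields until its work is done, and the group steps all of them
# together until every task has finished.
def make_detach_task(volume_id):
    def task():
        for _ in range(3):              # pretend the detach takes a few polls
            yield                       # not finished yet
        print('volume %s detached' % volume_id)
    return task


class PollingTaskGroupSketch(object):
    def __init__(self, tasks):
        self._running = [t() for t in tasks]

    def step(self):
        """Advance every task once; return True when all are exhausted."""
        still_running = []
        for gen in self._running:
            try:
                next(gen)
                still_running.append(gen)
            except StopIteration:
                pass
        self._running = still_running
        return not self._running


group = PollingTaskGroupSketch(
    [make_detach_task(v) for v in ('vol-1', 'vol-2')])
while not group.step():
    pass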
@@ -800,21 +790,16 @@ class Instance(resource.Resource):
         except Exception as e:
             self.client_plugin().ignore_not_found(e)
             return
-        deleters = (
-            scheduler.TaskRunner(self._detach_volumes_task()),
-            scheduler.TaskRunner(self.client_plugin().delete_server,
-                                 server))
-        deleters[0].start()
-        return deleters
+        deleter = scheduler.TaskRunner(self.client_plugin().delete_server,
+                                       server)
+        deleter.start()
+        return deleter
 
-    def check_delete_complete(self, deleters):
+    def check_delete_complete(self, deleter):
         # if the resource was already deleted, deleters will be None
-        if deleters:
-            for deleter in deleters:
-                if not deleter.started():
-                    deleter.start()
-                if not deleter.step():
-                    return False
+        if deleter:
+            if not deleter.step():
+                return False
         return True
 
     def handle_suspend(self):
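The delete path above keeps Heat's usual two-phase contract: handle_delete returns a token (now a single TaskRunner instead of a tuple of runners) and check_delete_complete is polled with that token until it reports True. A minimal sketch of how such a pair is driven, using stand-in classes rather than Heat's scheduler:

# Minimal sketch of the handle_delete/check_delete_complete contract, using
# stand-ins rather than Heat's scheduler.TaskRunner: the caller invokes
# handle_delete once, keeps the returned token, and polls
# check_delete_complete with it until the method returns True.
import time


class FakeDeleter(object):
    """Stand-in for a started scheduler.TaskRunner."""

    def __init__(self, polls_needed=3):
        self._remaining = polls_needed

    def step(self):
        # mimics TaskRunner.step(), which reports True once the task is done
        self._remaining -= 1
        return self._remaining <= 0


class FakeInstance(object):
    def handle_delete(self):
        deleter = FakeDeleter()   # the real code wraps delete_server here
        return deleter

    def check_delete_complete(self, deleter):
        # if the resource was already deleted, deleter will be None
        if deleter:
            if not deleter.step():
                return False
        return True


resource = FakeInstance()
token = resource.handle_delete()
while not resource.check_delete_complete(token):
    time.sleep(0.1)   # a real engine yields between polls instead of sleeping
print('delete complete')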
@@ -835,44 +820,27 @@ class Instance(resource.Resource):
                                          self.resource_id)
         else:
             LOG.debug("suspending instance %s" % self.resource_id)
-            # We want the server.suspend to happen after the volume
-            # detachement has finished, so pass both tasks and the server
-            suspend_runner = scheduler.TaskRunner(server.suspend)
-            volumes_runner = scheduler.TaskRunner(self._detach_volumes_task())
-            return server, suspend_runner, volumes_runner
+            server.suspend()
+            return server
 
-    def check_suspend_complete(self, cookie):
-        server, suspend_runner, volumes_runner = cookie
-
-        if not volumes_runner.started():
-            volumes_runner.start()
-
-        if volumes_runner.done():
-            if not suspend_runner.started():
-                suspend_runner.start()
-
-            if suspend_runner.done():
-                if server.status == 'SUSPENDED':
-                    return True
-
-                cp = self.client_plugin()
-                cp.refresh_server(server)
-                LOG.debug("%(name)s check_suspend_complete "
-                          "status = %(status)s",
-                          {'name': self.name, 'status': server.status})
-                if server.status in list(cp.deferred_server_statuses +
-                                         ['ACTIVE']):
-                    return server.status == 'SUSPENDED'
-                else:
-                    raise exception.Error(_(' nova reported unexpected '
-                                            'instance[%(instance)s] '
-                                            'status[%(status)s]') %
-                                          {'instance': self.name,
-                                           'status': server.status})
-            else:
-                suspend_runner.step()
-        else:
-            volumes_runner.step()
+    def check_suspend_complete(self, server):
+        if server.status == 'SUSPENDED':
+            return True
+
+        cp = self.client_plugin()
+        cp.refresh_server(server)
+        LOG.debug("%(name)s check_suspend_complete "
+                  "status = %(status)s",
+                  {'name': self.name, 'status': server.status})
+        if server.status in list(cp.deferred_server_statuses + ['ACTIVE']):
+            return server.status == 'SUSPENDED'
+        else:
+            raise exception.Error(_(' nova reported unexpected '
+                                    'instance[%(instance)s] '
+                                    'status[%(status)s]') %
+                                  {'instance': self.name,
+                                   'status': server.status})
 
     def handle_resume(self):
         '''
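With the volume runners gone, check_suspend_complete reduces to a plain status poll: succeed on SUSPENDED, keep waiting while the status is still a transitional one, and raise on anything unexpected. A small self-contained illustration of that loop follows; the DEFERRED list is an assumption standing in for client_plugin().deferred_server_statuses, and FakeServer stands in for the nova server object.

# Self-contained illustration of the status poll; DEFERRED is an assumed
# stand-in for the client plugin's deferred_server_statuses list.
DEFERRED = ['BUILD', 'HARD_REBOOT', 'PASSWORD', 'REBOOT', 'RESCUE',
            'RESIZE', 'REVERT_RESIZE', 'SHUTOFF', 'SUSPENDED',
            'VERIFY_RESIZE']


class FakeServer(object):
    def __init__(self, statuses):
        self._statuses = iter(statuses)
        self.status = next(self._statuses)

    def refresh(self):
        # stands in for cp.refresh_server(server)
        self.status = next(self._statuses, self.status)


def check_suspend_complete(server):
    if server.status == 'SUSPENDED':
        return True
    server.refresh()
    if server.status in DEFERRED + ['ACTIVE']:
        return server.status == 'SUSPENDED'
    raise RuntimeError('unexpected status %s' % server.status)


server = FakeServer(['ACTIVE', 'ACTIVE', 'SUSPENDED'])
while not check_suspend_complete(server):
    pass
print(server.status)  # SUSPENDED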
@@ -893,12 +861,10 @@ class Instance(resource.Resource):
         else:
             LOG.debug("resuming instance %s" % self.resource_id)
             server.resume()
-            return server, scheduler.TaskRunner(self._attach_volumes_task())
+            return server
 
-    def check_resume_complete(self, cookie):
-        server, volume_attach_task = cookie
-        return (self._check_active(server) and
-                self._check_volume_attached(server, volume_attach_task))
+    def check_resume_complete(self, server):
+        return self._check_active(server)
 
 
 def resource_mapping():
@@ -1090,101 +1090,6 @@ class InstancesTest(common.HeatTestCase):
 
         self.m.VerifyAll()
 
-    def test_instance_suspend_volumes_step(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_suspend_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to SUSPENDED
-        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d['server']['status'] = 'SUSPENDED'
-
-        # Return a dummy PollingTaskGroup to make check_suspend_complete step
-        def dummy_detach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
-        self.m.StubOutWithMock(instance, '_detach_volumes_task')
-        instance._detach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d))
-        self.m.ReplayAll()
-
-        scheduler.TaskRunner(instance.suspend)()
-        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
-    def test_instance_resume_volumes_step(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_resume_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to ACTIVE
-        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d['server']['status'] = 'ACTIVE'
-
-        # Return a dummy PollingTaskGroup to make check_resume_complete step
-        def dummy_attach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_attach, dummy_attach])
-        self.m.StubOutWithMock(instance, '_attach_volumes_task')
-        instance._attach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d))
-
-        self.m.ReplayAll()
-
-        instance.state_set(instance.SUSPEND, instance.COMPLETE)
-
-        scheduler.TaskRunner(instance.resume)()
-        self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
-    def test_instance_suspend_volumes_wait(self):
-        return_server = self.fc.servers.list()[1]
-        instance = self._create_test_instance(return_server,
-                                              'in_suspend_vol')
-
-        instance.resource_id = '1234'
-        self.m.ReplayAll()
-
-        # Override the get_servers_1234 handler status to SUSPENDED, but keep
-        # it ACTIVE for the first two iterations of check_suspend_complete.
-        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
-        d2 = copy.deepcopy(d1)
-        d1['server']['status'] = 'ACTIVE'
-        d2['server']['status'] = 'SUSPENDED'
-
-        # Return a dummy PollingTaskGroup to make check_suspend_complete step
-        def dummy_detach():
-            yield
-        dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
-        self.m.StubOutWithMock(instance, '_detach_volumes_task')
-        instance._detach_volumes_task().AndReturn(dummy_tg)
-
-        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
-        get = self.fc.client.get_servers_1234
-        get().AndReturn((200, d1))
-        get().AndReturn((200, d1))
-        get().AndReturn((200, d2))
-        self.m.ReplayAll()
-
-        scheduler.TaskRunner(instance.suspend)()
-        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
-
-        self.m.VerifyAll()
-
     def test_instance_status_build_spawning(self):
         self._test_instance_status_not_build_active('BUILD(SPAWNING)')
 