Merge "Implement stack restore"
This commit is contained in:
commit
d5bcfa9b84
|
@ -1128,6 +1128,15 @@ class Server(stack_user.StackUser):
|
|||
except Exception as e:
|
||||
self.client_plugin().ignore_not_found(e)
|
||||
|
||||
def handle_restore(self, defn, restore_data):
    """Return a frozen definition for restoring this server from a snapshot.

    :param defn: the resource definition to rebuild the server from.
    :param restore_data: per-resource snapshot data; the image captured
        while snapshotting is stored under
        ``resource_data['snapshot_image_id']``.
    :returns: a frozen copy of *defn* whose image property points at the
        snapshot image.
    """
    image_id = restore_data['resource_data']['snapshot_image_id']
    # Drop unset properties so schema defaults re-apply on restore.
    # items() (not the Python-2-only iteritems()) keeps this portable.
    props = {key: value
             for key, value in
             defn.properties(self.properties_schema).items()
             if value is not None}
    # Boot the restored server from the snapshot image instead of the
    # originally requested image.
    props[self.IMAGE] = image_id
    return defn.freeze(properties=props)
|
||||
|
||||
|
||||
class FlavorConstraint(constraints.BaseCustomConstraint):
|
||||
|
||||
|
|
|
@ -751,6 +751,17 @@ class CinderVolume(Volume):
|
|||
message=_('Scheduler hints are not supported by the current '
|
||||
'volume API.'))
|
||||
|
||||
def handle_restore(self, defn, restore_data):
    """Return a frozen definition for restoring this volume from a backup.

    :param defn: the resource definition to rebuild the volume from.
    :param restore_data: per-resource snapshot data; the backup created
        while snapshotting is stored under
        ``resource_data['backup_id']``.
    :returns: a frozen copy of *defn* that creates the volume from the
        backup rather than from its original source.
    """
    backup_id = restore_data['resource_data']['backup_id']
    # These properties describe the volume's original source/size; the
    # restored volume gets its contents (and size) from the backup.
    ignore_props = (
        self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID, self.SIZE)
    # Drop ignored and unset properties. items() (not the Python-2-only
    # iteritems()) keeps this portable.
    props = {key: value
             for key, value in
             defn.properties(self.properties_schema).items()
             if key not in ignore_props and value is not None}
    props[self.BACKUP_ID] = backup_id
    return defn.freeze(properties=props)
|
||||
|
||||
|
||||
class CinderVolumeAttachment(VolumeAttachment):
|
||||
|
||||
|
|
|
@ -60,11 +60,11 @@ class ForcedCancel(BaseException):
|
|||
class Stack(collections.Mapping):
|
||||
|
||||
ACTIONS = (
|
||||
CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND,
|
||||
RESUME, ADOPT, SNAPSHOT, CHECK,
|
||||
CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND, RESUME, ADOPT,
|
||||
SNAPSHOT, CHECK, RESTORE
|
||||
) = (
|
||||
'CREATE', 'DELETE', 'UPDATE', 'ROLLBACK', 'SUSPEND',
|
||||
'RESUME', 'ADOPT', 'SNAPSHOT', 'CHECK',
|
||||
'CREATE', 'DELETE', 'UPDATE', 'ROLLBACK', 'SUSPEND', 'RESUME', 'ADOPT',
|
||||
'SNAPSHOT', 'CHECK', 'RESTORE'
|
||||
)
|
||||
|
||||
STATUSES = (IN_PROGRESS, FAILED, COMPLETE
|
||||
|
@ -706,7 +706,7 @@ class Stack(collections.Mapping):
|
|||
|
||||
@scheduler.wrappertask
|
||||
def update_task(self, newstack, action=UPDATE, event=None):
|
||||
if action not in (self.UPDATE, self.ROLLBACK):
|
||||
if action not in (self.UPDATE, self.ROLLBACK, self.RESTORE):
|
||||
LOG.error(_LE("Unexpected action %s passed to update!"), action)
|
||||
self.state_set(self.UPDATE, self.FAILED,
|
||||
"Invalid action %s" % action)
|
||||
|
@ -764,6 +764,8 @@ class Stack(collections.Mapping):
|
|||
|
||||
if action == self.UPDATE:
|
||||
reason = 'Stack successfully updated'
|
||||
elif action == self.RESTORE:
|
||||
reason = 'Stack successfully restored'
|
||||
else:
|
||||
reason = 'Stack rollback completed'
|
||||
stack_status = self.COMPLETE
|
||||
|
@ -1040,6 +1042,32 @@ class Stack(collections.Mapping):
|
|||
data = snapshot.data['resources'].get(name)
|
||||
scheduler.TaskRunner(rsrc.delete_snapshot, data)()
|
||||
|
||||
@profiler.trace('Stack.restore', hide_args=False)
def restore(self, snapshot):
    '''
    Restore the given snapshot, invoking handle_restore on all resources.

    Each resource that implements handle_restore may rewrite its own
    definition from the snapshot data (e.g. to boot from a snapshot
    image); the modified template is then applied as a regular stack
    update running under the RESTORE action.
    '''
    self.updated_time = datetime.utcnow()

    tmpl = Template(snapshot.data['template'])

    # items() rather than the Python-2-only iteritems(); it also
    # snapshots the definitions before add_resource mutates the
    # template below.
    for name, defn in tmpl.resource_definitions(self).items():
        rsrc = resource.Resource(name, defn, self)
        data = snapshot.data['resources'].get(name)
        handle_restore = getattr(rsrc, 'handle_restore', None)
        if callable(handle_restore):
            # Let the resource rewrite its definition from snapshot data.
            defn = handle_restore(defn, data)
        tmpl.add_resource(defn, name)

    # Build the target stack from the (possibly rewritten) template,
    # preserving this stack's environment and update settings.
    newstack = self.__class__(self.context, self.name, tmpl, self.env,
                              timeout_mins=self.timeout_mins,
                              disable_rollback=self.disable_rollback)
    newstack.parameters.set_stack_id(self.identifier())

    updater = scheduler.TaskRunner(self.update_task, newstack,
                                   action=self.RESTORE)
    updater()
|
||||
|
||||
@profiler.trace('Stack.output', hide_args=False)
|
||||
def output(self, key):
|
||||
'''
|
||||
|
|
|
@ -4282,3 +4282,63 @@ class StackTest(common.HeatTestCase):
|
|||
self.assertEqual('Output validation error: The Referenced Attribute '
|
||||
'(AResource Bar) is incorrect.',
|
||||
six.text_type(ex))
|
||||
|
||||
def test_restore(self):
    """Restoring a snapshot re-creates resources removed by an update."""
    # Build and create a two-resource stack.
    template_body = {
        'HeatTemplateFormatVersion': '2012-12-12',
        'Resources': {
            'A': {'Type': 'GenericResourceType'},
            'B': {'Type': 'GenericResourceType'}}}
    self.stack = parser.Stack(self.ctx, 'stack_details_test',
                              parser.Template(template_body))
    self.stack.store()
    self.stack.create()

    # Capture the stack state via abandon data as the snapshot payload.
    snapshot_data = copy.deepcopy(self.stack.prepare_abandon())
    fake_snapshot = collections.namedtuple('Snapshot', ('data',))(
        snapshot_data)

    # Update away resource B so the restore has something to undo.
    reduced_body = {
        'HeatTemplateFormatVersion': '2012-12-12',
        'Resources': {'A': {'Type': 'GenericResourceType'}}}
    updated_stack = parser.Stack(self.ctx, 'updated_stack',
                                 template.Template(reduced_body))
    self.stack.update(updated_stack)
    self.assertEqual(1, len(self.stack.resources))

    # Restoring the snapshot should bring resource B back.
    self.stack.restore(fake_snapshot)

    self.assertEqual((parser.Stack.RESTORE, parser.Stack.COMPLETE),
                     self.stack.state)
    self.assertEqual(2, len(self.stack.resources))
|
||||
|
||||
def test_hot_restore(self):
    """A resource's handle_restore hook is applied during stack restore.

    Registers a resource type whose handle_restore pulls 'a_string' out
    of the snapshot's resource data, then verifies the restored stack's
    resource carries that value.
    """

    class ResourceWithRestore(generic_rsrc.ResWithComplexPropsAndAttrs):

        def handle_restore(self, defn, data):
            # items() (not the Python-2-only iteritems()) keeps this
            # portable; drop unset properties so defaults re-apply.
            props = {key: value
                     for key, value in
                     defn.properties(self.properties_schema).items()
                     if value is not None}
            value = data['resource_data']['a_string']
            props['a_string'] = value
            return defn.freeze(properties=props)

    resource._register_class('ResourceWithRestore', ResourceWithRestore)
    tpl = {'heat_template_version': '2013-05-23',
           'resources':
           {'A': {'type': 'ResourceWithRestore'}}}
    self.stack = parser.Stack(self.ctx, 'stack_details_test',
                              parser.Template(tpl))
    self.stack.store()
    self.stack.create()

    # Seed the snapshot's resource data with the value the restore hook
    # should propagate into the restored resource's properties.
    data = self.stack.prepare_abandon()
    data['resources']['A']['resource_data']['a_string'] = 'foo'
    fake_snapshot = collections.namedtuple('Snapshot', ('data',))(data)

    self.stack.restore(fake_snapshot)

    self.assertEqual((parser.Stack.RESTORE, parser.Stack.COMPLETE),
                     self.stack.state)

    self.assertEqual(
        'foo', self.stack.resources['A'].properties['a_string'])
|
||||
|
|
|
@ -2699,6 +2699,66 @@ class ServersTest(common.HeatTestCase):
|
|||
# this call is Act stage of this test. We calling server.validate()
|
||||
# to verify that no excessive calls to Nova are made during validation.
|
||||
self.assertIsNone(server.validate())
|
||||
|
||||
self.m.VerifyAll()
|
||||
|
||||
def test_server_restore(self):
    """Restoring a server snapshot re-creates it from the snapshot image."""
    t = template_format.parse(wp_template)
    tmpl = parser.Template(t)
    stack = parser.Stack(utils.dummy_context(), "server_restore", tmpl)
    stack.store()

    self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
    nova.NovaClientPlugin._create().MultipleTimes().AndReturn(self.fc)

    return_server = self.fc.servers.list()[1]
    return_server.id = 1234

    # The original create and the restore-driven replacement boot the
    # same server; only the image differs, so share the common kwargs.
    common_kwargs = dict(
        flavor=3, key_name='test',
        name=utils.PhysName("server_restore", "WebServer"),
        security_groups=[],
        userdata=mox.IgnoreArg(), scheduler_hints=None,
        meta=None, nics=None, availability_zone=None,
        block_device_mapping=None, config_drive=None,
        disk_config=None, reservation_id=None, files={},
        admin_pass=None)
    self.m.StubOutWithMock(self.fc.servers, 'create')
    self.fc.servers.create(
        image=744, **common_kwargs).AndReturn(return_server)
    self.fc.servers.create(
        image=1, **common_kwargs).AndReturn(return_server)

    self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id')
    glance.GlanceClientPlugin.get_image_id(
        'F17-x86_64-gold').MultipleTimes().AndReturn(744)
    glance.GlanceClientPlugin.get_image_id(
        'CentOS 5.2').MultipleTimes().AndReturn(1)

    self.m.ReplayAll()

    scheduler.TaskRunner(stack.create)()
    self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)

    scheduler.TaskRunner(stack.snapshot)()
    self.assertEqual((stack.SNAPSHOT, stack.COMPLETE), stack.state)

    # Point the snapshot data at the second image and restore.
    data = stack.prepare_abandon()
    resource_data = data['resources']['WebServer']['resource_data']
    resource_data['snapshot_image_id'] = 'CentOS 5.2'
    fake_snapshot = collections.namedtuple('Snapshot', ('data',))(data)

    stack.restore(fake_snapshot)

    self.assertEqual((stack.RESTORE, stack.COMPLETE), stack.state)

    self.m.VerifyAll()
|
||||
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
import copy
|
||||
import json
|
||||
|
||||
|
@ -104,6 +105,18 @@ resources:
|
|||
mountpoint: /dev/vdc
|
||||
'''
|
||||
|
||||
single_cinder_volume_template = '''
|
||||
heat_template_version: 2013-05-23
|
||||
description: Cinder volume
|
||||
resources:
|
||||
volume:
|
||||
type: OS::Cinder::Volume
|
||||
properties:
|
||||
size: 1
|
||||
name: test_name
|
||||
description: test_description
|
||||
'''
|
||||
|
||||
|
||||
class BaseVolumeTest(common.HeatTestCase):
|
||||
def setUp(self):
|
||||
|
@ -1426,6 +1439,45 @@ class CinderVolumeTest(BaseVolumeTest):
|
|||
self.create_volume, self.t, stack, 'volume3')
|
||||
self.assertIn('Scheduler hints are not supported by the current '
|
||||
'volume API.', six.text_type(ex))
|
||||
self.m.VerifyAll()
|
||||
|
||||
def test_volume_restore(self):
    """Restoring a volume snapshot restores the backup into the volume."""
    stack_name = 'test_restore_stack'
    t = template_format.parse(single_cinder_volume_template)
    stack = utils.parse_stack(t, stack_name=stack_name)

    fake_volume = FakeVolume('creating', 'available')
    fake_backup = FakeBackup('creating', 'available')
    fake_restore = FakeBackupRestore('vol-123')

    # Expected call sequence: create the volume, back it up during
    # snapshot, then restore that backup and re-fetch the volume.
    cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
        self.cinder_fc)
    self.cinder_fc.volumes.create(
        size=1, availability_zone=None, description='test_description',
        name='test_name'
    ).AndReturn(fake_volume)
    self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
    self.cinder_fc.backups.create('vol-123').AndReturn(fake_backup)
    self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
    self.cinder_fc.restores.restore('backup-123').AndReturn(fake_restore)
    self.cinder_fc.volumes.get('vol-123').AndReturn(fake_volume)

    self.m.ReplayAll()

    scheduler.TaskRunner(stack.create)()
    self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)

    scheduler.TaskRunner(stack.snapshot)()
    self.assertEqual((stack.SNAPSHOT, stack.COMPLETE), stack.state)

    data = stack.prepare_abandon()
    fake_snapshot = collections.namedtuple('Snapshot', ('data',))(data)

    stack.restore(fake_snapshot)

    self.assertEqual((stack.RESTORE, stack.COMPLETE), stack.state)

    self.m.VerifyAll()
|
||||
|
||||
|
|
Loading…
Reference in New Issue