move volume code into datalayer and cleanup
@@ -36,11 +36,9 @@ from nova import flags
from nova import process
from nova import service
from nova import utils
from nova import models
from nova.compute import power_state
from nova.network import service as network_service
from nova.virt import connection as virt_connection
from nova.volume import service as volume_service


FLAGS = flags.FLAGS
@@ -122,7 +120,7 @@ class ComputeService(service.Service):
"""Reboot an instance on this server.

KVM doesn't support reboot, so we terminate and restart.


"""
self.update_state(instance_id, context)
instance_ref = db.instance_get(context, instance_id)
@@ -172,14 +170,14 @@ class ComputeService(service.Service):
context=None):
"""Attach a volume to an instance."""
# TODO(termie): check that instance_id exists
volume_ref = volume_get(context, volume_id)
volume_ref = db.volume_get(context, volume_id)
yield self._init_aoe()
yield process.simple_execute(
"sudo virsh attach-disk %s /dev/etherd/%s %s" %
(instance_id,
volume['aoe_device'],
volume_ref['aoe_device'],
mountpoint.rpartition('/dev/')[2]))
volume_attached(context, volume_id)
db.volume_attached(context, volume_id)
defer.returnValue(True)

@defer.inlineCallbacks
@@ -189,14 +187,15 @@ class ComputeService(service.Service):
# despite the documentation, virsh detach-disk just wants the device
# name without the leading /dev/
# TODO(termie): check that instance_id exists
volume_ref = volume_get(context, volume_id)
target = volume['mountpoint'].rpartition('/dev/')[2]
volume_ref = db.volume_get(context, volume_id)
target = volume_ref['mountpoint'].rpartition('/dev/')[2]
yield process.simple_execute(
"sudo virsh detach-disk %s %s " % (instance_id, target))
volume_detached(context, volume_id)
db.volume_detached(context, volume_id)
defer.returnValue(True)

@defer.inlineCallbacks
def _init_aoe(self):
# TODO(vish): these shell calls should move into a different layer.
yield process.simple_execute("sudo aoe-discover")
yield process.simple_execute("sudo aoe-stat")

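For readability, here is the attach path reassembled as it reads once the hunks above are applied, with the db data-layer calls in place of the old module-level helpers. This is a sketch, not a copy of the file: the method name, the full parameter list, and the indentation sit outside the hunk and are assumed here.

@defer.inlineCallbacks
def attach_volume(self, instance_id, volume_id, mountpoint, context=None):
    """Attach a volume to an instance."""
    # TODO(termie): check that instance_id exists
    volume_ref = db.volume_get(context, volume_id)
    yield self._init_aoe()
    # expose the AoE export to the guest through libvirt
    yield process.simple_execute(
        "sudo virsh attach-disk %s /dev/etherd/%s %s" %
        (instance_id,
         volume_ref['aoe_device'],
         mountpoint.rpartition('/dev/')[2]))
    # NOTE: the db api below declares volume_attached(context, volume_id,
    # instance_id, mountpoint); this call site passes only the first two.
    db.volume_attached(context, volume_id)
    defer.returnValue(True)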
@@ -1,5 +1,21 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags
from nova import utils

@@ -23,18 +39,28 @@ def instance_get(context, instance_id):
return _impl.instance_get(context, instance_id)


def instance_update(context, instance_id, values):
"""Set the given properties on an instance and update it.

Raises NotFound if instance does not exist.

"""
return _impl.instance_update(context, instance_id, values)


def instance_create(context, values):
"""Create an instance from the values dictionary."""
return _impl.instance_create(context, values)


def instance_state(context, instance_id, state, description=None):
"""Set the state of an instance."""
return _impl.instance_state(context, instance_id, state, description)


def instance_update(context, instance_id, new_values):
"""Set the given properties on an instance and update it.

Raises if instance does not exist.

"""
return _impl.instance_update(context, instance_id, new_values)
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return _impl.volume_destroy(context, volume_id)


def volume_get(context, volume_id):
@@ -42,12 +68,59 @@ def volume_get(context, volume_id):
return _impl.volume_get(context, volume_id)


def volume_attached(context, volume_id):
def volume_attached(context, volume_id, instance_id, mountpoint):
"""Ensure that a volume is set as attached."""
return _impl.volume_attached(context, volume_id)
return _impl.volume_attached(context, volume_id, instance_id, mountpoint)


def volume_detached(context, volume_id):
"""Ensure that a volume is set as detached."""
return _impl.volume_detached(context, volume_id)


def volume_update(context, volume_id, values):
"""Set the given properties on an volume and update it.
|
||||
|
||||
Raises NotFound if volume does not exist.
|
||||
|
||||
"""
|
||||
return _impl.volume_update(context, volume_id, values)
|
||||
|
||||
|
||||
def volume_create(context, values):
|
||||
"""Create a volume from the values dictionary."""
|
||||
return _impl.volume_create(context, values)
|
||||
|
||||
|
||||
def volume_allocate_shelf_and_blade(context, volume_id):
|
||||
"""Allocate a free shelf and blace from the pool."""
|
||||
return _impl.volume_allocate_shelf_and_blade(context, volume_id)
|
||||
|
||||
|
||||
def volume_get_shelf_and_blade(context, volume_id):
|
||||
"""Get the shelf and blade allocated to the volume."""
|
||||
return _impl.volume_get_shelf_and_blade(context, volume_id)
|
||||
|
||||
|
||||
def network_destroy(context, network_id):
|
||||
"""Destroy the network or raise if it does not exist."""
|
||||
return _impl.network_destroy(context, network_id)
|
||||
|
||||
|
||||
def network_get(context, network_id):
|
||||
"""Get an network or raise if it does not exist."""
|
||||
return _impl.network_get(context, network_id)
|
||||
|
||||
|
||||
def network_update(context, network_id, values):
|
||||
"""Set the given properties on an network and update it.
|
||||
|
||||
Raises NotFound if network does not exist.
|
||||
|
||||
"""
|
||||
return _impl.network_update(context, network_id, values)
|
||||
|
||||
|
||||
def network_create(context, values):
|
||||
"""Create a network from the values dictionary."""
|
||||
return _impl.network_create(context, values)
|
||||
|
||||
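Every function in this api module is a thin pass-through to a backend held in _impl, so services import the data layer and never touch the backend directly. How _impl gets bound is not shown in these hunks; a minimal sketch of the idea, assuming a flag-selected backend module and a generic import helper (the flag name and the helper are illustrative, not taken from this commit; the default points at the sqlalchemy backend referenced by the tests below):

from nova import flags
from nova import utils

FLAGS = flags.FLAGS
# Hypothetical flag; the real wiring lives outside these hunks.
flags.DEFINE_string('db_backend', 'nova.db.sqlalchemy.api',
                    'module that implements the data layer')

# Bind the backend module once; every api function above then delegates
# through _impl, keeping services unaware of the storage implementation.
_impl = utils.import_object(FLAGS.db_backend)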
@@ -1,5 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import exception
from nova import models


@@ -12,24 +29,40 @@ def instance_get(context, instance_id):
return models.Instance.find(instance_id)


def instance_update(context, instance_id, values):
instance_ref = instance_get(context, instance_id)
for (key, value) in values.iteritems():
instance_ref[key] = value
instance_ref.save()


def instance_create(context, values):
instance_ref = models.Instance()
for (key, value) in values.iteritems():
instance_ref[key] = value
instance_ref.save()
return instance_ref.id


def instance_state(context, instance_id, state, description=None):
instance_ref = instance_get(context, instance_id)
instance_ref.set_state(state, description)


def instance_update(context, instance_id, properties):
instance_ref = instance_get(context, instance_id)
for k, v in properties.iteritems():
instance_ref[k] = v
instance_ref.save()
def volume_destroy(context, volume_id):
volume_ref = volume_get(context, volume_id)
volume_ref.delete()


def volume_get(context, volume_id):
return models.Volume.find(volume_id)


def volume_attached(context, volume_id):
def volume_attached(context, volume_id, instance_id, mountpoint):
volume_ref = volume_get(context, volume_id)
volume_ref.instance_id = instance_id
volume_ref['status'] = 'in-use'
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
volume_ref.save()

@@ -41,3 +74,72 @@ def volume_detached(context, volume_id):
volume_ref['status'] = 'available'
volume_ref['attach_status'] = 'detached'
volume_ref.save()


def volume_update(context, volume_id, values):
volume_ref = volume_get(context, volume_id)
for (key, value) in values.iteritems():
volume_ref[key] = value
volume_ref.save()


def volume_create(context, values):
volume_ref = models.Volume()
for (key, value) in values.iteritems():
volume_ref[key] = value
volume_ref.save()
return volume_ref.id


class NoMoreBlades(exception.Error):
pass


# FIXME should we just do this in the constructor automatically
# and return the shelf and blade id with volume data in
# volume_get?
def volume_allocate_shelf_and_blade(context, volume_id):
session = models.NovaBase.get_session()
query = session.query(models.ExportDevice).filter_by(volume=None)
export_device = query.with_lockmode("update").first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not export_device:
# FIXME where should this exception go?
raise NoMoreBlades()
export_device.volume_id = volume_id
session.add(export_device)
session.commit()
return (export_device.shelf_id, export_device.blade_id)


def volume_get_shelf_and_blade(context, volume_id):
# FIXME: should probably do this in one call
volume_ref = volume_get(context, volume_id)
export_device = volume_ref.export_device
if not export_device:
raise exception.NotFound()
return (export_device.shelf_id, export_device.blade_id)

def network_destroy(context, network_id):
network_ref = network_get(context, network_id)
network_ref.delete()


def network_get(context, network_id):
return models.Network.find(network_id)


def network_update(context, network_id, values):
network_ref = network_get(context, network_id)
for (key, value) in values.iteritems():
network_ref[key] = value
network_ref.save()


def network_create(context, values):
network_ref = models.Network()
for (key, value) in values.iteritems():
network_ref[key] = value
network_ref.save()
return network_ref.id

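An aside on volume_allocate_shelf_and_blade above: with_lockmode("update") makes SQLAlchemy issue SELECT ... FOR UPDATE, so two volume services cannot be handed the same free ExportDevice row, but as the NOTE(vish) comment says the lock is a no-op on SQLite. Purely as an illustration of one possible workaround, and not part of this commit, the row could instead be claimed with a conditional UPDATE, which stays race-free without row locks; the table and column names below are assumed from the model definitions:

def claim_export_device(session, volume_id, export_device_id):
    # Atomically claim the row only if it is still unassigned; rowcount
    # reveals whether this caller won the race or should try another row.
    result = session.execute(
        "UPDATE export_devices SET volume_id = :vid "
        "WHERE id = :eid AND volume_id IS NULL",
        {'vid': volume_id, 'eid': export_device_id})
    return result.rowcount == 1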
@@ -231,7 +231,6 @@ class Instance(Base, NovaBase):
class Volume(Base, NovaBase):
__tablename__ = 'volumes'
id = Column(Integer, primary_key=True)
volume_id = Column(String(255))

user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False)
project_id = Column(String(255)) #, ForeignKey('projects.id'))

@@ -21,6 +21,7 @@ import logging
from twisted.internet import defer

from nova import exception
from nova import db
from nova import flags
from nova import models
from nova import test
@@ -89,7 +90,7 @@ class VolumeTestCase(test.TrialTestCase):
self.assertFailure(self.volume.create_volume(vol_size,
user_id,
project_id),
volume_service.NoMoreBlades)
db.sqlalchemy.api.NoMoreBlades)
for id in vols:
yield self.volume.delete_volume(id)

@@ -102,23 +103,21 @@ class VolumeTestCase(test.TrialTestCase):
project_id = 'fake'
mountpoint = "/dev/sdf"
volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
vol = models.Volume.find(volume_id)
self.volume.start_attach(volume_id, instance_id, mountpoint)
if FLAGS.fake_tests:
self.volume.finish_attach(volume_id)
db.volume_attached(None, volume_id, instance_id, mountpoint)
else:
rv = yield self.compute.attach_volume(instance_id,
volume_id,
mountpoint)
vol = db.volume_get(None, volume_id)
self.assertEqual(vol.status, "in-use")
self.assertEqual(vol.attach_status, "attached")
self.assertEqual(vol.instance_id, instance_id)
self.assertEqual(vol.mountpoint, mountpoint)

self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
self.volume.start_detach(volume_id)
if FLAGS.fake_tests:
self.volume.finish_detach(volume_id)
db.volume_detached(None, volume_id)
else:
rv = yield self.volume.detach_volume(instance_id,
volume_id)

@@ -26,12 +26,11 @@ import logging

from twisted.internet import defer

from nova import db
from nova import exception
from nova import flags
from nova import models
from nova import process
from nova import service
from nova import utils
from nova import validate


@@ -55,10 +54,6 @@ flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')


class NoMoreBlades(exception.Error):
pass


class VolumeService(service.Service):
"""
There is one VolumeNode running on each host.
@@ -71,7 +66,7 @@ class VolumeService(service.Service):

@defer.inlineCallbacks
@validate.rangetest(size=(0, 1000))
def create_volume(self, size, user_id, project_id):
def create_volume(self, size, user_id, project_id, context=None):
"""
Creates an exported volume (fake or real),
restarts exports to make it available.
@@ -79,108 +74,88 @@ class VolumeService(service.Service):
"""
logging.debug("Creating volume of size: %s" % (size))

vol = models.Volume()
vol.volume_id = utils.generate_uid('vol')
vol.node_name = FLAGS.node_name
vol.size = size
vol.user_id = user_id
vol.project_id = project_id
vol.availability_zone = FLAGS.storage_availability_zone
vol.status = "creating" # creating | available | in-use
vol.attach_status = "detached" # attaching | attached | detaching | detached
vol.save()
yield self._exec_create_volume(vol)
yield self._setup_export(vol)
vol = {}
vol['node_name'] = FLAGS.node_name
vol['size'] = size
vol['user_id'] = user_id
vol['project_id'] = project_id
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating" # creating | available | in-use
# attaching | attached | detaching | detached
vol['attach_status'] = "detached"
volume_id = db.volume_create(context, vol)
yield self._exec_create_volume(volume_id, size)
(shelf_id, blade_id) = db.volume_allocate_shelf_and_blade(context,
volume_id)
yield self._exec_create_export(volume_id, shelf_id, blade_id)
# TODO(joshua): We need to trigger a fanout message
# for aoe-discover on all the nodes
vol.status = "available"
vol.save()
logging.debug("restarting exports")
yield self._exec_ensure_exports()
defer.returnValue(vol.id)
db.volume_update(context, volume_id, {'status': 'available'})
logging.debug("restarting exports")
defer.returnValue(volume_id)

@defer.inlineCallbacks
def delete_volume(self, volume_id):
def delete_volume(self, volume_id, context=None):
logging.debug("Deleting volume with id of: %s" % (volume_id))
vol = models.Volume.find(volume_id)
if vol.attach_status == "attached":
volume_ref = db.volume_get(context, volume_id)
if volume_ref['attach_status'] == "attached":
raise exception.Error("Volume is still attached")
if vol.node_name != FLAGS.node_name:
if volume_ref['node_name'] != FLAGS.node_name:
raise exception.Error("Volume is not local to this node")
yield self._exec_delete_volume(vol)
yield vol.delete()
shelf_id, blade_id = db.volume_get_shelf_and_blade(context,
volume_id)
yield self._exec_remove_export(volume_id, shelf_id, blade_id)
yield self._exec_delete_volume(volume_id)
db.volume_destroy(context, volume_id)
defer.returnValue(True)

@defer.inlineCallbacks
def _exec_create_volume(self, vol):
def _exec_create_volume(self, volume_id, size):
if FLAGS.fake_storage:
defer.returnValue(None)
if str(vol.size) == '0':
if int(size) == 0:
sizestr = '100M'
else:
sizestr = '%sG' % vol.size
sizestr = '%sG' % size
yield process.simple_execute(
"sudo lvcreate -L %s -n %s %s" % (sizestr,
vol.volume_id,
volume_id,
FLAGS.volume_group),
error_ok=1)

@defer.inlineCallbacks
def _exec_delete_volume(self, vol):
def _exec_delete_volume(self, volume_id):
if FLAGS.fake_storage:
defer.returnValue(None)
yield process.simple_execute(
"sudo lvremove -f %s/%s" % (FLAGS.volume_group,
vol.volume_id), error_ok=1)
volume_id), error_ok=1)

@defer.inlineCallbacks
def _setup_export(self, vol):
# FIXME: abstract this. also remove vol.export_device.xxx cheat
session = models.NovaBase.get_session()
query = session.query(models.ExportDevice).filter_by(volume=None)
export_device = query.with_lockmode("update").first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not export_device:
raise NoMoreBlades()
export_device.volume_id = vol.id
session.add(export_device)
session.commit()
# FIXME: aoe_device is redundant, should be turned into a method
vol.aoe_device = "e%s.%s" % (export_device.shelf_id,
export_device.blade_id)
vol.save()
yield self._exec_setup_export(vol)

@defer.inlineCallbacks
def _exec_setup_export(self, vol):
def _exec_create_export(self, volume_id, shelf_id, blade_id):
if FLAGS.fake_storage:
defer.returnValue(None)
yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(self, vol.export_device.shelf_id,
vol.export_device.blade_id,
(self,
shelf_id,
blade_id,
FLAGS.aoe_eth_dev,
FLAGS.volume_group,
vol.volume_id), error_ok=1)
volume_id), error_ok=1)


@defer.inlineCallbacks
def _remove_export(self, vol):
if not vol.export_device:
defer.returnValue(False)
yield self._exec_remove_export(vol)
defer.returnValue(True)

@defer.inlineCallbacks
def _exec_remove_export(self, vol):
def _exec_remove_export(self, _volume_id, shelf_id, blade_id):
if FLAGS.fake_storage:
defer.returnValue(None)
yield process.simple_execute(
"sudo vblade-persist stop %s %s" % (self, vol.export_device.shelf_id,
vol.export_device.blade_id), error_ok=1)
"sudo vblade-persist stop %s %s" % (self, shelf_id,
blade_id), error_ok=1)
yield process.simple_execute(
"sudo vblade-persist destroy %s %s" % (self, vol.export_device.shelf_id,
vol.export_device.blade_id), error_ok=1)
"sudo vblade-persist destroy %s %s" % (self, shelf_id,
blade_id), error_ok=1)
@defer.inlineCallbacks
def _exec_ensure_exports(self):
if FLAGS.fake_storage:
@@ -198,30 +173,3 @@ class VolumeService(service.Service):
yield process.simple_execute(
"sudo vgcreate %s %s" % (FLAGS.volume_group,
FLAGS.storage_dev))

def start_attach(self, volume_id, instance_id, mountpoint):
vol = models.Volume.find(volume_id)
vol.instance_id = instance_id
vol.mountpoint = mountpoint
vol.status = "in-use"
vol.attach_status = "attaching"
vol.attach_time = utils.isotime()
vol.save()

def finish_attach(self, volume_id):
vol = models.Volume.find(volume_id)
vol.attach_status = "attached"
vol.save()

def start_detach(self, volume_id):
vol = models.Volume.find(volume_id)
vol.attach_status = "detaching"
vol.save()

def finish_detach(self, volume_id):
vol = models.Volume.find(volume_id)
vol.instance_id = None
vol.mountpoint = None
vol.status = "available"
vol.attach_status = "detached"
vol.save()

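Since the service entry points above are Twisted inlineCallbacks generators, callers yield on them and receive results via defer.returnValue. A minimal usage sketch under fake_storage, mirroring the test case earlier in this commit; the direct construction of VolumeService and the surrounding driver function are assumptions made only for illustration:

from twisted.internet import defer

from nova.volume import service as volume_service


@defer.inlineCallbacks
def exercise_volume():
    # assumed: constructed directly, as the volume test case appears to do
    volume = volume_service.VolumeService()
    # create_volume hands back the new volume id through defer.returnValue
    volume_id = yield volume.create_volume(1, 'fake', 'fake')
    yield volume.delete_volume(volume_id)
    defer.returnValue(volume_id)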