Remove unused code from volumes module

The direct engine was removed, so this code is no longer used.
blueprint: remove-direct-engine

Change-Id: I8b8060185f88c959341479f34db335a978102aa2
Vitaly Gridnev 2015-11-20 13:02:19 +03:00
parent 9c713e551a
commit b94ba1f3da
3 changed files with 0 additions and 190 deletions


@@ -21,7 +21,6 @@ from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LW
@@ -37,8 +36,6 @@ conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('api_version', 'sahara.utils.openstack.cinder',
                group='cinder')


def _get_timeout_for_disk_preparing(cluster):
@@ -103,110 +100,6 @@ def _can_use_xfs(instances):
    return True


def _count_instances_to_attach(instances):
    return len([inst for inst in instances if
                inst.node_group.volumes_per_node > 0])


def _count_volumes_to_mount(instances):
    return sum([inst.node_group.volumes_per_node for inst in instances])


def attach_to_instances(instances):
    instances_to_attach = _count_instances_to_attach(instances)
    if instances_to_attach == 0:
        mount_to_instances(instances)
        return

    cpo.add_provisioning_step(
        instances[0].cluster_id, _("Attach volumes to instances"),
        instances_to_attach)

    with context.ThreadGroup() as tg:
        for instance in instances:
            if instance.node_group.volumes_per_node > 0:
                with context.set_current_instance_id(instance.instance_id):
                    tg.spawn(
                        'attach-volumes-for-instance-%s'
                        % instance.instance_name, _attach_volumes_to_node,
                        instance.node_group, instance)

    mount_to_instances(instances)


@poll_utils.poll_status(
    'await_attach_volumes', _("Await for attaching volumes to instances"),
    sleep=2)
def _await_attach_volumes(instance, devices):
    return _count_attached_devices(instance, devices) == len(devices)


@cpo.event_wrapper(mark_successful_on_exit=True)
def _attach_volumes_to_node(node_group, instance):
    ctx = context.ctx()
    size = node_group.volumes_size
    volume_type = node_group.volume_type
    devices = []
    for idx in range(1, node_group.volumes_per_node + 1):
        display_name = "volume_" + instance.instance_name + "_" + str(idx)
        device = _create_attach_volume(
            ctx, instance, size, volume_type,
            node_group.volume_local_to_instance, display_name,
            node_group.volumes_availability_zone)
        devices.append(device)
        LOG.debug("Attached volume {device} to instance".format(device=device))

    _await_attach_volumes(instance, devices)


@poll_utils.poll_status(
    'volume_available_timeout', _("Await for volume become available"),
    sleep=1)
def _await_available(volume):
    volume = cinder.get_volume(volume.id)
    if volume.status == 'error':
        raise ex.SystemError(_("Volume %s has error status") % volume.id)

    return volume.status == 'available'


def _create_attach_volume(ctx, instance, size, volume_type,
                          volume_local_to_instance, name=None,
                          availability_zone=None):
    if CONF.cinder.api_version == 1:
        kwargs = {'size': size, 'display_name': name}
    else:
        kwargs = {'size': size, 'name': name}

    kwargs['volume_type'] = volume_type
    if availability_zone is not None:
        kwargs['availability_zone'] = availability_zone

    if volume_local_to_instance:
        kwargs['scheduler_hints'] = {'local_to_instance': instance.instance_id}

    volume = b.execute_with_retries(cinder.client().volumes.create, **kwargs)
    conductor.append_volume(ctx, instance, volume.id)
    _await_available(volume)

    resp = b.execute_with_retries(nova.client().volumes.create_server_volume,
                                  instance.instance_id, volume.id, None)
    return resp.device


def _count_attached_devices(instance, devices):
    code, part_info = instance.remote().execute_command('cat /proc/partitions')

    count = 0
    for line in part_info.split('\n')[1:]:
        tokens = line.split()
        if len(tokens) > 3:
            dev = '/dev/' + tokens[3]
            if dev in devices:
                count += 1

    return count


def mount_to_instances(instances):
    if len(instances) == 0:
        return


@@ -17,11 +17,9 @@ from cinderclient.v1 import volumes as vol_v1
from cinderclient.v2 import volumes as vol_v2
import mock
from sahara.conductor import resource as r
from sahara import exceptions as ex
from sahara.service import volumes
from sahara.tests.unit import base
from sahara.utils import cluster as cluster_utils


class TestAttachVolume(base.SaharaWithDbTestCase):
@@ -79,76 +77,6 @@ class TestAttachVolume(base.SaharaWithDbTestCase):
        self.assertIsNone(
            volumes.detach_from_instance(instance))

    @base.mock_thread_group
    @mock.patch('sahara.service.volumes.mount_to_instances')
    @mock.patch('sahara.service.volumes._await_attach_volumes')
    @mock.patch('sahara.service.volumes._create_attach_volume')
    @mock.patch('sahara.utils.cluster_progress_ops.add_successful_event')
    @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')
    def test_attach(self, add_step, add_event, p_create_attach_vol, p_await,
                    p_mount):
        p_create_attach_vol.side_effect = ['/dev/vdb', '/dev/vdc'] * 2
        p_await.return_value = None
        p_mount.return_value = None
        add_event.return_value = None
        add_step.return_value = None

        instance1 = {'id': '1',
                     'instance_id': '123',
                     'instance_name': 'inst_1'}
        instance2 = {'id': '2',
                     'instance_id': '456',
                     'instance_name': 'inst_2'}
        ng = {'volumes_per_node': 2,
              'volumes_size': 2,
              'volumes_availability_zone': None,
              'volume_mount_prefix': '/mnt/vols',
              'volume_type': None,
              'name': 'master',
              'cluster_id': '11',
              'instances': [instance1, instance2],
              'volume_local_to_instance': False}
        cluster = r.ClusterResource({'node_groups': [ng]})
        volumes.attach_to_instances(cluster_utils.get_instances(cluster))
        self.assertEqual(4, p_create_attach_vol.call_count)
        self.assertEqual(2, p_await.call_count)
        self.assertEqual(1, p_mount.call_count)

    @mock.patch('sahara.utils.poll_utils._get_consumed', return_value=0)
    @mock.patch('sahara.context.sleep')
    @mock.patch('sahara.service.volumes._count_attached_devices')
    def test_await_attach_volume(self, dev_count, p_sleep, p_get_cons):
        self.override_config('await_attach_volumes', 0, group='timeouts')
        dev_count.return_value = 2
        p_sleep.return_value = None
        instance = r.InstanceResource({'instance_id': '123454321',
                                       'instance_name': 'instt'})
        self.assertIsNone(volumes._await_attach_volumes(
            instance, ['/dev/vda', '/dev/vdb']))
        self.assertRaises(ex.TimeoutException, volumes._await_attach_volumes,
                          instance, ['/dev/vda', '/dev/vdb', '/dev/vdc'])

    def test_count_attached_devices(self):
        partitions = """major minor  #blocks  name
   7        0   41943040 vdd
   7        1  102400000 vdc
   7        1  222222222 vdc1
   8        0  976762584 vda
   8        0  111111111 vda1
   8        1  842576896 vdb"""

        instance = self._get_instance()
        ex_cmd = instance.remote().execute_command
        ex_cmd.side_effect = [(0, partitions)]
        self.assertEqual(1, volumes._count_attached_devices(
            instance, ['/dev/vdd', '/dev/vdx']))

    def _get_instance(self):
        inst_remote = mock.MagicMock()
        inst_remote.execute_command.return_value = 0


@@ -42,22 +42,11 @@ timeouts_opts = [
    cfg.IntOpt('delete_instances_timeout',
               default=DEFAULT_TIMEOUT,
               help="Wait for instances to be deleted, in seconds"),
    cfg.IntOpt('await_for_instances_active',
               default=DEFAULT_TIMEOUT,
               help="Wait for instances to become active, in seconds"),

    # volumes opts
    cfg.IntOpt(
        'detach_volume_timeout', default=300,
        help='Timeout for detaching volumes from instance, in seconds'),
    cfg.IntOpt('volume_available_timeout',
               default=DEFAULT_TIMEOUT,
               help="Wait for volumes to become available, in seconds"),
    cfg.IntOpt('await_attach_volumes',
               default=10,
               help="Wait for attaching volumes to instances, in seconds")
]

timeouts = cfg.OptGroup(name='timeouts',