From bfca4f5989ee9b7c706118d3ee921767b5c01350 Mon Sep 17 00:00:00 2001
From: Vitaly Gridnev
Date: Tue, 22 Sep 2015 17:43:31 +0300
Subject: [PATCH] Use xfs for formatting

Xfsprogs will be preinstalled on all images, but for the fake plugin
we will try to install it.

Change-Id: Ia02ed523a77118a882b4a49d07b25bdddc1f31dc
Closes-bug: 1493548
---
 sahara/plugins/provisioning.py             |  8 ++-
 sahara/service/volumes.py                  | 83 ++++++++++++++++++++---
 sahara/tests/unit/service/test_volumes.py  |  5 +-
 3 files changed, 82 insertions(+), 14 deletions(-)

diff --git a/sahara/plugins/provisioning.py b/sahara/plugins/provisioning.py
index 0bae5207..449fc942 100644
--- a/sahara/plugins/provisioning.py
+++ b/sahara/plugins/provisioning.py
@@ -213,6 +213,12 @@ class ValidationError(object):
 
 # COMMON FOR ALL PLUGINS CONFIGS
 
+XFS_ENABLED = Config(
+    "Enable XFS", 'general', 'cluster', priority=1,
+    default_value=True, config_type="bool", is_optional=True,
+    description='Enables XFS for formatting'
+)
+
 DISKS_PREPARING_TIMEOUT = Config(
     "Timeout for disk preparing", 'general', 'cluster', priority=1,
     default_value=300, config_type="int", is_optional=True,
@@ -242,4 +248,4 @@ HEAT_WAIT_CONDITION_TIMEOUT = Config(
 
 def list_of_common_configs():
     return [DISKS_PREPARING_TIMEOUT, NTP_ENABLED, NTP_URL,
-            HEAT_WAIT_CONDITION_TIMEOUT]
+            HEAT_WAIT_CONDITION_TIMEOUT, XFS_ENABLED]
diff --git a/sahara/service/volumes.py b/sahara/service/volumes.py
index 90049ac8..399f9ba9 100644
--- a/sahara/service/volumes.py
+++ b/sahara/service/volumes.py
@@ -51,6 +51,58 @@ def _get_timeout_for_disk_preparing(cluster):
     return int(plugin_base.DISKS_PREPARING_TIMEOUT.default_value)
 
 
+def _is_xfs_enabled(cluster):
+    configs = cluster.cluster_configs.to_dict()
+    option_name = plugin_base.XFS_ENABLED.name
+    option_target = plugin_base.XFS_ENABLED.applicable_target
+    try:
+        return bool(configs[option_target][option_name])
+    except Exception:
+        return bool(plugin_base.XFS_ENABLED.default_value)
+
+
+def _get_os_distrib(remote):
+    return remote.execute_command('lsb_release -is')[1].strip().lower()
+
+
+def _check_installed_xfs(instance):
+    redhat = "rpm -q xfsprogs || yum install -y xfsprogs"
+    debian = "dpkg -s xfsprogs || apt-get -y install xfsprogs"
+
+    cmd_map = {
+        "centos": redhat,
+        "fedora": redhat,
+        "redhatenterpriseserver": redhat,
+        "ubuntu": debian,
+        'debian': debian
+    }
+
+    with instance.remote() as r:
+        distro = _get_os_distrib(r)
+        if not cmd_map.get(distro):
+            LOG.warning(
+                _LW("Cannot verify installation of XFS tools for "
+                    "unknown distro {distro}.").format(distro=distro))
+            return False
+        try:
+            r.execute_command(cmd_map.get(distro), run_as_root=True)
+            return True
+        except Exception as e:
+            LOG.warning(
+                _LW("Cannot install xfsprogs: {reason}").format(reason=e))
+            return False
+
+
+def _can_use_xfs(instances):
+    cluster = instances[0].cluster
+    if not _is_xfs_enabled(cluster):
+        return False
+    for instance in instances:
+        if not _check_installed_xfs(instance):
+            return False
+    return True
+
+
 def _count_instances_to_attach(instances):
     result = 0
     for instance in instances:
@@ -166,6 +218,8 @@ def mount_to_instances(instances):
         instances[0].cluster_id, _("Mount volumes to instances"),
         _count_volumes_to_mount(instances))
 
+    use_xfs = _can_use_xfs(instances)
+
     for instance in instances:
         with context.set_current_instance_id(instance.instance_id):
             devices = _find_instance_devices(instance)
@@ -176,13 +230,14 @@ def mount_to_instances(instances):
                 # and can be done in parallel, launch one thread per disk.
                 for device in devices:
                     tg.spawn('format-device-%s' % device, _format_device,
-                             instance, device, formatted_devices, lock)
+                             instance, device, use_xfs, formatted_devices,
+                             lock)
 
             conductor.instance_update(
                 context.current(), instance,
                 {"storage_devices_number": len(formatted_devices)})
             for idx, dev in enumerate(formatted_devices):
-                _mount_volume_to_node(instance, idx+1, dev)
+                _mount_volume_to_node(instance, idx+1, dev, use_xfs)
 
 
 def _find_instance_devices(instance):
@@ -211,14 +266,15 @@ def _find_instance_devices(instance):
 
 
 @cpo.event_wrapper(mark_successful_on_exit=True)
-def _mount_volume_to_node(instance, index, device):
+def _mount_volume_to_node(instance, index, device, use_xfs):
     LOG.debug("Mounting volume {device} to instance".format(device=device))
     mount_point = instance.node_group.volume_mount_prefix + str(index)
-    _mount_volume(instance, device, mount_point)
+    _mount_volume(instance, device, mount_point, use_xfs)
     LOG.debug("Mounted volume to instance")
 
 
-def _format_device(instance, device, formatted_devices=None, lock=None):
+def _format_device(
+        instance, device, use_xfs, formatted_devices=None, lock=None):
     with instance.remote() as r:
         try:
             timeout = _get_timeout_for_disk_preparing(instance.cluster)
@@ -228,10 +284,11 @@ def _format_device(instance, device, formatted_devices=None, lock=None):
             # - use 'dir_index' for faster directory listings
             # - use 'extents' to work faster with large files
             # - disable journaling
             fs_opts = '-F -m 1 -O dir_index,extents,^has_journal'
-            r.execute_command('sudo mkfs.ext4 %s %s' % (fs_opts, device),
-                              timeout=timeout)
+            command = 'sudo mkfs.ext4 %s %s' % (fs_opts, device)
+            if use_xfs:
+                command = 'sudo mkfs.xfs %s' % device
+            r.execute_command(command, timeout=timeout)
             if lock:
                 with lock:
                     formatted_devices.append(device)
@@ -241,17 +298,21 @@ def _format_device(instance, device, formatted_devices=None, lock=None):
                 dev=device, reason=e))
 
 
-def _mount_volume(instance, device_path, mount_point):
+def _mount_volume(instance, device_path, mount_point, use_xfs):
     with instance.remote() as r:
         try:
            timeout = _get_timeout_for_disk_preparing(instance.cluster)
 
             # Mount volumes with better performance options:
-            # - enable write-back
+            # - enable write-back for ext4
             # - do not store access time
-            mount_opts = '-o data=writeback,noatime,nodiratime'
+            # - disable barrier for xfs
             r.execute_command('sudo mkdir -p %s' % mount_point)
+            mount_opts = '-o data=writeback,noatime,nodiratime'
+            if use_xfs:
+                mount_opts = "-t xfs -o noatime,nodiratime,nobarrier"
+
             r.execute_command('sudo mount %s %s %s' %
                               (mount_opts, device_path, mount_point),
                               timeout=timeout)
diff --git a/sahara/tests/unit/service/test_volumes.py b/sahara/tests/unit/service/test_volumes.py
index 3708312e..8a2460c1 100644
--- a/sahara/tests/unit/service/test_volumes.py
+++ b/sahara/tests/unit/service/test_volumes.py
@@ -33,12 +33,13 @@ class TestAttachVolume(base.SaharaWithDbTestCase):
         instance = self._get_instance()
         execute_com = instance.remote().execute_command
 
-        self.assertIsNone(volumes._mount_volume(instance, '123', '456'))
+        self.assertIsNone(volumes._mount_volume(instance, '123', '456',
+                                                False))
         self.assertEqual(3, execute_com.call_count)
 
         execute_com.side_effect = ex.RemoteCommandException('cmd')
         self.assertRaises(ex.RemoteCommandException, volumes._mount_volume,
-                          instance, '123', '456')
+                          instance, '123', '456', False)
 
     @mock.patch('sahara.conductor.manager.ConductorManager.cluster_get')
     @mock.patch('cinderclient.v1.volumes.Volume.delete')
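
Note for reviewers (not part of the commit): a minimal standalone sketch of
the behaviour this patch adds, useful for tracing the command selection
without a running cluster. The helper names (format_command, mount_command)
and the sample device/mount paths are illustrative only; the option strings
and the per-distro install commands are copied from the hunks above.

    # sketch.py -- illustrative only; mirrors the patch, does not import sahara.

    # Commands _check_installed_xfs runs to verify (or install) xfsprogs.
    XFS_INSTALL = {
        "centos": "rpm -q xfsprogs || yum install -y xfsprogs",
        "fedora": "rpm -q xfsprogs || yum install -y xfsprogs",
        "redhatenterpriseserver": "rpm -q xfsprogs || yum install -y xfsprogs",
        "ubuntu": "dpkg -s xfsprogs || apt-get -y install xfsprogs",
        "debian": "dpkg -s xfsprogs || apt-get -y install xfsprogs",
    }

    def format_command(device, use_xfs):
        # ext4 keeps the tuned options (1% reserved blocks, dir_index,
        # extents, no journal); xfs is formatted with defaults.
        fs_opts = '-F -m 1 -O dir_index,extents,^has_journal'
        if use_xfs:
            return 'sudo mkfs.xfs %s' % device
        return 'sudo mkfs.ext4 %s %s' % (fs_opts, device)

    def mount_command(device, mount_point, use_xfs):
        # ext4 mounts with write-back and no atime updates; xfs mounts
        # with no atime updates and barriers disabled.
        mount_opts = '-o data=writeback,noatime,nodiratime'
        if use_xfs:
            mount_opts = '-t xfs -o noatime,nodiratime,nobarrier'
        return 'sudo mount %s %s %s' % (mount_opts, device, mount_point)

    if __name__ == '__main__':
        for use_xfs in (False, True):
            print(format_command('/dev/vdb', use_xfs))
            print(mount_command('/dev/vdb', '/volumes/disk1', use_xfs))

Running the sketch prints the ext4 and xfs variants of the mkfs and mount
invocations side by side, which is exactly the decision _format_device and
_mount_volume now make per device based on the use_xfs flag.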