Fix RBD connection with a customized Ceph cluster name

The current RBD connector assumes the Ceph cluster name is 'ceph'; if the
cluster has a different name, the backup manager is unable to connect to
the Ceph cluster.

This fix creates a temporary Ceph conf file that includes the monitor
hosts and a client section for the Cinder user specifying the keyring
path. The same conf file is passed to the Linux RBD initiator to connect
to the Ceph cluster, after which the temporary file is deleted.

The temporary conf file contains the information required to connect to
the Ceph cluster, for example:

mon_host = <ip>:<port>
[client.<user>]
keyring = <user_keyring_path>

This fix assumes that the keyring for the Ceph Cinder volume user exists
in the /etc/ceph/ directory.
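
For illustration, assuming a hypothetical cluster named 'backup' and a
Cinder user named 'cinder' with a single monitor at 192.168.10.2:6789,
the generated temporary file would contain:

mon_host = 192.168.10.2:6789
[client.cinder]
keyring = /etc/ceph/backup.client.cinder.keyring

The cluster name, user, and monitor address above are examples only; the
actual values are taken from the connection properties.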

Change-Id: Ie7583fff22e2b54324e70f89d9df98a3a6b99e7b
Closes-Bug: #1609659
Naga Venkata 2016-08-04 11:46:41 +05:30
parent dbff3f2c6d
commit f0491dbe7d
2 changed files with 60 additions and 5 deletions


@@ -12,6 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.

import os
import tempfile

from oslo_concurrency import processutils as putils
from oslo_log import log as logging
@@ -56,19 +60,46 @@ class RBDConnector(base.BaseLinuxConnector):
        # TODO(e0ne): Implement this for local volume.
        return []

    def _create_ceph_conf(self, monitor_ips, monitor_ports,
                          cluster_name, user):
        try:
            fd, ceph_conf_path = tempfile.mkstemp()
            monitors = ["%s:%s" % (ip, port) for ip, port
                        in zip(monitor_ips, monitor_ports)]
            mon_hosts = "mon_host = %s" % (','.join(monitors))
            client_section = "[client.%s]" % user
            keyring = ("keyring = /etc/ceph/%s.client.%s.keyring" %
                       (cluster_name, user))
            with os.fdopen(fd, 'w') as conf_file:
                conf_file.writelines([mon_hosts, "\n",
                                      client_section, "\n", keyring])
            return ceph_conf_path
        except IOError:
            msg = (_("Failed to write data to %s.") % (ceph_conf_path))
            raise exception.BrickException(msg=msg)

    def _get_rbd_handle(self, connection_properties):
        try:
            user = connection_properties['auth_username']
            pool, volume = connection_properties['name'].split('/')
            conf = connection_properties.get('conffile')
            cluster_name = connection_properties.get('cluster_name')
            monitor_ips = connection_properties.get('hosts')
            monitor_ports = connection_properties.get('ports')
        except IndexError:
            msg = _("Connect volume failed, malformed connection properties")
            raise exception.BrickException(msg=msg)

        rbd_client = linuxrbd.RBDClient(user, pool)
        conf = self._create_ceph_conf(monitor_ips, monitor_ports,
                                      str(cluster_name), user)
        rbd_client = linuxrbd.RBDClient(user, pool, conffile=conf,
                                        rbd_cluster_name=str(cluster_name))
        rbd_volume = linuxrbd.RBDVolume(rbd_client, volume)
        rbd_handle = linuxrbd.RBDVolumeIOWrapper(
            linuxrbd.RBDImageMetadata(rbd_volume, pool, user, conf))
        if os.path.exists(conf):
            os.remove(conf)
        return rbd_handle

    @staticmethod


@@ -28,10 +28,16 @@ class RBDConnectorTestCase(test_connector.ConnectorTestCase):
        self.user = 'fake_user'
        self.pool = 'fake_pool'
        self.volume = 'fake_volume'
        self.clustername = 'fake_ceph'
        self.hosts = ['192.168.10.2']
        self.ports = ['6789']
        self.connection_properties = {
            'auth_username': self.user,
            'name': '%s/%s' % (self.pool, self.volume),
            'cluster_name': self.clustername,
            'hosts': self.hosts,
            'ports': self.ports,
        }

    def test_get_search_path(self):
@@ -56,16 +62,20 @@ class RBDConnectorTestCase(test_connector.ConnectorTestCase):
    @mock.patch('os_brick.initiator.linuxrbd.rbd')
    @mock.patch('os_brick.initiator.linuxrbd.rados')
    def test_connect_volume(self, mock_rados, mock_rbd):
    @mock.patch.object(rbd.RBDConnector, '_create_ceph_conf')
    @mock.patch('os.path.exists')
    def test_connect_volume(self, mock_path, mock_conf, mock_rados, mock_rbd):
        """Test the connect volume case."""
        rbd_connector = rbd.RBDConnector(None)
        mock_path.return_value = False
        mock_conf.return_value = "/tmp/fake_dir/fake_ceph.conf"
        device_info = rbd_connector.connect_volume(self.connection_properties)

        # Ensure rados is instantiated correctly
        mock_rados.Rados.assert_called_once_with(
            clustername='ceph',
            clustername=self.clustername,
            rados_id=utils.convert_str(self.user),
            conffile='/etc/ceph/ceph.conf')
            conffile='/tmp/fake_dir/fake_ceph.conf')

        # Ensure correct calls to connect to cluster
        self.assertEqual(1, mock_rados.Rados.return_value.connect.call_count)
@@ -82,6 +92,20 @@ class RBDConnectorTestCase(test_connector.ConnectorTestCase):
        self.assertTrue(isinstance(device_info['path'],
                                   linuxrbd.RBDVolumeIOWrapper))

    @mock.patch('os_brick.initiator.connectors.rbd.tempfile.mkstemp')
    def test_create_ceph_conf(self, mock_mkstemp):
        mockopen = mock.mock_open()
        fd = mock.sentinel.fd
        tmpfile = mock.sentinel.tmpfile
        mock_mkstemp.return_value = (fd, tmpfile)
        with mock.patch('os.fdopen', mockopen, create=True):
            rbd_connector = rbd.RBDConnector(None)
            conf_path = rbd_connector._create_ceph_conf(
                self.hosts, self.ports, self.clustername, self.user)
        self.assertEqual(conf_path, tmpfile)
        mock_mkstemp.assert_called_once_with()

    @mock.patch.object(priv_rootwrap, 'execute')
    def test_connect_local_volume(self, mock_execute):
        rbd_connector = rbd.RBDConnector(None, do_local_attach=True)
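
For context, here is a minimal usage sketch (not part of this change) of how
a caller such as the backup manager might drive the updated connector. The
property values are hypothetical; the keys and the RBDConnector.connect_volume()
API are the ones exercised by the tests above, and a reachable cluster plus the
Ceph Python bindings are assumed.

from os_brick.initiator.connectors import rbd

# Hypothetical connection properties; 'cluster_name', 'hosts' and 'ports'
# are the keys consumed by the new code path.
connection_properties = {
    'auth_username': 'cinder',
    'name': 'volumes/volume-1234',   # <pool>/<image>
    'cluster_name': 'backup_ceph',
    'hosts': ['192.168.10.2'],
    'ports': ['6789'],
}

connector = rbd.RBDConnector(None)
device_info = connector.connect_volume(connection_properties)
# device_info['path'] is a linuxrbd.RBDVolumeIOWrapper around the attached image.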