Fix building of RPMs

* Update MANIFEST.in
* Update spec file
  - Update dependency to openstack-swift-object 2.3.0
  - Update URL to point to stackforge repo
  - Remove unnecessary dependencies
  - Remove references to glusterfs
* Make makerpm.sh script executable
* Remove .travis.yml (unrelated)
* Remove legacy Glusterfs.py which is no longer used (unrelated)

Reference spec file from upstream openstack-swift:
http://pkgs.fedoraproject.org/cgit/openstack-swift.git/tree/openstack-swift.spec

Change-Id: I4644fa5b258615e0f8c0b3bdc5287fc169388eb2
Signed-off-by: Prashanth Pai <ppai@redhat.com>
Prashanth Pai 2015-07-07 12:55:47 +05:30
parent 11b61294c2
commit 402f2b4954
8 changed files with 26 additions and 577 deletions
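
For context, the spec file below consumes the _name/_version/_release macros that makerpm.sh passes in. A minimal sketch of an equivalent manual build, assuming the Source0 tarball has already been placed in rpmbuild's SOURCES directory (invocation is illustrative; the real makerpm.sh may differ):

# Hypothetical manual invocation; the --define values feed the
# %{!?_version:...} fallbacks at the top of the spec file
# (__PKG_VERSION__ etc. are placeholders replaced at build time).
rpmbuild -ba swiftonfile.spec \
    --define "_name swiftonfile" \
    --define "_version 2.3.0" \
    --define "_release 0"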

@@ -1,14 +0,0 @@
language: python
python:
- "2.6"
- "2.7"
# command to install dependencies
install: "pip install tox nose"
# command to run tests
script:
- if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then tox -e py26; fi
- if [[ $TRAVIS_PYTHON_VERSION == '2.7' ]]; then tox -e py27; fi
- if [[ $TRAVIS_PULL_REQUEST == 'false' ]]; then curl http://build.gluster.org/job/swiftonfile-builds/buildWithParameters?token=swiftonfile\&cause=http://github.com/swiftonfile/swiftonfile\&TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST; fi
- if [[ $TRAVIS_PULL_REQUEST != 'false' ]]; then curl http://build.gluster.org/job/swiftonfile-builds/buildWithParameters?token=swiftonfile\&cause=http://github.com/swiftonfile/swiftonfile/pull/$TRAVIS_PULL_REQUEST\&TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST; fi

@@ -1,4 +1,6 @@
include README.md
include makerpm.sh pkgconfig.py
include .functests .unittests tox.ini requirements.txt test-requirements.txt
include makerpm.sh pkgconfig.py swiftonfile.spec
graft doc
graft etc
graft test
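
MANIFEST.in determines what lands in the source tarball that rpmbuild later unpacks, so the added swiftonfile.spec line is what makes the spec reachable during the RPM build. A quick sanity check (the version glob is hypothetical):

# Confirm the spec file and the doc/etc/test trees now ship in the sdist:
python setup.py sdist
tar -tzf dist/swiftonfile-*.tar.gz | grep -E 'swiftonfile\.spec|doc/|etc/|test/'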

@@ -1,38 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import OptionParser
if __name__ == '__main__':
# check if swift is installed
try:
from swiftonfile.swift.common.Glusterfs import get_mnt_point, unmount
except ImportError:
import sys
sys.exit("Openstack Swift does not appear to be installed properly")
op = OptionParser(usage="%prog [options...]")
op.add_option('--volname', dest='vol', type=str)
op.add_option('--last', dest='last', type=str)
(opts, args) = op.parse_args()
mnt_point = get_mnt_point(opts.vol)
if mnt_point:
unmount(mnt_point)
else:
sys.exit("get_mnt_point returned none for mount point")

makerpm.sh Normal file → Executable file

@@ -1,90 +1,65 @@
%define _confdir %{_sysconfdir}/swift
# The following values are provided by passing the following arguments
# to rpmbuild. For example:
# --define "_version 1.0" --define "_release 1" --define "_name g4s"
#
%{!?_version:%define _version __PKG_VERSION__}
%{!?_name:%define _name __PKG_NAME__}
%{!?_release:%define _release __PKG_RELEASE__}
Summary : SwiftOnFile enables Swift objects to be accessed as files.
Summary : Enables Swift objects to be accessed as files and files as objects
Name : %{_name}
Version : %{_version}
Release : %{_release}%{?dist}
Group : Application/File
URL : https://github.com/swiftonfile/swiftonfile
Vendor : Fedora Project
Group : Applications/System
URL : https://github.com/stackforge/swiftonfile
Source0 : %{_name}-%{_version}-%{_release}.tar.gz
License : ASL 2.0
BuildArch: noarch
BuildRequires: python
BuildRequires: python-devel
BuildRequires: python-setuptools
Requires : memcached
Requires : openssl
Requires : python
Requires : python-prettytable
Requires : openstack-swift = 1.13.1
Requires : openstack-swift-account = 1.13.1
Requires : openstack-swift-container = 1.13.1
Requires : openstack-swift-object = 1.13.1
Requires : openstack-swift-proxy = 1.13.1
Requires : glusterfs-api >= 3.4.1
Obsoletes: glusterfs-swift-plugin
Obsoletes: glusterfs-swift
Obsoletes: glusterfs-ufo
Obsoletes: glusterfs-swift-container
Obsoletes: glusterfs-swift-object
Obsoletes: glusterfs-swift-proxy
Obsoletes: glusterfs-swift-account
Requires : python-setuptools
Requires : openstack-swift-object = 2.3.0
%description
SwiftOnFile integrates GlusterFS as an alternative back end for OpenStack
Object Storage (Swift) leveraging the existing front end OpenStack Swift code.
Gluster volumes are used to store objects in files, containers are maintained
as top-level directories of volumes, where accounts are mapped one-to-one to
gluster volumes.
SwiftOnFile is a Swift Object Server implementation that enables users to
access the same data, both as an object and as a file. Data can be stored
and retrieved through Swift's REST interface or as files from NAS interfaces
including native GlusterFS, GPFS, NFS and CIFS.
%prep
%setup -q -n swiftonfile-%{_version}
# Let RPM handle the dependencies
rm -f requirements.txt test-requirements.txt
%build
%{__python} setup.py build
%install
rm -rf %{buildroot}
%{__python} setup.py install -O1 --skip-build --root %{buildroot}
mkdir -p %{buildroot}/%{_confdir}/
cp -r etc/* %{buildroot}/%{_confdir}/
# Man Pages
install -d -m 755 %{buildroot}%{_mandir}/man8
for page in doc/man/*.8; do
install -p -m 0644 $page %{buildroot}%{_mandir}/man8
done
# Remove tests
%{__rm} -rf %{buildroot}/%{python_sitelib}/test
%files
%defattr(-,root,root)
%{python_sitelib}/swiftonfile
%{python_sitelib}/swiftonfile-%{_version}_*.egg-info
%{python_sitelib}/swiftonfile-%{_version}*.egg-info
%{_bindir}/swiftonfile-print-metadata
%{_mandir}/man8/*
%dir %{_confdir}
%config(noreplace) %{_confdir}/account-server.conf-gluster
%config(noreplace) %{_confdir}/container-server.conf-gluster
%config(noreplace) %{_confdir}/object-server.conf-gluster
%config(noreplace) %{_confdir}/swift.conf-gluster
%config(noreplace) %{_confdir}/proxy-server.conf-gluster
%config(noreplace) %{_confdir}/fs.conf-gluster
%config(noreplace) %{_confdir}/object-expirer.conf-gluster
%config(noreplace) %{_confdir}/object-server.conf-swiftonfile
%config(noreplace) %{_confdir}/swift.conf-swiftonfile
%clean
rm -rf %{buildroot}
%changelog
* Wed Jul 15 2015 Prashanth Pai <ppai@redhat.com> - 2.3.0-0
- Update spec file to support Kilo release of Swift
* Mon Oct 28 2013 Luis Pabon <lpabon@redhat.com> - 1.10.1-0
- IceHouse Release
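
With requirements.txt removed from the build and the Requires line pinned, RPM itself enforces the Swift dependency. A hedged post-install check, assuming the built package is simply named swiftonfile:

# Verify the pinned dependency on an installed system:
rpm -q --requires swiftonfile | grep openstack-swift-object
# expected: openstack-swift-object = 2.3.0
rpmlint swiftonfile.spec   # optional: lint the spec for packaging errors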

@@ -1,240 +0,0 @@
# Copyright (c) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fcntl
import time
import errno
import logging
import urllib
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common.utils import TRUE_VALUES
from swiftonfile.swift.common.fs_utils import do_ismount
from swiftonfile.swift.common.exceptions import FailureToMountError
#
# Read the fs.conf file once at startup (module load)
#
_fs_conf = ConfigParser()
MOUNT_IP = 'localhost'
RUN_DIR = '/var/run/swift'
SWIFT_DIR = '/etc/swift'
_allow_mount_per_server = False
if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
try:
MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', MOUNT_IP)
except (NoSectionError, NoOptionError):
pass
try:
RUN_DIR = _fs_conf.get('DEFAULT', 'run_dir', RUN_DIR)
except (NoSectionError, NoOptionError):
pass
try:
_allow_mount_per_server = _fs_conf.get('DEFAULT',
'allow_mount_per_server',
_allow_mount_per_server
) in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
def _busy_wait(full_mount_path):
# Iterate a fixed number of times over a given interval,
# waiting for a successful mount
for i in range(0, 5):
if os.path.ismount(full_mount_path):
return True
time.sleep(2)
logging.error('Busy wait for mount timed out for mount %s',
full_mount_path)
return False
def _get_unique_id():
# Each individual server will attempt to get a free lock file,
# sequentially numbered, storing the pid of the holder of that
# file. That number represents the numbered mount point to use
# for its operations.
if not _allow_mount_per_server:
return 0
try:
os.mkdir(RUN_DIR)
except OSError as err:
if err.errno == errno.EEXIST:
pass
unique_id = 0
lock_file_template = os.path.join(RUN_DIR,
'swift.object-server-%03d.lock')
for i in range(1, 201):
lock_file = lock_file_template % i
fd = os.open(lock_file, os.O_CREAT | os.O_RDWR)
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as ex:
os.close(fd)
if ex.errno in (errno.EACCES, errno.EAGAIN):
# This means that some other process has it locked, so they
# own the lock.
continue
raise
except Exception:
os.close(fd)
raise
else:
# We got the lock, write our PID into it, but don't close the
# file; it will be closed when our process exits
os.lseek(fd, 0, os.SEEK_SET)
pid = str(os.getpid()) + '\n'
os.write(fd, pid)
unique_id = i
break
return unique_id
_unique_id = None
def _get_drive_mount_point_name(drive):
"""
Get the GlusterFS mount point name to use for this worker for the target
drive name.
If unique is False, then we just map the drive directly to the mount point
name. If unique is True, then we determine a unique mount point name that
maps to our server PID.
"""
if not _allow_mount_per_server:
# One-to-one mapping of drive to mount point name
mount_point = drive
else:
global _unique_id
if _unique_id is None:
_unique_id = _get_unique_id()
mount_point = ("%s_%03d" % (drive, _unique_id)) \
if _unique_id else drive
return mount_point
def mount(root, drive):
"""
Verify that the path to the device is a mount point and mounted. This
allows us to fast fail on drives that have been unmounted because of
issues, and also prevents us from accidentally filling up the root
partition.
This method effectively replaces the swift.common.constraints.check_mount
method in behavior, adding the ability to auto-mount the volume, which is
dubious (FIXME).
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:returns: True if it is a valid mounted device, False otherwise
"""
if not (urllib.quote_plus(drive) == drive):
return False
mount_point = _get_drive_mount_point_name(drive)
full_mount_path = os.path.join(root, mount_point)
if do_ismount(full_mount_path):
# Don't bother checking volume if it is already a mount point. Allows
# us to use local file systems for unit tests and some functional test
# environments to isolate behaviors from GlusterFS itself.
return True
# FIXME: Possible thundering herd problem here
el = _get_export_list()
for export in el:
if drive == export:
break
else:
logging.error('No export found in %r matching drive, %s', el, drive)
return False
try:
os.makedirs(full_mount_path)
except OSError as err:
if err.errno == errno.EEXIST:
pass
else:
logging.exception('Could not create mount path hierarchy:'
' %s' % full_mount_path)
return False
mnt_cmd = 'mount -t glusterfs %s:%s %s' % (MOUNT_IP, export,
full_mount_path)
if _allow_mount_per_server:
if os.system(mnt_cmd):
logging.exception('Mount failed %s' % (mnt_cmd))
return True
lck_file = os.path.join(RUN_DIR, '%s.lock' % mount_point)
try:
os.mkdir(RUN_DIR)
except OSError as err:
if err.errno == errno.EEXIST:
pass
else:
logging.exception('Could not create RUN_DIR: %s' % full_mount_path)
return False
fd = os.open(lck_file, os.O_CREAT | os.O_RDWR)
with os.fdopen(fd, 'r+b') as f:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as ex:
if ex.errno in (errno.EACCES, errno.EAGAIN):
# This means that some other process is mounting the
# filesystem, so wait for the mount process to complete
return _busy_wait(full_mount_path)
if os.system(mnt_cmd) or not _busy_wait(full_mount_path):
logging.error('Mount failed %s', mnt_cmd)
return False
return True
def unmount(full_mount_path):
# FIXME: Possible thundering herd problem here
umnt_cmd = 'umount %s 2>> /dev/null' % full_mount_path
if os.system(umnt_cmd):
raise FailureToMountError(
'Unable to unmount %s' % (full_mount_path))
def _get_export_list():
cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
export_list = []
if os.system(cmnd + ' >> /dev/null'):
logging.error('Getting volume info failed, make sure to have'
' passwordless ssh on %s', MOUNT_IP)
else:
fp = os.popen(cmnd)
while True:
item = fp.readline()
if not item:
break
item = item.strip('\n').strip(' ')
if item.lower().startswith('volume name:'):
export_list.append(item.split(':')[1].strip(' '))
return export_list
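
The deleted Glusterfs.py wrapped shell commands for discovering and auto-mounting Gluster volumes. The equivalent manual steps, taken from the removed code itself (MOUNT_IP defaulted to localhost; the volume name and mount path are illustrative):

# List exported volumes (source of _get_export_list):
gluster --remote-host=localhost volume info
# Mount and unmount a volume the way mount()/unmount() shelled out:
mount -t glusterfs localhost:test /mnt/swiftonfile/test
umount /mnt/swiftonfile/test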

@@ -1,236 +0,0 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os, fcntl, errno, shutil
import time
import StringIO
import mock
from tempfile import mkdtemp
import swiftonfile.swift.common.Glusterfs as gfs
def mock_os_path_ismount_false(path):
return False
def mock_os_path_ismount(path):
return True
def mock_get_export_list():
return ['test', 'test2']
def mock_os_system(cmd):
return False
def mock_fcntl_lockf(f, *a, **kw):
raise IOError(errno.EAGAIN, os.strerror(errno.EAGAIN))
def mock_time_sleep(secs):
return True
def _init():
global _RUN_DIR, _OS_SYSTEM, _FCNTL_LOCKF
global _OS_PATH_ISMOUNT, __GET_EXPORT_LIST
_RUN_DIR = gfs.RUN_DIR
_OS_SYSTEM = os.system
_FCNTL_LOCKF = fcntl.lockf
_OS_PATH_ISMOUNT = os.path.ismount
__GET_EXPORT_LIST = gfs._get_export_list
def _init_mock_variables(tmpdir):
os.system = mock_os_system
os.path.ismount = mock_os_path_ismount
try:
os.makedirs(os.path.join(tmpdir, "var", "run"))
except OSError as err:
if err.errno != errno.EEXIST:
raise
gfs.RUN_DIR = os.path.join(tmpdir, 'var', 'run', 'swift')
gfs._get_export_list = mock_get_export_list
def _reset_mock_variables():
gfs.RUN_DIR = _RUN_DIR
gfs._get_export_list = __GET_EXPORT_LIST
os.system = _OS_SYSTEM
fcntl.lockf = _FCNTL_LOCKF
os.path.ismount = _OS_PATH_ISMOUNT
class TestGlusterfs(unittest.TestCase):
""" Tests for common.GlusterFS """
def setUp(self):
_init()
def test_busy_wait_timeout(self):
os.path.ismount = mock_os_path_ismount_false
# setup time mock
real_time_sleep = time.sleep
time.sleep = mock_time_sleep
try:
self.assertFalse(gfs._busy_wait("/"))
finally:
time.sleep = real_time_sleep
def test_busy_wait(self):
self.assertTrue(gfs._busy_wait("/"))
def test_mount(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile')
drive = 'test'
_init_mock_variables(tmpdir)
assert gfs.mount(root, drive)
finally:
_reset_mock_variables()
shutil.rmtree(tmpdir)
def test_mount_egain(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile')
drive = 'test'
_init_mock_variables(tmpdir)
assert gfs.mount(root, drive)
fcntl.lockf = mock_fcntl_lockf
assert gfs.mount(root, drive)
finally:
shutil.rmtree(tmpdir)
def test_mount_get_export_list_err(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile')
drive = 'test3'
_init_mock_variables(tmpdir)
gfs._get_export_list = mock_get_export_list
assert not gfs.mount(root, drive)
finally:
shutil.rmtree(tmpdir)
def test_get_drive_mount_point_name_unique_id_None(self):
"""
Using the public method mount to test _get_drive_mount_point_name
"""
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile')
drive = 'test'
_init_mock_variables(tmpdir)
gfs._allow_mount_per_server = True
self.assertTrue(gfs.mount(root, drive))
finally:
gfs._allow_mount_per_server = False
_reset_mock_variables()
shutil.rmtree(tmpdir)
def test_get_drive_mount_point_name_unique_id_exists(self):
"""
Using the public method mount to test _get_drive_mount_point_name
and the _unique_id is already defined
"""
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile')
drive = 'test'
_init_mock_variables(tmpdir)
gfs._allow_mount_per_server = True
gfs._unique_id = 0
self.assertTrue(gfs.mount(root, drive))
finally:
gfs._allow_mount_per_server = False
gfs._unique_id = None
_reset_mock_variables()
shutil.rmtree(tmpdir)
def test_invalid_drive_name(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile')
drive = 'te st'
_init_mock_variables(tmpdir)
self.assertFalse(gfs.mount(root, drive))
finally:
_reset_mock_variables()
shutil.rmtree(tmpdir)
def test_already_mounted(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile')
drive = 'test'
_init_mock_variables(tmpdir)
def mock_do_ismount(path):
return True
with mock.patch("swiftonfile.swift.common.Glusterfs.do_ismount",
mock_do_ismount):
self.assertTrue(gfs.mount(root, drive))
finally:
_reset_mock_variables()
shutil.rmtree(tmpdir)
def test_get_export_list(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/swiftonfile-object')
drive = 'test'
# undo mocking of _get_export_list
tmp_get_export_list = gfs._get_export_list
_init_mock_variables(tmpdir)
gfs._get_export_list = tmp_get_export_list
def mock_os_popen(cmd):
mock_string = """
Volume Name: test
Type: Distribute
Volume ID: 361cfe52-75c0-4a76-88af-0092a92270b5
Status: Started
Number of Bricks: 1
Transport-type: tcp
Bricks:
Brick1: myhost:/export/brick/test
Volume Name: test2
Type: Distribute
Volume ID: a6df4e2b-6040-4e19-96f1-b8d8c0a29528
Status: Started
Number of Bricks: 1
Transport-type: tcp
Bricks:
Brick1: myhost:/export/brick/test2
"""
return StringIO.StringIO(mock_string)
# mock os_popen
with mock.patch('os.popen', mock_os_popen):
self.assertTrue(gfs.mount(root, drive))
finally:
_reset_mock_variables()
shutil.rmtree(tmpdir)
def tearDown(self):
_reset_mock_variables()

@@ -26,7 +26,7 @@ import tarfile
import shutil
from collections import defaultdict
from mock import patch
from swiftonfile.swift.common import utils, Glusterfs
from swiftonfile.swift.common import utils
from swiftonfile.swift.common.exceptions import SwiftOnFileSystemOSError
from swift.common.exceptions import DiskFileNoSpace