# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
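
"""Unit tests for openstack.common.lockutils."""
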
import errno
import fcntl
import multiprocessing
import os
import shutil
import sys
import tempfile
import threading

import eventlet
from eventlet import greenpool
from eventlet import greenthread
from oslo.config import cfg
from six import moves

from openstack.common.fixture import config
from openstack.common.fixture import lockutils as fixtures
from openstack.common import lockutils
from openstack.common import test


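# BrokenLock simulates a lock whose trylock() always fails with the errno
# passed to the constructor, so tests can exercise the error handling in
# InterProcessLock.acquire().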
class BrokenLock(lockutils.InterProcessLock):
    def __init__(self, name, errno_code):
        super(BrokenLock, self).__init__(name)
        self.errno_code = errno_code

    def unlock(self):
        pass

    def trylock(self):
        err = IOError()
        err.errno = self.errno_code
        raise err


class TestFileLocks(test.BaseTestCase):

    def test_concurrent_green_lock_succeeds(self):
        """Verify spawn_n greenthreads with two locks run concurrently."""
        tmpdir = tempfile.mkdtemp()
        try:
            self.completed = False

            def locka(wait):
                a = lockutils.InterProcessLock(os.path.join(tmpdir, 'a'))
                with a:
                    wait.wait()
                self.completed = True

            def lockb(wait):
                b = lockutils.InterProcessLock(os.path.join(tmpdir, 'b'))
                with b:
                    wait.wait()

            wait1 = eventlet.event.Event()
            wait2 = eventlet.event.Event()
            pool = greenpool.GreenPool()
            pool.spawn_n(locka, wait1)
            pool.spawn_n(lockb, wait2)
            wait2.send()
            eventlet.sleep(0)
            wait1.send()
            pool.waitall()

            self.assertTrue(self.completed)

        finally:
            if os.path.exists(tmpdir):
                shutil.rmtree(tmpdir)


class LockTestCase(test.BaseTestCase):

    def setUp(self):
        super(LockTestCase, self).setUp()
        self.config = self.useFixture(config.Config()).config

    def test_synchronized_wrapped_function_metadata(self):
        @lockutils.synchronized('whatever', 'test-')
        def foo():
            """Bar"""
            pass

        self.assertEqual(foo.__doc__, 'Bar', "Wrapped function's docstring "
                                             "got lost")
        self.assertEqual(foo.__name__, 'foo', "Wrapped function's name "
                                              "got mangled")

    def test_bad_acquire(self):
        lock_dir = tempfile.mkdtemp()
        lock_file = os.path.join(lock_dir, 'lock')
        lock = BrokenLock(lock_file, errno.EBUSY)

        try:
            self.assertRaises(threading.ThreadError, lock.acquire)
        finally:
            try:
                shutil.rmtree(lock_dir)
            except IOError:
                pass

    def test_lock_acquire_release(self):
        lock_dir = tempfile.mkdtemp()
        lock_file = os.path.join(lock_dir, 'lock')
        lock = lockutils.InterProcessLock(lock_file)

        def try_lock():
            lock.release()  # child co-owns it before fork
            try:
                my_lock = lockutils.InterProcessLock(lock_file)
                my_lock.lockfile = open(lock_file, 'w')
                my_lock.trylock()
                my_lock.unlock()
                os._exit(1)
            except IOError:
                os._exit(0)

        def attempt_acquire(count):
            children = []
            for i in range(count):
                child = multiprocessing.Process(target=try_lock)
                child.start()
                children.append(child)
            exit_codes = []
            for child in children:
                child.join()
                exit_codes.append(child.exitcode)
            return sum(exit_codes)

        self.assertTrue(lock.acquire())
        try:
            acquired_children = attempt_acquire(10)
            self.assertEqual(0, acquired_children)
        finally:
            lock.release()

        try:
            acquired_children = attempt_acquire(5)
            self.assertNotEqual(0, acquired_children)
        finally:
            try:
                shutil.rmtree(lock_dir)
            except IOError:
                pass

    def test_lock_internally(self):
        """We can lock across multiple green threads."""
        saved_sem_num = len(lockutils._semaphores)
        seen_threads = list()

        def f(_id):
            with lockutils.lock('testlock2', 'test-', external=False):
                for x in range(10):
                    seen_threads.append(_id)
                    greenthread.sleep(0)

        threads = []
        pool = greenpool.GreenPool(10)
        for i in range(10):
            threads.append(pool.spawn(f, i))

        for thread in threads:
            thread.wait()

        self.assertEqual(len(seen_threads), 100)
        # Looking at the seen threads, split it into chunks of 10, and verify
        # that the last 9 match the first in each chunk.
        for i in range(10):
            for j in range(9):
                self.assertEqual(seen_threads[i * 10],
                                 seen_threads[i * 10 + 1 + j])

        self.assertEqual(saved_sem_num, len(lockutils._semaphores),
                         "Semaphore leak detected")

    def test_nested_synchronized_external_works(self):
        """We can nest external syncs."""
        tempdir = tempfile.mkdtemp()
        try:
            self.config(lock_path=tempdir)
            sentinel = object()

            @lockutils.synchronized('testlock1', 'test-', external=True)
            def outer_lock():

                @lockutils.synchronized('testlock2', 'test-', external=True)
                def inner_lock():
                    return sentinel
                return inner_lock()

            self.assertEqual(sentinel, outer_lock())

        finally:
            if os.path.exists(tempdir):
                shutil.rmtree(tempdir)

    def _do_test_lock_externally(self):
        """We can lock across multiple processes."""

        def lock_files(handles_dir):

            with lockutils.lock('external', 'test-', external=True):
                # Open some files we can use for locking
                handles = []
                for n in range(50):
                    path = os.path.join(handles_dir, ('file-%s' % n))
                    handles.append(open(path, 'w'))

                # Loop over all the handles and try locking each file
                # without blocking, keeping a count of how many files we
                # were able to lock and then unlock. If the lock fails
                # we get an IOError and bail out with a bad exit code.
                count = 0
                for handle in handles:
                    try:
                        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                        count += 1
                        fcntl.flock(handle, fcntl.LOCK_UN)
                    except IOError:
                        os._exit(2)
                    finally:
                        handle.close()

                # Check that we were able to lock all of the files
                self.assertEqual(50, count)

        handles_dir = tempfile.mkdtemp()
        try:
            children = []
            for n in range(50):
                pid = os.fork()
                if pid:
                    children.append(pid)
                else:
                    try:
                        lock_files(handles_dir)
                    finally:
                        os._exit(0)

            for child in children:
                (pid, status) = os.waitpid(child, 0)
                if pid:
                    self.assertEqual(0, status)
        finally:
            if os.path.exists(handles_dir):
                shutil.rmtree(handles_dir, ignore_errors=True)

    def test_lock_externally(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir)

        try:
            self._do_test_lock_externally()
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)

    def test_lock_externally_lock_dir_not_exist(self):
        lock_dir = tempfile.mkdtemp()
        os.rmdir(lock_dir)
        self.config(lock_path=lock_dir)

        try:
            self._do_test_lock_externally()
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)

    def test_synchronized_with_prefix(self):
        lock_name = 'mylock'
        lock_pfix = 'mypfix-'

        foo = lockutils.synchronized_with_prefix(lock_pfix)

        @foo(lock_name, external=True)
        def bar(dirpath, pfix, name):
            filepath = os.path.join(dirpath, '%s%s' % (pfix, name))
            return os.path.isfile(filepath)

        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir)

        self.assertTrue(bar(lock_dir, lock_pfix, lock_name))

    def test_synchronized_without_prefix(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir)

        @lockutils.synchronized('lock', external=True)
        def test_without_prefix():
            path = os.path.join(lock_dir, "lock")
            self.assertTrue(os.path.exists(path))

        try:
            test_without_prefix()
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)

    def test_synchronized_prefix_without_hyphen(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir)

        @lockutils.synchronized('lock', 'hyphen', True)
        def test_without_hyphen():
            path = os.path.join(lock_dir, "hyphen-lock")
            self.assertTrue(os.path.exists(path))

        try:
            test_without_hyphen()
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)

    def test_contextlock(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir)

        try:
            # NOTE(flaper87): Lock is not external, which means
            # a semaphore will be yielded
            with lockutils.lock("test") as sem:
                self.assertTrue(isinstance(sem, threading._Semaphore))

                # NOTE(flaper87): Lock is external so an InterProcessLock
                # will be yielded.
                with lockutils.lock("test2", external=True):
                    path = os.path.join(lock_dir, "test2")
                    self.assertTrue(os.path.exists(path))

                with lockutils.lock("test1",
                                    external=True) as lock1:
                    self.assertTrue(isinstance(lock1,
                                               lockutils.InterProcessLock))
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)

    def test_contextlock_unlocks(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir)

        sem = None

        try:
            with lockutils.lock("test") as sem:
                self.assertTrue(isinstance(sem, threading._Semaphore))

                with lockutils.lock("test2", external=True):
                    path = os.path.join(lock_dir, "test2")
                    self.assertTrue(os.path.exists(path))

                # NOTE(flaper87): Lock should be free
                with lockutils.lock("test2", external=True):
                    path = os.path.join(lock_dir, "test2")
                    self.assertTrue(os.path.exists(path))

            # NOTE(flaper87): Lock should be free
            # but semaphore should already exist.
            with lockutils.lock("test") as sem2:
                self.assertEqual(sem, sem2)
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)

    def test_synchronized_externally_without_lock_path(self):
        self.config(lock_path=None)

        @lockutils.synchronized('external', 'test-', external=True)
        def foo():
            pass

        self.assertRaises(cfg.RequiredOptError, foo)

    def test_remove_lock_external_file(self):
        lock_name = 'mylock'
        lock_pfix = 'mypfix-remove-lock-test-'

        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir)

        lockutils.remove_external_lock_file(lock_name, lock_pfix)

        for ent in os.listdir(lock_dir):
            # No lock file carrying our prefix should survive the removal.
            self.assertFalse(ent.startswith(lock_pfix))

        if os.path.exists(lock_dir):
            shutil.rmtree(lock_dir, ignore_errors=True)


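# These tests exercise module-level behaviour: picking up the lock directory
# from the OSLO_LOCK_PATH environment variable (in a child process, so that
# lockutils can be reloaded with a clean config) and the lockutils.main()
# wrapper, which is expected to export OSLO_LOCK_PATH for the command it runs.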
class LockutilsModuleTestCase(test.BaseTestCase):

    def setUp(self):
        super(LockutilsModuleTestCase, self).setUp()
        self.old_env = os.environ.get('OSLO_LOCK_PATH')

    def tearDown(self):
        if self.old_env is None:
            del os.environ['OSLO_LOCK_PATH']
        else:
            os.environ['OSLO_LOCK_PATH'] = self.old_env
        super(LockutilsModuleTestCase, self).tearDown()

    def _lock_path_conf_test(self, lock_dir):
        cfg.CONF.unregister_opts(lockutils.util_opts)
        lockutils_ = moves.reload_module(lockutils)
        with lockutils_.lock('test-lock', external=True):
            if not os.path.exists(lock_dir):
                os._exit(2)
            if not os.path.exists(os.path.join(lock_dir, 'test-lock')):
                os._exit(3)

    def test_lock_path_from_env(self):
        lock_dir = tempfile.mkdtemp()
        os.environ['OSLO_LOCK_PATH'] = lock_dir
        try:
            p = multiprocessing.Process(target=self._lock_path_conf_test,
                                        args=(lock_dir,))
            p.start()
            p.join()
            if p.exitcode == 2:
                self.fail("lock_path directory %s does not exist" % lock_dir)
            elif p.exitcode == 3:
                self.fail("lock file hasn't been created in expected location")
            else:
                self.assertEqual(p.exitcode, 0,
                                 "Subprocess failed with code %s" % p.exitcode)
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)

    def test_main(self):
        script = '\n'.join([
            'import os',
            'lock_path = os.environ.get("OSLO_LOCK_PATH")',
            'assert lock_path is not None',
            'assert os.path.isdir(lock_path)',
        ])
        argv = ['', sys.executable, '-c', script]
        retval = lockutils.main(argv)
        self.assertEqual(retval, 0, "Bad OSLO_LOCK_PATH has been set")


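# TestLockFixture sets up a fixtures.LockFixture in the test body and asserts
# during tearDown that the external lock file still exists; the intent, per
# the comment in test_lock_fixture, is that teardown runs inside the lock.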
class TestLockFixture(test.BaseTestCase):

    def setUp(self):
        super(TestLockFixture, self).setUp()
        self.config = self.useFixture(config.Config()).config
        self.tempdir = tempfile.mkdtemp()

    def _check_in_lock(self):
        # Check that the lock file exists during teardown
        lock_path = os.path.join(self.tempdir, 'test-lock')
        self.assertTrue(os.path.exists(lock_path))

    def tearDown(self):
        self._check_in_lock()
        super(TestLockFixture, self).tearDown()

    def test_lock_fixture(self):
        # Setup lock fixture to test that teardown is inside the lock
        self.config(lock_path=self.tempdir)
        self.useFixture(fixtures.LockFixture('test-lock'))