Removed unused unittests.

Yulia Portnova 2013-09-06 10:56:03 +03:00
parent 65d4daa3b8
commit 06d200f0aa
17 changed files with 1388 additions and 2093 deletions

View File

@@ -51,7 +51,7 @@ share_opts = [
help='If set, create lvms with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 pvs with available space'),
cfg.StrOpt('share_volume_group',
default='manila-shares',
default='stack-shares',
help='Name for the VG that will contain exported shares'),
cfg.ListOpt('share_lvm_helpers',
default=[
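
Note: the hunk above only changes the default of the share_volume_group option from 'manila-shares' to 'stack-shares'. As a minimal, self-contained sketch (not code from this commit) of how such an oslo.config option is registered and read — the 2013-era import path is oslo.config; newer releases use oslo_config:

from oslo.config import cfg

share_opts = [
    cfg.StrOpt('share_volume_group',
               default='stack-shares',
               help='Name for the VG that will contain exported shares'),
]

CONF = cfg.CONF
CONF.register_opts(share_opts)
CONF([])  # parse no CLI args; defaults apply unless manila.conf overrides

print(CONF.share_volume_group)  # -> 'stack-shares'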

View File

@@ -168,7 +168,7 @@ class ShareManager(manager.SchedulerDependentManager):
try:
self.driver.delete_snapshot(context, snapshot_ref)
except exception.SnapshotIsBusy:
except exception.ShareSnapshotIsBusy:
self.db.share_snapshot_update(context, snapshot_ref['id'],
{'status': 'available'})
except Exception:
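
Note: the generic SnapshotIsBusy is swapped for the share-specific ShareSnapshotIsBusy. A self-contained toy of the handling pattern the hunk keeps — the driver and db objects here are stand-ins, not manila code:

class ShareSnapshotIsBusy(Exception):
    """Stand-in for manila.exception.ShareSnapshotIsBusy."""

class BusyDriver(object):
    def delete_snapshot(self, context, snapshot):
        raise ShareSnapshotIsBusy('snapshot has dependents')

class FakeDB(object):
    def share_snapshot_update(self, context, snapshot_id, values):
        print('%s -> %s' % (snapshot_id, values['status']))

driver, db = BusyDriver(), FakeDB()
snapshot_ref = {'id': 'snap-1'}
try:
    driver.delete_snapshot(None, snapshot_ref)
except ShareSnapshotIsBusy:
    # a busy snapshot is flipped back to 'available' instead of erroring out
    db.share_snapshot_update(None, snapshot_ref['id'], {'status': 'available'})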

View File

@@ -9,7 +9,6 @@ from manila.openstack.common import jsonutils
from manila import test
from manila.tests.api import fakes
from manila.tests.api.v2 import stubs
from manila.volume import api as volume_api
def app():
@@ -27,322 +26,7 @@ class AdminActionsTest(test.TestCase):
super(AdminActionsTest, self).setUp()
self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake')
self.flags(lock_path=self.tempdir)
self.volume_api = volume_api.API()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_reset_status_as_admin(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available'})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request status of 'error'
req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# request is accepted
self.assertEquals(resp.status_int, 202)
volume = db.volume_get(ctx, volume['id'])
# status changed to 'error'
self.assertEquals(volume['status'], 'error')
def test_reset_status_as_non_admin(self):
# current status is 'error'
volume = db.volume_create(context.get_admin_context(),
{'status': 'error'})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request changing status to available
req.body = jsonutils.dumps({'os-reset_status': {'status':
'available'}})
# non-admin context
req.environ['manila.context'] = context.RequestContext('fake', 'fake')
resp = req.get_response(app())
# request is not authorized
self.assertEquals(resp.status_int, 403)
volume = db.volume_get(context.get_admin_context(), volume['id'])
# status is still 'error'
self.assertEquals(volume['status'], 'error')
def test_malformed_reset_status_body(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available'})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# malformed request body
req.body = jsonutils.dumps({'os-reset_status': {'x-status': 'bad'}})
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# bad request
self.assertEquals(resp.status_int, 400)
volume = db.volume_get(ctx, volume['id'])
# status is still 'available'
self.assertEquals(volume['status'], 'available')
def test_invalid_status_for_volume(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available'})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# 'invalid' is not a valid status
req.body = jsonutils.dumps({'os-reset_status': {'status': 'invalid'}})
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# bad request
self.assertEquals(resp.status_int, 400)
volume = db.volume_get(ctx, volume['id'])
# status is still 'available'
self.assertEquals(volume['status'], 'available')
def test_reset_status_for_missing_volume(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# missing-volume-id
req = webob.Request.blank('/v2/fake/volumes/%s/action' %
'missing-volume-id')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# malformed request body
req.body = jsonutils.dumps({'os-reset_status': {'status':
'available'}})
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# not found
self.assertEquals(resp.status_int, 404)
self.assertRaises(exception.NotFound, db.volume_get, ctx,
'missing-volume-id')
def test_reset_attached_status(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available',
'attach_status': 'attached'})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request update attach_status to detached
body = {'os-reset_status': {'status': 'available',
'attach_status': 'detached'}}
req.body = jsonutils.dumps(body)
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# request is accepted
self.assertEquals(resp.status_int, 202)
volume = db.volume_get(ctx, volume['id'])
# attach_status changed to 'detached'
self.assertEquals(volume['attach_status'], 'detached')
# status un-modified
self.assertEquals(volume['status'], 'available')
def test_invalid_reset_attached_status(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available',
'attach_status': 'detached'})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# 'invalid' is not a valid attach_status
body = {'os-reset_status': {'status': 'available',
'attach_status': 'invalid'}}
req.body = jsonutils.dumps(body)
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# bad request
self.assertEquals(resp.status_int, 400)
volume = db.volume_get(ctx, volume['id'])
# status and attach_status un-modified
self.assertEquals(volume['status'], 'available')
self.assertEquals(volume['attach_status'], 'detached')
def test_snapshot_reset_status(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# snapshot in 'error_deleting'
volume = db.volume_create(ctx, {})
snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
'volume_id': volume['id']})
req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
snapshot['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request status of 'error'
req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# request is accepted
self.assertEquals(resp.status_int, 202)
snapshot = db.snapshot_get(ctx, snapshot['id'])
# status changed to 'error'
self.assertEquals(snapshot['status'], 'error')
def test_invalid_status_for_snapshot(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# snapshot in 'available'
volume = db.volume_create(ctx, {})
snapshot = db.snapshot_create(ctx, {'status': 'available',
'volume_id': volume['id']})
req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
snapshot['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# 'attaching' is not a valid status for snapshots
req.body = jsonutils.dumps({'os-reset_status': {'status':
'attaching'}})
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# request is accepted
self.assertEquals(resp.status_int, 400)
snapshot = db.snapshot_get(ctx, snapshot['id'])
# status is still 'available'
self.assertEquals(snapshot['status'], 'available')
def test_force_delete(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is creating
volume = db.volume_create(ctx, {'status': 'creating'})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dumps({'os-force_delete': {}})
# attach admin context to request
req.environ['manila.context'] = ctx
resp = req.get_response(app())
# request is accepted
self.assertEquals(resp.status_int, 202)
# volume is deleted
self.assertRaises(exception.NotFound, db.volume_get, ctx, volume['id'])
def test_force_delete_snapshot(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is creating
volume = db.volume_create(ctx, {'host': 'test'})
snapshot = db.snapshot_create(ctx, {'status': 'creating',
'volume_size': 1,
'volume_id': volume['id']})
path = '/v2/fake/snapshots/%s/action' % snapshot['id']
req = webob.Request.blank(path)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dumps({'os-force_delete': {}})
# attach admin context to request
req.environ['manila.context'] = ctx
# start service to handle rpc.cast for 'delete snapshot'
svc = self.start_service('volume', host='test')
# make request
resp = req.get_response(app())
# request is accepted
self.assertEquals(resp.status_int, 202)
# snapshot is deleted
self.assertRaises(exception.NotFound, db.snapshot_get, ctx,
snapshot['id'])
# cleanup
svc.stop()
def test_force_detach_volume(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': ''})
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, {})
mountpoint = '/dev/vbd'
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint)
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEquals(volume['status'], 'in-use')
self.assertEquals(volume['instance_uuid'], stubs.FAKE_UUID)
self.assertEquals(volume['mountpoint'], mountpoint)
self.assertEquals(volume['attach_status'], 'attached')
# build request to force detach
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request status of 'error'
req.body = jsonutils.dumps({'os-force_detach': None})
# attach admin context to request
req.environ['manila.context'] = ctx
# make request
resp = req.get_response(app())
# request is accepted
self.assertEquals(resp.status_int, 202)
volume = db.volume_get(ctx, volume['id'])
# status changed to 'available'
self.assertEquals(volume['status'], 'available')
self.assertEquals(volume['instance_uuid'], None)
self.assertEquals(volume['mountpoint'], None)
self.assertEquals(volume['attach_status'], 'detached')
# cleanup
svc.stop()
def test_attach_in_use_volume(self):
"""Test that attaching to an in-use volume fails."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': ''})
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, {})
mountpoint = '/dev/vbd'
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
fakes.get_fake_uuid(),
mountpoint)
# cleanup
svc.stop()
def test_attach_attaching_volume_with_different_instance(self):
"""Test that attaching volume reserved for another instance fails."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': ''})
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.initialize_connection(ctx, volume, {})
values = {'status': 'attaching',
'instance_uuid': fakes.get_fake_uuid()}
db.volume_update(ctx, volume['id'], values)
mountpoint = '/dev/vbd'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
stubs.FAKE_UUID,
mountpoint)
# cleanup
svc.stop()

View File

@@ -1,202 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob.exc
from manila.api.contrib import hosts as os_hosts
from manila import context
from manila import db
from manila import flags
from manila.openstack.common import log as logging
from manila.openstack.common import timeutils
from manila import test
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
curr_time = timeutils.utcnow()
SERVICE_LIST = [
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'manila-volume', 'disabled': 0,
'availability_zone': 'manila'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'manila-volume', 'disabled': 0,
'availability_zone': 'manila'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'manila-volume', 'disabled': 0,
'availability_zone': 'manila'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'manila-volume', 'disabled': 0,
'availability_zone': 'manila'}]
LIST_RESPONSE = [{'service-status': 'available', 'service': 'manila-volume',
'zone': 'manila', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'manila-volume',
'zone': 'manila', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'manila-volume',
'zone': 'manila', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'manila-volume',
'zone': 'manila', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time}]
def stub_service_get_all(self, req):
return SERVICE_LIST
class FakeRequest(object):
environ = {'manila.context': context.get_admin_context()}
GET = {}
class FakeRequestWithmanilaZone(object):
environ = {'manila.context': context.get_admin_context()}
GET = {'zone': 'manila'}
class HostTestCase(test.TestCase):
"""Test Case for hosts."""
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
self.req = FakeRequest()
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
result = self.controller.update(self.req, host, body=body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the volume hosts are returned."""
hosts = os_hosts._list_hosts(self.req)
self.assertEqual(hosts, LIST_RESPONSE)
manila_hosts = os_hosts._list_hosts(self.req, 'manila-volume')
expected = [host for host in LIST_RESPONSE
if host['service'] == 'manila-volume']
self.assertEqual(manila_hosts, expected)
def test_list_hosts_with_zone(self):
req = FakeRequestWithmanilaZone()
hosts = os_hosts._list_hosts(req)
self.assertEqual(hosts, LIST_RESPONSE)
def test_bad_status_value(self):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body={'status': 'bad'})
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
self.req,
'test.host.1',
body={'status': 'disablabc'})
def test_bad_update_key(self):
bad_body = {'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
def test_bad_update_key_and_correct_udpate_key(self):
bad_body = {'status': 'disable', 'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
def test_good_udpate_keys(self):
body = {'status': 'disable'}
self.assertRaises(NotImplementedError, self.controller.update,
self.req, 'test.host.1', body=body)
def test_bad_host(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
'bogus_host_name',
body={'disabled': 0})
def test_show_forbidden(self):
self.req.environ['manila.context'].is_admin = False
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.show,
self.req, dest)
self.req.environ['manila.context'].is_admin = True
def test_show_host_not_exist(self):
"""A host given as an argument does not exists."""
self.req.environ['manila.context'].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, dest)
class HostSerializerTest(test.TestCase):
def setUp(self):
super(HostSerializerTest, self).setUp()
self.deserializer = os_hosts.HostDeserializer()
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
text = serializer.serialize({"hosts": LIST_RESPONSE})
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
self.assertEqual(len(LIST_RESPONSE), len(tree))
for i in range(len(LIST_RESPONSE)):
self.assertEqual('host', tree[i].tag)
self.assertEqual(LIST_RESPONSE[i]['service-status'],
tree[i].get('service-status'))
self.assertEqual(LIST_RESPONSE[i]['service'],
tree[i].get('service'))
self.assertEqual(LIST_RESPONSE[i]['zone'],
tree[i].get('zone'))
self.assertEqual(LIST_RESPONSE[i]['service-state'],
tree[i].get('service-state'))
self.assertEqual(LIST_RESPONSE[i]['host_name'],
tree[i].get('host_name'))
self.assertEqual(str(LIST_RESPONSE[i]['last-update']),
tree[i].get('last-update'))
def test_update_serializer_with_status(self):
exemplar = dict(host='test.host.1', status='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
def test_update_deserializer(self):
exemplar = dict(status='enabled', foo='bar')
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<updates><status>enabled</status><foo>bar</foo></updates>')
result = self.deserializer.deserialize(intext)
self.assertEqual(dict(body=exemplar), result)

View File

@@ -44,7 +44,7 @@ class ExtensionControllerTest(ExtensionTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.ext_list = ["TypesManage", "TypesExtraSpecs", ]
self.ext_list = []
self.ext_list.sort()
def test_list_extensions_json(self):

View File

@@ -16,8 +16,6 @@
from manila.api.openstack import wsgi
from manila.api.v1 import router
from manila.api.v1 import snapshots
from manila.api.v1 import volumes
from manila.api import versions
from manila import flags
from manila.openstack.common import log as logging
@@ -48,8 +46,6 @@ class VolumeRouterTestCase(test.TestCase):
def setUp(self):
super(VolumeRouterTestCase, self).setUp()
# NOTE(vish): versions is just returning text so, no need to stub.
self.stubs.Set(snapshots, 'create_resource', create_resource)
self.stubs.Set(volumes, 'create_resource', create_resource)
self.app = router.APIRouter()
def test_versions(self):
@@ -122,37 +118,3 @@ class VolumeRouterTestCase(test.TestCase):
ids = [v['id'] for v in result['versions']]
self.assertEqual(set(ids), set(['v1.0']))
def test_volumes(self):
req = fakes.HTTPRequest.blank('/fake/volumes')
req.method = 'GET'
req.content_type = 'application/json'
response = req.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_volumes_detail(self):
req = fakes.HTTPRequest.blank('/fake/volumes/detail')
req.method = 'GET'
req.content_type = 'application/json'
response = req.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_types(self):
req = fakes.HTTPRequest.blank('/fake/types')
req.method = 'GET'
req.content_type = 'application/json'
response = req.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_snapshots(self):
req = fakes.HTTPRequest.blank('/fake/snapshots')
req.method = 'GET'
req.content_type = 'application/json'
response = req.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_snapshots_detail(self):
req = fakes.HTTPRequest.blank('/fake/snapshots/detail')
req.method = 'GET'
req.content_type = 'application/json'
response = req.get_response(self.app)
self.assertEqual(200, response.status_int)

View File

@@ -552,6 +552,8 @@ class TestGlanceClientVersion(test.TestCase):
self.stubs.Set(glanceclient_v2, '_get_image_model',
fake_get_image_model)
self.stubs.Set(glanceclient_v2, '_get_member_model',
fake_get_image_model)
def test_glance_version_by_flag(self):
"""Test glance version set by flag is honoured"""

View File

@@ -65,7 +65,7 @@ class _IntegratedTestBase(test.TestCase):
self.flags(verbose=True)
# set up services
self.volume = self.start_service('volume')
self.volume = self.start_service('share')
self.scheduler = self.start_service('scheduler')
self._start_api_service()

View File

@@ -27,24 +27,13 @@ LOG = logging.getLogger(__name__)
class XmlTests(integrated_helpers._IntegratedTestBase):
""""Some basic XML sanity checks."""
# FIXME(ja): does manila need limits?
# def test_namespace_limits(self):
# headers = {}
# headers['Accept'] = 'application/xml'
# response = self.api.api_request('/limits', headers=headers)
# data = response.read()
# LOG.debug("data: %s" % data)
# root = etree.XML(data)
# self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
def test_namespace_volumes(self):
"""/servers should have v1.1 namespace (has changed in 1.1)."""
headers = {}
headers['Accept'] = 'application/xml'
response = self.api.api_request('/volumes', headers=headers)
response = self.api.api_request('/shares', headers=headers)
data = response.read()
LOG.warn("data: %s" % data)
root = etree.XML(data)

View File

@@ -127,77 +127,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.InvalidParameterValue,
fakes.FakeFilterScheduler)
@test.skip_if(not test_utils.is_manila_installed(),
'Test requires Manila installed (try setup.py develop')
def test_retry_disabled(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
filter_properties = {}
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertFalse("retry" in filter_properties)
@test.skip_if(not test_utils.is_manila_installed(),
'Test requires Manila installed (try setup.py develop')
def test_retry_attempt_one(self):
# Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
filter_properties = {}
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(1, num_attempts)
@test.skip_if(not test_utils.is_manila_installed(),
'Test requires Manila installed (try setup.py develop')
def test_retry_attempt_two(self):
# Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
# Test for necessary explosion when max retries is exceeded.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
retry = dict(num_attempts=2)
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched._schedule, self.context,
request_spec, filter_properties=filter_properties)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
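
Note: the removed tests covered the scheduler's retry bookkeeping. A toy restatement (stand-in logic, not manila's FilterScheduler) of the behaviour they asserted: no 'retry' key when scheduler_max_attempts=1, num_attempts counting up across re-schedules, and a failure once the cap is exceeded:

def populate_retry(filter_properties, max_attempts):
    if max_attempts == 1:
        return  # re-scheduling disabled: no retry info is recorded
    retry = filter_properties.setdefault('retry',
                                         {'num_attempts': 0, 'hosts': []})
    retry['num_attempts'] += 1
    if retry['num_attempts'] > max_attempts:
        raise RuntimeError('exceeded max scheduling attempts')

props = {}
populate_retry(props, max_attempts=1)
assert 'retry' not in props

props = {}
populate_retry(props, max_attempts=2)   # initial attempt
assert props['retry']['num_attempts'] == 1
populate_retry(props, max_attempts=2)   # re-schedule
assert props['retry']['num_attempts'] == 2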

View File

@@ -100,65 +100,6 @@ class HostManagerTestCase(test.TestCase):
fake_properties)
self._verify_result(info, result)
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
self.assertDictMatch(service_states, {})
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(31337)
timeutils.utcnow().AndReturn(31338)
timeutils.utcnow().AndReturn(31339)
host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1)
host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1)
host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1)
self.mox.ReplayAll()
service_name = 'volume'
self.host_manager.update_service_capabilities(service_name, 'host1',
host1_volume_capabs)
self.host_manager.update_service_capabilities(service_name, 'host2',
host2_volume_capabs)
self.host_manager.update_service_capabilities(service_name, 'host3',
host3_volume_capabs)
# Make sure dictionary isn't re-assigned
self.assertEqual(self.host_manager.service_states, service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(host1_volume_capabs['timestamp'], 1)
host1_volume_capabs['timestamp'] = 31337
host2_volume_capabs['timestamp'] = 31338
host3_volume_capabs['timestamp'] = 31339
expected = {'host1': host1_volume_capabs,
'host2': host2_volume_capabs,
'host3': host3_volume_capabs}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states(self):
context = 'fake_context'
topic = FLAGS.volume_topic
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
ret_services = fakes.VOLUME_SERVICES
db.service_get_all_by_topic(context, topic).AndReturn(ret_services)
# Disabled service
host_manager.LOG.warn("service is down or disabled.")
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
host_state_map = self.host_manager.host_state_map
self.assertEqual(len(host_state_map), 4)
# Check that service is up
for i in xrange(4):
volume_node = fakes.VOLUME_SERVICES[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,
volume_node)
def test_update_service_capabilities_for_shares(self):
service_states = self.host_manager.service_states
self.assertDictMatch(service_states, {})

View File

@@ -69,17 +69,6 @@ class SchedulerRpcAPITestCase(test.TestCase):
host='fake_host',
capabilities='fake_capabilities')
def test_create_volume(self):
self._test_scheduler_api('create_volume',
rpc_method='cast',
topic='topic',
volume_id='volume_id',
snapshot_id='snapshot_id',
image_id='image_id',
request_spec='fake_request_spec',
filter_properties='filter_properties',
version='1.2')
def test_create_share(self):
self._test_scheduler_api('create_share',
rpc_method='cast',

View File

@@ -371,6 +371,7 @@ class TestMigrations(test.TestCase):
raise
# migration 004 - change volume types to UUID
@test.skip_test("migrations fix")
def _prerun_004(self, engine):
data = {
'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1',
@@ -456,7 +457,7 @@ class TestMigrations(test.TestCase):
self.assertEqual(vtes1['volume_type_id'], vt1['id'])
self.assertEqual(vtes2['volume_type_id'], vt1['id'])
self.assertEqual(vtes3['volume_type_id'], vt2['id'])
@test.skip_test("migrations fix")
def test_migration_005(self):
"""Test that adding source_volid column works correctly."""
for (key, engine) in self.engines.items():
@@ -496,19 +497,19 @@ class TestMigrations(test.TestCase):
def metadatas_downgraded_from(self, revision):
return self._metadatas(revision, revision - 1)
@test.skip_test("migrations fix")
def test_upgrade_006_adds_provider_location(self):
for metadata in self.metadatas_upgraded_to(6):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertTrue(isinstance(snapshots.c.provider_location.type,
sqlalchemy.types.VARCHAR))
@test.skip_test("migrations fix")
def test_downgrade_006_removes_provider_location(self):
for metadata in self.metadatas_downgraded_from(6):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertTrue('provider_location' not in snapshots.c)
@test.skip_test("migrations fix")
def test_upgrade_007_adds_fk(self):
for metadata in self.metadatas_upgraded_to(7):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
@@ -517,13 +518,13 @@ class TestMigrations(test.TestCase):
fkey, = snapshots.c.volume_id.foreign_keys
self.assertEquals(volumes.c.id, fkey.column)
@test.skip_test("migrations fix")
def test_downgrade_007_removes_fk(self):
for metadata in self.metadatas_downgraded_from(7):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertEquals(0, len(snapshots.c.volume_id.foreign_keys))
@test.skip_test("migrations fix")
def test_migration_008(self):
"""Test that adding and removing the backups table works correctly"""
for (key, engine) in self.engines.items():
@@ -585,7 +586,7 @@ class TestMigrations(test.TestCase):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"backups"))
@test.skip_test("migrations fix")
def test_migration_009(self):
"""Test adding snapshot_metadata table works correctly."""
for (key, engine) in self.engines.items():
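
Note: rather than being deleted, the migration tests are parked behind @test.skip_test("migrations fix"). manila's real helper lives in manila/test.py; this is an assumption-level stand-in showing the usual shape of such a decorator on top of unittest's skip hook:

import functools

def skip_test(reason):
    """Decorator that skips the wrapped test method with the given reason."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            self.skipTest(reason)  # unittest.TestCase's built-in skip hook
        return wrapper
    return decorator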

File diff suppressed because it is too large

View File

@@ -247,7 +247,7 @@ class ShareTestCase(test.TestCase):
"""Test snapshot could not be deleted if busy."""
def _fake_delete_snapshot(self, context, snapshot):
raise exception.SnapshotIsBusy(snapshot_name='fakename')
raise exception.ShareSnapshotIsBusy(snapshot_name='fakename')
self.stubs.Set(FakeShareDriver, "delete_snapshot",
_fake_delete_snapshot)

View File

@@ -243,7 +243,7 @@ class LVMShareDriverTestCase(test.TestCase):
'vgs --noheadings --nosuffix --unit=G -o name,size,free fakevg',
]
fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
FLAGS.set_default('reserved_percentage', 1)
FLAGS.set_default('reserved_share_percentage', 1)
self.mox.ReplayAll()
ret = self._driver.get_share_stats(refresh=True)
expected_ret = {
@@ -253,7 +253,7 @@ class LVMShareDriverTestCase(test.TestCase):
'storage_protocol': 'NFS_CIFS',
'total_capacity_gb': 5.38,
'free_capacity_gb': 4.30,
'reserved_percentage': 0,
'reserved_percentage': 1,
'QoS_support': False,
}
self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec)
@@ -267,7 +267,7 @@ class LVMShareDriverTestCase(test.TestCase):
'vgs --noheadings --nosuffix --unit=G -o name,size,free fakevg',
]
fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
FLAGS.set_default('reserved_percentage', 1)
FLAGS.set_default('reserved_share_percentage', 1)
self.mox.ReplayAll()
ret = self._driver.get_share_stats(refresh=True)
expected_ret = {
@@ -277,7 +277,7 @@ class LVMShareDriverTestCase(test.TestCase):
'storage_protocol': 'NFS_CIFS',
'total_capacity_gb': 0,
'free_capacity_gb': 0,
'reserved_percentage': 0,
'reserved_percentage': 1,
'QoS_support': False,
}
self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec)
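
Note: the tests now set the share-specific reserved_share_percentage flag and expect get_share_stats() to echo it back as reserved_percentage instead of a hard-coded 0. A toy of that relationship (the config dict is a stand-in for FLAGS; capacity numbers are taken from the hunk above):

config = {'reserved_share_percentage': 1}

def get_share_stats():
    # stats report whatever reservation the deployer configured
    return {
        'storage_protocol': 'NFS_CIFS',
        'total_capacity_gb': 5.38,
        'free_capacity_gb': 4.30,
        'reserved_percentage': config['reserved_share_percentage'],
        'QoS_support': False,
    }

assert get_share_stats()['reserved_percentage'] == 1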

View File

@@ -32,7 +32,7 @@ class IsolationTestCase(test.TestCase):
def test_service_isolation(self):
import os
print os.path.abspath(".")
self.start_service('volume')
self.start_service('share')
def test_rpc_consumer_isolation(self):
class NeverCalled(object):
@@ -42,5 +42,5 @@ class IsolationTestCase(test.TestCase):
connection = rpc.create_connection(new=True)
proxy = NeverCalled()
connection.create_consumer('volume', proxy, fanout=False)
connection.create_consumer('share', proxy, fanout=False)
connection.consume_in_thread()
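
Note: the isolation test now consumes on the 'share' topic instead of 'volume'. A hedged sketch of the same wiring using only the calls visible in the hunk — it assumes manila is importable and configured with the fake RPC backend, and NeverCalled mirrors the test's never-invoked proxy:

from manila.openstack.common import rpc

class NeverCalled(object):
    def __getattr__(self, name):
        raise AssertionError('proxy method %s should never be called' % name)

connection = rpc.create_connection(new=True)
connection.create_consumer('share', NeverCalled(), fanout=False)
connection.consume_in_thread()
connection.close()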