Move the functional test directories

While placement lived in Nova, its functional test code was relegated to
the 'nova/tests/functional/api/openstack/placement/' directory. This
commit moves the contents of that directory to 'placement/tests/functional/'
and removes the now-unnecessary intermediate directories. It also removes
the files related to Nova aggregates that were preserved by the
filter_history.sh script.

Change-Id: I5af2e074f2e1bcb90f32589fce819bd4872b6871
EdLeafe 2018-08-30 22:49:00 +00:00 committed by Eric Fried
parent fb7c190990
commit 9b9073c4c3
102 changed files with 0 additions and 2170 deletions

@@ -1,28 +0,0 @@
Api Samples
===========
This part of the tree contains templates for API samples. The
documentation in doc/api_samples is completely autogenerated from the
tests in this directory.
To add a new api sample, add tests for the common passing and failing
cases in this directory for your extension, and modify test_samples.py
for your tests.
Then run the following command:

    tox -e api-samples

This will create the files in doc/api_samples.
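For example, a sample test for a new extension might look like the following
sketch. It assumes the ApiSampleTestBaseV21 helpers used elsewhere in this
diff (_do_get, _verify_response); the 'os-foo' extension name and the
template names are hypothetical:

    from nova.tests.functional.api_sample_tests import api_sample_base

    class FooSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
        # Samples and templates live under a directory with this name.
        sample_dir = "os-foo"

        def test_foo_list(self):
            # GET the resource and verify the response against the
            # foo-list-get-resp.json.tpl template (no substitutions needed).
            response = self._do_get('os-foo')
            self._verify_response('foo-list-get-resp', {}, response, 200)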
If new tests are added or the .tpl files are changed due to bug fixes, the
samples must be regenerated so they are in sync with the templates, as
there is an additional test which reloads the documentation and
ensures that it's in sync.
Debugging sample generation
---------------------------
If a .tpl is changed, its matching .json must be removed else the samples
won't be generated. If an entirely new extension is added, a directory for
it must be created before its samples will be generated.

@@ -1,5 +0,0 @@
{
"add_host": {
"host": "%(host_name)s"
}
}

@@ -1,9 +0,0 @@
{
"set_metadata":
{
"metadata":
{
"key": "value"
}
}
}

@@ -1,7 +0,0 @@
{
"aggregate":
{
"name": "name",
"availability_zone": "london"
}
}

@@ -1,11 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"id": %(aggregate_id)s,
"name": "name",
"updated_at": null
}
}

@@ -1,5 +0,0 @@
{
"remove_host": {
"host": "%(host_name)s"
}
}

@@ -1,7 +0,0 @@
{
"aggregate":
{
"name": "newname",
"availability_zone": "nova2"
}
}

@@ -1,15 +0,0 @@
{
"aggregate": {
"availability_zone": "nova2",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "nova2"
},
"name": "newname",
"updated_at": "%(strtime)s"
}
}

@@ -1,17 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [
"%(compute_host)s"
],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null
}
}

@@ -1,15 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null
}
}

@@ -1,19 +0,0 @@
{
"aggregates": [
{
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [
"%(compute_host)s"
],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null
}
]
}

@@ -1,16 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "london",
"key": "value"
},
"name": "name",
"updated_at": %(strtime)s
}
}

@@ -1,15 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null
}
}

@@ -1,9 +0,0 @@
{
"set_metadata":
{
"metadata":
{
"key": "value"
}
}
}

@@ -1,7 +0,0 @@
{
"aggregate":
{
"name": "name",
"availability_zone": "london"
}
}

@@ -1,12 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"id": %(aggregate_id)s,
"name": "name",
"updated_at": null,
"uuid": "%(uuid)s"
}
}

@@ -1,5 +0,0 @@
{
"remove_host": {
"host": "%(host_name)s"
}
}

@@ -1,7 +0,0 @@
{
"aggregate":
{
"name": "newname",
"availability_zone": "nova2"
}
}

@@ -1,16 +0,0 @@
{
"aggregate": {
"availability_zone": "nova2",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "nova2"
},
"name": "newname",
"updated_at": "%(strtime)s",
"uuid": "%(uuid)s"
}
}

@@ -1,18 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [
"%(compute_host)s"
],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null,
"uuid": "%(uuid)s"
}
}

@@ -1,16 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null,
"uuid": "%(uuid)s"
}
}

@@ -1,20 +0,0 @@
{
"aggregates": [
{
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [
"%(compute_host)s"
],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null,
"uuid": "%(uuid)s"
}
]
}

@@ -1,17 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "london",
"key": "value"
},
"name": "name",
"updated_at": %(strtime)s,
"uuid": "%(uuid)s"
}
}

@@ -1,16 +0,0 @@
{
"aggregate": {
"availability_zone": "london",
"created_at": "%(strtime)s",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null,
"uuid": "%(uuid)s"
}
}

@@ -1,119 +0,0 @@
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova.tests.functional.api_sample_tests import api_sample_base
class AggregatesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-aggregates"
# extra_subs is a noop in the base v2.1 test class; it's used to sub in
# additional details for response verification of actions performed on an
# existing aggregate.
extra_subs = {}
def _test_aggregate_create(self):
subs = {
"aggregate_id": '(?P<id>\d+)'
}
response = self._do_post('os-aggregates', 'aggregate-post-req', subs)
return self._verify_response('aggregate-post-resp',
subs, response, 200)
def test_aggregate_create(self):
self._test_aggregate_create()
def _test_add_host(self, aggregate_id, host):
subs = {
"host_name": host
}
response = self._do_post('os-aggregates/%s/action' % aggregate_id,
'aggregate-add-host-post-req', subs)
subs.update(self.extra_subs)
self._verify_response('aggregates-add-host-post-resp', subs,
response, 200)
def test_list_aggregates(self):
aggregate_id = self._test_aggregate_create()
self._test_add_host(aggregate_id, self.compute.host)
response = self._do_get('os-aggregates')
self._verify_response('aggregates-list-get-resp', {}, response, 200)
def test_aggregate_get(self):
agg_id = self._test_aggregate_create()
response = self._do_get('os-aggregates/%s' % agg_id)
self._verify_response('aggregates-get-resp', self.extra_subs,
response, 200)
def test_add_metadata(self):
agg_id = self._test_aggregate_create()
response = self._do_post('os-aggregates/%s/action' % agg_id,
'aggregate-metadata-post-req',
{'action': 'set_metadata'})
self._verify_response('aggregates-metadata-post-resp', self.extra_subs,
response, 200)
def test_add_host(self):
aggregate_id = self._test_aggregate_create()
self._test_add_host(aggregate_id, self.compute.host)
def test_remove_host(self):
self.test_add_host()
subs = {
"host_name": self.compute.host,
}
response = self._do_post('os-aggregates/1/action',
'aggregate-remove-host-post-req', subs)
subs.update(self.extra_subs)
self._verify_response('aggregates-remove-host-post-resp',
subs, response, 200)
def test_update_aggregate(self):
aggregate_id = self._test_aggregate_create()
response = self._do_put('os-aggregates/%s' % aggregate_id,
'aggregate-update-post-req', {})
self._verify_response('aggregate-update-post-resp',
self.extra_subs, response, 200)
class AggregatesV2_41_SampleJsonTest(AggregatesSampleJsonTest):
microversion = '2.41'
scenarios = [
(
"v2_41", {
'api_major_version': 'v2.1',
},
)
]
def _test_aggregate_create(self):
subs = {
"aggregate_id": '(?P<id>\d+)',
}
response = self._do_post('os-aggregates', 'aggregate-post-req', subs)
# This feels like cheating since we're getting the uuid from the
# response before we even validate that it exists in the response based
# on the sample, but we'll fail with a KeyError if it doesn't which is
# maybe good enough. Alternatively we have to mock out the DB API
# to return a fake aggregate with a hard-coded uuid that matches the
# API sample which isn't fun either.
subs['uuid'] = jsonutils.loads(response.content)['aggregate']['uuid']
# save off the uuid for subs validation on other actions performed
# on this aggregate
self.extra_subs['uuid'] = subs['uuid']
return self._verify_response('aggregate-post-resp',
subs, response, 200)

@@ -1,563 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import mock
from oslo_db import exception as db_exc
from oslo_utils import timeutils
from nova import context
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
import nova.objects.aggregate as aggregate_obj
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit.objects.test_objects import compare_obj as base_compare
from nova.tests import uuidsentinel
SUBS = {'metadata': 'metadetails'}
NOW = timeutils.utcnow().replace(microsecond=0)
def _get_fake_aggregate(db_id, in_api=True, result=True):
agg_map = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'id': db_id,
'uuid': getattr(uuidsentinel, str(db_id)),
'name': 'name_' + str(db_id),
}
if not in_api:
agg_map['deleted'] = False
if result:
agg_map['hosts'] = _get_fake_hosts(db_id)
agg_map['metadetails'] = _get_fake_metadata(db_id)
return agg_map
def _get_fake_hosts(db_id):
return ['constant_host', 'unique_host_' + str(db_id)]
def _get_fake_metadata(db_id):
return {'constant_key': 'constant_value',
'unique_key': 'unique_value_' + str(db_id)}
@db_api.api_context_manager.writer
def _create_aggregate(context, values=_get_fake_aggregate(1, result=False),
metadata=_get_fake_metadata(1)):
aggregate = api_models.Aggregate()
aggregate.update(values)
aggregate.save(context.session)
if metadata:
for key, value in metadata.items():
aggregate_metadata = api_models.AggregateMetadata()
aggregate_metadata.update({'key': key,
'value': value,
'aggregate_id': aggregate['id']})
aggregate_metadata.save(context.session)
return aggregate
@db_api.api_context_manager.writer
def _create_aggregate_with_hosts(context,
values=_get_fake_aggregate(1, result=False),
metadata=_get_fake_metadata(1),
hosts=_get_fake_hosts(1)):
aggregate = _create_aggregate(context, values, metadata)
for host in hosts:
host_model = api_models.AggregateHost()
host_model.update({'host': host,
'aggregate_id': aggregate.id})
host_model.save(context.session)
return aggregate
@db_api.api_context_manager.reader
def _aggregate_host_get_all(context, aggregate_id):
return context.session.query(api_models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
@db_api.api_context_manager.reader
def _aggregate_metadata_get_all(context, aggregate_id):
results = context.session.query(api_models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
metadata = {}
for r in results:
metadata[r['key']] = r['value']
return metadata
class AggregateObjectDbTestCase(test.TestCase):
def setUp(self):
super(AggregateObjectDbTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
def test_aggregate_get_from_db(self):
result = _create_aggregate_with_hosts(self.context)
expected = aggregate_obj._aggregate_get_from_db(self.context,
result['id'])
self.assertEqual(_get_fake_hosts(1), expected.hosts)
self.assertEqual(_get_fake_metadata(1), expected['metadetails'])
def test_aggregate_get_from_db_by_uuid(self):
result = _create_aggregate_with_hosts(self.context)
expected = aggregate_obj._aggregate_get_from_db_by_uuid(
self.context, result['uuid'])
self.assertEqual(result.uuid, expected.uuid)
self.assertEqual(_get_fake_hosts(1), expected.hosts)
self.assertEqual(_get_fake_metadata(1), expected['metadetails'])
def test_aggregate_get_from_db_raise_not_found(self):
aggregate_id = 5
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_get_from_db,
self.context, aggregate_id)
def test_aggregate_get_all_from_db(self):
for c in range(3):
_create_aggregate(self.context,
values={'name': 'fake_aggregate_%d' % c})
results = aggregate_obj._get_all_from_db(self.context)
self.assertEqual(len(results), 3)
def test_aggregate_get_by_host_from_db(self):
_create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_1'},
hosts=['host.1.openstack.org'])
_create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_2'},
hosts=['host.1.openstack.org'])
_create_aggregate(self.context,
values={'name': 'no_host_aggregate'})
rh1 = aggregate_obj._get_all_from_db(self.context)
rh2 = aggregate_obj._get_by_host_from_db(self.context,
'host.1.openstack.org')
self.assertEqual(3, len(rh1))
self.assertEqual(2, len(rh2))
def test_aggregate_get_by_host_with_key_from_db(self):
ah1 = _create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_1'},
metadata={'goodkey': 'good'},
hosts=['host.1.openstack.org'])
_create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate_2'},
hosts=['host.1.openstack.org'])
rh1 = aggregate_obj._get_by_host_from_db(self.context,
'host.1.openstack.org',
key='goodkey')
self.assertEqual(1, len(rh1))
self.assertEqual(ah1['id'], rh1[0]['id'])
def test_aggregate_get_by_metadata_key_from_db(self):
_create_aggregate(self.context,
values={'name': 'aggregate_1'},
metadata={'goodkey': 'good'})
_create_aggregate(self.context,
values={'name': 'aggregate_2'},
metadata={'goodkey': 'bad'})
_create_aggregate(self.context,
values={'name': 'aggregate_3'},
metadata={'badkey': 'good'})
rl1 = aggregate_obj._get_by_metadata_from_db(self.context,
key='goodkey')
self.assertEqual(2, len(rl1))
def test_aggregate_create_in_db(self):
fake_create_aggregate = {
'name': 'fake-aggregate',
}
agg = aggregate_obj._aggregate_create_in_db(self.context,
fake_create_aggregate)
result = aggregate_obj._aggregate_get_from_db(self.context,
agg.id)
self.assertEqual(result.name, fake_create_aggregate['name'])
def test_aggregate_create_in_db_with_metadata(self):
fake_create_aggregate = {
'name': 'fake-aggregate',
}
agg = aggregate_obj._aggregate_create_in_db(self.context,
fake_create_aggregate,
metadata={'goodkey': 'good'})
result = aggregate_obj._aggregate_get_from_db(self.context,
agg.id)
md = aggregate_obj._get_by_metadata_from_db(self.context,
key='goodkey')
self.assertEqual(len(md), 1)
self.assertEqual(md[0]['id'], agg.id)
self.assertEqual(result.name, fake_create_aggregate['name'])
def test_aggregate_create_raise_exist_exc(self):
fake_create_aggregate = {
'name': 'fake-aggregate',
}
aggregate_obj._aggregate_create_in_db(self.context,
fake_create_aggregate)
self.assertRaises(exception.AggregateNameExists,
aggregate_obj._aggregate_create_in_db,
self.context,
fake_create_aggregate,
metadata=None)
def test_aggregate_delete(self):
result = _create_aggregate(self.context, metadata=None)
aggregate_obj._aggregate_delete_from_db(self.context, result['id'])
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_get_from_db,
self.context, result['id'])
def test_aggregate_delete_raise_not_found(self):
# this does not exist!
aggregate_id = 45
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_delete_from_db,
self.context, aggregate_id)
def test_aggregate_delete_with_metadata(self):
result = _create_aggregate(self.context,
metadata={'availability_zone': 'fake_avail_zone'})
aggregate_obj._aggregate_delete_from_db(self.context, result['id'])
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_get_from_db,
self.context, result['id'])
def test_aggregate_update(self):
created = _create_aggregate(self.context,
metadata={'availability_zone': 'fake_avail_zone'})
result = aggregate_obj._aggregate_get_from_db(self.context,
created['id'])
self.assertEqual('fake_avail_zone', result['availability_zone'])
new_values = deepcopy(_get_fake_aggregate(1, result=False))
new_values['availability_zone'] = 'different_avail_zone'
updated = aggregate_obj._aggregate_update_to_db(self.context,
result['id'], new_values)
self.assertEqual('different_avail_zone', updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
result = _create_aggregate(self.context, metadata=None)
values = deepcopy(_get_fake_aggregate(1, result=False))
values['metadata'] = deepcopy(_get_fake_metadata(1))
values['availability_zone'] = 'different_avail_zone'
expected_metadata = deepcopy(values['metadata'])
expected_metadata['availability_zone'] = values['availability_zone']
aggregate_obj._aggregate_update_to_db(self.context, result['id'],
values)
metadata = _aggregate_metadata_get_all(self.context, result['id'])
updated = aggregate_obj._aggregate_get_from_db(self.context,
result['id'])
self.assertThat(metadata,
matchers.DictMatches(expected_metadata))
self.assertEqual('different_avail_zone', updated['availability_zone'])
def test_aggregate_update_with_existing_metadata(self):
result = _create_aggregate(self.context)
values = deepcopy(_get_fake_aggregate(1, result=False))
values['metadata'] = deepcopy(_get_fake_metadata(1))
values['metadata']['fake_key1'] = 'foo'
expected_metadata = deepcopy(values['metadata'])
aggregate_obj._aggregate_update_to_db(self.context, result['id'],
values)
metadata = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected_metadata))
def test_aggregate_update_zone_with_existing_metadata(self):
result = _create_aggregate(self.context)
new_zone = {'availability_zone': 'fake_avail_zone_2'}
metadata = deepcopy(_get_fake_metadata(1))
metadata.update(new_zone)
aggregate_obj._aggregate_update_to_db(self.context, result['id'],
new_zone)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
# this does not exist!
aggregate_id = 2
new_values = deepcopy(_get_fake_aggregate(1, result=False))
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._aggregate_update_to_db,
self.context, aggregate_id, new_values)
def test_aggregate_update_raise_name_exist(self):
_create_aggregate(self.context, values={'name': 'test1'},
metadata={'availability_zone': 'fake_avail_zone'})
_create_aggregate(self.context, values={'name': 'test2'},
metadata={'availability_zone': 'fake_avail_zone'})
aggregate_id = 1
new_values = {'name': 'test2'}
self.assertRaises(exception.AggregateNameExists,
aggregate_obj._aggregate_update_to_db,
self.context, aggregate_id, new_values)
def test_aggregate_host_add_to_db(self):
result = _create_aggregate(self.context, metadata=None)
host = _get_fake_hosts(1)[0]
aggregate_obj._host_add_to_db(self.context, result['id'], host)
expected = aggregate_obj._aggregate_get_from_db(self.context,
result['id'])
self.assertEqual([_get_fake_hosts(1)[0]], expected.hosts)
def test_aggregate_host_re_add_to_db(self):
result = _create_aggregate_with_hosts(self.context,
metadata=None)
host = _get_fake_hosts(1)[0]
aggregate_obj._host_delete_from_db(self.context, result['id'], host)
aggregate_obj._host_add_to_db(self.context, result['id'], host)
expected = _aggregate_host_get_all(self.context, result['id'])
self.assertEqual(len(expected), 2)
def test_aggregate_host_add_to_db_duplicate_works(self):
r1 = _create_aggregate_with_hosts(self.context,
metadata=None)
r2 = _create_aggregate_with_hosts(self.context,
values={'name': 'fake_aggregate2'},
metadata={'availability_zone': 'fake_avail_zone2'})
h1 = _aggregate_host_get_all(self.context, r1['id'])
self.assertEqual(len(h1), 2)
self.assertEqual(r1['id'], h1[0]['aggregate_id'])
h2 = _aggregate_host_get_all(self.context, r2['id'])
self.assertEqual(len(h2), 2)
self.assertEqual(r2['id'], h2[0]['aggregate_id'])
def test_aggregate_host_add_to_db_duplicate_raise_exist_exc(self):
result = _create_aggregate_with_hosts(self.context,
metadata=None)
self.assertRaises(exception.AggregateHostExists,
aggregate_obj._host_add_to_db,
self.context, result['id'],
_get_fake_hosts(1)[0])
def test_aggregate_host_add_to_db_raise_not_found(self):
# this does not exist!
aggregate_id = 1
host = _get_fake_hosts(1)[0]
self.assertRaises(exception.AggregateNotFound,
aggregate_obj._host_add_to_db,
self.context, aggregate_id, host)
def test_aggregate_host_delete_from_db(self):
result = _create_aggregate_with_hosts(self.context,
metadata=None)
aggregate_obj._host_delete_from_db(self.context, result['id'],
_get_fake_hosts(1)[0])
expected = _aggregate_host_get_all(self.context, result['id'])
self.assertEqual(len(expected), 1)
def test_aggregate_host_delete_from_db_raise_not_found(self):
result = _create_aggregate(self.context)
self.assertRaises(exception.AggregateHostNotFound,
aggregate_obj._host_delete_from_db,
self.context, result['id'],
_get_fake_hosts(1)[0])
def test_aggregate_metadata_add(self):
result = _create_aggregate(self.context, metadata=None)
metadata = deepcopy(_get_fake_metadata(1))
aggregate_obj._metadata_add_to_db(self.context, result['id'], metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_empty_metadata(self):
result = _create_aggregate(self.context, metadata=None)
metadata = {}
aggregate_obj._metadata_add_to_db(self.context, result['id'], metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_and_update(self):
result = _create_aggregate(self.context)
metadata = deepcopy(_get_fake_metadata(1))
key = list(metadata.keys())[0]
new_metadata = {key: 'foo',
'fake_new_key': 'fake_new_value'}
metadata.update(new_metadata)
aggregate_obj._metadata_add_to_db(self.context,
result['id'], new_metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_retry(self):
result = _create_aggregate(self.context, metadata=None)
with mock.patch('nova.db.sqlalchemy.api_models.'
'AggregateMetadata.__table__.insert') as insert_mock:
insert_mock.side_effect = db_exc.DBDuplicateEntry
self.assertRaises(db_exc.DBDuplicateEntry,
aggregate_obj._metadata_add_to_db,
self.context,
result['id'],
{'fake_key2': 'fake_value2'},
max_retries=5)
def test_aggregate_metadata_update(self):
result = _create_aggregate(self.context)
metadata = deepcopy(_get_fake_metadata(1))
key = list(metadata.keys())[0]
aggregate_obj._metadata_delete_from_db(self.context, result['id'], key)
new_metadata = {key: 'foo'}
aggregate_obj._metadata_add_to_db(self.context,
result['id'], new_metadata)
expected = _aggregate_metadata_get_all(self.context, result['id'])
metadata[key] = 'foo'
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
result = _create_aggregate(self.context, metadata=None)
metadata = deepcopy(_get_fake_metadata(1))
aggregate_obj._metadata_add_to_db(self.context, result['id'], metadata)
aggregate_obj._metadata_delete_from_db(self.context, result['id'],
list(metadata.keys())[0])
expected = _aggregate_metadata_get_all(self.context, result['id'])
del metadata[list(metadata.keys())[0]]
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_remove_availability_zone(self):
result = _create_aggregate(self.context, metadata={'availability_zone':
'fake_avail_zone'})
aggregate_obj._metadata_delete_from_db(self.context,
result['id'],
'availability_zone')
aggr = aggregate_obj._aggregate_get_from_db(self.context, result['id'])
self.assertIsNone(aggr['availability_zone'])
def test_aggregate_metadata_delete_raise_not_found(self):
result = _create_aggregate(self.context)
self.assertRaises(exception.AggregateMetadataNotFound,
aggregate_obj._metadata_delete_from_db,
self.context, result['id'], 'foo_key')
def create_aggregate(context, db_id):
fake_aggregate = _get_fake_aggregate(db_id, in_api=False, result=False)
aggregate_obj._aggregate_create_in_db(context, fake_aggregate,
metadata=_get_fake_metadata(db_id))
for host in _get_fake_hosts(db_id):
aggregate_obj._host_add_to_db(context, fake_aggregate['id'], host)
def compare_obj(test, result, source):
source['deleted'] = False
def updated_at_comparator(result, source):
return True
return base_compare(test, result, source, subs=SUBS,
comparators={'updated_at': updated_at_comparator})
class AggregateObjectTestCase(test.TestCase):
def setUp(self):
super(AggregateObjectTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self._seed_data()
def _seed_data(self):
for i in range(1, 10):
create_aggregate(self.context, i)
def test_create(self):
new_agg = aggregate_obj.Aggregate(self.context)
new_agg.name = 'new-aggregate'
new_agg.create()
result = aggregate_obj.Aggregate.get_by_id(self.context, new_agg.id)
self.assertEqual(new_agg.name, result.name)
def test_get_by_id(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, _get_fake_aggregate(i))
def test_save(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['name'] = 'new-name' + str(i)
agg.name = 'new-name' + str(i)
agg.save()
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
def test_update_metadata(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['metadetails'] = {'constant_key': 'constant_value'}
agg.update_metadata({'unique_key': None})
agg.save()
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
def test_destroy(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
agg.destroy()
aggs = aggregate_obj.AggregateList.get_all(self.context)
self.assertEqual(len(aggs), 0)
def test_add_host(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['hosts'].append('barbar')
agg.add_host('barbar')
agg.save()
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
def test_delete_host(self):
for i in range(1, 10):
agg = aggregate_obj.Aggregate.get_by_id(self.context, i)
fake_agg = _get_fake_aggregate(i)
fake_agg['hosts'].remove('constant_host')
agg.delete_host('constant_host')
result = aggregate_obj.Aggregate.get_by_id(self.context, i)
compare_obj(self, agg, fake_agg)
compare_obj(self, result, fake_agg)
def test_get_by_metadata(self):
agg = aggregate_obj.Aggregate.get_by_id(self.context, 1)
agg.update_metadata({'foo': 'bar'})
agg = aggregate_obj.Aggregate.get_by_id(self.context, 2)
agg.update_metadata({'foo': 'baz',
'fu': 'bar'})
aggs = aggregate_obj.AggregateList.get_by_metadata(
self.context, key='foo', value='bar')
self.assertEqual(1, len(aggs))
self.assertEqual(1, aggs[0].id)
aggs = aggregate_obj.AggregateList.get_by_metadata(
self.context, value='bar')
self.assertEqual(2, len(aggs))
self.assertEqual(set([1, 2]), set([a.id for a in aggs]))
def test_get_by_metadata_from_db_assertion(self):
self.assertRaises(AssertionError,
aggregate_obj._get_by_metadata_from_db,
self.context)

@@ -1,57 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.db.sqlalchemy import api_models
from nova.db.sqlalchemy import models
from nova import test
class AggregateTablesCompareTestCase(test.NoDBTestCase):
def _get_column_list(self, model):
column_list = [m.key for m in model.__table__.columns]
return column_list
def _check_column_list(self,
columns_new,
columns_old,
added=None,
removed=None):
for c in added or []:
columns_new.remove(c)
for c in removed or []:
columns_old.remove(c)
intersect = set(columns_new).intersection(set(columns_old))
if intersect != set(columns_new) or intersect != set(columns_old):
return False
return True
def _compare_models(self, m_a, m_b,
added=None, removed=None):
added = added or []
removed = removed or ['deleted_at', 'deleted']
c_a = self._get_column_list(m_a)
c_b = self._get_column_list(m_b)
self.assertTrue(self._check_column_list(c_a, c_b,
added=added,
removed=removed))
def test_tables_aggregate_hosts(self):
self._compare_models(api_models.AggregateHost(),
models.AggregateHost())
def test_tables_aggregate_metadata(self):
self._compare_models(api_models.AggregateMetadata(),
models.AggregateMetadata())
def test_tables_aggregates(self):
self._compare_models(api_models.Aggregate(),
models.Aggregate())

@@ -1,168 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
class TestAggregateNotificationSample(
notification_sample_base.NotificationSampleTestBase):
def test_aggregate_create_delete(self):
aggregate_req = {
"aggregate": {
"name": "my-aggregate",
"availability_zone": "nova"}}
aggregate = self.admin_api.post_aggregate(aggregate_req)
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'aggregate-create-start',
replacements={
'uuid': aggregate['uuid']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'aggregate-create-end',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
self.admin_api.delete_aggregate(aggregate['id'])
self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'aggregate-delete-start',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
self._verify_notification(
'aggregate-delete-end',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
def test_aggregate_add_remove_host(self):
aggregate_req = {
"aggregate": {
"name": "my-aggregate",
"availability_zone": "nova"}}
aggregate = self.admin_api.post_aggregate(aggregate_req)
fake_notifier.reset()
add_host_req = {
"add_host": {
"host": "compute"
}
}
self.admin_api.post_aggregate_action(aggregate['id'], add_host_req)
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'aggregate-add_host-start',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'aggregate-add_host-end',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
remove_host_req = {
"remove_host": {
"host": "compute"
}
}
self.admin_api.post_aggregate_action(aggregate['id'], remove_host_req)
self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'aggregate-remove_host-start',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
self._verify_notification(
'aggregate-remove_host-end',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
self.admin_api.delete_aggregate(aggregate['id'])
def test_aggregate_update_metadata(self):
aggregate_req = {
"aggregate": {
"name": "my-aggregate",
"availability_zone": "nova"}}
aggregate = self.admin_api.post_aggregate(aggregate_req)
set_metadata_req = {
"set_metadata": {
"metadata": {
"availability_zone": "AZ-1"
}
}
}
fake_notifier.reset()
self.admin_api.post_aggregate_action(aggregate['id'], set_metadata_req)
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'aggregate-update_metadata-start',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'aggregate-update_metadata-end',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
def test_aggregate_updateprops(self):
aggregate_req = {
"aggregate": {
"name": "my-aggregate",
"availability_zone": "nova"}}
aggregate = self.admin_api.post_aggregate(aggregate_req)
update_req = {
"aggregate": {
"name": "my-new-aggregate"}}
self.admin_api.put_aggregate(aggregate['id'], update_req)
# 0. aggregate-create-start
# 1. aggregate-create-end
# 2. aggregate-update_prop-start
# 3. aggregate-update_prop-end
self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'aggregate-update_prop-start',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
self._verify_notification(
'aggregate-update_prop-end',
replacements={
'uuid': aggregate['uuid'],
'id': aggregate['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])

@@ -1,24 +0,0 @@
================================
Tests for Specific Regressions
================================
When end users report a bug that we can reproduce with a full-stack test,
we should write one, and we should keep that regression test for the bug
in our tree. It can be deleted at some future date if needed, but it
should largely not be changed.
Writing Regression Tests
========================
- These should be full stack tests which inherit from
nova.test.TestCase directly. (This is to prevent coupling with other tests).
- They should setup a full stack cloud in their setUp via fixtures
- They should each live in a file which is named test_bug_######.py
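A minimal skeleton following these conventions might look like the sketch
below; the bug number and the exact fixture set are placeholders, not a
real regression test:

    # Hypothetical file name: test_bug_1234567.py
    from nova import test
    from nova.tests import fixtures as nova_fixtures

    class TestBug1234567(test.TestCase):
        """Reproduce bug 1234567 with a full-stack setup."""

        def setUp(self):
            super(TestBug1234567, self).setUp()
            # Stand up just enough of a cloud to reproduce the bug.
            self.useFixture(nova_fixtures.NeutronFixture(self))
            self.useFixture(nova_fixtures.PlacementFixture())
            api_fixture = self.useFixture(
                nova_fixtures.OSAPIFixture(api_version='v2.1'))
            self.api = api_fixture.api

        def test_reproduce_bug(self):
            # Exercise the code path that triggers the bug and assert the
            # expected (fixed) behaviour here.
            pass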
Writing Tests Before the Bug is Fixed
=====================================
TODO: describe writing and landing tests as a reproducer before the bug is
fixed.

@@ -1,365 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from nova.scheduler.client import report
import nova.conf
from nova import context as nova_context
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
CONF = nova.conf.CONF
class AggregatesTest(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2'
ADMIN_API = True
def _add_hosts_to_aggregate(self):
"""List all compute services and add them all to an aggregate."""
compute_services = [s for s in self.api.get_services()
if s['binary'] == 'nova-compute']
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
for service in compute_services:
self.api.add_host_to_aggregate(agg['id'], service['host'])
return len(compute_services)
def test_add_hosts(self):
# Default case with one compute, mapped for us
self.assertEqual(1, self._add_hosts_to_aggregate())
def test_add_unmapped_host(self):
"""Ensure that hosts without mappings are still found and added"""
# Add another compute, but nuke its HostMapping
self.start_service('compute', host='compute2')
self.host_mappings['compute2'].destroy()
self.assertEqual(2, self._add_hosts_to_aggregate())
class AggregateRequestFiltersTest(test.TestCase,
integrated_helpers.InstanceHelperMixin):
microversion = 'latest'
compute_driver = 'fake.MediumFakeDriver'
def setUp(self):
self.flags(compute_driver=self.compute_driver)
super(AggregateRequestFiltersTest, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.AllServicesCurrent())
placement = self.useFixture(nova_fixtures.PlacementFixture())
self.placement_api = placement.api
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.api = self.admin_api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.scheduler_service = self.start_service('scheduler')
self.computes = {}
self.aggregates = {}
self._start_compute('host1')
self._start_compute('host2')
self.context = nova_context.get_admin_context()
self.report_client = report.SchedulerReportClient()
self.flavors = self.api.get_flavors()
# Aggregate with only host1
self._create_aggregate('only-host1')
self._add_host_to_aggregate('only-host1', 'host1')
# Aggregate with only host2
self._create_aggregate('only-host2')
self._add_host_to_aggregate('only-host2', 'host2')
# Aggregate with neither host
self._create_aggregate('no-hosts')
def _start_compute(self, host):
"""Start a nova compute service on the given host
:param host: the name of the host that will be associated to the
compute service.
:return: the nova compute service object
"""
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host)
self.computes[host] = compute
return compute
def _create_aggregate(self, name):
agg = self.admin_api.post_aggregate({'aggregate': {'name': name}})
self.aggregates[name] = agg
def _get_provider_uuid_by_host(self, host):
"""Return the compute node uuid for a named compute host."""
# NOTE(gibi): the compute node id is the same as the compute node
# provider uuid on that compute
resp = self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
return resp['hypervisors'][0]['id']
def _add_host_to_aggregate(self, agg, host):
"""Add a compute host to both nova and placement aggregates.
:param agg: Name of the nova aggregate
:param host: Name of the compute host
"""
agg = self.aggregates[agg]
self.admin_api.add_host_to_aggregate(agg['id'], host)
host_uuid = self._get_provider_uuid_by_host(host)
# Make sure we have a view of the provider we're about to mess with
# FIXME(efried): This should be a thing we can do without internals
self.report_client._ensure_resource_provider(
self.context, host_uuid, name=host)
self.report_client.aggregate_add_host(self.context, agg['uuid'], host)
def _wait_for_state_change(self, server, from_status):
for i in range(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _boot_server(self, az=None):
server_req = self._build_minimal_create_server_request(
self.api, 'test-instance', flavor_id=self.flavors[0]['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none', az=az)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(created_server, 'BUILD')
return server
def _get_instance_host(self, server):
srv = self.admin_api.get_server(server['id'])
return srv['OS-EXT-SRV-ATTR:host']
def _set_az_aggregate(self, agg, az):
"""Set the availability_zone of an aggregate
:param agg: Name of the nova aggregate
:param az: Availability zone name
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'availability_zone': az,
}
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
def _grant_tenant_aggregate(self, agg, tenants):
"""Grant a set of tenants access to use an aggregate.
:param agg: Name of the nova aggregate
:param tenants: A list of all tenant ids that will be allowed access
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'filter_tenant_id%i' % i: tenant
for i, tenant in enumerate(tenants)
}
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
class TenantAggregateFilterTest(AggregateRequestFiltersTest):
def setUp(self):
super(TenantAggregateFilterTest, self).setUp()
# Default to enabling the filter and making it mandatory
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
group='scheduler')
def test_tenant_id_required_fails_if_no_aggregate(self):
server = self._boot_server()
# Without granting our tenant permission to an aggregate, instance
# creates should fail since aggregates are required
self.assertEqual('ERROR', server['status'])
def test_tenant_id_not_required_succeeds_if_no_aggregate(self):
self.flags(placement_aggregate_required_for_tenants=False,
group='scheduler')
server = self._boot_server()
# Without granting our tenant permission to an aggregate, instance
# creates should still succeed since aggregates are not required
self.assertEqual('ACTIVE', server['status'])
def test_filter_honors_tenant_id(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with only host1 in it
# and boot some servers. They should all stack up on host1.
self._grant_tenant_aggregate('only-host1',
['foo', tenant, 'bar'])
server1 = self._boot_server()
server2 = self._boot_server()
self.assertEqual('ACTIVE', server1['status'])
self.assertEqual('ACTIVE', server2['status'])
# Grant our tenant access to the aggregate with only host2 in it
# and boot some servers. They should all stack up on host2.
self._grant_tenant_aggregate('only-host1',
['foo', 'bar'])
self._grant_tenant_aggregate('only-host2',
['foo', tenant, 'bar'])
server3 = self._boot_server()
server4 = self._boot_server()
self.assertEqual('ACTIVE', server3['status'])
self.assertEqual('ACTIVE', server4['status'])
# Make sure the servers landed on the hosts we had access to at
# the time we booted them.
hosts = [self._get_instance_host(s)
for s in (server1, server2, server3, server4)]
expected_hosts = ['host1', 'host1', 'host2', 'host2']
self.assertEqual(expected_hosts, hosts)
def test_filter_with_empty_aggregate(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
server = self._boot_server()
self.assertEqual('ERROR', server['status'])
def test_filter_with_multiple_aggregates_for_tenant(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it,
# and one with a host.
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
self._grant_tenant_aggregate('only-host2',
['foo', tenant, 'bar'])
# Boot several servers and make sure they all land on the
# only host we have access to.
for i in range(0, 4):
server = self._boot_server()
self.assertEqual('ACTIVE', server['status'])
self.assertEqual('host2', self._get_instance_host(server))
class HostNameWeigher(weights.BaseHostWeigher):
def _weigh_object(self, host_state, weight_properties):
"""Arbitrary preferring host1 over host2 over host3."""
weights = {'host1': 100, 'host2': 50, 'host3': 1}
return weights.get(host_state.host, 0)
class AvailabilityZoneFilterTest(AggregateRequestFiltersTest):
def setUp(self):
# Default to enabling the filter
self.flags(query_placement_for_availability_zone=True,
group='scheduler')
# Use our custom weigher defined above to make sure that we have
# a predictable scheduling sort order.
self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
group='filter_scheduler')
# NOTE(danms): Do this before calling setUp() so that
# the scheduler service that is started sees the new value
filters = CONF.filter_scheduler.enabled_filters
filters.remove('AvailabilityZoneFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(AvailabilityZoneFilterTest, self).setUp()
def test_filter_with_az(self):
self._set_az_aggregate('only-host2', 'myaz')
server1 = self._boot_server(az='myaz')
server2 = self._boot_server(az='myaz')
hosts = [self._get_instance_host(s) for s in (server1, server2)]
self.assertEqual(['host2', 'host2'], hosts)
class TestAggregateFiltersTogether(AggregateRequestFiltersTest):
def setUp(self):
# NOTE(danms): Do this before calling setUp() so that
# the scheduler service that is started sees the new value
filters = CONF.filter_scheduler.enabled_filters
filters.remove('AvailabilityZoneFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(TestAggregateFiltersTogether, self).setUp()
# Default to enabling both filters
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
group='scheduler')
self.flags(query_placement_for_availability_zone=True,
group='scheduler')
def test_tenant_with_az_match(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host1
self._set_az_aggregate('only-host1', 'myaz')
# Boot the server into that az and make sure we land
server = self._boot_server(az='myaz')
self.assertEqual('host1', self._get_instance_host(server))
def test_tenant_with_az_mismatch(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host2
self._set_az_aggregate('only-host2', 'myaz')
# Boot the server into that az and make sure we fail
server = self._boot_server(az='myaz')
self.assertIsNone(self._get_instance_host(server))
server = self.api.get_server(server['id'])
self.assertEqual('ERROR', server['status'])

@@ -1,250 +0,0 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from nova import exception
from nova.objects import aggregate
from nova import test
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
fake_aggregate = {
'deleted': 0,
'deleted_at': None,
'created_at': NOW,
'updated_at': None,
'id': 123,
'uuid': uuidsentinel.fake_aggregate,
'name': 'fake-aggregate',
'hosts': ['foo', 'bar'],
'metadetails': {'this': 'that'},
}
SUBS = {'metadata': 'metadetails'}
class _TestAggregateObject(object):
@mock.patch('nova.objects.aggregate._aggregate_get_from_db')
def test_get_by_id_from_api(self, mock_get_api):
mock_get_api.return_value = fake_aggregate
agg = aggregate.Aggregate.get_by_id(self.context, 123)
self.compare_obj(agg, fake_aggregate, subs=SUBS)
mock_get_api.assert_called_once_with(self.context, 123)
@mock.patch('nova.objects.aggregate._aggregate_get_from_db_by_uuid')
def test_get_by_uuid_from_api(self, get_by_uuid_api):
get_by_uuid_api.return_value = fake_aggregate
agg = aggregate.Aggregate.get_by_uuid(self.context,
uuidsentinel.fake_aggregate)
self.assertEqual(uuidsentinel.fake_aggregate, agg.uuid)
self.assertEqual(fake_aggregate['id'], agg.id)
@mock.patch('nova.objects.aggregate._aggregate_create_in_db')
def test_create(self, api_create_mock):
api_create_mock.return_value = fake_aggregate
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
agg.uuid = uuidsentinel.fake_agg
agg.create()
api_create_mock.assert_called_once_with(
self.context,
{'name': 'foo', 'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'})
self.compare_obj(agg, fake_aggregate, subs=SUBS)
api_create_mock.assert_called_once_with(self.context,
{'name': 'foo', 'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'})
@mock.patch('nova.objects.aggregate._aggregate_create_in_db')
def test_recreate_fails(self, api_create_mock):
api_create_mock.return_value = fake_aggregate
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
agg.uuid = uuidsentinel.fake_agg
agg.create()
self.assertRaises(exception.ObjectActionError, agg.create)
api_create_mock.assert_called_once_with(self.context,
{'name': 'foo', 'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'})
@mock.patch('nova.objects.aggregate._aggregate_delete_from_db')
def test_destroy(self, api_delete_mock):
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.destroy()
api_delete_mock.assert_called_with(self.context, 123)
@mock.patch('nova.compute.utils.notify_about_aggregate_action')
@mock.patch('nova.objects.aggregate._aggregate_update_to_db')
def test_save_to_api(self, api_update_mock, mock_notify):
api_update_mock.return_value = fake_aggregate
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.name = 'fake-api-aggregate'
agg.save()
self.compare_obj(agg, fake_aggregate, subs=SUBS)
api_update_mock.assert_called_once_with(self.context,
123,
{'name': 'fake-api-aggregate'})
api_update_mock.assert_called_once_with(self.context,
123, {'name': 'fake-api-aggregate'})
mock_notify.assert_has_calls([
mock.call(context=self.context,
aggregate=test.MatchType(aggregate.Aggregate),
action='update_prop', phase='start'),
mock.call(context=self.context,
aggregate=test.MatchType(aggregate.Aggregate),
action='update_prop', phase='end')])
self.assertEqual(2, mock_notify.call_count)
def test_save_and_create_no_hosts(self):
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.hosts = ['foo', 'bar']
self.assertRaises(exception.ObjectActionError,
agg.create)
self.assertRaises(exception.ObjectActionError,
agg.save)
@mock.patch('nova.objects.aggregate._metadata_delete_from_db')
@mock.patch('nova.objects.aggregate._metadata_add_to_db')
@mock.patch('nova.compute.utils.notify_about_aggregate_action')
@mock.patch('oslo_versionedobjects.base.VersionedObject.'
'obj_from_primitive')
def test_update_metadata_api(self,
mock_obj_from_primitive,
mock_notify,
mock_api_metadata_add,
mock_api_metadata_delete):
fake_notifier.NOTIFICATIONS = []
agg = aggregate.Aggregate()
agg._context = self.context
agg.id = 123
agg.metadata = {'foo': 'bar'}
agg.obj_reset_changes()
mock_obj_from_primitive.return_value = agg
agg.update_metadata({'todelete': None, 'toadd': 'myval'})
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
mock_notify.assert_has_calls([
mock.call(context=self.context, aggregate=agg,
action='update_metadata', phase='start'),
mock.call(context=self.context, aggregate=agg,
action='update_metadata', phase='end')])
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)
mock_api_metadata_delete.assert_called_once_with(self.context, 123,
'todelete')
mock_api_metadata_add.assert_called_once_with(self.context, 123,
{'toadd': 'myval'})
mock_api_metadata_delete.assert_called_once_with(self.context,
123,
'todelete')
mock_api_metadata_add.assert_called_once_with(self.context,
123,
{'toadd': 'myval'})
@mock.patch('nova.objects.aggregate._host_add_to_db')
def test_add_host_api(self, mock_host_add_api):
mock_host_add_api.return_value = {'host': 'bar'}
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo']
agg._context = self.context
agg.add_host('bar')
self.assertEqual(agg.hosts, ['foo', 'bar'])
mock_host_add_api.assert_called_once_with(self.context, 123, 'bar')
@mock.patch('nova.objects.aggregate._host_delete_from_db')
def test_delete_host_api(self, mock_host_delete_api):
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo', 'bar']
agg._context = self.context
agg.delete_host('foo')
self.assertEqual(agg.hosts, ['bar'])
mock_host_delete_api.assert_called_once_with(self.context, 123, 'foo')
def test_availability_zone(self):
agg = aggregate.Aggregate()
agg.metadata = {'availability_zone': 'foo'}
self.assertEqual('foo', agg.availability_zone)
@mock.patch('nova.objects.aggregate._get_all_from_db')
def test_get_all(self, mock_api_get_all):
mock_api_get_all.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_all(self.context)
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.objects.aggregate._get_by_host_from_db')
def test_by_host(self, mock_api_get_by_host):
mock_api_get_by_host.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.objects.aggregate._get_by_metadata_from_db')
def test_get_by_metadata_key(self, mock_api_get_by_metadata_key):
mock_api_get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this')
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.objects.aggregate._get_by_metadata_from_db')
def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this', hosts=['baz'])
self.assertEqual(0, len(aggs))
@mock.patch('nova.objects.aggregate._get_by_metadata_from_db')
def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this', hosts=['foo', 'bar'])
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
class TestAggregateObject(test_objects._LocalTest,
_TestAggregateObject):
pass
class TestRemoteAggregateObject(test_objects._RemoteTest,
_TestAggregateObject):
pass

@@ -1,150 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import objects
from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggImagePropsIsolationFilter(test.NoDBTestCase):
def setUp(self):
super(TestAggImagePropsIsolationFilter, self).setUp()
self.filt_cls = aipi.AggregateImagePropertiesIsolation()
def test_aggregate_image_properties_isolation_passes(self, agg_mock):
agg_mock.return_value = {'hw_vm_mode': set(['hvm'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
hw_vm_mode='hvm')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_passes_comma(self, agg_mock):
agg_mock.return_value = {'hw_vm_mode': set(['hvm', 'xen'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
hw_vm_mode='hvm')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_props_bad_comma(self,
agg_mock):
agg_mock.return_value = {'os_distro': set(['windows', 'linux'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
os_distro='windows,')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_multi_props_passes(self,
agg_mock):
agg_mock.return_value = {'hw_vm_mode': set(['hvm']),
'hw_cpu_cores': set(['2'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
hw_vm_mode='hvm', hw_cpu_cores=2)))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_props_with_meta_passes(self,
agg_mock):
agg_mock.return_value = {'hw_vm_mode': set(['hvm'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps()))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_props_imgprops_passes(self,
agg_mock):
agg_mock.return_value = {}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
hw_vm_mode='hvm')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_props_not_match_fails(self,
agg_mock):
agg_mock.return_value = {'hw_vm_mode': set(['hvm'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
hw_vm_mode='xen')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_props_not_match2_fails(self,
agg_mock):
agg_mock.return_value = {'hw_vm_mode': set(['hvm']),
'hw_cpu_cores': set(['1'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
hw_vm_mode='hvm', hw_cpu_cores=2)))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_isolation_props_namespace(self,
agg_mock):
self.flags(aggregate_image_properties_isolation_namespace='hw',
group='filter_scheduler')
self.flags(aggregate_image_properties_isolation_separator='_',
group='filter_scheduler')
agg_mock.return_value = {'hw_vm_mode': set(['hvm']),
'img_owner_id': set(['foo'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
hw_vm_mode='hvm', img_owner_id='wrong')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_iso_props_with_custom_meta(self,
agg_mock):
agg_mock.return_value = {'os': set(['linux'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
os_type='linux')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_iso_props_with_matching_meta_pass(self,
agg_mock):
agg_mock.return_value = {'os_type': set(['linux'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
os_type='linux')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_image_properties_iso_props_with_matching_meta_fail(
self, agg_mock):
agg_mock.return_value = {'os_type': set(['windows'])}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
image=objects.ImageMeta(properties=objects.ImageMetaProps(
os_type='linux')))
host = fakes.FakeHostState('host1', 'compute', {})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

Some files were not shown because too many files have changed in this diff.