# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding=UTF8

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API."""
|
|
|
|
import copy
|
|
import datetime
|
|
import types
|
|
import uuid as stdlib_uuid
|
|
|
|
import mox
|
|
from oslo.config import cfg
|
|
from sqlalchemy.dialects import sqlite
|
|
from sqlalchemy import MetaData
|
|
from sqlalchemy.schema import Table
|
|
from sqlalchemy.sql.expression import select
|
|
|
|
from nova import context
|
|
from nova import db
|
|
from nova.db.sqlalchemy import api as sqlalchemy_api
|
|
from nova import exception
|
|
from nova.openstack.common.db.sqlalchemy import session as db_session
|
|
from nova.openstack.common import timeutils
|
|
from nova.openstack.common import uuidutils
|
|
from nova import test
|
|
from nova.tests import matchers
|
|
from nova import utils
|
|
|
|
|
|
CONF = cfg.CONF
|
|
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
|
|
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
|
|
|
|
get_engine = db_session.get_engine
|
|
get_session = db_session.get_session
|
|
|
|
|
|


class DbTestCase(test.TestCase):
    def setUp(self):
        super(DbTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

    def create_instances_with_args(self, **kwargs):
        args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
                'node': 'node1', 'project_id': self.project_id,
                'vm_state': 'fake'}
        if 'context' in kwargs:
            ctxt = kwargs.pop('context')
            args['project_id'] = ctxt.project_id
        else:
            ctxt = self.context
        args.update(kwargs)
        return db.instance_create(ctxt, args)

    def fake_metadata(self, content):
        meta = {}
        for i in range(0, 10):
            meta["foo%i" % i] = "this is %s item %i" % (content, i)
        return meta

    def create_metadata_for_instance(self, instance_uuid):
        meta = self.fake_metadata('metadata')
        db.instance_metadata_update(self.context, instance_uuid, meta, False)
        sys_meta = self.fake_metadata('system_metadata')
        db.instance_system_metadata_update(self.context, instance_uuid,
                                           sys_meta, False)
        return meta, sys_meta
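
# NOTE: DbTestCase.create_instances_with_args() merges keyword overrides
# into a baseline instance record before calling db.instance_create(), so a
# test can pin down a single column, e.g.:
#
#     inst = self.create_instances_with_args(hostname='fake_name')
#
# Passing context=<RequestContext> switches both the creating context and
# the resulting project_id (descriptive comment; the kwargs shown are the
# ones exercised by the tests below).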


class DbApiTestCase(DbTestCase):
    def test_create_instance_unique_hostname(self):
        otherprojectcontext = context.RequestContext(self.user_id,
                                                     "%s2" % self.project_id)

        self.create_instances_with_args(hostname='fake_name')

        # With scope 'global' any duplicate should fail, be it this project:
        self.flags(osapi_compute_unique_server_name_scope='global')
        self.assertRaises(exception.InstanceExists,
                          self.create_instances_with_args,
                          hostname='fake_name')

        # or another:
        self.assertRaises(exception.InstanceExists,
                          self.create_instances_with_args,
                          context=otherprojectcontext,
                          hostname='fake_name')

        # With scope 'project' a duplicate in the project should fail:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.assertRaises(exception.InstanceExists,
                          self.create_instances_with_args,
                          hostname='fake_name')

        # With scope 'project' a duplicate in a different project should work:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.create_instances_with_args(context=otherprojectcontext,
                                        hostname='fake_name')

        self.flags(osapi_compute_unique_server_name_scope=None)

    def test_instance_metadata_get_all_query(self):
        self.create_instances_with_args(metadata={'foo': 'bar'})
        self.create_instances_with_args(metadata={'baz': 'quux'})

        result = db.instance_metadata_get_all(self.context, [])
        self.assertEqual(2, len(result))

        result = db.instance_metadata_get_all(self.context,
                                              [{'key': 'foo'}])
        self.assertEqual(1, len(result))

        result = db.instance_metadata_get_all(self.context,
                                              [{'value': 'quux'}])
        self.assertEqual(1, len(result))

        result = db.instance_metadata_get_all(self.context,
                                              [{'value': 'quux'},
                                               {'key': 'foo'}])
        self.assertEqual(2, len(result))

    def test_ec2_ids_not_found_are_printable(self):
        def check_exc_format(method):
            try:
                method(self.context, 'fake')
            except exception.NotFound as exc:
                self.assertIn('fake', unicode(exc))

        check_exc_format(db.get_ec2_volume_id_by_uuid)
        check_exc_format(db.get_volume_uuid_by_ec2_id)
        check_exc_format(db.get_ec2_snapshot_id_by_uuid)
        check_exc_format(db.get_snapshot_uuid_by_ec2_id)
        check_exc_format(db.get_ec2_instance_id_by_uuid)
        check_exc_format(db.get_instance_uuid_by_ec2_id)

    def test_instance_get_all_with_meta(self):
        inst = self.create_instances_with_args()
        fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid'])
        result = db.instance_get_all(self.context)
        for inst in result:
            meta = utils.metadata_to_dict(inst['metadata'])
            self.assertEqual(meta, fake_meta)
            sys_meta = utils.metadata_to_dict(inst['system_metadata'])
            self.assertEqual(sys_meta, fake_sys)

    def test_instance_get_all_by_filters_with_meta(self):
        inst = self.create_instances_with_args()
        fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid'])
        result = db.instance_get_all_by_filters(self.context, {})
        for inst in result:
            meta = utils.metadata_to_dict(inst['metadata'])
            self.assertEqual(meta, fake_meta)
            sys_meta = utils.metadata_to_dict(inst['system_metadata'])
            self.assertEqual(sys_meta, fake_sys)

    def test_instance_get_all_by_filters_without_meta(self):
        inst = self.create_instances_with_args()
        fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid'])
        result = db.instance_get_all_by_filters(self.context, {},
                                                columns_to_join=[])
        for inst in result:
            meta = utils.metadata_to_dict(inst['metadata'])
            self.assertEqual(meta, {})
            sys_meta = utils.metadata_to_dict(inst['system_metadata'])
            self.assertEqual(sys_meta, {})

    def test_instance_get_all_by_filters(self):
        self.create_instances_with_args()
        self.create_instances_with_args()
        result = db.instance_get_all_by_filters(self.context, {})
        self.assertEqual(2, len(result))

    def test_instance_get_all_by_filters_regex(self):
        self.create_instances_with_args(display_name='test1')
        self.create_instances_with_args(display_name='teeeest2')
        self.create_instances_with_args(display_name='diff')
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': 't.*st.'})
        self.assertEqual(2, len(result))
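
    # NOTE: on databases that support it, the 'display_name' filter above is
    # treated as a regular expression, which is why 't.*st.' matches both
    # 'test1' and 'teeeest2'; see
    # test_instance_get_all_by_filters_regex_unsupported_db in
    # NotDbApiTestCase below for the LIKE-based fallback.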

    def test_instance_get_all_by_filters_metadata(self):
        self.create_instances_with_args(metadata={'foo': 'bar'})
        self.create_instances_with_args()
        result = db.instance_get_all_by_filters(self.context,
                                                {'metadata': {'foo': 'bar'}})
        self.assertEqual(1, len(result))

    def test_instance_get_all_by_filters_unicode_value(self):
        self.create_instances_with_args(display_name=u'test♥')
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': u'test'})
        self.assertEqual(1, len(result))

    def test_instance_get_by_uuid(self):
        inst = self.create_instances_with_args()
        fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid'])
        result = db.instance_get_by_uuid(self.context, inst['uuid'])
        meta = utils.metadata_to_dict(result['metadata'])
        self.assertEqual(meta, fake_meta)
        sys_meta = utils.metadata_to_dict(result['system_metadata'])
        self.assertEqual(sys_meta, fake_sys)

    def test_instance_get_by_uuid_join_empty(self):
        inst = self.create_instances_with_args()
        fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid'])
        result = db.instance_get_by_uuid(self.context, inst['uuid'],
                                         columns_to_join=[])
        meta = utils.metadata_to_dict(result['metadata'])
        self.assertEqual(meta, {})
        sys_meta = utils.metadata_to_dict(result['system_metadata'])
        self.assertEqual(sys_meta, {})

    def test_instance_get_by_uuid_join_meta(self):
        inst = self.create_instances_with_args()
        fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid'])
        result = db.instance_get_by_uuid(self.context, inst['uuid'],
                                         columns_to_join=['metadata'])
        meta = utils.metadata_to_dict(result['metadata'])
        self.assertEqual(meta, fake_meta)
        sys_meta = utils.metadata_to_dict(result['system_metadata'])
        self.assertEqual(sys_meta, {})

    def test_instance_get_by_uuid_join_sys_meta(self):
        inst = self.create_instances_with_args()
        fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid'])
        result = db.instance_get_by_uuid(self.context, inst['uuid'],
                                         columns_to_join=['system_metadata'])
        meta = utils.metadata_to_dict(result['metadata'])
        self.assertEqual(meta, {})
        sys_meta = utils.metadata_to_dict(result['system_metadata'])
        self.assertEqual(sys_meta, fake_sys)

    def test_instance_get_all_by_filters_deleted(self):
        inst1 = self.create_instances_with_args()
        inst2 = self.create_instances_with_args(reservation_id='b')
        db.instance_destroy(self.context, inst1['uuid'])
        result = db.instance_get_all_by_filters(self.context, {})
        self.assertEqual(2, len(result))
        self.assertIn(inst1['id'], [result[0]['id'], result[1]['id']])
        self.assertIn(inst2['id'], [result[0]['id'], result[1]['id']])
        if inst1['id'] == result[0]['id']:
            self.assertTrue(result[0]['deleted'])
        else:
            self.assertTrue(result[1]['deleted'])

    def test_instance_get_all_by_host_and_node_no_join(self):
        # Test that system metadata is not joined.
        sys_meta = {'foo': 'bar'}
        expected = self.create_instances_with_args(system_metadata=sys_meta)

        elevated = self.context.elevated()
        instances = db.instance_get_all_by_host_and_node(elevated, 'host1',
                                                         'node1')
        self.assertEqual(1, len(instances))
        instance = instances[0]
        self.assertEqual(expected['uuid'], instance['uuid'])
        sysmeta = dict(instance)['system_metadata']
        self.assertEqual(len(sysmeta), 0)

    def test_migration_get_unconfirmed_by_dest_compute(self):
        ctxt = context.get_admin_context()

        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10,
                                                               'fake_host')
        self.assertEqual(0, len(results))

        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10,
                                                               'fake_host2')
        self.assertEqual(0, len(results))

        updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(ctxt, values)

        # Ensure different host is not returned.
        results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10,
                                                               'fake_host')
        self.assertEqual(0, len(results))

        # Ensure one migration older than 10 seconds is returned.
        results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10,
                                                               'fake_host2')
        self.assertEqual(1, len(results))
        db.migration_update(ctxt, migration['id'], {"status": "CONFIRMED"})

        # Ensure the new migration is not returned.
        updated_at = timeutils.utcnow()
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(ctxt, values)
        results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10,
                                                               "fake_host2")
        self.assertEqual(0, len(results))
        db.migration_update(ctxt, migration['id'], {"status": "CONFIRMED"})
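
    # NOTE: migration_get_unconfirmed_by_dest_compute() only returns
    # migrations in the 'finished' state whose updated_at is older than the
    # given confirm window (10 seconds above), scoped to one dest_compute,
    # which is why the freshly created second migration is not returned.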

    def test_instance_get_all_hung_in_rebooting(self):
        ctxt = context.get_admin_context()

        # Ensure no instances are returned.
        results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
        self.assertEqual(0, len(results))

        # Ensure one rebooting instance with updated_at older than 10 seconds
        # is returned.
        updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
        values = {"task_state": "rebooting", "updated_at": updated_at}
        instance = db.instance_create(ctxt, values)
        results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
        self.assertEqual(1, len(results))
        db.instance_update(ctxt, instance['uuid'], {"task_state": None})

        # Ensure the newly rebooted instance is not returned.
        updated_at = timeutils.utcnow()
        values = {"task_state": "rebooting", "updated_at": updated_at}
        instance = db.instance_create(ctxt, values)
        results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
        self.assertEqual(0, len(results))
        db.instance_update(ctxt, instance['uuid'], {"task_state": None})

    def test_instance_update_with_expected_vm_state(self):
        ctxt = context.get_admin_context()
        uuid = uuidutils.generate_uuid()
        updates = {'expected_vm_state': 'meow',
                   'moo': 'cow'}

        class FakeInstance(dict):
            def save(self, session=None):
                pass

        fake_instance_values = {'vm_state': 'meow',
                                'hostname': '',
                                'metadata': None,
                                'system_metadata': None}
        fake_instance = FakeInstance(fake_instance_values)

        self.mox.StubOutWithMock(sqlalchemy_api, '_instance_get_by_uuid')
        self.mox.StubOutWithMock(fake_instance, 'save')

        sqlalchemy_api._instance_get_by_uuid(ctxt, uuid,
                session=mox.IgnoreArg()).AndReturn(fake_instance)
        fake_instance.save(session=mox.IgnoreArg())

        self.mox.ReplayAll()

        result = db.instance_update(ctxt, uuid, updates)
        expected_instance = dict(fake_instance_values)
        expected_instance['moo'] = 'cow'
        self.assertEqual(expected_instance, result)

    def test_instance_update_with_unexpected_vm_state(self):
        ctxt = context.get_admin_context()
        uuid = uuidutils.generate_uuid()
        updates = {'expected_vm_state': 'meow'}
        fake_instance = {'vm_state': 'nomatch'}

        self.mox.StubOutWithMock(sqlalchemy_api, '_instance_get_by_uuid')

        sqlalchemy_api._instance_get_by_uuid(ctxt, uuid,
                session=mox.IgnoreArg()).AndReturn(fake_instance)

        self.mox.ReplayAll()

        self.assertRaises(exception.UnexpectedVMStateError,
                          db.instance_update, ctxt, uuid, updates)
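
    # NOTE: the two tests above follow mox's record/replay model: stub the
    # collaborator with StubOutWithMock(), record the expected calls (the
    # bare _instance_get_by_uuid(...).AndReturn(...) and save() lines), then
    # switch to replay mode with ReplayAll() before exercising
    # db.instance_update(); the recorded expectations are verified when the
    # test case is cleaned up.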

    def test_network_create_safe(self):
        ctxt = context.get_admin_context()
        values = {'host': 'localhost', 'project_id': 'project1'}
        network = db.network_create_safe(ctxt, values)
        self.assertNotEqual(None, network['uuid'])
        self.assertEqual(36, len(network['uuid']))
        db_network = db.network_get(ctxt, network['id'])
        self.assertEqual(network['uuid'], db_network['uuid'])

    def test_network_delete_safe(self):
        ctxt = context.get_admin_context()
        values = {'host': 'localhost', 'project_id': 'project1'}
        network = db.network_create_safe(ctxt, values)
        db_network = db.network_get(ctxt, network['id'])
        values = {'network_id': network['id'], 'address': 'fake1'}
        address1 = db.fixed_ip_create(ctxt, values)['address']
        values = {'network_id': network['id'],
                  'address': 'fake2',
                  'allocated': True}
        address2 = db.fixed_ip_create(ctxt, values)['address']
        self.assertRaises(exception.NetworkInUse,
                          db.network_delete_safe, ctxt, network['id'])
        db.fixed_ip_update(ctxt, address2, {'allocated': False})
        network = db.network_delete_safe(ctxt, network['id'])
        self.assertRaises(exception.FixedIpNotFoundForAddress,
                          db.fixed_ip_get_by_address, ctxt, address1)
        ctxt = ctxt.elevated(read_deleted='yes')
        fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
        self.assertTrue(fixed_ip['deleted'])

    def test_network_create_with_duplicate_vlan(self):
        ctxt = context.get_admin_context()
        values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
        values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
        db.network_create_safe(ctxt, values1)
        self.assertRaises(exception.DuplicateVlan,
                          db.network_create_safe, ctxt, values2)

    def test_network_update_with_duplicate_vlan(self):
        ctxt = context.get_admin_context()
        values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
        values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
        network_ref = db.network_create_safe(ctxt, values1)
        db.network_create_safe(ctxt, values2)
        self.assertRaises(exception.DuplicateVlan,
                          db.network_update,
                          ctxt, network_ref["id"], values2)

    def test_instance_update_with_instance_uuid(self):
        # Test that instance_update() works when an instance UUID is passed.
        ctxt = context.get_admin_context()

        # Create an instance with some metadata.
        values = {'metadata': {'host': 'foo', 'key1': 'meow'},
                  'system_metadata': {'original_image_ref': 'blah'}}
        instance = db.instance_create(ctxt, values)

        # Update the metadata.
        values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
                  'system_metadata': {'original_image_ref': 'baz'}}
        db.instance_update(ctxt, instance['uuid'], values)

        # Retrieve the user-provided metadata to ensure it was successfully
        # updated.
        instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
        self.assertEqual('bar', instance_meta['host'])
        self.assertEqual('wuff', instance_meta['key2'])
        self.assertNotIn('key1', instance_meta)

        # Retrieve the system metadata to ensure it was successfully updated.
        system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
        self.assertEqual('baz', system_meta['original_image_ref'])

    def test_delete_instance_and_system_metadata_on_instance_destroy(self):
        ctxt = context.get_admin_context()

        # Create an instance with some metadata.
        values = {'metadata': {'host': 'foo', 'key1': 'meow'},
                  'system_metadata': {'original_image_ref': 'blah'}}
        instance = db.instance_create(ctxt, values)
        instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
        self.assertEqual('foo', instance_meta['host'])
        self.assertEqual('meow', instance_meta['key1'])
        db.instance_destroy(ctxt, instance['uuid'])
        instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
        instance_system_meta = db.instance_system_metadata_get(ctxt,
                instance['uuid'])
        # Make sure the instance and system metadata are deleted as well.
        self.assertEqual({}, instance_meta)
        self.assertEqual({}, instance_system_meta)

    def test_instance_update_unique_name(self):
        otherprojectcontext = context.RequestContext(self.user_id,
                                                     "%s2" % self.project_id)

        inst = self.create_instances_with_args(hostname='fake_name')
        uuid1p1 = inst['uuid']
        inst = self.create_instances_with_args(hostname='fake_name2')
        uuid2p1 = inst['uuid']

        inst = self.create_instances_with_args(context=otherprojectcontext,
                                               hostname='fake_name3')
        uuid1p2 = inst['uuid']

        # osapi_compute_unique_server_name_scope is unset so this should work:
        values = {'hostname': 'fake_name2'}
        db.instance_update(self.context, uuid1p1, values)
        values = {'hostname': 'fake_name'}
        db.instance_update(self.context, uuid1p1, values)

        # With scope 'global' any duplicate should fail.
        self.flags(osapi_compute_unique_server_name_scope='global')
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          self.context,
                          uuid2p1,
                          values)

        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          otherprojectcontext,
                          uuid1p2,
                          values)

        # But we should definitely be able to update our name if we aren't
        # really changing it.
        case_only_values = {'hostname': 'fake_NAME'}
        db.instance_update(self.context, uuid1p1, case_only_values)

        # With scope 'project' a duplicate in the project should fail:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          self.context,
                          uuid2p1,
                          values)

        # With scope 'project' a duplicate in a different project should work:
        self.flags(osapi_compute_unique_server_name_scope='project')
        db.instance_update(otherprojectcontext, uuid1p2, values)

    def test_instance_update_with_and_get_original(self):
        ctxt = context.get_admin_context()

        # Create an instance with an initial vm_state.
        values = {'vm_state': 'building'}
        instance = db.instance_create(ctxt, values)

        (old_ref, new_ref) = db.instance_update_and_get_original(ctxt,
                instance['uuid'], {'vm_state': 'needscoffee'})
        self.assertEqual("building", old_ref["vm_state"])
        self.assertEqual("needscoffee", new_ref["vm_state"])

    def _test_instance_update_updates_metadata(self, metadata_type):
        ctxt = context.get_admin_context()

        instance = db.instance_create(ctxt, {})

        def set_and_check(meta):
            inst = db.instance_update(ctxt, instance['uuid'],
                                      {metadata_type: dict(meta)})
            _meta = utils.metadata_to_dict(inst[metadata_type])
            self.assertEqual(meta, _meta)

        meta = {'speed': '88', 'units': 'MPH'}
        set_and_check(meta)

        meta['gigawatts'] = '1.21'
        set_and_check(meta)

        del meta['gigawatts']
        set_and_check(meta)

    def test_instance_update_updates_system_metadata(self):
        # Ensure that system_metadata is updated during instance_update.
        self._test_instance_update_updates_metadata('system_metadata')

    def test_instance_update_updates_metadata(self):
        # Ensure that metadata is updated during instance_update.
        self._test_instance_update_updates_metadata('metadata')
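
    # NOTE: as set_and_check() demonstrates, passing 'metadata' or
    # 'system_metadata' to instance_update() replaces the stored dict
    # wholesale: deleting the 'gigawatts' key locally and updating again
    # removes it from the database rather than merging.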

    def test_instance_fault_create(self):
        # Ensure we can create an instance fault.
        ctxt = context.get_admin_context()
        uuid = str(stdlib_uuid.uuid4())

        # Create a fault.
        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuid,
            'code': 404,
        }
        db.instance_fault_create(ctxt, fault_values)

        # Retrieve the fault to ensure it was successfully added.
        faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
        self.assertEqual(404, faults[uuid][0]['code'])

    def test_instance_fault_get_by_instance(self):
        # Ensure we can retrieve instance faults by instance UUID.
        ctxt = context.get_admin_context()
        instance1 = db.instance_create(ctxt, {})
        instance2 = db.instance_create(ctxt, {})
        uuids = [instance1['uuid'], instance2['uuid']]

        # Create faults.
        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[0],
            'code': 404,
        }
        fault1 = db.instance_fault_create(ctxt, fault_values)

        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[0],
            'code': 500,
        }
        fault2 = db.instance_fault_create(ctxt, fault_values)

        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[1],
            'code': 404,
        }
        fault3 = db.instance_fault_create(ctxt, fault_values)

        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[1],
            'code': 500,
        }
        fault4 = db.instance_fault_create(ctxt, fault_values)

        instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)

        # Faults are returned newest first per instance.
        expected = {
            uuids[0]: [fault2, fault1],
            uuids[1]: [fault4, fault3],
        }

        self.assertEqual(instance_faults, expected)

    def test_instance_faults_get_by_instance_uuids_no_faults(self):
        # An empty list should be returned per instance when no faults exist.
        ctxt = context.get_admin_context()
        instance1 = db.instance_create(ctxt, {})
        instance2 = db.instance_create(ctxt, {})
        uuids = [instance1['uuid'], instance2['uuid']]
        instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
        expected = {uuids[0]: [], uuids[1]: []}
        self.assertEqual(expected, instance_faults)

    def test_instance_action_start(self):
        """Create an instance action."""
        ctxt = context.get_admin_context()
        uuid = str(stdlib_uuid.uuid4())

        start_time = timeutils.utcnow()
        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid,
                         'request_id': ctxt.request_id,
                         'user_id': ctxt.user_id,
                         'project_id': ctxt.project_id,
                         'start_time': start_time}
        db.action_start(ctxt, action_values)

        # Retrieve the action to ensure it was successfully added.
        actions = db.actions_get(ctxt, uuid)
        self.assertEqual(1, len(actions))
        self.assertEqual('run_instance', actions[0]['action'])
        self.assertEqual(start_time, actions[0]['start_time'])
        self.assertEqual(ctxt.request_id, actions[0]['request_id'])
        self.assertEqual(ctxt.user_id, actions[0]['user_id'])
        self.assertEqual(ctxt.project_id, actions[0]['project_id'])

    def test_instance_action_finish(self):
        """Create and then finish an instance action."""
        ctxt = context.get_admin_context()
        uuid = str(stdlib_uuid.uuid4())

        start_time = timeutils.utcnow()
        action_start_values = {'action': 'run_instance',
                               'instance_uuid': uuid,
                               'request_id': ctxt.request_id,
                               'user_id': ctxt.user_id,
                               'project_id': ctxt.project_id,
                               'start_time': start_time}
        db.action_start(ctxt, action_start_values)

        finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
        action_finish_values = {'instance_uuid': uuid,
                                'request_id': ctxt.request_id,
                                'finish_time': finish_time}
        db.action_finish(ctxt, action_finish_values)

        # Retrieve the action to ensure it was successfully finished.
        actions = db.actions_get(ctxt, uuid)
        self.assertEqual(1, len(actions))
        self.assertEqual('run_instance', actions[0]['action'])
        self.assertEqual(start_time, actions[0]['start_time'])
        self.assertEqual(finish_time, actions[0]['finish_time'])
        self.assertEqual(ctxt.request_id, actions[0]['request_id'])
        self.assertEqual(ctxt.user_id, actions[0]['user_id'])
        self.assertEqual(ctxt.project_id, actions[0]['project_id'])

    def test_instance_actions_get_by_instance(self):
        """Ensure we can get actions by UUID."""
        ctxt1 = context.get_admin_context()
        ctxt2 = context.get_admin_context()
        uuid1 = str(stdlib_uuid.uuid4())
        uuid2 = str(stdlib_uuid.uuid4())

        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid1,
                         'request_id': ctxt1.request_id,
                         'user_id': ctxt1.user_id,
                         'project_id': ctxt1.project_id,
                         'start_time': timeutils.utcnow()}
        db.action_start(ctxt1, action_values)
        action_values['action'] = 'resize'
        db.action_start(ctxt1, action_values)

        action_values = {'action': 'reboot',
                         'instance_uuid': uuid2,
                         'request_id': ctxt2.request_id,
                         'user_id': ctxt2.user_id,
                         'project_id': ctxt2.project_id,
                         'start_time': timeutils.utcnow()}
        db.action_start(ctxt2, action_values)
        db.action_start(ctxt2, action_values)

        # Retrieve the actions to ensure only the first instance's were
        # returned, newest first.
        actions = db.actions_get(ctxt1, uuid1)
        self.assertEqual(2, len(actions))
        self.assertEqual('resize', actions[0]['action'])
        self.assertEqual('run_instance', actions[1]['action'])

    def test_instance_action_get_by_instance_and_action(self):
        """Ensure we can get an action by instance UUID and request id."""
        ctxt1 = context.get_admin_context()
        ctxt2 = context.get_admin_context()
        uuid1 = str(stdlib_uuid.uuid4())
        uuid2 = str(stdlib_uuid.uuid4())

        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid1,
                         'request_id': ctxt1.request_id,
                         'user_id': ctxt1.user_id,
                         'project_id': ctxt1.project_id,
                         'start_time': timeutils.utcnow()}
        db.action_start(ctxt1, action_values)
        action_values['action'] = 'resize'
        db.action_start(ctxt1, action_values)

        action_values = {'action': 'reboot',
                         'instance_uuid': uuid2,
                         'request_id': ctxt2.request_id,
                         'user_id': ctxt2.user_id,
                         'project_id': ctxt2.project_id,
                         'start_time': timeutils.utcnow()}
        db.action_start(ctxt2, action_values)
        db.action_start(ctxt2, action_values)

        actions = db.actions_get(ctxt1, uuid1)
        request_id = actions[0]['request_id']
        action = db.action_get_by_request_id(ctxt1, uuid1, request_id)
        self.assertEqual('run_instance', action['action'])
        self.assertEqual(ctxt1.request_id, action['request_id'])
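
    # NOTE: as the two tests above show, actions_get() returns an instance's
    # actions newest first, and action_get_by_request_id() looks up a single
    # action by the request_id of the context that started it.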

    def test_instance_action_event_start(self):
        """Create an instance action event."""
        ctxt = context.get_admin_context()
        uuid = str(stdlib_uuid.uuid4())

        start_time = timeutils.utcnow()
        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid,
                         'request_id': ctxt.request_id,
                         'user_id': ctxt.user_id,
                         'project_id': ctxt.project_id,
                         'start_time': start_time}
        action = db.action_start(ctxt, action_values)

        event_values = {'event': 'schedule',
                        'instance_uuid': uuid,
                        'request_id': ctxt.request_id,
                        'start_time': start_time}
        db.action_event_start(ctxt, event_values)

        # Retrieve the event to ensure it was successfully added.
        events = db.action_events_get(ctxt, action['id'])
        self.assertEqual(1, len(events))
        self.assertEqual('schedule', events[0]['event'])
        self.assertEqual(start_time, events[0]['start_time'])

    def test_instance_action_event_finish_success(self):
        """Finish an instance action event."""
        ctxt = context.get_admin_context()
        uuid = str(stdlib_uuid.uuid4())

        start_time = timeutils.utcnow()
        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid,
                         'request_id': ctxt.request_id,
                         'user_id': ctxt.user_id,
                         'project_id': ctxt.project_id,
                         'start_time': start_time}
        action = db.action_start(ctxt, action_values)

        event_values = {'event': 'schedule',
                        'request_id': ctxt.request_id,
                        'instance_uuid': uuid,
                        'start_time': start_time}
        db.action_event_start(ctxt, event_values)

        finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
        event_finish_values = {'event': 'schedule',
                               'request_id': ctxt.request_id,
                               'instance_uuid': uuid,
                               'finish_time': finish_time,
                               'result': 'Success'}
        db.action_event_finish(ctxt, event_finish_values)

        # Retrieve the event to ensure it was successfully finished.
        events = db.action_events_get(ctxt, action['id'])
        action = db.action_get_by_request_id(ctxt, uuid, ctxt.request_id)
        self.assertEqual(1, len(events))
        self.assertEqual('schedule', events[0]['event'])
        self.assertEqual(start_time, events[0]['start_time'])
        self.assertEqual(finish_time, events[0]['finish_time'])
        self.assertNotEqual(action['message'], 'Error')

    def test_instance_action_event_finish_error(self):
        """Finish an instance action event with an error."""
        ctxt = context.get_admin_context()
        uuid = str(stdlib_uuid.uuid4())

        start_time = timeutils.utcnow()
        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid,
                         'request_id': ctxt.request_id,
                         'user_id': ctxt.user_id,
                         'project_id': ctxt.project_id,
                         'start_time': start_time}
        action = db.action_start(ctxt, action_values)

        event_values = {'event': 'schedule',
                        'request_id': ctxt.request_id,
                        'instance_uuid': uuid,
                        'start_time': start_time}
        db.action_event_start(ctxt, event_values)

        finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
        event_finish_values = {'event': 'schedule',
                               'request_id': ctxt.request_id,
                               'instance_uuid': uuid,
                               'finish_time': finish_time,
                               'result': 'Error'}
        db.action_event_finish(ctxt, event_finish_values)

        # Retrieve the event to ensure it was finished with an error.
        events = db.action_events_get(ctxt, action['id'])
        action = db.action_get_by_request_id(ctxt, uuid, ctxt.request_id)
        self.assertEqual(1, len(events))
        self.assertEqual('schedule', events[0]['event'])
        self.assertEqual(start_time, events[0]['start_time'])
        self.assertEqual(finish_time, events[0]['finish_time'])
        self.assertEqual(action['message'], 'Error')

    def test_instance_action_and_event_start_string_time(self):
        """Create an instance action and event with a string start_time."""
        ctxt = context.get_admin_context()
        uuid = str(stdlib_uuid.uuid4())

        start_time = timeutils.utcnow()
        start_time_str = timeutils.strtime(start_time)
        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid,
                         'request_id': ctxt.request_id,
                         'user_id': ctxt.user_id,
                         'project_id': ctxt.project_id,
                         'start_time': start_time_str}
        action = db.action_start(ctxt, action_values)

        event_values = {'event': 'schedule',
                        'instance_uuid': uuid,
                        'request_id': ctxt.request_id,
                        'start_time': start_time_str}
        db.action_event_start(ctxt, event_values)

        # Retrieve the event to ensure it was successfully added.
        events = db.action_events_get(ctxt, action['id'])
        self.assertEqual(1, len(events))
        self.assertEqual('schedule', events[0]['event'])
        # The DB API still returns models with datetime, not str, values.
        self.assertEqual(start_time, events[0]['start_time'])

    def test_instance_action_event_get_by_id(self):
        """Get a specific instance action event."""
        ctxt1 = context.get_admin_context()
        ctxt2 = context.get_admin_context()
        uuid1 = str(stdlib_uuid.uuid4())
        uuid2 = str(stdlib_uuid.uuid4())

        action_values = {'action': 'run_instance',
                         'instance_uuid': uuid1,
                         'request_id': ctxt1.request_id,
                         'user_id': ctxt1.user_id,
                         'project_id': ctxt1.project_id,
                         'start_time': timeutils.utcnow()}
        added_action = db.action_start(ctxt1, action_values)

        action_values = {'action': 'reboot',
                         'instance_uuid': uuid2,
                         'request_id': ctxt2.request_id,
                         'user_id': ctxt2.user_id,
                         'project_id': ctxt2.project_id,
                         'start_time': timeutils.utcnow()}
        db.action_start(ctxt2, action_values)

        start_time = timeutils.utcnow()
        event_values = {'event': 'schedule',
                        'instance_uuid': uuid1,
                        'request_id': ctxt1.request_id,
                        'start_time': start_time}
        added_event = db.action_event_start(ctxt1, event_values)

        event_values = {'event': 'reboot',
                        'instance_uuid': uuid2,
                        'request_id': ctxt2.request_id,
                        'start_time': timeutils.utcnow()}
        db.action_event_start(ctxt2, event_values)

        # Retrieve the event to ensure it was successfully added.
        event = db.action_event_get_by_id(ctxt1, added_action['id'],
                                          added_event['id'])
        self.assertEqual('schedule', event['event'])
        self.assertEqual(start_time, event['start_time'])
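
    # NOTE: finishing an action event with result 'Error' propagates an
    # 'Error' message up to the parent action, while a 'Success' result
    # leaves the action message untouched (compare the finish_success and
    # finish_error tests above).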

    def test_add_key_pair(self, name=None):
        """Check if keypair creation works as expected."""
        keypair = {
            'user_id': self.user_id,
            'name': name or 'test-keypair',
            'fingerprint': '15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a',
            'private_key': 'private_key_value',
            'public_key': 'public_key_value'
        }
        result_key = db.key_pair_create(context.get_admin_context(), keypair)
        for label in keypair:
            self.assertEqual(keypair[label], result_key[label])

    def test_key_pair_destroy(self):
        """Check if key pair deletion works as expected."""
        keypair_name = 'test-delete-keypair'
        self.test_add_key_pair(name=keypair_name)
        db.key_pair_destroy(context.get_admin_context(), self.user_id,
                            keypair_name)
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          context.get_admin_context(), self.user_id,
                          keypair_name)

    def test_key_pair_get(self):
        """Test if a previously created keypair can be found."""
        keypair_name = 'test-get-keypair'
        self.test_add_key_pair(name=keypair_name)
        result = db.key_pair_get(context.get_admin_context(), self.user_id,
                                 keypair_name)
        self.assertEqual(result.name, keypair_name)

    def test_key_pair_get_all_by_user(self):
        self.assertTrue(isinstance(db.key_pair_get_all_by_user(
            context.get_admin_context(), self.user_id), list))

    def test_delete_non_existent_key_pair(self):
        self.assertRaises(exception.KeypairNotFound, db.key_pair_destroy,
                          context.get_admin_context(), self.user_id,
                          'non-existent-keypair')

    def test_get_non_existent_key_pair(self):
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          context.get_admin_context(), self.user_id,
                          'invalid-key')

    def test_dns_registration(self):
        domain1 = 'test.domain.one'
        domain2 = 'test.domain.two'
        testzone = 'testzone'
        ctxt = context.get_admin_context()

        db.dnsdomain_register_for_zone(ctxt, domain1, testzone)
        domain_ref = db.dnsdomain_get(ctxt, domain1)
        zone = domain_ref['availability_zone']
        scope = domain_ref['scope']
        self.assertEqual(scope, 'private')
        self.assertEqual(zone, testzone)

        db.dnsdomain_register_for_project(ctxt, domain2,
                                          self.project_id)
        domain_ref = db.dnsdomain_get(ctxt, domain2)
        project = domain_ref['project_id']
        scope = domain_ref['scope']
        self.assertEqual(project, self.project_id)
        self.assertEqual(scope, 'public')

        expected = [domain1, domain2]
        domains = db.dnsdomain_list(ctxt)
        self.assertEqual(expected, domains)

        db.dnsdomain_unregister(ctxt, domain1)
        db.dnsdomain_unregister(ctxt, domain2)

    def test_network_get_associated_fixed_ips(self):
        ctxt = context.get_admin_context()
        values = {'host': 'foo', 'hostname': 'myname'}
        instance = db.instance_create(ctxt, values)
        values = {'address': 'bar', 'instance_uuid': instance['uuid']}
        vif = db.virtual_interface_create(ctxt, values)
        values = {'address': 'baz',
                  'network_id': 1,
                  'allocated': True,
                  'instance_uuid': instance['uuid'],
                  'virtual_interface_id': vif['id']}
        fixed_address = db.fixed_ip_create(ctxt, values)['address']
        data = db.network_get_associated_fixed_ips(ctxt, 1)
        self.assertEqual(len(data), 1)
        record = data[0]
        self.assertEqual(record['address'], fixed_address)
        self.assertEqual(record['instance_uuid'], instance['uuid'])
        self.assertEqual(record['network_id'], 1)
        self.assertEqual(record['instance_created'], instance['created_at'])
        self.assertEqual(record['instance_updated'], instance['updated_at'])
        self.assertEqual(record['instance_hostname'], instance['hostname'])
        self.assertEqual(record['vif_id'], vif['id'])
        self.assertEqual(record['vif_address'], vif['address'])
        data = db.network_get_associated_fixed_ips(ctxt, 1, 'nothing')
        self.assertEqual(len(data), 0)

    def test_network_get_all_by_host(self):
        ctxt = context.get_admin_context()
        data = db.network_get_all_by_host(ctxt, 'foo')
        self.assertEqual(len(data), 0)
        # dummy network
        net = db.network_create_safe(ctxt, {})
        # network with host set
        net = db.network_create_safe(ctxt, {'host': 'foo'})
        data = db.network_get_all_by_host(ctxt, 'foo')
        self.assertEqual(len(data), 1)
        # network with fixed ip with host set
        net = db.network_create_safe(ctxt, {})
        values = {'host': 'foo', 'network_id': net['id']}
        db.fixed_ip_create(ctxt, values)
        data = db.network_get_all_by_host(ctxt, 'foo')
        self.assertEqual(len(data), 2)
        # network with instance with host set
        net = db.network_create_safe(ctxt, {})
        instance = db.instance_create(ctxt, {'host': 'foo'})
        values = {'instance_uuid': instance['uuid']}
        vif = db.virtual_interface_create(ctxt, values)
        values = {'network_id': net['id'],
                  'virtual_interface_id': vif['id']}
        db.fixed_ip_create(ctxt, values)
        data = db.network_get_all_by_host(ctxt, 'foo')
        self.assertEqual(len(data), 3)

    def test_network_in_use_on_host(self):
        ctxt = context.get_admin_context()

        values = {'host': 'foo', 'hostname': 'myname'}
        instance = db.instance_create(ctxt, values)
        values = {'address': 'bar', 'instance_uuid': instance['uuid']}
        vif = db.virtual_interface_create(ctxt, values)
        values = {'address': 'baz',
                  'network_id': 1,
                  'allocated': True,
                  'instance_uuid': instance['uuid'],
                  'virtual_interface_id': vif['id']}
        db.fixed_ip_create(ctxt, values)

        self.assertEqual(db.network_in_use_on_host(ctxt, 1, 'foo'), True)
        self.assertEqual(db.network_in_use_on_host(ctxt, 1, 'bar'), False)

    def test_instance_floating_address_get_all(self):
        ctxt = context.get_admin_context()

        instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
        instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})

        fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
        instance_uuids = [instance1['uuid'], instance1['uuid'],
                          instance2['uuid']]

        for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
                                                         float_addresses,
                                                         instance_uuids):
            db.fixed_ip_create(ctxt, {'address': fixed_addr,
                                      'instance_uuid': instance_uuid})
            fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
            db.floating_ip_create(ctxt,
                                  {'address': float_addr,
                                   'fixed_ip_id': fixed_id})

        real_float_addresses = \
            db.instance_floating_address_get_all(ctxt, instance_uuids[0])
        self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
        real_float_addresses = \
            db.instance_floating_address_get_all(ctxt, instance_uuids[2])
        self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))

    def test_get_vol_mapping_non_admin(self):
        ref = db.ec2_volume_create(self.context, 'fake-uuid')
        ec2_id = db.get_ec2_volume_id_by_uuid(self.context, 'fake-uuid')
        self.assertEqual(ref['id'], ec2_id)

    def test_get_snap_mapping_non_admin(self):
        ref = db.ec2_snapshot_create(self.context, 'fake-uuid')
        ec2_id = db.get_ec2_snapshot_id_by_uuid(self.context, 'fake-uuid')
        self.assertEqual(ref['id'], ec2_id)

    def test_bw_usage_calls(self):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        timeutils.set_time_override(now)
        start_period = now - datetime.timedelta(seconds=10)
        uuid3_refreshed = now - datetime.timedelta(seconds=5)

        expected_bw_usages = [{'uuid': 'fake_uuid1',
                               'mac': 'fake_mac1',
                               'start_period': start_period,
                               'bw_in': 100,
                               'bw_out': 200,
                               'last_ctr_in': 12345,
                               'last_ctr_out': 67890,
                               'last_refreshed': now},
                              {'uuid': 'fake_uuid2',
                               'mac': 'fake_mac2',
                               'start_period': start_period,
                               'bw_in': 200,
                               'bw_out': 300,
                               'last_ctr_in': 22345,
                               'last_ctr_out': 77890,
                               'last_refreshed': now},
                              {'uuid': 'fake_uuid3',
                               'mac': 'fake_mac3',
                               'start_period': start_period,
                               'bw_in': 400,
                               'bw_out': 500,
                               'last_ctr_in': 32345,
                               'last_ctr_out': 87890,
                               'last_refreshed': uuid3_refreshed}]

        def _compare(bw_usage, expected):
            for key, value in expected.items():
                self.assertEqual(bw_usage[key], value)

        bw_usages = db.bw_usage_get_by_uuids(ctxt,
                ['fake_uuid1', 'fake_uuid2'], start_period)
        # No matches
        self.assertEqual(len(bw_usages), 0)

        # Add 3 entries
        db.bw_usage_update(ctxt, 'fake_uuid1',
                'fake_mac1', start_period,
                100, 200, 12345, 67890)
        db.bw_usage_update(ctxt, 'fake_uuid2',
                'fake_mac2', start_period,
                100, 200, 42, 42)
        # Test explicit refreshed time
        db.bw_usage_update(ctxt, 'fake_uuid3',
                'fake_mac3', start_period,
                400, 500, 32345, 87890,
                last_refreshed=uuid3_refreshed)
        # Update 2nd entry
        db.bw_usage_update(ctxt, 'fake_uuid2',
                'fake_mac2', start_period,
                200, 300, 22345, 77890)

        bw_usages = db.bw_usage_get_by_uuids(ctxt,
                ['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period)
        self.assertEqual(len(bw_usages), 3)
        _compare(bw_usages[0], expected_bw_usages[0])
        _compare(bw_usages[1], expected_bw_usages[1])
        _compare(bw_usages[2], expected_bw_usages[2])
        timeutils.clear_time_override()
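
    # NOTE: timeutils.set_time_override() freezes timeutils.utcnow() at a
    # fixed value so the implicit last_refreshed stamps written by
    # bw_usage_update() can be compared exactly; clear_time_override()
    # restores the real clock.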


def _get_fake_aggr_values():
    return {'name': 'fake_aggregate'}


def _get_fake_aggr_metadata():
    return {'fake_key1': 'fake_value1',
            'fake_key2': 'fake_value2',
            'availability_zone': 'fake_avail_zone'}


def _get_fake_aggr_hosts():
    return ['foo.openstack.org']


def _create_aggregate(context=context.get_admin_context(),
                      values=_get_fake_aggr_values(),
                      metadata=_get_fake_aggr_metadata()):
    return db.aggregate_create(context, values, metadata)


def _create_aggregate_with_hosts(context=context.get_admin_context(),
                                 values=_get_fake_aggr_values(),
                                 metadata=_get_fake_aggr_metadata(),
                                 hosts=_get_fake_aggr_hosts()):
    result = _create_aggregate(context=context,
                               values=values, metadata=metadata)
    for host in hosts:
        db.aggregate_host_add(context, result['id'], host)
    return result
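
# NOTE: the default arguments above (context, values, metadata and hosts)
# are evaluated once at import time, so every call that relies on them
# shares the same objects, a classic Python gotcha. The helpers get away
# with it because they never mutate those arguments.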


class NotDbApiTestCase(DbTestCase):
    def setUp(self):
        super(NotDbApiTestCase, self).setUp()
        self.flags(sql_connection="notdb://")

    def test_instance_get_all_by_filters_regex_unsupported_db(self):
        # Ensure that the 'LIKE' operator is used for unsupported dbs.
        self.create_instances_with_args(display_name='test1')
        self.create_instances_with_args(display_name='test.*')
        self.create_instances_with_args(display_name='diff')
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': 'test.*'})
        self.assertEqual(1, len(result))
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'})
        self.assertEqual(2, len(result))

    def test_instance_get_all_by_filters_paginate(self):
        test1 = self.create_instances_with_args(display_name='test1')
        test2 = self.create_instances_with_args(display_name='test2')
        test3 = self.create_instances_with_args(display_name='test3')

        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                marker=None)
        self.assertEqual(3, len(result))
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                sort_dir="asc",
                                                marker=test1['uuid'])
        self.assertEqual(2, len(result))
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                sort_dir="asc",
                                                marker=test2['uuid'])
        self.assertEqual(1, len(result))
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                sort_dir="asc",
                                                marker=test3['uuid'])
        self.assertEqual(0, len(result))

        self.assertRaises(exception.MarkerNotFound,
                          db.instance_get_all_by_filters,
                          self.context, {'display_name': '%test%'},
                          marker=str(stdlib_uuid.uuid4()))
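
# NOTE: test_instance_get_all_by_filters_paginate above exercises
# marker-based (keyset) pagination: each page resumes strictly after the row
# whose uuid is passed as the marker, and an unknown marker raises
# MarkerNotFound.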


class AggregateDBApiTestCase(test.TestCase):
    def setUp(self):
        super(AggregateDBApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

    def test_aggregate_create_no_metadata(self):
        result = _create_aggregate(metadata=None)
        self.assertEqual(result['name'], 'fake_aggregate')

    def test_aggregate_create_avoid_name_conflict(self):
        r1 = _create_aggregate(metadata=None)
        db.aggregate_delete(context.get_admin_context(), r1['id'])
        values = {'name': r1['name']}
        metadata = {'availability_zone': 'new_zone'}
        r2 = _create_aggregate(values=values, metadata=metadata)
        self.assertEqual(r2['name'], values['name'])
        self.assertEqual(r2['availability_zone'],
                         metadata['availability_zone'])

    def test_aggregate_create_raise_exist_exc(self):
        _create_aggregate(metadata=None)
        self.assertRaises(exception.AggregateNameExists,
                          _create_aggregate, metadata=None)

    def test_aggregate_get_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_get,
                          ctxt, aggregate_id)

    def test_aggregate_metadata_get_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_metadata_get,
                          ctxt, aggregate_id)

    def test_aggregate_create_with_metadata(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(expected_metadata,
                        matchers.DictMatches(_get_fake_aggr_metadata()))

    def test_aggregate_create_delete_create_with_metadata(self):
        # Test for bug 1052479.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(expected_metadata,
                        matchers.DictMatches(_get_fake_aggr_metadata()))
        db.aggregate_delete(ctxt, result['id'])
        result = _create_aggregate(metadata={'availability_zone':
                                             'fake_avail_zone'})
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertEqual(expected_metadata, {'availability_zone':
                                             'fake_avail_zone'})

    def test_aggregate_create_low_privi_context(self):
        self.assertRaises(exception.AdminRequired,
                          db.aggregate_create,
                          self.context, _get_fake_aggr_values())

    def test_aggregate_get(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt)
        expected = db.aggregate_get(ctxt, result['id'])
        self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
        self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])

    def test_aggregate_get_by_host(self):
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        a1 = _create_aggregate_with_hosts(context=ctxt)
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
        self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])

    def test_aggregate_get_by_host_with_key(self):
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        a1 = _create_aggregate_with_hosts(context=ctxt,
                                          metadata={'goodkey': 'good'})
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
        # filter result by key
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org',
                                      key='goodkey')
        self.assertEqual([a1['id']], [x['id'] for x in r1])

    def test_aggregate_metadata_get_by_host(self):
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        values2 = {'name': 'fake_aggregate3'}
        a1 = _create_aggregate_with_hosts(context=ctxt)
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
        a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
                hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
        self.assertEqual(r1['fake_key1'], set(['fake_value1']))
        self.assertFalse('badkey' in r1)

    def test_aggregate_metadata_get_by_host_with_key(self):
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        values2 = {'name': 'fake_aggregate3'}
        a1 = _create_aggregate_with_hosts(context=ctxt)
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
        a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
                hosts=['foo.openstack.org'], metadata={'good': 'value'})
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
                                               key='good')
        self.assertEqual(r1['good'], set(['value']))
        self.assertFalse('fake_key1' in r1)
        # Delete metadata
        db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
        r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
                                               key='good')
        self.assertFalse('good' in r2)

    def test_aggregate_host_get_by_metadata_key(self):
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        values2 = {'name': 'fake_aggregate3'}
        a1 = _create_aggregate_with_hosts(context=ctxt)
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
        a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
                hosts=['foo.openstack.org'], metadata={'good': 'value'})
        r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
        self.assertEqual(r1, {'foo.openstack.org': set(['value'])})
        self.assertFalse('fake_key1' in r1)

    def test_aggregate_get_by_host_not_found(self):
        ctxt = context.get_admin_context()
        _create_aggregate_with_hosts(context=ctxt)
        self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))

    def test_aggregate_delete_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_delete,
                          ctxt, aggregate_id)

    def test_aggregate_delete(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        db.aggregate_delete(ctxt, result['id'])
        expected = db.aggregate_get_all(ctxt)
        self.assertEqual(0, len(expected))
        aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
                                     result['id'])
        self.assertEqual(aggregate['deleted'], True)

    def test_aggregate_update(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt,
                metadata={'availability_zone': 'fake_avail_zone'})
        self.assertEqual(result['availability_zone'], 'fake_avail_zone')
        new_values = _get_fake_aggr_values()
        new_values['availability_zone'] = 'different_avail_zone'
        updated = db.aggregate_update(ctxt, 1, new_values)
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])

    def test_aggregate_update_with_metadata(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['availability_zone'] = 'different_avail_zone'
        db.aggregate_update(ctxt, 1, values)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        updated = db.aggregate_get(ctxt, result['id'])
        self.assertThat(values['metadata'],
                        matchers.DictMatches(expected))
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])

    def test_aggregate_update_with_existing_metadata(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['metadata']['fake_key1'] = 'foo'
        db.aggregate_update(ctxt, 1, values)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(values['metadata'], matchers.DictMatches(expected))

    def test_aggregate_update_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        new_values = _get_fake_aggr_values()
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_update, ctxt, aggregate_id, new_values)

    def test_aggregate_get_all(self):
        ctxt = context.get_admin_context()
        counter = 3
        for c in xrange(counter):
            _create_aggregate(context=ctxt,
                              values={'name': 'fake_aggregate_%d' % c},
                              metadata=None)
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), counter)

    def test_aggregate_get_all_non_deleted(self):
        ctxt = context.get_admin_context()
        add_counter = 5
        remove_counter = 2
        aggregates = []
        for c in xrange(1, add_counter):
            values = {'name': 'fake_aggregate_%d' % c}
            aggregates.append(_create_aggregate(context=ctxt,
                                                values=values, metadata=None))
        for c in xrange(1, remove_counter):
            db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
|
|
ctxt = context.get_admin_context()
|
|
result = _create_aggregate(context=ctxt, metadata=None)
|
|
metadata = _get_fake_aggr_metadata()
|
|
db.aggregate_metadata_add(ctxt, result['id'], metadata)
|
|
expected = db.aggregate_metadata_get(ctxt, result['id'])
|
|
self.assertThat(metadata, matchers.DictMatches(expected))
|
|
|
|
def test_aggregate_metadata_update(self):
|
|
ctxt = context.get_admin_context()
|
|
result = _create_aggregate(context=ctxt)
|
|
metadata = _get_fake_aggr_metadata()
|
|
key = metadata.keys()[0]
|
|
db.aggregate_metadata_delete(ctxt, result['id'], key)
|
|
new_metadata = {key: 'foo'}
|
|
db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
|
|
expected = db.aggregate_metadata_get(ctxt, result['id'])
|
|
metadata[key] = 'foo'
|
|
self.assertThat(metadata, matchers.DictMatches(expected))
|
|
|
|
def test_aggregate_metadata_delete(self):
|
|
ctxt = context.get_admin_context()
|
|
result = _create_aggregate(context=ctxt, metadata=None)
|
|
metadata = _get_fake_aggr_metadata()
|
|
db.aggregate_metadata_add(ctxt, result['id'], metadata)
|
|
db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
|
|
expected = db.aggregate_metadata_get(ctxt, result['id'])
|
|
del metadata[metadata.keys()[0]]
|
|
self.assertThat(metadata, matchers.DictMatches(expected))
|
|
|
|
def test_aggregate_remove_availability_zone(self):
|
|
ctxt = context.get_admin_context()
|
|
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
|
|
'fake_avail_zone'})
|
|
db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
|
|
expected = db.aggregate_metadata_get(ctxt, result['id'])
|
|
aggregate = db.aggregate_get(ctxt, result['id'])
|
|
self.assertEquals(aggregate['availability_zone'], None)
|
|
self.assertThat({}, matchers.DictMatches(expected))
|
|
|
|
def test_aggregate_metadata_delete_raise_not_found(self):
|
|
ctxt = context.get_admin_context()
|
|
result = _create_aggregate(context=ctxt)
|
|
self.assertRaises(exception.AggregateMetadataNotFound,
|
|
db.aggregate_metadata_delete,
|
|
ctxt, result['id'], 'foo_key')
|
|
|
|
def test_aggregate_host_add(self):
|
|
ctxt = context.get_admin_context()
|
|
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
|
|
expected = db.aggregate_host_get_all(ctxt, result['id'])
|
|
self.assertEqual(_get_fake_aggr_hosts(), expected)
|
|
|
|
def test_aggregate_host_re_add(self):
|
|
ctxt = context.get_admin_context()
|
|
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
|
|
host = _get_fake_aggr_hosts()[0]
|
|
db.aggregate_host_delete(ctxt, result['id'], host)
|
|
db.aggregate_host_add(ctxt, result['id'], host)
|
|
expected = db.aggregate_host_get_all(ctxt, result['id'])
|
|
self.assertEqual(len(expected), 1)
|
|
|
|
def test_aggregate_host_add_duplicate_works(self):
|
|
ctxt = context.get_admin_context()
|
|
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
|
|
r2 = _create_aggregate_with_hosts(ctxt,
|
|
values={'name': 'fake_aggregate2'},
|
|
metadata={'availability_zone': 'fake_avail_zone2'})
|
|
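        # a host may belong to several aggregates at once, so adding the
        # same host list to a second aggregate must succeed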
        h1 = db.aggregate_host_get_all(ctxt, r1['id'])
        h2 = db.aggregate_host_get_all(ctxt, r2['id'])
        self.assertEqual(h1, h2)

    def test_aggregate_host_add_duplicate_raise_exist_exc(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        self.assertRaises(exception.AggregateHostExists,
                          db.aggregate_host_add,
                          ctxt, result['id'], _get_fake_aggr_hosts()[0])

    def test_aggregate_host_add_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        host = _get_fake_aggr_hosts()[0]
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_host_add,
                          ctxt, aggregate_id, host)

    def test_aggregate_host_delete(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        db.aggregate_host_delete(ctxt, result['id'],
                                 _get_fake_aggr_hosts()[0])
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(0, len(expected))

    def test_aggregate_host_delete_raise_not_found(self):
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateHostNotFound,
                          db.aggregate_host_delete,
                          ctxt, result['id'], _get_fake_aggr_hosts()[0])

class SqlAlchemyDbApiTestCase(DbTestCase):
    def test_instance_get_all_by_host(self):
        ctxt = context.get_admin_context()

        self.create_instances_with_args()
        self.create_instances_with_args()
        self.create_instances_with_args(host='host2')
        result = db.instance_get_all_by_host(ctxt, 'host1')
        self.assertEqual(2, len(result))

    def test_instance_get_all_uuids_by_host(self):
        ctxt = context.get_admin_context()
        self.create_instances_with_args()
        self.create_instances_with_args()
        self.create_instances_with_args(host='host2')
        result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
        self.assertEqual(2, len(result))
        self.assertEqual(types.UnicodeType, type(result[0]))

class CapacityTestCase(test.TestCase):
    def setUp(self):
        super(CapacityTestCase, self).setUp()

        self.ctxt = context.get_admin_context()

        service_dict = dict(host='host1', binary='binary1',
                            topic='compute', report_count=1,
                            disabled=False)
        self.service = db.service_create(self.ctxt, service_dict)

        self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
                                      vcpus_used=0, memory_mb_used=0,
                                      local_gb_used=0, free_ram_mb=1024,
                                      free_disk_gb=2048, hypervisor_type="xen",
                                      hypervisor_version=1, cpu_info="",
                                      running_vms=0, current_workload=0,
                                      service_id=self.service['id'])
        # add some random stats
        stats = dict(num_instances=3, num_proj_12345=2,
                     num_proj_23456=2, num_vm_building=3)
        self.compute_node_dict['stats'] = stats

        self.flags(reserved_host_memory_mb=0)
        self.flags(reserved_host_disk_mb=0)

    def _create_helper(self, host):
        self.compute_node_dict['host'] = host
        return db.compute_node_create(self.ctxt, self.compute_node_dict)

    def _stats_as_dict(self, stats):
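        # compute node stats are returned as a list of key/value rows;
        # flatten them into a plain dict for easier assertions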
        d = {}
        for s in stats:
            key = s['key']
            d[key] = s['value']
        return d

    def test_compute_node_create(self):
        item = self._create_helper('host1')
        self.assertEqual(item['free_ram_mb'], 1024)
        self.assertEqual(item['free_disk_gb'], 2048)
        self.assertEqual(item['running_vms'], 0)
        self.assertEqual(item['current_workload'], 0)

        stats = self._stats_as_dict(item['stats'])
        self.assertEqual(3, stats['num_instances'])
        self.assertEqual(2, stats['num_proj_12345'])
        self.assertEqual(3, stats['num_vm_building'])

    def test_compute_node_get_all(self):
        item = self._create_helper('host1')
        nodes = db.compute_node_get_all(self.ctxt)
        self.assertEqual(1, len(nodes))

        node = nodes[0]
        self.assertEqual(2, node['vcpus'])

        stats = self._stats_as_dict(node['stats'])
        self.assertEqual(3, int(stats['num_instances']))
        self.assertEqual(2, int(stats['num_proj_12345']))
        self.assertEqual(3, int(stats['num_vm_building']))

    def test_compute_node_update(self):
        item = self._create_helper('host1')

        compute_node_id = item['id']
        stats = self._stats_as_dict(item['stats'])

        # change some values:
        stats['num_instances'] = 8
        stats['num_tribbles'] = 1
        values = {
            'vcpus': 4,
            'stats': stats,
        }
        item = db.compute_node_update(self.ctxt, compute_node_id, values)
        stats = self._stats_as_dict(item['stats'])

        self.assertEqual(4, item['vcpus'])
        self.assertEqual(8, int(stats['num_instances']))
        self.assertEqual(2, int(stats['num_proj_12345']))
        self.assertEqual(1, int(stats['num_tribbles']))

    def test_compute_node_update_always_updates_updated_at(self):
        item = self._create_helper('host1')
        item_updated = db.compute_node_update(self.ctxt,
                                              item['id'], {})
        self.assertNotEqual(item['updated_at'], item_updated['updated_at'])

    def test_compute_node_stat_prune(self):
        item = self._create_helper('host1')
        for stat in item['stats']:
            if stat['key'] == 'num_instances':
                num_instance_stat = stat
                break

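        # with prune_stats=True, stats missing from the update dict are
        # deleted, so only num_instances should survive below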
        values = {
            'stats': dict(num_instances=1)
        }
        db.compute_node_update(self.ctxt, item['id'], values, prune_stats=True)
        item = db.compute_node_get_all(self.ctxt)[0]
        self.assertEqual(1, len(item['stats']))

        stat = item['stats'][0]
        self.assertEqual(num_instance_stat['id'], stat['id'])
        self.assertEqual(num_instance_stat['key'], stat['key'])
        self.assertEqual(1, int(stat['value']))


class MigrationTestCase(test.TestCase):

    def setUp(self):
        super(MigrationTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

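        # topology: two in-progress migrations host1/a -> host2/b, one
        # reverted and one confirmed (both excluded from "in progress"),
        # plus host2/b -> host1/a, host2 -> host3 and host3 -> host4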
        self._create()
        self._create()
        self._create(status='reverted')
        self._create(status='confirmed')
        self._create(source_compute='host2', source_node='b',
                     dest_compute='host1', dest_node='a')
        self._create(source_compute='host2', dest_compute='host3')
        self._create(source_compute='host3', dest_compute='host4')

    def _create(self, status='migrating', source_compute='host1',
                source_node='a', dest_compute='host2', dest_node='b',
                system_metadata=None):

        values = {'host': source_compute}
        instance = db.instance_create(self.ctxt, values)
        if system_metadata:
            db.instance_system_metadata_update(self.ctxt, instance['uuid'],
                                               system_metadata, False)

        values = {'status': status, 'source_compute': source_compute,
                  'source_node': source_node, 'dest_compute': dest_compute,
                  'dest_node': dest_node, 'instance_uuid': instance['uuid']}
        db.migration_create(self.ctxt, values)

    def _assert_in_progress(self, migrations):
        for migration in migrations:
            self.assertNotEqual('confirmed', migration['status'])
            self.assertNotEqual('reverted', migration['status'])

    def test_migration_get_in_progress_joins(self):
        self._create(source_compute='foo', system_metadata={'foo': 'bar'})
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                'foo', 'a')
        system_metadata = migrations[0]['instance']['system_metadata'][0]
        self.assertEqual(system_metadata['key'], 'foo')
        self.assertEqual(system_metadata['value'], 'bar')

    def test_in_progress_host1_nodea(self):
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                'host1', 'a')
        # 2 as source + 1 as dest
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_in_progress_host1_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                'host1', 'b')
        # some migrations are to/from host1, but none with a node 'b'
        self.assertEqual(0, len(migrations))

    def test_in_progress_host2_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                'host2', 'b')
        # 2 as dest, 1 as source
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_instance_join(self):
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                'host2', 'b')
        for migration in migrations:
            instance = migration['instance']
            self.assertEqual(migration['instance_uuid'], instance['uuid'])


class ModelsObjectComparatorMixin(object):
    def _dict_from_object(self, obj, ignored_keys):
        if ignored_keys is None:
            ignored_keys = []
        return dict([(k, v) for k, v in obj.iteritems()
                     if k not in ignored_keys])

    def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
        obj1 = self._dict_from_object(obj1, ignored_keys)
        obj2 = self._dict_from_object(obj2, ignored_keys)

        self.assertEqual(len(obj1), len(obj2))
        for key, value in obj1.iteritems():
            self.assertEqual(value, obj2[key])

    def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
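        # order-insensitive comparison: pair the objects up by their 'id'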
        self.assertEqual(len(objs1), len(objs2))
        objs2 = dict([(o['id'], o) for o in objs2])
        for o1 in objs1:
            self._assertEqualObjects(o1, objs2[o1['id']], ignored_keys)

    def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
        self.assertEqual(len(primitives1), len(primitives2))
        for primitive in primitives1:
            self.assertIn(primitive, primitives2)

        for primitive in primitives2:
            self.assertIn(primitive, primitives1)


class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
    def setUp(self):
        super(ServiceTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        return {
            'host': 'fake_host',
            'binary': 'fake_binary',
            'topic': 'fake_topic',
            'report_count': 3,
            'disabled': False
        }

    def _create_service(self, values):
        v = self._get_base_values()
        v.update(values)
        return db.service_create(self.ctxt, v)

    def test_service_create(self):
        service = self._create_service({})
        self.assertIsNotNone(service['id'])
        for key, value in self._get_base_values().iteritems():
            self.assertEqual(value, service[key])

    def test_service_destroy(self):
        service1 = self._create_service({})
        service2 = self._create_service({'host': 'fake_host2'})

        db.service_destroy(self.ctxt, service1['id'])
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, service1['id'])
        self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
                                 service2, ignored_keys=['compute_node'])

    def test_service_update(self):
        service = self._create_service({})
        new_values = {
            'host': 'fake_host1',
            'binary': 'fake_binary1',
            'topic': 'fake_topic1',
            'report_count': 4,
            'disabled': True
        }
        db.service_update(self.ctxt, service['id'], new_values)
        updated_service = db.service_get(self.ctxt, service['id'])
        for key, value in new_values.iteritems():
            self.assertEqual(value, updated_service[key])

    def test_service_update_not_found_exception(self):
        self.assertRaises(exception.ServiceNotFound,
                          db.service_update, self.ctxt, 100500, {})

    def test_service_get(self):
        service1 = self._create_service({})
        service2 = self._create_service({'host': 'some_other_fake_host'})
        real_service1 = db.service_get(self.ctxt, service1['id'])
        self._assertEqualObjects(service1, real_service1,
                                 ignored_keys=['compute_node'])

    def test_service_get_with_compute_node(self):
        service = self._create_service({})
        compute_values = dict(vcpus=2, memory_mb=1024, local_gb=2048,
                              vcpus_used=0, memory_mb_used=0,
                              local_gb_used=0, free_ram_mb=1024,
                              free_disk_gb=2048, hypervisor_type="xen",
                              hypervisor_version=1, cpu_info="",
                              running_vms=0, current_workload=0,
                              service_id=service['id'])
        compute = db.compute_node_create(self.ctxt, compute_values)
        real_service = db.service_get(self.ctxt, service['id'])
        real_compute = real_service['compute_node'][0]
        self.assertEqual(compute['id'], real_compute['id'])

    def test_service_get_not_found_exception(self):
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, 100500)

    def test_service_get_by_host_and_topic(self):
        service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
        service2 = self._create_service({'host': 'host2', 'topic': 'topic2'})

        real_service1 = db.service_get_by_host_and_topic(self.ctxt,
                                                         host='host1',
                                                         topic='topic1')
        self._assertEqualObjects(service1, real_service1)

    def test_service_get_all(self):
        values = [
            {'host': 'host1', 'topic': 'topic1'},
            {'host': 'host2', 'topic': 'topic2'},
            {'disabled': True}
        ]
        services = [self._create_service(vals) for vals in values]
        disabled_services = [services[-1]]
        non_disabled_services = services[:-1]

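        # service_get_all's second argument filters on the disabled flag:
        # omitted returns everything, True only disabled services, False
        # only enabled ones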
        compares = [
            (services, db.service_get_all(self.ctxt)),
            (disabled_services, db.service_get_all(self.ctxt, True)),
            (non_disabled_services, db.service_get_all(self.ctxt, False))
        ]
        for comp in compares:
            self._assertEqualListsOfObjects(*comp)

    def test_service_get_all_by_topic(self):
        values = [
            {'host': 'host1', 'topic': 't1'},
            {'host': 'host2', 'topic': 't1'},
            {'disabled': True, 'topic': 't1'},
            {'host': 'host3', 'topic': 't2'}
        ]
        services = [self._create_service(vals) for vals in values]
        expected = services[:2]
        real = db.service_get_all_by_topic(self.ctxt, 't1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_all_by_host(self):
        values = [
            {'host': 'host1', 'topic': 't1'},
            {'host': 'host1', 'topic': 't1'},
            {'host': 'host2', 'topic': 't1'},
            {'host': 'host3', 'topic': 't2'}
        ]
        services = [self._create_service(vals) for vals in values]

        expected = services[:2]
        real = db.service_get_all_by_host(self.ctxt, 'host1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_by_compute_host(self):
        values = [
            {'host': 'host1', 'topic': CONF.compute_topic},
            {'host': 'host2', 'topic': 't1'},
            {'host': 'host3', 'topic': CONF.compute_topic}
        ]
        services = [self._create_service(vals) for vals in values]

        real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
        self._assertEqualObjects(services[0], real_service,
                                 ignored_keys=['compute_node'])

        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host,
                          self.ctxt, 'non-exists-host')

    def test_service_get_by_compute_host_not_found(self):
        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host,
                          self.ctxt, 'non-exists-host')

    def test_service_get_by_args(self):
        values = [
            {'host': 'host1', 'binary': 'a'},
            {'host': 'host2', 'binary': 'b'}
        ]
        services = [self._create_service(vals) for vals in values]

        service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
        self._assertEqualObjects(services[0], service1)

        service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
        self._assertEqualObjects(services[1], service2)

    def test_service_get_by_args_not_found_exception(self):
        self.assertRaises(exception.HostBinaryNotFound,
                          db.service_get_by_args,
                          self.ctxt, 'non-exists-host', 'a')


class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
    def setUp(self):
        super(BaseInstanceTypeTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        return {
            'name': 'fake_name',
            'memory_mb': 512,
            'vcpus': 1,
            'root_gb': 10,
            'ephemeral_gb': 10,
            'flavorid': 'fake_flavor',
            'swap': 0,
            'rxtx_factor': 0.5,
            'vcpu_weight': 1,
            'disabled': False,
            'is_public': True
        }

    def _create_inst_type(self, values):
        v = self._get_base_values()
        v.update(values)
        return db.instance_type_create(self.ctxt, v)


class InstanceTypeTestCase(BaseInstanceTypeTestCase):

    def test_instance_type_create(self):
        inst_type = self._create_inst_type({})
        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
                        'created_at', 'extra_specs']

        self.assertIsNotNone(inst_type['id'])
        self._assertEqualObjects(inst_type, self._get_base_values(),
                                 ignored_keys)

    def test_instance_type_destroy(self):
        specs1 = {'a': '1', 'b': '2'}
        inst_type1 = self._create_inst_type({'name': 'name1', 'flavorid': 'a1',
                                             'extra_specs': specs1})
        specs2 = {'c': '4', 'd': '3'}
        inst_type2 = self._create_inst_type({'name': 'name2', 'flavorid': 'a2',
                                             'extra_specs': specs2})

        db.instance_type_destroy(self.ctxt, 'name1')

        self.assertRaises(exception.InstanceTypeNotFound,
                          db.instance_type_get, self.ctxt, inst_type1['id'])
        real_specs1 = db.instance_type_extra_specs_get(self.ctxt,
                                                       inst_type1['flavorid'])
        self._assertEqualObjects(real_specs1, {})

        r_inst_type2 = db.instance_type_get(self.ctxt, inst_type2['id'])
        self._assertEqualObjects(inst_type2, r_inst_type2, 'extra_specs')

    def test_instance_type_destroy_not_found(self):
        self.assertRaises(exception.InstanceTypeNotFound,
                          db.instance_type_destroy, self.ctxt, 'nonexists')

    def test_instance_type_create_duplicate_name(self):
        self._create_inst_type({})
        self.assertRaises(exception.InstanceTypeExists,
                          self._create_inst_type,
                          {'flavorid': 'some_random_flavor'})

    def test_instance_type_create_duplicate_flavorid(self):
        self._create_inst_type({})
        self.assertRaises(exception.InstanceTypeIdExists,
                          self._create_inst_type,
                          {'name': 'some_random_name'})

    def test_instance_type_create_with_extra_specs(self):
        extra_specs = dict(a='abc', b='def', c='ghi')
        inst_type = self._create_inst_type({'extra_specs': extra_specs})
        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
                        'created_at', 'extra_specs']

        self._assertEqualObjects(inst_type, self._get_base_values(),
                                 ignored_keys)
        self._assertEqualObjects(extra_specs, inst_type['extra_specs'])

    def test_instance_type_get_all(self):
        # NOTE(boris-42): Remove base instance types
        for it in db.instance_type_get_all(self.ctxt):
            db.instance_type_destroy(self.ctxt, it['name'])

        instance_types = [
            {'root_gb': 600, 'memory_mb': 100, 'disabled': True,
             'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
            {'root_gb': 500, 'memory_mb': 200, 'disabled': True,
             'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
            {'root_gb': 400, 'memory_mb': 300, 'disabled': False,
             'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
            {'root_gb': 300, 'memory_mb': 400, 'disabled': False,
             'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
            {'root_gb': 200, 'memory_mb': 500, 'disabled': True,
             'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
            {'root_gb': 100, 'memory_mb': 600, 'disabled': True,
             'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
        ]
        instance_types = [self._create_inst_type(it) for it in instance_types]

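        # in-Python reference implementations of each DB-side filter; the
        # expected result set is computed with these and compared against
        # what instance_type_get_all actually returns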
        lambda_filters = {
            'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
            'min_root_gb': lambda it, v: it['root_gb'] >= v,
            'disabled': lambda it, v: it['disabled'] == v,
            'is_public': lambda it, v: (v is None or it['is_public'] == v)
        }

        mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
        root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
        disabled_filts = [{'disabled': x} for x in [True, False]]
        is_public_filts = [{'is_public': x} for x in [True, False, None]]

        def assert_multi_filter_instance_type_get(filters=None):
            if filters is None:
                filters = {}

            expected_it = instance_types
            for name, value in filters.iteritems():
                filt = lambda it: lambda_filters[name](it, value)
                expected_it = filter(filt, expected_it)

            real_it = db.instance_type_get_all(self.ctxt, filters=filters)
            self._assertEqualListsOfObjects(expected_it, real_it)

        # no filter
        assert_multi_filter_instance_type_get()

        # test each filter on its own
        for filt in mem_filts:
            assert_multi_filter_instance_type_get(filt)
        for filt in root_filts:
            assert_multi_filter_instance_type_get(filt)
        for filt in disabled_filts:
            assert_multi_filter_instance_type_get(filt)
        for filt in is_public_filts:
            assert_multi_filter_instance_type_get(filt)

        # test every combination of all four filters
        for mem in mem_filts:
            for root in root_filts:
                for disabled in disabled_filts:
                    for is_public in is_public_filts:
                        filts = [f.items() for f in
                                 [mem, root, disabled, is_public]]
                        filts = dict(reduce(lambda x, y: x + y, filts, []))
                        assert_multi_filter_instance_type_get(filts)

    def test_instance_type_get(self):
        inst_types = [{'name': 'abc', 'flavorid': '123'},
                      {'name': 'def', 'flavorid': '456'},
                      {'name': 'ghi', 'flavorid': '789'}]
        inst_types = [self._create_inst_type(t) for t in inst_types]

        for inst_type in inst_types:
            inst_type_by_id = db.instance_type_get(self.ctxt, inst_type['id'])
            self._assertEqualObjects(inst_type, inst_type_by_id)

    def test_instance_type_get_by_name(self):
        inst_types = [{'name': 'abc', 'flavorid': '123'},
                      {'name': 'def', 'flavorid': '456'},
                      {'name': 'ghi', 'flavorid': '789'}]
        inst_types = [self._create_inst_type(t) for t in inst_types]

        for inst_type in inst_types:
            inst_type_by_name = db.instance_type_get_by_name(self.ctxt,
                                                             inst_type['name'])
            self._assertEqualObjects(inst_type, inst_type_by_name)

    def test_instance_type_get_by_name_not_found(self):
        self._create_inst_type({})
        self.assertRaises(exception.InstanceTypeNotFoundByName,
                          db.instance_type_get_by_name, self.ctxt, 'nonexists')

    def test_instance_type_get_by_flavor_id(self):
        inst_types = [{'name': 'abc', 'flavorid': '123'},
                      {'name': 'def', 'flavorid': '456'},
                      {'name': 'ghi', 'flavorid': '789'}]
        inst_types = [self._create_inst_type(t) for t in inst_types]

        for inst_type in inst_types:
            params = (self.ctxt, inst_type['flavorid'])
            inst_type_by_flavorid = db.instance_type_get_by_flavor_id(*params)
            self._assertEqualObjects(inst_type, inst_type_by_flavorid)

    def test_instance_type_get_by_flavor_not_found(self):
        self._create_inst_type({})
        self.assertRaises(exception.FlavorNotFound,
                          db.instance_type_get_by_flavor_id,
                          self.ctxt, 'nonexists')


class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):

    def setUp(self):
        super(InstanceTypeExtraSpecsTestCase, self).setUp()
        values = ({'name': 'n1', 'flavorid': 'f1',
                   'extra_specs': dict(a='a', b='b', c='c')},
                  {'name': 'n2', 'flavorid': 'f2',
                   'extra_specs': dict(d='d', e='e', f='f')})

        # NOTE(boris-42): We have already tested instance_type_create method
        #                 with extra_specs in InstanceTypeTestCase.
        self.inst_types = [self._create_inst_type(v) for v in values]

    def test_instance_type_extra_specs_get(self):
        for it in self.inst_types:
            real_specs = db.instance_type_extra_specs_get(self.ctxt,
                                                          it['flavorid'])
            self._assertEqualObjects(it['extra_specs'], real_specs)

    def test_instance_type_extra_specs_get_item(self):
        expected = dict(f1=dict(a='a', b='b', c='c'),
                        f2=dict(d='d', e='e', f='f'))

        for flavor, specs in expected.iteritems():
            for key, val in specs.iteritems():
                spec = db.instance_type_extra_specs_get_item(self.ctxt, flavor,
                                                             key)
                self.assertEqual(spec[key], val)

    def test_instance_type_extra_specs_delete(self):
        for it in self.inst_types:
            specs = it['extra_specs']
            key = specs.keys()[0]
            del specs[key]
            db.instance_type_extra_specs_delete(self.ctxt, it['flavorid'], key)
            real_specs = db.instance_type_extra_specs_get(self.ctxt,
                                                          it['flavorid'])
            self._assertEqualObjects(it['extra_specs'], real_specs)

    def test_instance_type_extra_specs_update_or_create(self):
        for it in self.inst_types:
            current_specs = it['extra_specs']
            current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
            params = (self.ctxt, it['flavorid'], current_specs)
            db.instance_type_extra_specs_update_or_create(*params)
            real_specs = db.instance_type_extra_specs_get(self.ctxt,
                                                          it['flavorid'])
            self._assertEqualObjects(current_specs, real_specs)

    def test_instance_type_extra_specs_update_or_create_flavor_not_found(self):
        self.assertRaises(exception.FlavorNotFound,
                          db.instance_type_extra_specs_update_or_create,
                          self.ctxt, 'nonexists', {})


class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):

    def _create_inst_type_access(self, instance_type_id, project_id):
        return db.instance_type_access_add(self.ctxt, instance_type_id,
                                           project_id)

    def test_instance_type_access_get_by_flavor_id(self):
        inst_types = ({'name': 'n1', 'flavorid': 'f1'},
                      {'name': 'n2', 'flavorid': 'f2'})
        it1, it2 = tuple((self._create_inst_type(v) for v in inst_types))

        access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'),
                      self._create_inst_type_access(it1['flavorid'], 'pr2')]

        access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')]

        for it, access_it in zip((it1, it2), (access_it1, access_it2)):
            params = (self.ctxt, it['flavorid'])
            real_access_it = db.instance_type_access_get_by_flavor_id(*params)
            self._assertEqualListsOfObjects(access_it, real_access_it)

    def test_instance_type_access_get_by_flavor_id_flavor_not_found(self):
        self.assertRaises(exception.FlavorNotFound,
                          db.instance_type_get_by_flavor_id,
                          self.ctxt, 'nonexists')

    def test_instance_type_access_add(self):
        inst_type = self._create_inst_type({'flavorid': 'f1'})
        project_id = 'p1'

        access = self._create_inst_type_access(inst_type['flavorid'],
                                               project_id)
        # NOTE(boris-42): Check that instance_type_access_add doesn't fail and
        #                 returns correct value. This is enough because other
        #                 logic is checked by other methods.
        self.assertIsNotNone(access['id'])
        self.assertEqual(access['instance_type_id'], inst_type['id'])
        self.assertEqual(access['project_id'], project_id)

    def test_instance_type_access_add_to_non_existing_flavor(self):
        self.assertRaises(exception.FlavorNotFound,
                          self._create_inst_type_access,
                          'nonexists', 'does_not_matter')

    def test_instance_type_access_add_duplicate_project_id_flavor(self):
        inst_type = self._create_inst_type({'flavorid': 'f1'})
        params = (inst_type['flavorid'], 'p1')

        self._create_inst_type_access(*params)
        self.assertRaises(exception.FlavorAccessExists,
                          self._create_inst_type_access, *params)

    def test_instance_type_access_remove(self):
        inst_types = ({'name': 'n1', 'flavorid': 'f1'},
                      {'name': 'n2', 'flavorid': 'f2'})
        it1, it2 = tuple((self._create_inst_type(v) for v in inst_types))

        access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'),
                      self._create_inst_type_access(it1['flavorid'], 'pr2')]

        access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')]

        db.instance_type_access_remove(self.ctxt, it1['flavorid'],
                                       access_it1[1]['project_id'])

        for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
            params = (self.ctxt, it['flavorid'])
            real_access_it = db.instance_type_access_get_by_flavor_id(*params)
            self._assertEqualListsOfObjects(access_it, real_access_it)

    def test_instance_type_access_remove_flavor_not_found(self):
        self.assertRaises(exception.FlavorNotFound,
                          db.instance_type_access_remove,
                          self.ctxt, 'nonexists', 'does_not_matter')

    def test_instance_type_access_remove_access_not_found(self):
        inst_type = self._create_inst_type({'flavorid': 'f1'})
        params = (inst_type['flavorid'], 'p1')
        self._create_inst_type_access(*params)
        self.assertRaises(exception.FlavorAccessNotFound,
                          db.instance_type_access_remove,
                          self.ctxt, inst_type['flavorid'], 'p2')

    def test_instance_type_access_removed_after_instance_type_destroy(self):
        inst_type1 = self._create_inst_type({'flavorid': 'f1', 'name': 'n1'})
        inst_type2 = self._create_inst_type({'flavorid': 'f2', 'name': 'n2'})
        values = [
            (inst_type1['flavorid'], 'p1'),
            (inst_type1['flavorid'], 'p2'),
            (inst_type2['flavorid'], 'p3')
        ]
        for v in values:
            self._create_inst_type_access(*v)

        db.instance_type_destroy(self.ctxt, inst_type1['name'])

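        # destroying a flavor must cascade to its access records while
        # leaving other flavors' access rows untouched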
        p = (self.ctxt, inst_type1['flavorid'])
        self.assertEqual(0, len(db.instance_type_access_get_by_flavor_id(*p)))
        p = (self.ctxt, inst_type2['flavorid'])
        self.assertEqual(1, len(db.instance_type_access_get_by_flavor_id(*p)))
        db.instance_type_destroy(self.ctxt, inst_type2['name'])
        self.assertEqual(0, len(db.instance_type_access_get_by_flavor_id(*p)))


class FixedIPTestCase(BaseInstanceTypeTestCase):
    def _timeout_test(self, ctxt, timeout, multi_host):
        instance = db.instance_create(ctxt, dict(host='foo'))
        net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
                                                host='bar'))
        old = timeout - datetime.timedelta(seconds=5)
        new = timeout + datetime.timedelta(seconds=5)
        # should deallocate
        db.fixed_ip_create(ctxt, dict(allocated=False,
                                      instance_uuid=instance['uuid'],
                                      network_id=net['id'],
                                      updated_at=old))
        # still allocated
        db.fixed_ip_create(ctxt, dict(allocated=True,
                                      instance_uuid=instance['uuid'],
                                      network_id=net['id'],
                                      updated_at=old))
        # wrong network
        db.fixed_ip_create(ctxt, dict(allocated=False,
                                      instance_uuid=instance['uuid'],
                                      network_id=None,
                                      updated_at=old))
        # too new
        db.fixed_ip_create(ctxt, dict(allocated=False,
                                      instance_uuid=instance['uuid'],
                                      network_id=None,
                                      updated_at=new))

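    # In _timeout_test above only the first fixed IP (unallocated, right
    # network, old enough) qualifies for disassociation; the instance lives
    # on host 'foo' and the network on host 'bar'.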
    def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
        now = timeutils.utcnow()
        self._timeout_test(self.ctxt, now, False)
        result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
        self.assertEqual(result, 0)
        result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
        self.assertEqual(result, 1)

    def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
        now = timeutils.utcnow()
        self._timeout_test(self.ctxt, now, True)
        result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
        self.assertEqual(result, 1)
        result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
        self.assertEqual(result, 0)

    def test_fixed_ip_get_by_floating_address(self):
        fixed_ip = db.fixed_ip_create(self.ctxt, {'address': 'fixed'})
        values = {'address': 'floating',
                  'fixed_ip_id': fixed_ip['id']}
        floating = db.floating_ip_create(self.ctxt, values)['address']
        fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
        self._assertEqualObjects(fixed_ip, fixed_ip_ref)

    def test_fixed_ip_get_by_host(self):
        host_ips = {
            'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
            'host2': ['1.1.1.4', '1.1.1.5'],
            'host3': ['1.1.1.6']
        }

        for host, ips in host_ips.iteritems():
            for ip in ips:
                instance_uuid = self._create_instance(host=host)
                db.fixed_ip_create(self.ctxt, {'address': ip})
                db.fixed_ip_associate(self.ctxt, ip, instance_uuid)

        for host, ips in host_ips.iteritems():
            ips_on_host = map(lambda x: x['address'],
                              db.fixed_ip_get_by_host(self.ctxt, host))
            self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)

    def test_fixed_ip_get_by_network_host_not_found_exception(self):
        self.assertRaises(
            exception.FixedIpNotFoundForNetworkHost,
            db.fixed_ip_get_by_network_host,
            self.ctxt, 1, 'ignore')

    def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
        db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))

        fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')

        self.assertEqual(1, fip['network_id'])
        self.assertEqual('host', fip['host'])

    def _create_instance(self, **kwargs):
        instance = db.instance_create(self.ctxt, kwargs)
        return instance['uuid']

    def test_fixed_ip_get_by_instance_fixed_ip_found(self):
        instance_uuid = self._create_instance()

        FIXED_IP_ADDRESS = 'address'
        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))

        ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
                                                 [ips_list[0].address])

    def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
        instance_uuid = self._create_instance()

        FIXED_IP_ADDRESS_1 = 'address_1'
        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
        FIXED_IP_ADDRESS_2 = 'address_2'
        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))

        ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        self._assertEqualListsOfPrimitivesAsSets(
            [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
            [ips_list[0].address, ips_list[1].address])

    def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
        instance_uuid = self._create_instance()

        FIXED_IP_ADDRESS_1 = 'address_1'
        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
        FIXED_IP_ADDRESS_2 = 'address_2'
        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))

        another_instance = db.instance_create(self.ctxt, {})
        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=another_instance['uuid'], address="another_addr"))

        ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        self._assertEqualListsOfPrimitivesAsSets(
            [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
            [ips_list[0].address, ips_list[1].address])

    def test_fixed_ip_get_by_instance_not_found_exception(self):
        instance_uuid = self._create_instance()

        self.assertRaises(exception.FixedIpNotFoundForInstance,
                          db.fixed_ip_get_by_instance,
                          self.ctxt, instance_uuid)

    def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
        instance_uuid = self._create_instance()

        vif = db.virtual_interface_create(
            self.ctxt, dict(instance_uuid=instance_uuid))

        FIXED_IP_ADDRESS = 'address'
        db.fixed_ip_create(self.ctxt, dict(
            virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))

        ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
        self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
                                                 [ips_list[0].address])

    def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
        instance_uuid = self._create_instance()

        vif = db.virtual_interface_create(
            self.ctxt, dict(instance_uuid=instance_uuid))

        FIXED_IP_ADDRESS_1 = 'address_1'
        db.fixed_ip_create(self.ctxt, dict(
            virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
        FIXED_IP_ADDRESS_2 = 'address_2'
        db.fixed_ip_create(self.ctxt, dict(
            virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))

        ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
        self._assertEqualListsOfPrimitivesAsSets(
            [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
            [ips_list[0].address, ips_list[1].address])

    def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
        instance_uuid = self._create_instance()

        vif = db.virtual_interface_create(
            self.ctxt, dict(instance_uuid=instance_uuid))

        FIXED_IP_ADDRESS_1 = 'address_1'
        db.fixed_ip_create(self.ctxt, dict(
            virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
        FIXED_IP_ADDRESS_2 = 'address_2'
        db.fixed_ip_create(self.ctxt, dict(
            virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))

        another_vif = db.virtual_interface_create(
            self.ctxt, dict(instance_uuid=instance_uuid))
        db.fixed_ip_create(self.ctxt, dict(
            virtual_interface_id=another_vif.id, address="another_addr"))

        ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
        self._assertEqualListsOfPrimitivesAsSets(
            [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
            [ips_list[0].address, ips_list[1].address])

    def test_fixed_ips_by_virtual_interface_no_ip_found(self):
        instance_uuid = self._create_instance()

        vif = db.virtual_interface_create(
            self.ctxt, dict(instance_uuid=instance_uuid))

        ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
        self.assertEqual(0, len(ips_list))

    def test_fixed_ip_count_by_project_one_ip(self):
        PROJECT_ID = "project_id"
        instance_uuid = self._create_instance(project_id=PROJECT_ID)
        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=instance_uuid, address='address'))

        ips_count = db.fixed_ip_count_by_project(self.ctxt, PROJECT_ID)
        self.assertEqual(1, ips_count)

    def test_fixed_ip_count_by_project_two_ips_for_different_instances(self):
        PROJECT_ID = "project_id"
        instance_uuid = self._create_instance(project_id=PROJECT_ID)

        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=instance_uuid, address='address_1'))

        another_instance_for_this_project = \
            db.instance_create(self.ctxt, dict(project_id=PROJECT_ID))

        db.fixed_ip_create(self.ctxt, dict(
            instance_uuid=another_instance_for_this_project['uuid'],
            address='address_2'))

        ips_count = db.fixed_ip_count_by_project(self.ctxt, PROJECT_ID)
        self.assertEqual(2, ips_count)

    def create_fixed_ip(self, **params):
        default_params = {'address': '192.168.0.1'}
        default_params.update(params)
        return db.fixed_ip_create(self.ctxt, default_params)['address']

    def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
        instance_uuid = self._create_instance()
        self.assertRaises(exception.FixedIpNotFoundForNetwork,
                          db.fixed_ip_associate,
                          self.ctxt, None, instance_uuid)

    def test_fixed_ip_associate_fails_if_ip_in_use(self):
        instance_uuid = self._create_instance()

        address = self.create_fixed_ip(instance_uuid=instance_uuid)
        self.assertRaises(exception.FixedIpAlreadyInUse,
                          db.fixed_ip_associate,
                          self.ctxt, address, instance_uuid)

    def test_fixed_ip_associate_succeeds(self):
        instance_uuid = self._create_instance()
        network = db.network_create_safe(self.ctxt, {})

        address = self.create_fixed_ip(network_id=network['id'])
        db.fixed_ip_associate(self.ctxt, address, instance_uuid,
                              network_id=network['id'])
        fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
        self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)

    def test_fixed_ip_associate_succeeds_and_sets_network(self):
        instance_uuid = self._create_instance()
        network = db.network_create_safe(self.ctxt, {})

        address = self.create_fixed_ip()
        db.fixed_ip_associate(self.ctxt, address, instance_uuid,
                              network_id=network['id'])
        fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
        self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
        self.assertEqual(fixed_ip['network_id'], network['id'])

    def test_fixed_ip_associate_pool_invalid_uuid(self):
        instance_uuid = '123'
        self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
                          self.ctxt, None, instance_uuid)

    def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
        instance_uuid = self._create_instance()
        self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
                          self.ctxt, None, instance_uuid)

    def test_fixed_ip_associate_pool_succeeds(self):
        instance_uuid = self._create_instance()
        network = db.network_create_safe(self.ctxt, {})

        address = self.create_fixed_ip(network_id=network['id'])
        db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
        fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
        self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)

    def test_fixed_ip_create(self):
        instance_uuid = self._create_instance()
        network_id = db.network_create_safe(self.ctxt, {})['id']
        param = {
            'reserved': False,
            'deleted': 0,
            'leased': False,
            'host': '127.0.0.1',
            'address': 'localhost',
            'allocated': False,
            'instance_uuid': instance_uuid,
            'network_id': network_id,
            'virtual_interface_id': None
        }

        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
        fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
        self._assertEqualObjects(param, fixed_ip_data, ignored_keys)

    def test_fixed_ip_bulk_create(self):
        address = 'fixed_ip_address'
        instance_uuid = self._create_instance()
        network_id_1 = db.network_create_safe(self.ctxt, {})['id']
        network_id_2 = db.network_create_safe(self.ctxt, {})['id']
        params = [
            {'reserved': False, 'deleted': 0, 'leased': False,
             'host': '127.0.0.1', 'address': address, 'allocated': False,
             'instance_uuid': instance_uuid, 'network_id': network_id_1,
             'virtual_interface_id': None},
            {'reserved': False, 'deleted': 0, 'leased': False,
             'host': 'localhost', 'address': address, 'allocated': True,
             'instance_uuid': instance_uuid, 'network_id': network_id_2,
             'virtual_interface_id': None}
        ]

        db.fixed_ip_bulk_create(self.ctxt, params)
        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
        fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)

        # we have no `id` in the incoming data, so we cannot use
        # _assertEqualListsOfObjects to compare the incoming data with the
        # received objects
        fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
        params = sorted(params, key=lambda i: i['network_id'])
        for param, ip in zip(params, fixed_ip_data):
            self._assertEqualObjects(param, ip, ignored_keys)

    def test_fixed_ip_disassociate(self):
        address = 'fixed_ip_address'
        instance_uuid = self._create_instance()
        network_id = db.network_create_safe(self.ctxt, {})['id']
        param = {
            'reserved': False,
            'deleted': 0,
            'leased': False,
            'host': '127.0.0.1',
            'address': address,
            'allocated': False,
            'instance_uuid': instance_uuid,
            'network_id': network_id,
            'virtual_interface_id': None
        }
        db.fixed_ip_create(self.ctxt, param)

        db.fixed_ip_disassociate(self.ctxt, address)
        fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
        ignored_keys = ['created_at', 'id', 'deleted_at',
                        'updated_at', 'instance_uuid']
        self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
        self.assertIsNone(fixed_ip_data['instance_uuid'])

    def test_fixed_ip_get_not_found_exception(self):
        self.assertRaises(exception.FixedIpNotFound,
                          db.fixed_ip_get, self.ctxt, 0)

    def test_fixed_ip_get_not_authorized(self):
        address = 'fixed_ip_address'
        instance_uuid = self._create_instance()
        network_id = db.network_create_safe(self.ctxt, {})['id']
        param = {
            'reserved': False,
            'deleted': 0,
            'leased': False,
            'host': '127.0.0.1',
            'address': address,
            'allocated': False,
            'instance_uuid': instance_uuid,
            'network_id': network_id,
            'virtual_interface_id': None
        }
        fixed_ip_id = db.fixed_ip_create(self.ctxt, param)['id']

        # a non-admin context may not look up fixed IPs directly
        self.ctxt.is_admin = False
        self.assertRaises(exception.NotAuthorized, db.fixed_ip_get,
                          self.ctxt, fixed_ip_id)

    def test_fixed_ip_get_success(self):
        address = 'fixed_ip_address'
        instance_uuid = self._create_instance()
        network_id = db.network_create_safe(self.ctxt, {})['id']
        param = {
            'reserved': False,
            'deleted': 0,
            'leased': False,
            'host': '127.0.0.1',
            'address': address,
            'allocated': False,
            'instance_uuid': instance_uuid,
            'network_id': network_id,
            'virtual_interface_id': None
        }
        db.fixed_ip_create(self.ctxt, param)

        fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
        fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
        self._assertEqualObjects(param, fixed_ip_data, ignored_keys)

    def test_fixed_ip_get_by_address_detailed_not_found_exception(self):
        self.assertRaises(exception.FixedIpNotFoundForAddress,
                          db.fixed_ip_get_by_address_detailed, self.ctxt, 'x')

    def test_fixed_ip_get_by_address_detailed_success(self):
        address = 'fixed_ip_address_123'
        instance_uuid = self._create_instance()
        network_id = db.network_create_safe(self.ctxt, {})['id']
        param = {
            'reserved': False,
            'deleted': 0,
            'leased': False,
            'host': '127.0.0.1',
            'address': address,
            'allocated': False,
            'instance_uuid': instance_uuid,
            'network_id': network_id,
            'virtual_interface_id': None
        }
        db.fixed_ip_create(self.ctxt, param)

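        # fixed_ip_get_by_address_detailed returns a
        # (fixed_ip, network, instance) triple; verify each element in turn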
        fixed_ip_data = db.fixed_ip_get_by_address_detailed(self.ctxt,
                                                            address)
        # fixed ip check here
        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
        self._assertEqualObjects(param, fixed_ip_data[0], ignored_keys)

        # network model check here
        network_data = db.network_get(self.ctxt, network_id)
        self._assertEqualObjects(network_data, fixed_ip_data[1])

        # instance check here; the joined relationships are loaded
        # differently, so they are skipped in the comparison
        instance_data = db.instance_get_by_uuid(self.ctxt, instance_uuid)
        ignored_keys = ['info_cache', 'system_metadata',
                        'security_groups', 'metadata']
        self._assertEqualObjects(instance_data, fixed_ip_data[2], ignored_keys)

    def test_fixed_ip_update_not_found_for_address(self):
        self.assertRaises(exception.FixedIpNotFoundForAddress,
                          db.fixed_ip_update, self.ctxt,
                          'fixed_ip_address', {})

    def test_fixed_ip_update(self):
        instance_uuid_1 = self._create_instance()
        instance_uuid_2 = self._create_instance()
        network_id_1 = db.network_create_safe(self.ctxt, {})['id']
        network_id_2 = db.network_create_safe(self.ctxt, {})['id']
        param_1 = {
            'reserved': True, 'deleted': 0, 'leased': True,
            'host': '192.168.133.1', 'address': 'localhost',
            'allocated': True, 'instance_uuid': instance_uuid_1,
            'network_id': network_id_1, 'virtual_interface_id': '123',
        }

        param_2 = {
            'reserved': False, 'deleted': 0, 'leased': False,
            'host': '127.0.0.1', 'address': 'localhost', 'allocated': False,
            'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
            'virtual_interface_id': None
        }

        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
        fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
        db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
        fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
                                                           param_2['address'])
        self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)


class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):

    def setUp(self):
        super(FloatingIpTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        return {
            'address': '1.1.1.1',
            'fixed_ip_id': None,
            'project_id': 'fake_project',
            'host': 'fake_host',
            'auto_assigned': False,
            'pool': 'fake_pool',
            'interface': 'fake_interface',
        }

    def _create_floating_ip(self, values):
        if not values:
            values = {}
        vals = self._get_base_values()
        vals.update(values)
        return db.floating_ip_create(self.ctxt, vals)

    def test_floating_ip_get(self):
        values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
        floating_ips = [self._create_floating_ip(val) for val in values]

        for floating_ip in floating_ips:
            real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
            self._assertEqualObjects(floating_ip, real_floating_ip,
                                     ignored_keys=['fixed_ip'])

    def test_floating_ip_get_not_found(self):
        self.assertRaises(exception.FloatingIpNotFound,
                          db.floating_ip_get, self.ctxt, 100500)

    def test_floating_ip_get_pools(self):
        values = [
            {'address': '0.0.0.0', 'pool': 'abc'},
            {'address': '1.1.1.1', 'pool': 'abc'},
            {'address': '2.2.2.2', 'pool': 'def'},
            {'address': '3.3.3.3', 'pool': 'ghi'},
        ]
        for val in values:
            self._create_floating_ip(val)
        expected_pools = [{'name': x}
                          for x in set(map(lambda x: x['pool'], values))]
        real_pools = db.floating_ip_get_pools(self.ctxt)
        self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)

    def test_floating_ip_allocate_address(self):
        pools = {
            'pool1': ['0.0.0.0', '1.1.1.1'],
            'pool2': ['2.2.2.2'],
            'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
        }
        for pool, addresses in pools.iteritems():
            for address in addresses:
                vals = {'pool': pool, 'address': address, 'project_id': None}
                self._create_floating_ip(vals)

        project_id = self._get_base_values()['project_id']
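        # allocating exactly as many addresses as each pool holds should
        # hand back every address in that pool, in some order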
for pool, addresses in pools.iteritems():
|
|
alloc_addrs = []
|
|
for i in addresses:
|
|
float_addr = db.floating_ip_allocate_address(self.ctxt,
|
|
project_id, pool)
|
|
alloc_addrs.append(float_addr)
|
|
self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
|
|
|
|
def test_floating_ip_allocate_address_no_more_floating_ips(self):
|
|
self.assertRaises(exception.NoMoreFloatingIps,
|
|
db.floating_ip_allocate_address,
|
|
self.ctxt, 'any_project_id', 'no_such_pool')
|
|
|
|
def test_floating_ip_allocate_not_authorized(self):
|
|
ctxt = context.RequestContext(user_id='a', project_id='abc',
|
|
is_admin=False)
|
|
self.assertRaises(exception.NotAuthorized,
|
|
db.floating_ip_allocate_address,
|
|
ctxt, 'other_project_id', 'any_pool')
|
|
|
|
def _get_existing_ips(self):
|
|
return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
|
|
|
|
def test_floating_ip_bulk_create(self):
|
|
expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
|
|
db.floating_ip_bulk_create(self.ctxt,
|
|
map(lambda x: {'address': x}, expected_ips))
|
|
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
|
|
expected_ips)
|
|
|
|
def test_floating_ip_bulk_create_duplicate(self):
|
|
ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
|
|
prepare_ips = lambda x: {'address': x}
|
|
|
|
db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
|
|
self.assertRaises(exception.FloatingIpExists,
|
|
db.floating_ip_bulk_create,
|
|
self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
|
|
self.assertRaises(exception.FloatingIpNotFoundForAddress,
|
|
db.floating_ip_get_by_address,
|
|
self.ctxt, '1.1.1.5')
|
|
|
|
def test_floating_ip_bulk_destroy(self):
|
|
ips_for_delete = []
|
|
ips_for_non_delete = []
|
|
|
|
def create_ips(i):
|
|
return [{'address': '1.1.%s.%s' % (i, k)} for k in xrange(1, 256)]
|
|
|
|
# NOTE(boris-42): Create more then 256 ip to check that
|
|
# _ip_range_splitter works properly.
|
|
for i in xrange(1, 3):
|
|
ips_for_delete.extend(create_ips(i))
|
|
ips_for_non_delete.extend(create_ips(3))
|
|
|
|
db.floating_ip_bulk_create(self.ctxt,
|
|
ips_for_delete + ips_for_non_delete)
|
|
db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
|
|
|
|
expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
|
|
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
|
|
expected_addresses)
|
|
|
|
def test_floating_ip_create(self):
|
|
floating_ip = self._create_floating_ip({})
|
|
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
|
|
'created_at']
|
|
|
|
self.assertFalse(floating_ip['id'] is None)
|
|
self._assertEqualObjects(floating_ip, self._get_base_values(),
|
|
ignored_keys)
|
|
|
|
def test_floating_ip_create_duplicate(self):
|
|
self._create_floating_ip({})
|
|
self.assertRaises(exception.FloatingIpExists,
|
|
self._create_floating_ip, {})
|
|
|
|
def test_floating_ip_count_by_project(self):
|
|
projects = {
|
|
'project1': ['1.1.1.1', '2.2.2.2', '3.3.3.3'],
|
|
'project2': ['4.4.4.4', '5.5.5.5'],
|
|
'project3': ['6.6.6.6']
|
|
}
|
|
for project_id, addresses in projects.iteritems():
|
|
for address in addresses:
|
|
self._create_floating_ip({'project_id': project_id,
|
|
'address': address})
|
|
for project_id, addresses in projects.iteritems():
|
|
real_count = db.floating_ip_count_by_project(self.ctxt, project_id)
|
|
self.assertEqual(len(addresses), real_count)
|
|
|
|
def test_floating_ip_count_by_project_not_authorized(self):
|
|
ctxt = context.RequestContext(user_id='a', project_id='abc',
|
|
is_admin=False)
|
|
self.assertRaises(exception.NotAuthorized,
|
|
db.floating_ip_count_by_project, ctxt, 'def')
|
|
|
|
def _create_fixed_ip(self, params):
|
|
default_params = {'address': '192.168.0.1'}
|
|
default_params.update(params)
|
|
return db.fixed_ip_create(self.ctxt, default_params)['address']
|
|
|
|
    def test_floating_ip_fixed_ip_associate(self):
        float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']

        float_ips = [self._create_floating_ip({'address': address})
                     for address in float_addresses]
        fixed_addrs = [self._create_fixed_ip({'address': address})
                       for address in fixed_addresses]

        for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
            fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
                                                         float_ip.address,
                                                         fixed_addr, 'host')
            self.assertEqual(fixed_ip.address, fixed_addr)

            updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
            self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
            self.assertEqual('host', updated_float_ip.host)

        # Test that associating an already associated float_ip returns None
        result = db.floating_ip_fixed_ip_associate(self.ctxt,
                                                   float_addresses[0],
                                                   fixed_addresses[0], 'host')
        self.assertTrue(result is None)

    def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          db.floating_ip_fixed_ip_associate,
                          self.ctxt, 'non exist', 'some', 'some')

    def test_floating_ip_deallocate(self):
        values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
        float_ip = self._create_floating_ip(values)
        db.floating_ip_deallocate(self.ctxt, float_ip.address)

        updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
        self.assertTrue(updated_float_ip.project_id is None)
        self.assertTrue(updated_float_ip.host is None)
        self.assertFalse(updated_float_ip.auto_assigned)

    def test_floating_ip_destroy(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr})
                     for addr in addresses]

        expected_len = len(addresses)
        for float_ip in float_ips:
            db.floating_ip_destroy(self.ctxt, float_ip.address)
            self.assertRaises(exception.FloatingIpNotFound,
                              db.floating_ip_get, self.ctxt, float_ip.id)
            expected_len -= 1
            if expected_len > 0:
                self.assertEqual(expected_len,
                                 len(db.floating_ip_get_all(self.ctxt)))
            else:
                self.assertRaises(exception.NoFloatingIpsDefined,
                                  db.floating_ip_get_all, self.ctxt)

    def test_floating_ip_disassociate(self):
        float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']

        float_ips = [self._create_floating_ip({'address': address})
                     for address in float_addresses]
        fixed_addrs = [self._create_fixed_ip({'address': address})
                       for address in fixed_addresses]

        for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
            db.floating_ip_fixed_ip_associate(self.ctxt,
                                              float_ip.address,
                                              fixed_addr, 'host')

        for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
            fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
            self.assertEqual(fixed.address, fixed_addr)
            updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
            self.assertTrue(updated_float_ip.fixed_ip_id is None)
            self.assertTrue(updated_float_ip.host is None)

    def test_floating_ip_disassociate_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          db.floating_ip_disassociate, self.ctxt, 'non exist')

    def test_floating_ip_set_auto_assigned(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr,
                                               'auto_assigned': False})
                     for addr in addresses]

        for i in xrange(2):
            db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address)
        for i in xrange(2):
            float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
            self.assertTrue(float_ip.auto_assigned)

        float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id)
        self.assertFalse(float_ip.auto_assigned)

    def test_floating_ip_get_all(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr})
                     for addr in addresses]
        self._assertEqualListsOfObjects(float_ips,
                                        db.floating_ip_get_all(self.ctxt))

    def test_floating_ip_get_all_not_found(self):
        self.assertRaises(exception.NoFloatingIpsDefined,
                          db.floating_ip_get_all, self.ctxt)

    def test_floating_ip_get_all_by_host(self):
        hosts = {
            'host1': ['1.1.1.1', '1.1.1.2'],
            'host2': ['2.1.1.1', '2.1.1.2'],
            'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
        }

        hosts_with_float_ips = {}
        for host, addresses in hosts.iteritems():
            hosts_with_float_ips[host] = []
            for address in addresses:
                float_ip = self._create_floating_ip({'host': host,
                                                     'address': address})
                hosts_with_float_ips[host].append(float_ip)

        for host, float_ips in hosts_with_float_ips.iteritems():
            real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
            self._assertEqualListsOfObjects(float_ips, real_float_ips)

    def test_floating_ip_get_all_by_host_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForHost,
                          db.floating_ip_get_all_by_host,
                          self.ctxt, 'non_exists_host')

    def test_floating_ip_get_all_by_project(self):
        projects = {
            'pr1': ['1.1.1.1', '1.1.1.2'],
            'pr2': ['2.1.1.1', '2.1.1.2'],
            'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
        }

        projects_with_float_ips = {}
        for project_id, addresses in projects.iteritems():
            projects_with_float_ips[project_id] = []
            for address in addresses:
                float_ip = self._create_floating_ip({'project_id': project_id,
                                                     'address': address})
                projects_with_float_ips[project_id].append(float_ip)

        for project_id, float_ips in projects_with_float_ips.iteritems():
            real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
                                                               project_id)
            self._assertEqualListsOfObjects(float_ips, real_float_ips,
                                            ignored_keys='fixed_ip')

    def test_floating_ip_get_all_by_project_not_authorized(self):
        ctxt = context.RequestContext(user_id='a', project_id='abc',
                                      is_admin=False)
        self.assertRaises(exception.NotAuthorized,
                          db.floating_ip_get_all_by_project,
                          ctxt, 'other_project')

    def test_floating_ip_get_by_address(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr})
                     for addr in addresses]

        for float_ip in float_ips:
            real_float_ip = db.floating_ip_get_by_address(self.ctxt,
                                                          float_ip.address)
            self._assertEqualObjects(float_ip, real_float_ip,
                                     ignored_keys='fixed_ip')

    def test_floating_ip_get_by_address_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          db.floating_ip_get_by_address,
                          self.ctxt, 'non_exists_host')

    def test_floating_ip_get_by_fixed_address(self):
        fixed_float = [
            ('1.1.1.1', '2.2.2.1'),
            ('1.1.1.2', '2.2.2.2'),
            ('1.1.1.3', '2.2.2.3')
        ]

        for fixed_addr, float_addr in fixed_float:
            self._create_floating_ip({'address': float_addr})
            self._create_fixed_ip({'address': fixed_addr})
            db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
                                              fixed_addr, 'some_host')

        for fixed_addr, float_addr in fixed_float:
            float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
                                                           fixed_addr)
            self.assertEqual(float_addr, float_ip[0]['address'])

    def test_floating_ip_get_by_fixed_ip_id(self):
        fixed_float = [
            ('1.1.1.1', '2.2.2.1'),
            ('1.1.1.2', '2.2.2.2'),
            ('1.1.1.3', '2.2.2.3')
        ]

        for fixed_addr, float_addr in fixed_float:
            self._create_floating_ip({'address': float_addr})
            self._create_fixed_ip({'address': fixed_addr})
            db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
                                              fixed_addr, 'some_host')

        for fixed_addr, float_addr in fixed_float:
            fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
            float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
                                                         fixed_ip['id'])
            self.assertEqual(float_addr, float_ip[0]['address'])

    def test_floating_ip_update(self):
        float_ip = self._create_floating_ip({})

        values = {
            'project_id': 'some_pr',
            'host': 'some_host',
            'auto_assigned': True,
            'interface': 'some_interface',
            'pool': 'some_pool'
        }
        db.floating_ip_update(self.ctxt, float_ip['address'], values)
        updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
        self._assertEqualObjects(updated_float_ip, values,
                                 ignored_keys=['id', 'address', 'updated_at',
                                               'deleted_at', 'created_at',
                                               'deleted', 'fixed_ip_id',
                                               'fixed_ip'])

    def test_floating_ip_update_to_duplicate(self):
        float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
        float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})

        self.assertRaises(exception.FloatingIpExists,
                          db.floating_ip_update,
                          self.ctxt, float_ip2['address'],
                          {'address': float_ip1['address']})

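
# NOTE: a rough sketch of the constraint mechanism the next test class
# exercises, assuming the condition objects translate to simple SQL filters
# (the real implementation lives in nova.db.sqlalchemy.api):
#
#     constraint = db.constraint(task_state=db.equal_any('deleting'))
#     # roughly: ... WHERE task_state IN ('deleting')
#     constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
#     # roughly: ... WHERE task_state NOT IN ('error', 'resize')
#
# When no row satisfies the constraint, instance_destroy raises
# ConstraintNotMet and leaves the instance untouched.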
class InstanceDestroyConstraints(test.TestCase):

    def test_destroy_with_equal_any_constraint_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'task_state': 'deleting'})
        constraint = db.constraint(task_state=db.equal_any('deleting'))
        db.instance_destroy(ctx, instance['uuid'], constraint)
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          ctx, instance['uuid'])

    def test_destroy_with_equal_any_constraint_not_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'vm_state': 'resize'})
        constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
        self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
                          ctx, instance['uuid'], constraint)
        instance = db.instance_get_by_uuid(ctx, instance['uuid'])
        self.assertFalse(instance['deleted'])

    def test_destroy_with_not_equal_constraint_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'task_state': 'deleting'})
        constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
        db.instance_destroy(ctx, instance['uuid'], constraint)
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          ctx, instance['uuid'])

    def test_destroy_with_not_equal_constraint_not_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'vm_state': 'active'})
        constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
        self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
                          ctx, instance['uuid'], constraint)
        instance = db.instance_get_by_uuid(ctx, instance['uuid'])
        self.assertFalse(instance['deleted'])


class VolumeUsageDBApiTestCase(test.TestCase):

    def setUp(self):
        super(VolumeUsageDBApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        self.useFixture(test.TimeOverride())

    def test_vol_usage_update_no_totals_update(self):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        start_time = now - datetime.timedelta(seconds=10)
        refreshed_time = now - datetime.timedelta(seconds=5)

        expected_vol_usages = [{'volume_id': u'1',
                                'instance_uuid': 'fake-instance-uuid1',
                                'project_id': 'fake-project-uuid1',
                                'user_id': 'fake-user-uuid1',
                                'curr_reads': 1000,
                                'curr_read_bytes': 2000,
                                'curr_writes': 3000,
                                'curr_write_bytes': 4000,
                                'tot_reads': 0,
                                'tot_read_bytes': 0,
                                'tot_writes': 0,
                                'tot_write_bytes': 0},
                               {'volume_id': u'2',
                                'instance_uuid': 'fake-instance-uuid2',
                                'project_id': 'fake-project-uuid2',
                                'user_id': 'fake-user-uuid2',
                                'curr_reads': 100,
                                'curr_read_bytes': 200,
                                'curr_writes': 300,
                                'curr_write_bytes': 400,
                                'tot_reads': 0,
                                'tot_read_bytes': 0,
                                'tot_writes': 0,
                                'tot_write_bytes': 0}]

        def _compare(vol_usage, expected):
            for key, value in expected.items():
                self.assertEqual(vol_usage[key], value)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 0)

        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=10, rd_bytes=20,
                                        wr_req=30, wr_bytes=40,
                                        instance_id='fake-instance-uuid1',
                                        project_id='fake-project-uuid1',
                                        user_id='fake-user-uuid1',
                                        availability_zone='fake-az')
        vol_usage = db.vol_usage_update(ctxt, 2, rd_req=100, rd_bytes=200,
                                        wr_req=300, wr_bytes=400,
                                        instance_id='fake-instance-uuid2',
                                        project_id='fake-project-uuid2',
                                        user_id='fake-user-uuid2',
                                        availability_zone='fake-az')
        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=1000, rd_bytes=2000,
                                        wr_req=3000, wr_bytes=4000,
                                        instance_id='fake-instance-uuid1',
                                        project_id='fake-project-uuid1',
                                        user_id='fake-user-uuid1',
                                        availability_zone='fake-az',
                                        last_refreshed=refreshed_time)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 2)
        _compare(vol_usages[0], expected_vol_usages[0])
        _compare(vol_usages[1], expected_vol_usages[1])

    def test_vol_usage_update_totals_update(self):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        start_time = now - datetime.timedelta(seconds=10)

        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=100, rd_bytes=200,
                                        wr_req=300, wr_bytes=400,
                                        instance_id='fake-instance-uuid',
                                        project_id='fake-project-uuid',
                                        user_id='fake-user-uuid',
                                        availability_zone='fake-az')
        current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        self.assertEqual(current_usage['tot_reads'], 0)
        self.assertEqual(current_usage['curr_reads'], 100)

        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=200, rd_bytes=300,
                                        wr_req=400, wr_bytes=500,
                                        instance_id='fake-instance-uuid',
                                        project_id='fake-project-uuid',
                                        user_id='fake-user-uuid',
                                        availability_zone='fake-az',
                                        update_totals=True)
        current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        self.assertEqual(current_usage['tot_reads'], 200)
        self.assertEqual(current_usage['curr_reads'], 0)

        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=300, rd_bytes=400,
                                        wr_req=500, wr_bytes=600,
                                        instance_id='fake-instance-uuid',
                                        project_id='fake-project-uuid',
                                        availability_zone='fake-az',
                                        user_id='fake-user-uuid')
        current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        self.assertEqual(current_usage['tot_reads'], 200)
        self.assertEqual(current_usage['curr_reads'], 300)

        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=400, rd_bytes=500,
                                        wr_req=600, wr_bytes=700,
                                        instance_id='fake-instance-uuid',
                                        project_id='fake-project-uuid',
                                        user_id='fake-user-uuid',
                                        availability_zone='fake-az',
                                        update_totals=True)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)

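        # Each update_totals=True call folds the counters reported at that
        # point into the tot_* columns and zeroes the curr_* columns, so the
        # expected totals are the two folded reports summed, e.g.
        # tot_reads = 200 + 400 = 600 and tot_read_bytes = 300 + 500 = 800.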
        expected_vol_usages = {'volume_id': u'1',
                               'project_id': 'fake-project-uuid',
                               'user_id': 'fake-user-uuid',
                               'instance_uuid': 'fake-instance-uuid',
                               'availability_zone': 'fake-az',
                               'tot_reads': 600,
                               'tot_read_bytes': 800,
                               'tot_writes': 1000,
                               'tot_write_bytes': 1200,
                               'curr_reads': 0,
                               'curr_read_bytes': 0,
                               'curr_writes': 0,
                               'curr_write_bytes': 0}

        self.assertEqual(1, len(vol_usages))
        for key, value in expected_vol_usages.items():
            self.assertEqual(vol_usages[0][key], value)

    def test_vol_usage_update_when_blockdevicestats_reset(self):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        start_time = now - datetime.timedelta(seconds=10)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 0)

        db.vol_usage_update(ctxt, 1,
                            rd_req=10000, rd_bytes=20000,
                            wr_req=30000, wr_bytes=40000,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
        db.vol_usage_update(ctxt, 1,
                            rd_req=100, rd_bytes=200,
                            wr_req=300, wr_bytes=400,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        db.vol_usage_update(ctxt, 1,
                            rd_req=200, rd_bytes=300,
                            wr_req=400, wr_bytes=500,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
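        # A reported counter lower than the stored curr_* value signals a
        # counter reset, so the old curr_* values (10000, 20000, ...) are
        # folded into tot_* and current counting restarts from the new
        # report.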
        expected_vol_usage = {'volume_id': u'1',
                              'instance_uuid': 'fake-instance-uuid1',
                              'project_id': 'fake-project-uuid1',
                              'availability_zone': 'fake-az',
                              'user_id': 'fake-user-uuid1',
                              'curr_reads': 200,
                              'curr_read_bytes': 300,
                              'curr_writes': 400,
                              'curr_write_bytes': 500,
                              'tot_reads': 10000,
                              'tot_read_bytes': 20000,
                              'tot_writes': 30000,
                              'tot_write_bytes': 40000}
        for key, value in expected_vol_usage.items():
            self.assertEqual(vol_usage[key], value, key)

    def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
        # This is unlikely to happen, but could when a volume is detached
        # right after an instance has rebooted / recovered and before
        # the system polled and updated the volume usage cache table.
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        start_time = now - datetime.timedelta(seconds=10)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 0)

        db.vol_usage_update(ctxt, 1,
                            rd_req=10000, rd_bytes=20000,
                            wr_req=30000, wr_bytes=40000,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
        db.vol_usage_update(ctxt, 1,
                            rd_req=100, rd_bytes=200,
                            wr_req=300, wr_bytes=400,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1',
                            update_totals=True)

        vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
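        # The totals include both the pre-reset counters (10000, 20000, ...)
        # and the post-reset report (100, 200, ...), e.g.
        # tot_reads = 10000 + 100 = 10100.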
        expected_vol_usage = {'volume_id': u'1',
                              'instance_uuid': 'fake-instance-uuid1',
                              'project_id': 'fake-project-uuid1',
                              'availability_zone': 'fake-az',
                              'user_id': 'fake-user-uuid1',
                              'curr_reads': 0,
                              'curr_read_bytes': 0,
                              'curr_writes': 0,
                              'curr_write_bytes': 0,
                              'tot_reads': 10100,
                              'tot_read_bytes': 20200,
                              'tot_writes': 30300,
                              'tot_write_bytes': 40400}
        for key, value in expected_vol_usage.items():
            self.assertEqual(vol_usage[key], value, key)


class TaskLogTestCase(test.TestCase):

    def setUp(self):
        super(TaskLogTestCase, self).setUp()
        self.context = context.get_admin_context()
        now = timeutils.utcnow()
        self.begin = now - datetime.timedelta(seconds=10)
        self.end = now - datetime.timedelta(seconds=5)
        self.task_name = 'fake-task-name'
        self.host = 'fake-host'
        self.message = 'Fake task message'
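        # Seed one task_log row so every test has an existing entry to read
        # back or collide with.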
        db.task_log_begin_task(self.context, self.task_name, self.begin,
                               self.end, self.host, message=self.message)

    def test_task_log_get(self):
        result = db.task_log_get(self.context, self.task_name, self.begin,
                                 self.end, self.host)
        self.assertEqual(result['task_name'], self.task_name)
        self.assertEqual(result['period_beginning'], self.begin)
        self.assertEqual(result['period_ending'], self.end)
        self.assertEqual(result['host'], self.host)
        self.assertEqual(result['message'], self.message)

    def test_task_log_get_all(self):
        result = db.task_log_get_all(self.context, self.task_name, self.begin,
                                     self.end, host=self.host)
        self.assertEqual(len(result), 1)

    def test_task_log_begin_task(self):
        db.task_log_begin_task(self.context, 'fake', self.begin,
                               self.end, self.host, message=self.message)
        result = db.task_log_get(self.context, 'fake', self.begin,
                                 self.end, self.host)
        self.assertEqual(result['task_name'], 'fake')

    def test_task_log_begin_task_duplicate(self):
        params = (self.context, 'fake', self.begin, self.end, self.host)
        db.task_log_begin_task(*params, message=self.message)
        self.assertRaises(exception.TaskAlreadyRunning,
                          db.task_log_begin_task,
                          *params, message=self.message)

    def test_task_log_end_task(self):
        errors = 1
        db.task_log_end_task(self.context, self.task_name, self.begin,
                             self.end, self.host, errors,
                             message=self.message)
        result = db.task_log_get(self.context, self.task_name, self.begin,
                                 self.end, self.host)
        self.assertEqual(result['errors'], 1)


class BlockDeviceMappingTestCase(test.TestCase):
    def setUp(self):
        super(BlockDeviceMappingTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.instance = db.instance_create(self.ctxt, {})

    def _create_bdm(self, values):
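        # Create a BDM for self.instance by default, then look it up again
        # by device_name so callers get the row as actually stored in the DB.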
        values.setdefault('instance_uuid', self.instance['uuid'])
        values.setdefault('device_name', 'fake_device')
        db.block_device_mapping_create(self.ctxt, values)
        uuid = values['instance_uuid']

        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)

        for bdm in bdms:
            if bdm['device_name'] == values['device_name']:
                return bdm

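    # _scrub_empty_str_values drops keys whose value is an empty string so
    # they never reach integer columns such as volume_size; any other value
    # must pass through untouched.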
    def test_scrub_empty_str_values_no_effect(self):
        values = {'volume_size': 5}
        expected = copy.copy(values)
        sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
        self.assertEqual(values, expected)

    def test_scrub_empty_str_values_empty_string(self):
        values = {'volume_size': ''}
        sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
        self.assertEqual(values, {})

    def test_scrub_empty_str_values_empty_unicode(self):
        values = {'volume_size': u''}
        sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
        self.assertEqual(values, {})

    def test_block_device_mapping_create(self):
        bdm = self._create_bdm({})
        self.assertFalse(bdm is None)

    def test_block_device_mapping_update(self):
        bdm = self._create_bdm({})
        db.block_device_mapping_update(self.ctxt, bdm['id'],
                                       {'virtual_name': 'some_virt_name'})
        uuid = bdm['instance_uuid']
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(bdm_real[0]['virtual_name'], 'some_virt_name')

    def test_block_device_mapping_update_or_create(self):
        values = {
            'instance_uuid': self.instance['uuid'],
            'device_name': 'fake_name',
            'virtual_name': 'some_virt_name'
        }
        # check create
        db.block_device_mapping_update_or_create(self.ctxt, values)
        uuid = values['instance_uuid']
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 1)
        self.assertEqual(bdm_real[0]['device_name'], 'fake_name')

        # check update
        values['virtual_name'] = 'virtual_name'
        db.block_device_mapping_update_or_create(self.ctxt, values)
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 1)
        bdm_real = bdm_real[0]
        self.assertEqual(bdm_real['device_name'], 'fake_name')
        self.assertEqual(bdm_real['virtual_name'], 'virtual_name')

    def test_block_device_mapping_update_or_create_check_remove_virt(self):
        uuid = self.instance['uuid']
        values = {
            'instance_uuid': uuid,
            'virtual_name': 'ephemeral12'
        }

        # check that old bdms with the same virtual_name are deleted on create
        val1 = dict(values)
        val1['device_name'] = 'device1'
        db.block_device_mapping_create(self.ctxt, val1)
        val2 = dict(values)
        val2['device_name'] = 'device2'
        db.block_device_mapping_update_or_create(self.ctxt, val2)
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 1)
        bdm_real = bdm_real[0]
        self.assertEqual(bdm_real['device_name'], 'device2')
        self.assertEqual(bdm_real['virtual_name'], 'ephemeral12')

        # check that old bdms with the same virtual_name are deleted on update
        val3 = dict(values)
        val3['device_name'] = 'device3'
        val3['virtual_name'] = 'some_name'
        db.block_device_mapping_create(self.ctxt, val3)
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 2)

        val3['virtual_name'] = 'ephemeral12'
        db.block_device_mapping_update_or_create(self.ctxt, val3)
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 1)
        bdm_real = bdm_real[0]
        self.assertEqual(bdm_real['device_name'], 'device3')
        self.assertEqual(bdm_real['virtual_name'], 'ephemeral12')

    def test_block_device_mapping_get_all_by_instance(self):
        uuid1 = self.instance['uuid']
        uuid2 = db.instance_create(self.ctxt, {})['uuid']

        bdms_values = [{'instance_uuid': uuid1,
                        'virtual_name': 'virtual_name',
                        'device_name': 'first'},
                       {'instance_uuid': uuid2,
                        'virtual_name': 'virtual_name1',
                        'device_name': 'second'},
                       {'instance_uuid': uuid2,
                        'virtual_name': 'virtual_name2',
                        'device_name': 'third'}]

        for bdm in bdms_values:
            self._create_bdm(bdm)

        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
        self.assertEqual(len(bdms), 1)
        self.assertEqual(bdms[0]['virtual_name'], 'virtual_name')
        self.assertEqual(bdms[0]['device_name'], 'first')

        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
        self.assertEqual(len(bdms), 2)

    def test_block_device_mapping_destroy(self):
        bdm = self._create_bdm({})
        db.block_device_mapping_destroy(self.ctxt, bdm['id'])
        bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
                                                          bdm['instance_uuid'])
        self.assertEqual(len(bdm), 0)

    def test_block_device_mapping_destroy_by_instance_and_volume(self):
        vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
        vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'

        self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1})
        self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2})

        uuid = self.instance['uuid']
        db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
                                                               vol_id1)
        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdms), 1)
        self.assertEqual(bdms[0]['device_name'], 'fake2')

    def test_block_device_mapping_destroy_by_instance_and_device(self):
        self._create_bdm({'device_name': 'fake1'})
        self._create_bdm({'device_name': 'fake2'})

        uuid = self.instance['uuid']
        params = (self.ctxt, uuid, 'fake1')
        db.block_device_mapping_destroy_by_instance_and_device(*params)

        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdms), 1)
        self.assertEqual(bdms[0]['device_name'], 'fake2')


class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
    def setUp(self):
        super(VirtualInterfaceTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
        values = {'host': 'localhost', 'project_id': 'project1'}
        self.network = db.network_create_safe(self.ctxt, values)

    def _get_base_values(self):
        return {
            'instance_uuid': self.instance_uuid,
            'address': 'fake_address',
            'network_id': self.network['id'],
            'uuid': str(stdlib_uuid.uuid4())
        }

    def _create_virt_interface(self, values):
        v = self._get_base_values()
        v.update(values)
        return db.virtual_interface_create(self.ctxt, v)

    def test_virtual_interface_create(self):
        vif = self._create_virt_interface({})
        self.assertFalse(vif['id'] is None)
        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
                        'created_at', 'uuid']
        self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)

    @test.testtools.skip("bug 1156227")
    def test_virtual_interface_create_with_duplicate_address(self):
        vif = self._create_virt_interface({})
        # NOTE(boris-42): Due to bug 1156227 this does not work yet; the fix
        #                 is expected in havana-1.
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._create_virt_interface, {"uuid": vif['uuid']})

    def test_virtual_interface_get(self):
        vifs = [self._create_virt_interface({'address': 'a'}),
                self._create_virt_interface({'address': 'b'})]

        for vif in vifs:
            real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
            self._assertEqualObjects(vif, real_vif)

    def test_virtual_interface_get_by_address(self):
        vifs = [self._create_virt_interface({'address': 'first'}),
                self._create_virt_interface({'address': 'second'})]
        for vif in vifs:
            real_vif = db.virtual_interface_get_by_address(self.ctxt,
                                                           vif['address'])
            self._assertEqualObjects(vif, real_vif)

    def test_virtual_interface_get_by_uuid(self):
        vifs = [self._create_virt_interface({}),
                self._create_virt_interface({})]
        for vif in vifs:
            real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
            self._assertEqualObjects(vif, real_vif)

    def test_virtual_interface_get_by_instance(self):
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
        vifs1 = [self._create_virt_interface({'address': 'fake1'}),
                 self._create_virt_interface({'address': 'fake2'})]
        vifs2 = [self._create_virt_interface({'address': 'fake3',
                                              'instance_uuid': inst_uuid2})]
        vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
                                                          self.instance_uuid)
        vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
                                                          inst_uuid2)
        self._assertEqualListsOfObjects(vifs1, vifs1_real)
        self._assertEqualListsOfObjects(vifs2, vifs2_real)

    def test_virtual_interface_get_by_instance_and_network(self):
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
        values = {'host': 'localhost', 'project_id': 'project2'}
        network_id = db.network_create_safe(self.ctxt, values)['id']

        vifs = [self._create_virt_interface({'address': 'fake1'}),
                self._create_virt_interface({'address': 'fake2',
                                             'network_id': network_id,
                                             'instance_uuid': inst_uuid2}),
                self._create_virt_interface({'address': 'fake3',
                                             'instance_uuid': inst_uuid2})]
        for vif in vifs:
            params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
            r_vif = db.virtual_interface_get_by_instance_and_network(*params)
            self._assertEqualObjects(r_vif, vif)

    def test_virtual_interface_delete_by_instance(self):
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']

        values = [dict(address='fake1'), dict(address='fake2'),
                  dict(address='fake3', instance_uuid=inst_uuid2)]
        for vals in values:
            self._create_virt_interface(vals)

        db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)

        real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
                                                          self.instance_uuid)
        real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
                                                          inst_uuid2)
        self.assertEqual(len(real_vifs1), 0)
        self.assertEqual(len(real_vifs2), 1)

    def test_virtual_interface_get_all(self):
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
        values = [dict(address='fake1'), dict(address='fake2'),
                  dict(address='fake3', instance_uuid=inst_uuid2)]

        vifs = [self._create_virt_interface(val) for val in values]
        real_vifs = db.virtual_interface_get_all(self.ctxt)
        self._assertEqualListsOfObjects(vifs, real_vifs)


class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
    def setUp(self):
        super(KeyPairTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _create_key_pair(self, values):
        return db.key_pair_create(self.ctxt, values)

    def test_key_pair_create(self):
        param = {
            'name': 'test_1',
            'user_id': 'test_user_id_1',
            'public_key': 'test_public_key_1',
            'fingerprint': 'test_fingerprint_1'
        }
        key_pair = self._create_key_pair(param)

        self.assertTrue(key_pair['id'] is not None)
        ignored_keys = ['deleted', 'created_at', 'updated_at',
                        'deleted_at', 'id']
        self._assertEqualObjects(key_pair, param, ignored_keys)

    def test_key_pair_create_with_duplicate_name(self):
        params = {'name': 'test_name', 'user_id': 'test_user_id'}
        self._create_key_pair(params)
        self.assertRaises(exception.KeyPairExists, self._create_key_pair,
                          params)

    def test_key_pair_get(self):
        params = [
            {'name': 'test_1', 'user_id': 'test_user_id_1'},
            {'name': 'test_2', 'user_id': 'test_user_id_2'},
            {'name': 'test_3', 'user_id': 'test_user_id_3'}
        ]
        key_pairs = [self._create_key_pair(p) for p in params]

        for key in key_pairs:
            real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
            self._assertEqualObjects(key, real_key)

    def test_key_pair_get_no_results(self):
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          self.ctxt, param['user_id'], param['name'])

    def test_key_pair_get_deleted(self):
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        key_pair_created = self._create_key_pair(param)

        db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          self.ctxt, param['user_id'], param['name'])

        ctxt = self.ctxt.elevated(read_deleted='yes')
        key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
                                           param['name'])
        ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
        self._assertEqualObjects(key_pair_deleted, key_pair_created,
                                 ignored_keys)
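        # Soft delete stores the row id in the 'deleted' column rather than
        # a plain flag, so the unique (user_id, name) constraint stays
        # usable for a re-created key pair.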
        self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])

    def test_key_pair_get_all_by_user(self):
        params = [
            {'name': 'test_1', 'user_id': 'test_user_id_1'},
            {'name': 'test_2', 'user_id': 'test_user_id_1'},
            {'name': 'test_3', 'user_id': 'test_user_id_2'}
        ]
        key_pairs_user_1 = [self._create_key_pair(p) for p in params
                            if p['user_id'] == 'test_user_id_1']
        key_pairs_user_2 = [self._create_key_pair(p) for p in params
                            if p['user_id'] == 'test_user_id_2']

        real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
        real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')

        self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
        self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)

    def test_key_pair_count_by_user(self):
        params = [
            {'name': 'test_1', 'user_id': 'test_user_id_1'},
            {'name': 'test_2', 'user_id': 'test_user_id_1'},
            {'name': 'test_3', 'user_id': 'test_user_id_2'}
        ]
        for p in params:
            self._create_key_pair(p)

        count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
        self.assertEqual(count_1, 2)

        count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
        self.assertEqual(count_2, 1)

    def test_key_pair_destroy(self):
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        self._create_key_pair(param)

        db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          self.ctxt, param['user_id'], param['name'])

    def test_key_pair_destroy_no_such_key(self):
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        self.assertRaises(exception.KeypairNotFound,
                          db.key_pair_destroy, self.ctxt,
                          param['user_id'], param['name'])


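# NOTE: archive_deleted_rows moves soft-deleted rows (deleted != 0) from each
# production table into its shadow_* twin, handling at most max_rows rows per
# call. A rough sketch of the per-table idea, not the actual implementation:
#
#     rows = SELECT ... FROM <table> WHERE deleted != 0 LIMIT max_rows
#     INSERT the rows INTO shadow_<table>, then DELETE them FROM <table>
#
# The tests below drive this through instance_id_mappings and dns_domains.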
class ArchiveTestCase(test.TestCase):

    def setUp(self):
        super(ArchiveTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.engine = get_engine()
        self.conn = self.engine.connect()
        self.metadata = MetaData()
        self.metadata.bind = self.engine
        self.table1 = Table("instance_id_mappings",
                            self.metadata,
                            autoload=True)
        self.shadow_table1 = Table("shadow_instance_id_mappings",
                                   self.metadata,
                                   autoload=True)
        self.table2 = Table("dns_domains",
                            self.metadata,
                            autoload=True)
        self.shadow_table2 = Table("shadow_dns_domains",
                                   self.metadata,
                                   autoload=True)
        self.consoles = Table("consoles",
                              self.metadata,
                              autoload=True)
        self.console_pools = Table("console_pools",
                                   self.metadata,
                                   autoload=True)
        self.shadow_consoles = Table("shadow_consoles",
                                     self.metadata,
                                     autoload=True)
        self.shadow_console_pools = Table("shadow_console_pools",
                                          self.metadata,
                                          autoload=True)
        self.uuidstrs = []
        for unused in xrange(6):
            self.uuidstrs.append(stdlib_uuid.uuid4().hex)
        self.ids = []

    def tearDown(self):
        super(ArchiveTestCase, self).tearDown()
        delete_statement1 = self.table1.delete(
            self.table1.c.uuid.in_(self.uuidstrs))
        self.conn.execute(delete_statement1)
        delete_statement2 = self.shadow_table1.delete(
            self.shadow_table1.c.uuid.in_(self.uuidstrs))
        self.conn.execute(delete_statement2)
        delete_statement3 = self.table2.delete(
            self.table2.c.domain.in_(self.uuidstrs))
        self.conn.execute(delete_statement3)
        delete_statement4 = self.shadow_table2.delete(
            self.shadow_table2.c.domain.in_(self.uuidstrs))
        self.conn.execute(delete_statement4)
        for table in [self.console_pools, self.consoles, self.shadow_consoles,
                      self.shadow_console_pools]:
            delete_statement5 = table.delete(table.c.id.in_(self.ids))
            self.conn.execute(delete_statement5)

    def test_archive_deleted_rows(self):
        # Add 6 rows to table
        for uuidstr in self.uuidstrs:
            insert_statement = self.table1.insert().values(uuid=uuidstr)
            self.conn.execute(insert_statement)
        # Set 4 to deleted
        update_statement = self.table1.update().\
            where(self.table1.c.uuid.in_(self.uuidstrs[:4])).\
            values(deleted=1)
        self.conn.execute(update_statement)
        query1 = select([self.table1]).where(
            self.table1.c.uuid.in_(self.uuidstrs))
        rows1 = self.conn.execute(query1).fetchall()
        # Verify we have 6 in main
        self.assertEqual(len(rows1), 6)
        query2 = select([self.shadow_table1]).\
            where(self.shadow_table1.c.uuid.in_(self.uuidstrs))
        rows2 = self.conn.execute(query2).fetchall()
        # Verify we have 0 in shadow
        self.assertEqual(len(rows2), 0)
        # Archive 2 rows
        db.archive_deleted_rows(self.context, max_rows=2)
        rows3 = self.conn.execute(query1).fetchall()
        # Verify we have 4 left in main
        self.assertEqual(len(rows3), 4)
        rows4 = self.conn.execute(query2).fetchall()
        # Verify we have 2 in shadow
        self.assertEqual(len(rows4), 2)
        # Archive 2 more rows
        db.archive_deleted_rows(self.context, max_rows=2)
        rows5 = self.conn.execute(query1).fetchall()
        # Verify we have 2 left in main
        self.assertEqual(len(rows5), 2)
        rows6 = self.conn.execute(query2).fetchall()
        # Verify we have 4 in shadow
        self.assertEqual(len(rows6), 4)
        # Try to archive more, but there are no deleted rows left.
        db.archive_deleted_rows(self.context, max_rows=2)
        rows7 = self.conn.execute(query1).fetchall()
        # Verify we still have 2 left in main
        self.assertEqual(len(rows7), 2)
        rows8 = self.conn.execute(query2).fetchall()
        # Verify we still have 4 in shadow
        self.assertEqual(len(rows8), 4)

    def test_archive_deleted_rows_for_table(self):
        tablename = "instance_id_mappings"
        # Add 6 rows to table
        for uuidstr in self.uuidstrs:
            insert_statement = self.table1.insert().values(uuid=uuidstr)
            self.conn.execute(insert_statement)
        # Set 4 to deleted
        update_statement = self.table1.update().\
            where(self.table1.c.uuid.in_(self.uuidstrs[:4])).\
            values(deleted=1)
        self.conn.execute(update_statement)
        query1 = select([self.table1]).where(
            self.table1.c.uuid.in_(self.uuidstrs))
        rows1 = self.conn.execute(query1).fetchall()
        # Verify we have 6 in main
        self.assertEqual(len(rows1), 6)
        query2 = select([self.shadow_table1]).\
            where(self.shadow_table1.c.uuid.in_(self.uuidstrs))
        rows2 = self.conn.execute(query2).fetchall()
        # Verify we have 0 in shadow
        self.assertEqual(len(rows2), 0)
        # Archive 2 rows
        db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
        rows3 = self.conn.execute(query1).fetchall()
        # Verify we have 4 left in main
        self.assertEqual(len(rows3), 4)
        rows4 = self.conn.execute(query2).fetchall()
        # Verify we have 2 in shadow
        self.assertEqual(len(rows4), 2)
        # Archive 2 more rows
        db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
        rows5 = self.conn.execute(query1).fetchall()
        # Verify we have 2 left in main
        self.assertEqual(len(rows5), 2)
        rows6 = self.conn.execute(query2).fetchall()
        # Verify we have 4 in shadow
        self.assertEqual(len(rows6), 4)
        # Try to archive more, but there are no deleted rows left.
        db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
        rows7 = self.conn.execute(query1).fetchall()
        # Verify we still have 2 left in main
        self.assertEqual(len(rows7), 2)
        rows8 = self.conn.execute(query2).fetchall()
        # Verify we still have 4 in shadow
        self.assertEqual(len(rows8), 4)

    def test_archive_deleted_rows_no_id_column(self):
        uuidstr0 = self.uuidstrs[0]
        insert_statement = self.table2.insert().values(domain=uuidstr0)
        self.conn.execute(insert_statement)
        update_statement = self.table2.update().\
            where(self.table2.c.domain == uuidstr0).\
            values(deleted=1)
        self.conn.execute(update_statement)
        query1 = select([self.table2], self.table2.c.domain == uuidstr0)
        rows1 = self.conn.execute(query1).fetchall()
        self.assertEqual(len(rows1), 1)
        query2 = select([self.shadow_table2],
                        self.shadow_table2.c.domain == uuidstr0)
        rows2 = self.conn.execute(query2).fetchall()
        self.assertEqual(len(rows2), 0)
        db.archive_deleted_rows(self.context, max_rows=1)
        rows3 = self.conn.execute(query1).fetchall()
        self.assertEqual(len(rows3), 0)
        rows4 = self.conn.execute(query2).fetchall()
        self.assertEqual(len(rows4), 1)

    def test_archive_deleted_rows_fk_constraint(self):
        # consoles.pool_id depends on console_pools.id.
        # SQLite doesn't enforce foreign key constraints without a pragma.
        dialect = self.engine.url.get_dialect()
        if dialect == sqlite.dialect:
            # We're seeing issues with foreign key support in SQLite 3.6.20;
            # SQLAlchemy doesn't support it at all with SQLite < 3.6.19.
            # It works fine in SQLite 3.7, so return early to skip this test
            # if running SQLite < 3.7.
            import sqlite3
            tup = sqlite3.sqlite_version_info
            if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
                self.skipTest(
                    'sqlite version too old for reliable SQLA foreign_keys')
            self.conn.execute("PRAGMA foreign_keys = ON")
        insert_statement = self.console_pools.insert().values(deleted=1)
        result = self.conn.execute(insert_statement)
        id1 = result.inserted_primary_key[0]
        self.ids.append(id1)
        insert_statement = self.consoles.insert().values(deleted=1,
                                                         pool_id=id1)
        result = self.conn.execute(insert_statement)
        id2 = result.inserted_primary_key[0]
        self.ids.append(id2)
        # The first try to archive console_pools should fail, due to FK.
        num = db.archive_deleted_rows_for_table(self.context, "console_pools")
        self.assertEqual(num, 0)
        # Then archiving consoles should work.
        num = db.archive_deleted_rows_for_table(self.context, "consoles")
        self.assertEqual(num, 1)
        # Then archiving console_pools should work.
        num = db.archive_deleted_rows_for_table(self.context, "console_pools")
        self.assertEqual(num, 1)