diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py deleted file mode 100644 index 3c3ac6c6..00000000 --- a/nova/tests/test_api.py +++ /dev/null @@ -1,615 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for the API endpoint.""" - -import random -import StringIO - -import boto -import boto.connection -from boto.ec2 import regioninfo -from boto import exception as boto_exc -# newer versions of boto use their own wrapper on top of httplib.HTTPResponse -if hasattr(boto.connection, 'HTTPResponse'): - httplib = boto.connection -else: - import httplib -import fixtures -import webob - -from nova.api import auth -from nova.api import ec2 -from nova.api.ec2 import apirequest -from nova.api.ec2 import ec2utils -from nova import block_device -from nova import context -from nova import exception -from nova.openstack.common import timeutils -from nova import test -from nova.tests import matchers - - -class FakeHttplibSocket(object): - """a fake socket implementation for httplib.HTTPResponse, trivial.""" - def __init__(self, response_string): - self.response_string = response_string - self._buffer = StringIO.StringIO(response_string) - - def makefile(self, _mode, _other): - """Returns the socket's internal buffer.""" - return self._buffer - - -class FakeHttplibConnection(object): - """A fake httplib.HTTPConnection for boto to use - - requests made via this connection actually get translated and routed into - our WSGI app, we then wait for the response and turn it back into - the HTTPResponse that boto expects. - """ - def __init__(self, app, host, is_secure=False): - self.app = app - self.host = host - - def request(self, method, path, data, headers): - req = webob.Request.blank(path) - req.method = method - req.body = data - req.headers = headers - req.headers['Accept'] = 'text/html' - req.host = self.host - # Call the WSGI app, get the HTTP response - resp = str(req.get_response(self.app)) - # For some reason, the response doesn't have "HTTP/1.0 " prepended; I - # guess that's a function the web server usually provides. 
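# NOTE(editor): aside, not part of the patch -- a minimal self-contained
# sketch of the fake-socket trick FakeHttplibConnection relies on. In
# Python 2, httplib.HTTPResponse only needs an object exposing makefile(),
# so a StringIO buffer parses like a real response once the "HTTP/1.0 ..."
# status line has been prepended (as the next line of the class does):
import StringIO
import httplib

class _FakeSock(object):
    def __init__(self, raw):
        self._buf = StringIO.StringIO(raw)

    def makefile(self, _mode, _bufsize):
        # httplib reads the status line, headers and body from this buffer.
        return self._buf

raw = "HTTP/1.0 200 OK\r\nContent-Length: 2\r\n\r\nok"
response = httplib.HTTPResponse(_FakeSock(raw))
response.begin()
assert response.status == 200
assert response.read() == "ok"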
- resp = "HTTP/1.0 %s" % resp - self.sock = FakeHttplibSocket(resp) - self.http_response = httplib.HTTPResponse(self.sock) - # NOTE(vish): boto is accessing private variables for some reason - self._HTTPConnection__response = self.http_response - self.http_response.begin() - - def getresponse(self): - return self.http_response - - def getresponsebody(self): - return self.sock.response_string - - def close(self): - """Required for compatibility with boto/tornado.""" - pass - - -class XmlConversionTestCase(test.TestCase): - """Unit test api xml conversion.""" - def test_number_conversion(self): - conv = ec2utils._try_convert - self.assertEqual(conv('None'), None) - self.assertEqual(conv('True'), True) - self.assertEqual(conv('TRUE'), True) - self.assertEqual(conv('true'), True) - self.assertEqual(conv('False'), False) - self.assertEqual(conv('FALSE'), False) - self.assertEqual(conv('false'), False) - self.assertEqual(conv('0'), 0) - self.assertEqual(conv('42'), 42) - self.assertEqual(conv('3.14'), 3.14) - self.assertEqual(conv('-57.12'), -57.12) - self.assertEqual(conv('0x57'), 0x57) - self.assertEqual(conv('-0x57'), -0x57) - self.assertEqual(conv('-'), '-') - self.assertEqual(conv('-0'), 0) - self.assertEqual(conv('0.0'), 0.0) - self.assertEqual(conv('1e-8'), 0.0) - self.assertEqual(conv('-1e-8'), 0.0) - self.assertEqual(conv('0xDD8G'), '0xDD8G') - self.assertEqual(conv('0XDD8G'), '0XDD8G') - self.assertEqual(conv('-stringy'), '-stringy') - self.assertEqual(conv('stringy'), 'stringy') - self.assertEqual(conv('add'), 'add') - self.assertEqual(conv('remove'), 'remove') - self.assertEqual(conv(''), '') - - -class Ec2utilsTestCase(test.TestCase): - def test_ec2_id_to_id(self): - self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30) - self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29) - self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28) - self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27) - - def test_bad_ec2_id(self): - self.assertRaises(exception.InvalidEc2Id, - ec2utils.ec2_id_to_id, - 'badone') - - def test_id_to_ec2_id(self): - self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e') - self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d') - self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c') - self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b') - - def test_dict_from_dotted_str(self): - in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'), - ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'), - ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'), - ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'), - ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'), - ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')] - expected_dict = { - 'block_device_mapping': { - '1': {'device_name': '/dev/sda1', - 'ebs': {'snapshot_id': 'snap-0000001c', - 'volume_size': 80, - 'delete_on_termination': False}}, - '2': {'device_name': '/dev/sdc', - 'virtual_name': 'ephemeral0'}}} - out_dict = ec2utils.dict_from_dotted_str(in_str) - - self.assertThat(out_dict, matchers.DictMatches(expected_dict)) - - def test_properties_root_defice_name(self): - mappings = [{"device": "/dev/sda1", "virtual": "root"}] - properties0 = {'mappings': mappings} - properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings} - - root_device_name = block_device.properties_root_device_name( - properties0) - self.assertEqual(root_device_name, '/dev/sda1') - - root_device_name = block_device.properties_root_device_name( - properties1) - 
self.assertEqual(root_device_name, '/dev/sdb') - - def test_mapping_prepend_dev(self): - mappings = [ - {'virtual': 'ami', - 'device': 'sda1'}, - {'virtual': 'root', - 'device': '/dev/sda1'}, - - {'virtual': 'swap', - 'device': 'sdb1'}, - {'virtual': 'swap', - 'device': '/dev/sdb2'}, - - {'virtual': 'ephemeral0', - 'device': 'sdc1'}, - {'virtual': 'ephemeral1', - 'device': '/dev/sdc1'}] - expected_result = [ - {'virtual': 'ami', - 'device': 'sda1'}, - {'virtual': 'root', - 'device': '/dev/sda1'}, - - {'virtual': 'swap', - 'device': '/dev/sdb1'}, - {'virtual': 'swap', - 'device': '/dev/sdb2'}, - - {'virtual': 'ephemeral0', - 'device': '/dev/sdc1'}, - {'virtual': 'ephemeral1', - 'device': '/dev/sdc1'}] - self.assertThat(block_device.mappings_prepend_dev(mappings), - matchers.DictListMatches(expected_result)) - - -class ApiEc2TestCase(test.TestCase): - """Unit test for the cloud controller on an EC2 API.""" - def setUp(self): - super(ApiEc2TestCase, self).setUp() - self.host = '127.0.0.1' - # NOTE(vish): skipping the Authorizer - roles = ['sysadmin', 'netadmin'] - ctxt = context.RequestContext('fake', 'fake', roles=roles) - self.app = auth.InjectContext(ctxt, ec2.FaultWrapper( - ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor() - ), 'nova.api.ec2.cloud.CloudController')))) - self.useFixture(fixtures.FakeLogger('boto')) - - def expect_http(self, host=None, is_secure=False, api_version=None): - """Returns a new EC2 connection.""" - self.ec2 = boto.connect_ec2( - aws_access_key_id='fake', - aws_secret_access_key='fake', - is_secure=False, - region=regioninfo.RegionInfo(None, 'test', self.host), - port=8773, - path='/services/Cloud') - if api_version: - self.ec2.APIVersion = api_version - - self.mox.StubOutWithMock(self.ec2, 'new_http_connection') - self.http = FakeHttplibConnection( - self.app, '%s:8773' % (self.host), False) - # pylint: disable=E1103 - if boto.Version >= '2': - self.ec2.new_http_connection(host or '%s:8773' % (self.host), - is_secure).AndReturn(self.http) - else: - self.ec2.new_http_connection(host, is_secure).AndReturn(self.http) - return self.http - - def test_return_valid_isoformat(self): - """ - Ensure that the ec2 api returns datetime in xs:dateTime - (which apparently isn't datetime.isoformat()) - NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297 - """ - conv = apirequest._database_to_isoformat - # sqlite database representation with microseconds - time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276", - "%Y-%m-%d %H:%M:%S.%f") - self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z') - # mysql database representation - time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18", - "%Y-%m-%d %H:%M:%S") - self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z') - - def test_xmlns_version_matches_request_version(self): - self.expect_http(api_version='2010-10-30') - self.mox.ReplayAll() - - # Any request should be fine - self.ec2.get_all_instances() - self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(), - 'The version in the xmlns of the response does ' - 'not match the API version given in the request.') - - def test_describe_instances(self): - """Test that, after creating a user and a project, the describe - instances call to the API works properly""" - self.expect_http() - self.mox.ReplayAll() - self.assertEqual(self.ec2.get_all_instances(), []) - - def test_terminate_invalid_instance(self): - # Attempt to terminate an invalid instance.
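# NOTE(editor): aside -- a worked example of the xs:dateTime point made by
# test_return_valid_isoformat above: datetime.isoformat() keeps full
# microseconds and no 'Z', while the EC2 layer wants millisecond precision
# plus a trailing 'Z'. to_xs_datetime is a hypothetical helper for
# illustration, not nova's implementation.
import datetime

def to_xs_datetime(t):
    # truncate microseconds to milliseconds and append 'Z'
    return t.strftime('%Y-%m-%dT%H:%M:%S.') + '%03dZ' % (t.microsecond // 1000)

t = datetime.datetime(2011, 2, 21, 20, 14, 10, 634276)
assert t.isoformat() == '2011-02-21T20:14:10.634276'
assert to_xs_datetime(t) == '2011-02-21T20:14:10.634Z'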
- self.expect_http() - self.mox.ReplayAll() - self.assertRaises(boto_exc.EC2ResponseError, - self.ec2.terminate_instances, "i-00000005") - - def test_get_all_key_pairs(self): - """Test that, after creating a user and project and generating - a key pair, that the API call to list key pairs works properly""" - keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - self.expect_http() - self.mox.ReplayAll() - self.ec2.create_key_pair(keyname) - rv = self.ec2.get_all_key_pairs() - results = [k for k in rv if k.name == keyname] - self.assertEquals(len(results), 1) - - def test_create_duplicate_key_pair(self): - """Test that, after successfully generating a keypair, - requesting a second keypair with the same name fails sanely""" - self.expect_http() - self.mox.ReplayAll() - self.ec2.create_key_pair('test') - - try: - self.ec2.create_key_pair('test') - except boto_exc.EC2ResponseError as e: - if e.code == 'InvalidKeyPair.Duplicate': - pass - else: - self.assertEqual('InvalidKeyPair.Duplicate', e.code) - else: - self.fail('Exception not raised.') - - def test_get_all_security_groups(self): - # Test that we can retrieve security groups. - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - self.assertEquals(len(rv), 1) - self.assertEquals(rv[0].name, 'default') - - def test_create_delete_security_group(self): - # Test that we can create a security group. - self.expect_http() - self.mox.ReplayAll() - - security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - - self.ec2.create_security_group(security_group_name, 'test group') - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - self.assertEquals(len(rv), 2) - self.assertTrue(security_group_name in [group.name for group in rv]) - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - def test_group_name_valid_chars_security_group(self): - """Test that we sanely handle invalid security group names. - EC2 API Spec states we should only accept alphanumeric characters, - spaces, dashes, and underscores. Amazon implementation - accepts more characters - so, [:print:] is ok. """ - - bad_strict_ec2 = "aa \t\x01\x02\x7f" - bad_amazon_ec2 = "aa #^% -=99" - test_raise = [ - (True, bad_amazon_ec2, "test desc"), - (True, "test name", bad_amazon_ec2), - (False, bad_strict_ec2, "test desc"), - ] - for test in test_raise: - self.expect_http() - self.mox.ReplayAll() - self.flags(ec2_strict_validation=test[0]) - self.assertRaises(boto_exc.EC2ResponseError, - self.ec2.create_security_group, - test[1], - test[2]) - test_accept = [ - (False, bad_amazon_ec2, "test desc"), - (False, "test name", bad_amazon_ec2), - ] - for test in test_accept: - self.expect_http() - self.mox.ReplayAll() - self.flags(ec2_strict_validation=test[0]) - self.ec2.create_security_group(test[1], test[2]) - self.expect_http() - self.mox.ReplayAll() - self.ec2.delete_security_group(test[1]) - - def test_group_name_valid_length_security_group(self): - """Test that we sanely handle invalid security group names. 
- API Spec states that the length should not exceed 255 chars """ - self.expect_http() - self.mox.ReplayAll() - - # Test block group_name > 255 chars - security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc") - for x in range(random.randint(256, 266))) - - self.assertRaises(boto_exc.EC2ResponseError, - self.ec2.create_security_group, - security_group_name, - 'test group') - - def test_authorize_revoke_security_group_cidr(self): - """ - Test that we can add and remove CIDR based rules - to a security group - """ - self.expect_http() - self.mox.ReplayAll() - - security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - - group = self.ec2.create_security_group(security_group_name, - 'test group') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.authorize('tcp', 80, 81, '0.0.0.0/0') - group.authorize('icmp', -1, -1, '0.0.0.0/0') - group.authorize('udp', 80, 81, '0.0.0.0/0') - group.authorize('tcp', 1, 65535, '0.0.0.0/0') - group.authorize('udp', 1, 65535, '0.0.0.0/0') - group.authorize('icmp', 1, 0, '0.0.0.0/0') - group.authorize('icmp', 0, 1, '0.0.0.0/0') - group.authorize('icmp', 0, 0, '0.0.0.0/0') - - def _assert(message, *args): - try: - group.authorize(*args) - except boto_exc.EC2ResponseError as e: - self.assertEqual(e.status, 400, 'Expected status to be 400') - self.assertIn(message, e.error_message) - else: - raise self.failureException, 'EC2ResponseError not raised' - - # Invalid CIDR address - _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444') - # Missing ports - _assert('Not enough parameters', 'tcp', '0.0.0.0/0') - # from port cannot be greater than to port - _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0') - # For tcp, negative values are not allowed - _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0') - # For tcp, valid port range 1-65535 - _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0') - # Invalid Cidr for ICMP type - _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4') - # Invalid protocol - _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0') - # Invalid port - _assert('An unknown error has occurred', 'tcp', " ", "81", '0.0.0.0/0') - # Invalid icmp port - _assert('An unknown error has occurred', 'icmp', " ", "81", - '0.0.0.0/0') - # Invalid CIDR Address - _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0') - # Invalid CIDR Address - _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/') - # Invalid Cidr ports - _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - group = [grp for grp in rv if grp.name == security_group_name][0] - - self.assertEquals(len(group.rules), 8) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.revoke('tcp', 80, 81, '0.0.0.0/0') - group.revoke('icmp', -1, -1, '0.0.0.0/0') - group.revoke('udp', 80, 81, '0.0.0.0/0') - group.revoke('tcp', 1, 65535, '0.0.0.0/0') - group.revoke('udp', 1, 65535, '0.0.0.0/0') - group.revoke('icmp', 1, 0, '0.0.0.0/0') - group.revoke('icmp', 0, 1, '0.0.0.0/0') - group.revoke('icmp', 0, 0, '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.expect_http() - 
self.mox.ReplayAll() - group.connection = self.ec2 - - rv = self.ec2.get_all_security_groups() - - self.assertEqual(len(rv), 1) - self.assertEqual(rv[0].name, 'default') - - def test_authorize_revoke_security_group_cidr_v6(self): - """ - Test that we can add and remove CIDR based rules - to a security group for IPv6 - """ - self.expect_http() - self.mox.ReplayAll() - - security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - - group = self.ec2.create_security_group(security_group_name, - 'test group') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.authorize('tcp', 80, 81, '::/0') - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - group = [grp for grp in rv if grp.name == security_group_name][0] - self.assertEquals(len(group.rules), 1) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '::/0') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.revoke('tcp', 80, 81, '::/0') - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - rv = self.ec2.get_all_security_groups() - - self.assertEqual(len(rv), 1) - self.assertEqual(rv[0].name, 'default') - - def test_authorize_revoke_security_group_foreign_group(self): - """ - Test that we can grant and revoke another security group access - to a security group - """ - self.expect_http() - self.mox.ReplayAll() - - rand_string = 'sdiuisudfsdcnpaqwertasd' - security_group_name = "".join(random.choice(rand_string) - for x in range(random.randint(4, 8))) - other_security_group_name = "".join(random.choice(rand_string) - for x in range(random.randint(4, 8))) - - group = self.ec2.create_security_group(security_group_name, - 'test group') - - self.expect_http() - self.mox.ReplayAll() - - other_group = self.ec2.create_security_group(other_security_group_name, - 'some other group') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.authorize(src_group=other_group) - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - # I don't bother checking that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that.
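# NOTE(editor): aside -- the CIDR cases exercised by the authorize tests
# above reduce to a check of roughly this shape (illustrative only, not
# nova's validator):
import socket

def is_valid_cidr(cidr):
    address, sep, prefix = cidr.partition('/')
    try:
        socket.inet_aton(address)  # rejects malformed octets such as 444
    except socket.error:
        return False
    return sep == '/' and prefix.isdigit() and int(prefix) <= 32

assert is_valid_cidr('0.0.0.0/0')
assert not is_valid_cidr('0.0.0.0/0444')   # prefix length out of range
assert not is_valid_cidr('0.0.444.0/4')    # bad octet
assert not is_valid_cidr('0.0.0.0/')       # missing prefix length
assert not is_valid_cidr('0.0.0.0')        # missing '/prefix' entirely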
- for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 3) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' % - (other_security_group_name, 'fake')) - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - for group in rv: - if group.name == security_group_name: - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - group.revoke(src_group=other_group) - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - self.ec2.delete_security_group(other_security_group_name) diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py deleted file mode 100644 index efe243d1..00000000 --- a/nova/tests/test_db_api.py +++ /dev/null @@ -1,4902 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# encoding=UTF8 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for the DB API.""" - -import copy -import datetime -import types -import uuid as stdlib_uuid - -import mox -from oslo.config import cfg -from sqlalchemy.dialects import sqlite -from sqlalchemy import exc -from sqlalchemy.exc import IntegrityError -from sqlalchemy import MetaData -from sqlalchemy.orm import query -from sqlalchemy.sql.expression import select - -from nova.compute import vm_states -from nova import context -from nova import db -from nova.db.sqlalchemy import api as sqlalchemy_api -from nova.db.sqlalchemy import models -from nova.db.sqlalchemy import utils as db_utils -from nova import exception -from nova.openstack.common.db.sqlalchemy import session as db_session -from nova.openstack.common import timeutils -from nova.openstack.common import uuidutils -from nova.quota import ReservableResource -from nova import test -from nova.tests import matchers -from nova import utils - - -CONF = cfg.CONF -CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker') -CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker') - -get_engine = db_session.get_engine -get_session = db_session.get_session - - -def _quota_reserve(context, project_id): - """Create sample Quota, QuotaUsage and Reservation objects. - - There is no method db.quota_usage_create(), so we have to use - db.quota_reserve() for creating QuotaUsage objects. - - Returns reservations uuids. 
- - """ - def get_sync(resource, usage): - def sync(elevated, project_id, session): - return {resource: usage} - return sync - quotas = {} - resources = {} - deltas = {} - for i in range(3): - resource = 'res%d' % i - quotas[resource] = db.quota_create(context, project_id, resource, i) - resources[resource] = ReservableResource(resource, - get_sync(resource, i), 'quota_res_%d' % i) - deltas[resource] = i - return db.quota_reserve(context, resources, quotas, deltas, - datetime.datetime.utcnow(), datetime.datetime.utcnow(), - datetime.timedelta(days=1), project_id) - - -class DbTestCase(test.TestCase): - def setUp(self): - super(DbTestCase, self).setUp() - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, self.project_id) - - def create_instance_with_args(self, **kwargs): - args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1', - 'node': 'node1', 'project_id': self.project_id, - 'vm_state': 'fake'} - if 'context' in kwargs: - ctxt = kwargs.pop('context') - args['project_id'] = ctxt.project_id - else: - ctxt = self.context - args.update(kwargs) - return db.instance_create(ctxt, args) - - def fake_metadata(self, content): - meta = {} - for i in range(0, 10): - meta["foo%i" % i] = "this is %s item %i" % (content, i) - return meta - - def create_metadata_for_instance(self, instance_uuid): - meta = self.fake_metadata('metadata') - db.instance_metadata_update(self.context, instance_uuid, meta, False) - sys_meta = self.fake_metadata('system_metadata') - db.instance_system_metadata_update(self.context, instance_uuid, - sys_meta, False) - return meta, sys_meta - - -class DbApiTestCase(DbTestCase): - def test_create_instance_unique_hostname(self): - otherprojectcontext = context.RequestContext(self.user_id, - "%s2" % self.project_id) - - self.create_instance_with_args(hostname='fake_name') - - # With scope 'global' any duplicate should fail, be it this project: - self.flags(osapi_compute_unique_server_name_scope='global') - self.assertRaises(exception.InstanceExists, - self.create_instance_with_args, - hostname='fake_name') - - # or another: - self.assertRaises(exception.InstanceExists, - self.create_instance_with_args, - context=otherprojectcontext, - hostname='fake_name') - - # With scope 'project' a duplicate in the project should fail: - self.flags(osapi_compute_unique_server_name_scope='project') - self.assertRaises(exception.InstanceExists, - self.create_instance_with_args, - hostname='fake_name') - - # With scope 'project' a duplicate in a different project should work: - self.flags(osapi_compute_unique_server_name_scope='project') - self.create_instance_with_args(context=otherprojectcontext, - hostname='fake_name') - - self.flags(osapi_compute_unique_server_name_scope=None) - - def test_instance_metadata_get_all_query(self): - self.create_instance_with_args(metadata={'foo': 'bar'}) - self.create_instance_with_args(metadata={'baz': 'quux'}) - - result = db.instance_metadata_get_all(self.context, []) - self.assertEqual(2, len(result)) - - result = db.instance_metadata_get_all(self.context, - [{'key': 'foo'}]) - self.assertEqual(1, len(result)) - - result = db.instance_metadata_get_all(self.context, - [{'value': 'quux'}]) - self.assertEqual(1, len(result)) - - result = db.instance_metadata_get_all(self.context, - [{'value': 'quux'}, - {'key': 'foo'}]) - self.assertEqual(2, len(result)) - - def test_ec2_ids_not_found_are_printable(self): - def check_exc_format(method): - try: - method(self.context, 'fake') - except exception.NotFound as 
exc: - self.assertTrue('fake' in unicode(exc)) - - check_exc_format(db.get_ec2_volume_id_by_uuid) - check_exc_format(db.get_volume_uuid_by_ec2_id) - check_exc_format(db.get_ec2_snapshot_id_by_uuid) - check_exc_format(db.get_snapshot_uuid_by_ec2_id) - check_exc_format(db.get_ec2_instance_id_by_uuid) - check_exc_format(db.get_instance_uuid_by_ec2_id) - - def test_instance_get_all_with_meta(self): - inst = self.create_instance_with_args() - fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid']) - result = db.instance_get_all(self.context) - for inst in result: - meta = utils.metadata_to_dict(inst['metadata']) - self.assertEqual(meta, fake_meta) - sys_meta = utils.metadata_to_dict(inst['system_metadata']) - self.assertEqual(sys_meta, fake_sys) - - def test_instance_get_all_by_filters_with_meta(self): - inst = self.create_instance_with_args() - fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid']) - result = db.instance_get_all_by_filters(self.context, {}) - for inst in result: - meta = utils.metadata_to_dict(inst['metadata']) - self.assertEqual(meta, fake_meta) - sys_meta = utils.metadata_to_dict(inst['system_metadata']) - self.assertEqual(sys_meta, fake_sys) - - def test_instance_get_all_by_filters_without_meta(self): - inst = self.create_instance_with_args() - fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid']) - result = db.instance_get_all_by_filters(self.context, {}, - columns_to_join=[]) - for inst in result: - meta = utils.metadata_to_dict(inst['metadata']) - self.assertEqual(meta, {}) - sys_meta = utils.metadata_to_dict(inst['system_metadata']) - self.assertEqual(sys_meta, {}) - - def test_instance_get_all_by_filters(self): - self.create_instance_with_args() - self.create_instance_with_args() - result = db.instance_get_all_by_filters(self.context, {}) - self.assertEqual(2, len(result)) - - def test_instance_get_all_by_filters_regex(self): - self.create_instance_with_args(display_name='test1') - self.create_instance_with_args(display_name='teeeest2') - self.create_instance_with_args(display_name='diff') - result = db.instance_get_all_by_filters(self.context, - {'display_name': 't.*st.'}) - self.assertEqual(2, len(result)) - - def test_instance_get_all_by_filters_exact_match(self): - self.create_instance_with_args(host='host1') - self.create_instance_with_args(host='host12') - result = db.instance_get_all_by_filters(self.context, - {'host': 'host1'}) - self.assertEqual(1, len(result)) - - def test_instance_get_all_by_filters_metadata(self): - self.create_instance_with_args(metadata={'foo': 'bar'}) - self.create_instance_with_args() - result = db.instance_get_all_by_filters(self.context, - {'metadata': {'foo': 'bar'}}) - self.assertEqual(1, len(result)) - - def test_instance_get_all_by_filters_unicode_value(self): - self.create_instance_with_args(display_name=u'test♥') - result = db.instance_get_all_by_filters(self.context, - {'display_name': u'test'}) - self.assertEqual(1, len(result)) - - def test_instance_get_by_uuid(self): - inst = self.create_instance_with_args() - fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid']) - result = db.instance_get_by_uuid(self.context, inst['uuid']) - meta = utils.metadata_to_dict(result['metadata']) - self.assertEqual(meta, fake_meta) - sys_meta = utils.metadata_to_dict(result['system_metadata']) - self.assertEqual(sys_meta, fake_sys) - - def test_instance_get_by_uuid_join_empty(self): - inst = self.create_instance_with_args() - fake_meta, fake_sys = 
self.create_metadata_for_instance(inst['uuid']) - result = db.instance_get_by_uuid(self.context, inst['uuid'], - columns_to_join=[]) - meta = utils.metadata_to_dict(result['metadata']) - self.assertEqual(meta, {}) - sys_meta = utils.metadata_to_dict(result['system_metadata']) - self.assertEqual(sys_meta, {}) - - def test_instance_get_by_uuid_join_meta(self): - inst = self.create_instance_with_args() - fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid']) - result = db.instance_get_by_uuid(self.context, inst['uuid'], - columns_to_join=['metadata']) - meta = utils.metadata_to_dict(result['metadata']) - self.assertEqual(meta, fake_meta) - sys_meta = utils.metadata_to_dict(result['system_metadata']) - self.assertEqual(sys_meta, {}) - - def test_instance_get_by_uuid_join_sys_meta(self): - inst = self.create_instance_with_args() - fake_meta, fake_sys = self.create_metadata_for_instance(inst['uuid']) - result = db.instance_get_by_uuid(self.context, inst['uuid'], - columns_to_join=['system_metadata']) - meta = utils.metadata_to_dict(result['metadata']) - self.assertEqual(meta, {}) - sys_meta = utils.metadata_to_dict(result['system_metadata']) - self.assertEqual(sys_meta, fake_sys) - - def test_instance_get_all_by_filters_deleted(self): - inst1 = self.create_instance_with_args() - inst2 = self.create_instance_with_args(reservation_id='b') - db.instance_destroy(self.context, inst1['uuid']) - result = db.instance_get_all_by_filters(self.context, {}) - self.assertEqual(2, len(result)) - self.assertIn(inst1['id'], [result[0]['id'], result[1]['id']]) - self.assertIn(inst2['id'], [result[0]['id'], result[1]['id']]) - if inst1['id'] == result[0]['id']: - self.assertTrue(result[0]['deleted']) - else: - self.assertTrue(result[1]['deleted']) - - def test_instance_get_all_by_filters_deleted_and_soft_deleted(self): - inst1 = self.create_instance_with_args() - inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED) - inst3 = self.create_instance_with_args() - db.instance_destroy(self.context, inst1['uuid']) - result = db.instance_get_all_by_filters(self.context, - {'deleted': True}) - self.assertEqual(2, len(result)) - self.assertIn(inst1['id'], [result[0]['id'], result[1]['id']]) - self.assertIn(inst2['id'], [result[0]['id'], result[1]['id']]) - - def test_instance_get_all_by_filters_deleted_no_soft_deleted(self): - inst1 = self.create_instance_with_args() - inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED) - inst3 = self.create_instance_with_args() - db.instance_destroy(self.context, inst1['uuid']) - result = db.instance_get_all_by_filters(self.context, - {'deleted': True, - 'soft_deleted': False}) - self.assertEqual(1, len(result)) - self.assertEqual(inst1['id'], result[0]['id']) - - def test_instance_get_all_by_filters_alive_and_soft_deleted(self): - inst1 = self.create_instance_with_args() - inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED) - inst3 = self.create_instance_with_args() - db.instance_destroy(self.context, inst1['uuid']) - result = db.instance_get_all_by_filters(self.context, - {'deleted': False, - 'soft_deleted': True}) - self.assertEqual(2, len(result)) - self.assertIn(inst2['id'], [result[0]['id'], result[1]['id']]) - self.assertIn(inst3['id'], [result[0]['id'], result[1]['id']]) - - def test_instance_get_all_by_host_and_node_no_join(self): - # Test that system metadata is not joined. 
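# NOTE(editor): aside -- metadata comes back from the db layer as a list of
# {'key': ..., 'value': ...} rows; utils.metadata_to_dict, used throughout
# the tests above, flattens that to a plain dict, roughly like this:
def metadata_to_dict(metadata):
    return dict((row['key'], row['value']) for row in metadata)

rows = [{'key': 'foo0', 'value': 'this is metadata item 0'},
        {'key': 'foo1', 'value': 'this is metadata item 1'}]
assert metadata_to_dict(rows) == {'foo0': 'this is metadata item 0',
                                  'foo1': 'this is metadata item 1'}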
- sys_meta = {'foo': 'bar'} - expected = self.create_instance_with_args(system_metadata=sys_meta) - - elevated = self.context.elevated() - instances = db.instance_get_all_by_host_and_node(elevated, 'host1', - 'node1') - self.assertEqual(1, len(instances)) - instance = instances[0] - self.assertEqual(expected['uuid'], instance['uuid']) - sysmeta = dict(instance)['system_metadata'] - self.assertEqual(len(sysmeta), 0) - - def test_migration_get_unconfirmed_by_dest_compute(self): - ctxt = context.get_admin_context() - - # Ensure no migrations are returned. - results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10, - 'fake_host') - self.assertEqual(0, len(results)) - - # Ensure no migrations are returned. - results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10, - 'fake_host2') - self.assertEqual(0, len(results)) - - updated_at = datetime.datetime(2000, 01, 01, 12, 00, 00) - values = {"status": "finished", "updated_at": updated_at, - "dest_compute": "fake_host2"} - migration = db.migration_create(ctxt, values) - - # Ensure different host is not returned - results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10, - 'fake_host') - self.assertEqual(0, len(results)) - - # Ensure one migration older than 10 seconds is returned. - results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10, - 'fake_host2') - self.assertEqual(1, len(results)) - db.migration_update(ctxt, migration['id'], {"status": "CONFIRMED"}) - - # Ensure the new migration is not returned. - updated_at = timeutils.utcnow() - values = {"status": "finished", "updated_at": updated_at, - "dest_compute": "fake_host2"} - migration = db.migration_create(ctxt, values) - results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10, - "fake_host2") - self.assertEqual(0, len(results)) - db.migration_update(ctxt, migration['id'], {"status": "CONFIRMED"}) - - def test_instance_get_all_hung_in_rebooting(self): - ctxt = context.get_admin_context() - - # Ensure no instances are returned. - results = db.instance_get_all_hung_in_rebooting(ctxt, 10) - self.assertEqual(0, len(results)) - - # Ensure one rebooting instance with updated_at older than 10 seconds - # is returned. - updated_at = datetime.datetime(2000, 01, 01, 12, 00, 00) - values = {"task_state": "rebooting", "updated_at": updated_at} - instance = db.instance_create(ctxt, values) - results = db.instance_get_all_hung_in_rebooting(ctxt, 10) - self.assertEqual(1, len(results)) - db.instance_update(ctxt, instance['uuid'], {"task_state": None}) - - # Ensure the newly rebooted instance is not returned. 
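# NOTE(editor): aside -- both "older than N seconds" queries above
# (unconfirmed migrations, instances hung in rebooting) boil down to a
# cutoff comparison like this (illustrative, not the actual query):
import datetime

def is_older_than(updated_at, seconds, now):
    return now - updated_at > datetime.timedelta(seconds=seconds)

now = datetime.datetime(2013, 1, 1, 12, 0, 0)
assert is_older_than(datetime.datetime(2000, 1, 1, 12, 0, 0), 10, now)
assert not is_older_than(now, 10, now)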
- updated_at = timeutils.utcnow() - values = {"task_state": "rebooting", "updated_at": updated_at} - instance = db.instance_create(ctxt, values) - results = db.instance_get_all_hung_in_rebooting(ctxt, 10) - self.assertEqual(0, len(results)) - db.instance_update(ctxt, instance['uuid'], {"task_state": None}) - - def test_instance_update_with_expected_vm_state(self): - ctxt = context.get_admin_context() - uuid = uuidutils.generate_uuid() - updates = {'expected_vm_state': 'meow', - 'moo': 'cow'} - - class FakeInstance(dict): - def save(self, session=None): - pass - - fake_instance_values = {'vm_state': 'meow', - 'hostname': '', - 'metadata': None, - 'system_metadata': None} - fake_instance = FakeInstance(fake_instance_values) - - self.mox.StubOutWithMock(sqlalchemy_api, '_instance_get_by_uuid') - self.mox.StubOutWithMock(fake_instance, 'save') - - sqlalchemy_api._instance_get_by_uuid(ctxt, uuid, - session=mox.IgnoreArg()).AndReturn(fake_instance) - fake_instance.save(session=mox.IgnoreArg()) - - self.mox.ReplayAll() - - result = db.instance_update(ctxt, uuid, updates) - expected_instance = dict(fake_instance_values) - expected_instance['moo'] = 'cow' - self.assertEqual(expected_instance, result) - - def test_instance_update_with_unexpected_vm_state(self): - ctxt = context.get_admin_context() - uuid = uuidutils.generate_uuid() - updates = {'expected_vm_state': 'meow'} - fake_instance = {'vm_state': 'nomatch'} - - self.mox.StubOutWithMock(sqlalchemy_api, '_instance_get_by_uuid') - - sqlalchemy_api._instance_get_by_uuid(ctxt, uuid, - session=mox.IgnoreArg()).AndReturn(fake_instance) - - self.mox.ReplayAll() - - self.assertRaises(exception.UnexpectedVMStateError, - db.instance_update, ctxt, uuid, updates) - - def test_network_create_safe(self): - ctxt = context.get_admin_context() - values = {'host': 'localhost', 'project_id': 'project1'} - network = db.network_create_safe(ctxt, values) - self.assertNotEqual(None, network['uuid']) - self.assertEqual(36, len(network['uuid'])) - db_network = db.network_get(ctxt, network['id']) - self.assertEqual(network['uuid'], db_network['uuid']) - - def test_network_delete_safe(self): - ctxt = context.get_admin_context() - values = {'host': 'localhost', 'project_id': 'project1'} - network = db.network_create_safe(ctxt, values) - db_network = db.network_get(ctxt, network['id']) - values = {'network_id': network['id'], 'address': 'fake1'} - address1 = db.fixed_ip_create(ctxt, values)['address'] - values = {'network_id': network['id'], - 'address': 'fake2', - 'allocated': True} - address2 = db.fixed_ip_create(ctxt, values)['address'] - self.assertRaises(exception.NetworkInUse, - db.network_delete_safe, ctxt, network['id']) - db.fixed_ip_update(ctxt, address2, {'allocated': False}) - network = db.network_delete_safe(ctxt, network['id']) - self.assertRaises(exception.FixedIpNotFoundForAddress, - db.fixed_ip_get_by_address, ctxt, address1) - ctxt = ctxt.elevated(read_deleted='yes') - fixed_ip = db.fixed_ip_get_by_address(ctxt, address1) - self.assertTrue(fixed_ip['deleted']) - - def test_network_create_with_duplicate_vlan(self): - ctxt = context.get_admin_context() - values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1} - values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1} - db.network_create_safe(ctxt, values1) - self.assertRaises(exception.DuplicateVlan, - db.network_create_safe, ctxt, values2) - - def test_network_update_with_duplicate_vlan(self): - ctxt = context.get_admin_context() - values1 = {'host': 'localhost', 'project_id': 
'project1', 'vlan': 1} - values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2} - network_ref = db.network_create_safe(ctxt, values1) - db.network_create_safe(ctxt, values2) - self.assertRaises(exception.DuplicateVlan, - db.network_update, - ctxt, network_ref["id"], values2) - - def test_instance_update_with_instance_uuid(self): - # test instance_update() works when an instance UUID is passed. - ctxt = context.get_admin_context() - - # Create an instance with some metadata - values = {'metadata': {'host': 'foo', 'key1': 'meow'}, - 'system_metadata': {'original_image_ref': 'blah'}} - instance = db.instance_create(ctxt, values) - - # Update the metadata - values = {'metadata': {'host': 'bar', 'key2': 'wuff'}, - 'system_metadata': {'original_image_ref': 'baz'}} - db.instance_update(ctxt, instance['uuid'], values) - - # Retrieve the user-provided metadata to ensure it was successfully - # updated - instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) - self.assertEqual('bar', instance_meta['host']) - self.assertEqual('wuff', instance_meta['key2']) - self.assertNotIn('key1', instance_meta) - - # Retrieve the system metadata to ensure it was successfully updated - system_meta = db.instance_system_metadata_get(ctxt, instance['uuid']) - self.assertEqual('baz', system_meta['original_image_ref']) - - def test_delete_instance_metadata_on_instance_destroy(self): - ctxt = context.get_admin_context() - - # Create an instance with some metadata - values = {'metadata': {'host': 'foo', 'key1': 'meow'}, - 'system_metadata': {'original_image_ref': 'blah'}} - instance = db.instance_create(ctxt, values) - instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) - self.assertEqual('foo', instance_meta['host']) - self.assertEqual('meow', instance_meta['key1']) - db.instance_destroy(ctxt, instance['uuid']) - instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) - # Make sure instance metadata is deleted as well - self.assertEqual({}, instance_meta) - - def test_instance_update_unique_name(self): - otherprojectcontext = context.RequestContext(self.user_id, - "%s2" % self.project_id) - - inst = self.create_instance_with_args(hostname='fake_name') - uuid1p1 = inst['uuid'] - inst = self.create_instance_with_args(hostname='fake_name2') - uuid2p1 = inst['uuid'] - - inst = self.create_instance_with_args(context=otherprojectcontext, - hostname='fake_name3') - uuid1p2 = inst['uuid'] - - # osapi_compute_unique_server_name_scope is unset so this should work: - values = {'hostname': 'fake_name2'} - db.instance_update(self.context, uuid1p1, values) - values = {'hostname': 'fake_name'} - db.instance_update(self.context, uuid1p1, values) - - # With scope 'global' any duplicate should fail. - self.flags(osapi_compute_unique_server_name_scope='global') - self.assertRaises(exception.InstanceExists, - db.instance_update, - self.context, - uuid2p1, - values) - - self.assertRaises(exception.InstanceExists, - db.instance_update, - otherprojectcontext, - uuid1p2, - values) - - # But we should definitely be able to update our name if we aren't - # really changing it. 
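# NOTE(editor): aside -- a sketch of the uniqueness rule this test walks
# through (hypothetical helper, not nova's code): with scope 'global' any
# existing duplicate hostname clashes, with scope 'project' only one in the
# same project does, and with no scope set duplicates are allowed.
def hostname_clashes(hostname, existing, scope, project_id=None):
    for name, project in existing:
        if name != hostname:
            continue
        if scope == 'global':
            return True
        if scope == 'project' and project == project_id:
            return True
    return False

existing = [('fake_name', 'p1')]
assert hostname_clashes('fake_name', existing, 'global')
assert hostname_clashes('fake_name', existing, 'project', project_id='p1')
assert not hostname_clashes('fake_name', existing, 'project', project_id='p2')
assert not hostname_clashes('fake_name', existing, None)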
- case_only_values = {'hostname': 'fake_NAME'} - db.instance_update(self.context, uuid1p1, case_only_values) - - # With scope 'project' a duplicate in the project should fail: - self.flags(osapi_compute_unique_server_name_scope='project') - self.assertRaises(exception.InstanceExists, - db.instance_update, - self.context, - uuid2p1, - values) - - # With scope 'project' a duplicate in a different project should work: - self.flags(osapi_compute_unique_server_name_scope='project') - db.instance_update(otherprojectcontext, uuid1p2, values) - - def test_instance_update_with_and_get_original(self): - ctxt = context.get_admin_context() - - # Create an instance with some metadata - values = {'vm_state': 'building'} - instance = db.instance_create(ctxt, values) - - (old_ref, new_ref) = db.instance_update_and_get_original(ctxt, - instance['uuid'], {'vm_state': 'needscoffee'}) - self.assertEquals("building", old_ref["vm_state"]) - self.assertEquals("needscoffee", new_ref["vm_state"]) - - def _test_instance_update_updates_metadata(self, metadata_type): - ctxt = context.get_admin_context() - - instance = db.instance_create(ctxt, {}) - - def set_and_check(meta): - inst = db.instance_update(ctxt, instance['uuid'], - {metadata_type: dict(meta)}) - _meta = utils.metadata_to_dict(inst[metadata_type]) - self.assertEqual(meta, _meta) - - meta = {'speed': '88', 'units': 'MPH'} - set_and_check(meta) - - meta['gigawatts'] = '1.21' - set_and_check(meta) - - del meta['gigawatts'] - set_and_check(meta) - - def test_instance_update_updates_system_metadata(self): - # Ensure that system_metadata is updated during instance_update - self._test_instance_update_updates_metadata('system_metadata') - - def test_instance_update_updates_metadata(self): - # Ensure that metadata is updated during instance_update - self._test_instance_update_updates_metadata('metadata') - - def test_instance_fault_create(self): - # Ensure we can create an instance fault. - ctxt = context.get_admin_context() - uuid = str(stdlib_uuid.uuid4()) - - # Create a fault - fault_values = { - 'message': 'message', - 'details': 'detail', - 'instance_uuid': uuid, - 'code': 404, - } - db.instance_fault_create(ctxt, fault_values) - - # Retrieve the fault to ensure it was successfully added - faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid]) - self.assertEqual(404, faults[uuid][0]['code']) - - def test_instance_fault_get_by_instance(self): - # ensure we can retrieve an instance fault by instance UUID. 
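# NOTE(editor): aside -- the expected structure asserted below groups faults
# per instance uuid with the most recent fault first; in outline:
from collections import defaultdict

def group_faults(faults):
    grouped = defaultdict(list)
    for fault in faults:  # assumed ordered oldest to newest
        grouped[fault['instance_uuid']].insert(0, fault)
    return dict(grouped)

faults = [{'instance_uuid': 'u1', 'code': 404},
          {'instance_uuid': 'u1', 'code': 500}]
assert group_faults(faults) == {'u1': [{'instance_uuid': 'u1', 'code': 500},
                                       {'instance_uuid': 'u1', 'code': 404}]}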
- ctxt = context.get_admin_context() - instance1 = db.instance_create(ctxt, {}) - instance2 = db.instance_create(ctxt, {}) - uuids = [instance1['uuid'], instance2['uuid']] - - # Create faults - fault_values = { - 'message': 'message', - 'details': 'detail', - 'instance_uuid': uuids[0], - 'code': 404, - } - fault1 = db.instance_fault_create(ctxt, fault_values) - - fault_values = { - 'message': 'message', - 'details': 'detail', - 'instance_uuid': uuids[0], - 'code': 500, - } - fault2 = db.instance_fault_create(ctxt, fault_values) - - fault_values = { - 'message': 'message', - 'details': 'detail', - 'instance_uuid': uuids[1], - 'code': 404, - } - fault3 = db.instance_fault_create(ctxt, fault_values) - - fault_values = { - 'message': 'message', - 'details': 'detail', - 'instance_uuid': uuids[1], - 'code': 500, - } - fault4 = db.instance_fault_create(ctxt, fault_values) - - instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids) - - expected = { - uuids[0]: [fault2, fault1], - uuids[1]: [fault4, fault3], - } - - self.assertEqual(instance_faults, expected) - - def test_instance_faults_get_by_instance_uuids_no_faults(self): - # None should be returned when no faults exist. - ctxt = context.get_admin_context() - instance1 = db.instance_create(ctxt, {}) - instance2 = db.instance_create(ctxt, {}) - uuids = [instance1['uuid'], instance2['uuid']] - instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids) - expected = {uuids[0]: [], uuids[1]: []} - self.assertEqual(expected, instance_faults) - - def test_instance_action_start(self): - """Create an instance action.""" - ctxt = context.get_admin_context() - uuid = str(stdlib_uuid.uuid4()) - - start_time = timeutils.utcnow() - action_values = {'action': 'run_instance', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'user_id': ctxt.user_id, - 'project_id': ctxt.project_id, - 'start_time': start_time} - db.action_start(ctxt, action_values) - - # Retrieve the action to ensure it was successfully added - actions = db.actions_get(ctxt, uuid) - self.assertEqual(1, len(actions)) - self.assertEqual('run_instance', actions[0]['action']) - self.assertEqual(start_time, actions[0]['start_time']) - self.assertEqual(ctxt.request_id, actions[0]['request_id']) - self.assertEqual(ctxt.user_id, actions[0]['user_id']) - self.assertEqual(ctxt.project_id, actions[0]['project_id']) - - def test_instance_action_finish(self): - """Create an instance action.""" - ctxt = context.get_admin_context() - uuid = str(stdlib_uuid.uuid4()) - - start_time = timeutils.utcnow() - action_start_values = {'action': 'run_instance', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'user_id': ctxt.user_id, - 'project_id': ctxt.project_id, - 'start_time': start_time} - db.action_start(ctxt, action_start_values) - - finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5) - action_finish_values = {'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'finish_time': finish_time} - db.action_finish(ctxt, action_finish_values) - - # Retrieve the action to ensure it was successfully added - actions = db.actions_get(ctxt, uuid) - self.assertEqual(1, len(actions)) - self.assertEqual('run_instance', actions[0]['action']) - self.assertEqual(start_time, actions[0]['start_time']) - self.assertEqual(finish_time, actions[0]['finish_time']) - self.assertEqual(ctxt.request_id, actions[0]['request_id']) - self.assertEqual(ctxt.user_id, actions[0]['user_id']) - self.assertEqual(ctxt.project_id, actions[0]['project_id']) - - def 
test_instance_actions_get_by_instance(self): - """Ensure we can get actions by UUID.""" - ctxt1 = context.get_admin_context() - ctxt2 = context.get_admin_context() - uuid1 = str(stdlib_uuid.uuid4()) - uuid2 = str(stdlib_uuid.uuid4()) - - action_values = {'action': 'run_instance', - 'instance_uuid': uuid1, - 'request_id': ctxt1.request_id, - 'user_id': ctxt1.user_id, - 'project_id': ctxt1.project_id, - 'start_time': timeutils.utcnow()} - db.action_start(ctxt1, action_values) - action_values['action'] = 'resize' - db.action_start(ctxt1, action_values) - - action_values = {'action': 'reboot', - 'instance_uuid': uuid2, - 'request_id': ctxt2.request_id, - 'user_id': ctxt2.user_id, - 'project_id': ctxt2.project_id, - 'start_time': timeutils.utcnow()} - db.action_start(ctxt2, action_values) - db.action_start(ctxt2, action_values) - - # Retrieve the action to ensure it was successfully added - actions = db.actions_get(ctxt1, uuid1) - self.assertEqual(2, len(actions)) - self.assertEqual('resize', actions[0]['action']) - self.assertEqual('run_instance', actions[1]['action']) - - def test_instance_action_get_by_instance_and_action(self): - """Ensure we can get an action by instance UUID and action id.""" - ctxt1 = context.get_admin_context() - ctxt2 = context.get_admin_context() - uuid1 = str(stdlib_uuid.uuid4()) - uuid2 = str(stdlib_uuid.uuid4()) - - action_values = {'action': 'run_instance', - 'instance_uuid': uuid1, - 'request_id': ctxt1.request_id, - 'user_id': ctxt1.user_id, - 'project_id': ctxt1.project_id, - 'start_time': timeutils.utcnow()} - db.action_start(ctxt1, action_values) - action_values['action'] = 'resize' - db.action_start(ctxt1, action_values) - - action_values = {'action': 'reboot', - 'instance_uuid': uuid2, - 'request_id': ctxt2.request_id, - 'user_id': ctxt2.user_id, - 'project_id': ctxt2.project_id, - 'start_time': timeutils.utcnow()} - db.action_start(ctxt2, action_values) - db.action_start(ctxt2, action_values) - - actions = db.actions_get(ctxt1, uuid1) - request_id = actions[0]['request_id'] - action = db.action_get_by_request_id(ctxt1, uuid1, request_id) - self.assertEqual('run_instance', action['action']) - self.assertEqual(ctxt1.request_id, action['request_id']) - - def test_instance_action_event_start(self): - """Create an instance action event.""" - ctxt = context.get_admin_context() - uuid = str(stdlib_uuid.uuid4()) - - start_time = timeutils.utcnow() - action_values = {'action': 'run_instance', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'user_id': ctxt.user_id, - 'project_id': ctxt.project_id, - 'start_time': start_time} - action = db.action_start(ctxt, action_values) - - event_values = {'event': 'schedule', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'start_time': start_time} - db.action_event_start(ctxt, event_values) - - # Retrieve the event to ensure it was successfully added - events = db.action_events_get(ctxt, action['id']) - self.assertEqual(1, len(events)) - self.assertEqual('schedule', events[0]['event']) - self.assertEqual(start_time, events[0]['start_time']) - - def test_instance_action_event_finish_success(self): - """Finish an instance action event.""" - ctxt = context.get_admin_context() - uuid = str(stdlib_uuid.uuid4()) - - start_time = timeutils.utcnow() - action_values = {'action': 'run_instance', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'user_id': ctxt.user_id, - 'project_id': ctxt.project_id, - 'start_time': start_time} - action = db.action_start(ctxt, action_values) - - event_values = {'event': 
'schedule', - 'request_id': ctxt.request_id, - 'instance_uuid': uuid, - 'start_time': start_time} - db.action_event_start(ctxt, event_values) - - finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5) - event_finish_values = {'event': 'schedule', - 'request_id': ctxt.request_id, - 'instance_uuid': uuid, - 'finish_time': finish_time, - 'result': 'Success'} - db.action_event_finish(ctxt, event_finish_values) - - # Retrieve the event to ensure it was successfully added - events = db.action_events_get(ctxt, action['id']) - action = db.action_get_by_request_id(ctxt, uuid, ctxt.request_id) - self.assertEqual(1, len(events)) - self.assertEqual('schedule', events[0]['event']) - self.assertEqual(start_time, events[0]['start_time']) - self.assertEqual(finish_time, events[0]['finish_time']) - self.assertNotEqual(action['message'], 'Error') - - def test_instance_action_event_finish_error(self): - """Finish an instance action event with an error.""" - ctxt = context.get_admin_context() - uuid = str(stdlib_uuid.uuid4()) - - start_time = timeutils.utcnow() - action_values = {'action': 'run_instance', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'user_id': ctxt.user_id, - 'project_id': ctxt.project_id, - 'start_time': start_time} - action = db.action_start(ctxt, action_values) - - event_values = {'event': 'schedule', - 'request_id': ctxt.request_id, - 'instance_uuid': uuid, - 'start_time': start_time} - db.action_event_start(ctxt, event_values) - - finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5) - event_finish_values = {'event': 'schedule', - 'request_id': ctxt.request_id, - 'instance_uuid': uuid, - 'finish_time': finish_time, - 'result': 'Error'} - db.action_event_finish(ctxt, event_finish_values) - - # Retrieve the event to ensure it was successfully added - events = db.action_events_get(ctxt, action['id']) - action = db.action_get_by_request_id(ctxt, uuid, ctxt.request_id) - self.assertEqual(1, len(events)) - self.assertEqual('schedule', events[0]['event']) - self.assertEqual(start_time, events[0]['start_time']) - self.assertEqual(finish_time, events[0]['finish_time']) - self.assertEqual(action['message'], 'Error') - - def test_instance_action_and_event_start_string_time(self): - """Create an instance action and event with a string start_time.""" - ctxt = context.get_admin_context() - uuid = str(stdlib_uuid.uuid4()) - - start_time = timeutils.utcnow() - start_time_str = timeutils.strtime(start_time) - action_values = {'action': 'run_instance', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'user_id': ctxt.user_id, - 'project_id': ctxt.project_id, - 'start_time': start_time_str} - action = db.action_start(ctxt, action_values) - - event_values = {'event': 'schedule', - 'instance_uuid': uuid, - 'request_id': ctxt.request_id, - 'start_time': start_time_str} - db.action_event_start(ctxt, event_values) - - # Retrieve the event to ensure it was successfully added - events = db.action_events_get(ctxt, action['id']) - self.assertEqual(1, len(events)) - self.assertEqual('schedule', events[0]['event']) - # db api still returns models with datetime, not str, values - self.assertEqual(start_time, events[0]['start_time']) - - def test_instance_action_event_get_by_id(self): - """Get a specific instance action event.""" - ctxt1 = context.get_admin_context() - ctxt2 = context.get_admin_context() - uuid1 = str(stdlib_uuid.uuid4()) - uuid2 = str(stdlib_uuid.uuid4()) - - action_values = {'action': 'run_instance', - 'instance_uuid': uuid1, - 'request_id': 
ctxt1.request_id, - 'user_id': ctxt1.user_id, - 'project_id': ctxt1.project_id, - 'start_time': timeutils.utcnow()} - added_action = db.action_start(ctxt1, action_values) - - action_values = {'action': 'reboot', - 'instance_uuid': uuid2, - 'request_id': ctxt2.request_id, - 'user_id': ctxt2.user_id, - 'project_id': ctxt2.project_id, - 'start_time': timeutils.utcnow()} - db.action_start(ctxt2, action_values) - - start_time = timeutils.utcnow() - event_values = {'event': 'schedule', - 'instance_uuid': uuid1, - 'request_id': ctxt1.request_id, - 'start_time': start_time} - added_event = db.action_event_start(ctxt1, event_values) - - event_values = {'event': 'reboot', - 'instance_uuid': uuid2, - 'request_id': ctxt2.request_id, - 'start_time': timeutils.utcnow()} - db.action_event_start(ctxt2, event_values) - - # Retrieve the event to ensure it was successfully added - event = db.action_event_get_by_id(ctxt1, added_action['id'], - added_event['id']) - self.assertEqual('schedule', event['event']) - self.assertEqual(start_time, event['start_time']) - - def test_add_key_pair(self, name=None): - """Check if keypair creation work as expected.""" - keypair = { - 'user_id': self.user_id, - 'name': name or 'test-keypair', - 'fingerprint': '15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a', - 'private_key': 'private_key_value', - 'public_key': 'public_key_value' - } - result_key = db.key_pair_create(context.get_admin_context(), keypair) - for label in keypair: - self.assertEqual(keypair[label], result_key[label]) - - def test_key_pair_destroy(self): - """Check if key pair deletion works as expected.""" - keypair_name = 'test-delete-keypair' - self.test_add_key_pair(name=keypair_name) - db.key_pair_destroy(context.get_admin_context(), self.user_id, - keypair_name) - self.assertRaises(exception.KeypairNotFound, db.key_pair_get, - context.get_admin_context(), self.user_id, - keypair_name) - - def test_key_pair_get(self): - """Test if a previously created keypair can be found.""" - keypair_name = 'test-get-keypair' - self.test_add_key_pair(name=keypair_name) - result = db.key_pair_get(context.get_admin_context(), self.user_id, - keypair_name) - self.assertEqual(result.name, keypair_name) - - def test_key_pair_get_all_by_user(self): - self.assertTrue(isinstance(db.key_pair_get_all_by_user( - context.get_admin_context(), self.user_id), list)) - - def test_delete_non_existent_key_pair(self): - self.assertRaises(exception.KeypairNotFound, db.key_pair_destroy, - context.get_admin_context(), self.user_id, - 'non-existent-keypair') - - def test_get_non_existent_key_pair(self): - self.assertRaises(exception.KeypairNotFound, db.key_pair_get, - context.get_admin_context(), self.user_id, - 'invalid-key') - - def test_dns_registration(self): - domain1 = 'test.domain.one' - domain2 = 'test.domain.two' - testzone = 'testzone' - ctxt = context.get_admin_context() - - db.dnsdomain_register_for_zone(ctxt, domain1, testzone) - domain_ref = db.dnsdomain_get(ctxt, domain1) - zone = domain_ref['availability_zone'] - scope = domain_ref['scope'] - self.assertEqual(scope, 'private') - self.assertEqual(zone, testzone) - - db.dnsdomain_register_for_project(ctxt, domain2, - self.project_id) - domain_ref = db.dnsdomain_get(ctxt, domain2) - project = domain_ref['project_id'] - scope = domain_ref['scope'] - self.assertEqual(project, self.project_id) - self.assertEqual(scope, 'public') - - expected = [domain1, domain2] - domains = db.dnsdomain_list(ctxt) - self.assertEqual(expected, domains) - - db.dnsdomain_unregister(ctxt, domain1) - 
db.dnsdomain_unregister(ctxt, domain2) - - def test_network_get_associated_fixed_ips(self): - ctxt = context.get_admin_context() - values = {'host': 'foo', 'hostname': 'myname'} - instance = db.instance_create(ctxt, values) - values = {'address': 'bar', 'instance_uuid': instance['uuid']} - vif = db.virtual_interface_create(ctxt, values) - values = {'address': 'baz', - 'network_id': 1, - 'allocated': True, - 'instance_uuid': instance['uuid'], - 'virtual_interface_id': vif['id']} - fixed_address = db.fixed_ip_create(ctxt, values)['address'] - data = db.network_get_associated_fixed_ips(ctxt, 1) - self.assertEqual(len(data), 1) - record = data[0] - self.assertEqual(record['address'], fixed_address) - self.assertEqual(record['instance_uuid'], instance['uuid']) - self.assertEqual(record['network_id'], 1) - self.assertEqual(record['instance_created'], instance['created_at']) - self.assertEqual(record['instance_updated'], instance['updated_at']) - self.assertEqual(record['instance_hostname'], instance['hostname']) - self.assertEqual(record['vif_id'], vif['id']) - self.assertEqual(record['vif_address'], vif['address']) - data = db.network_get_associated_fixed_ips(ctxt, 1, 'nothing') - self.assertEqual(len(data), 0) - - def test_network_get_all_by_host(self): - ctxt = context.get_admin_context() - data = db.network_get_all_by_host(ctxt, 'foo') - self.assertEqual(len(data), 0) - # dummy network - net = db.network_create_safe(ctxt, {}) - # network with host set - net = db.network_create_safe(ctxt, {'host': 'foo'}) - data = db.network_get_all_by_host(ctxt, 'foo') - self.assertEqual(len(data), 1) - # network with fixed ip with host set - net = db.network_create_safe(ctxt, {}) - values = {'host': 'foo', 'network_id': net['id']} - db.fixed_ip_create(ctxt, values) - data = db.network_get_all_by_host(ctxt, 'foo') - self.assertEqual(len(data), 2) - # network with instance with host set - net = db.network_create_safe(ctxt, {}) - instance = db.instance_create(ctxt, {'host': 'foo'}) - values = {'instance_uuid': instance['uuid']} - vif = db.virtual_interface_create(ctxt, values) - values = {'network_id': net['id'], - 'virtual_interface_id': vif['id']} - db.fixed_ip_create(ctxt, values) - data = db.network_get_all_by_host(ctxt, 'foo') - self.assertEqual(len(data), 3) - - def test_network_in_use_on_host(self): - ctxt = context.get_admin_context() - - values = {'host': 'foo', 'hostname': 'myname'} - instance = db.instance_create(ctxt, values) - values = {'address': 'bar', 'instance_uuid': instance['uuid']} - vif = db.virtual_interface_create(ctxt, values) - values = {'address': 'baz', - 'network_id': 1, - 'allocated': True, - 'instance_uuid': instance['uuid'], - 'virtual_interface_id': vif['id']} - db.fixed_ip_create(ctxt, values) - - self.assertEqual(db.network_in_use_on_host(ctxt, 1, 'foo'), True) - self.assertEqual(db.network_in_use_on_host(ctxt, 1, 'bar'), False) - - def test_instance_floating_address_get_all(self): - ctxt = context.get_admin_context() - - instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'}) - instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'}) - - fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3'] - instance_uuids = [instance1['uuid'], instance1['uuid'], - instance2['uuid']] - - for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses, - float_addresses, - instance_uuids): - db.fixed_ip_create(ctxt, {'address': fixed_addr, - 'instance_uuid': instance_uuid}) - fixed_id = 
db.fixed_ip_get_by_address(ctxt, fixed_addr)['id'] - db.floating_ip_create(ctxt, - {'address': float_addr, - 'fixed_ip_id': fixed_id}) - - real_float_addresses = \ - db.instance_floating_address_get_all(ctxt, instance_uuids[0]) - self.assertEqual(set(float_addresses[:2]), set(real_float_addresses)) - real_float_addresses = \ - db.instance_floating_address_get_all(ctxt, instance_uuids[2]) - self.assertEqual(set([float_addresses[2]]), set(real_float_addresses)) - - def test_get_vol_mapping_non_admin(self): - ref = db.ec2_volume_create(self.context, 'fake-uuid') - ec2_id = db.get_ec2_volume_id_by_uuid(self.context, 'fake-uuid') - self.assertEqual(ref['id'], ec2_id) - - def test_get_snap_mapping_non_admin(self): - ref = db.ec2_snapshot_create(self.context, 'fake-uuid') - ec2_id = db.get_ec2_snapshot_id_by_uuid(self.context, 'fake-uuid') - self.assertEqual(ref['id'], ec2_id) - - def test_security_group_update(self): - ctxt = context.get_admin_context() - values = {'security_group': {'tenant_id': '123', - 'name': 'test', 'description': 'test-description'}} - sg = db.security_group_create(ctxt, values) - - values['security_group']['name'] = 'test_name' - values['security_group']['description'] = 'test_desc' - sg = db.security_group_update(ctxt, sg['id'], values) - self.assertNotEqual(None, sg) - self.assertEqual(sg['security_group']['name'], 'test_name') - self.assertEqual(sg['security_group']['description'], 'test_desc') - - def test_bw_usage_calls(self): - ctxt = context.get_admin_context() - now = timeutils.utcnow() - timeutils.set_time_override(now) - start_period = now - datetime.timedelta(seconds=10) - uuid3_refreshed = now - datetime.timedelta(seconds=5) - - expected_bw_usages = [{'uuid': 'fake_uuid1', - 'mac': 'fake_mac1', - 'start_period': start_period, - 'bw_in': 100, - 'bw_out': 200, - 'last_ctr_in': 12345, - 'last_ctr_out': 67890, - 'last_refreshed': now}, - {'uuid': 'fake_uuid2', - 'mac': 'fake_mac2', - 'start_period': start_period, - 'bw_in': 200, - 'bw_out': 300, - 'last_ctr_in': 22345, - 'last_ctr_out': 77890, - 'last_refreshed': now}, - {'uuid': 'fake_uuid3', - 'mac': 'fake_mac3', - 'start_period': start_period, - 'bw_in': 400, - 'bw_out': 500, - 'last_ctr_in': 32345, - 'last_ctr_out': 87890, - 'last_refreshed': uuid3_refreshed}] - - def _compare(bw_usage, expected): - for key, value in expected.items(): - self.assertEqual(bw_usage[key], value) - - bw_usages = db.bw_usage_get_by_uuids(ctxt, - ['fake_uuid1', 'fake_uuid2'], start_period) - # No matches - self.assertEqual(len(bw_usages), 0) - - # Add 3 entries - db.bw_usage_update(ctxt, 'fake_uuid1', - 'fake_mac1', start_period, - 100, 200, 12345, 67890) - db.bw_usage_update(ctxt, 'fake_uuid2', - 'fake_mac2', start_period, - 100, 200, 42, 42) - # Test explicit refreshed time - db.bw_usage_update(ctxt, 'fake_uuid3', - 'fake_mac3', start_period, - 400, 500, 32345, 87890, - last_refreshed=uuid3_refreshed) - # Update 2nd entry - db.bw_usage_update(ctxt, 'fake_uuid2', - 'fake_mac2', start_period, - 200, 300, 22345, 77890) - - bw_usages = db.bw_usage_get_by_uuids(ctxt, - ['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period) - self.assertEqual(len(bw_usages), 3) - _compare(bw_usages[0], expected_bw_usages[0]) - _compare(bw_usages[1], expected_bw_usages[1]) - _compare(bw_usages[2], expected_bw_usages[2]) - timeutils.clear_time_override() - - def _test_decorator_wraps_helper(self, decorator): - def test_func(): - """Test docstring.""" - - decorated_func = decorator(test_func) - - self.assertEquals(test_func.func_name, 
decorated_func.func_name) - self.assertEquals(test_func.__doc__, decorated_func.__doc__) - self.assertEquals(test_func.__module__, decorated_func.__module__) - - def test_require_context_decorator_wraps_functions_properly(self): - self._test_decorator_wraps_helper(sqlalchemy_api.require_context) - - def test_require_admin_context_decorator_wraps_functions_properly(self): - self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context) - - -def _get_fake_aggr_values(): - return {'name': 'fake_aggregate'} - - -def _get_fake_aggr_metadata(): - return {'fake_key1': 'fake_value1', - 'fake_key2': 'fake_value2', - 'availability_zone': 'fake_avail_zone'} - - -def _get_fake_aggr_hosts(): - return ['foo.openstack.org'] - - -def _create_aggregate(context=context.get_admin_context(), - values=_get_fake_aggr_values(), - metadata=_get_fake_aggr_metadata()): - return db.aggregate_create(context, values, metadata) - - -def _create_aggregate_with_hosts(context=context.get_admin_context(), - values=_get_fake_aggr_values(), - metadata=_get_fake_aggr_metadata(), - hosts=_get_fake_aggr_hosts()): - result = _create_aggregate(context=context, - values=values, metadata=metadata) - for host in hosts: - db.aggregate_host_add(context, result['id'], host) - return result - - -class NotDbApiTestCase(DbTestCase): - def setUp(self): - super(NotDbApiTestCase, self).setUp() - self.flags(sql_connection="notdb://") - - def test_instance_get_all_by_filters_regex_unsupported_db(self): - # Ensure that the 'LIKE' operator is used for unsupported dbs. - self.create_instance_with_args(display_name='test1') - self.create_instance_with_args(display_name='test.*') - self.create_instance_with_args(display_name='diff') - result = db.instance_get_all_by_filters(self.context, - {'display_name': 'test.*'}) - self.assertEqual(1, len(result)) - result = db.instance_get_all_by_filters(self.context, - {'display_name': '%test%'}) - self.assertEqual(2, len(result)) - - def test_instance_get_all_by_filters_paginate(self): - test1 = self.create_instance_with_args(display_name='test1') - test2 = self.create_instance_with_args(display_name='test2') - test3 = self.create_instance_with_args(display_name='test3') - - result = db.instance_get_all_by_filters(self.context, - {'display_name': '%test%'}, - marker=None) - self.assertEqual(3, len(result)) - result = db.instance_get_all_by_filters(self.context, - {'display_name': '%test%'}, - sort_dir="asc", - marker=test1['uuid']) - self.assertEqual(2, len(result)) - result = db.instance_get_all_by_filters(self.context, - {'display_name': '%test%'}, - sort_dir="asc", - marker=test2['uuid']) - self.assertEqual(1, len(result)) - result = db.instance_get_all_by_filters(self.context, - {'display_name': '%test%'}, - sort_dir="asc", - marker=test3['uuid']) - self.assertEqual(0, len(result)) - - self.assertRaises(exception.MarkerNotFound, - db.instance_get_all_by_filters, - self.context, {'display_name': '%test%'}, - marker=str(stdlib_uuid.uuid4())) - - -class AggregateDBApiTestCase(test.TestCase): - def setUp(self): - super(AggregateDBApiTestCase, self).setUp() - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, self.project_id) - - def test_aggregate_create_no_metadata(self): - result = _create_aggregate(metadata=None) - self.assertEquals(result['name'], 'fake_aggregate') - - def test_aggregate_create_avoid_name_conflict(self): - r1 = _create_aggregate(metadata=None) - db.aggregate_delete(context.get_admin_context(), r1['id']) - values = {'name': 
r1['name']} - metadata = {'availability_zone': 'new_zone'} - r2 = _create_aggregate(values=values, metadata=metadata) - self.assertEqual(r2['name'], values['name']) - self.assertEqual(r2['availability_zone'], - metadata['availability_zone']) - - def test_aggregate_create_raise_exist_exc(self): - _create_aggregate(metadata=None) - self.assertRaises(exception.AggregateNameExists, - _create_aggregate, metadata=None) - - def test_aggregate_get_raise_not_found(self): - ctxt = context.get_admin_context() - # this does not exist! - aggregate_id = 1 - self.assertRaises(exception.AggregateNotFound, - db.aggregate_get, - ctxt, aggregate_id) - - def test_aggregate_metadata_get_raise_not_found(self): - ctxt = context.get_admin_context() - # this does not exist! - aggregate_id = 1 - self.assertRaises(exception.AggregateNotFound, - db.aggregate_metadata_get, - ctxt, aggregate_id) - - def test_aggregate_create_with_metadata(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt) - expected_metadata = db.aggregate_metadata_get(ctxt, result['id']) - self.assertThat(expected_metadata, - matchers.DictMatches(_get_fake_aggr_metadata())) - - def test_aggregate_create_delete_create_with_metadata(self): - #test for bug 1052479 - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt) - expected_metadata = db.aggregate_metadata_get(ctxt, result['id']) - self.assertThat(expected_metadata, - matchers.DictMatches(_get_fake_aggr_metadata())) - db.aggregate_delete(ctxt, result['id']) - result = _create_aggregate(metadata={'availability_zone': - 'fake_avail_zone'}) - expected_metadata = db.aggregate_metadata_get(ctxt, result['id']) - self.assertEqual(expected_metadata, {'availability_zone': - 'fake_avail_zone'}) - - def test_aggregate_create_low_privi_context(self): - self.assertRaises(exception.AdminRequired, - db.aggregate_create, - self.context, _get_fake_aggr_values()) - - def test_aggregate_get(self): - ctxt = context.get_admin_context() - result = _create_aggregate_with_hosts(context=ctxt) - expected = db.aggregate_get(ctxt, result['id']) - self.assertEqual(_get_fake_aggr_hosts(), expected['hosts']) - self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails']) - - def test_aggregate_get_by_host(self): - ctxt = context.get_admin_context() - values = {'name': 'fake_aggregate2'} - a1 = _create_aggregate_with_hosts(context=ctxt) - a2 = _create_aggregate_with_hosts(context=ctxt, values=values) - r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org') - self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1]) - - def test_aggregate_get_by_host_with_key(self): - ctxt = context.get_admin_context() - values = {'name': 'fake_aggregate2'} - a1 = _create_aggregate_with_hosts(context=ctxt, - metadata={'goodkey': 'good'}) - a2 = _create_aggregate_with_hosts(context=ctxt, values=values) - # filter result by key - r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey') - self.assertEqual([a1['id']], [x['id'] for x in r1]) - - def test_aggregate_metadata_get_by_host(self): - ctxt = context.get_admin_context() - values = {'name': 'fake_aggregate2'} - values2 = {'name': 'fake_aggregate3'} - a1 = _create_aggregate_with_hosts(context=ctxt) - a2 = _create_aggregate_with_hosts(context=ctxt, values=values) - a3 = _create_aggregate_with_hosts(context=ctxt, values=values2, - hosts=['bar.openstack.org'], metadata={'badkey': 'bad'}) - r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org') - self.assertEqual(r1['fake_key1'], set(['fake_value1'])) - 
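# NOTE: a reading of the assertion above, not a documented guarantee:
# aggregate_metadata_get_by_host merges the metadata of every aggregate the
# host belongs to into a dict of key -> set of values, which is why
# 'fake_key1' comes back as set(['fake_value1']). Keys from aggregates that
# do not contain the host (like 'badkey' on a3, whose only host is
# bar.openstack.org) should be absent, as the next assertion checks.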
self.assertFalse('badkey' in r1) - - def test_aggregate_metadata_get_by_host_with_key(self): - ctxt = context.get_admin_context() - values = {'name': 'fake_aggregate2'} - values2 = {'name': 'fake_aggregate3'} - a1 = _create_aggregate_with_hosts(context=ctxt) - a2 = _create_aggregate_with_hosts(context=ctxt, values=values) - a3 = _create_aggregate_with_hosts(context=ctxt, values=values2, - hosts=['foo.openstack.org'], metadata={'good': 'value'}) - r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org', - key='good') - self.assertEqual(r1['good'], set(['value'])) - self.assertFalse('fake_key1' in r1) - # Delete metadata - db.aggregate_metadata_delete(ctxt, a3['id'], 'good') - r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org', - key='good') - self.assertFalse('good' in r2) - - def test_aggregate_host_get_by_metadata_key(self): - ctxt = context.get_admin_context() - values = {'name': 'fake_aggregate2'} - values2 = {'name': 'fake_aggregate3'} - a1 = _create_aggregate_with_hosts(context=ctxt) - a2 = _create_aggregate_with_hosts(context=ctxt, values=values) - a3 = _create_aggregate_with_hosts(context=ctxt, values=values2, - hosts=['foo.openstack.org'], metadata={'good': 'value'}) - r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good') - self.assertEqual(r1, {'foo.openstack.org': set(['value'])}) - self.assertFalse('fake_key1' in r1) - - def test_aggregate_get_by_host_not_found(self): - ctxt = context.get_admin_context() - _create_aggregate_with_hosts(context=ctxt) - self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host')) - - def test_aggregate_delete_raise_not_found(self): - ctxt = context.get_admin_context() - # this does not exist! - aggregate_id = 1 - self.assertRaises(exception.AggregateNotFound, - db.aggregate_delete, - ctxt, aggregate_id) - - def test_aggregate_delete(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt, metadata=None) - db.aggregate_delete(ctxt, result['id']) - expected = db.aggregate_get_all(ctxt) - self.assertEqual(0, len(expected)) - aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'), - result['id']) - self.assertEqual(aggregate['deleted'], True) - - def test_aggregate_update(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt, metadata={'availability_zone': - 'fake_avail_zone'}) - self.assertEqual(result['availability_zone'], 'fake_avail_zone') - new_values = _get_fake_aggr_values() - new_values['availability_zone'] = 'different_avail_zone' - updated = db.aggregate_update(ctxt, 1, new_values) - self.assertNotEqual(result['availability_zone'], - updated['availability_zone']) - - def test_aggregate_update_with_metadata(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt, metadata=None) - values = _get_fake_aggr_values() - values['metadata'] = _get_fake_aggr_metadata() - values['availability_zone'] = 'different_avail_zone' - db.aggregate_update(ctxt, 1, values) - expected = db.aggregate_metadata_get(ctxt, result['id']) - updated = db.aggregate_get(ctxt, result['id']) - self.assertThat(values['metadata'], - matchers.DictMatches(expected)) - self.assertNotEqual(result['availability_zone'], - updated['availability_zone']) - - def test_aggregate_update_with_existing_metadata(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt) - values = _get_fake_aggr_values() - values['metadata'] = _get_fake_aggr_metadata() - values['metadata']['fake_key1'] = 'foo' - db.aggregate_update(ctxt, 1, 
values) - expected = db.aggregate_metadata_get(ctxt, result['id']) - self.assertThat(values['metadata'], matchers.DictMatches(expected)) - - def test_aggregate_update_raise_not_found(self): - ctxt = context.get_admin_context() - # this does not exist! - aggregate_id = 1 - new_values = _get_fake_aggr_values() - self.assertRaises(exception.AggregateNotFound, - db.aggregate_update, ctxt, aggregate_id, new_values) - - def test_aggregate_get_all(self): - ctxt = context.get_admin_context() - counter = 3 - for c in range(counter): - _create_aggregate(context=ctxt, - values={'name': 'fake_aggregate_%d' % c}, - metadata=None) - results = db.aggregate_get_all(ctxt) - self.assertEqual(len(results), counter) - - def test_aggregate_get_all_non_deleted(self): - ctxt = context.get_admin_context() - add_counter = 5 - remove_counter = 2 - aggregates = [] - for c in range(1, add_counter): - values = {'name': 'fake_aggregate_%d' % c} - aggregates.append(_create_aggregate(context=ctxt, - values=values, metadata=None)) - for c in range(1, remove_counter): - db.aggregate_delete(ctxt, aggregates[c - 1]['id']) - results = db.aggregate_get_all(ctxt) - self.assertEqual(len(results), add_counter - remove_counter) - - def test_aggregate_metadata_add(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt, metadata=None) - metadata = _get_fake_aggr_metadata() - db.aggregate_metadata_add(ctxt, result['id'], metadata) - expected = db.aggregate_metadata_get(ctxt, result['id']) - self.assertThat(metadata, matchers.DictMatches(expected)) - - def test_aggregate_metadata_update(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt) - metadata = _get_fake_aggr_metadata() - key = metadata.keys()[0] - db.aggregate_metadata_delete(ctxt, result['id'], key) - new_metadata = {key: 'foo'} - db.aggregate_metadata_add(ctxt, result['id'], new_metadata) - expected = db.aggregate_metadata_get(ctxt, result['id']) - metadata[key] = 'foo' - self.assertThat(metadata, matchers.DictMatches(expected)) - - def test_aggregate_metadata_delete(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt, metadata=None) - metadata = _get_fake_aggr_metadata() - db.aggregate_metadata_add(ctxt, result['id'], metadata) - db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0]) - expected = db.aggregate_metadata_get(ctxt, result['id']) - del metadata[metadata.keys()[0]] - self.assertThat(metadata, matchers.DictMatches(expected)) - - def test_aggregate_remove_availability_zone(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt, metadata={'availability_zone': - 'fake_avail_zone'}) - db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone') - expected = db.aggregate_metadata_get(ctxt, result['id']) - aggregate = db.aggregate_get(ctxt, result['id']) - self.assertEquals(aggregate['availability_zone'], None) - self.assertThat({}, matchers.DictMatches(expected)) - - def test_aggregate_metadata_delete_raise_not_found(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt) - self.assertRaises(exception.AggregateMetadataNotFound, - db.aggregate_metadata_delete, - ctxt, result['id'], 'foo_key') - - def test_aggregate_host_add(self): - ctxt = context.get_admin_context() - result = _create_aggregate_with_hosts(context=ctxt, metadata=None) - expected = db.aggregate_host_get_all(ctxt, result['id']) - self.assertEqual(_get_fake_aggr_hosts(), expected) - - def test_aggregate_host_re_add(self): - 
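# Deleting a host and then adding it back should leave exactly one live
# row for that host. This assumes aggregate_host_delete is a soft delete
# that aggregate_host_add can follow, consistent with the
# read_deleted='yes' behavior exercised in test_aggregate_delete above.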
ctxt = context.get_admin_context() - result = _create_aggregate_with_hosts(context=ctxt, metadata=None) - host = _get_fake_aggr_hosts()[0] - db.aggregate_host_delete(ctxt, result['id'], host) - db.aggregate_host_add(ctxt, result['id'], host) - expected = db.aggregate_host_get_all(ctxt, result['id']) - self.assertEqual(len(expected), 1) - - def test_aggregate_host_add_duplicate_works(self): - ctxt = context.get_admin_context() - r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None) - r2 = _create_aggregate_with_hosts(ctxt, - values={'name': 'fake_aggregate2'}, - metadata={'availability_zone': 'fake_avail_zone2'}) - h1 = db.aggregate_host_get_all(ctxt, r1['id']) - h2 = db.aggregate_host_get_all(ctxt, r2['id']) - self.assertEqual(h1, h2) - - def test_aggregate_host_add_duplicate_raise_exist_exc(self): - ctxt = context.get_admin_context() - result = _create_aggregate_with_hosts(context=ctxt, metadata=None) - self.assertRaises(exception.AggregateHostExists, - db.aggregate_host_add, - ctxt, result['id'], _get_fake_aggr_hosts()[0]) - - def test_aggregate_host_add_raise_not_found(self): - ctxt = context.get_admin_context() - # this does not exist! - aggregate_id = 1 - host = _get_fake_aggr_hosts()[0] - self.assertRaises(exception.AggregateNotFound, - db.aggregate_host_add, - ctxt, aggregate_id, host) - - def test_aggregate_host_delete(self): - ctxt = context.get_admin_context() - result = _create_aggregate_with_hosts(context=ctxt, metadata=None) - db.aggregate_host_delete(ctxt, result['id'], - _get_fake_aggr_hosts()[0]) - expected = db.aggregate_host_get_all(ctxt, result['id']) - self.assertEqual(0, len(expected)) - - def test_aggregate_host_delete_raise_not_found(self): - ctxt = context.get_admin_context() - result = _create_aggregate(context=ctxt) - self.assertRaises(exception.AggregateHostNotFound, - db.aggregate_host_delete, - ctxt, result['id'], _get_fake_aggr_hosts()[0]) - - -class SqlAlchemyDbApiTestCase(DbTestCase): - def test_instance_get_all_by_host(self): - ctxt = context.get_admin_context() - - self.create_instance_with_args() - self.create_instance_with_args() - self.create_instance_with_args(host='host2') - result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1') - self.assertEqual(2, len(result)) - - def test_instance_get_all_uuids_by_host(self): - ctxt = context.get_admin_context() - self.create_instance_with_args() - self.create_instance_with_args() - self.create_instance_with_args(host='host2') - result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1') - self.assertEqual(2, len(result)) - self.assertEqual(types.UnicodeType, type(result[0])) - - -class CapacityTestCase(test.TestCase): - def setUp(self): - super(CapacityTestCase, self).setUp() - - self.ctxt = context.get_admin_context() - - service_dict = dict(host='host1', binary='binary1', - topic='compute', report_count=1, - disabled=False) - self.service = db.service_create(self.ctxt, service_dict) - - self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048, - vcpus_used=0, memory_mb_used=0, - local_gb_used=0, free_ram_mb=1024, - free_disk_gb=2048, hypervisor_type="xen", - hypervisor_version=1, cpu_info="", - running_vms=0, current_workload=0, - service_id=self.service['id']) - # add some random stats - stats = dict(num_instances=3, num_proj_12345=2, - num_proj_23456=2, num_vm_building=3) - self.compute_node_dict['stats'] = stats - - self.flags(reserved_host_memory_mb=0) - self.flags(reserved_host_disk_mb=0) - - def _create_helper(self, host): - self.compute_node_dict['host'] = 
host - return db.compute_node_create(self.ctxt, self.compute_node_dict) - - def _stats_as_dict(self, stats): - d = {} - for s in stats: - key = s['key'] - d[key] = s['value'] - return d - - def test_compute_node_create(self): - item = self._create_helper('host1') - self.assertEquals(item['free_ram_mb'], 1024) - self.assertEquals(item['free_disk_gb'], 2048) - self.assertEquals(item['running_vms'], 0) - self.assertEquals(item['current_workload'], 0) - - stats = self._stats_as_dict(item['stats']) - self.assertEqual(3, stats['num_instances']) - self.assertEqual(2, stats['num_proj_12345']) - self.assertEqual(3, stats['num_vm_building']) - - def test_compute_node_get_all(self): - item = self._create_helper('host1') - nodes = db.compute_node_get_all(self.ctxt) - self.assertEqual(1, len(nodes)) - - node = nodes[0] - self.assertEqual(2, node['vcpus']) - - stats = self._stats_as_dict(node['stats']) - self.assertEqual(3, int(stats['num_instances'])) - self.assertEqual(2, int(stats['num_proj_12345'])) - self.assertEqual(3, int(stats['num_vm_building'])) - - def test_compute_node_update(self): - item = self._create_helper('host1') - - compute_node_id = item['id'] - stats = self._stats_as_dict(item['stats']) - - # change some values: - stats['num_instances'] = 8 - stats['num_tribbles'] = 1 - values = { - 'vcpus': 4, - 'stats': stats, - } - item = db.compute_node_update(self.ctxt, compute_node_id, values) - stats = self._stats_as_dict(item['stats']) - - self.assertEqual(4, item['vcpus']) - self.assertEqual(8, int(stats['num_instances'])) - self.assertEqual(2, int(stats['num_proj_12345'])) - self.assertEqual(1, int(stats['num_tribbles'])) - - def test_compute_node_update_always_updates_updated_at(self): - item = self._create_helper('host1') - item_updated = db.compute_node_update(self.ctxt, - item['id'], {}) - self.assertNotEqual(item['updated_at'], item_updated['updated_at']) - - def test_compute_node_stat_unchanged(self): - # don't update unchanged stat values: - item = self._create_helper('host1') - - compute_node_id = item['id'] - stats = self._stats_as_dict(item['stats']) - self.assertEqual(4, len(stats.keys())) - - orig_update_stats = sqlalchemy_api._update_stats - - def update(context, new_stats, compute_id, session, prune_stats=False): - # wrap the session object to see which stats get updated - orig_add = session.add - added = [] - - def add(instance): - added.append(instance) - orig_add(instance) - - self.stubs.Set(session, 'add', add) - orig_update_stats(context, new_stats, compute_id, session, - prune_stats=False) - - # no stats should have been added to the session: - self.assertEqual(0, len(added)) - - self.stubs.Set(sqlalchemy_api, '_update_stats', update) - - # save with same (unchanged) stats again: - values = {'stats': stats} - db.compute_node_update(self.ctxt, compute_node_id, values) - - def test_compute_node_stat_prune(self): - item = self._create_helper('host1') - for stat in item['stats']: - if stat['key'] == 'num_instances': - num_instance_stat = stat - break - - values = { - 'stats': dict(num_instances=1) - } - db.compute_node_update(self.ctxt, item['id'], values, prune_stats=True) - item = db.compute_node_get_all(self.ctxt)[0] - self.assertEqual(1, len(item['stats'])) - - stat = item['stats'][0] - self.assertEqual(num_instance_stat['id'], stat['id']) - self.assertEqual(num_instance_stat['key'], stat['key']) - self.assertEqual(1, int(stat['value'])) - - -class MigrationTestCase(test.TestCase): - - def setUp(self): - super(MigrationTestCase, self).setUp() - self.ctxt = 
context.get_admin_context() - - self._create() - self._create() - self._create(status='reverted') - self._create(status='confirmed') - self._create(source_compute='host2', source_node='b', - dest_compute='host1', dest_node='a') - self._create(source_compute='host2', dest_compute='host3') - self._create(source_compute='host3', dest_compute='host4') - - def _create(self, status='migrating', source_compute='host1', - source_node='a', dest_compute='host2', dest_node='b', - system_metadata=None): - - values = {'host': source_compute} - instance = db.instance_create(self.ctxt, values) - if system_metadata: - db.instance_system_metadata_update(self.ctxt, instance['uuid'], - system_metadata, False) - - values = {'status': status, 'source_compute': source_compute, - 'source_node': source_node, 'dest_compute': dest_compute, - 'dest_node': dest_node, 'instance_uuid': instance['uuid']} - db.migration_create(self.ctxt, values) - - def _assert_in_progress(self, migrations): - for migration in migrations: - self.assertNotEqual('confirmed', migration['status']) - self.assertNotEqual('reverted', migration['status']) - - def test_migration_get_in_progress_joins(self): - self._create(source_compute='foo', system_metadata={'foo': 'bar'}) - migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt, - 'foo', 'a') - system_metadata = migrations[0]['instance']['system_metadata'][0] - self.assertEqual(system_metadata['key'], 'foo') - self.assertEqual(system_metadata['value'], 'bar') - - def test_in_progress_host1_nodea(self): - migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt, - 'host1', 'a') - # 2 as source + 1 as dest - self.assertEqual(3, len(migrations)) - self._assert_in_progress(migrations) - - def test_in_progress_host1_nodeb(self): - migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt, - 'host1', 'b') - # some migrations are to/from host1, but none with a node 'b' - self.assertEqual(0, len(migrations)) - - def test_in_progress_host2_nodeb(self): - migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt, - 'host2', 'b') - # 2 as dest, 1 as source - self.assertEqual(3, len(migrations)) - self._assert_in_progress(migrations) - - def test_instance_join(self): - migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt, - 'host2', 'b') - for migration in migrations: - instance = migration['instance'] - self.assertEqual(migration['instance_uuid'], instance['uuid']) - - -class ModelsObjectComparatorMixin(object): - def _dict_from_object(self, obj, ignored_keys): - if ignored_keys is None: - ignored_keys = [] - return dict([(k, v) for k, v in obj.iteritems() - if k not in ignored_keys]) - - def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): - obj1 = self._dict_from_object(obj1, ignored_keys) - obj2 = self._dict_from_object(obj2, ignored_keys) - - self.assertEqual(len(obj1), len(obj2)) - for key, value in obj1.iteritems(): - self.assertEqual(value, obj2[key]) - - def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): - self.assertEqual(len(objs1), len(objs2)) - objs2 = dict([(o['id'], o) for o in objs2]) - for o1 in objs1: - self._assertEqualObjects(o1, objs2[o1['id']], ignored_keys) - - def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): - self.assertEqual(len(primitives1), len(primitives2)) - for primitive in primitives1: - self.assertIn(primitive, primitives2) - - for primitive in primitives2: - self.assertIn(primitive, primitives1) - - -class ReservationTestCase(test.TestCase, 
ModelsObjectComparatorMixin): - - """Tests for db.api.reservation_* methods.""" - - def setUp(self): - super(ReservationTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.values = {'uuid': 'sample-uuid', - 'project_id': 'project1', - 'resource': 'resource', - 'delta': 42, - 'expire': datetime.datetime.utcnow() + - datetime.timedelta(days=1), - 'usage': {'id': 1}} - - def _quota_reserve(self): - """Create sample Quota, QuotaUsage and Reservation objects. - - There is no method db.quota_usage_create(), so we have to use - db.quota_reserve() for creating QuotaUsage objects. - - Returns reservation uuids. - - """ - def get_sync(resource, usage): - def sync(elevated, project_id, session): - return {resource: usage} - return sync - quotas = {} - resources = {} - deltas = {} - for i in range(3): - resource = 'resource%d' % i - quotas[resource] = db.quota_create(self.ctxt, 'project1', - resource, i) - resources[resource] = ReservableResource(resource, - get_sync(resource, i), 'quota_res_%d' % i) - deltas[resource] = i - return db.quota_reserve(self.ctxt, resources, quotas, deltas, - datetime.datetime.utcnow(), datetime.datetime.utcnow(), - datetime.timedelta(days=1), self.values['project_id']) - - def test_reservation_create(self): - reservation = db.reservation_create(self.ctxt, **self.values) - self._assertEqualObjects(self.values, reservation, ignored_keys=( - 'deleted', 'updated_at', - 'deleted_at', 'id', - 'created_at', 'usage', - 'usage_id')) - self.assertEqual(reservation['usage_id'], self.values['usage']['id']) - - def test_reservation_get(self): - reservation = db.reservation_create(self.ctxt, **self.values) - reservation_db = db.reservation_get(self.ctxt, self.values['uuid']) - self._assertEqualObjects(reservation, reservation_db) - - def test_reservation_get_nonexistent(self): - self.assertRaises(exception.ReservationNotFound, db.reservation_get, - self.ctxt, 'non-existent-reservation-uuid') - - def test_reservation_commit(self): - reservations = self._quota_reserve() - expected = {'project_id': 'project1', - 'resource0': {'reserved': 0, 'in_use': 0}, - 'resource1': {'reserved': 1, 'in_use': 1}, - 'resource2': {'reserved': 2, 'in_use': 2}} - self.assertEqual(expected, db.quota_usage_get_all_by_project( - self.ctxt, 'project1')) - db.reservation_get(self.ctxt, reservations[0]) - db.reservation_commit(self.ctxt, reservations, 'project1') - self.assertRaises(exception.ReservationNotFound, - db.reservation_get, self.ctxt, reservations[0]) - expected = {'project_id': 'project1', - 'resource0': {'reserved': 0, 'in_use': 0}, - 'resource1': {'reserved': 0, 'in_use': 2}, - 'resource2': {'reserved': 0, 'in_use': 4}} - self.assertEqual(expected, db.quota_usage_get_all_by_project( - self.ctxt, 'project1')) - - def test_reservation_rollback(self): - reservations = self._quota_reserve() - expected = {'project_id': 'project1', - 'resource0': {'reserved': 0, 'in_use': 0}, - 'resource1': {'reserved': 1, 'in_use': 1}, - 'resource2': {'reserved': 2, 'in_use': 2}} - self.assertEqual(expected, db.quota_usage_get_all_by_project( - self.ctxt, 'project1')) - db.reservation_get(self.ctxt, reservations[0]) - db.reservation_rollback(self.ctxt, reservations, 'project1') - self.assertRaises(exception.ReservationNotFound, - db.reservation_get, self.ctxt, reservations[0]) - expected = {'project_id': 'project1', - 'resource0': {'reserved': 0, 'in_use': 0}, - 'resource1': {'reserved': 0, 'in_use': 1}, - 'resource2': {'reserved': 0, 'in_use': 2}} - self.assertEqual(expected,
db.quota_usage_get_all_by_project( - self.ctxt, 'project1')) - - def test_reservation_expire(self): - self.values['expire'] = datetime.datetime.utcnow() + datetime.\ - timedelta(days=1) - reservations = self._quota_reserve() - db.reservation_expire(self.ctxt) - - expected = {'project_id': 'project1', - 'resource0': {'reserved': 0, 'in_use': 0}, - 'resource1': {'reserved': 0, 'in_use': 1}, - 'resource2': {'reserved': 0, 'in_use': 2}} - self.assertEqual(expected, db.quota_usage_get_all_by_project( - self.ctxt, 'project1')) - - -class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(SecurityGroupTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_base_values(self): - return { - 'name': 'fake_sec_group', - 'description': 'fake_sec_group_descr', - 'user_id': 'fake', - 'project_id': 'fake', - 'instances': [] - } - - def _create_security_group(self, values): - v = self._get_base_values() - v.update(values) - return db.security_group_create(self.ctxt, v) - - def test_security_group_create(self): - security_group = self._create_security_group({}) - self.assertFalse(security_group['id'] is None) - for key, value in self._get_base_values().iteritems(): - self.assertEqual(value, security_group[key]) - - def test_security_group_destroy(self): - security_group1 = self._create_security_group({}) - security_group2 = \ - self._create_security_group({'name': 'fake_sec_group2'}) - - db.security_group_destroy(self.ctxt, security_group1['id']) - self.assertRaises(exception.SecurityGroupNotFound, - db.security_group_get, - self.ctxt, security_group1['id']) - self._assertEqualObjects(db.security_group_get(self.ctxt, - security_group2['id']), - security_group2) - - def test_security_group_get(self): - security_group1 = self._create_security_group({}) - security_group2 = self._create_security_group( - {'name': 'fake_sec_group2'}) - real_security_group = db.security_group_get(self.ctxt, - security_group1['id']) - self._assertEqualObjects(security_group1, - real_security_group) - - def test_security_group_get_not_found_exception(self): - self.assertRaises(exception.SecurityGroupNotFound, - db.security_group_get, self.ctxt, 100500) - - def test_security_group_get_by_name(self): - security_group1 = self._create_security_group({'name': 'fake1'}) - security_group2 = self._create_security_group({'name': 'fake2'}) - - real_security_group1 = db.security_group_get_by_name( - self.ctxt, - security_group1['project_id'], - security_group1['name']) - real_security_group2 = db.security_group_get_by_name( - self.ctxt, - security_group2['project_id'], - security_group2['name']) - self._assertEqualObjects(security_group1, real_security_group1) - self._assertEqualObjects(security_group2, real_security_group2) - - def test_security_group_get_by_project(self): - security_group1 = self._create_security_group( - {'name': 'fake1', 'project_id': 'fake_proj1'}) - security_group2 = self._create_security_group( - {'name': 'fake2', 'project_id': 'fake_proj2'}) - - real1 = db.security_group_get_by_project( - self.ctxt, - security_group1['project_id']) - real2 = db.security_group_get_by_project( - self.ctxt, - security_group2['project_id']) - - expected1, expected2 = [security_group1], [security_group2] - self._assertEqualListsOfObjects(expected1, real1, - ignored_keys=['instances']) - self._assertEqualListsOfObjects(expected2, real2, - ignored_keys=['instances']) - - def test_security_group_get_by_instance(self): - instance = db.instance_create(self.ctxt, dict(host='foo')) 
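# The 'instances' list passed in the creation values below is what ties a
# security group to an instance; security_group_get_by_instance is then
# expected to return exactly the groups whose list contained this instance
# (the first two here) and to skip the group created with an empty list.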
- values = [ - {'name': 'fake1', 'instances': [instance]}, - {'name': 'fake2', 'instances': [instance]}, - {'name': 'fake3', 'instances': []}, - ] - security_groups = [self._create_security_group(vals) - for vals in values] - - real = db.security_group_get_by_instance(self.ctxt, - instance['id']) - expected = security_groups[:2] - self._assertEqualListsOfObjects(expected, real, - ignored_keys=['instances']) - - def test_security_group_get_all(self): - values = [ - {'name': 'fake1', 'project_id': 'fake_proj1'}, - {'name': 'fake2', 'project_id': 'fake_proj2'}, - ] - security_groups = [self._create_security_group(vals) - for vals in values] - - real = db.security_group_get_all(self.ctxt) - - self._assertEqualListsOfObjects(security_groups, real, - ignored_keys=['instances']) - - def test_security_group_exists(self): - security_group = self._create_security_group( - {'name': 'fake1', 'project_id': 'fake_proj1'}) - - real = (db.security_group_exists(self.ctxt, - security_group['project_id'], - security_group['name']), - db.security_group_exists(self.ctxt, - security_group['project_id'], - 'fake_sec_group')) - - self.assertEqual((True, False), real) - - def test_security_group_count_by_project(self): - values = [ - {'name': 'fake1', 'project_id': 'fake_proj1'}, - {'name': 'fake2', 'project_id': 'fake_proj1'}, - {'name': 'fake3', 'project_id': 'fake_proj2'}, - ] - security_groups = [self._create_security_group(vals) - for vals in values] - - real = [] - for project in ('fake_proj1', 'fake_proj2'): - real.append(db.security_group_count_by_project(self.ctxt, project)) - expected = [2, 1] - - self.assertEquals(expected, real) - - def test_security_group_in_use(self): - instance = db.instance_create(self.ctxt, dict(host='foo')) - values = [ - {'instances': [instance]}, - {'instances': []}, - ] - - security_groups = [self._create_security_group(vals) - for vals in values] - - real = [] - for security_group in security_groups: - in_use = db.security_group_in_use(self.ctxt, - security_group['id']) - real.append(in_use) - expected = [True, False] - - self.assertEquals(expected, real) - - def test_security_group_ensure_default(self): - self.assertFalse(db.security_group_exists(self.ctxt, - self.ctxt.project_id, - 'default')) - - default_group = db.security_group_ensure_default(self.ctxt) - - self.assertTrue(db.security_group_exists(self.ctxt, - self.ctxt.project_id, - 'default')) - - -class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(ServiceTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_base_values(self): - return { - 'host': 'fake_host', - 'binary': 'fake_binary', - 'topic': 'fake_topic', - 'report_count': 3, - 'disabled': False - } - - def _create_service(self, values): - v = self._get_base_values() - v.update(values) - return db.service_create(self.ctxt, v) - - def test_service_create(self): - service = self._create_service({}) - self.assertFalse(service['id'] is None) - for key, value in self._get_base_values().iteritems(): - self.assertEqual(value, service[key]) - - def test_service_destroy(self): - service1 = self._create_service({}) - service2 = self._create_service({'host': 'fake_host2'}) - - db.service_destroy(self.ctxt, service1['id']) - self.assertRaises(exception.ServiceNotFound, - db.service_get, self.ctxt, service1['id']) - self._assertEqualObjects(db.service_get(self.ctxt, service2['id']), - service2, ignored_keys=['compute_node']) - - def test_service_update(self): - service = self._create_service({}) - new_values 
= { - 'host': 'fake_host1', - 'binary': 'fake_binary1', - 'topic': 'fake_topic1', - 'report_count': 4, - 'disabled': True - } - db.service_update(self.ctxt, service['id'], new_values) - updated_service = db.service_get(self.ctxt, service['id']) - for key, value in new_values.iteritems(): - self.assertEqual(value, updated_service[key]) - - def test_service_update_not_found_exception(self): - self.assertRaises(exception.ServiceNotFound, - db.service_update, self.ctxt, 100500, {}) - - def test_service_get(self): - service1 = self._create_service({}) - service2 = self._create_service({'host': 'some_other_fake_host'}) - real_service1 = db.service_get(self.ctxt, service1['id']) - self._assertEqualObjects(service1, real_service1, - ignored_keys=['compute_node']) - - def test_service_get_with_compute_node(self): - service = self._create_service({}) - compute_values = dict(vcpus=2, memory_mb=1024, local_gb=2048, - vcpus_used=0, memory_mb_used=0, - local_gb_used=0, free_ram_mb=1024, - free_disk_gb=2048, hypervisor_type="xen", - hypervisor_version=1, cpu_info="", - running_vms=0, current_workload=0, - service_id=service['id']) - compute = db.compute_node_create(self.ctxt, compute_values) - real_service = db.service_get(self.ctxt, service['id']) - real_compute = real_service['compute_node'][0] - self.assertEqual(compute['id'], real_compute['id']) - - def test_service_get_not_found_exception(self): - self.assertRaises(exception.ServiceNotFound, - db.service_get, self.ctxt, 100500) - - def test_service_get_by_host_and_topic(self): - service1 = self._create_service({'host': 'host1', 'topic': 'topic1'}) - service2 = self._create_service({'host': 'host2', 'topic': 'topic2'}) - - real_service1 = db.service_get_by_host_and_topic(self.ctxt, - host='host1', - topic='topic1') - self._assertEqualObjects(service1, real_service1) - - def test_service_get_all(self): - values = [ - {'host': 'host1', 'topic': 'topic1'}, - {'host': 'host2', 'topic': 'topic2'}, - {'disabled': True} - ] - services = [self._create_service(vals) for vals in values] - disabled_services = [services[-1]] - non_disabled_services = services[:-1] - - compares = [ - (services, db.service_get_all(self.ctxt)), - (disabled_services, db.service_get_all(self.ctxt, True)), - (non_disabled_services, db.service_get_all(self.ctxt, False)) - ] - for comp in compares: - self._assertEqualListsOfObjects(*comp) - - def test_service_get_all_by_topic(self): - values = [ - {'host': 'host1', 'topic': 't1'}, - {'host': 'host2', 'topic': 't1'}, - {'disabled': True, 'topic': 't1'}, - {'host': 'host3', 'topic': 't2'} - ] - services = [self._create_service(vals) for vals in values] - expected = services[:2] - real = db.service_get_all_by_topic(self.ctxt, 't1') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_all_by_host(self): - values = [ - {'host': 'host1', 'topic': 't1'}, - {'host': 'host1', 'topic': 't1'}, - {'host': 'host2', 'topic': 't1'}, - {'host': 'host3', 'topic': 't2'} - ] - services = [self._create_service(vals) for vals in values] - - expected = services[:2] - real = db.service_get_all_by_host(self.ctxt, 'host1') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_by_compute_host(self): - values = [ - {'host': 'host1', 'topic': CONF.compute_topic}, - {'host': 'host2', 'topic': 't1'}, - {'host': 'host3', 'topic': CONF.compute_topic} - ] - services = [self._create_service(vals) for vals in values] - - real_service = db.service_get_by_compute_host(self.ctxt, 'host1') - self._assertEqualObjects(services[0], 
real_service, - ignored_keys=['compute_node']) - - self.assertRaises(exception.ComputeHostNotFound, - db.service_get_by_compute_host, - self.ctxt, 'non-exists-host') - - def test_service_get_by_compute_host_not_found(self): - self.assertRaises(exception.ComputeHostNotFound, - db.service_get_by_compute_host, - self.ctxt, 'non-exists-host') - - def test_service_get_by_args(self): - values = [ - {'host': 'host1', 'binary': 'a'}, - {'host': 'host2', 'binary': 'b'} - ] - services = [self._create_service(vals) for vals in values] - - service1 = db.service_get_by_args(self.ctxt, 'host1', 'a') - self._assertEqualObjects(services[0], service1) - - service2 = db.service_get_by_args(self.ctxt, 'host2', 'b') - self._assertEqualObjects(services[1], service2) - - def test_service_get_by_args_not_found_exception(self): - self.assertRaises(exception.HostBinaryNotFound, - db.service_get_by_args, - self.ctxt, 'non-exists-host', 'a') - - -class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(BaseInstanceTypeTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_base_values(self): - return { - 'name': 'fake_name', - 'memory_mb': 512, - 'vcpus': 1, - 'root_gb': 10, - 'ephemeral_gb': 10, - 'flavorid': 'fake_flavor', - 'swap': 0, - 'rxtx_factor': 0.5, - 'vcpu_weight': 1, - 'disabled': False, - 'is_public': True - } - - def _create_inst_type(self, values): - v = self._get_base_values() - v.update(values) - return db.instance_type_create(self.ctxt, v) - - -class InstanceTypeTestCase(BaseInstanceTypeTestCase): - - def test_instance_type_create(self): - inst_type = self._create_inst_type({}) - ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at', - 'created_at', 'extra_specs'] - - self.assertFalse(inst_type['id'] is None) - self._assertEqualObjects(inst_type, self._get_base_values(), - ignored_keys) - - def test_instance_type_destroy(self): - specs1 = {'a': '1', 'b': '2'} - inst_type1 = self._create_inst_type({'name': 'name1', 'flavorid': 'a1', - 'extra_specs': specs1}) - specs2 = {'c': '4', 'd': '3'} - inst_type2 = self._create_inst_type({'name': 'name2', 'flavorid': 'a2', - 'extra_specs': specs2}) - - db.instance_type_destroy(self.ctxt, 'name1') - - self.assertRaises(exception.InstanceTypeNotFound, - db.instance_type_get, self.ctxt, inst_type1['id']) - real_specs1 = db.instance_type_extra_specs_get(self.ctxt, - inst_type1['flavorid']) - self._assertEqualObjects(real_specs1, {}) - - r_inst_type2 = db.instance_type_get(self.ctxt, inst_type2['id']) - self._assertEqualObjects(inst_type2, r_inst_type2, 'extra_specs') - - def test_instance_type_destroy_not_found(self): - self.assertRaises(exception.InstanceTypeNotFound, - db.instance_type_destroy, self.ctxt, 'nonexists') - - def test_instance_type_create_duplicate_name(self): - self._create_inst_type({}) - self.assertRaises(exception.InstanceTypeExists, - self._create_inst_type, - {'flavorid': 'some_random_flavor'}) - - def test_instance_type_create_duplicate_flavorid(self): - self._create_inst_type({}) - self.assertRaises(exception.InstanceTypeIdExists, - self._create_inst_type, - {'name': 'some_random_name'}) - - def test_instance_type_create_with_extra_specs(self): - extra_specs = dict(a='abc', b='def', c='ghi') - inst_type = self._create_inst_type({'extra_specs': extra_specs}) - ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at', - 'created_at', 'extra_specs'] - - self._assertEqualObjects(inst_type, self._get_base_values(), - ignored_keys) - self._assertEqualObjects(extra_specs, 
inst_type['extra_specs']) - - def test_instance_type_get_all(self): - # NOTE(boris-42): Remove base instance types - for it in db.instance_type_get_all(self.ctxt): - db.instance_type_destroy(self.ctxt, it['name']) - - instance_types = [ - {'root_gb': 600, 'memory_mb': 100, 'disabled': True, - 'is_public': True, 'name': 'a1', 'flavorid': 'f1'}, - {'root_gb': 500, 'memory_mb': 200, 'disabled': True, - 'is_public': True, 'name': 'a2', 'flavorid': 'f2'}, - {'root_gb': 400, 'memory_mb': 300, 'disabled': False, - 'is_public': True, 'name': 'a3', 'flavorid': 'f3'}, - {'root_gb': 300, 'memory_mb': 400, 'disabled': False, - 'is_public': False, 'name': 'a4', 'flavorid': 'f4'}, - {'root_gb': 200, 'memory_mb': 500, 'disabled': True, - 'is_public': False, 'name': 'a5', 'flavorid': 'f5'}, - {'root_gb': 100, 'memory_mb': 600, 'disabled': True, - 'is_public': False, 'name': 'a6', 'flavorid': 'f6'} - ] - instance_types = [self._create_inst_type(it) for it in instance_types] - - lambda_filters = { - 'min_memory_mb': lambda it, v: it['memory_mb'] >= v, - 'min_root_gb': lambda it, v: it['root_gb'] >= v, - 'disabled': lambda it, v: it['disabled'] == v, - 'is_public': lambda it, v: (v is None or it['is_public'] == v) - } - - mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]] - root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]] - disabled_filts = [{'disabled': x} for x in [True, False]] - is_public_filts = [{'is_public': x} for x in [True, False, None]] - - def assert_multi_filter_instance_type_get(filters=None): - if filters is None: - filters = {} - - expected_it = instance_types - for name, value in filters.iteritems(): - filt = lambda it: lambda_filters[name](it, value) - expected_it = filter(filt, expected_it) - - real_it = db.instance_type_get_all(self.ctxt, filters=filters) - self._assertEqualListsOfObjects(expected_it, real_it) - - #no filter - assert_multi_filter_instance_type_get() - - #test only with one filter - for filt in mem_filts: - assert_multi_filter_instance_type_get(filt) - for filt in root_filts: - assert_multi_filter_instance_type_get(filt) - for filt in disabled_filts: - assert_multi_filter_instance_type_get(filt) - for filt in is_public_filts: - assert_multi_filter_instance_type_get(filt) - - #test all filters together - for mem in mem_filts: - for root in root_filts: - for disabled in disabled_filts: - for is_public in is_public_filts: - filts = [f.items() for f in - [mem, root, disabled, is_public]] - filts = dict(reduce(lambda x, y: x + y, filts, [])) - assert_multi_filter_instance_type_get(filts) - - def test_instance_type_get(self): - inst_types = [{'name': 'abc', 'flavorid': '123'}, - {'name': 'def', 'flavorid': '456'}, - {'name': 'ghi', 'flavorid': '789'}] - inst_types = [self._create_inst_type(t) for t in inst_types] - - for inst_type in inst_types: - inst_type_by_id = db.instance_type_get(self.ctxt, inst_type['id']) - self._assertEqualObjects(inst_type, inst_type_by_id) - - def test_instance_type_get_by_name(self): - inst_types = [{'name': 'abc', 'flavorid': '123'}, - {'name': 'def', 'flavorid': '456'}, - {'name': 'ghi', 'flavorid': '789'}] - inst_types = [self._create_inst_type(t) for t in inst_types] - - for inst_type in inst_types: - inst_type_by_name = db.instance_type_get_by_name(self.ctxt, - inst_type['name']) - self._assertEqualObjects(inst_type, inst_type_by_name) - - def test_instance_type_get_by_name_not_found(self): - self._create_inst_type({}) - self.assertRaises(exception.InstanceTypeNotFoundByName, - db.instance_type_get_by_name, 
self.ctxt, 'nonexists') - - def test_instance_type_get_by_flavor_id(self): - inst_types = [{'name': 'abc', 'flavorid': '123'}, - {'name': 'def', 'flavorid': '456'}, - {'name': 'ghi', 'flavorid': '789'}] - inst_types = [self._create_inst_type(t) for t in inst_types] - - for inst_type in inst_types: - params = (self.ctxt, inst_type['flavorid']) - inst_type_by_flavorid = db.instance_type_get_by_flavor_id(*params) - self._assertEqualObjects(inst_type, inst_type_by_flavorid) - - def test_instance_type_get_by_flavor_not_found(self): - self._create_inst_type({}) - self.assertRaises(exception.FlavorNotFound, - db.instance_type_get_by_flavor_id, - self.ctxt, 'nonexists') - - -class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase): - - def setUp(self): - super(InstanceTypeExtraSpecsTestCase, self).setUp() - values = ({'name': 'n1', 'flavorid': 'f1', - 'extra_specs': dict(a='a', b='b', c='c')}, - {'name': 'n2', 'flavorid': 'f2', - 'extra_specs': dict(d='d', e='e', f='f')}) - - # NOTE(boris-42): We have already tested instance_type_create method - # with extra_specs in InstanceTypeTestCase. - self.inst_types = [self._create_inst_type(v) for v in values] - - def test_instance_type_extra_specs_get(self): - for it in self.inst_types: - real_specs = db.instance_type_extra_specs_get(self.ctxt, - it['flavorid']) - self._assertEqualObjects(it['extra_specs'], real_specs) - - def test_instance_type_extra_specs_get_item(self): - expected = dict(f1=dict(a='a', b='b', c='c'), - f2=dict(d='d', e='e', f='f')) - - for flavor, specs in expected.iteritems(): - for key, val in specs.iteritems(): - spec = db.instance_type_extra_specs_get_item(self.ctxt, flavor, - key) - self.assertEqual(spec[key], val) - - def test_instance_type_extra_specs_delete(self): - for it in self.inst_types: - specs = it['extra_specs'] - key = specs.keys()[0] - del specs[key] - db.instance_type_extra_specs_delete(self.ctxt, it['flavorid'], key) - real_specs = db.instance_type_extra_specs_get(self.ctxt, - it['flavorid']) - self._assertEqualObjects(it['extra_specs'], real_specs) - - def test_instance_type_extra_specs_update_or_create(self): - for it in self.inst_types: - current_specs = it['extra_specs'] - current_specs.update(dict(b='b1', c='c1', d='d1', e='e1')) - params = (self.ctxt, it['flavorid'], current_specs) - db.instance_type_extra_specs_update_or_create(*params) - real_specs = db.instance_type_extra_specs_get(self.ctxt, - it['flavorid']) - self._assertEqualObjects(current_specs, real_specs) - - def test_instance_type_extra_specs_update_or_create_flavor_not_found(self): - self.assertRaises(exception.FlavorNotFound, - db.instance_type_extra_specs_update_or_create, - self.ctxt, 'nonexists', {}) - - -class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase): - - def _create_inst_type_access(self, instance_type_id, project_id): - return db.instance_type_access_add(self.ctxt, instance_type_id, - project_id) - - def test_instance_type_access_get_by_flavor_id(self): - inst_types = ({'name': 'n1', 'flavorid': 'f1'}, - {'name': 'n2', 'flavorid': 'f2'}) - it1, it2 = tuple((self._create_inst_type(v) for v in inst_types)) - - access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'), - self._create_inst_type_access(it1['flavorid'], 'pr2')] - - access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')] - - for it, access_it in zip((it1, it2), (access_it1, access_it2)): - params = (self.ctxt, it['flavorid']) - real_access_it = db.instance_type_access_get_by_flavor_id(*params) - self._assertEqualListsOfObjects(access_it, 
real_access_it) - - def test_instance_type_access_get_by_flavor_id_flavor_not_found(self): - self.assertRaises(exception.FlavorNotFound, - db.instance_type_get_by_flavor_id, - self.ctxt, 'nonexists') - - def test_instance_type_access_add(self): - inst_type = self._create_inst_type({'flavorid': 'f1'}) - project_id = 'p1' - - access = self._create_inst_type_access(inst_type['flavorid'], - project_id) - # NOTE(boris-42): Check that instance_type_access_add doesn't fail and - # returns correct value. This is enough because other - # logic is checked by other methods. - self.assertFalse(access['id'] is None) - self.assertEqual(access['instance_type_id'], inst_type['id']) - self.assertEqual(access['project_id'], project_id) - - def test_instance_type_access_add_to_non_existing_flavor(self): - self.assertRaises(exception.FlavorNotFound, - self._create_inst_type_access, - 'nonexists', 'does_not_matter') - - def test_instance_type_access_add_duplicate_project_id_flavor(self): - inst_type = self._create_inst_type({'flavorid': 'f1'}) - params = (inst_type['flavorid'], 'p1') - - self._create_inst_type_access(*params) - self.assertRaises(exception.FlavorAccessExists, - self._create_inst_type_access, *params) - - def test_instance_type_access_remove(self): - inst_types = ({'name': 'n1', 'flavorid': 'f1'}, - {'name': 'n2', 'flavorid': 'f2'}) - it1, it2 = tuple((self._create_inst_type(v) for v in inst_types)) - - access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'), - self._create_inst_type_access(it1['flavorid'], 'pr2')] - - access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')] - - db.instance_type_access_remove(self.ctxt, it1['flavorid'], - access_it1[1]['project_id']) - - for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)): - params = (self.ctxt, it['flavorid']) - real_access_it = db.instance_type_access_get_by_flavor_id(*params) - self._assertEqualListsOfObjects(access_it, real_access_it) - - def test_instance_type_access_remove_flavor_not_found(self): - self.assertRaises(exception.FlavorNotFound, - db.instance_type_access_remove, - self.ctxt, 'nonexists', 'does_not_matter') - - def test_instance_type_access_remove_access_not_found(self): - inst_type = self._create_inst_type({'flavorid': 'f1'}) - params = (inst_type['flavorid'], 'p1') - self._create_inst_type_access(*params) - self.assertRaises(exception.FlavorAccessNotFound, - db.instance_type_access_remove, - self.ctxt, inst_type['flavorid'], 'p2') - - def test_instance_type_access_removed_after_instance_type_destroy(self): - inst_type1 = self._create_inst_type({'flavorid': 'f1', 'name': 'n1'}) - inst_type2 = self._create_inst_type({'flavorid': 'f2', 'name': 'n2'}) - values = [ - (inst_type1['flavorid'], 'p1'), - (inst_type1['flavorid'], 'p2'), - (inst_type2['flavorid'], 'p3') - ] - for v in values: - self._create_inst_type_access(*v) - - db.instance_type_destroy(self.ctxt, inst_type1['name']) - - p = (self.ctxt, inst_type1['flavorid']) - self.assertEqual(0, len(db.instance_type_access_get_by_flavor_id(*p))) - p = (self.ctxt, inst_type2['flavorid']) - self.assertEqual(1, len(db.instance_type_access_get_by_flavor_id(*p))) - db.instance_type_destroy(self.ctxt, inst_type2['name']) - self.assertEqual(0, len(db.instance_type_access_get_by_flavor_id(*p))) - - -class FixedIPTestCase(BaseInstanceTypeTestCase): - def _timeout_test(self, ctxt, timeout, multi_host): - instance = db.instance_create(ctxt, dict(host='foo')) - net = db.network_create_safe(ctxt, dict(multi_host=multi_host, - host='bar')) - old = timeout 
- datetime.timedelta(seconds=5) - new = timeout + datetime.timedelta(seconds=5) - # should deallocate - db.fixed_ip_create(ctxt, dict(allocated=False, - instance_uuid=instance['uuid'], - network_id=net['id'], - updated_at=old)) - # still allocated - db.fixed_ip_create(ctxt, dict(allocated=True, - instance_uuid=instance['uuid'], - network_id=net['id'], - updated_at=old)) - # wrong network - db.fixed_ip_create(ctxt, dict(allocated=False, - instance_uuid=instance['uuid'], - network_id=None, - updated_at=old)) - # too new - db.fixed_ip_create(ctxt, dict(allocated=False, - instance_uuid=instance['uuid'], - network_id=None, - updated_at=new)) - - def mock_db_query_first_to_raise_data_error_exception(self): - self.mox.StubOutWithMock(query.Query, 'first') - query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(), - mox.IgnoreArg(), - mox.IgnoreArg())) - self.mox.ReplayAll() - - def test_fixed_ip_disassociate_all_by_timeout_single_host(self): - now = timeutils.utcnow() - self._timeout_test(self.ctxt, now, False) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now) - self.assertEqual(result, 0) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now) - self.assertEqual(result, 1) - - def test_fixed_ip_disassociate_all_by_timeout_multi_host(self): - now = timeutils.utcnow() - self._timeout_test(self.ctxt, now, True) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now) - self.assertEqual(result, 1) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now) - self.assertEqual(result, 0) - - def test_fixed_ip_get_by_floating_address(self): - fixed_ip = db.fixed_ip_create(self.ctxt, {'address': 'fixed'}) - values = {'address': 'floating', - 'fixed_ip_id': fixed_ip['id']} - floating = db.floating_ip_create(self.ctxt, values)['address'] - fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating) - self._assertEqualObjects(fixed_ip, fixed_ip_ref) - - def test_fixed_ip_get_by_host(self): - host_ips = { - 'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'], - 'host2': ['1.1.1.4', '1.1.1.5'], - 'host3': ['1.1.1.6'] - } - - for host, ips in host_ips.iteritems(): - for ip in ips: - instance_uuid = self._create_instance(host=host) - db.fixed_ip_create(self.ctxt, {'address': ip}) - db.fixed_ip_associate(self.ctxt, ip, instance_uuid) - - for host, ips in host_ips.iteritems(): - ips_on_host = map(lambda x: x['address'], - db.fixed_ip_get_by_host(self.ctxt, host)) - self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips) - - def test_fixed_ip_get_by_network_host_not_found_exception(self): - self.assertRaises( - exception.FixedIpNotFoundForNetworkHost, - db.fixed_ip_get_by_network_host, - self.ctxt, 1, 'ignore') - - def test_fixed_ip_get_by_network_host_fixed_ip_found(self): - db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host')) - - fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host') - - self.assertEquals(1, fip['network_id']) - self.assertEquals('host', fip['host']) - - def _create_instance(self, **kwargs): - instance = db.instance_create(self.ctxt, kwargs) - return instance['uuid'] - - def test_fixed_ip_get_by_instance_fixed_ip_found(self): - instance_uuid = self._create_instance() - - FIXED_IP_ADDRESS = 'address' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS)) - - ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid) - self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS], - [ips_list[0].address]) - - def 
test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self): - instance_uuid = self._create_instance() - - FIXED_IP_ADDRESS_1 = 'address_1' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = 'address_2' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2)) - - ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - [ips_list[0].address, ips_list[1].address]) - - def test_fixed_ip_get_by_instance_inappropriate_ignored(self): - instance_uuid = self._create_instance() - - FIXED_IP_ADDRESS_1 = 'address_1' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = 'address_2' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2)) - - another_instance = db.instance_create(self.ctxt, {}) - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=another_instance['uuid'], address="another_addr")) - - ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - [ips_list[0].address, ips_list[1].address]) - - def test_fixed_ip_get_by_instance_not_found_exception(self): - instance_uuid = self._create_instance() - - self.assertRaises(exception.FixedIpNotFoundForInstance, - db.fixed_ip_get_by_instance, - self.ctxt, instance_uuid) - - def test_fixed_ips_by_virtual_interface_fixed_ip_found(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - FIXED_IP_ADDRESS = 'address' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS)) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS], - [ips_list[0].address]) - - def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - FIXED_IP_ADDRESS_1 = 'address_1' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = 'address_2' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2)) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - [ips_list[0].address, ips_list[1].address]) - - def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - FIXED_IP_ADDRESS_1 = 'address_1' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = 'address_2' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2)) - - another_vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=another_vif.id, address="another_addr")) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - [ips_list[0].address, 
ips_list[1].address]) - - def test_fixed_ips_by_virtual_interface_no_ip_found(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self.assertEquals(0, len(ips_list)) - - def test_fixed_ip_count_by_project_one_ip(self): - PROJECT_ID = "project_id" - instance_uuid = self._create_instance(project_id=PROJECT_ID) - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address='address')) - - ips_count = db.fixed_ip_count_by_project(self.ctxt, PROJECT_ID) - self.assertEquals(1, ips_count) - - def test_fixed_ip_count_by_project_two_ips_for_different_instances(self): - PROJECT_ID = "project_id" - instance_uuid = self._create_instance(project_id=PROJECT_ID) - - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address='address_1')) - - another_instance_for_this_project =\ - db.instance_create(self.ctxt, dict(project_id=PROJECT_ID)) - - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=another_instance_for_this_project['uuid'], - address='address_2')) - - ips_count = db.fixed_ip_count_by_project(self.ctxt, PROJECT_ID) - self.assertEquals(2, ips_count) - - def create_fixed_ip(self, **params): - default_params = {'address': '192.168.0.1'} - default_params.update(params) - return db.fixed_ip_create(self.ctxt, default_params)['address'] - - def test_fixed_ip_associate_fails_if_ip_not_in_network(self): - instance_uuid = self._create_instance() - self.assertRaises(exception.FixedIpNotFoundForNetwork, - db.fixed_ip_associate, - self.ctxt, None, instance_uuid) - - def test_fixed_ip_associate_fails_if_ip_in_use(self): - instance_uuid = self._create_instance() - - address = self.create_fixed_ip(instance_uuid=instance_uuid) - self.assertRaises(exception.FixedIpAlreadyInUse, - db.fixed_ip_associate, - self.ctxt, address, instance_uuid) - - def test_fixed_ip_associate_succeeds(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip(network_id=network['id']) - db.fixed_ip_associate(self.ctxt, address, instance_uuid, - network_id=network['id']) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - - def test_fixed_ip_associate_succeeds_and_sets_network(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip() - db.fixed_ip_associate(self.ctxt, address, instance_uuid, - network_id=network['id']) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - self.assertEqual(fixed_ip['network_id'], network['id']) - - def test_fixed_ip_associate_pool_invalid_uuid(self): - instance_uuid = '123' - self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool, - self.ctxt, None, instance_uuid) - - def test_fixed_ip_associate_pool_no_more_fixed_ips(self): - instance_uuid = self._create_instance() - self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool, - self.ctxt, None, instance_uuid) - - def test_fixed_ip_associate_pool_succeeds(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip(network_id=network['id']) - db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - 
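-        # NOTE: unlike fixed_ip_associate(), which claims one specific
-        # address, fixed_ip_associate_pool() grabs any free address in the
-        # given network (raising NoMoreFixedIps when none is left, as the
-        # test above shows). A minimal usage sketch, illustrative names only:
-        #     addr = db.fixed_ip_associate_pool(ctxt, network_id,
-        #                                       instance_uuid)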
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
-
-    def test_fixed_ip_create(self):
-        instance_uuid = self._create_instance()
-        network_id = db.network_create_safe(self.ctxt, {})['id']
-        param = {
-            'reserved': False,
-            'deleted': 0,
-            'leased': False,
-            'host': '127.0.0.1',
-            'address': 'localhost',
-            'allocated': False,
-            'instance_uuid': instance_uuid,
-            'network_id': network_id,
-            'virtual_interface_id': None
-        }
-
-        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
-        fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
-        self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
-
-    def test_fixed_ip_bulk_create(self):
-        address = 'fixed_ip_address'
-        instance_uuid = self._create_instance()
-        network_id_1 = db.network_create_safe(self.ctxt, {})['id']
-        network_id_2 = db.network_create_safe(self.ctxt, {})['id']
-        params = [
-            {'reserved': False, 'deleted': 0, 'leased': False,
-             'host': '127.0.0.1', 'address': address, 'allocated': False,
-             'instance_uuid': instance_uuid, 'network_id': network_id_1,
-             'virtual_interface_id': None},
-            {'reserved': False, 'deleted': 0, 'leased': False,
-             'host': 'localhost', 'address': address, 'allocated': True,
-             'instance_uuid': instance_uuid, 'network_id': network_id_2,
-             'virtual_interface_id': None}
-        ]
-
-        db.fixed_ip_bulk_create(self.ctxt, params)
-        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
-        fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
-
-        # we have no `id` in incoming data so we cannot use
-        # _assertEqualListsOfObjects to compare incoming data and received
-        # objects
-        fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
-        params = sorted(params, key=lambda i: i['network_id'])
-        for param, ip in zip(params, fixed_ip_data):
-            self._assertEqualObjects(param, ip, ignored_keys)
-
-    def test_fixed_ip_disassociate(self):
-        address = 'fixed_ip_address'
-        instance_uuid = self._create_instance()
-        network_id = db.network_create_safe(self.ctxt, {})['id']
-        param = {
-            'reserved': False,
-            'deleted': 0,
-            'leased': False,
-            'host': '127.0.0.1',
-            'address': address,
-            'allocated': False,
-            'instance_uuid': instance_uuid,
-            'network_id': network_id,
-            'virtual_interface_id': None
-        }
-        db.fixed_ip_create(self.ctxt, param)
-
-        db.fixed_ip_disassociate(self.ctxt, address)
-        fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
-        ignored_keys = ['created_at', 'id', 'deleted_at',
-                        'updated_at', 'instance_uuid']
-        self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
-        self.assertIsNone(fixed_ip_data['instance_uuid'])
-
-    def test_fixed_ip_get_not_found_exception(self):
-        self.assertRaises(exception.FixedIpNotFound,
-                          db.fixed_ip_get, self.ctxt, 0)
-
-    def test_fixed_ip_get_success2(self):
-        address = 'fixed_ip_address'
-        instance_uuid = self._create_instance()
-        network_id = db.network_create_safe(self.ctxt, {})['id']
-        param = {
-            'reserved': False,
-            'deleted': 0,
-            'leased': False,
-            'host': '127.0.0.1',
-            'address': address,
-            'allocated': False,
-            'instance_uuid': instance_uuid,
-            'network_id': network_id,
-            'virtual_interface_id': None
-        }
-        fixed_ip_id = db.fixed_ip_create(self.ctxt, param)
-
-        self.ctxt.is_admin = False
-        self.assertRaises(exception.NotAuthorized, db.fixed_ip_get,
-                          self.ctxt, fixed_ip_id)
-
-    def test_fixed_ip_get_success(self):
-        address = 'fixed_ip_address'
-        instance_uuid = self._create_instance()
-        network_id = db.network_create_safe(self.ctxt, {})['id']
-        param = {
-            'reserved': False,
-            'deleted': 0,
-            'leased': False,
-            'host': '127.0.0.1',
'address': address,
-            'allocated': False,
-            'instance_uuid': instance_uuid,
-            'network_id': network_id,
-            'virtual_interface_id': None
-        }
-        db.fixed_ip_create(self.ctxt, param)
-
-        fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
-        fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
-        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
-        self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
-
-    def test_fixed_ip_get_by_address_detailed_not_found_exception(self):
-        self.assertRaises(exception.FixedIpNotFoundForAddress,
-                          db.fixed_ip_get_by_address_detailed, self.ctxt, 'x')
-
-    def test_fixed_ip_get_by_address_with_data_error_exception(self):
-        self.mock_db_query_first_to_raise_data_error_exception()
-        self.assertRaises(exception.FixedIpInvalid,
-                          db.fixed_ip_get_by_address_detailed, self.ctxt, 'x')
-
-    def test_fixed_ip_get_by_address_detailed_success(self):
-        address = 'fixed_ip_address_123'
-        instance_uuid = self._create_instance()
-        network_id = db.network_create_safe(self.ctxt, {})['id']
-        param = {
-            'reserved': False,
-            'deleted': 0,
-            'leased': False,
-            'host': '127.0.0.1',
-            'address': address,
-            'allocated': False,
-            'instance_uuid': instance_uuid,
-            'network_id': network_id,
-            'virtual_interface_id': None
-        }
-        db.fixed_ip_create(self.ctxt, param)
-
-        fixed_ip_data = db.fixed_ip_get_by_address_detailed(self.ctxt,
-                                                            address)
-        # fixed ip check here
-        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
-        self._assertEqualObjects(param, fixed_ip_data[0], ignored_keys)
-
-        # network model check here
-        network_data = db.network_get(self.ctxt, network_id)
-        self._assertEqualObjects(network_data, fixed_ip_data[1])
-
-        # Instance check here
-        instance_data = db.instance_get_by_uuid(self.ctxt, instance_uuid)
-        ignored_keys = ['info_cache', 'system_metadata',
-                        'security_groups', 'metadata']  # joined relations
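-        # NOTE: fixed_ip_get_by_address_detailed() returns a
-        # (fixed_ip, network, instance) triple rather than a single model,
-        # so a caller would typically unpack it, e.g. (illustrative only):
-        #     fip, net, inst = db.fixed_ip_get_by_address_detailed(ctxt,
-        #                                                          addr)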
-        self._assertEqualObjects(instance_data, fixed_ip_data[2],
-                                 ignored_keys)
-
-    def test_fixed_ip_update_not_found_for_address(self):
-        self.assertRaises(exception.FixedIpNotFoundForAddress,
-                          db.fixed_ip_update, self.ctxt,
-                          'fixed_ip_address', {})
-
-    def test_fixed_ip_update(self):
-        instance_uuid_1 = self._create_instance()
-        instance_uuid_2 = self._create_instance()
-        network_id_1 = db.network_create_safe(self.ctxt, {})['id']
-        network_id_2 = db.network_create_safe(self.ctxt, {})['id']
-        param_1 = {
-            'reserved': True, 'deleted': 0, 'leased': True,
-            'host': '192.168.133.1', 'address': 'localhost',
-            'allocated': True, 'instance_uuid': instance_uuid_1,
-            'network_id': network_id_1, 'virtual_interface_id': '123',
-        }
-
-        param_2 = {
-            'reserved': False, 'deleted': 0, 'leased': False,
-            'host': '127.0.0.1', 'address': 'localhost', 'allocated': False,
-            'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
-            'virtual_interface_id': None
-        }
-
-        ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
-        fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
-        db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
-        fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
-                                                           param_2['address'])
-        self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
-
-
-class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
-    def setUp(self):
-        super(FloatingIpTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-
-    def _get_base_values(self):
-        return {
-            'address': '1.1.1.1',
-            'fixed_ip_id': None,
-            'project_id': 'fake_project',
-            'host': 'fake_host',
-            'auto_assigned': False,
-            'pool': 'fake_pool',
-            'interface': 'fake_interface',
-        }
-
-    def mock_db_query_first_to_raise_data_error_exception(self):
-        self.mox.StubOutWithMock(query.Query, 'first')
-        query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(),
-                                                   mox.IgnoreArg(),
-                                                   mox.IgnoreArg()))
-        self.mox.ReplayAll()
-
-    def _create_floating_ip(self, values):
-        if not values:
-            values = {}
-        vals = self._get_base_values()
-        vals.update(values)
-        return db.floating_ip_create(self.ctxt, vals)
-
-    def test_floating_ip_get(self):
-        values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
-        floating_ips = [self._create_floating_ip(val) for val in values]
-
-        for floating_ip in floating_ips:
-            real_floating_ip = db.floating_ip_get(self.ctxt,
-                                                  floating_ip['id'])
-            self._assertEqualObjects(floating_ip, real_floating_ip,
-                                     ignored_keys=['fixed_ip'])
-
-    def test_floating_ip_get_not_found(self):
-        self.assertRaises(exception.FloatingIpNotFound,
-                          db.floating_ip_get, self.ctxt, 100500)
-
-    def test_floating_ip_get_with_long_id_not_found(self):
-        self.mock_db_query_first_to_raise_data_error_exception()
-        self.assertRaises(exception.InvalidID,
-                          db.floating_ip_get, self.ctxt, 123456789101112)
-
-    def test_floating_ip_get_pools(self):
-        values = [
-            {'address': '0.0.0.0', 'pool': 'abc'},
-            {'address': '1.1.1.1', 'pool': 'abc'},
-            {'address': '2.2.2.2', 'pool': 'def'},
-            {'address': '3.3.3.3', 'pool': 'ghi'},
-        ]
-        for val in values:
-            self._create_floating_ip(val)
-        expected_pools = [{'name': x}
-                          for x in set(map(lambda x: x['pool'], values))]
-        real_pools = db.floating_ip_get_pools(self.ctxt)
-        self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
-
-    def test_floating_ip_allocate_address(self):
-        pools = {
-            'pool1': ['0.0.0.0', '1.1.1.1'],
-            'pool2': ['2.2.2.2'],
-            'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
-        }
-        for pool, addresses in pools.iteritems():
-            for address in addresses:
-                vals = {'pool': pool, 'address': address, 'project_id': None}
-                self._create_floating_ip(vals)
-
-        project_id = self._get_base_values()['project_id']
-        for pool, addresses in pools.iteritems():
-            alloc_addrs = []
-            for i in addresses:
-                float_addr = db.floating_ip_allocate_address(self.ctxt,
-                                                             project_id, pool)
-                alloc_addrs.append(float_addr)
-            self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
-
-    def test_floating_ip_allocate_address_no_more_floating_ips(self):
-        self.assertRaises(exception.NoMoreFloatingIps,
-                          db.floating_ip_allocate_address,
-                          self.ctxt, 'any_project_id', 'no_such_pool')
-
-    def test_floating_ip_allocate_not_authorized(self):
-        ctxt = context.RequestContext(user_id='a', project_id='abc',
-                                      is_admin=False)
-        self.assertRaises(exception.NotAuthorized,
-                          db.floating_ip_allocate_address,
-                          ctxt, 'other_project_id', 'any_pool')
-
-    def _get_existing_ips(self):
-        return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
-
-    def test_floating_ip_bulk_create(self):
-        expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
-        db.floating_ip_bulk_create(self.ctxt,
-                                   map(lambda x: {'address': x},
-                                       expected_ips))
-        self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
-                                                 expected_ips)
-
-    def test_floating_ip_bulk_create_duplicate(self):
-        ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
-        prepare_ips = lambda x: {'address': x}
-
-        db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
-        self.assertRaises(exception.FloatingIpExists,
-                          db.floating_ip_bulk_create,
-                          self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
-        self.assertRaises(exception.FloatingIpNotFoundForAddress,
-                          db.floating_ip_get_by_address,
-                          self.ctxt, '1.1.1.5')
-
-    def test_floating_ip_bulk_destroy(self):
-        ips_for_delete = []
-        ips_for_non_delete = []
-
-        def create_ips(i):
-            return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, 256)]
-
-        # NOTE(boris-42): Create more than 256 IPs to check that
-        #                 _ip_range_splitter works properly.
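-        # NOTE: range(1, 3) x range(1, 256) below queues 2 * 255 = 510
-        # addresses for deletion, so a splitter that presumably batches
-        # ranges in chunks of up to 256 has to emit more than one chunk.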
- for i in range(1, 3): - ips_for_delete.extend(create_ips(i)) - ips_for_non_delete.extend(create_ips(3)) - - db.floating_ip_bulk_create(self.ctxt, - ips_for_delete + ips_for_non_delete) - db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete) - - expected_addresses = map(lambda x: x['address'], ips_for_non_delete) - self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(), - expected_addresses) - - def test_floating_ip_create(self): - floating_ip = self._create_floating_ip({}) - ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at', - 'created_at'] - - self.assertFalse(floating_ip['id'] is None) - self._assertEqualObjects(floating_ip, self._get_base_values(), - ignored_keys) - - def test_floating_ip_create_duplicate(self): - self._create_floating_ip({}) - self.assertRaises(exception.FloatingIpExists, - self._create_floating_ip, {}) - - def test_floating_ip_count_by_project(self): - projects = { - 'project1': ['1.1.1.1', '2.2.2.2', '3.3.3.3'], - 'project2': ['4.4.4.4', '5.5.5.5'], - 'project3': ['6.6.6.6'] - } - for project_id, addresses in projects.iteritems(): - for address in addresses: - self._create_floating_ip({'project_id': project_id, - 'address': address}) - for project_id, addresses in projects.iteritems(): - real_count = db.floating_ip_count_by_project(self.ctxt, project_id) - self.assertEqual(len(addresses), real_count) - - def test_floating_ip_count_by_project_not_authorized(self): - ctxt = context.RequestContext(user_id='a', project_id='abc', - is_admin=False) - self.assertRaises(exception.NotAuthorized, - db.floating_ip_count_by_project, ctxt, 'def') - - def _create_fixed_ip(self, params): - default_params = {'address': '192.168.0.1'} - default_params.update(params) - return db.fixed_ip_create(self.ctxt, default_params)['address'] - - def test_floating_ip_fixed_ip_associate(self): - float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3'] - - float_ips = [self._create_floating_ip({'address': address}) - for address in float_addresses] - fixed_addrs = [self._create_fixed_ip({'address': address}) - for address in fixed_addresses] - - for float_ip, fixed_addr in zip(float_ips, fixed_addrs): - fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt, - float_ip.address, - fixed_addr, 'host') - self.assertEqual(fixed_ip.address, fixed_addr) - - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id) - self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id) - self.assertEqual('host', updated_float_ip.host) - - # Test that already allocated float_ip returns None - result = db.floating_ip_fixed_ip_associate(self.ctxt, - float_addresses[0], - fixed_addresses[0], 'host') - self.assertTrue(result is None) - - def test_floating_ip_fixed_ip_associate_float_ip_not_found(self): - self.assertRaises(exception.FloatingIpNotFoundForAddress, - db.floating_ip_fixed_ip_associate, - self.ctxt, 'non exist', 'some', 'some') - - def test_floating_ip_deallocate(self): - values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'} - float_ip = self._create_floating_ip(values) - db.floating_ip_deallocate(self.ctxt, float_ip.address) - - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id) - self.assertTrue(updated_float_ip.project_id is None) - self.assertTrue(updated_float_ip.host is None) - self.assertFalse(updated_float_ip.auto_assigned) - - def test_floating_ip_destroy(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_ips = [self._create_floating_ip({'address': addr}) - for addr in addresses] - - 
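-        # NOTE: destroy the addresses one at a time and verify after each
-        # step that floating_ip_get_all() shrinks accordingly; once the
-        # table is empty it raises NoFloatingIpsDefined instead of
-        # returning an empty list, hence the else branch below.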
expected_len = len(addresses) - for float_ip in float_ips: - db.floating_ip_destroy(self.ctxt, float_ip.address) - self.assertRaises(exception.FloatingIpNotFound, - db.floating_ip_get, self.ctxt, float_ip.id) - expected_len -= 1 - if expected_len > 0: - self.assertEqual(expected_len, - len(db.floating_ip_get_all(self.ctxt))) - else: - self.assertRaises(exception.NoFloatingIpsDefined, - db.floating_ip_get_all, self.ctxt) - - def test_floating_ip_disassociate(self): - float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3'] - - float_ips = [self._create_floating_ip({'address': address}) - for address in float_addresses] - fixed_addrs = [self._create_fixed_ip({'address': address}) - for address in fixed_addresses] - - for float_ip, fixed_addr in zip(float_ips, fixed_addrs): - db.floating_ip_fixed_ip_associate(self.ctxt, - float_ip.address, - fixed_addr, 'host') - - for float_ip, fixed_addr in zip(float_ips, fixed_addrs): - fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address) - self.assertEqual(fixed.address, fixed_addr) - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id) - self.assertTrue(updated_float_ip.fixed_ip_id is None) - self.assertTrue(updated_float_ip.host is None) - - def test_floating_ip_disassociate_not_found(self): - self.assertRaises(exception.FloatingIpNotFoundForAddress, - db.floating_ip_disassociate, self.ctxt, 'non exist') - - def test_floating_ip_set_auto_assigned(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_ips = [self._create_floating_ip({'address': addr, - 'auto_assigned': False}) - for addr in addresses] - - for i in range(2): - db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address) - for i in range(2): - float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id) - self.assertTrue(float_ip.auto_assigned) - - float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id) - self.assertFalse(float_ip.auto_assigned) - - def test_floating_ip_get_all(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_ips = [self._create_floating_ip({'address': addr}) - for addr in addresses] - self._assertEqualListsOfObjects(float_ips, - db.floating_ip_get_all(self.ctxt)) - - def test_floating_ip_get_all_not_found(self): - self.assertRaises(exception.NoFloatingIpsDefined, - db.floating_ip_get_all, self.ctxt) - - def test_floating_ip_get_all_by_host(self): - hosts = { - 'host1': ['1.1.1.1', '1.1.1.2'], - 'host2': ['2.1.1.1', '2.1.1.2'], - 'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3'] - } - - hosts_with_float_ips = {} - for host, addresses in hosts.iteritems(): - hosts_with_float_ips[host] = [] - for address in addresses: - float_ip = self._create_floating_ip({'host': host, - 'address': address}) - hosts_with_float_ips[host].append(float_ip) - - for host, float_ips in hosts_with_float_ips.iteritems(): - real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host) - self._assertEqualListsOfObjects(float_ips, real_float_ips) - - def test_floating_ip_get_all_by_host_not_found(self): - self.assertRaises(exception.FloatingIpNotFoundForHost, - db.floating_ip_get_all_by_host, - self.ctxt, 'non_exists_host') - - def test_floating_ip_get_all_by_project(self): - projects = { - 'pr1': ['1.1.1.1', '1.1.1.2'], - 'pr2': ['2.1.1.1', '2.1.1.2'], - 'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3'] - } - - projects_with_float_ips = {} - for project_id, addresses in projects.iteritems(): - projects_with_float_ips[project_id] = [] - for address in addresses: - float_ip = self._create_floating_ip({'project_id': 
project_id, - 'address': address}) - projects_with_float_ips[project_id].append(float_ip) - - for project_id, float_ips in projects_with_float_ips.iteritems(): - real_float_ips = db.floating_ip_get_all_by_project(self.ctxt, - project_id) - self._assertEqualListsOfObjects(float_ips, real_float_ips, - ignored_keys='fixed_ip') - - def test_floating_ip_get_all_by_project_not_authorized(self): - ctxt = context.RequestContext(user_id='a', project_id='abc', - is_admin=False) - self.assertRaises(exception.NotAuthorized, - db.floating_ip_get_all_by_project, - ctxt, 'other_project') - - def test_floating_ip_get_by_address(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_ips = [self._create_floating_ip({'address': addr}) - for addr in addresses] - - for float_ip in float_ips: - real_float_ip = db.floating_ip_get_by_address(self.ctxt, - float_ip.address) - self._assertEqualObjects(float_ip, real_float_ip, - ignored_keys='fixed_ip') - - def test_floating_ip_get_by_address_not_found(self): - self.assertRaises(exception.FloatingIpNotFoundForAddress, - db.floating_ip_get_by_address, - self.ctxt, 'non_exists_host') - - def test_floating_ip_get_by_invalid_address(self): - self.mock_db_query_first_to_raise_data_error_exception() - self.assertRaises(exception.InvalidIpAddressError, - db.floating_ip_get_by_address, - self.ctxt, 'non_exists_host') - - def test_floating_ip_get_by_fixed_address(self): - fixed_float = [ - ('1.1.1.1', '2.2.2.1'), - ('1.1.1.2', '2.2.2.2'), - ('1.1.1.3', '2.2.2.3') - ] - - for fixed_addr, float_addr in fixed_float: - self._create_floating_ip({'address': float_addr}) - self._create_fixed_ip({'address': fixed_addr}) - db.floating_ip_fixed_ip_associate(self.ctxt, float_addr, - fixed_addr, 'some_host') - - for fixed_addr, float_addr in fixed_float: - float_ip = db.floating_ip_get_by_fixed_address(self.ctxt, - fixed_addr) - self.assertEqual(float_addr, float_ip[0]['address']) - - def test_floating_ip_get_by_fixed_ip_id(self): - fixed_float = [ - ('1.1.1.1', '2.2.2.1'), - ('1.1.1.2', '2.2.2.2'), - ('1.1.1.3', '2.2.2.3') - ] - - for fixed_addr, float_addr in fixed_float: - self._create_floating_ip({'address': float_addr}) - self._create_fixed_ip({'address': fixed_addr}) - db.floating_ip_fixed_ip_associate(self.ctxt, float_addr, - fixed_addr, 'some_host') - - for fixed_addr, float_addr in fixed_float: - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr) - float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt, - fixed_ip['id']) - self.assertEqual(float_addr, float_ip[0]['address']) - - def test_floating_ip_update(self): - float_ip = self._create_floating_ip({}) - - values = { - 'project_id': 'some_pr', - 'host': 'some_host', - 'auto_assigned': True, - 'interface': 'some_interface', - 'pool': 'some_pool' - } - db.floating_ip_update(self.ctxt, float_ip['address'], values) - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id']) - self._assertEqualObjects(updated_float_ip, values, - ignored_keys=['id', 'address', 'updated_at', - 'deleted_at', 'created_at', - 'deleted', 'fixed_ip_id', - 'fixed_ip']) - - def test_floating_ip_update_to_duplicate(self): - float_ip1 = self._create_floating_ip({'address': '1.1.1.1'}) - float_ip2 = self._create_floating_ip({'address': '1.1.1.2'}) - - self.assertRaises(exception.FloatingIpExists, - db.floating_ip_update, - self.ctxt, float_ip2['address'], - {'address': float_ip1['address']}) - - -class InstanceDestroyConstraints(test.TestCase): - - def test_destroy_with_equal_any_constraint_met(self): - ctx = 
context.get_admin_context() - instance = db.instance_create(ctx, {'task_state': 'deleting'}) - constraint = db.constraint(task_state=db.equal_any('deleting')) - db.instance_destroy(ctx, instance['uuid'], constraint) - self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, - ctx, instance['uuid']) - - def test_destroy_with_equal_any_constraint_not_met(self): - ctx = context.get_admin_context() - instance = db.instance_create(ctx, {'vm_state': 'resize'}) - constraint = db.constraint(vm_state=db.equal_any('active', 'error')) - self.assertRaises(exception.ConstraintNotMet, db.instance_destroy, - ctx, instance['uuid'], constraint) - instance = db.instance_get_by_uuid(ctx, instance['uuid']) - self.assertFalse(instance['deleted']) - - def test_destroy_with_not_equal_constraint_met(self): - ctx = context.get_admin_context() - instance = db.instance_create(ctx, {'task_state': 'deleting'}) - constraint = db.constraint(task_state=db.not_equal('error', 'resize')) - db.instance_destroy(ctx, instance['uuid'], constraint) - self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, - ctx, instance['uuid']) - - def test_destroy_with_not_equal_constraint_not_met(self): - ctx = context.get_admin_context() - instance = db.instance_create(ctx, {'vm_state': 'active'}) - constraint = db.constraint(vm_state=db.not_equal('active', 'error')) - self.assertRaises(exception.ConstraintNotMet, db.instance_destroy, - ctx, instance['uuid'], constraint) - instance = db.instance_get_by_uuid(ctx, instance['uuid']) - self.assertFalse(instance['deleted']) - - -class VolumeUsageDBApiTestCase(test.TestCase): - - def setUp(self): - super(VolumeUsageDBApiTestCase, self).setUp() - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, self.project_id) - - self.useFixture(test.TimeOverride()) - - def test_vol_usage_update_no_totals_update(self): - ctxt = context.get_admin_context() - now = timeutils.utcnow() - start_time = now - datetime.timedelta(seconds=10) - refreshed_time = now - datetime.timedelta(seconds=5) - - expected_vol_usages = [{'volume_id': u'1', - 'instance_uuid': 'fake-instance-uuid1', - 'project_id': 'fake-project-uuid1', - 'user_id': 'fake-user-uuid1', - 'curr_reads': 1000, - 'curr_read_bytes': 2000, - 'curr_writes': 3000, - 'curr_write_bytes': 4000, - 'tot_reads': 0, - 'tot_read_bytes': 0, - 'tot_writes': 0, - 'tot_write_bytes': 0}, - {'volume_id': u'2', - 'instance_uuid': 'fake-instance-uuid2', - 'project_id': 'fake-project-uuid2', - 'user_id': 'fake-user-uuid2', - 'curr_reads': 100, - 'curr_read_bytes': 200, - 'curr_writes': 300, - 'curr_write_bytes': 400, - 'tot_reads': 0, - 'tot_read_bytes': 0, - 'tot_writes': 0, - 'tot_write_bytes': 0}] - - def _compare(vol_usage, expected): - for key, value in expected.items(): - self.assertEqual(vol_usage[key], value) - - vol_usages = db.vol_get_usage_by_time(ctxt, start_time) - self.assertEqual(len(vol_usages), 0) - - vol_usage = db.vol_usage_update(ctxt, 1, rd_req=10, rd_bytes=20, - wr_req=30, wr_bytes=40, - instance_id='fake-instance-uuid1', - project_id='fake-project-uuid1', - user_id='fake-user-uuid1', - availability_zone='fake-az') - vol_usage = db.vol_usage_update(ctxt, 2, rd_req=100, rd_bytes=200, - wr_req=300, wr_bytes=400, - instance_id='fake-instance-uuid2', - project_id='fake-project-uuid2', - user_id='fake-user-uuid2', - availability_zone='fake-az') - vol_usage = db.vol_usage_update(ctxt, 1, rd_req=1000, rd_bytes=2000, - wr_req=3000, wr_bytes=4000, - instance_id='fake-instance-uuid1', - 
project_id='fake-project-uuid1', - user_id='fake-user-uuid1', - availability_zone='fake-az', - last_refreshed=refreshed_time) - - vol_usages = db.vol_get_usage_by_time(ctxt, start_time) - self.assertEqual(len(vol_usages), 2) - _compare(vol_usages[0], expected_vol_usages[0]) - _compare(vol_usages[1], expected_vol_usages[1]) - - def test_vol_usage_update_totals_update(self): - ctxt = context.get_admin_context() - now = timeutils.utcnow() - start_time = now - datetime.timedelta(seconds=10) - - vol_usage = db.vol_usage_update(ctxt, 1, rd_req=100, rd_bytes=200, - wr_req=300, wr_bytes=400, - instance_id='fake-instance-uuid', - project_id='fake-project-uuid', - user_id='fake-user-uuid', - availability_zone='fake-az') - current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0] - self.assertEqual(current_usage['tot_reads'], 0) - self.assertEqual(current_usage['curr_reads'], 100) - - vol_usage = db.vol_usage_update(ctxt, 1, rd_req=200, rd_bytes=300, - wr_req=400, wr_bytes=500, - instance_id='fake-instance-uuid', - project_id='fake-project-uuid', - user_id='fake-user-uuid', - availability_zone='fake-az', - update_totals=True) - current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0] - self.assertEqual(current_usage['tot_reads'], 200) - self.assertEqual(current_usage['curr_reads'], 0) - - vol_usage = db.vol_usage_update(ctxt, 1, rd_req=300, rd_bytes=400, - wr_req=500, wr_bytes=600, - instance_id='fake-instance-uuid', - project_id='fake-project-uuid', - availability_zone='fake-az', - user_id='fake-user-uuid') - current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0] - self.assertEqual(current_usage['tot_reads'], 200) - self.assertEqual(current_usage['curr_reads'], 300) - - vol_usage = db.vol_usage_update(ctxt, 1, rd_req=400, rd_bytes=500, - wr_req=600, wr_bytes=700, - instance_id='fake-instance-uuid', - project_id='fake-project-uuid', - user_id='fake-user-uuid', - availability_zone='fake-az', - update_totals=True) - - vol_usages = db.vol_get_usage_by_time(ctxt, start_time) - - expected_vol_usages = {'volume_id': u'1', - 'project_id': 'fake-project-uuid', - 'user_id': 'fake-user-uuid', - 'instance_uuid': 'fake-instance-uuid', - 'availability_zone': 'fake-az', - 'tot_reads': 600, - 'tot_read_bytes': 800, - 'tot_writes': 1000, - 'tot_write_bytes': 1200, - 'curr_reads': 0, - 'curr_read_bytes': 0, - 'curr_writes': 0, - 'curr_write_bytes': 0} - - self.assertEquals(1, len(vol_usages)) - for key, value in expected_vol_usages.items(): - self.assertEqual(vol_usages[0][key], value) - - def test_vol_usage_update_when_blockdevicestats_reset(self): - ctxt = context.get_admin_context() - now = timeutils.utcnow() - start_time = now - datetime.timedelta(seconds=10) - - vol_usages = db.vol_get_usage_by_time(ctxt, start_time) - self.assertEqual(len(vol_usages), 0) - - db.vol_usage_update(ctxt, 1, - rd_req=10000, rd_bytes=20000, - wr_req=30000, wr_bytes=40000, - instance_id='fake-instance-uuid1', - project_id='fake-project-uuid1', - availability_zone='fake-az', - user_id='fake-user-uuid1') - - # Instance rebooted or crashed. 
block device stats were reset and are
-        # less than the previous values
-        db.vol_usage_update(ctxt, 1,
-                            rd_req=100, rd_bytes=200,
-                            wr_req=300, wr_bytes=400,
-                            instance_id='fake-instance-uuid1',
-                            project_id='fake-project-uuid1',
-                            availability_zone='fake-az',
-                            user_id='fake-user-uuid1')
-
-        db.vol_usage_update(ctxt, 1,
-                            rd_req=200, rd_bytes=300,
-                            wr_req=400, wr_bytes=500,
-                            instance_id='fake-instance-uuid1',
-                            project_id='fake-project-uuid1',
-                            availability_zone='fake-az',
-                            user_id='fake-user-uuid1')
-
-        vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
-        expected_vol_usage = {'volume_id': u'1',
-                              'instance_uuid': 'fake-instance-uuid1',
-                              'project_id': 'fake-project-uuid1',
-                              'availability_zone': 'fake-az',
-                              'user_id': 'fake-user-uuid1',
-                              'curr_reads': 200,
-                              'curr_read_bytes': 300,
-                              'curr_writes': 400,
-                              'curr_write_bytes': 500,
-                              'tot_reads': 10000,
-                              'tot_read_bytes': 20000,
-                              'tot_writes': 30000,
-                              'tot_write_bytes': 40000}
-        for key, value in expected_vol_usage.items():
-            self.assertEqual(vol_usage[key], value, key)
-
-    def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
-        # This is unlikely to happen, but could when a volume is detached
-        # right after an instance has rebooted / recovered and before
-        # the system polled and updated the volume usage cache table.
-        ctxt = context.get_admin_context()
-        now = timeutils.utcnow()
-        start_time = now - datetime.timedelta(seconds=10)
-
-        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
-        self.assertEqual(len(vol_usages), 0)
-
-        db.vol_usage_update(ctxt, 1,
-                            rd_req=10000, rd_bytes=20000,
-                            wr_req=30000, wr_bytes=40000,
-                            instance_id='fake-instance-uuid1',
-                            project_id='fake-project-uuid1',
-                            availability_zone='fake-az',
-                            user_id='fake-user-uuid1')
-
-        # Instance rebooted or crashed. block device stats were reset and are
-        # less than the previous values
-        db.vol_usage_update(ctxt, 1,
-                            rd_req=100, rd_bytes=200,
-                            wr_req=300, wr_bytes=400,
-                            instance_id='fake-instance-uuid1',
-                            project_id='fake-project-uuid1',
-                            availability_zone='fake-az',
-                            user_id='fake-user-uuid1',
-                            update_totals=True)
-
-        vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
-        expected_vol_usage = {'volume_id': u'1',
-                              'instance_uuid': 'fake-instance-uuid1',
-                              'project_id': 'fake-project-uuid1',
-                              'availability_zone': 'fake-az',
-                              'user_id': 'fake-user-uuid1',
-                              'curr_reads': 0,
-                              'curr_read_bytes': 0,
-                              'curr_writes': 0,
-                              'curr_write_bytes': 0,
-                              'tot_reads': 10100,
-                              'tot_read_bytes': 20200,
-                              'tot_writes': 30300,
-                              'tot_write_bytes': 40400}
-        for key, value in expected_vol_usage.items():
-            self.assertEqual(vol_usage[key], value, key)
-
-
-class TaskLogTestCase(test.TestCase):
-
-    def setUp(self):
-        super(TaskLogTestCase, self).setUp()
-        self.context = context.get_admin_context()
-        now = timeutils.utcnow()
-        self.begin = now - datetime.timedelta(seconds=10)
-        self.end = now - datetime.timedelta(seconds=5)
-        self.task_name = 'fake-task-name'
-        self.host = 'fake-host'
-        self.message = 'Fake task message'
-        db.task_log_begin_task(self.context, self.task_name, self.begin,
-                               self.end, self.host, message=self.message)
-
-    def test_task_log_get(self):
-        result = db.task_log_get(self.context, self.task_name, self.begin,
-                                 self.end, self.host)
-        self.assertEqual(result['task_name'], self.task_name)
-        self.assertEqual(result['period_beginning'], self.begin)
-        self.assertEqual(result['period_ending'], self.end)
-        self.assertEqual(result['host'], self.host)
-        self.assertEqual(result['message'], self.message)
-
-    def test_task_log_get_all(self):
-        result = db.task_log_get_all(self.context, self.task_name, self.begin,
-                                     self.end, host=self.host)
-        self.assertEqual(len(result), 1)
-
-    def test_task_log_begin_task(self):
-        db.task_log_begin_task(self.context, 'fake', self.begin,
-                               self.end, self.host, message=self.message)
-        result = db.task_log_get(self.context, 'fake', self.begin,
-                                 self.end, self.host)
-        self.assertEqual(result['task_name'], 'fake')
-
-    def test_task_log_begin_task_duplicate(self):
-        params = (self.context, 'fake', self.begin, self.end, self.host)
-        db.task_log_begin_task(*params, message=self.message)
-        self.assertRaises(exception.TaskAlreadyRunning,
-                          db.task_log_begin_task,
-                          *params, message=self.message)
-
-    def test_task_log_end_task(self):
-        errors = 1
-        db.task_log_end_task(self.context, self.task_name, self.begin,
-                             self.end, self.host, errors,
-                             message=self.message)
-        result = db.task_log_get(self.context, self.task_name, self.begin,
-                                 self.end, self.host)
-        self.assertEqual(result['errors'], 1)
-
-
-class BlockDeviceMappingTestCase(test.TestCase):
-    def setUp(self):
-        super(BlockDeviceMappingTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-        self.instance = db.instance_create(self.ctxt, {})
-
-    def _create_bdm(self, values):
-        values.setdefault('instance_uuid', self.instance['uuid'])
-        values.setdefault('device_name', 'fake_device')
-        db.block_device_mapping_create(self.ctxt, values)
-        uuid = values['instance_uuid']
-
-        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
-
-        for bdm in bdms:
-            if bdm['device_name'] == values['device_name']:
-                return bdm
-
-    def test_scrub_empty_str_values_no_effect(self):
-        values = {'volume_size': 5}
-        expected = copy.copy(values)
-        sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
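-        # NOTE: _scrub_empty_str_values() mutates the dict in place,
-        # dropping the listed keys whose value is an empty string; that is
-        # why this test keeps a copy.copy() baseline. A sketch of the
-        # behaviour the next two tests pin down:
-        #     vals = {'volume_size': ''}
-        #     sqlalchemy_api._scrub_empty_str_values(vals, ['volume_size'])
-        #     assert vals == {}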
-        self.assertEqual(values, expected)
-
-    def test_scrub_empty_str_values_empty_string(self):
-        values = {'volume_size': ''}
-        sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
-        self.assertEqual(values, {})
-
-    def test_scrub_empty_str_values_empty_unicode(self):
-        values = {'volume_size': u''}
-        sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
-        self.assertEqual(values, {})
-
-    def test_block_device_mapping_create(self):
-        bdm = self._create_bdm({})
-        self.assertFalse(bdm is None)
-
-    def test_block_device_mapping_update(self):
-        bdm = self._create_bdm({})
-        db.block_device_mapping_update(self.ctxt, bdm['id'],
-                                       {'virtual_name': 'some_virt_name'})
-        uuid = bdm['instance_uuid']
-        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt,
-                                                               uuid)
-        self.assertEqual(bdm_real[0]['virtual_name'], 'some_virt_name')
-
-    def test_block_device_mapping_update_or_create(self):
-        values = {
-            'instance_uuid': self.instance['uuid'],
-            'device_name': 'fake_name',
-            'virtual_name': 'some_virt_name'
-        }
-        # check create
-        db.block_device_mapping_update_or_create(self.ctxt, values)
-        uuid = values['instance_uuid']
-        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt,
-                                                               uuid)
-        self.assertEqual(len(bdm_real), 1)
-        self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
-
-        # check update
-        values['virtual_name'] = 'virtual_name'
-        db.block_device_mapping_update_or_create(self.ctxt, values)
-        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt,
-                                                               uuid)
-        self.assertEqual(len(bdm_real), 1)
-        bdm_real = bdm_real[0]
-        self.assertEqual(bdm_real['device_name'], 'fake_name')
-        self.assertEqual(bdm_real['virtual_name'], 'virtual_name')
-
-    def test_block_device_mapping_update_or_create_check_remove_virt(self):
-        uuid = self.instance['uuid']
-        values = {
-            'instance_uuid': uuid,
-            'virtual_name': 'ephemeral12'
-        }
-
-        # check that old bdms with the same virtual_name are deleted on
-        # create
-        val1 = dict(values)
-        val1['device_name'] = 'device1'
-        db.block_device_mapping_create(self.ctxt, val1)
-        val2 = dict(values)
-        val2['device_name'] = 'device2'
-        db.block_device_mapping_update_or_create(self.ctxt, val2)
-        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt,
-                                                               uuid)
-        self.assertEqual(len(bdm_real), 1)
-        bdm_real = bdm_real[0]
-        self.assertEqual(bdm_real['device_name'], 'device2')
-        self.assertEqual(bdm_real['virtual_name'], 'ephemeral12')
-
-        # check that old bdms with the same virtual_name are deleted on
-        # update
-        val3 = dict(values)
-        val3['device_name'] = 'device3'
-        val3['virtual_name'] = 'some_name'
-        db.block_device_mapping_create(self.ctxt, val3)
-        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt,
-                                                               uuid)
-        self.assertEqual(len(bdm_real), 2)
-
-        val3['virtual_name'] = 'ephemeral12'
-        db.block_device_mapping_update_or_create(self.ctxt, val3)
-        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt,
-                                                               uuid)
-        self.assertEqual(len(bdm_real), 1)
-        bdm_real = bdm_real[0]
-        self.assertEqual(bdm_real['device_name'], 'device3')
-        self.assertEqual(bdm_real['virtual_name'], 'ephemeral12')
-
-    def test_block_device_mapping_get_all_by_instance(self):
-        uuid1 = self.instance['uuid']
-        uuid2 = db.instance_create(self.ctxt, {})['uuid']
-
-        bdms_values = [{'instance_uuid': uuid1,
-                        'virtual_name': 'virtual_name',
-                        'device_name': 'first'},
-                       {'instance_uuid': uuid2,
-                        'virtual_name': 'virtual_name1',
-                        'device_name': 'second'},
-                       {'instance_uuid': uuid2,
-                        'virtual_name': 'virtual_name2',
-                        'device_name': 'third'}]
-
-        for bdm in bdms_values:
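-            # NOTE: _create_bdm() (the helper above) attaches each mapping
-            # to the instance_uuid named in its values dict, so the two
-            # instances end up with one and two mappings respectively.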
-            self._create_bdm(bdm)
-
-        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
-        self.assertEqual(len(bdms), 1)
-        self.assertEqual(bdms[0]['virtual_name'], 'virtual_name')
-        self.assertEqual(bdms[0]['device_name'], 'first')
-
-        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
-        self.assertEqual(len(bdms), 2)
-
-    def test_block_device_mapping_destroy(self):
-        bdm = self._create_bdm({})
-        db.block_device_mapping_destroy(self.ctxt, bdm['id'])
-        bdm = db.block_device_mapping_get_all_by_instance(
-            self.ctxt, bdm['instance_uuid'])
-        self.assertEqual(len(bdm), 0)
-
-    def test_block_device_mapping_destroy_by_instance_and_volume(self):
-        vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
-        vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
-
-        self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1})
-        self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2})
-
-        uuid = self.instance['uuid']
-        db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt,
-                                                               uuid, vol_id1)
-        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
-        self.assertEqual(len(bdms), 1)
-        self.assertEqual(bdms[0]['device_name'], 'fake2')
-
-    def test_block_device_mapping_destroy_by_instance_and_device(self):
-        self._create_bdm({'device_name': 'fake1'})
-        self._create_bdm({'device_name': 'fake2'})
-
-        uuid = self.instance['uuid']
-        params = (self.ctxt, uuid, 'fake1')
-        db.block_device_mapping_destroy_by_instance_and_device(*params)
-
-        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
-        self.assertEqual(len(bdms), 1)
-        self.assertEqual(bdms[0]['device_name'], 'fake2')
-
-
-class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
-    """Tests for db.api.agent_build_* methods."""
-
-    def setUp(self):
-        super(AgentBuildTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-
-    def test_agent_build_create_and_get_all(self):
-        self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
-        agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
-        all_agent_builds = db.agent_build_get_all(self.ctxt)
-        self.assertEqual(1, len(all_agent_builds))
-        self._assertEqualObjects(agent_build, all_agent_builds[0])
-
-    def test_agent_build_get_by_triple(self):
-        agent_build = db.agent_build_create(self.ctxt, {'hypervisor': 'kvm',
-                'os': 'FreeBSD', 'architecture': 'x86_64'})
-        self.assertIsNone(db.agent_build_get_by_triple(self.ctxt, 'kvm',
-                'FreeBSD', 'i386'))
-        self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
-                self.ctxt, 'kvm', 'FreeBSD', 'x86_64'))
-
-    def test_agent_build_destroy(self):
-        agent_build = db.agent_build_create(self.ctxt, {})
-        self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
-        db.agent_build_destroy(self.ctxt, agent_build.id)
-        self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
-
-    def test_agent_build_update(self):
-        agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
-        db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
-        self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
-
-    def test_agent_build_destroy_destroyed(self):
-        agent_build = db.agent_build_create(self.ctxt, {})
-        db.agent_build_destroy(self.ctxt, agent_build.id)
-        self.assertRaises(exception.AgentBuildNotFound,
-                          db.agent_build_destroy, self.ctxt, agent_build.id)
-
-    def test_agent_build_update_destroyed(self):
-        agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
-        db.agent_build_destroy(self.ctxt, agent_build.id)
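-        # NOTE: the build is soft-deleted at this point, so the update
-        # below must fail with AgentBuildNotFound, mirroring the repeated
-        # destroy in the previous test.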
self.assertRaises(exception.AgentBuildNotFound, - db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'}) - - -class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(VirtualInterfaceTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.instance_uuid = db.instance_create(self.ctxt, {})['uuid'] - values = {'host': 'localhost', 'project_id': 'project1'} - self.network = db.network_create_safe(self.ctxt, values) - - def _get_base_values(self): - return { - 'instance_uuid': self.instance_uuid, - 'address': 'fake_address', - 'network_id': self.network['id'], - 'uuid': str(stdlib_uuid.uuid4()) - } - - def mock_db_query_first_to_raise_data_error_exception(self): - self.mox.StubOutWithMock(query.Query, 'first') - query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(), - mox.IgnoreArg(), - mox.IgnoreArg())) - self.mox.ReplayAll() - - def _create_virt_interface(self, values): - v = self._get_base_values() - v.update(values) - return db.virtual_interface_create(self.ctxt, v) - - def test_virtual_interface_create(self): - vif = self._create_virt_interface({}) - self.assertFalse(vif['id'] is None) - ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at', - 'created_at', 'uuid'] - self._assertEqualObjects(vif, self._get_base_values(), ignored_keys) - - @test.testtools.skip("bug 1156227") - def test_virtual_interface_create_with_duplicate_address(self): - vif = self._create_virt_interface({}) - # NOTE(boris-42): Due to the bug 1156227 this won't work. In havana-1 - # it will be fixed. - self.assertRaises(exception.VirtualInterfaceCreateException, - self._create_virt_interface, {"uuid": vif['uuid']}) - - def test_virtual_interface_get(self): - vifs = [self._create_virt_interface({'address': 'a'}), - self._create_virt_interface({'address': 'b'})] - - for vif in vifs: - real_vif = db.virtual_interface_get(self.ctxt, vif['id']) - self._assertEqualObjects(vif, real_vif) - - def test_virtual_interface_get_by_address(self): - vifs = [self._create_virt_interface({'address': 'first'}), - self._create_virt_interface({'address': 'second'})] - for vif in vifs: - real_vif = db.virtual_interface_get_by_address(self.ctxt, - vif['address']) - self._assertEqualObjects(vif, real_vif) - - def test_virtual_interface_get_by_address_not_found(self): - self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt, - "i.nv.ali.ip")) - - def test_virtual_interface_get_by_address_data_error_exception(self): - self.mock_db_query_first_to_raise_data_error_exception() - self.assertRaises(exception.InvalidIpAddressError, - db.virtual_interface_get_by_address, - self.ctxt, - "i.nv.ali.ip") - - def test_virtual_interface_get_by_uuid(self): - vifs = [self._create_virt_interface({}), - self._create_virt_interface({})] - for vif in vifs: - real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid']) - self._assertEqualObjects(vif, real_vif) - - def test_virtual_interface_get_by_instance(self): - inst_uuid2 = db.instance_create(self.ctxt, {})['uuid'] - vifs1 = [self._create_virt_interface({'address': 'fake1'}), - self._create_virt_interface({'address': 'fake2'})] - vifs2 = [self._create_virt_interface({'address': 'fake3', - 'instance_uuid': inst_uuid2})] - vifs1_real = db.virtual_interface_get_by_instance(self.ctxt, - self.instance_uuid) - vifs2_real = db.virtual_interface_get_by_instance(self.ctxt, - inst_uuid2) - self._assertEqualListsOfObjects(vifs1, vifs1_real) - self._assertEqualListsOfObjects(vifs2, vifs2_real) - - def 
test_virtual_interface_get_by_instance_and_network(self): - inst_uuid2 = db.instance_create(self.ctxt, {})['uuid'] - values = {'host': 'localhost', 'project_id': 'project2'} - network_id = db.network_create_safe(self.ctxt, values)['id'] - - vifs = [self._create_virt_interface({'address': 'fake1'}), - self._create_virt_interface({'address': 'fake2', - 'network_id': network_id, - 'instance_uuid': inst_uuid2}), - self._create_virt_interface({'address': 'fake3', - 'instance_uuid': inst_uuid2})] - for vif in vifs: - params = (self.ctxt, vif['instance_uuid'], vif['network_id']) - r_vif = db.virtual_interface_get_by_instance_and_network(*params) - self._assertEqualObjects(r_vif, vif) - - def test_virtual_interface_delete_by_instance(self): - inst_uuid2 = db.instance_create(self.ctxt, {})['uuid'] - - values = [dict(address='fake1'), dict(address='fake2'), - dict(address='fake3', instance_uuid=inst_uuid2)] - for vals in values: - self._create_virt_interface(vals) - - db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid) - - real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt, - self.instance_uuid) - real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt, - inst_uuid2) - self.assertEqual(len(real_vifs1), 0) - self.assertEqual(len(real_vifs2), 1) - - def test_virtual_interface_get_all(self): - inst_uuid2 = db.instance_create(self.ctxt, {})['uuid'] - values = [dict(address='fake1'), dict(address='fake2'), - dict(address='fake3', instance_uuid=inst_uuid2)] - - vifs = [self._create_virt_interface(val) for val in values] - real_vifs = db.virtual_interface_get_all(self.ctxt) - self._assertEqualListsOfObjects(vifs, real_vifs) - - -class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(KeyPairTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _create_key_pair(self, values): - return db.key_pair_create(self.ctxt, values) - - def test_key_pair_create(self): - param = { - 'name': 'test_1', - 'user_id': 'test_user_id_1', - 'public_key': 'test_public_key_1', - 'fingerprint': 'test_fingerprint_1' - } - key_pair = self._create_key_pair(param) - - self.assertTrue(key_pair['id'] is not None) - ignored_keys = ['deleted', 'created_at', 'updated_at', - 'deleted_at', 'id'] - self._assertEqualObjects(key_pair, param, ignored_keys) - - def test_key_pair_create_with_duplicate_name(self): - params = {'name': 'test_name', 'user_id': 'test_user_id'} - self._create_key_pair(params) - self.assertRaises(exception.KeyPairExists, self._create_key_pair, - params) - - def test_key_pair_get(self): - params = [ - {'name': 'test_1', 'user_id': 'test_user_id_1'}, - {'name': 'test_2', 'user_id': 'test_user_id_2'}, - {'name': 'test_3', 'user_id': 'test_user_id_3'} - ] - key_pairs = [self._create_key_pair(p) for p in params] - - for key in key_pairs: - real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name']) - self._assertEqualObjects(key, real_key) - - def test_key_pair_get_no_results(self): - param = {'name': 'test_1', 'user_id': 'test_user_id_1'} - self.assertRaises(exception.KeypairNotFound, db.key_pair_get, - self.ctxt, param['user_id'], param['name']) - - def test_key_pair_get_deleted(self): - param = {'name': 'test_1', 'user_id': 'test_user_id_1'} - key_pair_created = self._create_key_pair(param) - - db.key_pair_destroy(self.ctxt, param['user_id'], param['name']) - self.assertRaises(exception.KeypairNotFound, db.key_pair_get, - self.ctxt, param['user_id'], param['name']) - - ctxt = self.ctxt.elevated(read_deleted='yes') - 
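-        # NOTE: read_deleted='yes' makes the context see soft-deleted rows;
-        # the assertion on 'deleted' below relies on the convention
-        # (presumably to keep unique constraints usable) of storing the
-        # row's id in the deleted column instead of a boolean flag.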
key_pair_deleted = db.key_pair_get(ctxt, param['user_id'], - param['name']) - ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at'] - self._assertEqualObjects(key_pair_deleted, key_pair_created, - ignored_keys) - self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id']) - - def test_key_pair_get_all_by_user(self): - params = [ - {'name': 'test_1', 'user_id': 'test_user_id_1'}, - {'name': 'test_2', 'user_id': 'test_user_id_1'}, - {'name': 'test_3', 'user_id': 'test_user_id_2'} - ] - key_pairs_user_1 = [self._create_key_pair(p) for p in params - if p['user_id'] == 'test_user_id_1'] - key_pairs_user_2 = [self._create_key_pair(p) for p in params - if p['user_id'] == 'test_user_id_2'] - - real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1') - real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2') - - self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1) - self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2) - - def test_key_pair_count_by_user(self): - params = [ - {'name': 'test_1', 'user_id': 'test_user_id_1'}, - {'name': 'test_2', 'user_id': 'test_user_id_1'}, - {'name': 'test_3', 'user_id': 'test_user_id_2'} - ] - for p in params: - self._create_key_pair(p) - - count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1') - self.assertEqual(count_1, 2) - - count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2') - self.assertEqual(count_2, 1) - - def test_key_pair_destroy(self): - param = {'name': 'test_1', 'user_id': 'test_user_id_1'} - self._create_key_pair(param) - - db.key_pair_destroy(self.ctxt, param['user_id'], param['name']) - self.assertRaises(exception.KeypairNotFound, db.key_pair_get, - self.ctxt, param['user_id'], param['name']) - - def test_key_pair_destroy_no_such_key(self): - param = {'name': 'test_1', 'user_id': 'test_user_id_1'} - self.assertRaises(exception.KeypairNotFound, - db.key_pair_destroy, self.ctxt, - param['user_id'], param['name']) - - -class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin): - - """Tests for db.api.quota_* methods.""" - - def setUp(self): - super(QuotaTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def test_quota_create(self): - quota = db.quota_create(self.ctxt, 'project1', 'resource', 99) - self.assertEqual(quota.resource, 'resource') - self.assertEqual(quota.hard_limit, 99) - self.assertEqual(quota.project_id, 'project1') - - def test_quota_get(self): - quota = db.quota_create(self.ctxt, 'project1', 'resource', 99) - quota_db = db.quota_get(self.ctxt, 'project1', 'resource') - self._assertEqualObjects(quota, quota_db) - - def test_quota_get_all_by_project(self): - for i in range(3): - for j in range(3): - db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j) - for i in range(3): - quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i) - self.assertEqual(quotas_db, {'project_id': 'proj%d' % i, - 'resource0': 0, - 'resource1': 1, - 'resource2': 2}) - - def test_quota_update(self): - db.quota_create(self.ctxt, 'project1', 'resource1', 41) - db.quota_update(self.ctxt, 'project1', 'resource1', 42) - quota = db.quota_get(self.ctxt, 'project1', 'resource1') - self.assertEqual(quota.hard_limit, 42) - self.assertEqual(quota.resource, 'resource1') - self.assertEqual(quota.project_id, 'project1') - - def test_quota_update_nonexistent(self): - self.assertRaises(exception.ProjectQuotaNotFound, - db.quota_update, self.ctxt, 'project1', 'resource1', 42) - - def test_quota_get_nonexistent(self): - 
self.assertRaises(exception.ProjectQuotaNotFound, - db.quota_get, self.ctxt, 'project1', 'resource1') - - def test_quota_reserve(self): - reservations = _quota_reserve(self.ctxt, 'project1') - self.assertEqual(len(reservations), 3) - res_names = ['res0', 'res1', 'res2'] - for uuid in reservations: - reservation = db.reservation_get(self.ctxt, uuid) - self.assertTrue(reservation.resource in res_names) - res_names.remove(reservation.resource) - - def test_quota_destroy_all_by_project(self): - reservations = _quota_reserve(self.ctxt, 'project1') - db.quota_destroy_all_by_project(self.ctxt, 'project1') - self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'), - {'project_id': 'project1'}) - self.assertEqual(db.quota_usage_get_all_by_project( - self.ctxt, 'project1'), - {'project_id': 'project1'}) - for r in reservations: - self.assertRaises(exception.ReservationNotFound, - db.reservation_get, self.ctxt, r) - - def test_quota_usage_get_nonexistent(self): - self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get, - self.ctxt, 'p1', 'nonexistent_resource') - - def test_quota_usage_get(self): - reservations = _quota_reserve(self.ctxt, 'p1') - quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'res0') - expected = {'resource': 'res0', 'project_id': 'p1', - 'in_use': 0, 'reserved': 0, 'total': 0} - for key, value in expected.iteritems(): - self.assertEqual(value, quota_usage[key]) - - def test_quota_usage_get_all_by_project(self): - reservations = _quota_reserve(self.ctxt, 'p1') - expected = {'project_id': 'p1', - 'res0': {'in_use': 0, 'reserved': 0}, - 'res1': {'in_use': 1, 'reserved': 1}, - 'res2': {'in_use': 2, 'reserved': 2}} - self.assertEqual(expected, db.quota_usage_get_all_by_project( - self.ctxt, 'p1')) - - def test_quota_usage_update_nonexistent(self): - self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update, - self.ctxt, 'p1', 'resource', in_use=42) - - def test_quota_usage_update(self): - reservations = _quota_reserve(self.ctxt, 'p1') - until_refresh = datetime.datetime.now() + datetime.timedelta(days=1) - db.quota_usage_update(self.ctxt, 'p1', 'res0', in_use=42, reserved=43) - quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'res0') - expected = {'resource': 'res0', 'project_id': 'p1', - 'in_use': 42, 'reserved': 43, 'total': 85} - for key, value in expected.iteritems(): - self.assertEqual(value, quota_usage[key]) - - -class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(QuotaClassTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def test_quota_class_get_default(self): - params = { - 'test_resource1': '10', - 'test_resource2': '20', - 'test_resource3': '30', - } - for res, limit in params.items(): - db.quota_class_create(self.ctxt, 'default', res, limit) - - defaults = db.quota_class_get_default(self.ctxt) - self.assertEqual(defaults, dict(class_name='default', - test_resource1=10, - test_resource2=20, - test_resource3=30)) - - def test_quota_class_create(self): - qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42) - self.assertEqual(qc.class_name, 'class name') - self.assertEqual(qc.resource, 'resource') - self.assertEqual(qc.hard_limit, 42) - - def test_quota_class_get(self): - qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42) - qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource') - self._assertEqualObjects(qc, qc_db) - - def test_quota_class_get_nonexistent(self): - self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get, -
self.ctxt, 'nonexistent', 'resource') - - def test_quota_class_get_all_by_name(self): - for i in range(3): - for j in range(3): - db.quota_class_create(self.ctxt, 'class%d' % i, - 'resource%d' % j, j) - for i in range(3): - classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i) - self.assertEqual(classes, {'class_name': 'class%d' % i, - 'resource0': 0, 'resource1': 1, 'resource2': 2}) - - def test_quota_class_update(self): - qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42) - db.quota_class_update(self.ctxt, 'class name', 'resource', 43) - self.assertEqual(db.quota_class_get(self.ctxt, 'class name', - 'resource').hard_limit, 43) - - def test_quota_class_update_nonexistent(self): - self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update, - self.ctxt, 'class name', 'resource', 42) - - -class ArchiveTestCase(test.TestCase): - - def setUp(self): - super(ArchiveTestCase, self).setUp() - self.context = context.get_admin_context() - self.engine = get_engine() - self.conn = self.engine.connect() - self.instance_id_mappings = db_utils.get_table(self.engine, - "instance_id_mappings") - self.shadow_instance_id_mappings = db_utils.get_table(self.engine, - "shadow_instance_id_mappings") - self.dns_domains = db_utils.get_table(self.engine, "dns_domains") - self.shadow_dns_domains = db_utils.get_table(self.engine, - "shadow_dns_domains") - self.consoles = db_utils.get_table(self.engine, "consoles") - self.console_pools = db_utils.get_table(self.engine, "console_pools") - self.shadow_consoles = db_utils.get_table(self.engine, - "shadow_consoles") - self.shadow_console_pools = db_utils.get_table(self.engine, - "shadow_console_pools") - self.instances = db_utils.get_table(self.engine, "instances") - self.shadow_instances = db_utils.get_table(self.engine, - "shadow_instances") - self.uuidstrs = [] - for unused in range(6): - self.uuidstrs.append(stdlib_uuid.uuid4().hex) - self.ids = [] - self.id_tablenames_to_cleanup = set(["console_pools", "consoles"]) - self.uuid_tablenames_to_cleanup = set(["instance_id_mappings", - "instances"]) - self.domain_tablenames_to_cleanup = set(["dns_domains"]) - - def tearDown(self): - super(ArchiveTestCase, self).tearDown() - for tablename in self.id_tablenames_to_cleanup: - for name in [tablename, "shadow_" + tablename]: - table = db_utils.get_table(self.engine, name) - del_statement = table.delete(table.c.id.in_(self.ids)) - self.conn.execute(del_statement) - for tablename in self.uuid_tablenames_to_cleanup: - for name in [tablename, "shadow_" + tablename]: - table = db_utils.get_table(self.engine, name) - del_statement = table.delete(table.c.uuid.in_(self.uuidstrs)) - self.conn.execute(del_statement) - for tablename in self.domain_tablenames_to_cleanup: - for name in [tablename, "shadow_" + tablename]: - table = db_utils.get_table(self.engine, name) - del_statement = table.delete(table.c.domain.in_(self.uuidstrs)) - self.conn.execute(del_statement) - - def test_shadow_tables(self): - metadata = MetaData(bind=self.engine) - metadata.reflect() - for table_name in metadata.tables: - if table_name.startswith("shadow_"): - self.assertIn(table_name[7:], metadata.tables) - continue - self.assertTrue(db_utils.check_shadow_table(self.engine, - table_name)) - - def test_archive_deleted_rows(self): - # Add 6 rows to table - for uuidstr in self.uuidstrs: - ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr) - self.conn.execute(ins_stmt) - # Set 4 to deleted - update_statement = self.instance_id_mappings.update().\ - 
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\ - .values(deleted=1) - self.conn.execute(update_statement) - qiim = select([self.instance_id_mappings]).where(self. - instance_id_mappings.c.uuid.in_(self.uuidstrs)) - rows = self.conn.execute(qiim).fetchall() - # Verify we have 6 in main - self.assertEqual(len(rows), 6) - qsiim = select([self.shadow_instance_id_mappings]).\ - where(self.shadow_instance_id_mappings.c.uuid.in_( - self.uuidstrs)) - rows = self.conn.execute(qsiim).fetchall() - # Verify we have 0 in shadow - self.assertEqual(len(rows), 0) - # Archive 2 rows - db.archive_deleted_rows(self.context, max_rows=2) - rows = self.conn.execute(qiim).fetchall() - # Verify we have 4 left in main - self.assertEqual(len(rows), 4) - rows = self.conn.execute(qsiim).fetchall() - # Verify we have 2 in shadow - self.assertEqual(len(rows), 2) - # Archive 2 more rows - db.archive_deleted_rows(self.context, max_rows=2) - rows = self.conn.execute(qiim).fetchall() - # Verify we have 2 left in main - self.assertEqual(len(rows), 2) - rows = self.conn.execute(qsiim).fetchall() - # Verify we have 4 in shadow - self.assertEqual(len(rows), 4) - # Try to archive more, but there are no deleted rows left. - db.archive_deleted_rows(self.context, max_rows=2) - rows = self.conn.execute(qiim).fetchall() - # Verify we still have 2 left in main - self.assertEqual(len(rows), 2) - rows = self.conn.execute(qsiim).fetchall() - # Verify we still have 4 in shadow - self.assertEqual(len(rows), 4) - - def test_archive_deleted_rows_for_every_uuid_table(self): - tablenames = [] - for model_class in models.__dict__.itervalues(): - if hasattr(model_class, "__tablename__"): - tablenames.append(model_class.__tablename__) - tablenames.sort() - for tablename in tablenames: - ret = self._test_archive_deleted_rows_for_one_uuid_table(tablename) - if ret == 0: - self.uuid_tablenames_to_cleanup.add(tablename) - - def _test_archive_deleted_rows_for_one_uuid_table(self, tablename): - """ - :returns: 0 on success, 1 if no uuid column, 2 if insert failed - """ - main_table = db_utils.get_table(self.engine, tablename) - if not hasattr(main_table.c, "uuid"): - # Not a uuid table, so skip it. - return 1 - shadow_table = db_utils.get_table(self.engine, "shadow_" + tablename) - # Add 6 rows to table - for uuidstr in self.uuidstrs: - ins_stmt = main_table.insert().values(uuid=uuidstr) - try: - self.conn.execute(ins_stmt) - except IntegrityError: - # This table has constraints that require a table-specific - # insert, so skip it. 
- return 2 - # Set 4 to deleted - update_statement = main_table.update().\ - where(main_table.c.uuid.in_(self.uuidstrs[:4]))\ - .values(deleted=1) - self.conn.execute(update_statement) - qmt = select([main_table]).where(main_table.c.uuid.in_( - self.uuidstrs)) - rows = self.conn.execute(qmt).fetchall() - # Verify we have 6 in main - self.assertEqual(len(rows), 6) - qst = select([shadow_table]).\ - where(shadow_table.c.uuid.in_(self.uuidstrs)) - rows = self.conn.execute(qst).fetchall() - # Verify we have 0 in shadow - self.assertEqual(len(rows), 0) - # Archive 2 rows - db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2) - # Verify we have 4 left in main - rows = self.conn.execute(qmt).fetchall() - self.assertEqual(len(rows), 4) - # Verify we have 2 in shadow - rows = self.conn.execute(qst).fetchall() - self.assertEqual(len(rows), 2) - # Archive 2 more rows - db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2) - # Verify we have 2 left in main - rows = self.conn.execute(qmt).fetchall() - self.assertEqual(len(rows), 2) - # Verify we have 4 in shadow - rows = self.conn.execute(qst).fetchall() - self.assertEqual(len(rows), 4) - # Try to archive more, but there are no deleted rows left. - db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2) - # Verify we still have 2 left in main - rows = self.conn.execute(qmt).fetchall() - self.assertEqual(len(rows), 2) - # Verify we still have 4 in shadow - rows = self.conn.execute(qst).fetchall() - self.assertEqual(len(rows), 4) - return 0 - - def test_archive_deleted_rows_no_id_column(self): - uuidstr0 = self.uuidstrs[0] - ins_stmt = self.dns_domains.insert().values(domain=uuidstr0) - self.conn.execute(ins_stmt) - update_statement = self.dns_domains.update().\ - where(self.dns_domains.c.domain == uuidstr0).\ - values(deleted=1) - self.conn.execute(update_statement) - qdd = select([self.dns_domains], self.dns_domains.c.domain == - uuidstr0) - rows = self.conn.execute(qdd).fetchall() - self.assertEqual(len(rows), 1) - qsdd = select([self.shadow_dns_domains], - self.shadow_dns_domains.c.domain == uuidstr0) - rows = self.conn.execute(qsdd).fetchall() - self.assertEqual(len(rows), 0) - db.archive_deleted_rows(self.context, max_rows=1) - rows = self.conn.execute(qdd).fetchall() - self.assertEqual(len(rows), 0) - rows = self.conn.execute(qsdd).fetchall() - self.assertEqual(len(rows), 1) - - def test_archive_deleted_rows_fk_constraint(self): - # consoles.pool_id depends on console_pools.id - # SQLite doesn't enforce foreign key constraints without a pragma. - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. - # So return early to skip this test if running SQLite < 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): - self.skipTest( - 'sqlite version too old for reliable SQLA foreign_keys') - self.conn.execute("PRAGMA foreign_keys = ON") - ins_stmt = self.console_pools.insert().values(deleted=1) - result = self.conn.execute(ins_stmt) - id1 = result.inserted_primary_key[0] - self.ids.append(id1) - ins_stmt = self.consoles.insert().values(deleted=1, - pool_id=id1) - result = self.conn.execute(ins_stmt) - id2 = result.inserted_primary_key[0] - self.ids.append(id2) - # The first try to archive console_pools should fail, due to FK. 
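- # The consoles row still references the pool through pool_id, so the - # pool row cannot be deleted from the main table until its child - # rows have been archived first.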
- num = db.archive_deleted_rows_for_table(self.context, "console_pools") - self.assertEqual(num, 0) - # Then archiving consoles should work. - num = db.archive_deleted_rows_for_table(self.context, "consoles") - self.assertEqual(num, 1) - # Then archiving console_pools should work. - num = db.archive_deleted_rows_for_table(self.context, "console_pools") - self.assertEqual(num, 1) - - def test_archive_deleted_rows_2_tables(self): - # Add 6 rows to each table - for uuidstr in self.uuidstrs: - ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr) - self.conn.execute(ins_stmt) - ins_stmt2 = self.instances.insert().values(uuid=uuidstr) - self.conn.execute(ins_stmt2) - # Set 4 of each to deleted - update_statement = self.instance_id_mappings.update().\ - where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\ - .values(deleted=1) - self.conn.execute(update_statement) - update_statement2 = self.instances.update().\ - where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\ - .values(deleted=1) - self.conn.execute(update_statement2) - # Verify we have 6 in each main table - qiim = select([self.instance_id_mappings]).where( - self.instance_id_mappings.c.uuid.in_(self.uuidstrs)) - rows = self.conn.execute(qiim).fetchall() - self.assertEqual(len(rows), 6) - qi = select([self.instances]).where(self.instances.c.uuid.in_( - self.uuidstrs)) - rows = self.conn.execute(qi).fetchall() - self.assertEqual(len(rows), 6) - # Verify we have 0 in each shadow table - qsiim = select([self.shadow_instance_id_mappings]).\ - where(self.shadow_instance_id_mappings.c.uuid.in_( - self.uuidstrs)) - rows = self.conn.execute(qsiim).fetchall() - self.assertEqual(len(rows), 0) - qsi = select([self.shadow_instances]).\ - where(self.shadow_instances.c.uuid.in_(self.uuidstrs)) - rows = self.conn.execute(qsi).fetchall() - self.assertEqual(len(rows), 0) - # Archive 7 rows, which should be 4 in one table and 3 in the other. - db.archive_deleted_rows(self.context, max_rows=7) - # Verify we have 5 left in the two main tables combined - iim_rows = self.conn.execute(qiim).fetchall() - i_rows = self.conn.execute(qi).fetchall() - self.assertEqual(len(iim_rows) + len(i_rows), 5) - # Verify we have 7 in the two shadow tables combined. - siim_rows = self.conn.execute(qsiim).fetchall() - si_rows = self.conn.execute(qsi).fetchall() - self.assertEqual(len(siim_rows) + len(si_rows), 7) - # Archive the remaining deleted rows. - db.archive_deleted_rows(self.context, max_rows=1) - # Verify we have 4 total left in both main tables. - iim_rows = self.conn.execute(qiim).fetchall() - i_rows = self.conn.execute(qi).fetchall() - self.assertEqual(len(iim_rows) + len(i_rows), 4) - # Verify we have 8 in shadow - siim_rows = self.conn.execute(qsiim).fetchall() - si_rows = self.conn.execute(qsi).fetchall() - self.assertEqual(len(siim_rows) + len(si_rows), 8) - # Try to archive more, but there are no deleted rows left. - db.archive_deleted_rows(self.context, max_rows=500) - # Verify we have 4 total left in both main tables. 
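- # Six rows went into each table and four of each were marked deleted; - # all eight deleted rows have been archived by this point.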
- iim_rows = self.conn.execute(qiim).fetchall() - i_rows = self.conn.execute(qi).fetchall() - self.assertEqual(len(iim_rows) + len(i_rows), 4) - # Verify we have 8 in shadow - siim_rows = self.conn.execute(qsiim).fetchall() - si_rows = self.conn.execute(qsi).fetchall() - self.assertEqual(len(siim_rows) + len(si_rows), 8) diff --git a/nova/tests/test_filters.py b/nova/tests/test_filters.py deleted file mode 100644 index c06b50fd..00000000 --- a/nova/tests/test_filters.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filters. -""" - -import inspect -import sys - -from nova import filters -from nova import loadables -from nova import test - - -class Filter1(filters.BaseFilter): - """Test Filter class #1.""" - pass - - -class Filter2(filters.BaseFilter): - """Test Filter class #2.""" - pass - - -class FiltersTestCase(test.TestCase): - def test_filter_all(self): - filter_obj_list = ['obj1', 'obj2', 'obj3'] - filter_properties = 'fake_filter_properties' - base_filter = filters.BaseFilter() - - self.mox.StubOutWithMock(base_filter, '_filter_one') - - base_filter._filter_one('obj1', filter_properties).AndReturn(True) - base_filter._filter_one('obj2', filter_properties).AndReturn(False) - base_filter._filter_one('obj3', filter_properties).AndReturn(True) - - self.mox.ReplayAll() - - result = base_filter.filter_all(filter_obj_list, filter_properties) - self.assertTrue(inspect.isgenerator(result)) - self.assertEqual(['obj1', 'obj3'], list(result)) - - def test_filter_all_recursive_yields(self): - # Test filter_all() allows generators from previous filter_all()s. - # filter_all() yields results. We want to make sure that we can - # call filter_all() with generators returned from previous calls - # to filter_all(). - filter_obj_list = ['obj1', 'obj2', 'obj3'] - filter_properties = 'fake_filter_properties' - base_filter = filters.BaseFilter() - - self.mox.StubOutWithMock(base_filter, '_filter_one') - - total_iterations = 200 - - # The order in which _filter_one gets called is confusing because - # we will be recursively yielding things. - # We are going to simulate the first call to filter_all() - # returning False for 'obj2'. So, 'obj1' will get yielded - # 'total_iterations' number of times before the first filter_all() - # call gets to processing 'obj2'. We then return 'False' for it. - # After that, 'obj3' gets yielded 'total_iterations' number of - # times. - for x in xrange(total_iterations): - base_filter._filter_one('obj1', filter_properties).AndReturn(True) - base_filter._filter_one('obj2', filter_properties).AndReturn(False) - for x in xrange(total_iterations): - base_filter._filter_one('obj3', filter_properties).AndReturn(True) - self.mox.ReplayAll() - - objs = iter(filter_obj_list) - for x in xrange(total_iterations): - # Pass in generators returned from previous calls.
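- # Each call only wraps the previous generator; nothing is evaluated - # until list() drains the outermost generator below.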
- objs = base_filter.filter_all(objs, filter_properties) - self.assertTrue(inspect.isgenerator(objs)) - self.assertEqual(['obj1', 'obj3'], list(objs)) - - def test_get_filtered_objects(self): - filter_objs_initial = ['initial', 'filter1', 'objects1'] - filter_objs_second = ['second', 'filter2', 'objects2'] - filter_objs_last = ['last', 'filter3', 'objects3'] - filter_properties = 'fake_filter_properties' - - def _fake_base_loader_init(*args, **kwargs): - pass - - self.stubs.Set(loadables.BaseLoader, '__init__', - _fake_base_loader_init) - - filt1_mock = self.mox.CreateMock(Filter1) - filt2_mock = self.mox.CreateMock(Filter2) - - self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1', - use_mock_anything=True) - self.mox.StubOutWithMock(filt1_mock, 'filter_all') - self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2', - use_mock_anything=True) - self.mox.StubOutWithMock(filt2_mock, 'filter_all') - - Filter1().AndReturn(filt1_mock) - filt1_mock.filter_all(filter_objs_initial, - filter_properties).AndReturn(filter_objs_second) - Filter2().AndReturn(filt2_mock) - filt2_mock.filter_all(filter_objs_second, - filter_properties).AndReturn(filter_objs_last) - - self.mox.ReplayAll() - - filter_handler = filters.BaseFilterHandler(filters.BaseFilter) - filter_classes = [Filter1, Filter2] - result = filter_handler.get_filtered_objects(filter_classes, - filter_objs_initial, - filter_properties) - self.assertEqual(filter_objs_last, result) - - def test_get_filtered_objects_none_response(self): - filter_objs_initial = ['initial', 'filter1', 'objects1'] - filter_properties = 'fake_filter_properties' - - def _fake_base_loader_init(*args, **kwargs): - pass - - self.stubs.Set(loadables.BaseLoader, '__init__', - _fake_base_loader_init) - - filt1_mock = self.mox.CreateMock(Filter1) - filt2_mock = self.mox.CreateMock(Filter2) - - self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1', - use_mock_anything=True) - self.mox.StubOutWithMock(filt1_mock, 'filter_all') - # Shouldn't be called. - self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2', - use_mock_anything=True) - self.mox.StubOutWithMock(filt2_mock, 'filter_all') - - Filter1().AndReturn(filt1_mock) - filt1_mock.filter_all(filter_objs_initial, - filter_properties).AndReturn(None) - self.mox.ReplayAll() - - filter_handler = filters.BaseFilterHandler(filters.BaseFilter) - filter_classes = [Filter1, Filter2] - result = filter_handler.get_filtered_objects(filter_classes, - filter_objs_initial, - filter_properties) - self.assertEqual(None, result) diff --git a/nova/tests/test_image_utils.py b/nova/tests/test_image_utils.py deleted file mode 100644 index a9768f82..00000000 --- a/nova/tests/test_image_utils.py +++ /dev/null @@ -1,238 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from nova import test -from nova import utils - -from nova.virt import images -from nova.virt.libvirt import utils as libvirt_utils - - -class ImageUtilsTestCase(test.TestCase): - def test_disk_type(self): - # Paths under /dev are detected as LVM volumes. - for p in ['/dev/b', '/dev/blah/blah']: - d_type = libvirt_utils.get_disk_type(p) - self.assertEquals('lvm', d_type) - # Try the other types - template_output = """image: %(path)s -file format: %(format)s -virtual size: 64M (67108864 bytes) -cluster_size: 65536 -disk size: 96K -""" - path = '/myhome/disk.config' - for f in ['raw', 'qcow2']: - output = template_output % ({ - 'format': f, - 'path': path, - }) - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((output, '')) - self.mox.ReplayAll() - d_type = libvirt_utils.get_disk_type(path) - self.assertEquals(f, d_type) - self.mox.UnsetStubs() - - def test_disk_backing(self): - path = '/myhome/disk.config' - template_output = """image: %(path)s -file format: raw -virtual size: 2K (2048 bytes) -cluster_size: 65536 -disk size: 96K -""" - output = template_output % ({ - 'path': path, - }) - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((output, '')) - self.mox.ReplayAll() - d_backing = libvirt_utils.get_disk_backing_file(path) - self.assertEquals(None, d_backing) - - def test_disk_size(self): - path = '/myhome/disk.config' - template_output = """image: %(path)s -file format: raw -virtual size: %(v_size)s (%(vsize_b)s bytes) -cluster_size: 65536 -disk size: 96K -""" - for i in range(0, 128): - bytes = i * 65336 - kbytes = bytes / 1024 - mbytes = kbytes / 1024 - output = template_output % ({ - 'v_size': "%sM" % (mbytes), - 'vsize_b': i, - 'path': path, - }) - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((output, '')) - self.mox.ReplayAll() - d_size = libvirt_utils.get_disk_size(path) - self.assertEquals(i, d_size) - self.mox.UnsetStubs() - output = template_output % ({ - 'v_size': "%sK" % (kbytes), - 'vsize_b': i, - 'path': path, - }) - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((output, '')) - self.mox.ReplayAll() - d_size = libvirt_utils.get_disk_size(path) - self.assertEquals(i, d_size) - self.mox.UnsetStubs() - - def test_qemu_info_canon(self): - path = "disk.config" - example_output = """image: disk.config -file format: raw -virtual size: 64M (67108864 bytes) -cluster_size: 65536 -disk size: 96K -blah BLAH: bb -""" - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((example_output, '')) - self.mox.ReplayAll() - image_info = images.qemu_img_info(path) - self.assertEquals('disk.config', image_info.image) - self.assertEquals('raw', image_info.file_format) - self.assertEquals(67108864, image_info.virtual_size) - self.assertEquals(98304, image_info.disk_size) - 
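# cluster_size is already reported in plain bytes by qemu-img, so it - # comes through unconverted, unlike the human-readable sizes above. -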
self.assertEquals(65536, image_info.cluster_size) - - def test_qemu_info_canon2(self): - path = "disk.config" - example_output = """image: disk.config -file format: QCOW2 -virtual size: 67108844 -cluster_size: 65536 -disk size: 963434 -backing file: /var/lib/nova/a328c7998805951a_2 -""" - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((example_output, '')) - self.mox.ReplayAll() - image_info = images.qemu_img_info(path) - self.assertEquals('disk.config', image_info.image) - self.assertEquals('qcow2', image_info.file_format) - self.assertEquals(67108844, image_info.virtual_size) - self.assertEquals(963434, image_info.disk_size) - self.assertEquals(65536, image_info.cluster_size) - self.assertEquals('/var/lib/nova/a328c7998805951a_2', - image_info.backing_file) - - def test_qemu_backing_file_actual(self): - path = "disk.config" - example_output = """image: disk.config -file format: raw -virtual size: 64M (67108864 bytes) -cluster_size: 65536 -disk size: 96K -Snapshot list: -ID TAG VM SIZE DATE VM CLOCK -1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 -backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2) -""" - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((example_output, '')) - self.mox.ReplayAll() - image_info = images.qemu_img_info(path) - self.assertEquals('disk.config', image_info.image) - self.assertEquals('raw', image_info.file_format) - self.assertEquals(67108864, image_info.virtual_size) - self.assertEquals(98304, image_info.disk_size) - self.assertEquals(1, len(image_info.snapshots)) - self.assertEquals('/b/3a988059e51a_2', - image_info.backing_file) - - def test_qemu_info_convert(self): - path = "disk.config" - example_output = """image: disk.config -file format: raw -virtual size: 64M -disk size: 96K -Snapshot list: -ID TAG VM SIZE DATE VM CLOCK -1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 -3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 -4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 -junk stuff: bbb -""" - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path).AndReturn((example_output, '')) - self.mox.ReplayAll() - image_info = images.qemu_img_info(path) - self.assertEquals('disk.config', image_info.image) - self.assertEquals('raw', image_info.file_format) - self.assertEquals(67108864, image_info.virtual_size) - self.assertEquals(98304, image_info.disk_size) - - def test_qemu_info_snaps(self): - path = "disk.config" - example_output = """image: disk.config -file format: raw -virtual size: 64M (67108864 bytes) -disk size: 96K -Snapshot list: -ID TAG VM SIZE DATE VM CLOCK -1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 -3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 -4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 -""" - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(utils, 'execute') - os.path.exists(path).AndReturn(True) - utils.execute('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', 
path).AndReturn((example_output, '')) - self.mox.ReplayAll() - image_info = images.qemu_img_info(path) - self.assertEquals('disk.config', image_info.image) - self.assertEquals('raw', image_info.file_format) - self.assertEquals(67108864, image_info.virtual_size) - self.assertEquals(98304, image_info.disk_size) - self.assertEquals(3, len(image_info.snapshots)) diff --git a/nova/tests/test_migration_utils.py b/nova/tests/test_migration_utils.py deleted file mode 100644 index a15ac251..00000000 --- a/nova/tests/test_migration_utils.py +++ /dev/null @@ -1,516 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me). -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate.changeset import UniqueConstraint -from sqlalchemy.dialects import mysql -from sqlalchemy import Boolean, Index, Integer, DateTime, String -from sqlalchemy import MetaData, Table, Column -from sqlalchemy.engine import reflection -from sqlalchemy.exc import NoSuchTableError -from sqlalchemy.exc import SAWarning -from sqlalchemy.sql import select -from sqlalchemy.types import UserDefinedType, NullType - -from nova.db.sqlalchemy import api as db -from nova.db.sqlalchemy import utils -from nova import exception -from nova.tests import test_migrations -import warnings - - -class CustomType(UserDefinedType): - """Dummy column type for testing unsupported types.""" - def get_col_spec(self): - return "CustomType" - - -class TestMigrationUtils(test_migrations.BaseMigrationTestCase): - """Class for testing utils that are used in db migrations.""" - - def test_utils_drop_unique_constraint(self): - table_name = "__test_tmp_table__" - uc_name = 'uniq_foo' - values = [ - {'id': 1, 'a': 3, 'foo': 10}, - {'id': 2, 'a': 2, 'foo': 20}, - {'id': 3, 'a': 1, 'foo': 30} - ] - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - test_table = Table(table_name, meta, - Column('id', Integer, primary_key=True, - nullable=False), - Column('a', Integer), - Column('foo', Integer), - UniqueConstraint('a', name='uniq_a'), - UniqueConstraint('foo', name=uc_name)) - test_table.create() - - engine.execute(test_table.insert(), values) - # NOTE(boris-42): This method is generic UC dropper. - utils.drop_unique_constraint(engine, table_name, uc_name, 'foo') - - s = test_table.select().order_by(test_table.c.id) - rows = engine.execute(s).fetchall() - - for i in xrange(0, len(values)): - v = values[i] - self.assertEqual((v['id'], v['a'], v['foo']), rows[i]) - - # NOTE(boris-42): Update data about Table from DB. 
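- # Reflecting the table again (autoload=True) confirms the dropped - # constraint is really gone from the schema.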
- meta = MetaData() - meta.bind = engine - test_table = Table(table_name, meta, autoload=True) - constraints = filter(lambda c: c.name == uc_name, - test_table.constraints) - self.assertEqual(len(constraints), 0) - self.assertEqual(len(test_table.constraints), 1) - - test_table.drop() - - def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self): - - table_name = "__test_tmp_table__" - uc_name = 'uniq_foo' - values = [ - {'id': 1, 'a': 3, 'foo': 10}, - {'id': 2, 'a': 2, 'foo': 20}, - {'id': 3, 'a': 1, 'foo': 30} - ] - - engine = self.engines['sqlite'] - meta = MetaData(bind=engine) - - test_table = Table(table_name, meta, - Column('id', Integer, primary_key=True, - nullable=False), - Column('a', Integer), - Column('foo', CustomType, default=0), - UniqueConstraint('a', name='uniq_a'), - UniqueConstraint('foo', name=uc_name)) - test_table.create() - - engine.execute(test_table.insert(), values) - warnings.simplefilter("ignore", SAWarning) - # NOTE(boris-42): Missing info about column `foo` that has - # unsupported type CustomType. - self.assertRaises(exception.NovaException, - utils.drop_unique_constraint, - engine, table_name, uc_name, 'foo') - - # NOTE(boris-42): Wrong type of foo instance. it should be - # instance of sqlalchemy.Column. - self.assertRaises(exception.NovaException, - utils.drop_unique_constraint, - engine, table_name, uc_name, 'foo', foo=Integer()) - - foo = Column('foo', CustomType, default=0) - utils.drop_unique_constraint(engine, table_name, uc_name, 'foo', - foo=foo) - - s = test_table.select().order_by(test_table.c.id) - rows = engine.execute(s).fetchall() - - for i in xrange(0, len(values)): - v = values[i] - self.assertEqual((v['id'], v['a'], v['foo']), rows[i]) - - # NOTE(boris-42): Update data about Table from DB. 
- meta = MetaData(bind=engine) - test_table = Table(table_name, meta, autoload=True) - constraints = filter(lambda c: c.name == uc_name, - test_table.constraints) - self.assertEqual(len(constraints), 0) - self.assertEqual(len(test_table.constraints), 1) - test_table.drop() - - def _populate_db_for_drop_duplicate_entries(self, engine, meta, - table_name): - values = [ - {'id': 11, 'a': 3, 'b': 10, 'c': 'abcdef'}, - {'id': 12, 'a': 5, 'b': 10, 'c': 'abcdef'}, - {'id': 13, 'a': 6, 'b': 10, 'c': 'abcdef'}, - {'id': 14, 'a': 7, 'b': 10, 'c': 'abcdef'}, - {'id': 21, 'a': 1, 'b': 20, 'c': 'aa'}, - {'id': 31, 'a': 1, 'b': 20, 'c': 'bb'}, - {'id': 41, 'a': 1, 'b': 30, 'c': 'aef'}, - {'id': 42, 'a': 2, 'b': 30, 'c': 'aef'}, - {'id': 43, 'a': 3, 'b': 30, 'c': 'aef'} - ] - - test_table = Table(table_name, meta, - Column('id', Integer, primary_key=True, - nullable=False), - Column('a', Integer), - Column('b', Integer), - Column('c', String(255)), - Column('deleted', Integer, default=0), - Column('deleted_at', DateTime), - Column('updated_at', DateTime)) - - test_table.create() - engine.execute(test_table.insert(), values) - return test_table, values - - def test_drop_old_duplicate_entries_from_table(self): - table_name = "__test_tmp_table__" - - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - test_table, values = self.\ - _populate_db_for_drop_duplicate_entries(engine, meta, - table_name) - - utils.drop_old_duplicate_entries_from_table(engine, table_name, - False, 'b', 'c') - - uniq_values = set() - expected_ids = [] - for value in sorted(values, key=lambda x: x['id'], reverse=True): - uniq_value = (('b', value['b']), ('c', value['c'])) - if uniq_value in uniq_values: - continue - uniq_values.add(uniq_value) - expected_ids.append(value['id']) - - real_ids = [row[0] for row in - engine.execute(select([test_table.c.id])).fetchall()] - - self.assertEqual(len(real_ids), len(expected_ids)) - for id_ in expected_ids: - self.assertTrue(id_ in real_ids) - - def test_drop_old_duplicate_entries_from_table_soft_delete(self): - table_name = "__test_tmp_table__" - - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - table, values = self.\ - _populate_db_for_drop_duplicate_entries(engine, meta, - table_name) - utils.drop_old_duplicate_entries_from_table(engine, table_name, - True, 'b', 'c') - uniq_values = set() - expected_values = [] - soft_deleted_values = [] - - for value in sorted(values, key=lambda x: x['id'], reverse=True): - uniq_value = (('b', value['b']), ('c', value['c'])) - if uniq_value in uniq_values: - soft_deleted_values.append(value) - continue - uniq_values.add(uniq_value) - expected_values.append(value) - - base_select = table.select() - - rows_select = base_select.\ - where(table.c.deleted != table.c.id) - row_ids = [row['id'] for row in - engine.execute(rows_select).fetchall()] - self.assertEqual(len(row_ids), len(expected_values)) - for value in expected_values: - self.assertTrue(value['id'] in row_ids) - - deleted_rows_select = base_select.\ - where(table.c.deleted == table.c.id) - deleted_rows_ids = [row['id'] for row in - engine.execute(deleted_rows_select).fetchall()] - self.assertEqual(len(deleted_rows_ids), - len(values) - len(row_ids)) - for value in soft_deleted_values: - self.assertTrue(value['id'] in deleted_rows_ids) - - def test_check_shadow_table(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - - table = Table(table_name, meta, - Column('id', Integer, 
primary_key=True), - Column('a', Integer), - Column('c', String(256))) - table.create() - - #check missing shadow table - self.assertRaises(NoSuchTableError, - utils.check_shadow_table, engine, table_name) - - shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, - Column('id', Integer), - Column('a', Integer)) - shadow_table.create() - - # check missing column - self.assertRaises(exception.NovaException, - utils.check_shadow_table, engine, table_name) - - # check when all is ok - c = Column('c', String(256)) - shadow_table.create_column(c) - self.assertTrue(utils.check_shadow_table(engine, table_name)) - - # check extra column - d = Column('d', Integer) - shadow_table.create_column(d) - self.assertRaises(exception.NovaException, - utils.check_shadow_table, engine, table_name) - - def test_check_shadow_table_different_types(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', Integer)) - table.create() - - shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', String(256))) - shadow_table.create() - self.assertRaises(exception.NovaException, - utils.check_shadow_table, engine, table_name) - - def test_check_shadow_table_with_unsupported_type(self): - table_name = 'abc' - engine = self.engines['sqlite'] - meta = MetaData(bind=engine) - - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', Integer), - Column('c', CustomType)) - table.create() - - shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', Integer), - Column('c', CustomType)) - shadow_table.create() - self.assertTrue(utils.check_shadow_table(engine, table_name)) - - def test_create_shadow_table_by_table_instance(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', Integer), - Column('b', String(256))) - table.create() - utils.create_shadow_table(engine, table=table) - self.assertTrue(utils.check_shadow_table(engine, table_name)) - - def test_create_shadow_table_by_name(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', Integer), - Column('b', String(256))) - table.create() - utils.create_shadow_table(engine, table_name=table_name) - self.assertTrue(utils.check_shadow_table(engine, table_name)) - - def test_create_shadow_table_not_supported_type(self): - table_name = 'abc' - engine = self.engines['sqlite'] - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', CustomType)) - table.create() - self.assertRaises(exception.NovaException, - utils.create_shadow_table, - engine, table_name=table_name) - - utils.create_shadow_table(engine, table_name=table_name, - a=Column('a', CustomType())) - self.assertTrue(utils.check_shadow_table(engine, table_name)) - - def test_create_shadow_both_table_and_table_name_are_none(self): - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - self.assertRaises(exception.NovaException, - utils.create_shadow_table, engine) - - def 
test_create_shadow_both_table_and_table_name_are_specified(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', Integer)) - table.create() - self.assertRaises(exception.NovaException, - utils.create_shadow_table, - engine, table=table, table_name=table_name) - - def test_create_duplicate_shadow_table(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', Integer)) - table.create() - utils.create_shadow_table(engine, table_name=table_name) - self.assertRaises(exception.ShadowTableExists, - utils.create_shadow_table, - engine, table_name=table_name) - - def test_change_deleted_column_type_doesnt_drop_index(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData(bind=engine) - - indexes = { - 'idx_a_deleted': ['a', 'deleted'], - 'idx_b_deleted': ['b', 'deleted'], - 'idx_a': ['a'] - } - - index_instances = [Index(name, *columns) - for name, columns in indexes.iteritems()] - - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('a', String(255)), - Column('b', String(255)), - Column('deleted', Boolean), - *index_instances) - table.create() - utils.change_deleted_column_type_to_id_type(engine, table_name) - utils.change_deleted_column_type_to_boolean(engine, table_name) - - insp = reflection.Inspector.from_engine(engine) - real_indexes = insp.get_indexes(table_name) - self.assertEqual(len(real_indexes), 3) - for index in real_indexes: - name = index['name'] - self.assertIn(name, indexes) - self.assertEqual(set(index['column_names']), - set(indexes[name])) - - def test_change_deleted_column_type_to_id_type_integer(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('deleted', Boolean)) - table.create() - utils.change_deleted_column_type_to_id_type(engine, table_name) - - table = utils.get_table(engine, table_name) - self.assertTrue(isinstance(table.c.deleted.type, Integer)) - - def test_change_deleted_column_type_to_id_type_string(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', String(255), primary_key=True), - Column('deleted', Boolean)) - table.create() - utils.change_deleted_column_type_to_id_type(engine, table_name) - - table = utils.get_table(engine, table_name) - self.assertTrue(isinstance(table.c.deleted.type, String)) - - def test_change_deleted_column_type_to_id_type_custom(self): - table_name = 'abc' - engine = self.engines['sqlite'] - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('foo', CustomType), - Column('deleted', Boolean)) - table.create() - - self.assertRaises(exception.NovaException, - utils.change_deleted_column_type_to_id_type, - engine, table_name) - - fooColumn = Column('foo', CustomType()) - utils.change_deleted_column_type_to_id_type(engine, table_name, - foo=fooColumn) - - table = utils.get_table(engine, table_name) - # NOTE(boris-42): There is no way to check has foo type CustomType. - # but sqlalchemy will set it to NullType. 
- self.assertTrue(isinstance(table.c.foo.type, NullType)) - self.assertTrue(isinstance(table.c.deleted.type, Integer)) - - def test_change_deleted_column_type_to_boolean(self): - table_name = 'abc' - for key, engine in self.engines.items(): - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('deleted', Integer)) - table.create() - - utils.change_deleted_column_type_to_boolean(engine, table_name) - - table = utils.get_table(engine, table_name) - expected_type = Boolean if key != "mysql" else mysql.TINYINT - self.assertTrue(isinstance(table.c.deleted.type, expected_type)) - - def test_change_deleted_column_type_to_boolean_type_custom(self): - table_name = 'abc' - engine = self.engines['sqlite'] - meta = MetaData() - meta.bind = engine - table = Table(table_name, meta, - Column('id', Integer, primary_key=True), - Column('foo', CustomType), - Column('deleted', Integer)) - table.create() - - self.assertRaises(exception.NovaException, - utils.change_deleted_column_type_to_boolean, - engine, table_name) - - fooColumn = Column('foo', CustomType()) - utils.change_deleted_column_type_to_boolean(engine, table_name, - foo=fooColumn) - - table = utils.get_table(engine, table_name) - # NOTE(boris-42): There is no way to check has foo type CustomType. - # but sqlalchemy will set it to NullType. - self.assertTrue(isinstance(table.c.foo.type, NullType)) - self.assertTrue(isinstance(table.c.deleted.type, Boolean)) diff --git a/nova/tests/test_migrations.conf b/nova/tests/test_migrations.conf deleted file mode 100644 index 774f1499..00000000 --- a/nova/tests/test_migrations.conf +++ /dev/null @@ -1,9 +0,0 @@ -[DEFAULT] -# Set up any number of migration data stores you want, one -# The "name" used in the test is the config variable key. -#sqlite=sqlite:///test_migrations.db -sqlite=sqlite:// -#mysql=mysql://root:@localhost/test_migrations -#postgresql=postgresql://user:pass@localhost/test_migrations -[walk_style] -snake_walk=yes diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py deleted file mode 100644 index 7121bdb0..00000000 --- a/nova/tests/test_migrations.py +++ /dev/null @@ -1,1547 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010-2011 OpenStack Foundation -# Copyright 2012-2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for database migrations. This test case reads the configuration -file test_migrations.conf for database connection settings -to use in the tests. For each connection found in the config file, -the test case runs a series of test cases to ensure that migrations work -properly both upgrading and downgrading, and that no data loss occurs -if possible. - -There are also "opportunistic" tests for both mysql and postgresql in here, -which allows testing against all 3 databases (sqlite in memory, mysql, pg) in -a properly configured unit test environment. 
- -For the opportunistic testing you need to set up a db named 'openstack_citest' -with user 'openstack_citest' and password 'openstack_citest' on localhost. -The test will then use that db and u/p combo to run the tests. - -For postgres on Ubuntu this can be done with the following commands: - -sudo -u postgres psql -postgres=# create user openstack_citest with createdb login password - 'openstack_citest'; -postgres=# create database openstack_citest with owner openstack_citest; - -""" - -import collections -import commands -import ConfigParser -import datetime -import glob -import os -import urlparse -import uuid - -from migrate.versioning import repository -import netaddr -import sqlalchemy -from sqlalchemy.dialects import postgresql -from sqlalchemy.dialects import sqlite -import sqlalchemy.exc - -from nova.db.sqlalchemy import api as db -import nova.db.sqlalchemy.migrate_repo -from nova.db.sqlalchemy import utils as db_utils -from nova.openstack.common import log as logging -from nova.openstack.common import timeutils -from nova import test -from nova import utils -import nova.virt.baremetal.db.sqlalchemy.migrate_repo - - -LOG = logging.getLogger(__name__) - - -def _get_connect_string(backend, user, passwd, database): - """ - Try to get a connection with a very specific set of values, if we get - these then we'll run the tests, otherwise they are skipped - """ - if backend == "postgres": - backend = "postgresql+psycopg2" - elif backend == "mysql": - backend = "mysql+mysqldb" - else: - raise Exception("Unrecognized backend: '%s'" % backend) - - return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" - % locals()) - - -def _is_backend_avail(backend, user, passwd, database): - try: - connect_uri = _get_connect_string(backend, user, passwd, database) - engine = sqlalchemy.create_engine(connect_uri) - connection = engine.connect() - except Exception: - # intentionally catch all to handle exceptions even if we don't - # have any backend code loaded. - return False - else: - connection.close() - engine.dispose() - return True - - -def _have_mysql(user, passwd, database): - present = os.environ.get('NOVA_TEST_MYSQL_PRESENT') - if present is None: - return _is_backend_avail('mysql', user, passwd, database) - return present.lower() in ('', 'true') - - -def _have_postgresql(user, passwd, database): - present = os.environ.get('NOVA_TEST_POSTGRESQL_PRESENT') - if present is None: - return _is_backend_avail('postgres', user, passwd, database) - return present.lower() in ('', 'true') - - -def get_mysql_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - if auth_pieces[1].strip(): - password = "-p\"%s\"" % auth_pieces[1] - - return (user, password, database, host) - - -def get_pgsql_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - password = auth_pieces[1].strip() - - return (user, password, database, host) - - -class CommonTestsMixIn(object): - """These tests are shared between TestNovaMigrations and - TestBaremetalMigrations. 
- - BaseMigrationTestCase is effectively an abstract class, meant to be derived - from and not directly tested against; that's why these `test_` methods need - to be on a Mixin, so that they won't be picked up as valid tests for - BaseMigrationTestCase. - """ - def test_walk_versions(self): - for key, engine in self.engines.items(): - self._walk_versions(engine, self.snake_walk) - - def test_mysql_opportunistically(self): - self._test_mysql_opportunistically() - - def test_mysql_connect_fail(self): - """ - Test that we can trigger a mysql connection failure and that we - fail gracefully, so we don't break people without mysql. - """ - if _is_backend_avail('mysql', "openstack_cifail", self.PASSWD, - self.DATABASE): - self.fail("Shouldn't have connected") - - def test_postgresql_opportunistically(self): - self._test_postgresql_opportunistically() - - def test_postgresql_connect_fail(self): - """ - Test that we can trigger a postgres connection failure and that we - fail gracefully, so we don't break people without postgres. - """ - if _is_backend_avail('postgres', "openstack_cifail", self.PASSWD, - self.DATABASE): - self.fail("Shouldn't have connected") - - -class BaseMigrationTestCase(test.TestCase): - """Base class for testing migrations and migration utils.""" - USER = None - PASSWD = None - DATABASE = None - - def __init__(self, *args, **kwargs): - super(BaseMigrationTestCase, self).__init__(*args, **kwargs) - - self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), - 'test_migrations.conf') - # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable - # to override the location of the config file for migration testing. - self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF', - self.DEFAULT_CONFIG_FILE) - self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__ - self.REPOSITORY = repository.Repository( - os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) - self.INIT_VERSION = 0 - - self.snake_walk = False - self.test_databases = {} - self.migration = None - self.migration_api = None - - def setUp(self): - super(BaseMigrationTestCase, self).setUp() - - # Load test databases from the config file. Only do this - # once. No need to re-run this on each test... - LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH) - if os.path.exists(self.CONFIG_FILE_PATH): - cp = ConfigParser.RawConfigParser() - try: - cp.read(self.CONFIG_FILE_PATH) - defaults = cp.defaults() - for key, value in defaults.items(): - self.test_databases[key] = value - self.snake_walk = cp.getboolean('walk_style', 'snake_walk') - except ConfigParser.ParsingError as e: - self.fail("Failed to read test_migrations.conf config " - "file. Got error: %s" % e) - else: - self.fail("Failed to find test_migrations.conf config " - "file.") - - self.engines = {} - for key, value in self.test_databases.items(): - self.engines[key] = sqlalchemy.create_engine(value) - - # We start each test case with a completely blank slate. 
-
-    def tearDown(self):
-        # We destroy and recreate the test data store between test
-        # cases, which ensures that the tests have no side effects
-        # on one another.
-        self._reset_databases()
-        super(BaseMigrationTestCase, self).tearDown()
-
-    def execute_cmd(self, cmd=None):
-        status, output = commands.getstatusoutput(cmd)
-        LOG.debug(output)
-        self.assertEqual(0, status,
-                         "Failed to run: %s\n%s" % (cmd, output))
-
-    @utils.synchronized('pgadmin', external=True)
-    def _reset_pg(self, conn_pieces):
-        (user, password, database, host) = \
-            get_pgsql_connection_info(conn_pieces)
-        os.environ['PGPASSWORD'] = password
-        os.environ['PGUSER'] = user
-        # NOTE(boris-42): We must create and drop the database, but we
-        # can't drop the database we are connected to, so for these
-        # operations we connect to the special template1 database.
-        sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
-                  " '%(sql)s' -d template1")
-
-        sql = ("drop database if exists %(database)s;") % locals()
-        droptable = sqlcmd % locals()
-        self.execute_cmd(droptable)
-
-        sql = ("create database %(database)s;") % locals()
-        createtable = sqlcmd % locals()
-        self.execute_cmd(createtable)
-
-        os.unsetenv('PGPASSWORD')
-        os.unsetenv('PGUSER')
-
-    def _reset_databases(self):
-        for key, engine in self.engines.items():
-            conn_string = self.test_databases[key]
-            conn_pieces = urlparse.urlparse(conn_string)
-            engine.dispose()
-            if conn_string.startswith('sqlite'):
-                # We can just delete the SQLite database, which is
-                # the easiest and cleanest solution.
-                db_path = conn_pieces.path.strip('/')
-                if os.path.exists(db_path):
-                    os.unlink(db_path)
-                # No need to recreate the SQLite DB; SQLite will
-                # create it for us if it's not there...
-            elif conn_string.startswith('mysql'):
-                # We can execute the MySQL client to destroy and re-create
-                # the MySQL database, which is easier and less error-prone
-                # than using SQLAlchemy to do this via MetaData...trust me.
-                (user, password, database, host) = \
-                    get_mysql_connection_info(conn_pieces)
-                sql = ("drop database if exists %(database)s; "
-                       "create database %(database)s;") % locals()
-                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
-                       "-e \"%(sql)s\"") % locals()
-                self.execute_cmd(cmd)
-            elif conn_string.startswith('postgresql'):
-                self._reset_pg(conn_pieces)
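To make the shell plumbing concrete, here is a small standalone sketch (Python 2, standard library only; the URL is a placeholder) of how a SQLAlchemy-style URL is parsed and turned into the psql command that _reset_pg builds:

    import urlparse

    url = "postgresql://openstack_citest:openstack_citest@localhost/openstack_citest"
    pieces = urlparse.urlparse(url)
    database = pieces.path.strip('/')          # path is '/openstack_citest'
    auth, host = pieces.netloc.split('@')
    user = auth.split(':')[0]
    sql = "drop database if exists %s;" % database
    print("psql -w -U %s -h %s -c '%s' -d template1" % (user, host, sql))
    # psql -w -U openstack_citest -h localhost -c
    #     'drop database if exists openstack_citest;' -d template1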
-
-    def _test_mysql_opportunistically(self):
-        # Test that table creation on mysql only builds InnoDB tables
-        if not _have_mysql(self.USER, self.PASSWD, self.DATABASE):
-            self.skipTest("mysql not available")
-        # Add this to the global lists so reset works with it; it's removed
-        # automatically in tearDown, so there is no need to clean it up here.
-        connect_string = _get_connect_string("mysql", self.USER, self.PASSWD,
-                self.DATABASE)
-        (user, password, database, host) = \
-            get_mysql_connection_info(urlparse.urlparse(connect_string))
-        engine = sqlalchemy.create_engine(connect_string)
-        self.engines[database] = engine
-        self.test_databases[database] = connect_string
-
-        # build a fully populated mysql database with all the tables
-        self._reset_databases()
-        self._walk_versions(engine, False, False)
-
-        connection = engine.connect()
-        # sanity check
-        total = connection.execute("SELECT count(*) "
-                                   "from information_schema.TABLES "
-                                   "where TABLE_SCHEMA='%(database)s'" %
-                                   locals())
-        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
-
-        noninnodb = connection.execute("SELECT count(*) "
-                                       "from information_schema.TABLES "
-                                       "where TABLE_SCHEMA='%(database)s' "
-                                       "and ENGINE!='InnoDB' "
-                                       "and TABLE_NAME!='migrate_version'" %
-                                       locals())
-        count = noninnodb.scalar()
-        self.assertEqual(count, 0, "%d non-InnoDB tables created" % count)
-        connection.close()
-
-    def _test_postgresql_opportunistically(self):
-        # Test postgresql database migration walk
-        if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE):
-            self.skipTest("postgresql not available")
-        # Add this to the global lists so reset works with it; it's removed
-        # automatically in tearDown, so there is no need to clean it up here.
-        connect_string = _get_connect_string("postgres", self.USER,
-                self.PASSWD, self.DATABASE)
-        engine = sqlalchemy.create_engine(connect_string)
-        (user, password, database, host) = \
-            get_pgsql_connection_info(urlparse.urlparse(connect_string))
-        self.engines[database] = engine
-        self.test_databases[database] = connect_string
-
-        # build a fully populated postgresql database with all the tables
-        self._reset_databases()
-        self._walk_versions(engine, False, False)
-
-    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
-        # Determine latest version script from the repo, then
-        # upgrade from 1 through to the latest, with no data
-        # in the databases. This just checks that the schema itself
-        # upgrades successfully.
-
-        # Place the database under version control
-        self.migration_api.version_control(engine,
-                self.REPOSITORY,
-                self.INIT_VERSION)
-        self.assertEqual(self.INIT_VERSION,
-                self.migration_api.db_version(engine,
-                                              self.REPOSITORY))
-
-        LOG.debug('latest version is %s' % self.REPOSITORY.latest)
-        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
-
-        for version in versions:
-            # upgrade -> downgrade -> upgrade
-            self._migrate_up(engine, version, with_data=True)
-            if snake_walk:
-                downgraded = self._migrate_down(
-                        engine, version - 1, with_data=True)
-                if downgraded:
-                    self._migrate_up(engine, version)
-
-        if downgrade:
-            # Now walk it back down to 0 from the latest, testing
-            # the downgrade paths.
-            for version in reversed(versions):
-                # downgrade -> upgrade -> downgrade
-                downgraded = self._migrate_down(engine, version - 1)
-
-                if snake_walk and downgraded:
-                    self._migrate_up(engine, version)
-                    self._migrate_down(engine, version - 1)
-
-    def _migrate_down(self, engine, version, with_data=False):
-        try:
-            self.migration_api.downgrade(engine, self.REPOSITORY, version)
-        except NotImplementedError:
-            # NOTE(sirp): some migrations, namely release-level
-            # migrations, don't support a downgrade.
-            return False
-
-        self.assertEqual(version,
-                         self.migration_api.db_version(engine,
-                                                       self.REPOSITORY))
-
-        # NOTE(sirp): `version` is what we're downgrading to (i.e. the
-        # 'target' version). So if we have any downgrade checks, they need
-        # to be run for the previous (higher numbered) migration.
-        if with_data:
-            post_downgrade = getattr(
-                    self, "_post_downgrade_%03d" % (version + 1), None)
-            if post_downgrade:
-                post_downgrade(engine)
-
-        return True
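The visiting order is easier to see in isolation. A minimal sketch, assuming INIT_VERSION is 0 and a repository whose latest version is 3, and ignoring the with_data hooks, prints the sequence of upgrades and downgrades _walk_versions performs when both snake_walk and downgrade are enabled:

    INIT_VERSION, latest = 0, 3
    versions = range(INIT_VERSION + 1, latest + 1)
    steps = []
    for version in versions:              # upgrade -> downgrade -> upgrade
        steps += ['up:%d' % version, 'down:%d' % (version - 1),
                  'up:%d' % version]
    for version in reversed(versions):    # downgrade -> upgrade -> downgrade
        steps += ['down:%d' % (version - 1), 'up:%d' % version,
                  'down:%d' % (version - 1)]
    print(' '.join(steps))
    # up:1 down:0 up:1 up:2 down:1 up:2 up:3 down:2 up:3
    # down:2 up:3 down:2 down:1 up:2 down:1 down:0 up:1 down:0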
-
-    def _migrate_up(self, engine, version, with_data=False):
-        """Migrate up to a new version of the db.
-
-        We allow for data insertion and post checks at every
-        migration version with special _pre_upgrade_### and
-        _check_### functions in the main test.
-        """
-        # NOTE(sdague): try block is here because it's impossible to debug
-        # where a failed data migration happens otherwise
-        try:
-            if with_data:
-                data = None
-                pre_upgrade = getattr(
-                        self, "_pre_upgrade_%03d" % version, None)
-                if pre_upgrade:
-                    data = pre_upgrade(engine)
-
-            self.migration_api.upgrade(engine, self.REPOSITORY, version)
-            self.assertEqual(version,
-                             self.migration_api.db_version(engine,
-                                                           self.REPOSITORY))
-            if with_data:
-                check = getattr(self, "_check_%03d" % version, None)
-                if check:
-                    check(engine, data)
-        except Exception:
-            LOG.error("Failed to migrate to version %s on engine %s" %
-                      (version, engine))
-            raise
-
-
-class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
-    """Test sqlalchemy-migrate migrations."""
-    USER = "openstack_citest"
-    PASSWD = "openstack_citest"
-    DATABASE = "openstack_citest"
-
-    def __init__(self, *args, **kwargs):
-        super(TestNovaMigrations, self).__init__(*args, **kwargs)
-
-        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
-                                       'test_migrations.conf')
-        # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
-        # to override the location of the config file for migration testing
-        self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
-                                      self.DEFAULT_CONFIG_FILE)
-        self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
-        self.REPOSITORY = repository.Repository(
-                        os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
-
-    def setUp(self):
-        super(TestNovaMigrations, self).setUp()
-
-        if self.migration is None:
-            self.migration = __import__('nova.db.migration',
-                    globals(), locals(), ['INIT_VERSION'], -1)
-            self.INIT_VERSION = self.migration.INIT_VERSION
-        if self.migration_api is None:
-            temp = __import__('nova.db.sqlalchemy.migration',
-                    globals(), locals(), ['versioning_api'], -1)
-            self.migration_api = temp.versioning_api
-
-    def _pre_upgrade_134(self, engine):
-        now = timeutils.utcnow()
-        data = [{
-            'id': 1,
-            'uuid': '1d739808-d7ec-4944-b252-f8363e119755',
-            'mac': '00:00:00:00:00:01',
-            'start_period': now,
-            'last_refreshed': now + datetime.timedelta(seconds=10),
-            'bw_in': 100000,
-            'bw_out': 200000,
-        }, {
-            'id': 2,
-            'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
-            'mac': '2a:f2:48:31:c1:60',
-            'start_period': now,
-            'last_refreshed': now + datetime.timedelta(seconds=20),
-            'bw_in': 1000000000,
-            'bw_out': 200000000,
-        }, {
-            'id': 3,
-            # This is intended to be the same uuid as the row above.
-            'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
-            'mac': '00:00:00:00:00:02',
-            'start_period': now,
-            'last_refreshed': now + datetime.timedelta(seconds=30),
-            'bw_in': 0,
-            'bw_out': 0,
-        }]
-
-        bw_usage_cache = db_utils.get_table(engine, 'bw_usage_cache')
-        engine.execute(bw_usage_cache.insert(), data)
-        return data
-
-    def _check_134(self, engine, data):
-        bw_usage_cache = db_utils.get_table(engine, 'bw_usage_cache')
-
-        # Check that both columns have been successfully created.
-        self.assertIn('last_ctr_in', bw_usage_cache.c)
-        self.assertIn('last_ctr_out', bw_usage_cache.c)
-
-        # Check that all rows have been inserted.
-        bw_items = bw_usage_cache.select().execute().fetchall()
-        self.assertEqual(len(bw_items), 3)
-
-        bw = bw_usage_cache.select(
-            bw_usage_cache.c.id == 1).execute().first()
-
-        # New columns have NULL as their default value.
- self.assertEqual(bw['last_ctr_in'], None) - self.assertEqual(bw['last_ctr_out'], None) - - self.assertEqual(data[0]['mac'], bw['mac']) - - # migration 141, update migrations instance uuid - def _pre_upgrade_141(self, engine): - data = { - 'instance_uuid': str(uuid.uuid4()) - } - migrations = db_utils.get_table(engine, 'migrations') - engine.execute(migrations.insert(), data) - result = migrations.insert().values(data).execute() - data['id'] = result.inserted_primary_key[0] - return data - - def _check_141(self, engine, data): - migrations = db_utils.get_table(engine, 'migrations') - row = migrations.select( - migrations.c.id == data['id']).execute().first() - # Check that change to String(36) went alright - self.assertEqual(data['instance_uuid'], row['instance_uuid']) - - # migration 146, availability zone transition - def _pre_upgrade_146(self, engine): - data = { - 'availability_zone': 'custom_az', - 'name': 'name', - } - - aggregates = db_utils.get_table(engine, 'aggregates') - result = aggregates.insert().values(data).execute() - # NOTE(sdague) it's important you don't insert keys by value in - # postgresql, because its autoincrement counter won't get updated - data['id'] = result.inserted_primary_key[0] - return data - - def _check_146(self, engine, data): - aggregate_md = db_utils.get_table(engine, 'aggregate_metadata') - md = aggregate_md.select( - aggregate_md.c.aggregate_id == data['id']).execute().first() - self.assertEqual(data['availability_zone'], md['value']) - - def _post_downgrade_146(self, engine): - # Downgrade should delete availability_zone aggregate_metadata entries - aggregate_md = db_utils.get_table(engine, 'aggregate_metadata') - num_azs = aggregate_md.count().where( - aggregate_md.c.key == 'availability_zone').execute().scalar() - self.assertEqual(0, num_azs) - - # migration 147, availability zone transition for services - def _pre_upgrade_147(self, engine): - az = 'test_zone' - host1 = 'compute-host1' - host2 = 'compute-host2' - # start at id == 2 because we already inserted one - data = [ - {'id': 1, 'host': host1, - 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': az}, - {'id': 2, 'host': 'sched-host', - 'binary': 'nova-scheduler', 'topic': 'scheduler', - 'report_count': 0, 'availability_zone': 'ignore_me'}, - {'id': 3, 'host': host2, - 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': az}, - ] - - services = db_utils.get_table(engine, 'services') - engine.execute(services.insert(), data) - self._pre_upgrade_147_no_duplicate_aggregate_hosts(engine) - return data - - def _pre_upgrade_147_no_duplicate_aggregate_hosts(self, engine): - engine.execute( - db_utils.get_table(engine, 'aggregate_metadata').insert(), - [{'aggregate_id': 1, - 'key': 'availability_zone', - 'value': 'custom_az'}]) - - engine.execute(db_utils.get_table(engine, 'aggregate_hosts').insert(), - [{'aggregate_id': 1, - 'host': 'compute-host3'}]) - - engine.execute(db_utils.get_table(engine, 'services').insert(), - [{'id': 99, 'host': 'compute-host3', - 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'custom_az'}]) - - def _check_147(self, engine, data): - aggregate_md = db_utils.get_table(engine, 'aggregate_metadata') - aggregate_hosts = db_utils.get_table(engine, 'aggregate_hosts') - # NOTE(sdague): hard coded to id == 2, because we added to - # aggregate_metadata previously - for item in data: - md = aggregate_md.select( - aggregate_md.c.aggregate_id == 2).execute().first() - if 
item['binary'] == "nova-compute": - self.assertEqual(item['availability_zone'], md['value']) - - host = aggregate_hosts.select( - aggregate_hosts.c.aggregate_id == 2 - ).execute().first() - self.assertEqual(host['host'], data[0]['host']) - - # NOTE(sdague): id 3 is just non-existent - host = aggregate_hosts.select( - aggregate_hosts.c.aggregate_id == 3 - ).execute().first() - self.assertEqual(host, None) - - self._check_147_no_duplicate_aggregate_hosts(engine, data) - - def _check_147_no_duplicate_aggregate_hosts(self, engine, data): - aggregate_hosts = db_utils.get_table(engine, 'aggregate_hosts') - agg1_hosts = [h['host'] for h in aggregate_hosts.select( - aggregate_hosts.c.aggregate_id == 1 - ).execute().fetchall()] - self.assertEqual(['compute-host3'], agg1_hosts) - - # migration 149, changes IPAddr storage format - def _pre_upgrade_149(self, engine): - provider_fw_rules = db_utils.get_table(engine, 'provider_fw_rules') - console_pools = db_utils.get_table(engine, 'console_pools') - data = { - 'provider_fw_rules': - [ - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "127.0.0.1/30"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "128.128.128.128/16"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "128.128.128.128/32"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "2001:db8::1:2/48"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "::1/64"}, - {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234, - 'cidr': "0000:0000:0000:2013:0000:6535:abcd:ef11/64"}, - {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234, - 'cidr': "0000:1020:0000:2013:0000:6535:abcd:ef11/128"}, - ], - 'console_pools': - [ - {'address': '10.10.10.10'}, - {'address': '128.100.100.100'}, - {'address': '2002:2002:2002:2002:2002:2002:2002:2002'}, - {'address': '::1'}, - {'address': '0000:0000:0000:2013:0000:6535:abcd:ef11'} - ] - } - - engine.execute(provider_fw_rules.insert(), data['provider_fw_rules']) - - for pool in data['console_pools']: - engine.execute(console_pools.insert(), pool) - - return data - - def _check_149(self, engine, data): - provider_fw_rules = db_utils.get_table(engine, 'provider_fw_rules') - result = provider_fw_rules.select().execute() - - iplist = map(lambda x: str(netaddr.IPNetwork(x['cidr'])), - data['provider_fw_rules']) - - for row in result: - self.assertIn(str(netaddr.IPNetwork(row['cidr'])), iplist) - - console_pools = db_utils.get_table(engine, 'console_pools') - result = console_pools.select().execute() - - iplist = map(lambda x: str(netaddr.IPAddress(x['address'])), - data['console_pools']) - - for row in result: - self.assertIn(str(netaddr.IPAddress(row['address'])), iplist) - - # migration 151 - changes period_beginning and period_ending to DateTime - def _pre_upgrade_151(self, engine): - task_log = db_utils.get_table(engine, 'task_log') - data = { - 'task_name': 'The name of the task', - 'state': 'The state of the task', - 'host': 'compute-host1', - 'period_beginning': str(datetime.datetime(2013, 02, 11)), - 'period_ending': str(datetime.datetime(2013, 02, 12)), - 'message': 'The task_log message', - } - result = task_log.insert().values(data).execute() - data['id'] = result.inserted_primary_key[0] - return data - - def _check_151(self, engine, data): - task_log = db_utils.get_table(engine, 'task_log') - row = task_log.select(task_log.c.id == data['id']).execute().first() - self.assertTrue(isinstance(row['period_beginning'], - datetime.datetime)) - 
self.assertTrue(isinstance(row['period_ending'], - datetime.datetime)) - self.assertEqual( - data['period_beginning'], str(row['period_beginning'])) - self.assertEqual(data['period_ending'], str(row['period_ending'])) - - # migration 152 - convert deleted from boolean to int - def _pre_upgrade_152(self, engine): - host1 = 'compute-host1' - host2 = 'compute-host2' - # NOTE(sdague): start at #4 because services data already in table - # from 147 - services_data = [ - {'id': 4, 'host': host1, 'binary': 'nova-compute', - 'report_count': 0, 'topic': 'compute', 'deleted': False}, - {'id': 5, 'host': host1, 'binary': 'nova-compute', - 'report_count': 0, 'topic': 'compute', 'deleted': True} - ] - volumes_data = [ - {'id': 'first', 'host': host1, 'deleted': False}, - {'id': 'second', 'host': host2, 'deleted': True} - ] - - services = db_utils.get_table(engine, 'services') - engine.execute(services.insert(), services_data) - - volumes = db_utils.get_table(engine, 'volumes') - engine.execute(volumes.insert(), volumes_data) - return dict(services=services_data, volumes=volumes_data) - - def _check_152(self, engine, data): - services = db_utils.get_table(engine, 'services') - service = services.select(services.c.id == 4).execute().first() - self.assertEqual(0, service.deleted) - service = services.select(services.c.id == 5).execute().first() - self.assertEqual(service.id, service.deleted) - - volumes = db_utils.get_table(engine, 'volumes') - volume = volumes.select(volumes.c.id == "first").execute().first() - self.assertEqual("", volume.deleted) - volume = volumes.select(volumes.c.id == "second").execute().first() - self.assertEqual(volume.id, volume.deleted) - - # migration 153, copy flavor information into system_metadata - def _pre_upgrade_153(self, engine): - fake_types = [ - dict(id=10, name='type1', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="1", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=False, - is_public=True), - dict(id=11, name='type2', memory_mb=512, vcpus=1, - root_gb=10, ephemeral_gb=5, flavorid="2", swap=0, - rxtx_factor=1.5, vcpu_weight=2, disabled=False, - is_public=True), - dict(id=12, name='type3', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="3", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=False, - is_public=False), - dict(id=13, name='type4', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="4", swap=0, - rxtx_factor=1.0, vcpu_weight=None, disabled=True, - is_public=True), - dict(id=14, name='type5', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="5", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=True, - is_public=False), - ] - - fake_instances = [ - dict(uuid='m153-uuid1', instance_type_id=10, deleted=0), - dict(uuid='m153-uuid2', instance_type_id=11, deleted=0), - dict(uuid='m153-uuid3', instance_type_id=12, deleted=0), - dict(uuid='m153-uuid4', instance_type_id=13, deleted=0), - # NOTE(danms): no use of type5 - ] - - instances = db_utils.get_table(engine, 'instances') - instance_types = db_utils.get_table(engine, 'instance_types') - engine.execute(instance_types.insert(), fake_types) - engine.execute(instances.insert(), fake_instances) - - return fake_types, fake_instances - - def _check_153(self, engine, data): - fake_types, fake_instances = data - # NOTE(danms): Fetch all the tables and data from scratch after change - instances = db_utils.get_table(engine, 'instances') - instance_types = db_utils.get_table(engine, 'instance_types') - sys_meta = db_utils.get_table(engine, 
'instance_system_metadata') - - # Collect all system metadata, indexed by instance_uuid - metadata = collections.defaultdict(dict) - for values in sys_meta.select().execute(): - metadata[values['instance_uuid']][values['key']] = values['value'] - - # Taken from nova/compute/api.py - instance_type_props = ['id', 'name', 'memory_mb', 'vcpus', - 'root_gb', 'ephemeral_gb', 'flavorid', - 'swap', 'rxtx_factor', 'vcpu_weight'] - - for instance in fake_instances: - inst_sys_meta = metadata[instance['uuid']] - inst_type = fake_types[instance['instance_type_id'] - 10] - for prop in instance_type_props: - prop_name = 'instance_type_%s' % prop - self.assertIn(prop_name, inst_sys_meta) - if prop == "vcpu_weight": - # NOTE(danms) vcpu_weight can be NULL - self.assertEqual(inst_sys_meta[prop_name], - inst_type[prop] and str(inst_type[prop]) - or None) - else: - self.assertEqual(str(inst_sys_meta[prop_name]), - str(inst_type[prop])) - - # migration 154, add shadow tables for deleted data - # There are 53 shadow tables but we only test one - # There are additional tests in test_db_api.py - def _pre_upgrade_154(self, engine): - meta = sqlalchemy.schema.MetaData() - meta.reflect(engine) - table_names = meta.tables.keys() - for table_name in table_names: - self.assertFalse(table_name.startswith("_shadow")) - - def _check_154(self, engine, data): - meta = sqlalchemy.schema.MetaData() - meta.reflect(engine) - table_names = set(meta.tables.keys()) - for table_name in table_names: - if table_name.startswith(db._SHADOW_TABLE_PREFIX): - shadow_name = table_name - base_name = table_name.replace(db._SHADOW_TABLE_PREFIX, "") - self.assertIn(base_name, table_names) - else: - base_name = table_name - shadow_name = db._SHADOW_TABLE_PREFIX + table_name - self.assertIn(shadow_name, table_names) - shadow_table = db_utils.get_table(engine, shadow_name) - base_table = db_utils.get_table(engine, base_name) - base_columns = [] - shadow_columns = [] - for column in base_table.columns: - base_columns.append(column) - for column in shadow_table.columns: - shadow_columns.append(column) - for ii, base_column in enumerate(base_columns): - shadow_column = shadow_columns[ii] - self.assertEqual(base_column.name, shadow_column.name) - # NullType needs a special case. We end up with NullType on sqlite - # where bigint is not defined. - if isinstance(base_column.type, sqlalchemy.types.NullType): - self.assertTrue(isinstance(shadow_column.type, - sqlalchemy.types.NullType)) - else: - # Identical types do not test equal because sqlalchemy does not - # override __eq__, but if we stringify them then they do. 
- self.assertEqual(str(base_column.type), - str(shadow_column.type)) - - # migration 156 - introduce CIDR type - def _pre_upgrade_156(self, engine): - # assume the same data as from 149 - data = { - 'provider_fw_rules': - [ - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "127.0.0.1/30"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "128.128.128.128/16"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "128.128.128.128/32"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "2001:db8::1:2/48"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "::1/64"}, - {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234, - 'cidr': "0000:0000:0000:2013:0000:6535:abcd:ef11/64"}, - {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234, - 'cidr': "0000:1020:0000:2013:0000:6535:abcd:ef11/128"}, - ], - 'console_pools': - [ - {'address': '10.10.10.10'}, - {'address': '128.100.100.100'}, - {'address': '2002:2002:2002:2002:2002:2002:2002:2002'}, - {'address': '::1'}, - {'address': '0000:0000:0000:2013:0000:6535:abcd:ef11'} - ] - } - return data - - def _check_156(self, engine, data): - # recheck the 149 data - self._check_149(engine, data) - - def _pre_upgrade_158(self, engine): - networks = db_utils.get_table(engine, 'networks') - data = [ - {'vlan': 1, 'deleted': 0}, - {'vlan': 1, 'deleted': 0}, - {'vlan': 1, 'deleted': 0}, - ] - - for item in data: - networks.insert().values(item).execute() - return data - - def _check_158(self, engine, data): - networks = db_utils.get_table(engine, 'networks') - rows = networks.select().\ - where(networks.c.deleted != networks.c.id).\ - execute().\ - fetchall() - self.assertEqual(len(rows), 1) - - def _pre_upgrade_159(self, engine): - data = { - 'provider_fw_rules': - [ - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "127.0.0.1/30"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "128.128.128.128/16"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "128.128.128.128/32"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "2001:db8::1:2/48"}, - {'protocol': 'tcp', 'from_port': 1234, - 'to_port': 1234, 'cidr': "::1/64"}, - {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234, - 'cidr': "0000:0000:0000:2013:0000:6535:abcd:ef11/64"}, - {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234, - 'cidr': "0000:1020:0000:2013:0000:6535:abcd:ef11/128"}, - ], - 'console_pools': - [ - {'address': '10.10.10.10'}, - {'address': '128.100.100.100'}, - {'address': '2002:2002:2002:2002:2002:2002:2002:2002'}, - {'address': '::1'}, - {'address': '0000:0000:0000:2013:0000:6535:abcd:ef11'} - ] - } - return data - - # migration 159 - revert ip column size - def _check_159(self, engine, data): - dialect = engine.url.get_dialect() - # NOTE(maurosr): check if column length is 39 again (it currently makes - # sense only for mysql) - if dialect not in [postgresql.dialect, sqlite.dialect]: - console_pools = db_utils.get_table(engine, 'console_pools') - self.assertEqual(console_pools.columns['address'].type.length, 39) - # recheck the 149 data - self._check_149(engine, data) - - def _post_downgrade_159(self, engine): - dialect = engine.url.get_dialect() - # NOTE(maurosr): check if column length is 43 again (it currently makes - # sense only for mysql) - if dialect not in [postgresql.dialect, sqlite.dialect]: - console_pools = db_utils.get_table(engine, 'console_pools') - 
self.assertEqual(console_pools.columns['address'].type.length, 43) - - # migration 160, fix system_metadata NULL deleted entries to be 0 - def _pre_upgrade_160(self, engine): - fake_instances = [ - dict(uuid='m160-uuid1'), - dict(uuid='m160-uuid2'), - dict(uuid='m160-uuid3'), - ] - fake_sys_meta = [ - dict(instance_uuid='m160-uuid1', key='foo', value='bar'), - dict(instance_uuid='m160-uuid2', key='foo2', value='bar2'), - dict(instance_uuid='m160-uuid3', key='foo3', value='bar3')] - - instances = db_utils.get_table(engine, 'instances') - sys_meta = db_utils.get_table(engine, 'instance_system_metadata') - engine.execute(instances.insert(), fake_instances) - - # Create the metadata entries - data = {} - for sm in fake_sys_meta: - result = sys_meta.insert().values(sm).execute() - sm['id'] = result.inserted_primary_key[0] - data[sm['id']] = sm - - # Make sure the entries in the DB for 'deleted' are None. - our_ids = data.keys() - results = sys_meta.select().where(sys_meta.c.id.in_(our_ids)).\ - execute() - results = list(results) - self.assertEqual(len(our_ids), len(results)) - for result in results: - self.assertEqual(result['deleted'], None) - return data - - def _check_160(self, engine, data): - our_ids = data.keys() - sys_meta = db_utils.get_table(engine, 'instance_system_metadata') - results = sys_meta.select().where(sys_meta.c.id.in_(our_ids)).\ - execute() - results = list(results) - self.assertEqual(len(our_ids), len(results)) - for result in results: - the_id = result['id'] - # Make sure this is now 0. - self.assertEqual(result['deleted'], 0) - # Make sure nothing else changed. - for key, value in data[the_id].items(): - self.assertEqual(value, result[key]) - - # migration 161, fix system_metadata "None" values should be NULL - def _pre_upgrade_161(self, engine): - fake_instances = [dict(uuid='m161-uuid1')] - sm_base = dict(instance_uuid='m161-uuid1', value=None) - now = timeutils.utcnow().replace(microsecond=0) - fake_sys_meta = [ - # Should be fixed - dict(sm_base, key='instance_type_foo', value='None'), - dict(sm_base, key='instance_type_bar', value='88 mph'), - - # Should be unaffected - dict(sm_base, key='instance_type_name', value='None'), - dict(sm_base, key='instance_type_flavorid', value='None'), - dict(sm_base, key='foo', value='None'), - dict(sm_base, key='instance_type_bat'), - dict(sm_base, key='instance_type_baz', created_at=now), - ] - - instances = db_utils.get_table(engine, 'instances') - sys_meta = db_utils.get_table(engine, 'instance_system_metadata') - engine.execute(instances.insert(), fake_instances) - - data = {} - for sm in fake_sys_meta: - result = sys_meta.insert().values(sm).execute() - sm['id'] = result.inserted_primary_key[0] - data[sm['id']] = sm - - return data - - def _check_161(self, engine, data): - our_ids = data.keys() - sys_meta = db_utils.get_table(engine, 'instance_system_metadata') - results = sys_meta.select().where(sys_meta.c.id.in_(our_ids)).\ - execute() - results = list(results) - self.assertEqual(len(our_ids), len(results)) - for result in results: - the_id = result['id'] - key = result['key'] - value = result['value'] - original = data[the_id] - - if key == 'instance_type_baz': - # Neither value nor created_at should have been altered - self.assertEqual(result['value'], original['value']) - self.assertEqual(result['created_at'], original['created_at']) - elif key in ['instance_type_name', 'instance_type_flavorid']: - # These should not have their values changed, but should - # have corrected created_at stamps - 
self.assertEqual(result['value'], original['value']) - self.assertTrue(isinstance(result['created_at'], - datetime.datetime)) - elif key.startswith('instance_type'): - # Values like instance_type_% should be stamped and values - # converted from 'None' to None where appropriate - self.assertEqual(result['value'], - None if original['value'] == 'None' - else original['value']) - self.assertTrue(isinstance(result['created_at'], - datetime.datetime)) - else: - # None of the non-instance_type values should have - # been touched. Since we didn't set created_at on any - # of them, they should all still be None. - self.assertEqual(result['value'], original['value']) - self.assertEqual(result['created_at'], None) - - def _pre_upgrade_172(self, engine): - instance_types = db_utils.get_table(engine, 'instance_types') - data = [ - dict(id=21, name='uc_name0', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="uc_flavor1", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=False, - is_public=True, deleted=0), - dict(id=22, name='uc_name1', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="uc_flavor1", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=False, - is_public=True, deleted=0), - dict(id=23, name='uc_name2', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="uc_flavor2", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=False, - is_public=True, deleted=0), - dict(id=24, name='uc_name2', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="uc_flavor3", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=False, - is_public=True, deleted=0), - ] - engine.execute(instance_types.insert(), data) - return data - - def _check_172(self, engine, data): - instance_types = db_utils.get_table(engine, 'instance_types') - - not_deleted = instance_types.c.deleted != instance_types.c.id - - # There is only one instance_type with flavor `uc_flavor1` - uc_flavor1_rows = instance_types.select().\ - where(instance_types.c.flavorid == 'uc_flavor1').\ - where(not_deleted).\ - execute().\ - fetchall() - - self.assertEqual(1, len(uc_flavor1_rows)) - - # There is only one instance_type with name `uc_name2` - uc_name2_rows = instance_types.select().\ - where(instance_types.c.name == 'uc_name2').\ - where(not_deleted).\ - execute().\ - fetchall() - self.assertEqual(1, len(uc_name2_rows)) - - # migration 173, add unique constraint to keypairs - def _pre_upgrade_173(self, engine): - created_at = [datetime.datetime.now() for x in range(0, 7)] - fake_keypairs = [dict(name='key1', user_id='1a', - created_at=created_at[0], - deleted=0), - dict(name='key1', user_id='1a', - created_at=created_at[1], - deleted=0), - dict(name='key1', user_id='1a', - created_at=created_at[2], - deleted=0) - ] - keypairs = db_utils.get_table(engine, 'key_pairs') - engine.execute(keypairs.insert(), fake_keypairs) - return fake_keypairs - - def _check_173(self, engine, data): - keypairs = db_utils.get_table(engine, 'key_pairs') - # Unique constraints are not listed in table.constraints for any db. - # So, simply add a duplicate keypair to check if unique constraint - # is applied to the key_pairs table or not. 
- insert = keypairs.insert() - duplicate_keypair = dict(name='key4', user_id='4a', - created_at=datetime.datetime.now(), - deleted=0) - insert.execute(duplicate_keypair) - # Insert again - self.assertRaises(sqlalchemy.exc.IntegrityError, insert.execute, - duplicate_keypair) - - # Get all unique records - rows = keypairs.select().\ - where(keypairs.c.deleted != keypairs.c.id).\ - execute().\ - fetchall() - # Ensure the number of unique keypairs is correct - self.assertEqual(len(rows), 2) - - def _pre_upgrade_174(self, engine): - instance_types = db_utils.get_table(engine, 'instance_types') - instance_type_projects = db_utils.get_table(engine, - 'instance_type_projects') - - instance_type_data = [ - dict(id=31, name='itp_name0', memory_mb=128, vcpus=1, - root_gb=10, ephemeral_gb=0, flavorid="itp_flavor1", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=False, - is_public=True, deleted=0) - ] - instance_type_projects_data = [ - dict(project_id='pr1', instance_type_id=31, deleted=0), - dict(project_id='pr1', instance_type_id=31, deleted=0), - dict(project_id='pr2', instance_type_id=31, deleted=0) - ] - - engine.execute(instance_types.insert(), instance_type_data) - engine.execute(instance_type_projects.insert(), - instance_type_projects_data) - - def _check_174(self, engine, data): - it_projects = db_utils.get_table(engine, 'instance_type_projects') - - def get_(project_id, it_id, deleted): - deleted_value = 0 if not deleted else it_projects.c.id - return it_projects.select().\ - where(it_projects.c.project_id == project_id).\ - where(it_projects.c.instance_type_id == it_id).\ - where(it_projects.c.deleted == deleted_value).\ - execute().\ - fetchall() - - self.assertEqual(1, len(get_('pr1', '31', False))) - self.assertEqual(1, len(get_('pr1', '31', True))) - self.assertEqual(1, len(get_('pr2', '31', False))) - self.assertRaises(sqlalchemy.exc.IntegrityError, - it_projects.insert().execute, - dict(instance_type=31, project_id='pr1', deleted=0)) - - # migration 175, Modify volume_usage-cache, Drop column instance_id, add - # columns instance_uuid, project_id and user_id - def _pre_upgrade_175(self, engine): - volume_usage_cache = db_utils.get_table(engine, 'volume_usage_cache') - fake_usage = {'volume_id': 'fake_volume_id', - 'instance_id': 10, - 'tot_last_refreshed': datetime.datetime.now(), - 'tot_reads': 2, - 'tot_read_bytes': 3, - 'tot_writes': 4, - 'tot_write_bytes': 5, - 'curr_last_refreshed': datetime.datetime.now(), - 'curr_reads': 6, - 'curr_read_bytes': 7, - 'curr_writes': 8, - 'curr_write_bytes': 9} - volume_usage_cache.insert().execute(fake_usage) - - def _check_175(self, engine, data): - volume_usage_cache = db_utils.get_table(engine, 'volume_usage_cache') - # Get the record - rows = volume_usage_cache.select().execute().fetchall() - self.assertEqual(len(rows), 1) - - self.assertEqual(rows[0]['instance_uuid'], None) - self.assertEqual(rows[0]['project_id'], None) - self.assertEqual(rows[0]['user_id'], None) - self.assertFalse('instance_id' in rows[0]) - - def _post_downgrade_175(self, engine): - volume_usage_cache = db_utils.get_table(engine, 'volume_usage_cache') - # Get the record - rows = volume_usage_cache.select().execute().fetchall() - self.assertEqual(len(rows), 1) - - self.assertFalse('instance_uuid' in rows[0]) - self.assertFalse('project_id' in rows[0]) - self.assertFalse('user_id' in rows[0]) - self.assertEqual(rows[0]['instance_id'], None) - - def _check_176(self, engine, data): - volume_usage_cache = db_utils.get_table(engine, 'volume_usage_cache') - # Get the 
record - rows = volume_usage_cache.select().execute().fetchall() - self.assertEqual(len(rows), 1) - - self.assertEqual(rows[0]['availability_zone'], None) - - def _post_downgrade_176(self, engine): - volume_usage_cache = db_utils.get_table(engine, 'volume_usage_cache') - # Get the record - rows = volume_usage_cache.select().execute().fetchall() - self.assertEqual(len(rows), 1) - - self.assertFalse('availability_zone' in rows[0]) - - def _pre_upgrade_177(self, engine): - floating_ips = db_utils.get_table(engine, 'floating_ips') - data = [ - {'address': '128.128.128.128', 'deleted': 0}, - {'address': '128.128.128.128', 'deleted': 0}, - {'address': '128.128.128.129', 'deleted': 0}, - ] - - for item in data: - floating_ips.insert().values(item).execute() - return data - - def _check_177(self, engine, data): - floating_ips = db_utils.get_table(engine, 'floating_ips') - - def get_(address, deleted): - deleted_value = 0 if not deleted else floating_ips.c.id - return floating_ips.select().\ - where(floating_ips.c.address == address).\ - where(floating_ips.c.deleted == deleted_value).\ - execute().\ - fetchall() - - self.assertEqual(1, len(get_('128.128.128.128', False))) - self.assertEqual(1, len(get_('128.128.128.128', True))) - self.assertEqual(1, len(get_('128.128.128.129', False))) - self.assertRaises(sqlalchemy.exc.IntegrityError, - floating_ips.insert().execute, - dict(address='128.128.128.129', deleted=0)) - - # migration 179 - convert cells.deleted from boolean to int - def _pre_upgrade_179(self, engine): - cells_data = [ - {'id': 4, 'deleted': True}, - {'id': 5, 'deleted': False}, - ] - - cells = db_utils.get_table(engine, 'cells') - engine.execute(cells.insert(), cells_data) - - return dict(cells=cells_data) - - def _check_179(self, engine, data): - cells = db_utils.get_table(engine, 'cells') - cell = cells.select(cells.c.id == 4).execute().first() - self.assertEqual(4, cell.deleted) - cell = cells.select(cells.c.id == 5).execute().first() - self.assertEqual(0, cell.deleted) - - def _check_180(self, engine, data): - self.assertTrue(db_utils.check_shadow_table(engine, - "volume_usage_cache")) - - def _check_181(self, engine, data): - self.assertTrue(db_utils.check_shadow_table(engine, 'cells')) - - def _pre_upgrade_182(self, engine): - CIDR = '6666:1020:1000:2013:1000:6535:abcd:abcd' - - security_group_rules = \ - db_utils.get_table(engine, 'shadow_security_group_rules') - values = { - 'id': 182, - 'protocol': 'tcp', - 'from_port': 6666, - 'to_port': 9999, - 'cidr': CIDR, - 'deleted': 0 - } - security_group_rules.insert().values(values).execute() - - networks = db_utils.get_table(engine, 'shadow_networks') - values = { - 'id': 182, - 'vlan': 100500, - 'cidr': CIDR, - 'cidr_v6': CIDR, - 'deleted': 0 - } - networks.insert().values(values).execute() - - provider_fw_rules = db_utils.get_table(engine, - 'shadow_provider_fw_rules') - values = { - 'id': 182, - 'protocol': 'tcp', - 'from_port': 6666, - 'to_port': 9999, - 'cidr': CIDR, - 'deleted': 0 - } - provider_fw_rules.insert().values(values).execute() - return {'cidr': CIDR} - - def _check_182(self, engine, data): - self.assertTrue(db_utils.check_shadow_table(engine, - 'security_group_rules')) - self.assertTrue(db_utils.check_shadow_table(engine, - 'provider_fw_rules')) - self.assertTrue(db_utils.check_shadow_table(engine, 'networks')) - - table_fields = { - 'shadow_security_group_rules': ['cidr'], - 'shadow_networks': ['cidr', 'cidr_v6'], - 'shadow_provider_fw_rules': ['cidr'] - } - - for table_name, fields in table_fields.iteritems(): - 
table = db_utils.get_table(engine, table_name)
-            rows = table.\
-                select().\
-                where(table.c.id == 182).\
-                execute().\
-                fetchall()
-            self.assertEqual(len(rows), 1)
-            for field in fields:
-                self.assertEqual(rows[0][field], data['cidr'])
-
-            for field in fields:
-                # we should also be able to store a netmask in cidr fields
-                table.\
-                    update().\
-                    values({field: data['cidr'] + '/128'}).\
-                    execute()
-
-    def _check_183(self, engine, data):
-        table_name = 'security_group_default_rules'
-        self.assertTrue(db_utils.check_shadow_table(engine, table_name))
-
-    def _check_184(self, engine, data):
-        self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
-        self.assertTrue(db_utils.check_shadow_table(engine, 'networks'))
-        self.assertTrue(db_utils.check_shadow_table(engine, 'fixed_ips'))
-        self.assertTrue(db_utils.check_shadow_table(engine, 'floating_ips'))
-        self.assertTrue(db_utils.check_shadow_table(engine, 'console_pools'))
-
-
-class TestBaremetalMigrations(BaseMigrationTestCase, CommonTestsMixIn):
-    """Test sqlalchemy-migrate migrations."""
-    USER = "openstack_citest"
-    PASSWD = "openstack_citest"
-    DATABASE = "openstack_baremetal_citest"
-
-    def __init__(self, *args, **kwargs):
-        super(TestBaremetalMigrations, self).__init__(*args, **kwargs)
-
-        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
-                'virt/baremetal/test_baremetal_migrations.conf')
-        # Test machines can set the BAREMETAL_TEST_MIGRATIONS_CONF variable
-        # to override the location of the config file for migration testing
-        self.CONFIG_FILE_PATH = os.environ.get(
-                'BAREMETAL_TEST_MIGRATIONS_CONF',
-                self.DEFAULT_CONFIG_FILE)
-        self.MIGRATE_FILE = \
-                nova.virt.baremetal.db.sqlalchemy.migrate_repo.__file__
-        self.REPOSITORY = repository.Repository(
-                        os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
-
-    def setUp(self):
-        super(TestBaremetalMigrations, self).setUp()
-
-        if self.migration is None:
-            self.migration = __import__('nova.virt.baremetal.db.migration',
-                    globals(), locals(), ['INIT_VERSION'], -1)
-            self.INIT_VERSION = self.migration.INIT_VERSION
-        if self.migration_api is None:
-            temp = __import__('nova.virt.baremetal.db.sqlalchemy.migration',
-                    globals(), locals(), ['versioning_api'], -1)
-            self.migration_api = temp.versioning_api
-
-    def _pre_upgrade_002(self, engine):
-        data = [{'id': 1, 'key': 'fake-key', 'image_path': '/dev/null',
-                 'pxe_config_path': '/dev/null/', 'root_mb': 0, 'swap_mb': 0}]
-        table = db_utils.get_table(engine, 'bm_deployments')
-        engine.execute(table.insert(), data)
-        return data
-
-    def _check_002(self, engine, data):
-        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
-                          db_utils.get_table, engine, 'bm_deployments')
-
-    def _post_downgrade_004(self, engine):
-        bm_nodes = db_utils.get_table(engine, 'bm_nodes')
-        self.assertNotIn(u'instance_name', [c.name for c in bm_nodes.columns])
-
-    def _check_005(self, engine, data):
-        bm_nodes = db_utils.get_table(engine, 'bm_nodes')
-        columns = [c.name for c in bm_nodes.columns]
-        self.assertNotIn(u'prov_vlan_id', columns)
-        self.assertNotIn(u'registration_status', columns)
-
-    def _pre_upgrade_006(self, engine):
-        nodes = db_utils.get_table(engine, 'bm_nodes')
-        ifs = db_utils.get_table(engine, 'bm_interfaces')
-        # node 1 has two different addresses in bm_nodes and bm_interfaces
-        engine.execute(nodes.insert(),
-                       [{'id': 1,
-                         'prov_mac_address': 'aa:aa:aa:aa:aa:aa'}])
-        engine.execute(ifs.insert(),
-                       [{'id': 101,
-                         'bm_node_id': 1,
-                         'address': 'bb:bb:bb:bb:bb:bb'}])
-        # node 2 has the same address in both bm_nodes and bm_interfaces
-
engine.execute(nodes.insert(), - [{'id': 2, - 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}]) - engine.execute(ifs.insert(), - [{'id': 201, - 'bm_node_id': 2, - 'address': 'cc:cc:cc:cc:cc:cc'}]) - - def _check_006(self, engine, data): - ifs = db_utils.get_table(engine, 'bm_interfaces') - rows = ifs.select().\ - where(ifs.c.bm_node_id == 1).\ - execute().\ - fetchall() - self.assertEqual(len(rows), 2) - rows = ifs.select().\ - where(ifs.c.bm_node_id == 2).\ - execute().\ - fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0]['address'], 'cc:cc:cc:cc:cc:cc') - - def _post_downgrade_006(self, engine): - ifs = db_utils.get_table(engine, 'bm_interfaces') - rows = ifs.select().where(ifs.c.bm_node_id == 1).execute().fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0]['address'], 'bb:bb:bb:bb:bb:bb') - - rows = ifs.select().where(ifs.c.bm_node_id == 2).execute().fetchall() - self.assertEqual(len(rows), 0) - - def _check_007(self, engine, data): - bm_nodes = db_utils.get_table(engine, 'bm_nodes') - columns = [c.name for c in bm_nodes.columns] - self.assertNotIn(u'prov_mac_address', columns) - - -class ProjectTestCase(test.TestCase): - - def test_all_migrations_have_downgrade(self): - topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../') - py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy", - "migrate_repo", "versions", "*.py") - - missing_downgrade = [] - for path in glob.iglob(py_glob): - has_upgrade = False - has_downgrade = False - with open(path, "r") as f: - for line in f: - if 'def upgrade(' in line: - has_upgrade = True - if 'def downgrade(' in line: - has_downgrade = True - - if has_upgrade and not has_downgrade: - fname = os.path.basename(path) - missing_downgrade.append(fname) - - helpful_msg = (_("The following migrations are missing a downgrade:" - "\n\t%s") % '\n\t'.join(sorted(missing_downgrade))) - self.assert_(not missing_downgrade, helpful_msg) diff --git a/nova/tests/test_plugin_api_extensions.py b/nova/tests/test_plugin_api_extensions.py deleted file mode 100644 index 3aac638c..00000000 --- a/nova/tests/test_plugin_api_extensions.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pkg_resources - -from nova.api.openstack.compute import extensions as computeextensions -from nova.api.openstack import extensions -from nova.openstack.common.plugin import plugin -from nova import test - - -class StubController(object): - - def i_am_the_stub(self): - pass - - -class StubControllerExtension(extensions.ExtensionDescriptor): - """This is a docstring. 
We need it.""" - name = 'stubextension' - alias = 'stubby' - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension('testme', - StubController()) - resources.append(res) - return resources - - -service_list = [] - - -class TestPluginClass(plugin.Plugin): - - def __init__(self, service_name): - super(TestPluginClass, self).__init__(service_name) - self._add_api_extension_descriptor(StubControllerExtension) - service_list.append(service_name) - - -class MockEntrypoint(pkg_resources.EntryPoint): - def load(self): - return TestPluginClass - - -class APITestCase(test.TestCase): - """Test case for the plugin api extension interface.""" - def test_add_extension(self): - def mock_load(_s): - return TestPluginClass() - - def mock_iter_entry_points(_t): - return [MockEntrypoint("fake", "fake", ["fake"])] - - self.stubs.Set(pkg_resources, 'iter_entry_points', - mock_iter_entry_points) - global service_list - service_list = [] - - # Marking out the default extension paths makes this test MUCH faster. - self.flags(osapi_compute_extension=[]) - - found = False - mgr = computeextensions.ExtensionManager() - for res in mgr.get_resources(): - # We have to use this weird 'dir' check because - # the plugin framework muddies up the classname - # such that 'isinstance' doesn't work right. - if 'i_am_the_stub' in dir(res.controller): - found = True - - self.assertTrue(found) - self.assertEqual(len(service_list), 1) - self.assertEqual(service_list[0], 'compute-extensions') diff --git a/nova/tests/test_vmmode.py b/nova/tests/test_vmmode.py deleted file mode 100644 index 374f040e..00000000 --- a/nova/tests/test_vmmode.py +++ /dev/null @@ -1,49 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright (C) 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import exception -from nova import test - -from nova.compute import vm_mode - - -class ComputeVMModeTest(test.TestCase): - - def test_case(self): - inst = dict(vm_mode="HVM") - mode = vm_mode.get_from_instance(inst) - self.assertEqual(mode, "hvm") - - def test_legacy_pv(self): - inst = dict(vm_mode="pv") - mode = vm_mode.get_from_instance(inst) - self.assertEqual(mode, "xen") - - def test_legacy_hv(self): - inst = dict(vm_mode="hv") - mode = vm_mode.get_from_instance(inst) - self.assertEqual(mode, "hvm") - - def test_bogus(self): - inst = dict(vm_mode="wibble") - self.assertRaises(exception.Invalid, - vm_mode.get_from_instance, - inst) - - def test_good(self): - inst = dict(vm_mode="hvm") - mode = vm_mode.get_from_instance(inst) - self.assertEqual(mode, "hvm")
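Taken together, these tests pin down a small normalization contract: case is folded, the legacy names 'pv' and 'hv' map to 'xen' and 'hvm', and unknown modes raise. A hypothetical standalone equivalent (the real implementation lives in nova.compute.vm_mode and raises exception.Invalid; the valid-mode set here is only the subset these tests exercise):

    _LEGACY = {'pv': 'xen', 'hv': 'hvm'}
    _VALID = ('hvm', 'xen')  # assumption: only the modes the tests exercise


    def get_from_instance(instance):
        mode = instance['vm_mode'].lower()
        mode = _LEGACY.get(mode, mode)
        if mode not in _VALID:
            # nova raises exception.Invalid here
            raise ValueError("unknown vm_mode '%s'" % mode)
        return mode


    assert get_from_instance(dict(vm_mode="HVM")) == 'hvm'
    assert get_from_instance(dict(vm_mode="pv")) == 'xen'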