# nova/nova/tests/unit/db/test_db_api.py
# encoding=UTF8
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API."""
import copy
import datetime
from dateutil import parser as dateutil_parser
import iso8601
import mock
import netaddr
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import Column
from sqlalchemy.dialects import sqlite
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.orm import query
from sqlalchemy.orm import session as sqla_session
from sqlalchemy import sql
from sqlalchemy import Table
from nova import block_device
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import types as col_types
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_console_auth_token
from nova import utils

CONF = nova.conf.CONF

get_engine = sqlalchemy_api.get_engine
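

# Looks up a Reservation row directly via model_query (read_deleted="no"),
# raising ReservationNotFound when no matching row exists.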
def _reservation_get(context, uuid):
    @sqlalchemy_api.pick_context_manager_reader
    def doit(context):
        return sqlalchemy_api.model_query(
            context, models.Reservation, read_deleted="no").filter_by(
            uuid=uuid).first()

    result = doit(context)
    if not result:
        raise exception.ReservationNotFound(uuid=uuid)
    return result
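

# Builds a compute_nodes record populated with arbitrary but self-consistent
# fixture values (vcpus, memory, disk, allocation ratios) plus a JSON-encoded
# stats blob.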
def _make_compute_node(host, node, hv_type, service_id):
    compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
                             uuid=uuidutils.generate_uuid(),
                             vcpus_used=0, memory_mb_used=0,
                             local_gb_used=0, free_ram_mb=1024,
                             free_disk_gb=2048, hypervisor_type=hv_type,
                             hypervisor_version=1, cpu_info="",
                             running_vms=0, current_workload=0,
                             service_id=service_id,
                             host=host,
                             disk_available_least=100,
                             hypervisor_hostname=node,
                             host_ip='127.0.0.1',
                             supported_instances='',
                             pci_stats='',
                             metrics='',
                             extra_resources='',
                             cpu_allocation_ratio=16.0,
                             ram_allocation_ratio=1.5,
                             disk_allocation_ratio=1.0,
                             stats='', numa_topology='')
    # add some random stats
    stats = dict(num_instances=3, num_proj_12345=2,
                 num_proj_23456=2, num_vm_building=3)
    compute_node_dict['stats'] = jsonutils.dumps(stats)
    return compute_node_dict


def _quota_create(context, project_id, user_id):
    """Create sample Quota objects."""
    quotas = {}
    user_quotas = {}
    for i in range(3):
        resource = 'resource%d' % i
        if i == 2:
            # test for project level resources
            resource = 'fixed_ips'
            quotas[resource] = db.quota_create(context,
                                               project_id,
                                               resource, i + 2).hard_limit
            user_quotas[resource] = quotas[resource]
        else:
            quotas[resource] = db.quota_create(context,
                                               project_id,
                                               resource, i + 1).hard_limit
            user_quotas[resource] = db.quota_create(context, project_id,
                                                    resource, i + 1,
                                                    user_id=user_id).hard_limit


@sqlalchemy_api.pick_context_manager_reader
def _assert_instance_id_mapping(_ctxt, tc, inst_uuid, expected_existing=False):
    # NOTE(mriedem): We can't use ec2_instance_get_by_uuid to assert
    # the instance_id_mappings record is gone because it hard-codes
    # read_deleted='yes' and will read the soft-deleted record. So we
    # do the model_query directly here. See bug 1061166.
    inst_id_mapping = sqlalchemy_api.model_query(
        _ctxt, models.InstanceIdMapping).filter_by(uuid=inst_uuid).first()
    if not expected_existing:
        tc.assertFalse(inst_id_mapping,
                       'instance_id_mapping not deleted for '
                       'instance: %s' % inst_uuid)
    else:
        tc.assertTrue(inst_id_mapping,
                      'instance_id_mapping not found for '
                      'instance: %s' % inst_uuid)


class DbTestCase(test.TestCase):
    def setUp(self):
        super(DbTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

    def create_instance_with_args(self, **kwargs):
        args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
                'node': 'node1', 'project_id': self.project_id,
                'vm_state': 'fake'}
        if 'context' in kwargs:
            ctxt = kwargs.pop('context')
            args['project_id'] = ctxt.project_id
        else:
            ctxt = self.context
        args.update(kwargs)
        return db.instance_create(ctxt, args)

    def fake_metadata(self, content):
        meta = {}
        for i in range(0, 10):
            meta["foo%i" % i] = "this is %s item %i" % (content, i)
        return meta

    def create_metadata_for_instance(self, instance_uuid):
        meta = self.fake_metadata('metadata')
        db.instance_metadata_update(self.context, instance_uuid, meta, False)
        sys_meta = self.fake_metadata('system_metadata')
        db.instance_system_metadata_update(self.context, instance_uuid,
                                           sys_meta, False)
        return meta, sys_meta


class DecoratorTestCase(test.TestCase):
    def _test_decorator_wraps_helper(self, decorator):
        def test_func():
            """Test docstring."""

        decorated_func = decorator(test_func)
        self.assertEqual(test_func.__name__, decorated_func.__name__)
        self.assertEqual(test_func.__doc__, decorated_func.__doc__)
        self.assertEqual(test_func.__module__, decorated_func.__module__)

    def test_require_context_decorator_wraps_functions_properly(self):
        self._test_decorator_wraps_helper(sqlalchemy_api.require_context)

    def test_require_deadlock_retry_wraps_functions_properly(self):
        self._test_decorator_wraps_helper(
            oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True))

    @mock.patch.object(enginefacade._TransactionContextManager, 'using')
    @mock.patch.object(enginefacade._TransactionContextManager, '_clone')
    def test_select_db_reader_mode_select_sync(self, mock_clone, mock_using):

        @db.select_db_reader_mode
        def func(self, context, value, use_slave=False):
            pass

        mock_clone.return_value = enginefacade._TransactionContextManager(
            mode=enginefacade._READER)
        ctxt = context.get_admin_context()
        value = 'some_value'
        func(self, ctxt, value)

        mock_clone.assert_called_once_with(mode=enginefacade._READER)
        mock_using.assert_called_once_with(ctxt)

    @mock.patch.object(enginefacade._TransactionContextManager, 'using')
    @mock.patch.object(enginefacade._TransactionContextManager, '_clone')
    def test_select_db_reader_mode_select_async(self, mock_clone, mock_using):

        @db.select_db_reader_mode
        def func(self, context, value, use_slave=False):
            pass

        mock_clone.return_value = enginefacade._TransactionContextManager(
            mode=enginefacade._ASYNC_READER)
        ctxt = context.get_admin_context()
        value = 'some_value'
        func(self, ctxt, value, use_slave=True)

        mock_clone.assert_called_once_with(mode=enginefacade._ASYNC_READER)
        mock_using.assert_called_once_with(ctxt)

    @mock.patch.object(enginefacade._TransactionContextManager, 'using')
    @mock.patch.object(enginefacade._TransactionContextManager, '_clone')
    def test_select_db_reader_mode_no_use_slave_select_sync(self, mock_clone,
                                                            mock_using):

        @db.select_db_reader_mode
        def func(self, context, value):
            pass

        mock_clone.return_value = enginefacade._TransactionContextManager(
            mode=enginefacade._READER)
        ctxt = context.get_admin_context()
        value = 'some_value'
        func(self, ctxt, value)

        mock_clone.assert_called_once_with(mode=enginefacade._READER)
        mock_using.assert_called_once_with(ctxt)
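

# Canned aggregate fixtures shared by the aggregate-related helpers below.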
def _get_fake_aggr_values():
    return {'name': 'fake_aggregate'}


def _get_fake_aggr_metadata():
    return {'fake_key1': 'fake_value1',
            'fake_key2': 'fake_value2',
            'availability_zone': 'fake_avail_zone'}


def _get_fake_aggr_hosts():
    return ['foo.openstack.org']


def _create_aggregate(context=context.get_admin_context(),
                      values=_get_fake_aggr_values(),
                      metadata=_get_fake_aggr_metadata()):
    return db.aggregate_create(context, values, metadata)


def _create_aggregate_with_hosts(context=context.get_admin_context(),
                                 values=_get_fake_aggr_values(),
                                 metadata=_get_fake_aggr_metadata(),
                                 hosts=_get_fake_aggr_hosts()):
    result = _create_aggregate(context=context,
                               values=values, metadata=metadata)
    for host in hosts:
        db.aggregate_host_add(context, result['id'], host)
    return result


@mock.patch.object(sqlalchemy_api, '_get_regexp_ops',
                   return_value=(lambda x: x, 'LIKE'))
class UnsupportedDbRegexpTestCase(DbTestCase):
    def test_instance_get_all_by_filters_paginate(self, mock_get_regexp):
        test1 = self.create_instance_with_args(display_name='test1')
        test2 = self.create_instance_with_args(display_name='test2')
        test3 = self.create_instance_with_args(display_name='test3')

        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                marker=None)
        self.assertEqual(3, len(result))
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                sort_dir="asc",
                                                marker=test1['uuid'])
        self.assertEqual(2, len(result))
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                sort_dir="asc",
                                                marker=test2['uuid'])
        self.assertEqual(1, len(result))
        result = db.instance_get_all_by_filters(self.context,
                                                {'display_name': '%test%'},
                                                sort_dir="asc",
                                                marker=test3['uuid'])
        self.assertEqual(0, len(result))

        self.assertRaises(exception.MarkerNotFound,
                          db.instance_get_all_by_filters,
                          self.context, {'display_name': '%test%'},
                          marker=uuidsentinel.uuid1)

    def test_instance_get_all_uuids_by_host(self, mock_get_regexp):
        test1 = self.create_instance_with_args(display_name='test1')
        test2 = self.create_instance_with_args(display_name='test2')
        test3 = self.create_instance_with_args(display_name='test3')
        uuids = [i.uuid for i in (test1, test2, test3)]
        found_uuids = db.instance_get_all_uuids_by_host(self.context,
                                                        test1.host)
        self.assertEqual(sorted(uuids), sorted(found_uuids))

    def _assert_equals_inst_order(self, correct_order, filters,
                                  sort_keys=None, sort_dirs=None,
                                  limit=None, marker=None,
                                  match_keys=['uuid', 'vm_state',
                                              'display_name', 'id']):
        '''Retrieves instances based on the given filters and sorting
        information and verifies that the instances are returned in the
        correct sorted order by ensuring that the supplied keys match.
        '''
        result = db.instance_get_all_by_filters_sort(
            self.context, filters, limit=limit, marker=marker,
            sort_keys=sort_keys, sort_dirs=sort_dirs)
        self.assertEqual(len(correct_order), len(result))
        for inst1, inst2 in zip(result, correct_order):
            for key in match_keys:
                self.assertEqual(inst1.get(key), inst2.get(key))
        return result

    def test_instance_get_all_by_filters_sort_keys(self, mock_get_regexp):
        '''Verifies sort order and direction for multiple instances.'''
        # Instances that will match the query
        test1_active = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ACTIVE)
        test1_error = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ERROR)
        test1_error2 = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ERROR)
        test2_active = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ACTIVE)
        test2_error = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ERROR)
        test2_error2 = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ERROR)
        # Other instances in the DB that will not match the name filter
        other_error = self.create_instance_with_args(
            display_name='other',
            vm_state=vm_states.ERROR)
        other_active = self.create_instance_with_args(
            display_name='other',
            vm_state=vm_states.ACTIVE)
        filters = {'display_name': '%test%'}

        # Verify different sort key/direction combinations
        sort_keys = ['display_name', 'vm_state', 'created_at']
        sort_dirs = ['asc', 'asc', 'asc']
        correct_order = [test1_active, test1_error, test1_error2,
                         test2_active, test2_error, test2_error2]
        self._assert_equals_inst_order(correct_order, filters,
                                       sort_keys=sort_keys,
                                       sort_dirs=sort_dirs)

        sort_dirs = ['asc', 'desc', 'asc']
        correct_order = [test1_error, test1_error2, test1_active,
                         test2_error, test2_error2, test2_active]
        self._assert_equals_inst_order(correct_order, filters,
                                       sort_keys=sort_keys,
                                       sort_dirs=sort_dirs)

        sort_dirs = ['desc', 'desc', 'asc']
        correct_order = [test2_error, test2_error2, test2_active,
                         test1_error, test1_error2, test1_active]
        self._assert_equals_inst_order(correct_order, filters,
                                       sort_keys=sort_keys,
                                       sort_dirs=sort_dirs)

        # created_at is added by default if not supplied, descending order
        sort_keys = ['display_name', 'vm_state']
        sort_dirs = ['desc', 'desc']
        correct_order = [test2_error2, test2_error, test2_active,
                         test1_error2, test1_error, test1_active]
        self._assert_equals_inst_order(correct_order, filters,
                                       sort_keys=sort_keys,
                                       sort_dirs=sort_dirs)

        # Now created_at should be in ascending order (it defaults to the
        # direction of the first sort key)
        sort_dirs = ['asc', 'asc']
        correct_order = [test1_active, test1_error, test1_error2,
                         test2_active, test2_error, test2_error2]
        self._assert_equals_inst_order(correct_order, filters,
                                       sort_keys=sort_keys,
                                       sort_dirs=sort_dirs)

        # Remove the name filter to get all instances
        correct_order = [other_active, other_error,
                         test1_active, test1_error, test1_error2,
                         test2_active, test2_error, test2_error2]
        self._assert_equals_inst_order(correct_order, {},
                                       sort_keys=sort_keys,
                                       sort_dirs=sort_dirs)

        # Default sorting: 'created_at' then 'id' in desc order
        correct_order = [other_active, other_error,
                         test2_error2, test2_error, test2_active,
                         test1_error2, test1_error, test1_active]
        self._assert_equals_inst_order(correct_order, {})

    def test_instance_get_all_by_filters_sort_keys_paginate(self,
                                                            mock_get_regexp):
        '''Verifies sort order with pagination.'''
        # Instances that will match the query
        test1_active = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ACTIVE)
        test1_error = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ERROR)
        test1_error2 = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ERROR)
        test2_active = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ACTIVE)
        test2_error = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ERROR)
        test2_error2 = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ERROR)
        # Other instances in the DB that will not match the name filter
        self.create_instance_with_args(display_name='other')
        self.create_instance_with_args(display_name='other')
        filters = {'display_name': '%test%'}
        # Common sort information for every query
        sort_keys = ['display_name', 'vm_state', 'created_at']
        sort_dirs = ['asc', 'desc', 'asc']
        # Overall correct instance order based on the sort keys
        correct_order = [test1_error, test1_error2, test1_active,
                         test2_error, test2_error2, test2_active]

        # For limits of 1, 2 and 3, verify that the instances are returned
        # in the correct sorted order and update the marker to get the next
        # correct page
        for limit in range(1, 4):
            marker = None
            # Include the maximum number of instances (i.e. 6) to ensure
            # that the last query (with the marker pointing to the last
            # instance) returns 0 servers
            for i in range(0, 7, limit):
                if i == len(correct_order):
                    correct = []
                else:
                    correct = correct_order[i:i + limit]
                insts = self._assert_equals_inst_order(
                    correct, filters,
                    sort_keys=sort_keys, sort_dirs=sort_dirs,
                    limit=limit, marker=marker)
                if correct:
                    marker = insts[-1]['uuid']
                    self.assertEqual(correct[-1]['uuid'], marker)

    def test_instance_get_deleted_by_filters_sort_keys_paginate(
            self, mock_get_regexp):
        '''Verifies sort order with pagination for deleted instances.'''
        ctxt = context.get_admin_context()
        # Instances that will match the query
        test1_active = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ACTIVE)
        db.instance_destroy(ctxt, test1_active['uuid'])
        test1_error = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ERROR)
        db.instance_destroy(ctxt, test1_error['uuid'])
        test1_error2 = self.create_instance_with_args(
            display_name='test1',
            vm_state=vm_states.ERROR)
        db.instance_destroy(ctxt, test1_error2['uuid'])
        test2_active = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ACTIVE)
        db.instance_destroy(ctxt, test2_active['uuid'])
        test2_error = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ERROR)
        db.instance_destroy(ctxt, test2_error['uuid'])
        test2_error2 = self.create_instance_with_args(
            display_name='test2',
            vm_state=vm_states.ERROR)
        db.instance_destroy(ctxt, test2_error2['uuid'])
        # Other instances in the DB that will not match the name filter
        self.create_instance_with_args(display_name='other')
        self.create_instance_with_args(display_name='other')
        filters = {'display_name': '%test%', 'deleted': True}
        # Common sort information for every query
        sort_keys = ['display_name', 'vm_state', 'created_at']
        sort_dirs = ['asc', 'desc', 'asc']
        # Overall correct instance order based on the sort keys
        correct_order = [test1_error, test1_error2, test1_active,
                         test2_error, test2_error2, test2_active]

        # For limits of 1, 2 and 3, verify that the instances are returned
        # in the correct sorted order and update the marker to get the next
        # correct page
        for limit in range(1, 4):
            marker = None
            # Include the maximum number of instances (i.e. 6) to ensure
            # that the last query (with the marker pointing to the last
            # instance) returns 0 servers
            for i in range(0, 7, limit):
                if i == len(correct_order):
                    correct = []
                else:
                    correct = correct_order[i:i + limit]
                insts = self._assert_equals_inst_order(
                    correct, filters,
                    sort_keys=sort_keys, sort_dirs=sort_dirs,
                    limit=limit, marker=marker)
                if correct:
                    marker = insts[-1]['uuid']
                    self.assertEqual(correct[-1]['uuid'], marker)


class ModelQueryTestCase(DbTestCase):
    def test_model_query_invalid_arguments(self):
        @sqlalchemy_api.pick_context_manager_reader
        def test(context):
            # read_deleted shouldn't accept invalid values
            self.assertRaises(ValueError, sqlalchemy_api.model_query,
                              context, models.Instance,
                              read_deleted=False)
            self.assertRaises(ValueError, sqlalchemy_api.model_query,
                              context, models.Instance,
                              read_deleted="foo")

            # Check model is a valid model
            self.assertRaises(TypeError, sqlalchemy_api.model_query,
                              context, "")

        test(self.context)

    @mock.patch.object(sqlalchemyutils, 'model_query')
    def test_model_query_use_context_session(self, mock_model_query):
        @sqlalchemy_api.main_context_manager.reader
        def fake_method(context):
            session = context.session
            sqlalchemy_api.model_query(context, models.Instance)
            return session

        session = fake_method(self.context)
        mock_model_query.assert_called_once_with(models.Instance, session,
                                                 None, deleted=False)


class EngineFacadeTestCase(DbTestCase):
    def test_use_single_context_session_writer(self):
        # Checks that the session stored on the context is not overwritten
        # when the @sqlalchemy_api.main_context_manager.writer decorator is
        # applied to nested calls.
        @sqlalchemy_api.main_context_manager.writer
        def fake_parent_method(context):
            session = context.session
            return fake_child_method(context), session

        @sqlalchemy_api.main_context_manager.writer
        def fake_child_method(context):
            session = context.session
            sqlalchemy_api.model_query(context, models.Instance)
            return session

        parent_session, child_session = fake_parent_method(self.context)
        self.assertEqual(parent_session, child_session)

    def test_use_single_context_session_reader(self):
        # Checks that the session stored on the context is not overwritten
        # when the @sqlalchemy_api.main_context_manager.reader decorator is
        # applied to nested calls.
        @sqlalchemy_api.main_context_manager.reader
        def fake_parent_method(context):
            session = context.session
            return fake_child_method(context), session

        @sqlalchemy_api.main_context_manager.reader
        def fake_child_method(context):
            session = context.session
            sqlalchemy_api.model_query(context, models.Instance)
            return session

        parent_session, child_session = fake_parent_method(self.context)
        self.assertEqual(parent_session, child_session)


class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
    """No-DB test class for simple test cases that do not require a backend."""

    def test_manual_join_columns_immutable_list(self):
        # Tests that _manual_join_columns doesn't modify the list passed in.
        columns_to_join = ['system_metadata', 'test']
        manual_joins, columns_to_join2 = (
            sqlalchemy_api._manual_join_columns(columns_to_join))
        self.assertEqual(['system_metadata'], manual_joins)
        self.assertEqual(['test'], columns_to_join2)
        self.assertEqual(['system_metadata', 'test'], columns_to_join)

    def test_convert_objects_related_datetimes(self):
        t1 = timeutils.utcnow()
        t2 = t1 + datetime.timedelta(seconds=10)
        t3 = t2 + datetime.timedelta(hours=1)
        t2_utc = t2.replace(tzinfo=iso8601.UTC)
        t3_utc = t3.replace(tzinfo=iso8601.UTC)
        datetime_keys = ('created_at', 'deleted_at')

        test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
        self.assertEqual(test1, expected_dict)

        test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
        expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        sqlalchemy_api.convert_objects_related_datetimes(test2, *datetime_keys)
        self.assertEqual(test2, expected_dict)

        test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
        expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
        sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
        self.assertEqual(test3, expected_dict)

    def test_convert_objects_related_datetimes_with_strings(self):
        t1 = '2015-05-28T17:15:53.000000'
        t2 = '2012-04-21T18:25:43-05:00'
        t3 = '2012-04-23T18:25:43.511Z'
        datetime_keys = ('created_at', 'deleted_at', 'updated_at')
        test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        expected_dict = {
            'created_at': timeutils.parse_strtime(t1).replace(tzinfo=None),
            'deleted_at': timeutils.parse_isotime(t2).replace(tzinfo=None),
            'updated_at': timeutils.parse_isotime(t3).replace(tzinfo=None)}

        sqlalchemy_api.convert_objects_related_datetimes(test1)
        self.assertEqual(test1, expected_dict)

        sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
        self.assertEqual(test1, expected_dict)

    def test_get_regexp_op_for_database_sqlite(self):
        filter, op = sqlalchemy_api._get_regexp_ops('sqlite:///')
        self.assertEqual('|', filter('|'))
        self.assertEqual('REGEXP', op)

    def test_get_regexp_op_for_database_mysql(self):
        filter, op = sqlalchemy_api._get_regexp_ops(
            'mysql+pymysql://root@localhost')
        self.assertEqual('\\|', filter('|'))
        self.assertEqual('REGEXP', op)

    def test_get_regexp_op_for_database_postgresql(self):
        filter, op = sqlalchemy_api._get_regexp_ops(
            'postgresql://localhost')
        self.assertEqual('|', filter('|'))
        self.assertEqual('~', op)

    def test_get_regexp_op_for_database_unknown(self):
        filter, op = sqlalchemy_api._get_regexp_ops('notdb:///')
        self.assertEqual('|', filter('|'))
        self.assertEqual('LIKE', op)

    @mock.patch.object(sqlalchemy_api, 'main_context_manager')
    def test_get_engine(self, mock_ctxt_mgr):
        sqlalchemy_api.get_engine()
        mock_ctxt_mgr.writer.get_engine.assert_called_once_with()

    @mock.patch.object(sqlalchemy_api, 'main_context_manager')
    def test_get_engine_use_slave(self, mock_ctxt_mgr):
        sqlalchemy_api.get_engine(use_slave=True)
        mock_ctxt_mgr.reader.get_engine.assert_called_once_with()

    def test_get_db_conf_with_connection(self):
        mock_conf_group = mock.MagicMock()
        mock_conf_group.connection = 'fakemain://'
        db_conf = sqlalchemy_api._get_db_conf(mock_conf_group,
                                              connection='fake://')
        self.assertEqual('fake://', db_conf['connection'])

    @mock.patch.object(sqlalchemy_api, 'api_context_manager')
    def test_get_api_engine(self, mock_ctxt_mgr):
        sqlalchemy_api.get_api_engine()
        mock_ctxt_mgr.writer.get_engine.assert_called_once_with()

    @mock.patch.object(sqlalchemy_api, '_instance_get_by_uuid')
    @mock.patch.object(sqlalchemy_api, '_instances_fill_metadata')
    @mock.patch('oslo_db.sqlalchemy.utils.paginate_query')
    def test_instance_get_all_by_filters_paginated_allows_deleted_marker(
            self, mock_paginate, mock_fill, mock_get):
        ctxt = mock.MagicMock()
        ctxt.elevated.return_value = mock.sentinel.elevated
        sqlalchemy_api.instance_get_all_by_filters_sort(ctxt, {}, marker='foo')
        mock_get.assert_called_once_with(mock.sentinel.elevated, 'foo')
        ctxt.elevated.assert_called_once_with(read_deleted='yes')

    def test_replace_sub_expression(self):
        ret = sqlalchemy_api._safe_regex_mysql('|')
        self.assertEqual('\\|', ret)

        ret = sqlalchemy_api._safe_regex_mysql('||')
        self.assertEqual('\\|\\|', ret)

        ret = sqlalchemy_api._safe_regex_mysql('a||')
        self.assertEqual('a\\|\\|', ret)

        ret = sqlalchemy_api._safe_regex_mysql('|a|')
        self.assertEqual('\\|a\\|', ret)

        ret = sqlalchemy_api._safe_regex_mysql('||a')
        self.assertEqual('\\|\\|a', ret)
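

# Unlike the NoDBTestCase above, the following test cases run against the
# standard test database fixture and exercise the DB API end-to-end.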
class SqlAlchemyDbApiTestCase(DbTestCase):
    def test_instance_get_all_by_host(self):
        ctxt = context.get_admin_context()

        self.create_instance_with_args()
        self.create_instance_with_args()
        self.create_instance_with_args(host='host2')

        @sqlalchemy_api.pick_context_manager_reader
        def test(context):
            return sqlalchemy_api.instance_get_all_by_host(
                context, 'host1')

        result = test(ctxt)

        self.assertEqual(2, len(result))
        # make sure info_cache and security_groups were auto-joined
        instance = result[0]
        self.assertIn('info_cache', instance)
        self.assertIn('security_groups', instance)

    def test_instance_get_all_by_host_no_joins(self):
        """Tests that we don't join on the info_cache and security_groups
        tables if columns_to_join is an empty list.
        """
        self.create_instance_with_args()

        @sqlalchemy_api.pick_context_manager_reader
        def test(ctxt):
            return sqlalchemy_api.instance_get_all_by_host(
                ctxt, 'host1', columns_to_join=[])

        result = test(context.get_admin_context())
        self.assertEqual(1, len(result))
        # make sure info_cache and security_groups were not auto-joined
        instance = result[0]
        self.assertNotIn('info_cache', instance)
        self.assertNotIn('security_groups', instance)

    def test_instance_get_all_uuids_by_host(self):
        ctxt = context.get_admin_context()
        self.create_instance_with_args()
        self.create_instance_with_args()
        self.create_instance_with_args(host='host2')

        @sqlalchemy_api.pick_context_manager_reader
        def test(context):
            return sqlalchemy_api._instance_get_all_uuids_by_host(
                context, 'host1')

        result = test(ctxt)

        self.assertEqual(2, len(result))
        self.assertEqual(six.text_type, type(result[0]))

    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    def test_instance_get_active_by_window_joined_paging(self, mock_uuids):
        mock_uuids.side_effect = ['BBB', 'ZZZ', 'AAA', 'CCC']
        ctxt = context.get_admin_context()
        now = datetime.datetime(2015, 10, 2)
        self.create_instance_with_args(project_id='project-ZZZ')
        self.create_instance_with_args(project_id='project-ZZZ')
        self.create_instance_with_args(project_id='project-ZZZ')
        self.create_instance_with_args(project_id='project-AAA')

        # no limit or marker
        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now, columns_to_join=[])
        actual_uuids = [row['uuid'] for row in result]
        self.assertEqual(['CCC', 'AAA', 'BBB', 'ZZZ'], actual_uuids)

        # just limit
        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now, columns_to_join=[], limit=2)
        actual_uuids = [row['uuid'] for row in result]
        self.assertEqual(['CCC', 'AAA'], actual_uuids)

        # limit & marker
        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now, columns_to_join=[], limit=2, marker='CCC')
        actual_uuids = [row['uuid'] for row in result]
        self.assertEqual(['AAA', 'BBB'], actual_uuids)

        # unknown marker
        self.assertRaises(
            exception.MarkerNotFound,
            sqlalchemy_api.instance_get_active_by_window_joined,
            ctxt, begin=now, columns_to_join=[], limit=2, marker='unknown')

    def test_instance_get_active_by_window_joined(self):
        now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
        start_time = now - datetime.timedelta(minutes=10)
        now1 = now + datetime.timedelta(minutes=1)
        now2 = now + datetime.timedelta(minutes=2)
        now3 = now + datetime.timedelta(minutes=3)
        ctxt = context.get_admin_context()
        # used for testing columns_to_join
        network_info = jsonutils.dumps({'ckey': 'cvalue'})
        sample_data = {
            'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
            'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
            'info_cache': {'network_info': network_info},
        }
        self.create_instance_with_args(launched_at=now, **sample_data)
        self.create_instance_with_args(launched_at=now1, terminated_at=now2,
                                       **sample_data)
        self.create_instance_with_args(launched_at=now2, terminated_at=now3,
                                       **sample_data)
        self.create_instance_with_args(launched_at=now3, terminated_at=None,
                                       **sample_data)

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now)
        self.assertEqual(4, len(result))
        # verify that all default columns are joined
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual(sample_data['metadata'], meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual(sample_data['system_metadata'], sys_meta)
        self.assertIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now3, columns_to_join=['info_cache'])
        self.assertEqual(2, len(result))
        # verify that only info_cache is loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual({}, meta)
        self.assertIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=start_time, end=now)
        self.assertEqual(0, len(result))

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=start_time, end=now2,
            columns_to_join=['system_metadata'])
        self.assertEqual(2, len(result))
        # verify that only system_metadata is loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual({}, meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual(sample_data['system_metadata'], sys_meta)
        self.assertNotIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now2, end=now3,
            columns_to_join=['metadata', 'info_cache'])
        self.assertEqual(2, len(result))
        # verify that only metadata and info_cache are loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual(sample_data['metadata'], meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual({}, sys_meta)
        self.assertIn('info_cache', result[0])
        self.assertEqual(network_info, result[0]['info_cache']['network_info'])

    @mock.patch('nova.db.sqlalchemy.api.instance_get_all_by_filters_sort')
    def test_instance_get_all_by_filters_calls_sort(
            self, mock_get_all_filters_sort):
        '''Verifies instance_get_all_by_filters calls the sort function.'''
        # sort parameters should be wrapped in a list, all other parameters
        # should be passed through
        ctxt = context.get_admin_context()
        sqlalchemy_api.instance_get_all_by_filters(
            ctxt, {'foo': 'bar'},
            'sort_key', 'sort_dir', limit=100, marker='uuid',
            columns_to_join='columns')
        mock_get_all_filters_sort.assert_called_once_with(
            ctxt, {'foo': 'bar'},
            limit=100, marker='uuid', columns_to_join='columns',
            sort_keys=['sort_key'], sort_dirs=['sort_dir'])

    def test_instance_get_all_by_filters_sort_key_invalid(self):
        '''InvalidSortKey raised if an invalid key is given.'''
        for keys in [['foo'], ['uuid', 'foo']]:
            self.assertRaises(exception.InvalidSortKey,
                              db.instance_get_all_by_filters_sort,
                              self.context,
                              filters={},
                              sort_keys=keys)


class ProcessSortParamTestCase(test.TestCase):
    def test_process_sort_params_defaults(self):
        '''Verifies default sort parameters.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)

    def test_process_sort_params_override_default_keys(self):
        '''Verifies that the default keys can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'])
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_override_default_dir(self):
        '''Verifies that the default direction can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_dir='dir1')
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['dir1', 'dir1'], sort_dirs)

    def test_process_sort_params_override_default_key_and_dir(self):
        '''Verifies that the default key and dir can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'],
            default_dir='dir1')
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=[], default_dir='dir1')
        self.assertEqual([], sort_keys)
        self.assertEqual([], sort_dirs)

    def test_process_sort_params_non_default(self):
        '''Verifies that non-default keys are added correctly.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['key1', 'key2'], ['asc', 'desc'])
        self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
        # First sort_dir in list is used when adding the default keys
        self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default(self):
        '''Verifies that default keys are added correctly.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['asc', 'desc'])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)

        # Include default key value, rely on default direction
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default_dir(self):
        '''Verifies that the default dir is applied to all keys.'''
        # Direction is set, ignore default dir
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['desc'], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)

        # But should be used if no direction is set
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)

    def test_process_sort_params_unequal_length(self):
        '''Verifies that a shorter sort direction list is applied correctly.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)

        # Missing directions default to the first direction in the list
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)

    def test_process_sort_params_extra_dirs_lengths(self):
        '''InvalidInput raised if more directions than keys are given.'''
        self.assertRaises(exception.InvalidInput,
                          sqlalchemy_api.process_sort_params,
                          ['key1', 'key2'],
                          ['asc', 'desc', 'desc'])

    def test_process_sort_params_invalid_sort_dir(self):
        '''InvalidInput raised if invalid directions are given.'''
        for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]:
            self.assertRaises(exception.InvalidInput,
                              sqlalchemy_api.process_sort_params,
                              ['key'],
                              dirs)


class MigrationTestCase(test.TestCase):
    def setUp(self):
        super(MigrationTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

        self._create()
        self._create()
        self._create(status='reverted')
        self._create(status='confirmed')
        self._create(status='error')
        self._create(status='failed')
        self._create(status='accepted')
        self._create(status='done')
        self._create(status='completed')
        self._create(status='cancelled')
        self._create(source_compute='host2', source_node='b',
                     dest_compute='host1', dest_node='a')
        self._create(source_compute='host2', dest_compute='host3')
        self._create(source_compute='host3', dest_compute='host4')

    def _create(self, status='migrating', source_compute='host1',
                source_node='a', dest_compute='host2', dest_node='b',
                system_metadata=None, migration_type=None, uuid=None,
                created_at=None, updated_at=None):

        values = {'host': source_compute}
        instance = db.instance_create(self.ctxt, values)
        if system_metadata:
            db.instance_system_metadata_update(self.ctxt, instance['uuid'],
                                               system_metadata, False)

        values = {'status': status, 'source_compute': source_compute,
                  'source_node': source_node, 'dest_compute': dest_compute,
                  'dest_node': dest_node, 'instance_uuid': instance['uuid'],
                  'migration_type': migration_type, 'uuid': uuid}
        if created_at:
            values['created_at'] = created_at
        if updated_at:
            values['updated_at'] = updated_at
        db.migration_create(self.ctxt, values)
        return values

    def _assert_in_progress(self, migrations):
        for migration in migrations:
            self.assertNotEqual('confirmed', migration['status'])
            self.assertNotEqual('reverted', migration['status'])
            self.assertNotEqual('error', migration['status'])
            self.assertNotEqual('failed', migration['status'])
            self.assertNotEqual('accepted', migration['status'])
            self.assertNotEqual('done', migration['status'])
            self.assertNotEqual('cancelled', migration['status'])

    def test_migration_get_in_progress_joins(self):
        self._create(source_compute='foo', system_metadata={'foo': 'bar'})
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                                                                   'foo', 'a')
        system_metadata = migrations[0]['instance']['system_metadata'][0]
        self.assertEqual(system_metadata['key'], 'foo')
        self.assertEqual(system_metadata['value'], 'bar')

    def test_in_progress_host1_nodea(self):
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host1', 'a')
        # 2 as source + 1 as dest
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_in_progress_host1_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host1', 'b')
        # some migrations are to/from host1, but none with a node 'b'
        self.assertEqual(0, len(migrations))

    def test_in_progress_host2_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host2', 'b')
        # 2 as dest, 1 as source
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_instance_join(self):
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host2', 'b')
        for migration in migrations:
            instance = migration['instance']
            self.assertEqual(migration['instance_uuid'], instance['uuid'])

    def test_migration_get_by_uuid(self):
        migration1 = self._create(uuid=uuidsentinel.migration1_uuid)
        self._create(uuid=uuidsentinel.other_uuid)
        real_migration1 = db.migration_get_by_uuid(
            self.ctxt, uuidsentinel.migration1_uuid)
        for key in migration1:
            self.assertEqual(migration1[key], real_migration1[key])

    def test_migration_get_by_uuid_soft_deleted_and_deleted(self):
        migration1 = self._create(uuid=uuidsentinel.migration1_uuid)

        @sqlalchemy_api.pick_context_manager_writer
        def soft_delete_it(context):
            sqlalchemy_api.model_query(context, models.Migration).\
                filter_by(uuid=uuidsentinel.migration1_uuid).\
                soft_delete()

        @sqlalchemy_api.pick_context_manager_writer
        def delete_it(context):
            sqlalchemy_api.model_query(context, models.Migration,
                                       read_deleted="yes").\
                filter_by(uuid=uuidsentinel.migration1_uuid).\
                delete()

        soft_delete_it(self.ctxt)
        soft_deleted_migration1 = db.migration_get_by_uuid(
            self.ctxt, uuidsentinel.migration1_uuid)
        for key in migration1:
            self.assertEqual(migration1[key], soft_deleted_migration1[key])

        delete_it(self.ctxt)
        self.assertRaises(exception.MigrationNotFound,
                          db.migration_get_by_uuid, self.ctxt,
                          uuidsentinel.migration1_uuid)

    def test_migration_get_by_uuid_not_found(self):
        """Asserts that MigrationNotFound is raised if a migration is not
        found by a given uuid.
        """
        self.assertRaises(exception.MigrationNotFound,
                          db.migration_get_by_uuid, self.ctxt,
                          uuidsentinel.migration_not_found)

    def test_get_migrations_by_filters(self):
        filters = {"status": "migrating", "host": "host3",
                   "migration_type": None, "hidden": False}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(2, len(migrations))
        for migration in migrations:
            self.assertEqual(filters["status"], migration['status'])
            hosts = [migration['source_compute'], migration['dest_compute']]
            self.assertIn(filters["host"], hosts)

    def test_get_migrations_by_uuid_filters(self):
        mig_uuid1 = self._create(uuid=uuidsentinel.mig_uuid1)
        filters = {"uuid": [uuidsentinel.mig_uuid1]}
        mig_get = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(1, len(mig_get))
        for key in mig_uuid1:
            self.assertEqual(mig_uuid1[key], mig_get[0][key])

    def test_get_migrations_by_filters_with_multiple_statuses(self):
        filters = {"status": ["reverted", "confirmed"],
                   "migration_type": None, "hidden": False}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(2, len(migrations))
        for migration in migrations:
            self.assertIn(migration['status'], filters['status'])

    def test_get_migrations_by_filters_unicode_status(self):
        self._create(status=u"unicode")
        filters = {"status": u"unicode"}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(1, len(migrations))
        for migration in migrations:
            self.assertIn(migration['status'], filters['status'])

    def test_get_migrations_by_filters_with_type(self):
        self._create(status="special", source_compute="host9",
                     migration_type="evacuation")
        self._create(status="special", source_compute="host9",
                     migration_type="live-migration")
        filters = {"status": "special", "host": "host9",
                   "migration_type": "evacuation", "hidden": False}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(1, len(migrations))

    def test_get_migrations_by_filters_source_compute(self):
        filters = {'source_compute': 'host2'}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(2, len(migrations))
        sources = [x['source_compute'] for x in migrations]
        self.assertEqual(['host2', 'host2'], sources)
        dests = [x['dest_compute'] for x in migrations]
        self.assertEqual(['host1', 'host3'], dests)

    def test_get_migrations_by_filters_instance_uuid(self):
        migrations = db.migration_get_all_by_filters(self.ctxt, filters={})
        for migration in migrations:
            filters = {'instance_uuid': migration['instance_uuid']}
            instance_migrations = db.migration_get_all_by_filters(
                self.ctxt, filters)
            self.assertEqual(1, len(instance_migrations))
            self.assertEqual(migration['instance_uuid'],
                             instance_migrations[0]['instance_uuid'])

    def test_migration_get_unconfirmed_by_dest_compute(self):
        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host')
        self.assertEqual(0, len(results))

        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host2')
        self.assertEqual(0, len(results))

        updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(self.ctxt, values)

        # Ensure a migration for a different host is not returned.
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host')
        self.assertEqual(0, len(results))

        # Ensure one migration older than 10 seconds is returned.
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host2')
        self.assertEqual(1, len(results))
        db.migration_update(self.ctxt, migration['id'],
                            {"status": "CONFIRMED"})

        # Ensure the new migration is not returned.
        updated_at = timeutils.utcnow()
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(self.ctxt, values)
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               "fake_host2")
        self.assertEqual(0, len(results))
        db.migration_update(self.ctxt, migration['id'],
                            {"status": "CONFIRMED"})

    def test_migration_get_in_progress_by_instance(self):
        values = self._create(status='running',
                              migration_type="live-migration")
        results = db.migration_get_in_progress_by_instance(
            self.ctxt, values["instance_uuid"], "live-migration")
        self.assertEqual(1, len(results))

        for key in values:
            self.assertEqual(values[key], results[0][key])
        self.assertEqual("running", results[0]["status"])

    def test_migration_get_in_progress_by_instance_not_in_progress(self):
        values = self._create(migration_type="live-migration")
        results = db.migration_get_in_progress_by_instance(
            self.ctxt, values["instance_uuid"], "live-migration")
        self.assertEqual(0, len(results))

    def test_migration_get_in_progress_by_instance_not_live_migration(self):
        values = self._create(migration_type="resize")

        results = db.migration_get_in_progress_by_instance(
            self.ctxt, values["instance_uuid"], "live-migration")
        self.assertEqual(0, len(results))

        results = db.migration_get_in_progress_by_instance(
            self.ctxt, values["instance_uuid"])
        self.assertEqual(0, len(results))

    def test_migration_update_not_found(self):
        self.assertRaises(exception.MigrationNotFound,
                          db.migration_update, self.ctxt, 42, {})

    def test_get_migration_for_instance(self):
        migrations = db.migration_get_all_by_filters(self.ctxt, [])
        migration_id = migrations[0].id
        instance_uuid = migrations[0].instance_uuid
        instance_migration = db.migration_get_by_id_and_instance(
            self.ctxt, migration_id, instance_uuid)
        self.assertEqual(migration_id, instance_migration.id)
        self.assertEqual(instance_uuid, instance_migration.instance_uuid)

    def test_get_migration_for_instance_not_found(self):
        self.assertRaises(exception.MigrationNotFoundForInstance,
                          db.migration_get_by_id_and_instance, self.ctxt,
                          '500', '501')

    def _create_3_migration_after_time(self, time=None):
        time = time or timeutils.utcnow()
        tmp_time = time + datetime.timedelta(days=1)
        after_1hour = datetime.timedelta(hours=1)
        self._create(uuid=uuidsentinel.uuid_time1, created_at=tmp_time,
                     updated_at=tmp_time + after_1hour)
        tmp_time = time + datetime.timedelta(days=2)
        self._create(uuid=uuidsentinel.uuid_time2, created_at=tmp_time,
                     updated_at=tmp_time + after_1hour)
        tmp_time = time + datetime.timedelta(days=3)
        self._create(uuid=uuidsentinel.uuid_time3, created_at=tmp_time,
                     updated_at=tmp_time + after_1hour)

    def test_get_migrations_by_filters_with_limit(self):
        migrations = db.migration_get_all_by_filters(self.ctxt, {}, limit=3)
        self.assertEqual(3, len(migrations))

    def test_get_migrations_by_filters_with_limit_marker(self):
        self._create_3_migration_after_time()
        # order by created_at, desc: time3, time2, time1
        migrations = db.migration_get_all_by_filters(
            self.ctxt, {}, limit=2, marker=uuidsentinel.uuid_time3)
        # time3 as marker: time2, time1
        self.assertEqual(2, len(migrations))
        self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
        self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time1)
        # time3 as marker, limit 1: time2
        migrations = db.migration_get_all_by_filters(
            self.ctxt, {}, limit=1, marker=uuidsentinel.uuid_time3)
        self.assertEqual(1, len(migrations))
        self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)

    def test_get_migrations_by_filters_with_limit_marker_sort(self):
        self._create_3_migration_after_time()
        # order by created_at, desc: time3, time2, time1
        migrations = db.migration_get_all_by_filters(
            self.ctxt, {}, limit=2, marker=uuidsentinel.uuid_time3)
        # time3 as marker: time2, time1
        self.assertEqual(2, len(migrations))
        self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
        self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time1)

        # order by updated_at, asc: time1, time2, time3
        migrations = db.migration_get_all_by_filters(
            self.ctxt, {}, sort_keys=['updated_at'], sort_dirs=['asc'],
            limit=2, marker=uuidsentinel.uuid_time1)
        # time1 as marker: time2, time3
        self.assertEqual(2, len(migrations))
        self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
        self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time3)

    def test_get_migrations_by_filters_with_not_found_marker(self):
        self.assertRaises(exception.MarkerNotFound,
                          db.migration_get_all_by_filters, self.ctxt, {},
                          marker=uuidsentinel.not_found_marker)

    def test_get_migrations_by_filters_with_changes_since(self):
        changes_time = timeutils.utcnow(with_timezone=True)
        self._create_3_migration_after_time(changes_time)
        after_1day_2hours = datetime.timedelta(days=1, hours=2)
        filters = {"changes-since": changes_time + after_1day_2hours}
        migrations = db.migration_get_all_by_filters(
            self.ctxt, filters,
            sort_keys=['updated_at'], sort_dirs=['asc'])
        self.assertEqual(2, len(migrations))
        self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
        self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time3)

    def test_get_migrations_by_filters_with_changes_before(self):
        changes_time = timeutils.utcnow(with_timezone=True)
        self._create_3_migration_after_time(changes_time)
        after_3day_2hours = datetime.timedelta(days=3, hours=2)
        filters = {"changes-before": changes_time + after_3day_2hours}
        migrations = db.migration_get_all_by_filters(
            self.ctxt, filters,
            sort_keys=['updated_at'], sort_dirs=['asc'])
        self.assertEqual(3, len(migrations))
        self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time1)
        self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time2)
        self.assertEqual(migrations[2]['uuid'], uuidsentinel.uuid_time3)
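

# Assertion helpers for comparing DB model objects as dicts, with support for
# ignoring selected keys and for order-insensitive list comparison.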
class ModelsObjectComparatorMixin(object):
    def _dict_from_object(self, obj, ignored_keys):
        if ignored_keys is None:
            ignored_keys = []

        return {k: v for k, v in obj.items()
                if k not in ignored_keys}

    def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
        obj1 = self._dict_from_object(obj1, ignored_keys)
        obj2 = self._dict_from_object(obj2, ignored_keys)

        self.assertEqual(len(obj1),
                         len(obj2),
                         "Keys mismatch: %s" %
                         str(set(obj1.keys()) ^ set(obj2.keys())))
        for key, value in obj1.items():
            self.assertEqual(value, obj2[key])

    def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
        obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
        sort_key = lambda d: [d[k] for k in sorted(d)]
        conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)

        self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))

    def _assertEqualOrderedListOfObjects(self, objs1, objs2,
                                         ignored_keys=None):
        obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
        conv = lambda objs: [obj_to_dict(obj) for obj in objs]

        self.assertEqual(conv(objs1), conv(objs2))

    def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
        self.assertEqual(len(primitives1), len(primitives2))
        for primitive in primitives1:
            self.assertIn(primitive, primitives2)
        for primitive in primitives2:
            self.assertIn(primitive, primitives1)


class InstanceSystemMetadataTestCase(test.TestCase):
    """Tests for db.api.instance_system_metadata_* methods."""

    def setUp(self):
        super(InstanceSystemMetadataTestCase, self).setUp()
        values = {'host': 'h1', 'project_id': 'p1',
                  'system_metadata': {'key': 'value'}}
        self.ctxt = context.get_admin_context()
        self.instance = db.instance_create(self.ctxt, values)

    def test_instance_system_metadata_get(self):
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'value'})

    def test_instance_system_metadata_update_new_pair(self):
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'new_key': 'new_value'}, False)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})

    def test_instance_system_metadata_update_existent_pair(self):
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'key': 'new_value'}, True)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'new_value'})

    def test_instance_system_metadata_update_delete_true(self):
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'new_key': 'new_value'}, True)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'new_key': 'new_value'})

    @test.testtools.skip("bug 1189462")
    def test_instance_system_metadata_update_nonexistent(self):
        self.assertRaises(exception.InstanceNotFound,
                          db.instance_system_metadata_update,
                          self.ctxt, 'nonexistent-uuid',
                          {'key': 'value'}, True)
class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(SecurityGroupRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _get_base_rule_values(self):
return {
'protocol': "tcp",
'from_port': 80,
'to_port': 8080,
'cidr': None,
'deleted': 0,
'deleted_at': None,
'grantee_group': None,
'updated_at': None
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def _create_security_group_rule(self, values):
v = self._get_base_rule_values()
v.update(values)
return db.security_group_rule_create(self.ctxt, v)
def test_security_group_rule_create(self):
security_group_rule = self._create_security_group_rule({})
self.assertIsNotNone(security_group_rule['id'])
for key, value in self._get_base_rule_values().items():
self.assertEqual(value, security_group_rule[key])
def _test_security_group_rule_get_by_security_group(self, columns=None):
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
security_group = self._create_security_group({
'instances': [instance]})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
found_rules = db.security_group_rule_get_by_security_group(
self.ctxt, security_group['id'], columns_to_join=columns)
self.assertEqual(len(found_rules), 2)
rules_ids = [security_group_rule['id'], security_group_rule1['id']]
for rule in found_rules:
if columns is None:
self.assertIn('grantee_group', dict(rule))
self.assertIn('instances',
dict(rule.grantee_group))
self.assertIn(
'system_metadata',
dict(rule.grantee_group.instances[0]))
self.assertIn(rule['id'], rules_ids)
else:
self.assertNotIn('grantee_group', dict(rule))
def test_security_group_rule_get_by_security_group(self):
self._test_security_group_rule_get_by_security_group()
def test_security_group_rule_get_by_security_group_no_joins(self):
self._test_security_group_rule_get_by_security_group(columns=[])
def test_security_group_rule_get_by_instance(self):
instance = db.instance_create(self.ctxt, {})
security_group = self._create_security_group({
'instances': [instance]})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule_ids = [security_group_rule['id'],
security_group_rule1['id']]
found_rules = db.security_group_rule_get_by_instance(self.ctxt,
instance['uuid'])
self.assertEqual(len(found_rules), 2)
for rule in found_rules:
self.assertIn('grantee_group', rule)
self.assertIn(rule['id'], security_group_rule_ids)
def test_security_group_rule_destroy(self):
self._create_security_group({'name': 'fake1'})
self._create_security_group({'name': 'fake2'})
security_group_rule1 = self._create_security_group_rule({})
security_group_rule2 = self._create_security_group_rule({})
db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get,
self.ctxt, security_group_rule1['id'])
self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
security_group_rule2['id']),
security_group_rule2, ['grantee_group'])
def test_security_group_rule_destroy_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_destroy, self.ctxt, 100500)
def test_security_group_rule_get(self):
        security_group_rule1 = self._create_security_group_rule({})
self._create_security_group_rule({})
real_security_group_rule = db.security_group_rule_get(self.ctxt,
security_group_rule1['id'])
self._assertEqualObjects(security_group_rule1,
real_security_group_rule, ['grantee_group'])
def test_security_group_rule_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get, self.ctxt, 100500)
def test_security_group_rule_count_by_group(self):
sg1 = self._create_security_group({'name': 'fake1'})
sg2 = self._create_security_group({'name': 'fake2'})
rules_by_group = {sg1: [], sg2: []}
        for group, rules in rules_by_group.items():
            for i in range(10):
rules.append(
self._create_security_group_rule({'parent_group_id':
group['id']}))
db.security_group_rule_destroy(self.ctxt,
rules_by_group[sg1][0]['id'])
counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
group['id'])
for group in [sg1, sg2]]
expected = [9, 10]
self.assertEqual(counted_groups, expected)


class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
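    """Tests for db.api.security_group_* methods."""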
def setUp(self):
super(SecurityGroupTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def test_security_group_create(self):
security_group = self._create_security_group({})
self.assertIsNotNone(security_group['id'])
for key, value in self._get_base_values().items():
self.assertEqual(value, security_group[key])
def test_security_group_destroy(self):
security_group1 = self._create_security_group({})
        security_group2 = self._create_security_group(
            {'name': 'fake_sec_group2'})
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get,
self.ctxt, security_group1['id'])
self._assertEqualObjects(db.security_group_get(
self.ctxt, security_group2['id'],
columns_to_join=['instances',
'rules']), security_group2)
def test_security_group_destroy_with_instance(self):
security_group1 = self._create_security_group({})
instance = db.instance_create(self.ctxt, {})
db.instance_add_security_group(self.ctxt, instance.uuid,
security_group1.id)
self.assertEqual(
1,
len(db.security_group_get_by_instance(self.ctxt, instance.uuid)))
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertEqual(
0,
len(db.security_group_get_by_instance(self.ctxt, instance.uuid)))
def test_security_group_get(self):
security_group1 = self._create_security_group({})
self._create_security_group({'name': 'fake_sec_group2'})
real_security_group = db.security_group_get(self.ctxt,
security_group1['id'],
columns_to_join=['instances',
'rules'])
self._assertEqualObjects(security_group1,
real_security_group)
def test_security_group_get_with_instance_columns(self):
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
secgroup = self._create_security_group({'instances': [instance]})
secgroup = db.security_group_get(
self.ctxt, secgroup['id'],
columns_to_join=['instances.system_metadata'])
inst = secgroup.instances[0]
        self.assertIn('system_metadata', dict(inst))
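    # 'instances' should appear on the returned model only when it was
    # eagerly loaded via columns_to_join; otherwise it must be absent
    # from the object's __dict__.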
def test_security_group_get_no_instances(self):
instance = db.instance_create(self.ctxt, {})
sid = self._create_security_group({'instances': [instance]})['id']
security_group = db.security_group_get(self.ctxt, sid,
columns_to_join=['instances'])
self.assertIn('instances', security_group.__dict__)
security_group = db.security_group_get(self.ctxt, sid)
self.assertNotIn('instances', security_group.__dict__)
def test_security_group_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get, self.ctxt, 100500)
def test_security_group_get_by_name(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
real_security_group1 = db.security_group_get_by_name(
self.ctxt,
security_group1['project_id'],
security_group1['name'],
columns_to_join=None)
real_security_group2 = db.security_group_get_by_name(
self.ctxt,
security_group2['project_id'],
security_group2['name'],
columns_to_join=None)
self._assertEqualObjects(security_group1, real_security_group1)
self._assertEqualObjects(security_group2, real_security_group2)
def test_security_group_get_by_project(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake2', 'project_id': 'fake_proj2'})
real1 = db.security_group_get_by_project(
self.ctxt,
security_group1['project_id'])
real2 = db.security_group_get_by_project(
self.ctxt,
security_group2['project_id'])
expected1, expected2 = [security_group1], [security_group2]
self._assertEqualListsOfObjects(expected1, real1,
ignored_keys=['instances'])
self._assertEqualListsOfObjects(expected2, real2,
ignored_keys=['instances'])
def test_security_group_get_by_instance(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'name': 'fake1', 'instances': [instance]},
{'name': 'fake2', 'instances': [instance]},
{'name': 'fake3', 'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_by_instance(self.ctxt,
instance['uuid'])
expected = security_groups[:2]
self._assertEqualListsOfObjects(expected, real,
ignored_keys=['instances'])
def test_security_group_get_all(self):
values = [
{'name': 'fake1', 'project_id': 'fake_proj1'},
{'name': 'fake2', 'project_id': 'fake_proj2'},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_all(self.ctxt)
self._assertEqualListsOfObjects(security_groups, real,
ignored_keys=['instances'])
def test_security_group_in_use(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'instances': [instance],
'name': 'fake_in_use'},
{'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = []
for security_group in security_groups:
in_use = db.security_group_in_use(self.ctxt,
security_group['id'])
real.append(in_use)
expected = [True, False]
self.assertEqual(expected, real)
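    # security_group_ensure_default lazily creates a per-project 'default'
    # group the first time it is called for that project.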
def test_security_group_ensure_default(self):
self.ctxt.project_id = 'fake'
self.ctxt.user_id = 'fake'
self.assertEqual(0, len(db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)))
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
self.assertEqual("default", security_groups[0]["name"])
@mock.patch.object(sqlalchemy_api, '_security_group_get_by_names')
def test_security_group_ensure_default_called_concurrently(self, sg_mock):
        # Make sure NotFound is always raised here to trick Nova into
        # inserting the duplicate security group entry.
sg_mock.side_effect = exception.NotFound
# create the first db entry
self.ctxt.project_id = 1
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
# create the second one and ensure the exception is handled properly
default_group = db.security_group_ensure_default(self.ctxt)
self.assertEqual('default', default_group.name)
def test_security_group_update(self):
security_group = self._create_security_group({})
new_values = {
'name': 'sec_group1',
'description': 'sec_group_descr1',
'user_id': 'fake_user1',
'project_id': 'fake_proj1',
}
updated_group = db.security_group_update(self.ctxt,
security_group['id'],
new_values,
columns_to_join=['rules.grantee_group'])
for key, value in new_values.items():
self.assertEqual(updated_group[key], value)
self.assertEqual(updated_group['rules'], [])
def test_security_group_update_to_duplicate(self):
self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj2'})
self.assertRaises(exception.SecurityGroupExists,
db.security_group_update,
self.ctxt, security_group2['id'],
{'project_id': 'fake_proj1'})


# Patch out time.sleep so any DB retry logic hit by these tests (such as
# the deadlock retry in test_instance_create_with_deadlock_retry) does not
# actually pause the test run.
@mock.patch('time.sleep', new=lambda x: None)
class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.instance_* methods."""
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'},
}
def setUp(self):
super(InstanceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _assertEqualInstances(self, instance1, instance2):
self._assertEqualObjects(instance1, instance2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def _assertEqualListsOfInstances(self, list1, list2):
self._assertEqualListsOfObjects(list1, list2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def create_instance_with_args(self, **kwargs):
if 'context' in kwargs:
context = kwargs.pop('context')
else:
context = self.ctxt
args = self.sample_data.copy()
args.update(kwargs)
return db.instance_create(context, args)
def test_instance_create(self):
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
@mock.patch.object(sqlalchemy_api, 'security_group_ensure_default')
def test_instance_create_with_deadlock_retry(self, mock_sg):
mock_sg.side_effect = [db_exc.DBDeadlock(), None]
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
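    # The DB API is expected to coerce object-typed values on create:
    # netaddr IPAddress objects are stored as strings and timezone-aware
    # datetimes come back as naive UTC datetimes.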
def test_instance_create_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.UTC)
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.UTC)
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
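    # Updating metadata and system_metadata in a single instance_update
    # call must persist both, with neither clobbering the other.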
def test_instance_update_no_metadata_clobber(self):
meta = {'foo': 'bar'}
sys_meta = {'sfoo': 'sbar'}
values = {
'metadata': meta,
'system_metadata': sys_meta,
}
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(meta, utils.metadata_to_dict(inst['metadata']))
self.assertEqual(sys_meta,
utils.metadata_to_dict(inst['system_metadata']))
def test_instance_get_all_with_meta(self):
self.create_instance_with_args()
for inst in db.instance_get_all(self.ctxt):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_update(self):
instance = self.create_instance_with_args()
metadata = {'host': 'bar', 'key2': 'wuff'}
system_metadata = {'original_image_ref': 'baz'}
# Update the metadata
db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
'system_metadata': system_metadata})
# Retrieve the user-provided metadata to ensure it was successfully
# updated
self.assertEqual(metadata,
db.instance_metadata_get(self.ctxt, instance['uuid']))
self.assertEqual(system_metadata,
db.instance_system_metadata_get(self.ctxt, instance['uuid']))
def test_instance_update_bad_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '123'}
self.assertRaises(ValueError,
db.instance_update,
self.ctxt, instance['uuid'], values)
def test_instance_update_good_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '2011-01-31T00:00:00.0'}
actual = db.instance_update(self.ctxt, instance['uuid'], values)
expected = datetime.datetime(2011, 1, 31)
self.assertEqual(expected, actual["created_at"])
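    # osapi_compute_unique_server_name_scope controls hostname uniqueness:
    # 'global' rejects a duplicate hostname in any project, while 'project'
    # only rejects duplicates within the same project.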
def test_create_instance_unique_hostname(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
self.create_instance_with_args(hostname='h1', project_id='p1')
        # With scope 'global' any duplicate should fail, whether in this
        # project:
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p3')
# or another:
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context2,
hostname='h1', project_id='p2')
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p1')
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
self.create_instance_with_args(context=context2, hostname='h2')
self.flags(osapi_compute_unique_server_name_scope=None)
def test_instance_get_all_by_filters_empty_list_filter(self):
filters = {'uuid': []}
instances = db.instance_get_all_by_filters_sort(self.ctxt, filters)
self.assertEqual([], instances)
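    # The next two tests verify the join strategy for extra columns:
    # relationship columns such as info_cache go through joinedload(),
    # while deferred 'extra.*' columns are loaded with undefer().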
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_all_by_filters_extra_columns(self,
mock_joinedload,
mock_undefer):
db.instance_get_all_by_filters_sort(
self.ctxt, {},
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_active_by_window_extra_columns(self,
mock_joinedload,
mock_undefer):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
db.instance_get_active_by_window_joined(
self.ctxt, now,
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
def test_instance_get_all_by_filters_with_meta(self):
self.create_instance_with_args()
for inst in db.instance_get_all_by_filters(self.ctxt, {}):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_without_meta(self):
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=[])
for inst in result:
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_all_by_filters_with_fault(self):
inst = self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=['fault'])
self.assertIsNone(result[0]['fault'])
db.instance_fault_create(self.ctxt,
{'instance_uuid': inst['uuid'],
'code': 123})
fault2 = db.instance_fault_create(self.ctxt,
{'instance_uuid': inst['uuid'],
'code': 123})
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=['fault'])
# Make sure we get the latest fault
self.assertEqual(fault2['id'], result[0]['fault']['id'])
def test_instance_get_all_by_filters(self):
instances = [self.create_instance_with_args() for i in range(3)]
filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfInstances(instances, filtered_instances)
def test_instance_get_all_by_filters_zero_limit(self):
self.create_instance_with_args()
instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
self.assertEqual([], instances)
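    # The private _instance_metadata_get_multi helpers expect an active
    # database session, so the tests wrap them with
    # pick_context_manager_reader.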
def test_instance_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
@sqlalchemy_api.pick_context_manager_reader
def test(context):
return sqlalchemy_api._instance_metadata_get_multi(
context, uuids)
meta = test(self.ctxt)
for row in meta:
self.assertIn(row['instance_uuid'], uuids)
@mock.patch.object(query.Query, 'filter')
def test_instance_metadata_get_multi_no_uuids(self, mock_query_filter):
with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
self.assertFalse(mock_query_filter.called)
    def test_instance_system_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
@sqlalchemy_api.pick_context_manager_reader
def test(context):
return sqlalchemy_api._instance_system_metadata_get_multi(
context, uuids)
sys_meta = test(self.ctxt)
for row in sys_meta:
self.assertIn(row['instance_uuid'], uuids)
@mock.patch.object(query.Query, 'filter')
def test_instance_system_metadata_get_multi_no_uuids(self,
mock_query_filter):
sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
self.assertFalse(mock_query_filter.called)
def test_instance_get_all_by_filters_regex(self):
i1 = self.create_instance_with_args(display_name='test1')
i2 = self.create_instance_with_args(display_name='teeeest2')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': 't.*st.'})
self._assertEqualListsOfInstances(result, [i1, i2])
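    # 'changes-since' is an inclusive lower bound on updated_at and also
    # matches soft-deleted instances, so a destroyed instance can still be
    # used as a pagination marker.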
def test_instance_get_all_by_filters_changes_since(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i1, i2], result)
changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i2], result)
db.instance_destroy(self.ctxt, i1['uuid'])
filters = {}
filters['changes-since'] = changes_since
filters['marker'] = i1['uuid']
result = db.instance_get_all_by_filters(self.ctxt,
filters)
self._assertEqualListsOfInstances([i2], result)
def test_instance_get_all_by_filters_changes_before(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
changes_before = iso8601.parse_date('2013-12-05T15:03:26.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-before':
changes_before})
self._assertEqualListsOfInstances([i1, i2], result)
changes_before = iso8601.parse_date('2013-12-05T15:03:25.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-before':
changes_before})
self._assertEqualListsOfInstances([i1], result)
db.instance_destroy(self.ctxt, i2['uuid'])
filters = {}
filters['changes-before'] = changes_before
filters['marker'] = i2['uuid']
result = db.instance_get_all_by_filters(self.ctxt,
filters)
self._assertEqualListsOfInstances([i1], result)
def test_instance_get_all_by_filters_changes_time_period(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
i3 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:27.000000')
changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
changes_before = iso8601.parse_date('2013-12-05T15:03:27.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since,
'changes-before':
changes_before})
self._assertEqualListsOfInstances([i1, i2, i3], result)