Make Image Volume Cache cluster aware

Image Volume Cache mechanism was not cluster aware and therefore cached
images would not be shared among different services in the same cluster.

This patch addresses this issue and makes sure that cached images are
shared among all the services of a cluster when one is defined.

This patch does not address any concurrency issues that may currently
exist in the caching mechanism's code.

Implements: blueprint cinder-volume-active-active-support
Change-Id: I9be2b3c6dc571ce2e0e4ccf7557123a7858c1990
This commit is contained in:
Gorka Eguileor 2016-10-13 20:49:45 +02:00
parent b4a13281ea
commit 4d3e1e7c35
10 changed files with 237 additions and 107 deletions

View File

@ -1569,11 +1569,12 @@ def driver_initiator_data_get(context, initiator, namespace):
###################
def image_volume_cache_create(context, host, image_id, image_updated_at,
volume_id, size):
def image_volume_cache_create(context, host, cluster_name, image_id,
image_updated_at, volume_id, size):
"""Create a new image volume cache entry."""
return IMPL.image_volume_cache_create(context,
host,
cluster_name,
image_id,
image_updated_at,
volume_id,
@ -1585,11 +1586,11 @@ def image_volume_cache_delete(context, volume_id):
return IMPL.image_volume_cache_delete(context, volume_id)
def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
    """Query for an image volume cache entry and refresh its last_used time.

    :param context: request context
    :param image_id: image the cache entry must reference
    :param filters: additional exact-match filters (e.g. host or
                    cluster_name) to scope the lookup
    :returns: the matching cache entry, as returned by the backend
    """
    return IMPL.image_volume_cache_get_and_update_last_used(
        context, image_id, **filters)
def image_volume_cache_get_by_volume_id(context, volume_id):
@ -1597,9 +1598,28 @@ def image_volume_cache_get_by_volume_id(context, volume_id):
return IMPL.image_volume_cache_get_by_volume_id(context, volume_id)
def image_volume_cache_get_all(context, **filters):
    """Query for all image volume cache entries matching the filters.

    :param context: request context
    :param filters: exact-match filters (e.g. host or cluster_name) used to
                    restrict the returned entries
    :returns: list of matching image volume cache entries
    """
    # NOTE: previously this was image_volume_cache_get_all_for_host and the
    # docstring still claimed results were "for a host"; entries may now be
    # selected by any filter, including cluster_name.
    return IMPL.image_volume_cache_get_all(context, **filters)
def image_volume_cache_include_in_cluster(context, cluster,
                                          partial_rename=True, **filters):
    """Include in cluster image volume cache entries matching the filters.

    When partial_rename is set we will not set the cluster_name with the
    cluster parameter value directly, we'll replace the provided cluster_name
    or host filter value with cluster instead.

    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is.  If we are using cluster_name
    to filter, we'll use that same DB field to replace the cluster value and
    leave the rest as it is.  Likewise if we use the host to filter.

    :returns: the number of image volume cache entries that have been changed
    """
    # NOTE: the original docstring said "number of volumes", but this method
    # operates on image volume cache entries, not volumes.
    return IMPL.image_volume_cache_include_in_cluster(
        context, cluster, partial_rename, **filters)
###################

View File

@ -1590,7 +1590,10 @@ def _include_in_cluster(context, cluster, model, partial_rename, filters):
filters = _clean_filters(filters)
if filters and not is_valid_model_filters(model, filters):
return None
query = model_query(context, model)
query = get_session().query(model)
if hasattr(model, 'deleted'):
query = query.filter_by(deleted=False)
# cluster_name and host are special filter cases
for field in {'cluster_name', 'host'}.intersection(filters):
@ -6392,12 +6395,13 @@ PAGINATION_HELPERS = {
@require_context
def image_volume_cache_create(context, host, image_id, image_updated_at,
volume_id, size):
def image_volume_cache_create(context, host, cluster_name, image_id,
image_updated_at, volume_id, size):
session = get_session()
with session.begin():
cache_entry = models.ImageVolumeCacheEntry()
cache_entry.host = host
cache_entry.cluster_name = cluster_name
cache_entry.image_id = image_id
cache_entry.image_updated_at = image_updated_at
cache_entry.volume_id = volume_id
@ -6416,12 +6420,13 @@ def image_volume_cache_delete(context, volume_id):
@require_context
def image_volume_cache_get_and_update_last_used(context, image_id, host):
def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
filters = _clean_filters(filters)
session = get_session()
with session.begin():
entry = session.query(models.ImageVolumeCacheEntry).\
filter_by(image_id=image_id).\
filter_by(host=host).\
filter_by(**filters).\
order_by(desc(models.ImageVolumeCacheEntry.last_used)).\
first()
@ -6441,15 +6446,25 @@ def image_volume_cache_get_by_volume_id(context, volume_id):
@require_context
def image_volume_cache_get_all(context, **filters):
    """Return all image volume cache entries matching the given filters.

    Entries are ordered from most recently used to least recently used.
    """
    filters = _clean_filters(filters)
    session = get_session()
    with session.begin():
        query = (session.query(models.ImageVolumeCacheEntry)
                 .filter_by(**filters)
                 .order_by(desc(models.ImageVolumeCacheEntry.last_used)))
        return query.all()
@require_admin_context
def image_volume_cache_include_in_cluster(context, cluster,
                                          partial_rename=True, **filters):
    """Include all image volume cache entries matching the filters in cluster.

    :param cluster: cluster name (or value replacing the filtered field when
                    partial_rename is True)
    :param partial_rename: when True only the cluster_name/host portion used
                           for filtering is replaced, keeping backend/pool
    :param filters: exact-match filters selecting the entries to update
    :returns: number of image volume cache entries that have been changed
    """
    # NOTE: the original docstring said "volumes"; this updates image volume
    # cache entries via the shared _include_in_cluster helper.
    filters = _clean_filters(filters)
    return _include_in_cluster(context, cluster, models.ImageVolumeCacheEntry,
                               partial_rename, filters)
###################

View File

@ -0,0 +1,27 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
    """Add cluster name to image cache entries."""
    metadata = MetaData(bind=migrate_engine)
    entries = Table('image_volume_cache_entries', metadata, autoload=True)
    # New column is nullable: existing (and non-clustered) entries keep NULL.
    entries.create_column(Column('cluster_name', String(255), nullable=True))

View File

@ -828,6 +828,7 @@ class ImageVolumeCacheEntry(BASE, models.ModelBase):
__tablename__ = 'image_volume_cache_entries'
id = Column(Integer, primary_key=True, nullable=False)
host = Column(String(255), index=True, nullable=False)
cluster_name = Column(String(255), nullable=True)
image_id = Column(String(36), index=True, nullable=False)
image_updated_at = Column(DateTime, nullable=False)
volume_id = Column(String(36), nullable=False)

View File

@ -47,11 +47,17 @@ class ImageVolumeCache(object):
self._notify_cache_eviction(context, cache_entry['image_id'],
cache_entry['host'])
@staticmethod
def _get_query_filters(volume_ref):
    """Build DB filter kwargs scoping cache queries to the volume's service.

    Clustered volumes share cache entries cluster-wide; non-clustered
    volumes keep per-host entries.
    """
    if not volume_ref.is_clustered:
        return {'host': volume_ref.host}
    return {'cluster_name': volume_ref.cluster_name}
def get_entry(self, context, volume_ref, image_id, image_meta):
cache_entry = self.db.image_volume_cache_get_and_update_last_used(
context,
image_id,
volume_ref['host']
**self._get_query_filters(volume_ref)
)
if cache_entry:
@ -80,8 +86,9 @@ class ImageVolumeCache(object):
created and is in an available state.
"""
LOG.debug('Creating new image-volume cache entry for image '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume_ref['host']})
'%(image_id)s on %(service)s',
{'image_id': image_id,
'service': volume_ref.service_topic_queue})
# When we are creating an image from a volume the updated_at field
# will be a unicode representation of the datetime. In that case
@ -95,19 +102,20 @@ class ImageVolumeCache(object):
cache_entry = self.db.image_volume_cache_create(
context,
volume_ref['host'],
volume_ref.host,
volume_ref.cluster_name,
image_id,
image_updated_at.replace(tzinfo=None),
volume_ref['id'],
volume_ref['size']
volume_ref.id,
volume_ref.size
)
LOG.debug('New image-volume cache entry created: %(entry)s.',
{'entry': self._entry_to_str(cache_entry)})
return cache_entry
def ensure_space(self, context, space_required, host):
"""Makes room for a cache entry.
def ensure_space(self, context, volume):
"""Makes room for a volume cache entry.
Returns True if successful, false otherwise.
"""
@ -120,11 +128,12 @@ class ImageVolumeCache(object):
# and bail out before evicting everything else to try and make
# room for it.
if (self.max_cache_size_gb != 0 and
space_required > self.max_cache_size_gb):
volume.size > self.max_cache_size_gb):
return False
# Assume the entries are ordered by most recently used to least used.
entries = self.db.image_volume_cache_get_all_for_host(context, host)
entries = self.db.image_volume_cache_get_all(
**self._get_query_filters(volume))
current_count = len(entries)
@ -133,13 +142,13 @@ class ImageVolumeCache(object):
current_size += entry['size']
# Add values for the entry we intend to create.
current_size += space_required
current_size += volume.size
current_count += 1
LOG.debug('Image-volume cache for host %(host)s current_size (GB) = '
LOG.debug('Image-volume cache for %(service)s current_size (GB) = '
'%(size_gb)s (max = %(max_gb)s), current count = %(count)s '
'(max = %(max_count)s).',
{'host': host,
{'service': volume.service_topic_queue,
'size_gb': current_size,
'max_gb': self.max_cache_size_gb,
'count': current_count,
@ -154,9 +163,9 @@ class ImageVolumeCache(object):
self._delete_image_volume(context, entry)
current_size -= entry['size']
current_count -= 1
LOG.debug('Image-volume cache for host %(host)s new size (GB) = '
LOG.debug('Image-volume cache for %(service)s new size (GB) = '
'%(size_gb)s, new count = %(count)s.',
{'host': host,
{'service': volume.service_topic_queue,
'size_gb': current_size,
'count': current_count})
@ -166,8 +175,9 @@ class ImageVolumeCache(object):
# to 0.
if self.max_cache_size_gb > 0:
if current_size > self.max_cache_size_gb > 0:
LOG.warning(_LW('Image-volume cache for host %(host)s does '
'not have enough space (GB).'), {'host': host})
LOG.warning(_LW('Image-volume cache for %(service)s does '
'not have enough space (GB).'),
{'service': volume.service_topic_queue})
return False
return True

View File

@ -13,15 +13,20 @@
# under the License.
from datetime import timedelta
import ddt
import mock
from oslo_utils import timeutils
from cinder import context as ctxt
from cinder.db.sqlalchemy import models
from cinder.image import cache as image_cache
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
@ddt.ddt
class ImageVolumeCacheTestCase(test.TestCase):
def setUp(self):
@ -29,6 +34,13 @@ class ImageVolumeCacheTestCase(test.TestCase):
self.mock_db = mock.Mock()
self.mock_volume_api = mock.Mock()
self.context = ctxt.get_admin_context()
self.volume = models.Volume()
vol_params = {'id': fake.VOLUME_ID,
'host': 'foo@bar#whatever',
'cluster_name': 'cluster',
'size': 0}
self.volume.update(vol_params)
self.volume_ovo = objects.Volume(self.context, **vol_params)
def _build_cache(self, max_gb=0, max_count=0):
cache = image_cache.ImageVolumeCache(self.mock_db,
@ -42,6 +54,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
entry = {
'id': 1,
'host': 'test@foo#bar',
'cluster_name': 'cluster@foo#bar',
'image_id': 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2',
'image_updated_at': timeutils.utcnow(with_timezone=True),
'volume_id': '70a599e0-31e7-49b7-b260-868f441e862b',
@ -78,12 +91,10 @@ class ImageVolumeCacheTestCase(test.TestCase):
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
def test_get_entry(self):
@ddt.data(True, False)
def test_get_entry(self, clustered):
cache = self._build_cache()
entry = self._build_entry()
volume_ref = {
'host': 'foo@bar#whatever'
}
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
@ -94,8 +105,13 @@ class ImageVolumeCacheTestCase(test.TestCase):
}
(self.mock_db.
image_volume_cache_get_and_update_last_used.return_value) = entry
if not clustered:
self.volume_ovo.cluster_name = None
expect = {'host': self.volume.host}
else:
expect = {'cluster_name': self.volume.cluster_name}
found_entry = cache.get_entry(self.context,
volume_ref,
self.volume_ovo,
entry['image_id'],
image_meta)
self.assertDictEqual(entry, found_entry)
@ -103,7 +119,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
image_volume_cache_get_and_update_last_used.assert_called_once_with)(
self.context,
entry['image_id'],
volume_ref['host']
**expect
)
msg = self.notifier.notifications[0]
@ -115,9 +131,6 @@ class ImageVolumeCacheTestCase(test.TestCase):
def test_get_entry_not_exists(self):
cache = self._build_cache()
volume_ref = {
'host': 'foo@bar#whatever'
}
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
@ -131,7 +144,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
image_volume_cache_get_and_update_last_used.return_value) = None
found_entry = cache.get_entry(self.context,
volume_ref,
self.volume_ovo,
image_id,
image_meta)
@ -140,7 +153,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.miss', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(volume_ref['host'], msg['payload']['host'])
self.assertEqual(self.volume.host, msg['payload']['host'])
self.assertEqual(image_id, msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
@ -148,9 +161,6 @@ class ImageVolumeCacheTestCase(test.TestCase):
def test_get_entry_needs_update(self, mock_volume_by_id):
cache = self._build_cache()
entry = self._build_entry()
volume_ref = {
'host': 'foo@bar#whatever'
}
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
@ -166,7 +176,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
mock_volume_by_id.return_value = mock_volume
found_entry = cache.get_entry(self.context,
volume_ref,
self.volume_ovo,
entry['image_id'],
image_meta)
@ -178,60 +188,56 @@ class ImageVolumeCacheTestCase(test.TestCase):
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.miss', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(volume_ref['host'], msg['payload']['host'])
self.assertEqual(self.volume.host, msg['payload']['host'])
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
def test_create_cache_entry(self):
cache = self._build_cache()
entry = self._build_entry()
volume_ref = {
'id': entry['volume_id'],
'host': entry['host'],
'size': entry['size']
}
image_meta = {
'updated_at': entry['image_updated_at']
}
self.mock_db.image_volume_cache_create.return_value = entry
created_entry = cache.create_cache_entry(self.context,
volume_ref,
self.volume_ovo,
entry['image_id'],
image_meta)
self.assertEqual(entry, created_entry)
self.mock_db.image_volume_cache_create.assert_called_once_with(
self.context,
entry['host'],
self.volume_ovo.host,
self.volume_ovo.cluster_name,
entry['image_id'],
entry['image_updated_at'].replace(tzinfo=None),
entry['volume_id'],
entry['size']
self.volume_ovo.id,
self.volume_ovo.size
)
def test_ensure_space_unlimited(self):
cache = self._build_cache(max_gb=0, max_count=0)
host = 'foo@bar#whatever'
has_space = cache.ensure_space(self.context, 0, host)
has_space = cache.ensure_space(self.context, self.volume)
self.assertTrue(has_space)
has_space = cache.ensure_space(self.context, 500, host)
self.volume.size = 500
has_space = cache.ensure_space(self.context, self.volume)
self.assertTrue(has_space)
def test_ensure_space_no_entries(self):
cache = self._build_cache(max_gb=100, max_count=10)
host = 'foo@bar#whatever'
self.mock_db.image_volume_cache_get_all_for_host.return_value = []
self.mock_db.image_volume_cache_get_all.return_value = []
has_space = cache.ensure_space(self.context, 5, host)
self.volume_ovo.size = 5
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
has_space = cache.ensure_space(self.context, 101, host)
self.volume_ovo.size = 101
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertFalse(has_space)
def test_ensure_space_need_gb(self):
cache = self._build_cache(max_gb=30, max_count=10)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
host = 'foo@bar#whatever'
entries = []
entry1 = self._build_entry(size=12)
@ -240,9 +246,10 @@ class ImageVolumeCacheTestCase(test.TestCase):
entries.append(entry2)
entry3 = self._build_entry(size=10)
entries.append(entry3)
self.mock_db.image_volume_cache_get_all_for_host.return_value = entries
self.mock_db.image_volume_cache_get_all.return_value = entries
has_space = cache.ensure_space(self.context, 15, host)
self.volume_ovo.size = 15
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(2, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
@ -251,16 +258,16 @@ class ImageVolumeCacheTestCase(test.TestCase):
def test_ensure_space_need_count(self):
cache = self._build_cache(max_gb=30, max_count=2)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
host = 'foo@bar#whatever'
entries = []
entry1 = self._build_entry(size=10)
entries.append(entry1)
entry2 = self._build_entry(size=5)
entries.append(entry2)
self.mock_db.image_volume_cache_get_all_for_host.return_value = entries
self.mock_db.image_volume_cache_get_all.return_value = entries
has_space = cache.ensure_space(self.context, 12, host)
self.volume_ovo.size = 12
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(1, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
@ -268,7 +275,6 @@ class ImageVolumeCacheTestCase(test.TestCase):
def test_ensure_space_need_gb_and_count(self):
cache = self._build_cache(max_gb=30, max_count=3)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
host = 'foo@bar#whatever'
entries = []
entry1 = self._build_entry(size=10)
@ -277,9 +283,10 @@ class ImageVolumeCacheTestCase(test.TestCase):
entries.append(entry2)
entry3 = self._build_entry(size=12)
entries.append(entry3)
self.mock_db.image_volume_cache_get_all_for_host.return_value = entries
self.mock_db.image_volume_cache_get_all.return_value = entries
has_space = cache.ensure_space(self.context, 16, host)
self.volume_ovo.size = 16
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(2, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
@ -288,11 +295,11 @@ class ImageVolumeCacheTestCase(test.TestCase):
def test_ensure_space_cant_free_enough_gb(self):
cache = self._build_cache(max_gb=30, max_count=10)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
host = 'foo@bar#whatever'
entries = list(self._build_entry(size=25))
self.mock_db.image_volume_cache_get_all_for_host.return_value = entries
self.mock_db.image_volume_cache_get_all.return_value = entries
has_space = cache.ensure_space(self.context, 50, host)
self.volume_ovo.size = 50
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertFalse(has_space)
mock_delete.assert_not_called()

View File

@ -2923,13 +2923,15 @@ class DBAPIDriverInitiatorDataTestCase(BaseTest):
self._test_insert('key2', 'bar', expected_result=False)
@ddt.ddt
class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
def _validate_entry(self, entry, host, image_id, image_updated_at,
volume_id, size):
def _validate_entry(self, entry, host, cluster_name, image_id,
image_updated_at, volume_id, size):
self.assertIsNotNone(entry)
self.assertIsNotNone(entry['id'])
self.assertEqual(host, entry['host'])
self.assertEqual(cluster_name, entry['cluster_name'])
self.assertEqual(image_id, entry['image_id'])
self.assertEqual(image_updated_at, entry['image_updated_at'])
self.assertEqual(volume_id, entry['volume_id'])
@ -2938,35 +2940,38 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
def test_create_delete_query_cache_entry(self):
host = 'abc@123#poolz'
cluster_name = 'def@123#poolz'
image_id = 'c06764d7-54b0-4471-acce-62e79452a38b'
image_updated_at = datetime.datetime.utcnow()
volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1'
size = 6
entry = db.image_volume_cache_create(self.ctxt, host, image_id,
image_updated_at, volume_id, size)
self._validate_entry(entry, host, image_id, image_updated_at,
volume_id, size)
entry = db.image_volume_cache_create(self.ctxt, host, cluster_name,
image_id, image_updated_at,
volume_id, size)
self._validate_entry(entry, host, cluster_name, image_id,
image_updated_at, volume_id, size)
entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
image_id,
host)
self._validate_entry(entry, host, image_id, image_updated_at,
volume_id, size)
host=host)
self._validate_entry(entry, host, cluster_name, image_id,
image_updated_at, volume_id, size)
entry = db.image_volume_cache_get_by_volume_id(self.ctxt, volume_id)
self._validate_entry(entry, host, image_id, image_updated_at,
volume_id, size)
self._validate_entry(entry, host, cluster_name, image_id,
image_updated_at, volume_id, size)
db.image_volume_cache_delete(self.ctxt, entry['volume_id'])
entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
image_id,
host)
host=host)
self.assertIsNone(entry)
def test_cache_entry_get_multiple(self):
host = 'abc@123#poolz'
cluster_name = 'def@123#poolz'
image_id = 'c06764d7-54b0-4471-acce-62e79452a38b'
image_updated_at = datetime.datetime.utcnow()
volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1'
@ -2976,6 +2981,7 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
for i in range(0, 3):
entries.append(db.image_volume_cache_create(self.ctxt,
host,
cluster_name,
image_id,
image_updated_at,
volume_id,
@ -2984,18 +2990,18 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
# entries. Expect only a single one from the query.
entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
image_id,
host)
self._validate_entry(entry, host, image_id, image_updated_at,
volume_id, size)
host=host)
self._validate_entry(entry, host, cluster_name, image_id,
image_updated_at, volume_id, size)
# We expect to get the same one on subsequent queries due to the
# last_used field being updated each time and ordering by it.
entry_id = entry['id']
entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
image_id,
host)
self._validate_entry(entry, host, image_id, image_updated_at,
volume_id, size)
host=host)
self._validate_entry(entry, host, cluster_name, image_id,
image_updated_at, volume_id, size)
self.assertEqual(entry_id, entry['id'])
# Cleanup
@ -3007,7 +3013,7 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
image_id = 'c06764d7-54b0-4471-acce-62e79452a38b'
entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
image_id,
host)
host=host)
self.assertIsNone(entry)
def test_cache_entry_get_by_volume_id_none(self):
@ -3024,6 +3030,7 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
for i in range(0, 3):
entries.append(db.image_volume_cache_create(self.ctxt,
host,
'cluster-%s' % i,
'image-' + str(i),
image_updated_at,
'vol-' + str(i),
@ -3031,12 +3038,13 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
other_entry = db.image_volume_cache_create(self.ctxt,
'someOtherHost',
'someOtherCluster',
'image-12345',
image_updated_at,
'vol-1234',
size)
found_entries = db.image_volume_cache_get_all_for_host(self.ctxt, host)
found_entries = db.image_volume_cache_get_all(self.ctxt, host=host)
self.assertIsNotNone(found_entries)
self.assertEqual(len(entries), len(found_entries))
for found_entry in found_entries:
@ -3044,6 +3052,7 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
if found_entry['id'] == entry['id']:
self._validate_entry(found_entry,
entry['host'],
entry['cluster_name'],
entry['image_id'],
entry['image_updated_at'],
entry['volume_id'],
@ -3056,9 +3065,36 @@ class DBAPIImageVolumeCacheEntryTestCase(BaseTest):
def test_cache_entry_get_all_for_host_none(self):
    """Querying a host with no cache entries returns an empty list."""
    result = db.image_volume_cache_get_all(self.ctxt, host='abc@123#poolz')
    self.assertEqual([], result)
@ddt.data('host1@backend1#pool1', 'host1@backend1')
def test_cache_entry_include_in_cluster_by_host(self, host):
    """Basic cache include test filtering by host and with full rename."""
    updated_at = datetime.datetime.utcnow()
    # Three entries; only the first matches either of the host filters.
    caches = [
        db.image_volume_cache_create(
            self.ctxt, 'host1@backend1#pool1', 'cluster1@backend1#pool1',
            'image-1', updated_at, 'vol-1', 6),
        db.image_volume_cache_create(
            self.ctxt, 'host1@backend2#pool2', 'cluster1@backend2#pool2',
            'image-2', updated_at, 'vol-2', 6),
        db.image_volume_cache_create(
            self.ctxt, 'host2@backend#pool', 'cluster2@backend#pool',
            'image-3', updated_at, 'vol-3', 6),
    ]
    cluster_name = 'my_cluster'
    changed = db.image_volume_cache_include_in_cluster(
        self.ctxt, cluster_name, partial_rename=False, host=host)
    self.assertEqual(1, changed)
    db_entry = db.image_volume_cache_get_by_volume_id(
        self.ctxt, caches[0].volume_id)
    self.assertEqual(cluster_name, db_entry.cluster_name)
class DBAPIGenericTestCase(BaseTest):
def test_resource_exists_volume(self):

View File

@ -1086,6 +1086,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(clusters.c.frozen.type,
self.BOOL_TYPE)
def _check_089(self, engine, data):
"""Test adding cluster_name to image volume cache table."""
image_cache = db_utils.get_table(engine, 'image_volume_cache_entries')
self.assertIsInstance(image_cache.c.cluster_name.type,
self.VARCHAR_TYPE)
def test_walk_versions(self):
self.walk_versions(False, False)

View File

@ -427,7 +427,9 @@ class VolumeTestCase(base.BaseVolumeTestCase):
@mock.patch('cinder.objects.volume.VolumeList.include_in_cluster')
@mock.patch('cinder.objects.consistencygroup.ConsistencyGroupList.'
'include_in_cluster')
def test_init_host_added_to_cluster(self, cg_include_mock,
@mock.patch('cinder.db.image_volume_cache_include_in_cluster')
def test_init_host_added_to_cluster(self, image_cache_include_mock,
cg_include_mock,
vol_include_mock, vol_get_all_mock,
snap_get_all_mock):
cluster = str(mock.sentinel.cluster)
@ -439,6 +441,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
host=self.volume.host)
cg_include_mock.assert_called_once_with(mock.ANY, cluster,
host=self.volume.host)
image_cache_include_mock.assert_called_once_with(mock.ANY, cluster,
host=self.volume.host)
vol_get_all_mock.assert_called_once_with(
mock.ANY, filters={'cluster_name': cluster})
snap_get_all_mock.assert_called_once_with(
@ -7121,6 +7125,7 @@ class ImageVolumeCacheTestCase(base.BaseVolumeTestCase):
volume_params = {
'status': 'creating',
'host': 'some_host',
'cluster_name': 'some_cluster',
'size': 1
}
volume_api = cinder.volume.api.API()
@ -7130,6 +7135,7 @@ class ImageVolumeCacheTestCase(base.BaseVolumeTestCase):
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
db.image_volume_cache_create(self.context,
volume['host'],
volume_params['cluster_name'],
image_id,
datetime.datetime.utcnow(),
volume['id'],

View File

@ -57,6 +57,7 @@ from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
@ -385,11 +386,14 @@ class VolumeManager(manager.CleanableManager,
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info(_LI('%(num_vols)s volumes and %(num_cgs)s consistency groups '
'from host %(host)s have been included in cluster '
'%(cluster)s.'),
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info(_LI('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'and %(num_cache)s image volume caches from host '
'%(host)s have been included in cluster %(cluster)s.'),
{'num_vols': num_vols, 'num_cgs': num_cgs,
'host': self.host, 'cluster': self.cluster})
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
"""Perform any required initialization."""
@ -1170,14 +1174,12 @@ class VolumeManager(manager.CleanableManager,
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
if not self.image_volume_cache.ensure_space(ctx, volume_ref):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
' %(image)s on %(service)s.'),
{'image': image_id,
'service': volume_ref.service_topic_queue})
return
image_volume = self._clone_image_volume(ctx,
@ -1235,7 +1237,7 @@ class VolumeManager(manager.CleanableManager,
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume = self.db.volume_get(ctx, image_volume.id)
image_volume = objects.Volume.get_by_id(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))