Merge "Migrate fixed_key encryption keys to Barbican"
This commit is contained in:
commit
4a92c2ec8e
|
@ -0,0 +1,162 @@
|
||||||
|
# Copyright 2017 Red Hat, Inc.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import binascii
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
|
||||||
|
from barbicanclient import client as barbican_client
|
||||||
|
from castellan import options as castellan_options
|
||||||
|
from keystoneauth1 import loading as ks_loading
|
||||||
|
from keystoneauth1 import session as ks_session
|
||||||
|
|
||||||
|
from cinder import context
|
||||||
|
from cinder import coordination
|
||||||
|
from cinder import objects
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
MAX_KEY_MIGRATION_ERRORS = 3
|
||||||
|
|
||||||
|
|
||||||
|
class KeyMigrator(object):
    """Migrates ConfKeyManager fixed-key references to Barbican.

    The ConfKeyManager stores a single "fixed_key" in the cinder config
    file, and every volume encrypted with it references the well-known
    all-zeros key ID. This class stores that fixed key as a Barbican
    secret and updates each affected volume (and its snapshots) to
    reference the new Barbican key ID. The key material itself does not
    change; only the key ID recorded in the database changes.
    """

    def __init__(self, conf):
        """Initialize the migrator.

        :param conf: an oslo.config ConfigOpts instance (typically
            cinder's global CONF)
        """
        self.conf = conf
        self.admin_context = context.get_admin_context()
        # The all-zeros UUID is the ConfKeyManager's well-known key ID.
        self.fixed_key_id = '00000000-0000-0000-0000-000000000000'
        # These are populated in handle_key_migration() only when a
        # migration is actually warranted.
        self.fixed_key_bytes = None
        self.fixed_key_length = None

    def handle_key_migration(self, volumes):
        """Migrate this host's volume encryption keys, if appropriate.

        Migration proceeds only when a fixed_key is configured, the
        key_manager backend is Barbican, and there is at least one volume
        associated with this host. Otherwise the reason for skipping is
        logged (and, where useful, the overall migration status).

        :param volumes: list of Volume objects associated with this host
        """
        castellan_options.set_defaults(self.conf)
        # The fixed_key option is registered by the ConfKeyManager module;
        # import it explicitly so its value can be read even when that key
        # manager is not the active backend.
        self.conf.import_opt(name='fixed_key',
                             module_str='cinder.keymgr.conf_key_mgr',
                             group='key_manager')
        fixed_key = self.conf.key_manager.fixed_key
        backend = self.conf.key_manager.backend or ''

        # Reduce a fully-qualified module path to its final component so
        # both spellings of a backend can be compared uniformly.
        backend = backend.split('.')[-1]

        if backend == 'ConfKeyManager':
            LOG.info("Not migrating encryption keys because the "
                     "ConfKeyManager is still in use.")
        elif not fixed_key:
            LOG.info("Not migrating encryption keys because the "
                     "ConfKeyManager's fixed_key is not in use.")
        elif backend != 'barbican' and backend != 'BarbicanKeyManager':
            # Note: There are two ways of specifying the Barbican backend.
            # The long-hand method contains the "BarbicanKeyManager" class
            # name, and the short-hand method is just "barbican" with no
            # module path prefix.
            LOG.warning("Not migrating encryption keys because migration to "
                        "the '%s' key_manager backend is not supported.",
                        backend)
            self._log_migration_status()
        elif not volumes:
            LOG.info("Not migrating encryption keys because there are no "
                     "volumes associated with this host.")
            self._log_migration_status()
        else:
            # fixed_key is a hex string; convert it to raw bytes and record
            # its size in bits for the Barbican secret's bit_length.
            self.fixed_key_bytes = bytes(binascii.unhexlify(fixed_key))
            self.fixed_key_length = len(self.fixed_key_bytes) * 8
            self._migrate_keys(volumes)
            self._log_migration_status()

    def _migrate_keys(self, volumes):
        """Migrate the fixed key for every volume that references it.

        :param volumes: list of Volume objects to examine
        """
        LOG.info("Starting migration of ConfKeyManager keys.")

        # Establish a Barbican client session that will be used for the entire
        # key migration process. Use cinder's own service credentials.
        try:
            ks_loading.register_auth_conf_options(self.conf,
                                                  'keystone_authtoken')
            auth = ks_loading.load_auth_from_conf_options(self.conf,
                                                          'keystone_authtoken')
            sess = ks_session.Session(auth=auth)
            self.barbican = barbican_client.Client(session=sess)
        except Exception as e:
            LOG.error("Aborting encryption key migration due to "
                      "error creating Barbican client: %s", e)
            return

        errors = 0
        for volume in volumes:
            try:
                self._migrate_volume_key(volume)
            except Exception as e:
                LOG.error("Error migrating encryption key: %s", e)
                # NOTE(abishop): There really shouldn't be any soft errors, so
                # if an error occurs migrating one key then chances are they
                # will all fail. This avoids filling the log with the same
                # error in situations where there are many keys to migrate.
                errors += 1
                if errors > MAX_KEY_MIGRATION_ERRORS:
                    LOG.error("Aborting encryption key migration "
                              "(too many errors).")
                    break

    @coordination.synchronized('{volume.id}-{f_name}')
    def _migrate_volume_key(self, volume):
        """Migrate a single volume's key if it references the fixed key.

        Serialized per volume ID via the coordination lock so concurrent
        workers don't migrate the same volume twice.

        :param volume: Volume object to examine
        """
        if volume.encryption_key_id == self.fixed_key_id:
            self._update_encryption_key_id(volume)

    def _update_encryption_key_id(self, volume):
        """Store the fixed key in Barbican and update the DB references.

        Creates a Barbican secret holding the fixed key, grants the
        volume's user access to it, then rewrites the encryption_key_id on
        the volume and all of its snapshots.

        :param volume: Volume object whose key ID is being migrated
        """
        LOG.info("Migrating volume %s encryption key to Barbican", volume.id)

        # Create a Barbican secret using the same fixed_key algorithm.
        secret = self.barbican.secrets.create(algorithm='AES',
                                              bit_length=self.fixed_key_length,
                                              secret_type='symmetric',
                                              mode=None,
                                              payload=self.fixed_key_bytes)
        secret_ref = secret.store()

        # Create a Barbican ACL so the volume's user can access the secret.
        acl = self.barbican.acls.create(entity_ref=secret_ref,
                                        users=[volume.user_id])
        acl.submit()

        # The secret reference is a URI ending in the secret's key ID.
        _, _, encryption_key_id = secret_ref.rpartition('/')
        volume.encryption_key_id = encryption_key_id
        volume.save()

        # TODO(abishop): need to determine if any snapshot creations are
        # in-flight that might be added to the db with the volume's old
        # fixed key ID.
        snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
            self.admin_context,
            volume.id)
        for snapshot in snapshots:
            snapshot.encryption_key_id = encryption_key_id
            snapshot.save()

    def _log_migration_status(self):
        """Log how many volumes still reference the ConfKeyManager key ID."""
        num_to_migrate = len(objects.volume.VolumeList.get_all(
            context=self.admin_context,
            filters={'encryption_key_id': self.fixed_key_id}))
        if num_to_migrate == 0:
            LOG.info("No volumes are using the ConfKeyManager's "
                     "encryption_key_id.")
        else:
            LOG.warning("There are still %d volume(s) using the "
                        "ConfKeyManager's all-zeros encryption key ID.",
                        num_to_migrate)
|
||||||
|
|
||||||
|
|
||||||
|
def migrate_fixed_key(volumes, conf=CONF):
    """Migrate ConfKeyManager fixed-key references for the given volumes.

    Module-level entry point used by the volume manager at startup.

    :param volumes: list of Volume objects associated with this host
    :param conf: configuration object (defaults to the global CONF)
    """
    migrator = KeyMigrator(conf)
    migrator.handle_key_migration(volumes)
|
|
@ -0,0 +1,217 @@
|
||||||
|
# Copyright 2017 Red Hat, Inc.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
"""Tests for encryption key migration."""
|
||||||
|
|
||||||
|
import mock
|
||||||
|
from oslo_config import cfg
|
||||||
|
|
||||||
|
from cinder import db
|
||||||
|
from cinder.keymgr import migration
|
||||||
|
from cinder import objects
|
||||||
|
from cinder.tests.unit import fake_constants as fake
|
||||||
|
from cinder.tests.unit import utils as tests_utils
|
||||||
|
from cinder.tests.unit import volume as base
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
FIXED_KEY_ID = '00000000-0000-0000-0000-000000000000'
|
||||||
|
|
||||||
|
|
||||||
|
class KeyMigrationTestCase(base.BaseVolumeTestCase):
    """Tests for cinder.keymgr.migration (fixed_key to Barbican)."""

    def setUp(self):
        """Configure a fixed_key and the barbican backend for each test."""
        super(KeyMigrationTestCase, self).setUp()
        self.conf = CONF
        # 64 hex chars = a 256-bit key.
        self.fixed_key = '1' * 64
        self.conf.import_opt(name='fixed_key',
                             module_str='cinder.keymgr.conf_key_mgr',
                             group='key_manager')
        self.conf.set_override('fixed_key',
                               self.fixed_key,
                               group='key_manager')
        self.conf.set_override('backend',
                               'barbican',
                               group='key_manager')
        # Volumes on this test's host; refreshed by create_volume().
        self.my_vols = []

    def tearDown(self):
        """Delete all volumes created during the test."""
        for vol in objects.VolumeList.get_all(self.context):
            self.volume.delete_volume(self.context, vol)
        super(KeyMigrationTestCase, self).tearDown()

    def create_volume(self, key_id=FIXED_KEY_ID):
        """Create a volume with the given encryption key ID.

        :param key_id: encryption_key_id to set on the volume, or None to
            leave the volume without one
        :returns: the Volume object for the new volume
        """
        vol = tests_utils.create_volume(self.context, host=self.conf.host)
        vol_id = self.volume.create_volume(self.context, vol)
        if key_id:
            db.volume_update(self.context,
                             vol_id,
                             {'encryption_key_id': key_id})
        self.my_vols = objects.VolumeList.get_all_by_host(self.context,
                                                          self.conf.host)
        # Return a fully baked Volume object (not the partially baked 'vol'
        # and not the DB object).
        return next((v for v in self.my_vols if v.id == vol_id))

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys')
    @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status')
    def test_no_fixed_key(self,
                          mock_log_migration_status,
                          mock_migrate_keys):
        """No migration (and no status log) when fixed_key isn't set."""
        self.create_volume()
        self.conf.set_override('fixed_key', None, group='key_manager')
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        mock_migrate_keys.assert_not_called()
        mock_log_migration_status.assert_not_called()

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys')
    @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status')
    def test_using_conf_key_manager(self,
                                    mock_log_migration_status,
                                    mock_migrate_keys):
        """No migration when the ConfKeyManager is still the backend."""
        self.create_volume()
        self.conf.set_override('backend',
                               'some.ConfKeyManager',
                               group='key_manager')
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        mock_migrate_keys.assert_not_called()
        mock_log_migration_status.assert_not_called()

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys')
    @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status')
    def test_using_barbican_module_path(self,
                                        mock_log_migration_status,
                                        mock_migrate_keys):
        """Migration proceeds with the fully-qualified Barbican backend."""
        # Verify the long-hand method of specifying the Barbican backend
        # is properly parsed.
        self.create_volume()
        self.conf.set_override(
            'backend',
            'castellan.key_manager.barbican_key_manager.BarbicanKeyManager',
            group='key_manager')
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        mock_migrate_keys.assert_called_once_with(self.my_vols)
        mock_log_migration_status.assert_called_once_with()

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys')
    @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status')
    def test_using_unsupported_key_manager(self,
                                           mock_log_migration_status,
                                           mock_migrate_keys):
        """No migration (but status is logged) for an unsupported backend."""
        self.create_volume()
        self.conf.set_override('backend',
                               'some.OtherKeyManager',
                               group='key_manager')
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        mock_migrate_keys.assert_not_called()
        mock_log_migration_status.assert_called_once_with()

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys')
    @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status')
    def test_no_volumes(self,
                        mock_log_migration_status,
                        mock_migrate_keys):
        """No migration (but status is logged) when the host has no volumes."""
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        mock_migrate_keys.assert_not_called()
        mock_log_migration_status.assert_called_once_with()

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_volume_key')
    @mock.patch('barbicanclient.client.Client')
    def test_fail_no_barbican_client(self,
                                     mock_barbican_client,
                                     mock_migrate_volume_key):
        """Migration aborts cleanly when the Barbican client can't be built."""
        self.create_volume()
        mock_barbican_client.side_effect = Exception
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        mock_migrate_volume_key.assert_not_called()

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_volume_key')
    @mock.patch('barbicanclient.client.Client')
    def test_fail_too_many_errors(self,
                                  mock_barbican_client,
                                  mock_migrate_volume_key):
        """Migration stops after exceeding MAX_KEY_MIGRATION_ERRORS."""
        for n in range(0, (migration.MAX_KEY_MIGRATION_ERRORS + 3)):
            self.create_volume()
        mock_migrate_volume_key.side_effect = Exception
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        # One extra attempt is made before the error limit check trips.
        self.assertEqual(mock_migrate_volume_key.call_count,
                         (migration.MAX_KEY_MIGRATION_ERRORS + 1))

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys')
    def test_migration_status_more_to_migrate(self,
                                              mock_migrate_keys):
        """Status report warns when volumes still use the fixed key ID."""
        mock_log = self.mock_object(migration, 'LOG')
        self.create_volume()
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)

        # Look for one warning (more to migrate) and no info log messages.
        mock_log.info.assert_not_called()
        self.assertEqual(mock_log.warning.call_count, 1)

    @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys')
    def test_migration_status_all_done(self,
                                       mock_migrate_keys):
        """Status report logs info when no volumes use the fixed key ID."""
        mock_log = self.mock_object(migration, 'LOG')
        self.create_volume(key_id=fake.ENCRYPTION_KEY_ID)
        migration.migrate_fixed_key(self.my_vols, conf=self.conf)

        # Look for one info (all done) and no warning log messages.
        mock_log.warning.assert_not_called()
        self.assertEqual(mock_log.info.call_count, 1)

    @mock.patch(
        'cinder.keymgr.migration.KeyMigrator._update_encryption_key_id')
    @mock.patch('barbicanclient.client.Client')
    def test_migrate_fixed_key_volumes(self,
                                       mock_barbican_client,
                                       mock_update_encryption_key_id):
        """Only volumes referencing the fixed key ID are migrated."""
        # Create two volumes with fixed key ID that needs to be migrated, and
        # a couple of volumes with key IDs that don't need to be migrated,
        # or no key ID.
        vol_1 = self.create_volume()
        self.create_volume(key_id=fake.UUID1)
        self.create_volume(key_id=None)
        vol_2 = self.create_volume()
        self.create_volume(key_id=fake.UUID2)

        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        calls = [mock.call(vol_1), mock.call(vol_2)]
        mock_update_encryption_key_id.assert_has_calls(calls, any_order=True)
        self.assertEqual(mock_update_encryption_key_id.call_count, len(calls))

    @mock.patch('barbicanclient.client.Client')
    def test_update_encryption_key_id(self,
                                      mock_barbican_client):
        """Volume and snapshot key IDs are rewritten to the Barbican ID."""
        vol = self.create_volume()

        snap_ids = [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID]
        for snap_id in snap_ids:
            tests_utils.create_snapshot(self.context, vol.id, id=snap_id)

        # Barbican's secret.store() returns a URI that contains the
        # secret's key ID at the end.
        secret_ref = 'http://some/path/' + fake.ENCRYPTION_KEY_ID
        mock_secret = mock.MagicMock()
        mock_secret.store.return_value = secret_ref

        mock_barbican_client.return_value.secrets.create.return_value \
            = mock_secret

        migration.migrate_fixed_key(self.my_vols, conf=self.conf)
        vol_db = db.volume_get(self.context, vol.id)
        self.assertEqual(fake.ENCRYPTION_KEY_ID, vol_db['encryption_key_id'])

        for snap_id in snap_ids:
            snap_db = db.snapshot_get(self.context, snap_id)
            self.assertEqual(fake.ENCRYPTION_KEY_ID,
                             snap_db['encryption_key_id'])
|
|
@ -268,3 +268,15 @@ class VolumeInitHostTestCase(base.BaseVolumeTestCase):
|
||||||
mock.ANY, filters={'cluster_name': cluster})
|
mock.ANY, filters={'cluster_name': cluster})
|
||||||
snap_get_all_mock.assert_called_once_with(
|
snap_get_all_mock.assert_called_once_with(
|
||||||
mock.ANY, filters={'cluster_name': cluster})
|
mock.ANY, filters={'cluster_name': cluster})
|
||||||
|
|
||||||
|
    @mock.patch('cinder.keymgr.migration.migrate_fixed_key')
    @mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes')
    @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool')
    def test_init_host_key_migration(self,
                                     mock_add_threadpool,
                                     mock_get_my_volumes,
                                     mock_migrate_fixed_key):
        """init_host queues fixed-key migration on the thread pool."""

        self.volume.init_host(service_id=self.service_id)
        # Migration must be dispatched asynchronously with this host's
        # volumes so it doesn't block service startup.
        mock_add_threadpool.assert_called_once_with(mock_migrate_fixed_key,
                                                    mock_get_my_volumes())
|
||||||
|
|
|
@ -65,6 +65,7 @@ from cinder.i18n import _
|
||||||
from cinder.image import cache as image_cache
|
from cinder.image import cache as image_cache
|
||||||
from cinder.image import glance
|
from cinder.image import glance
|
||||||
from cinder.image import image_utils
|
from cinder.image import image_utils
|
||||||
|
from cinder.keymgr import migration as key_migration
|
||||||
from cinder import manager
|
from cinder import manager
|
||||||
from cinder.message import api as message_api
|
from cinder.message import api as message_api
|
||||||
from cinder.message import message_field
|
from cinder.message import message_field
|
||||||
|
@ -495,6 +496,10 @@ class VolumeManager(manager.CleanableManager,
|
||||||
backend_name = vol_utils.extract_host(self.service_topic_queue)
|
backend_name = vol_utils.extract_host(self.service_topic_queue)
|
||||||
image_utils.cleanup_temporary_file(backend_name)
|
image_utils.cleanup_temporary_file(backend_name)
|
||||||
|
|
||||||
|
# Migrate any ConfKeyManager keys based on fixed_key to the currently
|
||||||
|
# configured key manager.
|
||||||
|
self._add_to_threadpool(key_migration.migrate_fixed_key, volumes)
|
||||||
|
|
||||||
# collect and publish service capabilities
|
# collect and publish service capabilities
|
||||||
self.publish_service_capabilities(ctxt)
|
self.publish_service_capabilities(ctxt)
|
||||||
LOG.info("Driver initialization completed successfully.",
|
LOG.info("Driver initialization completed successfully.",
|
||||||
|
|
|
@ -25,9 +25,12 @@ import re
|
||||||
import time
|
import time
|
||||||
import uuid
|
import uuid
|
||||||
|
|
||||||
|
from castellan.common.credentials import keystone_password
|
||||||
from castellan.common import exception as castellan_exception
|
from castellan.common import exception as castellan_exception
|
||||||
|
from castellan import key_manager as castellan_key_manager
|
||||||
import eventlet
|
import eventlet
|
||||||
from eventlet import tpool
|
from eventlet import tpool
|
||||||
|
from keystoneauth1 import loading as ks_loading
|
||||||
from oslo_concurrency import processutils
|
from oslo_concurrency import processutils
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
@ -913,6 +916,23 @@ def delete_encryption_key(context, key_manager, encryption_key_id):
|
||||||
key_manager.delete(context, encryption_key_id)
|
key_manager.delete(context, encryption_key_id)
|
||||||
except castellan_exception.ManagedObjectNotFoundError:
|
except castellan_exception.ManagedObjectNotFoundError:
|
||||||
pass
|
pass
|
||||||
|
except castellan_exception.KeyManagerError:
|
||||||
|
LOG.info("First attempt to delete key id %s failed, retrying with "
|
||||||
|
"cinder's service context.", encryption_key_id)
|
||||||
|
conf = CONF
|
||||||
|
ks_loading.register_auth_conf_options(conf, 'keystone_authtoken')
|
||||||
|
service_context = keystone_password.KeystonePassword(
|
||||||
|
password=conf.keystone_authtoken.password,
|
||||||
|
auth_url=conf.keystone_authtoken.auth_url,
|
||||||
|
username=conf.keystone_authtoken.username,
|
||||||
|
user_domain_name=conf.keystone_authtoken.user_domain_name,
|
||||||
|
project_name=conf.keystone_authtoken.project_name,
|
||||||
|
project_domain_name=conf.keystone_authtoken.project_domain_name)
|
||||||
|
try:
|
||||||
|
castellan_key_manager.API(conf).delete(service_context,
|
||||||
|
encryption_key_id)
|
||||||
|
except castellan_exception.ManagedObjectNotFoundError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
def clone_encryption_key(context, key_manager, encryption_key_id):
|
def clone_encryption_key(context, key_manager, encryption_key_id):
|
||||||
|
|
|
@ -0,0 +1,13 @@
|
||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
When Barbican is the encryption key_manager backend, any encryption keys
|
||||||
|
associated with the legacy ConfKeyManager will be automatically migrated
|
||||||
|
to Barbican. All database references to the ConfKeyManager's all-zeros key
|
||||||
|
ID will be updated with a Barbican key ID. The encryption keys do not
|
||||||
|
change. Only the encryption key ID changes.
|
||||||
|
|
||||||
|
Key migration is initiated on service startup, and entries in the
|
||||||
|
cinder-volume log will indicate the migration status. Log entries will
|
||||||
|
indicate when a volume's encryption key ID has been migrated to Barbican,
|
||||||
|
and a summary log message will indicate when key migration has finished.
|
Loading…
Reference in New Issue