Option to restrict amp glance image owner
This patch adds an optional configuration setting that allows an operator to restrict the amphora glance image selection to a specific owner ID. This is a recommended security setting for clouds that allow user-uploadable images.

Change-Id: I73347b5b3e868d13974cd6ca6bada9cdf75773fe
Closes-Bug: #1620629
parent 2eaabf06d0
commit d7d062a47a
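Editor's note, not part of the patch: for operators reading this change, the new option lives in the [controller_worker] section of octavia.conf. A minimal sketch of how it might be set, using a placeholder project ID rather than a real one:

    [controller_worker]
    # Placeholder project ID; use the project that owns your amphora
    # images (the devstack hunk below derives it with glance image-show).
    amp_image_owner_id = 1234567890abcdef1234567890abcdef

An empty value (the default) leaves image selection unrestricted, which matches the behavior before this patch.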
@@ -25,6 +25,11 @@ function build_octavia_worker_image {
         $OCTAVIA_DIR/diskimage-create/diskimage-create.sh -s 2 -o $OCTAVIA_AMP_IMAGE_FILE
     fi
     upload_image file://${OCTAVIA_AMP_IMAGE_FILE} $TOKEN
+
+    image_id=$(glance image-list --property-filter name=${OCTAVIA_AMP_IMAGE_NAME} | awk '/ amphora-x64-haproxy / {print $2}')
+    owner_id=$(glance image-show ${image_id} | awk '/ owner / {print $4}')
+    iniset $OCTAVIA_CONF controller_worker amp_image_owner_id ${owner_id}
+
 }

 function create_octavia_accounts {
@@ -135,6 +135,9 @@
 # parameters is needed. Using tags is the recommended way to refer to images.
 # amp_image_id =
 # amp_image_tag =
+# Optional owner ID used to restrict glance images to one owner ID.
+# This is a recommended security setting.
+# amp_image_owner_id =
 # Nova parameters to use when booting amphora
 # amp_flavor_id =
 # amp_ssh_key_name =
@@ -216,6 +216,10 @@ controller_worker_opts = [
                deprecated_for_removal=True,
                deprecated_reason='Superseded by amp_image_tag option.',
                help=_('Glance image id for the Amphora image to boot')),
+    cfg.StrOpt('amp_image_owner_id',
+               default='',
+               help=_('Restrict glance image selection to a specific '
+                      'owner ID. This is a recommended security setting.')),
     cfg.StrOpt('amp_ssh_key_name',
                default='',
                help=_('SSH key name used to boot the Amphora')),
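As a small illustration of how the registered option is consumed (an editor's sketch, not code from this patch): any component that imports octavia.common.config can read the value through oslo.config, and the empty-string default means no owner restriction is applied.

    # Sketch only; assumes octavia.common.config has been imported so that
    # controller_worker_opts, including amp_image_owner_id, is registered.
    from oslo_config import cfg

    CONF = cfg.CONF


    def get_amp_image_owner_id():
        # Returns '' when the operator has not restricted image ownership.
        return CONF.controller_worker.amp_image_owner_id

The compute task change further down passes exactly this value into the compute driver's build() call.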
@@ -331,3 +331,4 @@ NETNS_PRIMARY_INTERFACE = 'eth1'
 AMP_ACTION_START = 'start'
 AMP_ACTION_STOP = 'stop'
 AMP_ACTION_RELOAD = 'reload'
+GLANCE_IMAGE_ACTIVE = 'active'
@@ -22,7 +22,7 @@ class ComputeBase(object):

     @abc.abstractmethod
     def build(self, name="amphora_name", amphora_flavor=None,
-              image_id=None, image_tag=None,
+              image_id=None, image_tag=None, image_owner=None,
               key_name=None, sec_groups=None, network_ids=None,
               config_drive_files=None, user_data=None, server_group_id=None):
         """Build a new amphora.
@@ -28,22 +28,22 @@ class NoopManager(object):
         self.computeconfig = {}

     def build(self, name="amphora_name", amphora_flavor=None,
-              image_id=None, image_tag=None,
+              image_id=None, image_tag=None, image_owner=None,
               key_name=None, sec_groups=None, network_ids=None,
               config_drive_files=None, user_data=None, port_ids=None,
               server_group_id=None):
         LOG.debug("Compute %s no-op, build name %s, amphora_flavor %s, "
-                  "image_id %s, image_tag %s, key_name %s, sec_groups %s, "
-                  "network_ids %s, config_drive_files %s, user_data %s, "
-                  "port_ids %s, server_group_id %s",
+                  "image_id %s, image_tag %s, image_owner %s, key_name %s, "
+                  "sec_groups %s, network_ids %s, config_drive_files %s, "
+                  "user_data %s, port_ids %s, server_group_id %s",
                   self.__class__.__name__,
-                  name, amphora_flavor, image_id, image_tag,
+                  name, amphora_flavor, image_id, image_tag, image_owner,
                   key_name, sec_groups, network_ids, config_drive_files,
                   user_data, port_ids, server_group_id)
         self.computeconfig[(name, amphora_flavor, image_id, image_tag,
-                            key_name, user_data)] = (
+                            image_owner, key_name, user_data)] = (
             name, amphora_flavor,
-            image_id, image_tag, key_name, sec_groups,
+            image_id, image_tag, image_owner, key_name, sec_groups,
             network_ids, config_drive_files,
             user_data, port_ids, 'build')
         compute_id = uuidutils.generate_uuid()
@@ -87,13 +87,13 @@ class NoopComputeDriver(driver_base.ComputeBase):
         self.driver = NoopManager()

     def build(self, name="amphora_name", amphora_flavor=None,
-              image_id=None, image_tag=None,
+              image_id=None, image_tag=None, image_owner=None,
               key_name=None, sec_groups=None, network_ids=None,
               config_drive_files=None, user_data=None, port_ids=None,
               server_group_id=None):

         compute_id = self.driver.build(name, amphora_flavor,
-                                       image_id, image_tag,
+                                       image_id, image_tag, image_owner,
                                        key_name, sec_groups, network_ids,
                                        config_drive_files, user_data, port_ids,
                                        server_group_id)
@@ -32,11 +32,21 @@ CONF.import_group('networking', 'octavia.common.config')
 CONF.import_group('nova', 'octavia.common.config')


-def _extract_amp_image_id_by_tag(client, image_tag):
-    images = list(client.images.list(
-        filters={'tag': [image_tag]},
-        sort='created_at:desc',
-        limit=2))
+def _extract_amp_image_id_by_tag(client, image_tag, image_owner):
+    if image_owner:
+        images = list(client.images.list(
+            filters={'tag': [image_tag],
+                     'owner': image_owner,
+                     'status': constants.GLANCE_IMAGE_ACTIVE},
+            sort='created_at:desc',
+            limit=2))
+    else:
+        images = list(client.images.list(
+            filters={'tag': [image_tag],
+                     'status': constants.GLANCE_IMAGE_ACTIVE},
+            sort='created_at:desc',
+            limit=2))
+
     if not images:
         raise exceptions.GlanceNoTaggedImages(tag=image_tag)
     image_id = images[0]['id']
@@ -50,15 +60,15 @@ def _extract_amp_image_id_by_tag(client, image_tag):
     return image_id


-def _get_image_uuid(client, image_id, image_tag):
+def _get_image_uuid(client, image_id, image_tag, image_owner):
     if image_id:
         if image_tag:
             LOG.warning(
                 _LW("Both amp_image_id and amp_image_tag options defined. "
-                    "Using the former."))
+                    "Using the amp_image_id."))
         return image_id

-    return _extract_amp_image_id_by_tag(client, image_tag)
+    return _extract_amp_image_id_by_tag(client, image_tag, image_owner)


 class VirtualMachineManager(compute_base.ComputeBase):
@@ -84,7 +94,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
         self.server_groups = self._nova_client.server_groups

     def build(self, name="amphora_name", amphora_flavor=None,
-              image_id=None, image_tag=None,
+              image_id=None, image_tag=None, image_owner=None,
               key_name=None, sec_groups=None, network_ids=None,
               port_ids=None, config_drive_files=None, user_data=None,
               server_group_id=None):
@@ -126,7 +136,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
                 "group": server_group_id}

             image_id = _get_image_uuid(
-                self._glance_client, image_id, image_tag)
+                self._glance_client, image_id, image_tag, image_owner)
             amphora = self.manager.create(
                 name=name, image=image_id, flavor=amphora_flavor,
                 key_name=key_name, security_groups=sec_groups,
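Editor's side note rather than part of the change: the two images.list() calls above differ only in whether the 'owner' filter is present, so an equivalent formulation could build the filter dictionary once and make a single call. A hedged sketch under that assumption, reusing the same octavia constants and exceptions that the driver imports:

    # Sketch of an equivalent lookup; mirrors the patched
    # _extract_amp_image_id_by_tag but applies the owner filter
    # conditionally before a single glanceclient images.list() call.
    from octavia.common import constants
    from octavia.common import exceptions


    def extract_amp_image_id_by_tag(client, image_tag, image_owner=None):
        filters = {'tag': [image_tag],
                   'status': constants.GLANCE_IMAGE_ACTIVE}
        if image_owner:
            # Restrict candidates to images owned by the configured project.
            filters['owner'] = image_owner
        images = list(client.images.list(
            filters=filters, sort='created_at:desc', limit=2))
        if not images:
            raise exceptions.GlanceNoTaggedImages(tag=image_tag)
        # Newest active image wins, as in the driver code above.
        return images[0]['id']

Whether to keep the explicit if/else of the patch or fold the filter is a style choice; the behavior is the same either way.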
@@ -83,6 +83,7 @@ class ComputeCreate(BaseComputeTask):
                 amphora_flavor=CONF.controller_worker.amp_flavor_id,
                 image_id=CONF.controller_worker.amp_image_id,
                 image_tag=CONF.controller_worker.amp_image_tag,
+                image_owner=CONF.controller_worker.amp_image_owner_id,
                 key_name=key_name,
                 sec_groups=CONF.controller_worker.amp_secgroup_list,
                 network_ids=network_ids,
@@ -35,18 +35,19 @@ class Test_GetImageUuid(base.TestCase):
         with mock.patch.object(nova_common,
                                '_extract_amp_image_id_by_tag',
                                return_value='fakeid') as extract:
-            image_id = nova_common._get_image_uuid(client, '', 'faketag')
+            image_id = nova_common._get_image_uuid(client, '', 'faketag', None)
             self.assertEqual('fakeid', image_id)
-            extract.assert_called_with(client, 'faketag')
+            extract.assert_called_with(client, 'faketag', None)

     def test__get_image_uuid_notag(self):
         client = mock.Mock()
-        image_id = nova_common._get_image_uuid(client, 'fakeid', '')
+        image_id = nova_common._get_image_uuid(client, 'fakeid', '', None)
         self.assertEqual('fakeid', image_id)

     def test__get_image_uuid_id_beats_tag(self):
         client = mock.Mock()
-        image_id = nova_common._get_image_uuid(client, 'fakeid', 'faketag')
+        image_id = nova_common._get_image_uuid(client, 'fakeid',
+                                               'faketag', None)
         self.assertEqual('fakeid', image_id)


@@ -62,7 +63,8 @@ class Test_ExtractAmpImageIdByTag(base.TestCase):
         self.client.images.list.return_value = []
         self.assertRaises(
             exceptions.GlanceNoTaggedImages,
-            nova_common._extract_amp_image_id_by_tag, self.client, 'faketag')
+            nova_common._extract_amp_image_id_by_tag, self.client,
+            'faketag', None)

     def test_single_image(self):
         images = [
@@ -70,7 +72,7 @@ class Test_ExtractAmpImageIdByTag(base.TestCase):
         ]
         self.client.images.list.return_value = images
         image_id = nova_common._extract_amp_image_id_by_tag(self.client,
-                                                            'faketag')
+                                                            'faketag', None)
         self.assertIn(image_id, images[0]['id'])

     def test_multiple_images_returns_one_of_images(self):
@@ -80,7 +82,7 @@ class Test_ExtractAmpImageIdByTag(base.TestCase):
         ]
         self.client.images.list.return_value = images
         image_id = nova_common._extract_amp_image_id_by_tag(self.client,
-                                                            'faketag')
+                                                            'faketag', None)
         self.assertIn(image_id, [image['id'] for image in images])


@@ -87,6 +87,11 @@ class TestComputeTasks(base.TestCase):
     @mock.patch('stevedore.driver.DriverManager.driver')
     def test_compute_create(self, mock_driver, mock_conf, mock_jinja):

+        image_owner_id = uuidutils.generate_uuid()
+        conf = oslo_fixture.Config(cfg.CONF)
+        conf.config(group="controller_worker",
+                    amp_image_owner_id=image_owner_id)
+
         createcompute = compute_tasks.ComputeCreate()

         mock_driver.build.return_value = COMPUTE_ID
@@ -100,6 +105,7 @@ class TestComputeTasks(base.TestCase):
             amphora_flavor=AMP_FLAVOR_ID,
             image_id=AMP_IMAGE_ID,
             image_tag=AMP_IMAGE_TAG,
+            image_owner=image_owner_id,
             key_name=AMP_SSH_KEY_NAME,
             sec_groups=AMP_SEC_GROUPS,
             network_ids=AMP_NET,
@@ -134,6 +140,9 @@ class TestComputeTasks(base.TestCase):

         createcompute.revert(COMPUTE_ID, _amphora_mock.id)

+        conf.config(group="controller_worker",
+                    amp_image_owner_id='')
+
     @mock.patch('jinja2.Environment.get_template')
     @mock.patch('octavia.amphorae.backends.agent.'
                 'agent_jinja_cfg.AgentJinjaTemplater.'
@@ -160,6 +169,7 @@ class TestComputeTasks(base.TestCase):
             amphora_flavor=AMP_FLAVOR_ID,
             image_id=AMP_IMAGE_ID,
             image_tag=AMP_IMAGE_TAG,
+            image_owner='',
             key_name=AMP_SSH_KEY_NAME,
             sec_groups=AMP_SEC_GROUPS,
             network_ids=AMP_NET,
@@ -219,6 +229,7 @@ class TestComputeTasks(base.TestCase):
             amphora_flavor=AMP_FLAVOR_ID,
             image_id=AMP_IMAGE_ID,
             image_tag=AMP_IMAGE_TAG,
+            image_owner='',
             key_name=None,
             sec_groups=AMP_SEC_GROUPS,
             network_ids=AMP_NET,
@@ -276,6 +287,7 @@ class TestComputeTasks(base.TestCase):
             amphora_flavor=AMP_FLAVOR_ID,
             image_id=AMP_IMAGE_ID,
             image_tag=AMP_IMAGE_TAG,
+            image_owner='',
             key_name=AMP_SSH_KEY_NAME,
             sec_groups=AMP_SEC_GROUPS,
             network_ids=AMP_NET,
@@ -0,0 +1,6 @@
+---
+security:
+  - Allows the operator to optionally restrict the amphora
+    glance image selection to a specific owner id.
+    This is a recommended security setting for clouds that
+    allow user uploadable images.