ec2 collector: attempt config-drive before metadata server

This change first attempts to get ec2 metadata from a v2 config
drive, falling back to the nova metadata server if no config drive
is detected.
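
Roughly, the new lookup order in the ec2 collector is as follows (a
simplified sketch; fetch_from_server stands in for the existing
metadata-server request and is not a real function in the module):

    from os_collect_config import config_drive

    def collect_ec2_metadata(fetch_from_server):
        # Prefer a local v2 config drive when one is present.
        md = config_drive.get_metadata()
        if md:
            return [('ec2', md)]
        # Otherwise fall back to the nova metadata server.
        return [('ec2', fetch_from_server())]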

Config drive is enabled on overcloud nodes, so using it for
os-collect-config allows the undercloud nova metadata server to be
disabled.

V2 of the config-drive format was released in OpenStack Folsom, so a
decision was made not to support V1 discovery at all. This makes the
block device candidate selection much simpler than cloud-init's[1].

[1] https://git.launchpad.net/cloud-init/tree/cloudinit/sources/DataSourceConfigDrive.py#n219
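
With only V2 to discover, candidate selection boils down to a label and
filesystem-type check on blkid -o export output. A standalone sketch of
that check (the real logic lives in the new config_drive module below):

    import subprocess

    def find_v2_config_drive():
        # A device is a v2 config-drive candidate when blkid reports
        # LABEL=config-2 and TYPE of vfat or iso9660.
        out = subprocess.check_output(['blkid', '-o', 'export']).decode()
        for block in out.split('\n\n'):
            fields = dict(line.split('=', 1)
                          for line in block.splitlines() if '=' in line)
            if (fields.get('LABEL') == 'config-2' and
                    fields.get('TYPE') in ('vfat', 'iso9660')):
                return fields.get('DEVNAME')
        return None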

Change-Id: I2aec0544fe40c2e901aee38c79bb838a627d20da
Closes-Bug: #1619074
Steve Baker 2016-09-14 10:01:27 +12:00
parent a950edb320
commit 76975f04e5
5 changed files with 411 additions and 47 deletions


@@ -0,0 +1,176 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import subprocess
import tempfile
from oslo_log import log
logger = log.getLogger('os-collect-config')
PROC_MOUNTS_PATH = '/proc/mounts'
class BlockDevice(object):
devname = None
type = None
label = None
mountpoint = None
unmount = False
ATTR_MAP = {
'DEVNAME': 'devname',
'TYPE': 'type',
'LABEL': 'label'
}
@staticmethod
def parse_shell_var(line):
# parse shell-style KEY=value
try:
ieq = line.index('=')
except (ValueError, AttributeError):
return None, None
value = line[ieq + 1:]
# unescape backslash escaped spaces
value = value.replace('\\ ', ' ')
return line[:ieq], value
@classmethod
def from_blkid_export(cls, export_str):
'''Construct BlockDevice from export formatted blkid output.'''
bd = cls()
for line in export_str.splitlines():
var, value = cls.parse_shell_var(line)
if var in cls.ATTR_MAP:
setattr(bd, cls.ATTR_MAP[var], value)
return bd
def config_drive_candidate(self):
'''Whether this block device is a v2 config-drive.'''
return self.label == 'config-2' and self.type in (
'vfat', 'iso9660')
def ensure_mounted(self):
'''Finds an existing mountpoint or mounts to a temp directory.'''
self.unmount = False
# check if already mounted, if so use that
with open(PROC_MOUNTS_PATH, 'r') as f:
for line in f.read().splitlines():
values = line.split()
if values[0] == self.devname:
self.mountpoint = values[1]
logger.debug('Found existing mounted config-drive: %s' %
self.mountpoint)
return
# otherwise mount readonly to a temp directory
self.mountpoint = tempfile.mkdtemp(prefix='config-2-')
cmd = ['mount', self.devname, self.mountpoint, '-o', 'ro']
logger.debug('Mounting %s at: %s' % (self.devname, self.mountpoint))
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
logger.error('Problem running "%s": %s', ' '.join(cmd), e)
os.rmdir(self.mountpoint)
self.mountpoint = None
else:
self.unmount = True
def cleanup(self):
'''Unmounts device if mounted by ensure_mounted.'''
if not self.unmount:
self.mountpoint = None
return
if not self.mountpoint:
self.unmount = False
return
cmd = ['umount', '-l', self.mountpoint]
logger.debug('Unmounting: %s' % self.mountpoint)
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
logger.error('Problem running "%s": %s', ' '.join(cmd), e)
else:
os.rmdir(self.mountpoint)
self.mountpoint = None
self.unmount = False
def get_metadata(self):
'''Load and return ec2/latest/meta-data.json from config drive.'''
try:
self.ensure_mounted()
if not self.mountpoint:
return {}
md_path = os.path.join(self.mountpoint,
'ec2', 'latest', 'meta-data.json')
if not os.path.isfile(md_path):
logger.warn('No expected file at path: %s' % md_path)
return {}
with open(md_path, 'r') as f:
return json.load(f)
except Exception as e:
logger.error('Problem getting metadata: %s', e)
return {}
finally:
self.cleanup()
def __repr__(self):
return '%s: TYPE="%s" LABEL="%s"' % (self.devname,
self.type,
self.label)
def all_block_devices():
'''Run blkid and yield a BlockDevice for all devices.'''
try:
cmd = ['blkid', '-o', 'export']
out = subprocess.check_output(cmd)
except Exception as e:
logger.error('Problem running "%s": %s', ' '.join(cmd), e)
else:
# with -o export, devices are separated by a blank line
for device in out.split('\n\n'):
yield BlockDevice.from_blkid_export(device)
def config_drive():
"""Return the first device expected to contain a v2 config drive.
Disk needs to be:
* either vfat or iso9660 formatted
* labeled with 'config-2'
"""
for bd in all_block_devices():
if bd.config_drive_candidate():
return bd
def get_metadata():
"""Return discovered config drive metadata, or an empty dict."""
bd = config_drive()
if bd:
return bd.get_metadata()
return {}
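
For orientation, a short usage sketch of the new module's public helpers
(illustrative only, not part of the commit):

    from os_collect_config import config_drive

    bd = config_drive.config_drive()        # first v2 config-drive device, or None
    if bd:
        print(bd)                           # e.g. /dev/sr0: TYPE="iso9660" LABEL="config-2"
    metadata = config_drive.get_metadata()  # ec2 metadata dict, or {} if no drive is found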


@@ -21,6 +21,7 @@ from oslo_log import log
from os_collect_config import cache
from os_collect_config import common
from os_collect_config import config_drive
from os_collect_config import exc
EC2_METADATA_URL = 'http://169.254.169.254/latest/meta-data'
@@ -70,5 +71,10 @@ class Collector(object):
metadata = json.load(f)
if metadata:
return [('ec2', metadata)]
md = config_drive.get_metadata()
if md:
return [('ec2', md)]
root_url = '%s/' % (CONF.ec2.metadata_url)
return [('ec2', self._fetch_metadata(root_url, CONF.ec2.timeout))]


@@ -22,12 +22,14 @@ import tempfile
import extras
import fixtures
import mock
from oslo_config import cfg
import testtools
from testtools import matchers
from os_collect_config import cache
from os_collect_config import collect
from os_collect_config import config_drive
from os_collect_config import exc
from os_collect_config.tests import test_cfn
from os_collect_config.tests import test_ec2
@@ -82,8 +84,10 @@ class TestCollect(testtools.TestCase):
'discover_class': test_heat.FakeKeystoneDiscover
},
}
return collect.__main__(args=fake_args,
collector_kwargs_map=collector_kwargs_map)
with mock.patch.object(config_drive, 'get_metadata') as gm:
gm.return_value = {}
return collect.__main__(args=fake_args,
collector_kwargs_map=collector_kwargs_map)
def _fake_popen_call_main(self, occ_args):
calls = []
@@ -329,6 +333,7 @@ class TestCollect(testtools.TestCase):
class TestCollectAll(testtools.TestCase):
def setUp(self):
super(TestCollectAll, self).setUp()
self.log = self.useFixture(fixtures.FakeLogger())
@@ -383,10 +388,12 @@ class TestCollectAll(testtools.TestCase):
}
if collectors is None:
collectors = cfg.CONF.collectors
return collect.collect_all(
collectors,
store=store,
collector_kwargs_map=collector_kwargs_map)
with mock.patch.object(config_drive, 'get_metadata') as gm:
gm.return_value = {}
return collect.collect_all(
collectors,
store=store,
collector_kwargs_map=collector_kwargs_map)
def _test_collect_all_store(self, collector_kwargs_map=None,
expected_changed=None):


@@ -0,0 +1,150 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import subprocess
import fixtures
import mock
import testtools
from os_collect_config import config_drive
from os_collect_config.tests import test_ec2
BLKID_CONFIG_DRIVE = '''DEVNAME=/dev/sr0
UUID=2016-09-12-02-14-09-00
LABEL=config-2
TYPE=iso9660'''
BLKID_RESPONSE = BLKID_CONFIG_DRIVE + '''
DEVNAME=/dev/block/253:1
UUID=f13d84b4-c756-4d89-9d5e-6b534397aa14
TYPE=xfs
'''
class TestConfigDrive(testtools.TestCase):
def setUp(self):
super(TestConfigDrive, self).setUp()
self.log = self.useFixture(fixtures.FakeLogger())
@mock.patch.object(subprocess, 'check_output')
def test_all_devices(self, co):
co.return_value = BLKID_RESPONSE
bds = list(config_drive.all_block_devices())
self.assertEqual(2, len(bds))
self.assertEqual('/dev/sr0', bds[0].devname)
self.assertEqual('iso9660', bds[0].type)
self.assertEqual('config-2', bds[0].label)
self.assertTrue(bds[0].config_drive_candidate())
self.assertEqual('/dev/sr0: TYPE="iso9660" LABEL="config-2"',
str(bds[0]))
self.assertEqual('/dev/block/253:1', bds[1].devname)
self.assertEqual('xfs', bds[1].type)
self.assertIsNone(bds[1].label)
self.assertFalse(bds[1].config_drive_candidate())
self.assertEqual('/dev/block/253:1: TYPE="xfs" LABEL="None"',
str(bds[1]))
@mock.patch.object(subprocess, 'check_output')
def test_config_drive(self, co):
co.return_value = BLKID_RESPONSE
bd = config_drive.config_drive()
self.assertTrue(bd.config_drive_candidate())
self.assertEqual('/dev/sr0: TYPE="iso9660" LABEL="config-2"',
str(bd))
def test_parse_shell_var(self):
psv = config_drive.BlockDevice.parse_shell_var
self.assertEqual(('foo', 'bar'), psv('foo=bar'))
self.assertEqual(('foo', 'bar=baz'), psv('foo=bar=baz'))
self.assertEqual(('foo', 'bar baz'), psv('foo=bar baz'))
self.assertEqual(('foo', 'bar baz'), psv('foo=bar\\ baz'))
self.assertEqual(('foo', ''), psv('foo='))
self.assertEqual((None, None), psv('foo'))
self.assertEqual((None, None), psv(None))
@mock.patch.object(subprocess, 'check_output')
def test_ensure_mounted(self, co):
bd = config_drive.BlockDevice.from_blkid_export(BLKID_CONFIG_DRIVE)
self.assertTrue(bd.config_drive_candidate())
proc = self.useFixture(fixtures.TempDir())
config_drive.PROC_MOUNTS_PATH = os.path.join(proc.path, 'mount')
with open(config_drive.PROC_MOUNTS_PATH, 'w') as md:
md.write('')
self.assertIsNone(bd.mountpoint)
self.assertFalse(bd.unmount)
bd.ensure_mounted()
mountpoint = bd.mountpoint
self.assertIsNotNone(mountpoint)
self.assertTrue(bd.unmount)
self.assertTrue(os.path.isdir(mountpoint))
co.assert_called_with([
'mount', '/dev/sr0', mountpoint, '-o', 'ro'
])
bd.cleanup()
self.assertIsNone(bd.mountpoint)
self.assertFalse(bd.unmount)
self.assertFalse(os.path.isdir(mountpoint))
co.assert_called_with([
'umount', '-l', mountpoint
])
@mock.patch.object(subprocess, 'check_output')
def test_already_mounted(self, co):
bd = config_drive.BlockDevice.from_blkid_export(BLKID_CONFIG_DRIVE)
self.assertTrue(bd.config_drive_candidate())
proc = self.useFixture(fixtures.TempDir())
mountpoint = self.useFixture(fixtures.TempDir()).path
config_drive.PROC_MOUNTS_PATH = os.path.join(proc.path, 'mount')
with open(config_drive.PROC_MOUNTS_PATH, 'w') as md:
md.write('%s %s r 0 0\n' % (bd.devname, mountpoint))
self.assertIsNone(bd.mountpoint)
self.assertFalse(bd.unmount)
bd.ensure_mounted()
self.assertEqual(mountpoint, bd.mountpoint)
self.assertFalse(bd.unmount)
co.assert_not_called()
bd.cleanup()
self.assertIsNone(bd.mountpoint)
self.assertFalse(bd.unmount)
co.assert_not_called()
@mock.patch.object(config_drive.BlockDevice, 'ensure_mounted')
@mock.patch.object(config_drive.BlockDevice, 'cleanup')
def test_get_metadata(self, cleanup, ensure_mounted):
bd = config_drive.BlockDevice.from_blkid_export(BLKID_CONFIG_DRIVE)
bd.mountpoint = self.useFixture(fixtures.TempDir()).path
md = bd.get_metadata()
self.assertEqual({}, md)
md_dir = os.path.join(bd.mountpoint, 'ec2', 'latest')
os.makedirs(md_dir)
md_path = os.path.join(md_dir, 'meta-data.json')
with open(md_path, 'w') as md:
json.dump(test_ec2.META_DATA_RESOLVED, md)
md = bd.get_metadata()
self.assertEqual(test_ec2.META_DATA_RESOLVED, md)


@@ -18,40 +18,64 @@ import os
import uuid
import fixtures
import mock
from oslo_config import cfg
import requests
import six.moves.urllib.parse as urlparse
import testtools
from testtools import matchers
from os_collect_config import collect
from os_collect_config import config_drive
from os_collect_config import ec2
from os_collect_config import exc
META_DATA = {'local-ipv4': '192.0.2.1',
'reservation-id': str(uuid.uuid1()),
'local-hostname': 'foo',
'ami-launch-index': '0',
'public-hostname': 'foo',
'hostname': 'foo',
'ami-id': str(uuid.uuid1()),
'instance-action': 'none',
'public-ipv4': '192.0.2.1',
'instance-type': 'flavor.small',
'placement/': 'availability-zone',
'placement/availability-zone': 'foo-az',
'mpi/': 'foo-keypair',
'mpi/foo-keypair': '192.0.2.1 slots=1',
'block-device-mapping/': "ami\nroot\nephemeral0",
'block-device-mapping/ami': 'vda',
'block-device-mapping/root': '/dev/vda',
'block-device-mapping/ephemeral0': '/dev/vdb',
'public-keys/': '0=foo-keypair',
'public-keys/0': 'openssh-key',
'public-keys/0/': 'openssh-key',
'public-keys/0/openssh-key': 'ssh-rsa AAAAAAAAABBBBBBBBCCCCCCCC',
'instance-id': str(uuid.uuid1())}
META_DATA = {
'local-ipv4': '192.0.2.1',
'reservation-id': str(uuid.uuid1()),
'local-hostname': 'foo',
'ami-launch-index': '0',
'public-hostname': 'foo',
'hostname': 'foo',
'ami-id': str(uuid.uuid1()),
'instance-action': 'none',
'public-ipv4': '192.0.2.1',
'instance-type': 'flavor.small',
'placement/': 'availability-zone',
'placement/availability-zone': 'foo-az',
'mpi/': 'foo-keypair',
'mpi/foo-keypair': '192.0.2.1 slots=1',
'block-device-mapping/': "ami\nroot\nephemeral0",
'block-device-mapping/ami': 'vda',
'block-device-mapping/root': '/dev/vda',
'block-device-mapping/ephemeral0': '/dev/vdb',
'public-keys/': '0=foo-keypair',
'public-keys/0': 'openssh-key',
'public-keys/0/': 'openssh-key',
'public-keys/0/openssh-key': 'ssh-rsa AAAAAAAAABBBBBBBBCCCCCCCC',
'instance-id': str(uuid.uuid1())
}
META_DATA_RESOLVED = {
'local-ipv4': '192.0.2.1',
'reservation-id': META_DATA['reservation-id'],
'local-hostname': 'foo',
'ami-launch-index': '0',
'public-hostname': 'foo',
'hostname': 'foo',
'ami-id': META_DATA['ami-id'],
'instance-action': 'none',
'public-ipv4': '192.0.2.1',
'instance-type': 'flavor.small',
'placement': {'availability-zone': 'foo-az'},
'mpi': {'foo-keypair': '192.0.2.1 slots=1'},
'public-keys': {'0': {'openssh-key': 'ssh-rsa AAAAAAAAABBBBBBBBCCCCCCCC'}},
'block-device-mapping': {'ami': 'vda',
'ephemeral0': '/dev/vdb',
'root': '/dev/vda'},
'instance-id': META_DATA['instance-id']
}
class FakeResponse(dict):
@@ -93,32 +117,25 @@ class TestEc2(testtools.TestCase):
super(TestEc2, self).setUp()
self.log = self.useFixture(fixtures.FakeLogger())
def test_collect_ec2(self):
@mock.patch.object(config_drive, 'config_drive')
def test_collect_ec2(self, cd):
cd.return_value = None
collect.setup_conf()
ec2_md = ec2.Collector(requests_impl=FakeRequests).collect()
self.assertThat(ec2_md, matchers.IsInstance(list))
self.assertEqual('ec2', ec2_md[0][0])
ec2_md = ec2_md[0][1]
for k in ('public-ipv4', 'instance-id', 'hostname'):
self.assertIn(k, ec2_md)
self.assertEqual(ec2_md[k], META_DATA[k])
self.assertEqual(ec2_md['block-device-mapping']['ami'], 'vda')
# SSH keys are special cases
self.assertEqual(
{'0': {'openssh-key': 'ssh-rsa AAAAAAAAABBBBBBBBCCCCCCCC'}},
ec2_md['public-keys'])
self.assertEqual([('ec2', META_DATA_RESOLVED)], ec2_md)
self.assertEqual('', self.log.output)
def test_collect_ec2_fail(self):
@mock.patch.object(config_drive, 'config_drive')
def test_collect_ec2_fail(self, cd):
cd.return_value = None
collect.setup_conf()
collect_ec2 = ec2.Collector(requests_impl=FakeFailRequests)
self.assertRaises(exc.Ec2MetadataNotAvailable, collect_ec2.collect)
self.assertIn('Forbidden', self.log.output)
def test_collect_ec2_collected(self):
@mock.patch.object(config_drive, 'config_drive')
def test_collect_ec2_collected(self, cd):
cd.return_value = None
collect.setup_conf()
cache_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(cfg.CONF.reset)
@@ -129,3 +146,11 @@ class TestEc2(testtools.TestCase):
collect_ec2 = ec2.Collector(requests_impl=FakeFailRequests)
self.assertEqual([('ec2', META_DATA)], collect_ec2.collect())
@mock.patch.object(config_drive, 'config_drive')
def test_collect_config_drive(self, cd):
cd.return_value.get_metadata.return_value = META_DATA_RESOLVED
collect.setup_conf()
ec2_md = ec2.Collector(requests_impl=FakeFailRequests).collect()
self.assertEqual([('ec2', META_DATA_RESOLVED)], ec2_md)
self.assertEqual('', self.log.output)