merge from trunk
This commit is contained in:
commit
161a9ab351
|
@ -1,6 +1,9 @@
|
|||
0.7.3:
|
||||
- fix omnibus chef installer (LP: #1182265) [Chris Wing]
|
||||
- small fix for OVF datasource for iso transport on non-iso9660 filesystem
|
||||
- determine if upstart version is suitable for
|
||||
'initctl reload-configuration' (LP: #1124384). If so, then invoke it.
|
||||
- add Azure datasource.
|
||||
0.7.2:
|
||||
- add a debian watch file
|
||||
- add 'sudo' entry to ubuntu's default user (LP: #1080717)
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from cloudinit import handlers
|
||||
from cloudinit import log as logging
|
||||
|
@ -66,14 +67,53 @@ class UpstartJobPartHandler(handlers.Handler):
|
|||
path = os.path.join(self.upstart_dir, filename)
|
||||
util.write_file(path, payload, 0644)
|
||||
|
||||
# FIXME LATER (LP: #1124384)
|
||||
# a bug in upstart means that invoking reload-configuration
|
||||
# at this stage in boot causes havoc. So, until that is fixed
|
||||
# we will not do that. However, I'd like to be able to easily
|
||||
# test to see if this bug is still present in an image with
|
||||
# a newer upstart. So, a boot hook could easily write this file.
|
||||
if os.path.exists("/run/cloud-init-upstart-reload"):
|
||||
# if inotify support is not present in the root filesystem
|
||||
# (overlayroot) then we need to tell upstart to re-read /etc
|
||||
|
||||
if SUITABLE_UPSTART:
|
||||
util.subp(["initctl", "reload-configuration"], capture=False)
|
||||
|
||||
|
||||
def _has_suitable_upstart():
    """Return True if the installed upstart can safely be told to
    'initctl reload-configuration' at this point in boot.

    (LP: #1124384) A bug in upstart means that invoking
    reload-configuration at this stage in boot causes havoc.  So, try to
    determine if upstart is installed, and reloading configuration is OK.
    """
    if not os.path.exists("/sbin/initctl"):
        return False
    try:
        (version_out, _err) = util.subp(["initctl", "version"])
    except Exception:
        util.logexc(LOG, "initctl version failed")
        return False

    # expecting 'initctl version' to output something like: init (upstart X.Y)
    # the version string is embedded in the output, so use re.search
    # (re.match anchors at the start of the string and would never match).
    if re.search(r"upstart 1\.[0-7][)]", version_out):
        return False
    if "upstart 0." in version_out:
        return False
    elif "upstart 1.8" in version_out:
        # 1.8 is only suitable with a sufficiently patched ubuntu package
        if not os.path.exists("/usr/bin/dpkg-query"):
            return False
        try:
            (dpkg_ver, _err) = util.subp(["dpkg-query",
                                          "--showformat=${Version}",
                                          "--show", "upstart"], rcs=[0, 1])
        except Exception:
            util.logexc(LOG, "dpkg-query failed")
            return False

        try:
            good = "1.8-0ubuntu1.2"
            util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
            return True
        except util.ProcessExecutionError as e:
            # exit code 1 just means "less than"; anything else is a
            # real failure worth logging.
            if e.exit_code == 1:
                pass
            else:
                util.logexc(LOG, "dpkg --compare-versions failed [%s]",
                            e.exit_code)
        except Exception:
            util.logexc(LOG, "dpkg --compare-versions failed")
        return False
    else:
        return True
|
||||
|
||||
SUITABLE_UPSTART = _has_suitable_upstart()
|
||||
|
|
|
@ -31,6 +31,7 @@ CFG_BUILTIN = {
|
|||
'datasource_list': [
|
||||
'NoCloud',
|
||||
'ConfigDrive',
|
||||
'Azure',
|
||||
'AltCloud',
|
||||
'OVF',
|
||||
'MAAS',
|
||||
|
|
|
@ -0,0 +1,381 @@
|
|||
# vi: ts=4 expandtab
|
||||
#
|
||||
# Copyright (C) 2013 Canonical Ltd.
|
||||
#
|
||||
# Author: Scott Moser <scott.moser@canonical.com>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 3, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import base64
|
||||
import os
|
||||
import os.path
|
||||
import time
|
||||
from xml.dom import minidom
|
||||
|
||||
from cloudinit import log as logging
|
||||
from cloudinit import sources
|
||||
from cloudinit import util
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
DS_NAME = 'Azure'
|
||||
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
|
||||
AGENT_START = ['service', 'walinuxagent', 'start']
|
||||
BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: {
|
||||
'agent_command': AGENT_START,
|
||||
'data_dir': "/var/lib/waagent"}}}
|
||||
|
||||
|
||||
class DataSourceAzureNet(sources.DataSource):
    """Datasource for Microsoft Azure.

    Locates the OVF environment (ovf-env.xml) in the seed directory, an
    attached iso9660/udf device, or the cached walinuxagent data
    directory, then invokes the agent command and waits for the files
    it is expected to produce.
    """

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        # <cloud_dir>/seed/azure is checked first by get_data()
        self.seed_dir = os.path.join(paths.seed_dir, 'azure')
        self.cfg = {}
        self.seed = None

    def __str__(self):
        root = sources.DataSource.__str__(self)
        return "%s [seed=%s]" % (root, self.seed)

    def get_data(self):
        """Search candidates for an Azure OVF environment and load it.

        Returns True when a datasource was found and metadata,
        userdata_raw and cfg were populated; False otherwise.
        Raises BrokenAzureDataSource when a candidate is recognizably
        Azure but malformed.
        """
        ddir_cfgpath = ['datasource', DS_NAME, 'data_dir']
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot. So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = util.get_cfg_by_path(self.sys_cfg, ddir_cfgpath)
        if ddir is None:
            ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, ddir_cfgpath)

        # seed dir first, then devices, then the cached data dir
        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                # not an error; just move on to the next candidate
                continue
            except BrokenAzureDataSource as exc:
                # looked like azure but was malformed: fatal
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable" % cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = cfg
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using cached datasource in %s", ddir)

        # resolve 'cmd' and 'datadir', first match wins across:
        # ovf-provided cfg, system config, builtin defaults.
        fields = [('cmd', ['datasource', DS_NAME, 'agent_command']),
                  ('datadir', ddir_cfgpath)]
        mycfg = {}
        for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG):
            for name, path in fields:
                if name in mycfg:
                    continue
                value = util.get_cfg_by_path(cfg, keyp=path)
                if value is not None:
                    mycfg[name] = value

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(mycfg['datadir'], files, dirmode=0700)

        try:
            invoke_agent(mycfg['cmd'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd'])

        wait_for = [os.path.join(mycfg['datadir'], "SharedConfig.xml")]

        # the agent is expected to write one .crt file per public key
        # fingerprint from the OVF 'SSH' section
        fp_files = []
        for pk in self.cfg.get('_pubkeys', []):
            bname = pk['fingerprint'] + ".crt"
            fp_files += [os.path.join(mycfg['datadir'], bname)]

        start = time.time()
        missing = wait_for_files(wait_for + fp_files)
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)
        else:
            LOG.debug("waited %.3f seconds for %d files to appear",
                      time.time() - start, len(wait_for))

        pubkeys = pubkeys_from_crt_files(fp_files)

        self.metadata['public-keys'] = pubkeys

        return True

    def get_config_obj(self):
        """Return the config dict parsed from the OVF environment."""
        return self.cfg
|
||||
|
||||
|
||||
def crtfile_to_pubkey(fname):
    """Convert the x509 certificate file *fname* to an OpenSSH public
    key string (trailing whitespace stripped)."""
    shell_cmd = ('openssl x509 -noout -pubkey < "$0" |'
                 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
    # $0 in the shell snippet is bound to fname by sh -c
    output, _stderr = util.subp(['sh', '-c', shell_cmd, fname], capture=True)
    return output.rstrip()
|
||||
|
||||
|
||||
def pubkeys_from_crt_files(flist):
    """Convert each certificate file in *flist* to an ssh public key.

    Files that fail to convert are logged (as a batch) and skipped;
    returns the list of successfully converted key strings.
    """
    pubkeys = []
    errors = []
    for fname in flist:
        try:
            pubkeys.append(crtfile_to_pubkey(fname))
        except util.ProcessExecutionError:
            # append, not extend: extend() would add the filename to the
            # error list one character at a time.
            errors.append(fname)

    if errors:
        LOG.warn("failed to convert the crt files to pubkey: %s", errors)

    return pubkeys
|
||||
|
||||
|
||||
def wait_for_files(flist, maxwait=60, naplen=.5):
    """Poll until every path in *flist* exists.

    Checks every *naplen* seconds for at most *maxwait* seconds.
    Returns an empty list when all files appeared, otherwise the set of
    paths still missing when the wait timed out.
    """
    missing = set(flist)
    elapsed = 0
    while elapsed < maxwait:
        missing = set(f for f in missing if not os.path.exists(f))
        if not missing:
            return []
        time.sleep(naplen)
        elapsed += naplen
    return missing
|
||||
|
||||
|
||||
def write_files(datadir, files, dirmode=None):
    """Write *files* (a mapping of name -> content) into *datadir*.

    The directory is created with *dirmode* if needed.  Each file is
    written owner-read/write only (0o600): walinuxagent writes files
    world readable but expects the directory to provide protection.
    A falsy *datadir* is a no-op; a falsy *files* still ensures the dir.
    """
    if not datadir:
        return
    util.ensure_dir(datadir, dirmode)
    for fname, fcontent in (files or {}).items():
        util.write_file(filename=os.path.join(datadir, fname),
                        content=fcontent, mode=0o600)
|
||||
|
||||
|
||||
def invoke_agent(cmd):
    """Run the configured agent command, if any.

    *cmd* may be a list (run directly) or a string (run via the shell).
    A falsy *cmd* only logs.  This is a function itself to simplify
    patching it for test.
    """
    if cmd:
        # lazy %-style args: only formatted if debug logging is enabled
        LOG.debug("invoking agent: %s", cmd)
        util.subp(cmd, shell=(not isinstance(cmd, list)))
    else:
        LOG.debug("not invoking agent")
|
||||
|
||||
|
||||
def find_child(node, filter_func):
    """Return the direct children of DOM *node* for which *filter_func*
    returns a truthy value (empty list for a childless node)."""
    if not node.hasChildNodes():
        return []
    return [child for child in node.childNodes if filter_func(child)]
|
||||
|
||||
|
||||
def load_azure_ovf_pubkeys(sshnode):
    """Parse the 'SSH' DOM node of an Azure OVF environment.

    Returns a list of dicts, one per PublicKey entry:
      [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
        'path': 'where/to/go'}]

    Expected shape of the node:
      <SSH><PublicKeys>
        <PublicKey><Fingerprint>ABC</Fingerprint><Path>/ABC</Path>
        ...
      </PublicKeys></SSH>

    Raises BrokenAzureDataSource if more than one PublicKeys node is
    present; returns [] when no keys are found.
    """
    results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
    if len(results) == 0:
        return []
    if len(results) > 1:
        raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
                                    len(results))

    pubkeys_node = results[0]
    pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")

    if len(pubkeys) == 0:
        return []

    found = []
    text_node = minidom.Document.TEXT_NODE

    for pk_node in pubkeys:
        if not pk_node.hasChildNodes():
            continue
        cur = {'fingerprint': "", 'path': ""}
        for child in pk_node.childNodes:
            # skip whitespace text nodes between elements
            if (child.nodeType == text_node or not child.localName):
                continue

            name = child.localName.lower()

            # only Fingerprint/Path are recognized; others are ignored
            if name not in cur.keys():
                continue

            # only accept elements whose single child is the text value
            if (len(child.childNodes) != 1 or
                    child.childNodes[0].nodeType != text_node):
                continue

            cur[name] = child.childNodes[0].wholeText.strip()
        found.append(cur)

    return found
|
||||
|
||||
|
||||
def read_azure_ovf(contents):
    """Parse the string *contents* as an Azure ovf-env.xml document.

    Returns a (metadata, userdata, config) tuple.

    Raises NonAzureDataSource when the document is not an Azure OVF
    environment, and BrokenAzureDataSource when it is recognizably
    Azure but malformed.
    """
    try:
        dom = minidom.parseString(contents)
    except Exception as e:
        raise NonAzureDataSource("invalid xml: %s" % e)

    results = find_child(dom.documentElement,
                         lambda n: n.localName == "ProvisioningSection")

    if len(results) == 0:
        raise NonAzureDataSource("No ProvisioningSection")
    if len(results) > 1:
        raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
                                    len(results))
    provSection = results[0]

    lpcs_nodes = find_child(provSection,
        lambda n: n.localName == "LinuxProvisioningConfigurationSet")

    # check lpcs_nodes, not 'results': 'results' holds the
    # ProvisioningSection list already validated above, so testing it
    # again would make these guards dead code.
    if len(lpcs_nodes) == 0:
        raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
    if len(lpcs_nodes) > 1:
        raise BrokenAzureDataSource("found '%d' %ss" %
                                    ("LinuxProvisioningConfigurationSet",
                                     len(lpcs_nodes)))
    lpcs = lpcs_nodes[0]

    if not lpcs.hasChildNodes():
        raise BrokenAzureDataSource("no child nodes of configuration set")

    md_props = 'seedfrom'
    md = {'azure_data': {}}
    cfg = {}
    ud = ""
    password = None
    username = None

    for child in lpcs.childNodes:
        # skip whitespace text nodes between elements
        if child.nodeType == dom.TEXT_NODE or not child.localName:
            continue

        name = child.localName.lower()

        # 'simple' means the element wraps exactly one text value
        simple = False
        if (len(child.childNodes) == 1 and
                child.childNodes[0].nodeType == dom.TEXT_NODE):
            simple = True
            value = child.childNodes[0].wholeText

        # we accept either UserData or CustomData.  If both are present
        # then behavior is undefined.
        if (name == "userdata" or name == "customdata"):
            ud = base64.b64decode(''.join(value.split()))
        elif name == "username":
            username = value
        elif name == "userpassword":
            password = value
        elif name == "hostname":
            md['local-hostname'] = value
        elif name == "dscfg":
            cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})}
        elif name == "ssh":
            cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
        elif name == "disablesshpasswordauthentication":
            cfg['ssh_pwauth'] = util.is_false(value)
        elif simple:
            # unrecognized simple values: 'seedfrom' goes to top-level
            # metadata, everything else is stashed under 'azure_data'
            if name in md_props:
                md[name] = value
            else:
                md['azure_data'][name] = value

    defuser = {}
    if username:
        defuser['name'] = username
    if password:
        defuser['password'] = password
        defuser['lock_passwd'] = False

    if defuser:
        cfg['system_info'] = {'default_user': defuser}

    # a given password implies password auth unless explicitly disabled
    if 'ssh_pwauth' not in cfg and password:
        cfg['ssh_pwauth'] = True

    return (md, ud, cfg)
|
||||
|
||||
|
||||
def list_possible_azure_ds_devs():
    """Return a reverse-sorted list of block devices that might hold an
    Azure datasource (anything carrying an iso9660 or udf filesystem)."""
    candidates = []
    for fstype in ("iso9660", "udf"):
        candidates.extend(util.find_devs_with("TYPE=%s" % fstype))
    return sorted(candidates, reverse=True)
|
||||
|
||||
|
||||
def load_azure_ds_dir(source_dir):
    """Load an Azure datasource from *source_dir*.

    Returns (metadata, userdata, config, files) where files maps
    'ovf-env.xml' to its raw contents.  Raises NonAzureDataSource when
    no ovf-env.xml is present.
    """
    ovf_path = os.path.join(source_dir, "ovf-env.xml")
    if not os.path.isfile(ovf_path):
        raise NonAzureDataSource("No ovf-env file found")

    with open(ovf_path, "r") as reader:
        xml_contents = reader.read()

    (md, ud, cfg) = read_azure_ovf(xml_contents)
    return (md, ud, cfg, {'ovf-env.xml': xml_contents})
|
||||
|
||||
|
||||
class BrokenAzureDataSource(Exception):
    """The content looked like an Azure datasource but was malformed."""
    pass
|
||||
|
||||
|
||||
class NonAzureDataSource(Exception):
    """The content examined was not an Azure datasource at all."""
    pass
|
||||
|
||||
|
||||
# Used to match classes to dependencies
datasources = [
    (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
    """Return the datasources above whose dependencies are satisfied
    by *depends*."""
    return sources.list_from_depends(depends, datasources)
|
|
@ -42,3 +42,7 @@ datasource:
|
|||
meta-data:
|
||||
instance-id: i-87018aed
|
||||
local-hostname: myhost.internal
|
||||
|
||||
Azure:
|
||||
agent_command: [service, walinuxagent, start]
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
"""Tests of the built-in user data handlers."""
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from tests.unittests import helpers as test_helpers
|
||||
|
||||
|
@ -35,7 +34,6 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):
|
|||
None, None, None)
|
||||
self.assertEquals(0, len(os.listdir(up_root)))
|
||||
|
||||
@unittest.skip("until LP: #1124384 fixed")
|
||||
def test_upstart_frequency_single(self):
|
||||
# files should be written out when frequency is ! per-instance
|
||||
new_root = self.makeDir()
|
||||
|
@ -47,6 +45,7 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):
|
|||
'upstart_dir': "/etc/upstart",
|
||||
})
|
||||
|
||||
upstart_job.SUITABLE_UPSTART = True
|
||||
util.ensure_dir("/run")
|
||||
util.ensure_dir("/etc/upstart")
|
||||
|
||||
|
|
|
@ -0,0 +1,238 @@
|
|||
from cloudinit import helpers
|
||||
from cloudinit.sources import DataSourceAzure
|
||||
from tests.unittests.helpers import populate_dir
|
||||
|
||||
import base64
|
||||
from mocker import MockerTestCase
|
||||
import os
|
||||
import yaml
|
||||
|
||||
|
||||
def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
    """Build a minimal but valid Azure ovf-env.xml document as a string.

    data: dict of element-name -> value emitted inside the
      LinuxProvisioningConfigurationSet (defaults to a HostName entry).
    pubkeys: iterable of (fingerprint, path) pairs emitted as SSH
      PublicKey entries.
    userdata: raw user-data; emitted base64-encoded in <UserData>.
    """
    if data is None:
        data = {'HostName': 'FOOHOST'}
    if pubkeys is None:
        pubkeys = {}

    content = """<?xml version="1.0" encoding="utf-8"?>
<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
 xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
 xmlns:wa="http://schemas.microsoft.com/windowsazure"
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">

 <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
 <LinuxProvisioningConfigurationSet
  xmlns="http://schemas.microsoft.com/windowsazure"
  xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
  <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
"""
    for key, val in data.items():
        content += "<%s>%s</%s>\n" % (key, val, key)

    if userdata:
        content += "<UserData>%s</UserData>\n" % (base64.b64encode(userdata))

    if pubkeys:
        content += "<SSH><PublicKeys>\n"
        for fp, path in pubkeys:
            content += " <PublicKey>"
            content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
                        (fp, path))
            content += "</PublicKey>\n"
        content += "</PublicKeys></SSH>"
    content += """
 </LinuxProvisioningConfigurationSet>
 </wa:ProvisioningSection>
 <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
 <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
  xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
  <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
  <ProvisionGuestAgent>false</ProvisionGuestAgent>
  <GuestAgentPackageName i:nil="true" />
 </PlatformSettings></wa:PlatformSettingsSection>
</Environment>
"""

    return content
|
||||
|
||||
|
||||
class TestAzureDataSource(MockerTestCase):
|
||||
|
||||
def setUp(self):
|
||||
# makeDir comes from MockerTestCase
|
||||
self.tmp = self.makeDir()
|
||||
|
||||
# patch cloud_dir, so our 'seed_dir' is guaranteed empty
|
||||
self.paths = helpers.Paths({'cloud_dir': self.tmp})
|
||||
|
||||
self.unapply = []
|
||||
super(TestAzureDataSource, self).setUp()
|
||||
|
||||
def tearDown(self):
|
||||
apply_patches([i for i in reversed(self.unapply)])
|
||||
super(TestAzureDataSource, self).tearDown()
|
||||
|
||||
def apply_patches(self, patches):
|
||||
ret = apply_patches(patches)
|
||||
self.unapply += ret
|
||||
|
||||
def _get_ds(self, data):
|
||||
|
||||
def dsdevs():
|
||||
return data.get('dsdevs', [])
|
||||
|
||||
def _invoke_agent(cmd):
|
||||
data['agent_invoked'] = cmd
|
||||
|
||||
def _write_files(datadir, files, dirmode):
|
||||
data['files'] = {}
|
||||
data['datadir'] = datadir
|
||||
data['datadir_mode'] = dirmode
|
||||
for (fname, content) in files.items():
|
||||
data['files'][fname] = content
|
||||
|
||||
def _wait_for_files(flist, _maxwait=None, _naplen=None):
|
||||
data['waited'] = flist
|
||||
return []
|
||||
|
||||
def _pubkeys_from_crt_files(flist):
|
||||
data['pubkey_files'] = flist
|
||||
return ["pubkey_from: %s" % f for f in flist]
|
||||
|
||||
if data.get('ovfcontent') is not None:
|
||||
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
|
||||
{'ovf-env.xml': data['ovfcontent']})
|
||||
|
||||
mod = DataSourceAzure
|
||||
|
||||
if data.get('dsdevs'):
|
||||
self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
|
||||
|
||||
self.apply_patches([(mod, 'invoke_agent', _invoke_agent),
|
||||
(mod, 'write_files', _write_files),
|
||||
(mod, 'wait_for_files', _wait_for_files),
|
||||
(mod, 'pubkeys_from_crt_files',
|
||||
_pubkeys_from_crt_files)])
|
||||
|
||||
dsrc = mod.DataSourceAzureNet(
|
||||
data.get('sys_cfg', {}), distro=None, paths=self.paths)
|
||||
|
||||
return dsrc
|
||||
|
||||
def test_basic_seed_dir(self):
|
||||
odata = {'HostName': "myhost", 'UserName': "myuser"}
|
||||
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
|
||||
'sys_cfg': {}}
|
||||
|
||||
dsrc = self._get_ds(data)
|
||||
ret = dsrc.get_data()
|
||||
self.assertTrue(ret)
|
||||
self.assertEqual(dsrc.userdata_raw, "")
|
||||
self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
|
||||
self.assertTrue('ovf-env.xml' in data['files'])
|
||||
self.assertEqual(0700, data['datadir_mode'])
|
||||
|
||||
def test_user_cfg_set_agent_command(self):
|
||||
cfg = {'agent_command': "my_command"}
|
||||
odata = {'HostName': "myhost", 'UserName': "myuser",
|
||||
'dscfg': yaml.dump(cfg)}
|
||||
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
|
||||
|
||||
dsrc = self._get_ds(data)
|
||||
ret = dsrc.get_data()
|
||||
self.assertTrue(ret)
|
||||
self.assertEqual(data['agent_invoked'], cfg['agent_command'])
|
||||
|
||||
def test_sys_cfg_set_agent_command(self):
|
||||
sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
|
||||
data = {'ovfcontent': construct_valid_ovf_env(data={}),
|
||||
'sys_cfg': sys_cfg}
|
||||
|
||||
dsrc = self._get_ds(data)
|
||||
ret = dsrc.get_data()
|
||||
self.assertTrue(ret)
|
||||
self.assertEqual(data['agent_invoked'], '_COMMAND')
|
||||
|
||||
def test_username_used(self):
|
||||
odata = {'HostName': "myhost", 'UserName': "myuser"}
|
||||
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
|
||||
|
||||
dsrc = self._get_ds(data)
|
||||
ret = dsrc.get_data()
|
||||
self.assertTrue(ret)
|
||||
self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
|
||||
"myuser")
|
||||
|
||||
def test_password_given(self):
|
||||
odata = {'HostName': "myhost", 'UserName': "myuser",
|
||||
'UserPassword': "mypass"}
|
||||
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
|
||||
|
||||
dsrc = self._get_ds(data)
|
||||
ret = dsrc.get_data()
|
||||
self.assertTrue(ret)
|
||||
self.assertTrue('default_user' in dsrc.cfg['system_info'])
|
||||
defuser = dsrc.cfg['system_info']['default_user']
|
||||
|
||||
# default user shoudl be updated for password and username
|
||||
# and should not be locked.
|
||||
self.assertEqual(defuser['name'], odata['UserName'])
|
||||
self.assertEqual(defuser['password'], odata['UserPassword'])
|
||||
self.assertFalse(defuser['lock_passwd'])
|
||||
|
||||
def test_userdata_found(self):
|
||||
mydata = "FOOBAR"
|
||||
odata = {'UserData': base64.b64encode(mydata)}
|
||||
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
|
||||
|
||||
dsrc = self._get_ds(data)
|
||||
ret = dsrc.get_data()
|
||||
self.assertTrue(ret)
|
||||
self.assertEqual(dsrc.userdata_raw, mydata)
|
||||
|
||||
def test_no_datasource_expected(self):
|
||||
#no source should be found if no seed_dir and no devs
|
||||
data = {}
|
||||
dsrc = self._get_ds({})
|
||||
ret = dsrc.get_data()
|
||||
self.assertFalse(ret)
|
||||
self.assertFalse('agent_invoked' in data)
|
||||
|
||||
def test_cfg_has_pubkeys(self):
|
||||
odata = {'HostName': "myhost", 'UserName': "myuser"}
|
||||
mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
|
||||
pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
|
||||
data = {'ovfcontent': construct_valid_ovf_env(data=odata,
|
||||
pubkeys=pubkeys)}
|
||||
|
||||
dsrc = self._get_ds(data)
|
||||
ret = dsrc.get_data()
|
||||
self.assertTrue(ret)
|
||||
for mypk in mypklist:
|
||||
self.assertIn(mypk, dsrc.cfg['_pubkeys'])
|
||||
|
||||
|
||||
class TestReadAzureOvf(MockerTestCase):
    """Tests of DataSourceAzure.read_azure_ovf parsing."""

    def test_invalid_xml_raises_non_azure_ds(self):
        # a leading unclosed tag makes the document unparseable
        invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
        self.assertRaises(DataSourceAzure.NonAzureDataSource,
                          DataSourceAzure.read_azure_ovf, invalid_xml)

    def test_load_with_pubkeys(self):
        # each (fingerprint, path) pair should come back as a dict in
        # the parsed config's '_pubkeys' list
        mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
        pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
        content = construct_valid_ovf_env(pubkeys=pubkeys)
        (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
        for mypk in mypklist:
            self.assertIn(mypk, cfg['_pubkeys'])
|
||||
|
||||
|
||||
def apply_patches(patches):
    """Apply a list of (ref, name, replacement) monkey-patches.

    Entries whose replacement is None are skipped.  Returns a list of
    (ref, name, original) tuples suitable for passing back in to undo
    the patching.
    """
    undo = []
    for (ref, name, replacement) in patches:
        if replacement is None:
            continue
        previous = getattr(ref, name)
        setattr(ref, name, replacement)
        undo.append((ref, name, previous))
    return undo
|
|
@ -22,7 +22,7 @@ class TestNoCloudDataSource(MockerTestCase):
|
|||
|
||||
def tearDown(self):
|
||||
apply_patches([i for i in reversed(self.unapply)])
|
||||
super(TestNoCloudDataSource, self).setUp()
|
||||
super(TestNoCloudDataSource, self).tearDown()
|
||||
|
||||
def apply_patches(self, patches):
|
||||
ret = apply_patches(patches)
|
||||
|
|
Loading…
Reference in New Issue