Update with trunk and resolve conflicts

This commit is contained in:
Joshua Harlow
2014-11-25 11:46:10 -08:00
28 changed files with 1547 additions and 166 deletions

View File

@@ -1,3 +1,20 @@
0.7.7:
- open 0.7.7
- Digital Ocean: add datasource for Digital Ocean. [Neal Shrader]
- expose uses_systemd as a distro function (fix rhel7)
- fix broken 'output' config (LP: #1387340)
- begin adding cloud config module docs to config modules (LP: #1383510)
- retain trailing eol from template files (sources.list) when
rendered with jinja (LP: #1355343)
- Only use datafiles and initsys addon outside virtualenvs
- Fix the digital ocean test case on python 2.6
- Increase the usefulness, robustness, configurability of the chef module
so that it is more useful, more documented and better for users
- Fix handling of '=' signs in ssh_utils (LP: #1391303)
- Be more tolerant of ssh keys passed into 'ssh_authorized_keys'; allowing
for list, tuple, set, dict, string types and warning on other unexpected
types
- Update to use newer/better OMNIBUS_URL for chef module
0.7.6:
- open 0.7.6
- Enable vendordata on CloudSigma datasource (LP: #1303986)

View File

@@ -18,6 +18,57 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**Summary:** module that configures, starts and installs chef.
**Description:** This module enables chef to be installed (from packages or
from gems, or from omnibus). Before this occurs chef configurations are
written to disk (validation.pem, client.pem, firstboot.json, client.rb),
and needed chef folders/directories are created (/etc/chef and /var/log/chef
and so-on). Then once installing proceeds correctly if configured chef will
be started (in daemon mode or in non-daemon mode) and then once that has
finished (if ran in non-daemon mode this will be when chef finishes
converging, if ran in daemon mode then no further actions are possible since
chef will have forked into its own process) then a post run function can
run that can do finishing activities (such as removing the validation pem
file).
It can be configured with the following option structure::
chef:
directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
/var/cache/chef, /var/backups/chef, /var/run/chef)
validation_key or validation_cert: (optional string to be written to
/etc/chef/validation.pem)
firstboot_path: (path to write run_list and initial_attributes keys that
should also be present in this configuration, defaults
to /etc/chef/firstboot.json)
exec: boolean to run or not run chef (defaults to false, unless a
gem install is requested, in which case it defaults to true)
chef.rb template keys (if falsey, then will be skipped and not
written to /etc/chef/client.rb)
chef:
client_key:
environment:
file_backup_path:
file_cache_path:
json_attribs:
log_level:
log_location:
node_name:
pid_file:
server_url:
show_time:
ssl_verify_mode:
validation_key:
validation_name:
"""
import itertools
import json
import os
@@ -27,19 +78,112 @@ from cloudinit import util
RUBY_VERSION_DEFAULT = "1.8"

# Directories created for chef by default (user-overridable via the
# 'directories' config option).
CHEF_DIRS = tuple([
    '/etc/chef',
    '/var/log/chef',
    '/var/lib/chef',
    '/var/cache/chef',
    '/var/backups/chef',
    '/var/run/chef',
])
# Directories that must always exist, regardless of user configuration.
REQUIRED_CHEF_DIRS = tuple([
    '/etc/chef',
])

# Used if fetching chef from a omnibus style package
OMNIBUS_URL = "https://www.getchef.com/chef/install.sh"
OMNIBUS_URL_RETRIES = 5

CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
CHEF_FB_PATH = '/etc/chef/firstboot.json'
# Default values rendered into the client.rb template; any of these may be
# overridden (or disabled with None) by the user's 'chef' config.
CHEF_RB_TPL_DEFAULTS = {
    # These are ruby symbols...
    'ssl_verify_mode': ':verify_none',
    'log_level': ':info',
    # These are not symbols...
    'log_location': '/var/log/chef/client.log',
    'validation_key': CHEF_VALIDATION_PEM_PATH,
    'client_key': "/etc/chef/client.pem",
    'json_attribs': CHEF_FB_PATH,
    'file_cache_path': "/var/cache/chef",
    'file_backup_path': "/var/backups/chef",
    'pid_file': "/var/run/chef/client.pid",
    'show_time': True,
}
# Template keys that are booleans.
CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
# Template keys whose values are filesystem paths (their parent directories
# are ensured to exist before chef-client runs).
CHEF_RB_TPL_PATH_KEYS = frozenset([
    'log_location',
    'validation_key',
    'client_key',
    'file_cache_path',
    'json_attribs',
    'pid_file',
])
# The full set of keys the client.rb template understands.
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
CHEF_RB_TPL_KEYS.extend([
    'server_url',
    'node_name',
    'environment',
    'validation_name',
])
CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
CHEF_RB_PATH = '/etc/chef/client.rb'
CHEF_EXEC_PATH = '/usr/bin/chef-client'
# Default daemonized invocation: fork, run every 1800s with a 20s splay.
CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
def is_installed():
    """Return True when the chef-client binary exists and is executable."""
    return (os.path.isfile(CHEF_EXEC_PATH) and
            os.access(CHEF_EXEC_PATH, os.X_OK))
def post_run_chef(chef_cfg, log):
    """Perform post-run cleanup after chef has executed.

    When 'delete_validation_post_exec' is set (default False), the
    validation pem is removed so it does not linger on disk.
    """
    should_delete = util.get_cfg_option_bool(
        chef_cfg, 'delete_validation_post_exec', default=False)
    if should_delete and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
        os.unlink(CHEF_VALIDATION_PEM_PATH)
def get_template_params(iid, chef_cfg, log):
    """Build the parameter dict used to render the client.rb template.

    Starts from CHEF_RB_TPL_DEFAULTS, applies any user overrides from
    chef_cfg (a None value disables that key), then forces the generated
    header, node name, environment and the mandatory server/validation
    values on top.
    """
    params = CHEF_RB_TPL_DEFAULTS.copy()
    # Allow users to overwrite any of the keys they want (if they so choose),
    # when a value is None, then the value will be set to None and no boolean
    # or string version will be populated...
    for (key, value) in chef_cfg.items():
        if key not in CHEF_RB_TPL_KEYS:
            log.debug("Skipping unknown chef template key '%s'", key)
        elif value is None:
            params[key] = None
        elif key in CHEF_RB_TPL_BOOL_KEYS:
            # This will make the value a boolean...
            params[key] = util.get_cfg_option_bool(chef_cfg, key)
        else:
            # ...and this a string.
            params[key] = util.get_cfg_option_str(chef_cfg, key)
    # These ones are overwritten to be exact values...
    params['generated_by'] = util.make_header()
    params['node_name'] = util.get_cfg_option_str(chef_cfg, 'node_name',
                                                  default=iid)
    params['environment'] = util.get_cfg_option_str(chef_cfg, 'environment',
                                                    default='_default')
    # These two are mandatory...
    params['server_url'] = chef_cfg['server_url']
    params['validation_name'] = chef_cfg['validation_name']
    return params
def handle(name, cfg, cloud, log, _args):
"""Handler method activated by cloud-init."""
# If there isn't a chef key in the configuration don't do anything
if 'chef' not in cfg:
@@ -49,7 +193,10 @@ def handle(name, cfg, cloud, log, _args):
chef_cfg = cfg['chef']
# Ensure the chef directories we use exist
for d in CHEF_DIRS:
chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
if not chef_dirs:
chef_dirs = list(CHEF_DIRS)
for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
util.ensure_dir(d)
# Set the validation key based on the presence of either 'validation_key'
@@ -57,64 +204,108 @@ def handle(name, cfg, cloud, log, _args):
# takes precedence
for key in ('validation_key', 'validation_cert'):
if key in chef_cfg and chef_cfg[key]:
util.write_file('/etc/chef/validation.pem', chef_cfg[key])
util.write_file(CHEF_VALIDATION_PEM_PATH, chef_cfg[key])
break
# Create the chef config from template
template_fn = cloud.get_template_filename('chef_client.rb')
if template_fn:
iid = str(cloud.datasource.get_instance_id())
params = {
'server_url': chef_cfg['server_url'],
'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid),
'environment': util.get_cfg_option_str(chef_cfg, 'environment',
'_default'),
'validation_name': chef_cfg['validation_name']
}
templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
params = get_template_params(iid, chef_cfg, log)
# Do a best effort attempt to ensure that the template values that
# are associated with paths have there parent directory created
# before they are used by the chef-client itself.
param_paths = set()
for (k, v) in params.items():
if k in CHEF_RB_TPL_PATH_KEYS and v:
param_paths.add(os.path.dirname(v))
util.ensure_dirs(param_paths)
templater.render_to_file(template_fn, CHEF_RB_PATH, params)
else:
log.warn("No template found, not rendering to /etc/chef/client.rb")
log.warn("No template found, not rendering to %s",
CHEF_RB_PATH)
# set the firstboot json
initial_json = {}
if 'run_list' in chef_cfg:
initial_json['run_list'] = chef_cfg['run_list']
if 'initial_attributes' in chef_cfg:
initial_attributes = chef_cfg['initial_attributes']
for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k]
util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))
# Set the firstboot json
fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
default=CHEF_FB_PATH)
if not fb_filename:
log.info("First boot path empty, not writing first boot json file")
else:
initial_json = {}
if 'run_list' in chef_cfg:
initial_json['run_list'] = chef_cfg['run_list']
if 'initial_attributes' in chef_cfg:
initial_attributes = chef_cfg['initial_attributes']
for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k]
util.write_file(fb_filename, json.dumps(initial_json))
# If chef is not installed, we install chef based on 'install_type'
if (not os.path.isfile('/usr/bin/chef-client') or
util.get_cfg_option_bool(chef_cfg,
'force_install', default=False)):
# Try to install chef, if its not already installed...
force_install = util.get_cfg_option_bool(chef_cfg,
'force_install', default=False)
if not is_installed() or force_install:
run = install_chef(cloud, chef_cfg, log)
elif is_installed():
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
else:
run = False
if run:
run_chef(chef_cfg, log)
post_run_chef(chef_cfg, log)
install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
'packages')
if install_type == "gems":
# this will install and run the chef-client from gems
chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
RUBY_VERSION_DEFAULT)
install_chef_from_gems(cloud.distro, ruby_version, chef_version)
# and finally, run chef-client
log.debug('Running chef-client')
util.subp(['/usr/bin/chef-client',
'-d', '-i', '1800', '-s', '20'], capture=False)
elif install_type == 'packages':
# this will install and run the chef-client from packages
cloud.distro.install_packages(('chef',))
elif install_type == 'omnibus':
url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
content = url_helper.readurl(url=url, retries=5)
with util.tempdir() as tmpd:
# use tmpd over tmpfile to avoid 'Text file busy' on execute
tmpf = "%s/chef-omnibus-install" % tmpd
util.write_file(tmpf, str(content), mode=0700)
util.subp([tmpf], capture=False)
def run_chef(chef_cfg, log):
    """Run the chef-client, honoring user-supplied 'exec_arguments'.

    'exec_arguments' may be a list/tuple (extended onto the command line)
    or a single string (appended); any other type is warned about and the
    default daemon arguments are used instead.

    NOTE: removed a stray leftover line from a merge
    (``log.warn("Unknown chef install type %s", install_type)``) that
    referenced an undefined name and duplicated the warning path.
    """
    log.debug('Running chef-client')
    cmd = [CHEF_EXEC_PATH]
    if 'exec_arguments' in chef_cfg:
        cmd_args = chef_cfg['exec_arguments']
        if isinstance(cmd_args, (list, tuple)):
            cmd.extend(cmd_args)
        elif isinstance(cmd_args, (str, basestring)):
            cmd.append(cmd_args)
        else:
            log.warn("Unknown type %s provided for chef"
                     " 'exec_arguments' expected list, tuple,"
                     " or string", type(cmd_args))
            cmd.extend(CHEF_EXEC_DEF_ARGS)
    else:
        cmd.extend(CHEF_EXEC_DEF_ARGS)
    util.subp(cmd, capture=False)
def install_chef(cloud, chef_cfg, log):
# If chef is not installed, we install chef based on 'install_type'
install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
'packages')
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
if install_type == "gems":
# This will install and run the chef-client from gems
chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
RUBY_VERSION_DEFAULT)
install_chef_from_gems(cloud.distro, ruby_version, chef_version)
# Retain backwards compat, by preferring True instead of False
# when not provided/overriden...
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
elif install_type == 'packages':
# This will install and run the chef-client from packages
cloud.distro.install_packages(('chef',))
elif install_type == 'omnibus':
# This will install as a omnibus unified package
url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
retries = max(0, util.get_cfg_option_int(chef_cfg,
"omnibus_url_retries",
default=OMNIBUS_URL_RETRIES))
content = url_helper.readurl(url=url, retries=retries)
with util.tempdir() as tmpd:
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
tmpf = "%s/chef-omnibus-install" % tmpd
util.write_file(tmpf, str(content), mode=0700)
util.subp([tmpf], capture=False)
else:
log.warn("Unknown chef install type '%s'", install_type)
run = False
return run
def get_ruby_packages(version):
@@ -133,9 +324,9 @@ def install_chef_from_gems(ruby_version, chef_version, distro):
util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
if chef_version:
util.subp(['/usr/bin/gem', 'install', 'chef',
'-v %s' % chef_version, '--no-ri',
'--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
'-v %s' % chef_version, '--no-ri',
'--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
else:
util.subp(['/usr/bin/gem', 'install', 'chef',
'--no-ri', '--no-rdoc', '--bindir',
'/usr/bin', '-q'], capture=False)
'--no-ri', '--no-rdoc', '--bindir',
'/usr/bin', '-q'], capture=False)

View File

@@ -14,11 +14,33 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import type_utils
from cloudinit import util
"""
**Summary:** helper to debug cloud-init *internal* datastructures.
**Description:** This module will enable for outputting various internal
information that cloud-init sources provide to either a file or to the output
console/log location that this cloud-init has been configured with when
running.
It can be configured with the following option structure::
debug:
verbose: (defaulting to true)
output: (location to write output, defaulting to console + log)
.. note::
Log configurations are not output.
"""
import copy
from StringIO import StringIO
from cloudinit import type_utils
from cloudinit import util
# Top-level config keys never dumped by this module (log config is noise
# for interactive debugging).
SKIP_KEYS = frozenset(['log_cfgs'])
def _make_header(text):
header = StringIO()
@@ -31,7 +53,14 @@ def _make_header(text):
return header.getvalue()
def _dumps(obj):
    """YAML-serialize *obj* without document markers or a trailing newline."""
    return util.yaml_dumps(obj,
                           explicit_start=False,
                           explicit_end=False).rstrip()
def handle(name, cfg, cloud, log, args):
"""Handler method activated by cloud-init."""
verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
if args:
# if args are provided (from cmdline) then explicitly set verbose
@@ -46,7 +75,7 @@ def handle(name, cfg, cloud, log, args):
return
# Clean out some keys that we just don't care about showing...
dump_cfg = copy.deepcopy(cfg)
for k in ['log_cfgs']:
for k in SKIP_KEYS:
dump_cfg.pop(k, None)
all_keys = list(dump_cfg.keys())
for k in all_keys:
@@ -55,10 +84,10 @@ def handle(name, cfg, cloud, log, args):
# Now dump it...
to_print = StringIO()
to_print.write(_make_header("Config"))
to_print.write(util.yaml_dumps(dump_cfg))
to_print.write(_dumps(dump_cfg))
to_print.write("\n")
to_print.write(_make_header("MetaData"))
to_print.write(util.yaml_dumps(cloud.datasource.metadata))
to_print.write(_dumps(cloud.datasource.metadata))
to_print.write("\n")
to_print.write(_make_header("Misc"))
to_print.write("Datasource: %s\n" %

View File

@@ -17,30 +17,27 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
ubuntu_init_switch: reboot system into another init
**Summary:** reboot system into another init.
This provides a way for the user to boot with systemd even if the
image is set to boot with upstart. It should be run as one of the first
cloud_init_modules, and will switch the init system and then issue a reboot.
The next boot will come up in the target init system and no action will
**Description:** This module provides a way for the user to boot with systemd
even if the image is set to boot with upstart. It should be run as one of the
first ``cloud_init_modules``, and will switch the init system and then issue a
reboot. The next boot will come up in the target init system and no action will
be taken.
This should be inert on non-ubuntu systems, and also exit quickly.
config is comes under the top level 'init_switch' dictionary.
It can be configured with the following option structure::
#cloud-config
init_switch:
target: systemd
reboot: true
init_switch:
target: systemd (can be 'systemd' or 'upstart')
reboot: true (reboot if a change was made, or false to not reboot)
'target' can be 'systemd' or 'upstart'. Best effort is made, but its possible
this system will break, and probably won't interact well with any other
mechanism you've used to switch the init system.
.. note::
'reboot': [default=true].
true: reboot if a change was made.
false: do not reboot.
Best effort is made, but it's possible
this system will break, and probably won't interact well with any other
mechanism you've used to switch the init system.
"""
from cloudinit.settings import PER_INSTANCE
@@ -91,6 +88,7 @@ fi
def handle(name, cfg, cloud, log, args):
"""Handler method activated by cloud-init."""
if not isinstance(cloud.distro, ubuntu.Distro):
log.debug("%s: distro is '%s', not ubuntu. returning",

View File

@@ -388,8 +388,20 @@ class Distro(object):
# Import SSH keys
if 'ssh_authorized_keys' in kwargs:
keys = set(kwargs['ssh_authorized_keys']) or []
ssh_util.setup_user_keys(keys, name, options=None)
# Try to handle this in a smart manner.
keys = kwargs['ssh_authorized_keys']
if isinstance(keys, (basestring, str)):
keys = [keys]
if isinstance(keys, dict):
keys = list(keys.values())
if keys is not None:
if not isinstance(keys, (tuple, list, set)):
LOG.warn("Invalid type '%s' detected for"
" 'ssh_authorized_keys', expected list,"
" string, dict, or set.", type(keys))
else:
keys = set(keys) or []
ssh_util.setup_user_keys(keys, name, options=None)
return True

View File

@@ -114,6 +114,10 @@ def translate_network(settings):
if 'iface' not in info:
continue
iface_details = info['iface'].split(None)
# Check if current device *may* have an ipv6 IP
use_ipv6 = False
if 'inet6' in iface_details:
use_ipv6 = True
dev_name = None
if len(iface_details) >= 1:
dev = iface_details[0].strip().lower()
@@ -122,6 +126,7 @@ def translate_network(settings):
if not dev_name:
continue
iface_info = {}
iface_info['ipv6'] = {}
if len(iface_details) >= 3:
proto_type = iface_details[2].strip().lower()
# Seems like this can be 'loopback' which we don't
@@ -129,35 +134,50 @@ def translate_network(settings):
if proto_type in ['dhcp', 'static']:
iface_info['bootproto'] = proto_type
# These can just be copied over
for k in ['netmask', 'address', 'gateway', 'broadcast']:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
# Name server search info provided??
if 'dns-search' in info:
iface_info['dns-search'] = info['dns-search'].split()
# Is any mac address spoofing going on??
if 'hwaddress' in info:
hw_info = info['hwaddress'].lower().strip()
hw_split = hw_info.split(None, 1)
if len(hw_split) == 2 and hw_split[0].startswith('ether'):
hw_addr = hw_split[1]
if hw_addr:
iface_info['hwaddress'] = hw_addr
real_ifaces[dev_name] = iface_info
if use_ipv6:
for k in ['address', 'gateway']:
if k in info:
val = info[k].strip().lower()
if val:
iface_info['ipv6'][k] = val
else:
for k in ['netmask', 'address', 'gateway', 'broadcast']:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
# Name server search info provided??
if 'dns-search' in info:
iface_info['dns-search'] = info['dns-search'].split()
# Is any mac address spoofing going on??
if 'hwaddress' in info:
hw_info = info['hwaddress'].lower().strip()
hw_split = hw_info.split(None, 1)
if len(hw_split) == 2 and hw_split[0].startswith('ether'):
hw_addr = hw_split[1]
if hw_addr:
iface_info['hwaddress'] = hw_addr
# If ipv6 is enabled, device will have multiple IPs, so we need to
# update the dictionary instead of overwriting it...
if dev_name in real_ifaces:
real_ifaces[dev_name].update(iface_info)
else:
real_ifaces[dev_name] = iface_info
# Check for those that should be started on boot via 'auto'
for (cmd, args) in entries:
args = args.split(None)
if not args:
continue
dev_name = args[0].strip().lower()
if cmd == 'auto':
# Seems like auto can be like 'auto eth0 eth0:1' so just get the
# first part out as the device name
args = args.split(None)
if not args:
continue
dev_name = args[0].strip().lower()
if dev_name in real_ifaces:
real_ifaces[dev_name]['auto'] = True
if cmd == 'iface' and 'inet6' in args:
real_ifaces[dev_name]['inet6'] = True
return real_ifaces

View File

@@ -72,6 +72,7 @@ class Distro(distros.Distro):
nameservers = []
searchservers = []
dev_names = entries.keys()
use_ipv6 = False
for (dev, info) in entries.iteritems():
net_fn = self.network_script_tpl % (dev)
net_cfg = {
@@ -84,6 +85,13 @@ class Distro(distros.Distro):
'MACADDR': info.get('hwaddress'),
'ONBOOT': _make_sysconfig_bool(info.get('auto')),
}
if info.get('inet6'):
use_ipv6 = True
net_cfg.update({
'IPV6INIT': _make_sysconfig_bool(True),
'IPV6ADDR': info.get('ipv6').get('address'),
'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
})
rhel_util.update_sysconfig_file(net_fn, net_cfg)
if 'dns-nameservers' in info:
nameservers.extend(info['dns-nameservers'])
@@ -96,10 +104,14 @@ class Distro(distros.Distro):
net_cfg = {
'NETWORKING': _make_sysconfig_bool(True),
}
# If IPv6 interface present, enable ipv6 networking
if use_ipv6:
net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True)
net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False)
rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
return dev_names
def _dist_uses_systemd(self):
def uses_systemd(self):
# Fedora 18 and RHEL 7 were the first adopters in their series
(dist, vers) = util.system_info()['dist'][:2]
major = (int)(vers.split('.')[0])
@@ -107,7 +119,7 @@ class Distro(distros.Distro):
or (dist.startswith('Fedora') and major >= 18))
def apply_locale(self, locale, out_fn=None):
if self._dist_uses_systemd():
if self.uses_systemd():
if not out_fn:
out_fn = self.systemd_locale_conf_fn
out_fn = self.systemd_locale_conf_fn
@@ -120,7 +132,7 @@ class Distro(distros.Distro):
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
def _write_hostname(self, hostname, out_fn):
if self._dist_uses_systemd():
if self.uses_systemd():
util.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
@@ -136,14 +148,14 @@ class Distro(distros.Distro):
return hostname
def _read_system_hostname(self):
if self._dist_uses_systemd():
if self.uses_systemd():
host_fn = self.systemd_hostname_conf_fn
else:
host_fn = self.hostname_conf_fn
return (host_fn, self._read_hostname(host_fn))
def _read_hostname(self, filename, default=None):
if self._dist_uses_systemd():
if self.uses_systemd():
(out, _err) = util.subp(['hostname'])
if len(out):
return out
@@ -164,7 +176,7 @@ class Distro(distros.Distro):
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
if self._dist_uses_systemd():
if self.uses_systemd():
# Currently, timedatectl complains if invoked during startup
# so for compatibility, create the link manually.
util.del_file(self.tz_local_fn)

View File

@@ -72,6 +72,7 @@ def netdev_info(empty=""):
"bcast:": "bcast", "broadcast": "bcast",
"mask:": "mask", "netmask": "mask",
"hwaddr": "hwaddr", "ether": "hwaddr",
"scope": "scope",
}
for origfield, field in ifconfigfields.items():
target = "%s%s" % (field, fieldpost)
@@ -96,7 +97,12 @@ def netdev_info(empty=""):
def route_info():
(route_out, _err) = util.subp(["netstat", "-rn"])
routes = []
(route_out6, _err6) = util.subp(["netstat", "-A inet6", "-n"])
routes = {}
routes['ipv4'] = []
routes['ipv6'] = []
entries = route_out.splitlines()[1:]
for line in entries:
if not line:
@@ -132,7 +138,26 @@ def route_info():
'iface': toks[7],
}
routes.append(entry)
routes['ipv4'].append(entry)
entries6 = route_out6.splitlines()[1:]
for line in entries6:
if not line:
continue
toks = line.split()
if (len(toks) < 6 or toks[0] == "Kernel" or
toks[0] == "Proto" or toks[0] == "Active"):
continue
entry = {
'proto': toks[0],
'recv-q': toks[1],
'send-q': toks[2],
'local address': toks[3],
'foreign address': toks[4],
'state': toks[5],
}
routes['ipv6'].append(entry)
return routes
@@ -156,10 +181,12 @@ def netdev_pformat():
lines.append(util.center("Net device info failed", '!', 80))
netdev = None
if netdev is not None:
fields = ['Device', 'Up', 'Address', 'Mask', 'Hw-Address']
fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
tbl = PrettyTable(fields)
for (dev, d) in netdev.iteritems():
tbl.add_row([dev, d["up"], d["addr"], d["mask"], d["hwaddr"]])
tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
if d["addr6"]:
tbl.add_row([dev, d["up"], d["addr6"], ".", d["scope6"], d["hwaddr"]])
netdev_s = tbl.get_string()
max_len = len(max(netdev_s.splitlines(), key=len))
header = util.center("Net device info", "+", max_len)
@@ -176,15 +203,30 @@ def route_pformat():
util.logexc(LOG, "Route info failed: %s" % e)
routes = None
if routes is not None:
fields = ['Route', 'Destination', 'Gateway',
fields_v4 = ['Route', 'Destination', 'Gateway',
'Genmask', 'Interface', 'Flags']
tbl = PrettyTable(fields)
for (n, r) in enumerate(routes):
if routes.get('ipv6') is not None:
fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', 'Local Address',
'Foreign Address', 'State']
tbl_v4 = PrettyTable(fields_v4)
for (n, r) in enumerate(routes.get('ipv4')):
route_id = str(n)
tbl.add_row([route_id, r['destination'],
tbl_v4.add_row([route_id, r['destination'],
r['gateway'], r['genmask'],
r['iface'], r['flags']])
route_s = tbl.get_string()
route_s = tbl_v4.get_string()
if fields_v6:
tbl_v6 = PrettyTable(fields_v6)
for (n, r) in enumerate(routes.get('ipv6')):
route_id = str(n)
tbl_v6.add_row([route_id, r['proto'],
r['recv-q'], r['send-q'],
r['local address'], r['foreign address'],
r['state']])
route_s = route_s + tbl_v6.get_string()
max_len = len(max(route_s.splitlines(), key=len))
header = util.center("Route info", "+", max_len)
lines.extend([header, route_s])

View File

@@ -0,0 +1,104 @@
# vi: ts=4 expandtab
#
# Author: Neal Shrader <neal@digitalocean.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import log as logging
from cloudinit import util
from cloudinit import sources
from cloudinit import ec2_utils
from types import StringType
import functools
LOG = logging.getLogger(__name__)
# Builtin datasource defaults; user config under datasource/DigitalOcean
# is merged on top of these.
BUILTIN_DS_CONFIG = {
    'metadata_url': 'http://169.254.169.254/metadata/v1/',
    'mirrors_url': 'http://mirrors.digitalocean.com/',
}
# Default retry count and timeout (seconds) for metadata fetches.
MD_RETRIES = 0
MD_TIMEOUT = 1
class DataSourceDigitalOcean(sources.DataSource):
    """Datasource that reads instance metadata from the DigitalOcean
    metadata service (an EC2-style tree rooted at 'metadata_url')."""

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.metadata = dict()
        # User config (datasource/DigitalOcean) is merged over the
        # builtin defaults.
        self.ds_cfg = util.mergemanydict([
            util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
            BUILTIN_DS_CONFIG])
        self.metadata_address = self.ds_cfg['metadata_url']

        if self.ds_cfg.get('retries'):
            self.retries = self.ds_cfg['retries']
        else:
            self.retries = MD_RETRIES

        if self.ds_cfg.get('timeout'):
            self.timeout = self.ds_cfg['timeout']
        else:
            self.timeout = MD_TIMEOUT

    def get_data(self):
        """Crawl the metadata tree and materialize it into self.metadata.

        Returns True when metadata containing an 'id' was fetched.
        """
        caller = functools.partial(util.read_file_or_url, timeout=self.timeout,
                                   retries=self.retries)
        md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)),
                                            base_url=self.metadata_address,
                                            caller=caller)

        self.metadata = md.materialize()

        if self.metadata.get('id'):
            return True
        else:
            return False

    def get_userdata_raw(self):
        # 'user-data' arrives as an iterable of lines; rejoin them.
        return "\n".join(self.metadata['user-data'])

    def get_vendordata_raw(self):
        return "\n".join(self.metadata['vendor-data'])

    def get_public_ssh_keys(self):
        # 'public-keys' may be a single string or already a list.
        if type(self.metadata['public-keys']) is StringType:
            return [self.metadata['public-keys']]
        else:
            return self.metadata['public-keys']

    @property
    def availability_zone(self):
        # DigitalOcean regions play the role of availability zones.
        return self.metadata['region']

    def get_instance_id(self):
        return self.metadata['id']

    def get_hostname(self, fqdn=False):
        # fqdn is accepted for interface compat; the metadata service only
        # provides a single hostname value.
        return self.metadata['hostname']

    def get_package_mirror_info(self):
        return self.ds_cfg['mirrors_url']

    @property
    def launch_index(self):
        # Not a concept on DigitalOcean.
        return None
# Used to match classes to dependencies
datasources = [
    (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
    """Return the datasource classes above whose dependencies are met."""
    return sources.list_from_depends(depends, datasources)

View File

@@ -293,7 +293,10 @@ def parse_ssh_config(fname):
if not line or line.startswith("#"):
lines.append(SshdConfigLine(line))
continue
(key, val) = line.split(None, 1)
try:
key, val = line.split(None, 1)
except ValueError:
key, val = line.split('=', 1)
lines.append(SshdConfigLine(line, key, val))
return lines

View File

@@ -89,9 +89,11 @@ def detect_template(text):
return CTemplate(content, searchList=[params]).respond()
def jinja_render(content, params):
    """Render *content* as a jinja template with *params*.

    Older jinja (pre-2.7, which lacks keep_trailing_newline) strips a
    trailing newline during rendering, so one is re-appended when the
    source content ended with it (LP: #1355343).

    NOTE: removed a stray leftover merge line (the old return statement
    without the ``+ add`` suffix) that shadowed the fixed one.
    """
    # keep_trailing_newline is in jinja2 2.7+, not 2.6
    add = "\n" if content.endswith("\n") else ""
    return JTemplate(content,
                     undefined=jinja2.StrictUndefined,
                     trim_blocks=True).render(**params) + add
if text.find("\n") != -1:
ident, rest = text.split("\n", 1)

View File

@@ -399,6 +399,10 @@ def get_cfg_option_str(yobj, key, default=None):
return val
def get_cfg_option_int(yobj, key, default=0):
    """Fetch *key* from config dict *yobj* coerced to an int.

    Reuses get_cfg_option_str for lookup; raises ValueError if the stored
    value is not parseable as an integer.
    """
    return int(get_cfg_option_str(yobj, key, default=default))
def system_info():
return {
'platform': platform.platform(),
@@ -1146,7 +1150,7 @@ def chownbyname(fname, user=None, group=None):
# this returns the specific 'mode' entry, cleanly formatted, with value
def get_output_cfg(cfg, mode):
ret = [None, None]
if cfg or 'output' not in cfg:
if not cfg or 'output' not in cfg:
return ret
outcfg = cfg['output']
@@ -1270,14 +1274,14 @@ def read_write_cmdline_url(target_fn):
logexc(LOG, "Failed writing url content to %s", target_fn)
def yaml_dumps(obj):
formatted = yaml.dump(obj,
line_break="\n",
indent=4,
explicit_start=True,
explicit_end=True,
default_flow_style=False)
return formatted
def yaml_dumps(obj, explicit_start=True, explicit_end=True):
    """Serialize *obj* to a YAML string via yaml.safe_dump.

    Document start/end markers ('---'/'...') are emitted by default but
    can be toggled off by the caller; unicode is always allowed.
    """
    dump_kwargs = {
        'line_break': "\n",
        'indent': 4,
        'explicit_start': explicit_start,
        'explicit_end': explicit_end,
        'default_flow_style': False,
        'allow_unicode': True,
    }
    return yaml.safe_dump(obj, **dump_kwargs)
def ensure_dir(path, mode=None):

View File

@@ -20,7 +20,7 @@ from distutils import version as vr
def version():
return vr.StrictVersion("0.7.6")
return vr.StrictVersion("0.7.7")
def version_string():

View File

@@ -27,6 +27,8 @@ project = 'Cloud-Init'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {

View File

@@ -1,3 +1,342 @@
=========
=======
Modules
=========
=======
Apt Configure
-------------
**Internal name:** ``cc_apt_configure``
.. automodule:: cloudinit.config.cc_apt_configure
Apt Pipelining
--------------
**Internal name:** ``cc_apt_pipelining``
.. automodule:: cloudinit.config.cc_apt_pipelining
Bootcmd
-------
**Internal name:** ``cc_bootcmd``
.. automodule:: cloudinit.config.cc_bootcmd
Byobu
-----
**Internal name:** ``cc_byobu``
.. automodule:: cloudinit.config.cc_byobu
Ca Certs
--------
**Internal name:** ``cc_ca_certs``
.. automodule:: cloudinit.config.cc_ca_certs
Chef
----
**Internal name:** ``cc_chef``
.. automodule:: cloudinit.config.cc_chef
:members:
Debug
-----
**Internal name:** ``cc_debug``
.. automodule:: cloudinit.config.cc_debug
:members:
Disable Ec2 Metadata
--------------------
**Internal name:** ``cc_disable_ec2_metadata``
.. automodule:: cloudinit.config.cc_disable_ec2_metadata
Disk Setup
----------
**Internal name:** ``cc_disk_setup``
.. automodule:: cloudinit.config.cc_disk_setup
Emit Upstart
------------
**Internal name:** ``cc_emit_upstart``
.. automodule:: cloudinit.config.cc_emit_upstart
Final Message
-------------
**Internal name:** ``cc_final_message``
.. automodule:: cloudinit.config.cc_final_message
Foo
---
**Internal name:** ``cc_foo``
.. automodule:: cloudinit.config.cc_foo
Growpart
--------
**Internal name:** ``cc_growpart``
.. automodule:: cloudinit.config.cc_growpart
Grub Dpkg
---------
**Internal name:** ``cc_grub_dpkg``
.. automodule:: cloudinit.config.cc_grub_dpkg
Keys To Console
---------------
**Internal name:** ``cc_keys_to_console``
.. automodule:: cloudinit.config.cc_keys_to_console
Landscape
---------
**Internal name:** ``cc_landscape``
.. automodule:: cloudinit.config.cc_landscape
Locale
------
**Internal name:** ``cc_locale``
.. automodule:: cloudinit.config.cc_locale
Mcollective
-----------
**Internal name:** ``cc_mcollective``
.. automodule:: cloudinit.config.cc_mcollective
Migrator
--------
**Internal name:** ``cc_migrator``
.. automodule:: cloudinit.config.cc_migrator
Mounts
------
**Internal name:** ``cc_mounts``
.. automodule:: cloudinit.config.cc_mounts
Package Update Upgrade Install
------------------------------
**Internal name:** ``cc_package_update_upgrade_install``
.. automodule:: cloudinit.config.cc_package_update_upgrade_install
Phone Home
----------
**Internal name:** ``cc_phone_home``
.. automodule:: cloudinit.config.cc_phone_home
Power State Change
------------------
**Internal name:** ``cc_power_state_change``
.. automodule:: cloudinit.config.cc_power_state_change
Puppet
------
**Internal name:** ``cc_puppet``
.. automodule:: cloudinit.config.cc_puppet
Resizefs
--------
**Internal name:** ``cc_resizefs``
.. automodule:: cloudinit.config.cc_resizefs
Resolv Conf
-----------
**Internal name:** ``cc_resolv_conf``
.. automodule:: cloudinit.config.cc_resolv_conf
Rightscale Userdata
-------------------
**Internal name:** ``cc_rightscale_userdata``
.. automodule:: cloudinit.config.cc_rightscale_userdata
Rsyslog
-------
**Internal name:** ``cc_rsyslog``
.. automodule:: cloudinit.config.cc_rsyslog
Runcmd
------
**Internal name:** ``cc_runcmd``
.. automodule:: cloudinit.config.cc_runcmd
Salt Minion
-----------
**Internal name:** ``cc_salt_minion``
.. automodule:: cloudinit.config.cc_salt_minion
Scripts Per Boot
----------------
**Internal name:** ``cc_scripts_per_boot``
.. automodule:: cloudinit.config.cc_scripts_per_boot
Scripts Per Instance
--------------------
**Internal name:** ``cc_scripts_per_instance``
.. automodule:: cloudinit.config.cc_scripts_per_instance
Scripts Per Once
----------------
**Internal name:** ``cc_scripts_per_once``
.. automodule:: cloudinit.config.cc_scripts_per_once
Scripts User
------------
**Internal name:** ``cc_scripts_user``
.. automodule:: cloudinit.config.cc_scripts_user
Scripts Vendor
--------------
**Internal name:** ``cc_scripts_vendor``
.. automodule:: cloudinit.config.cc_scripts_vendor
Seed Random
-----------
**Internal name:** ``cc_seed_random``
.. automodule:: cloudinit.config.cc_seed_random
Set Hostname
------------
**Internal name:** ``cc_set_hostname``
.. automodule:: cloudinit.config.cc_set_hostname
Set Passwords
-------------
**Internal name:** ``cc_set_passwords``
.. automodule:: cloudinit.config.cc_set_passwords
Ssh
---
**Internal name:** ``cc_ssh``
.. automodule:: cloudinit.config.cc_ssh
Ssh Authkey Fingerprints
------------------------
**Internal name:** ``cc_ssh_authkey_fingerprints``
.. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints
Ssh Import Id
-------------
**Internal name:** ``cc_ssh_import_id``
.. automodule:: cloudinit.config.cc_ssh_import_id
Timezone
--------
**Internal name:** ``cc_timezone``
.. automodule:: cloudinit.config.cc_timezone
Ubuntu Init Switch
------------------
**Internal name:** ``cc_ubuntu_init_switch``
.. automodule:: cloudinit.config.cc_ubuntu_init_switch
:members:
Update Etc Hosts
----------------
**Internal name:** ``cc_update_etc_hosts``
.. automodule:: cloudinit.config.cc_update_etc_hosts
Update Hostname
---------------
**Internal name:** ``cc_update_hostname``
.. automodule:: cloudinit.config.cc_update_hostname
Users Groups
------------
**Internal name:** ``cc_users_groups``
.. automodule:: cloudinit.config.cc_users_groups
Write Files
-----------
**Internal name:** ``cc_write_files``
.. automodule:: cloudinit.config.cc_write_files
Yum Add Repo
------------
**Internal name:** ``cc_yum_add_repo``
.. automodule:: cloudinit.config.cc_yum_add_repo

View File

@@ -0,0 +1,21 @@
The `DigitalOcean`_ datasource consumes the content served by DigitalOcean's `metadata service`_.
This metadata service provides information about the running droplet via HTTP over the
link-local address 169.254.169.254. The metadata API endpoints are fully described at
`https://developers.digitalocean.com/metadata/ <https://developers.digitalocean.com/metadata/>`_.
Configuration
~~~~~~~~~~~~~
DigitalOcean's datasource can be configured as follows:
datasource:
DigitalOcean:
retries: 3
timeout: 2
- *retries*: Determines the number of times to attempt to connect to the metadata service
- *timeout*: Determines the timeout in seconds to wait for a response from the metadata service
.. _DigitalOcean: http://digitalocean.com/
.. _metadata service: https://developers.digitalocean.com/metadata/
.. _Full documentation: https://developers.digitalocean.com/metadata/

View File

@@ -99,6 +99,11 @@ rm -rf \$RPM_BUILD_ROOT%{python_sitelib}/tests
mkdir -p \$RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
mkdir -p \$RPM_BUILD_ROOT/%{_libexecdir}/%{name}
#if $systemd
mkdir -p \$RPM_BUILD_ROOT/%{_unitdir}
cp -p systemd/* \$RPM_BUILD_ROOT/%{_unitdir}
#end if
%clean
rm -rf \$RPM_BUILD_ROOT

View File

@@ -23,6 +23,7 @@
from glob import glob
import os
import sys
import setuptools
from setuptools.command.install import install
@@ -90,6 +91,17 @@ elif os.path.isfile('/etc/redhat-release'):
USR_LIB_EXEC = "/usr/libexec"
# Avoid having datafiles installed in a virtualenv...
def in_virtualenv():
try:
if sys.real_prefix == sys.prefix:
return False
else:
return True
except AttributeError:
return False
def get_version():
cmd = ['tools/read-version']
(ver, _e) = tiny_p(cmd)
@@ -139,6 +151,29 @@ class InitsysInstallData(install):
self.distribution.reinitialize_command('install_data', True)
if in_virtualenv():
data_files = []
cmdclass = {}
else:
data_files = [
(ETC + '/cloud', glob('config/*.cfg')),
(ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
(ETC + '/cloud/templates', glob('templates/*')),
(USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init',
'tools/write-ssh-key-fingerprints']),
(USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
(USR + '/share/doc/cloud-init/examples',
[f for f in glob('doc/examples/*') if is_f(f)]),
(USR + '/share/doc/cloud-init/examples/seed',
[f for f in glob('doc/examples/seed/*') if is_f(f)]),
]
# Use a subclass for install that handles
# adding on the right init system configuration files
cmdclass = {
'install': InitsysInstallData,
}
setuptools.setup(name='cloud-init',
version=get_version(),
description='EC2 initialisation magic',
@@ -150,23 +185,7 @@ setuptools.setup(name='cloud-init',
'tools/cloud-init-per',
],
license='GPLv3',
data_files=[(ETC + '/cloud', glob('config/*.cfg')),
(ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
(ETC + '/cloud/templates', glob('templates/*')),
(USR_LIB_EXEC + '/cloud-init',
['tools/uncloud-init',
'tools/write-ssh-key-fingerprints']),
(USR + '/share/doc/cloud-init',
[f for f in glob('doc/*') if is_f(f)]),
(USR + '/share/doc/cloud-init/examples',
[f for f in glob('doc/examples/*') if is_f(f)]),
(USR + '/share/doc/cloud-init/examples/seed',
[f for f in glob('doc/examples/seed/*') if is_f(f)]),
],
data_files=data_files,
install_requires=read_requires(),
cmdclass={
# Use a subclass for install that handles
# adding on the right init system configuration files
'install': InitsysInstallData,
},
cmdclass=cmdclass,
)

View File

@@ -9,17 +9,50 @@ you need to add the following to config:
validation_name: XYZ
server_url: XYZ
-#}
log_level :info
log_location "/var/log/chef/client.log"
ssl_verify_mode :verify_none
{{generated_by}}
{#
The reason these are not in quotes is because they are ruby
symbols that will be placed inside here, and not actual strings...
#}
{% if log_level %}
log_level {{log_level}}
{% endif %}
{% if ssl_verify_mode %}
ssl_verify_mode {{ssl_verify_mode}}
{% endif %}
{% if log_location %}
log_location "{{log_location}}"
{% endif %}
{% if validation_name %}
validation_client_name "{{validation_name}}"
validation_key "/etc/chef/validation.pem"
client_key "/etc/chef/client.pem"
{% endif %}
{% if validation_key %}
validation_key "{{validation_key}}"
{% endif %}
{% if client_key %}
client_key "{{client_key}}"
{% endif %}
{% if server_url %}
chef_server_url "{{server_url}}"
{% endif %}
{% if environment %}
environment "{{environment}}"
{% endif %}
{% if node_name %}
node_name "{{node_name}}"
json_attribs "/etc/chef/firstboot.json"
file_cache_path "/var/cache/chef"
file_backup_path "/var/backups/chef"
pid_file "/var/run/chef/client.pid"
{% endif %}
{% if json_attribs %}
json_attribs "{{json_attribs}}"
{% endif %}
{% if file_cache_path %}
file_cache_path "{{file_cache_path}}"
{% endif %}
{% if file_backup_path %}
file_backup_path "{{file_backup_path}}"
{% endif %}
{% if pid_file %}
pid_file "{{pid_file}}"
{% endif %}
{% if show_time %}
Chef::Log::Formatter.show_time = true
{% endif %}

View File

@@ -1,4 +1,5 @@
httpretty>=0.7.1
mock
mocker
nose
pep8==1.5.7

View File

@@ -35,6 +35,11 @@ else:
if PY26:
# For now add these on, taken from python 2.7 + slightly adjusted
class TestCase(unittest.TestCase):
def assertIs(self, expr1, expr2, msg=None):
if expr1 is not expr2:
standardMsg = '%r is not %r' % (expr1, expr2)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
if member not in container:
standardMsg = '%r not found in %r' % (member, container)

View File

@@ -0,0 +1,126 @@
#
# Copyright (C) 2014 Neal Shrader
#
# Author: Neal Shrader <neal@digitalocean.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import httpretty
import re
from types import ListType
from urlparse import urlparse
from cloudinit import settings
from cloudinit import helpers
from cloudinit.sources import DataSourceDigitalOcean
from .. import helpers as test_helpers
# Abbreviated for the test
DO_INDEX = """id
hostname
user-data
vendor-data
public-keys
region"""

DO_MULTIPLE_KEYS = """ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com
ssh-rsa AAAAB3NzaC1yc2EAAAA... neal2@digitalocean.com"""
DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com"

# Canned metadata responses, keyed by path relative to /metadata/v1/;
# the '' entry is the index listing itself.
DO_META = {
    '': DO_INDEX,
    'user-data': '#!/bin/bash\necho "user-data"',
    'vendor-data': '#!/bin/bash\necho "vendor-data"',
    'public-keys': DO_SINGLE_KEY,
    'region': 'nyc3',
    'id': '2000000',
    'hostname': 'cloudinit-test',
}

# Matches any request to the DigitalOcean metadata endpoint on the
# link-local address.
MD_URL_RE = re.compile(r'http://169.254.169.254/metadata/v1/.*')
def _request_callback(method, uri, headers):
    """httpretty callback emulating the DigitalOcean metadata service.

    Returns ``(200, headers, body)`` with the canned ``DO_META`` entry
    for a known ``/metadata/v1/`` path, and ``(404, headers, '')`` for
    anything else.
    """
    prefix = '/metadata/v1/'
    request_path = urlparse(uri).path
    lookup_key = None
    if request_path.startswith(prefix):
        # Everything after the prefix ('' for the index listing).
        lookup_key = request_path.split(prefix)[1:][0]
    if lookup_key not in DO_META:
        return (404, headers, '')
    return (200, headers, DO_META.get(lookup_key))
class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
    """Tests for the DigitalOcean datasource against a mocked metadata
    service (httpretty serving the canned DO_META fixtures)."""

    def setUp(self):
        self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
            settings.CFG_BUILTIN, None,
            helpers.Paths({}))
        super(TestDataSourceDigitalOcean, self).setUp()

    @httpretty.activate
    def test_connection(self):
        """get_data() succeeds when the metadata service responds."""
        httpretty.register_uri(
            httpretty.GET, MD_URL_RE,
            body=_request_callback)

        success = self.ds.get_data()
        self.assertTrue(success)

    @httpretty.activate
    def test_metadata(self):
        """Each metadata field is exposed through the datasource API."""
        httpretty.register_uri(
            httpretty.GET, MD_URL_RE,
            body=_request_callback)
        self.ds.get_data()

        self.assertEqual(DO_META.get('user-data'),
                         self.ds.get_userdata_raw())

        self.assertEqual(DO_META.get('vendor-data'),
                         self.ds.get_vendordata_raw())

        self.assertEqual(DO_META.get('region'),
                         self.ds.availability_zone)

        self.assertEqual(DO_META.get('id'),
                         self.ds.get_instance_id())

        self.assertEqual(DO_META.get('hostname'),
                         self.ds.get_hostname())

        self.assertEqual('http://mirrors.digitalocean.com/',
                         self.ds.get_package_mirror_info())

        # Single key
        self.assertEqual([DO_META.get('public-keys')],
                         self.ds.get_public_ssh_keys())

        self.assertIs(type(self.ds.get_public_ssh_keys()), ListType)

    @httpretty.activate
    def test_multiple_ssh_keys(self):
        """Multiple newline-separated keys come back as a list of keys."""
        # BUG FIX: the fixture key is 'public-keys' (hyphen), matching
        # the metadata path served by _request_callback; the previous
        # code assigned to 'public_keys' (underscore), which is never
        # read, so this test silently re-ran the single-key case.
        old_key = DO_META['public-keys']
        DO_META['public-keys'] = DO_MULTIPLE_KEYS
        try:
            httpretty.register_uri(
                httpretty.GET, MD_URL_RE,
                body=_request_callback)
            self.ds.get_data()

            # Multiple keys
            self.assertEqual(DO_META.get('public-keys').splitlines(),
                             self.ds.get_public_ssh_keys())

            self.assertIs(type(self.ds.get_public_ssh_keys()), ListType)
        finally:
            # Restore the shared module-level fixture so test execution
            # order cannot leak the multi-key value into other tests.
            DO_META['public-keys'] = old_key

View File

@@ -19,7 +19,6 @@
import copy
import json
import re
import unittest
from StringIO import StringIO
@@ -318,7 +317,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertIsNone(ds_os.version)
class TestVendorDataLoading(unittest.TestCase):
class TestVendorDataLoading(test_helpers.TestCase):
def cvj(self, data):
return openstack.convert_vendordata_json(data)

View File

@@ -30,6 +30,36 @@ auto eth1
iface eth1 inet dhcp
'''
BASE_NET_CFG_IPV6 = '''
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 192.168.1.5
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.1.0
gateway 192.168.1.254
iface eth0 inet6 static
address 2607:f0d0:1002:0011::2
netmask 64
gateway 2607:f0d0:1002:0011::1
iface eth1 inet static
address 192.168.1.6
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.1.0
gateway 192.168.1.254
iface eth1 inet6 static
address 2607:f0d0:1002:0011::3
netmask 64
gateway 2607:f0d0:1002:0011::1
'''
class WriteBuffer(object):
def __init__(self):
@@ -174,6 +204,97 @@ NETWORKING=yes
self.assertCfgEquals(expected_buf, str(write_buf))
self.assertEquals(write_buf.mode, 0644)
    def test_write_ipv6_rhel(self):
        """IPv6 static config renders IPV6INIT/IPV6ADDR/IPV6_DEFAULTGW
        alongside the IPv4 settings in the rhel ifcfg files."""
        rh_distro = self._get_distro('rhel')
        # Mock out all filesystem access; every util.write_file call is
        # captured into write_bufs, keyed by destination path.
        write_mock = self.mocker.replace(util.write_file,
                                         spec=False, passthrough=False)
        load_mock = self.mocker.replace(util.load_file,
                                        spec=False, passthrough=False)
        exists_mock = self.mocker.replace(os.path.isfile,
                                          spec=False, passthrough=False)

        write_bufs = {}

        def replace_write(filename, content, mode=0644, omode="wb"):
            # Record what would have been written instead of touching disk.
            buf = WriteBuffer()
            buf.mode = mode
            buf.omode = omode
            buf.write(content)
            write_bufs[filename] = buf

        # No pre-existing config files; loads return empty content.
        exists_mock(mocker.ARGS)
        self.mocker.count(0, None)
        self.mocker.result(False)

        load_mock(mocker.ARGS)
        self.mocker.count(0, None)
        self.mocker.result('')

        # Three ifcfg files (lo, eth0, eth1) ...
        for _i in range(0, 3):
            write_mock(mocker.ARGS)
            self.mocker.call(replace_write)

        # ... plus the global /etc/sysconfig/network file.
        write_mock(mocker.ARGS)
        self.mocker.call(replace_write)

        self.mocker.replay()
        rh_distro.apply_network(BASE_NET_CFG_IPV6, False)

        self.assertEquals(len(write_bufs), 4)
        self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', write_bufs)
        write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo']
        expected_buf = '''
DEVICE="lo"
ONBOOT=yes
'''
        self.assertCfgEquals(expected_buf, str(write_buf))
        self.assertEquals(write_buf.mode, 0644)

        self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
                      write_bufs)
        write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
        expected_buf = '''
DEVICE="eth0"
BOOTPROTO="static"
NETMASK="255.255.255.0"
IPADDR="192.168.1.5"
ONBOOT=yes
GATEWAY="192.168.1.254"
BROADCAST="192.168.1.0"
IPV6INIT=yes
IPV6ADDR="2607:f0d0:1002:0011::2"
IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
'''
        self.assertCfgEquals(expected_buf, str(write_buf))
        self.assertEquals(write_buf.mode, 0644)

        self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
                      write_bufs)
        write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
        expected_buf = '''
DEVICE="eth1"
BOOTPROTO="static"
NETMASK="255.255.255.0"
IPADDR="192.168.1.6"
ONBOOT=no
GATEWAY="192.168.1.254"
BROADCAST="192.168.1.0"
IPV6INIT=yes
IPV6ADDR="2607:f0d0:1002:0011::3"
IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
'''
        self.assertCfgEquals(expected_buf, str(write_buf))
        self.assertEquals(write_buf.mode, 0644)

        self.assertIn('/etc/sysconfig/network', write_bufs)
        write_buf = write_bufs['/etc/sysconfig/network']
        expected_buf = '''
# Created by cloud-init v. 0.7
NETWORKING=yes
NETWORKING_IPV6=yes
IPV6_AUTOCONF=no
'''
        self.assertCfgEquals(expected_buf, str(write_buf))
        self.assertEquals(write_buf.mode, 0644)
def test_simple_write_freebsd(self):
fbsd_distro = self._get_distro('freebsd')
util_mock = self.mocker.replace(util.write_file,
@@ -182,6 +303,12 @@ NETWORKING=yes
spec=False, passthrough=False)
load_mock = self.mocker.replace(util.load_file,
spec=False, passthrough=False)
subp_mock = self.mocker.replace(util.subp,
spec=False, passthrough=False)
subp_mock(['ifconfig', '-a'])
self.mocker.count(0, None)
self.mocker.result(('vtnet0', ''))
exists_mock(mocker.ARGS)
self.mocker.count(0, None)
@@ -190,6 +317,7 @@ NETWORKING=yes
write_bufs = {}
read_bufs = {
'/etc/rc.conf': '',
'/etc/resolv.conf': '',
}
def replace_write(filename, content, mode=0644, omode="wb"):

View File

@@ -0,0 +1,121 @@
import json
import os
from cloudinit.config import cc_chef
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
from cloudinit import util
from cloudinit.sources import DataSourceNone
from .. import helpers as t_help
import logging
LOG = logging.getLogger(__name__)
class TestChef(t_help.FilesystemMockingTestCase):
    """Tests for the cc_chef config module, run against a throwaway
    directory that stands in for the root filesystem."""

    def setUp(self):
        super(TestChef, self).setUp()
        # Scratch dir used as the fake '/' once patchUtils/patchOS run.
        self.tmp = self.makeDir(prefix="unittest_")

    def fetch_cloud(self, distro_kind):
        """Return a minimal Cloud for *distro_kind*, backed by
        DataSourceNone (no real metadata, no network access)."""
        cls = distros.fetch(distro_kind)
        paths = helpers.Paths({})
        distro = cls(distro_kind, {}, paths)
        ds = DataSourceNone.DataSourceNone({}, distro, paths, None)
        return cloud.Cloud(ds, paths, {}, distro, None)

    def test_no_config(self):
        """With no 'chef' key in the config the handler must not
        create any of the chef directories."""
        self.patchUtils(self.tmp)
        self.patchOS(self.tmp)

        cfg = {}
        cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
        for d in cc_chef.CHEF_DIRS:
            self.assertFalse(os.path.isdir(d))

    def test_basic_config(self):
        # This should create a file of the format...
        """
        # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000
        log_level :info
        ssl_verify_mode :verify_none
        log_location "/var/log/chef/client.log"
        validation_client_name "bob"
        validation_key "/etc/chef/validation.pem"
        client_key "/etc/chef/client.pem"
        chef_server_url "localhost"
        environment "_default"
        node_name "iid-datasource-none"
        json_attribs "/etc/chef/firstboot.json"
        file_cache_path "/var/cache/chef"
        file_backup_path "/var/backups/chef"
        pid_file "/var/run/chef/client.pid"
        Chef::Log::Formatter.show_time = true
        """
        tpl_file = util.load_file('templates/chef_client.rb.tmpl')
        self.patchUtils(self.tmp)
        self.patchOS(self.tmp)

        # The handler renders /etc/cloud/templates/chef_client.rb.tmpl,
        # so seed the fake filesystem with the real template first.
        util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
        cfg = {
            'chef': {
                'server_url': 'localhost',
                'validation_name': 'bob',
            },
        }
        cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
        for d in cc_chef.CHEF_DIRS:
            self.assertTrue(os.path.isdir(d))
        c = util.load_file(cc_chef.CHEF_RB_PATH)
        # Every user-supplied value must appear in the rendered client.rb.
        for k, v in cfg['chef'].items():
            self.assertIn(v, c)
        # ...as must every string-typed template default.
        for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items():
            if isinstance(v, basestring):
                self.assertIn(v, c)
        c = util.load_file(cc_chef.CHEF_FB_PATH)
        # No run_list/initial_attributes -> empty firstboot json.
        self.assertEqual({}, json.loads(c))

    def test_firstboot_json(self):
        """run_list and initial_attributes are merged into firstboot.json."""
        self.patchUtils(self.tmp)
        self.patchOS(self.tmp)

        cfg = {
            'chef': {
                'server_url': 'localhost',
                'validation_name': 'bob',
                'run_list': ['a', 'b', 'c'],
                'initial_attributes': {
                    'c': 'd',
                }
            },
        }
        cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
        c = util.load_file(cc_chef.CHEF_FB_PATH)
        self.assertEqual(
            {
                'run_list': ['a', 'b', 'c'],
                'c': 'd',
            }, json.loads(c))

    def test_template_deletes(self):
        """Config keys explicitly set to None must be omitted from the
        rendered client.rb."""
        tpl_file = util.load_file('templates/chef_client.rb.tmpl')
        self.patchUtils(self.tmp)
        self.patchOS(self.tmp)

        util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
        cfg = {
            'chef': {
                'server_url': 'localhost',
                'validation_name': 'bob',
                'json_attribs': None,
                'show_time': None,
            },
        }
        cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
        c = util.load_file(cc_chef.CHEF_RB_PATH)
        self.assertNotIn('json_attribs', c)
        self.assertNotIn('Formatter.show_time', c)

View File

@@ -0,0 +1,78 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Yahoo! Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.config import cc_debug
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
from cloudinit import util
from cloudinit.sources import DataSourceNone
from .. import helpers as t_help
import logging
LOG = logging.getLogger(__name__)
class TestDebug(t_help.FilesystemMockingTestCase):
    """Tests for the cc_debug module, which dumps config and metadata
    to a log file when 'verbose' is enabled."""

    def setUp(self):
        super(TestDebug, self).setUp()
        # Scratch directory that replaces the real root filesystem.
        self.new_root = self.makeDir(prefix="unittest_")

    def _get_cloud(self, distro, metadata=None):
        """Return a Cloud for *distro* with util patched onto the
        temporary root, optionally pre-seeding datasource metadata."""
        self.patchUtils(self.new_root)
        paths = helpers.Paths({})
        cls = distros.fetch(distro)
        d = cls(distro, {}, paths)
        ds = DataSourceNone.DataSourceNone({}, d, paths)
        if metadata:
            ds.metadata.update(metadata)
        return cloud.Cloud(ds, paths, {}, d, None)

    def test_debug_write(self):
        """verbose=True writes every top-level config key to 'output'."""
        cfg = {
            'abc': '123',
            'c': u'\u20a0',
            'debug': {
                'verbose': True,
                # Does not actually write here due to mocking...
                'output': '/var/log/cloud-init-debug.log',
            },
        }
        cc = self._get_cloud('ubuntu')
        cc_debug.handle('cc_debug', cfg, cc, LOG, [])

        contents = util.load_file('/var/log/cloud-init-debug.log')
        # Some basic sanity tests...
        self.assertNotEqual(0, len(contents))
        for k in cfg.keys():
            self.assertIn(k, contents)

    def test_debug_no_write(self):
        """verbose=False must suppress writing the debug file entirely."""
        cfg = {
            'abc': '123',
            'debug': {
                'verbose': False,
                # Does not actually write here due to mocking...
                'output': '/var/log/cloud-init-debug.log',
            },
        }
        cc = self._get_cloud('ubuntu')
        cc_debug.handle('cc_debug', cfg, cc, LOG, [])
        # The output file must never have been created.
        self.assertRaises(IOError,
                          util.load_file, '/var/log/cloud-init-debug.log')

View File

@@ -37,10 +37,11 @@ class TestHostname(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_set_hostname.handle('cc_set_hostname',
cfg, cc, LOG, [])
contents = util.load_file("/etc/sysconfig/network")
n_cfg = ConfigObj(StringIO(contents))
self.assertEquals({'HOSTNAME': 'blah.blah.blah.yahoo.com'},
dict(n_cfg))
if not distro.uses_systemd():
contents = util.load_file("/etc/sysconfig/network")
n_cfg = ConfigObj(StringIO(contents))
self.assertEquals({'HOSTNAME': 'blah.blah.blah.yahoo.com'},
dict(n_cfg))
def test_write_hostname_debian(self):
cfg = {

View File

@@ -1,5 +1,7 @@
from mock import patch
from . import helpers as test_helpers
from cloudinit import ssh_util
from unittest import TestCase
VALID_CONTENT = {
@@ -35,7 +37,7 @@ TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
'user \"root\".\';echo;sleep 10"')
class TestAuthKeyLineParser(TestCase):
class TestAuthKeyLineParser(test_helpers.TestCase):
def test_simple_parse(self):
# test key line with common 3 fields (keytype, base64, comment)
parser = ssh_util.AuthKeyLineParser()
@@ -98,4 +100,71 @@ class TestAuthKeyLineParser(TestCase):
self.assertFalse(key.valid())
class TestParseSSHConfig(test_helpers.TestCase):
    """Tests for ssh_util.parse_ssh_config with file access mocked out."""

    def setUp(self):
        # Stub the filesystem: parse_ssh_config only ever reads via
        # util.load_file and checks existence via os.path.isfile.
        self.load_file_patch = patch('cloudinit.ssh_util.util.load_file')
        self.load_file = self.load_file_patch.start()
        self.isfile_patch = patch('cloudinit.ssh_util.os.path.isfile')
        self.isfile = self.isfile_patch.start()
        self.isfile.return_value = True

    def tearDown(self):
        self.load_file_patch.stop()
        self.isfile_patch.stop()

    def test_not_a_file(self):
        """A nonexistent path parses to an empty list, not an IOError."""
        self.isfile.return_value = False
        self.load_file.side_effect = IOError
        ret = ssh_util.parse_ssh_config('not a real file')
        self.assertEqual([], ret)

    def test_empty_file(self):
        """An empty config file yields no entries."""
        self.load_file.return_value = ''
        ret = ssh_util.parse_ssh_config('some real file')
        self.assertEqual([], ret)

    def test_comment_line(self):
        """Comment lines are preserved verbatim as single entries."""
        comment_line = '# This is a comment'
        self.load_file.return_value = comment_line
        ret = ssh_util.parse_ssh_config('some real file')
        self.assertEqual(1, len(ret))
        self.assertEqual(comment_line, ret[0].line)

    def test_blank_lines(self):
        """Whitespace-only lines are kept but normalized to empty."""
        lines = ['', '\t', ' ']
        self.load_file.return_value = '\n'.join(lines)
        ret = ssh_util.parse_ssh_config('some real file')
        self.assertEqual(len(lines), len(ret))
        for line in ret:
            self.assertEqual('', line.line)

    def test_lower_case_config(self):
        """Keys come back lower-cased; values keep their case."""
        self.load_file.return_value = 'foo bar'
        ret = ssh_util.parse_ssh_config('some real file')
        self.assertEqual(1, len(ret))
        self.assertEqual('foo', ret[0].key)
        self.assertEqual('bar', ret[0].value)

    def test_upper_case_config(self):
        """Upper-case keys are lower-cased; the value is untouched."""
        self.load_file.return_value = 'Foo Bar'
        ret = ssh_util.parse_ssh_config('some real file')
        self.assertEqual(1, len(ret))
        self.assertEqual('foo', ret[0].key)
        self.assertEqual('Bar', ret[0].value)

    def test_lower_case_with_equals(self):
        """'key=value' syntax parses like 'key value' (LP: #1391303)."""
        self.load_file.return_value = 'foo=bar'
        ret = ssh_util.parse_ssh_config('some real file')
        self.assertEqual(1, len(ret))
        self.assertEqual('foo', ret[0].key)
        self.assertEqual('bar', ret[0].value)

    def test_upper_case_with_equals(self):
        """'Key=value' syntax is also key-lowered like 'Key value'."""
        self.load_file.return_value = 'Foo=bar'
        ret = ssh_util.parse_ssh_config('some real file')
        self.assertEqual(1, len(ret))
        self.assertEqual('foo', ret[0].key)
        self.assertEqual('bar', ret[0].value)
# vi: ts=4 expandtab