Rebase on next

James Page 2015-09-03 18:04:56 +01:00
commit 4b803e8479
26 changed files with 593 additions and 120 deletions

View File

@ -250,3 +250,10 @@ options:
stipulated by nova-cloud-controller. The option is only available for
backward compatibility for deployments which do not use the neutron-api
charm. Please do not enable this on new deployments.
# Huge page configuration - off by default
hugepages:
type: string
default:
description: |
The percentage of system memory to use for hugepages, e.g. '10%', or the total
number of 2M hugepages, e.g. "1024".
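
The two accepted formats can be illustrated with a short sketch (not part of the commit; it simply mirrors the description above and the install_hugepages logic added later in this diff):

def parse_hugepages_option(value):
    # '10%' -> reserve a fraction of system memory as hugepages
    if value.endswith('%'):
        return ('percent', float(value.rstrip('%')) / 100)
    # '1024' -> reserve an explicit number of 2M hugepages
    return ('count', int(value))

parse_hugepages_option('10%')   # ('percent', 0.1)
parse_hugepages_option('1024')  # ('count', 1024)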

files/qemu-hugefsdir Executable file
View File

@ -0,0 +1,14 @@
#! /bin/sh
#
### BEGIN INIT INFO
# Required-Start: $local_fs
# Required-Stop: $local_fs
# Provides: qemu-hugefsdir
# Default-Start: 2 3 4 5
# Default-Stop: 0 6
# Short-Description: Make sure qemu gets a temp hugetlbfs dir
# Description: Make sure qemu gets a temp hugetlbfs dir
### END INIT INFO
mkdir -p /run/hugepages/kvm/libvirt/qemu || true
chown libvirt-qemu:libvirtd /run/hugepages/kvm/libvirt/qemu

View File

@ -152,15 +152,11 @@ class CommandLine(object):
arguments = self.argument_parser.parse_args()
argspec = inspect.getargspec(arguments.func)
vargs = []
kwargs = {}
for arg in argspec.args:
vargs.append(getattr(arguments, arg))
if argspec.varargs:
vargs.extend(getattr(arguments, argspec.varargs))
if argspec.keywords:
for kwarg in argspec.keywords.items():
kwargs[kwarg] = getattr(arguments, kwarg)
output = arguments.func(*vargs, **kwargs)
output = arguments.func(*vargs)
if getattr(arguments.func, '_cli_test_command', False):
self.exit_code = 0 if output else 1
output = ''

View File

@ -26,7 +26,7 @@ from . import CommandLine # noqa
"""
Import the sub-modules which have decorated subcommands to register with chlp.
"""
import host # noqa
import benchmark # noqa
import unitdata # noqa
from charmhelpers.core import hookenv # noqa
from . import host # noqa
from . import benchmark # noqa
from . import unitdata # noqa
from . import hookenv # noqa

View File

@ -0,0 +1,23 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from . import cmdline
from charmhelpers.core import hookenv
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
cmdline.subcommand('service-name')(hookenv.service_name)
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)

View File

@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True):
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
try:
result = socket.gethostbyaddr(address)[0]
except:
return None
else:
result = address

View File

@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb']
base_charms = ['mysql', 'mongodb', 'nrpe']
if self.series in ['precise', 'trusty']:
base_series = self.series
@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
'ceph-osd', 'ceph-radosgw']
# Most OpenStack subordinate charms do not expose an origin option
# as that is controlled by the principal.
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
if self.openstack:
for svc in services:

View File

@ -50,6 +50,8 @@ from charmhelpers.core.sysctl import create as sysctl_create
from charmhelpers.core.strutils import bool_from_string
from charmhelpers.core.host import (
get_bond_master,
is_phy_iface,
list_nics,
get_nic_hwaddr,
mkdir,
@ -893,6 +895,18 @@ class NeutronContext(OSContextGenerator):
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
return ctxt
def pg_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = {'core_plugin': driver,
'neutron_plugin': 'plumgrid',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config}
return ovs_ctxt
def __call__(self):
if self.network_manager not in ['quantum', 'neutron']:
return {}
@ -912,6 +926,8 @@ class NeutronContext(OSContextGenerator):
ctxt.update(self.calico_ctxt())
elif self.plugin == 'vsp':
ctxt.update(self.nuage_ctxt())
elif self.plugin == 'plumgrid':
ctxt.update(self.pg_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
@ -923,7 +939,6 @@ class NeutronContext(OSContextGenerator):
class NeutronPortContext(OSContextGenerator):
NIC_PREFIXES = ['eth', 'bond']
def resolve_ports(self, ports):
"""Resolve NICs not yet bound to bridge(s)
@ -935,7 +950,18 @@ class NeutronPortContext(OSContextGenerator):
hwaddr_to_nic = {}
hwaddr_to_ip = {}
for nic in list_nics(self.NIC_PREFIXES):
for nic in list_nics():
# Ignore virtual interfaces (bond masters will be identified from
# their slaves)
if not is_phy_iface(nic):
continue
_nic = get_bond_master(nic)
if _nic:
log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
level=DEBUG)
nic = _nic
hwaddr = get_nic_hwaddr(nic)
hwaddr_to_nic[hwaddr] = nic
addresses = get_ipv4_addr(nic, fatal=False)
@ -961,7 +987,8 @@ class NeutronPortContext(OSContextGenerator):
# trust it to be the real external network).
resolved.append(entry)
return resolved
# Ensure no duplicates
return list(set(resolved))
class OSConfigFlagContext(OSContextGenerator):
@ -1280,15 +1307,19 @@ class DataPortContext(NeutronPortContext):
def __call__(self):
ports = config('data-port')
if ports:
# Map of {port/mac:bridge}
portmap = parse_data_port_mappings(ports)
ports = portmap.values()
ports = portmap.keys()
# Resolve provided ports or mac addresses and filter out those
# already attached to a bridge.
resolved = self.resolve_ports(ports)
# FIXME: is this necessary?
normalized = {get_nic_hwaddr(port): port for port in resolved
if port not in ports}
normalized.update({port: port for port in resolved
if port in ports})
if resolved:
return {bridge: normalized[port] for bridge, port in
return {bridge: normalized[port] for port, bridge in
six.iteritems(portmap) if port in normalized.keys()}
return None

View File

@ -195,6 +195,20 @@ def neutron_plugins():
'packages': [],
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
'server_services': ['neutron-server']
},
'plumgrid': {
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
'contexts': [
context.SharedDBContext(user=config('database-user'),
database=config('database'),
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['plumgrid-lxc'],
['iovisor-dkms']],
'server_packages': ['neutron-server',
'neutron-plugin-plumgrid'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
@ -255,17 +269,30 @@ def network_manager():
return 'neutron'
def parse_mappings(mappings):
def parse_mappings(mappings, key_rvalue=False):
"""By default mappings are lvalue keyed.
If key_rvalue is True, the mapping will be reversed to allow multiple
configs for the same lvalue.
"""
parsed = {}
if mappings:
mappings = mappings.split()
for m in mappings:
p = m.partition(':')
key = p[0].strip()
if p[1]:
parsed[key] = p[2].strip()
if key_rvalue:
key_index = 2
val_index = 0
# if there is no rvalue skip to next
if not p[1]:
continue
else:
parsed[key] = ''
key_index = 0
val_index = 2
key = p[key_index].strip()
parsed[key] = p[val_index].strip()
return parsed
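
A quick illustration of the new key_rvalue behaviour, using the parse_mappings function defined above (example only, not part of the diff):

# Default: keyed on the lvalue (left of ':').
parse_mappings('lval1:rval1 lval2:rval2')
# -> {'lval1': 'rval1', 'lval2': 'rval2'}

# key_rvalue=True: keyed on the rvalue; entries without an rvalue are skipped.
parse_mappings('lval1:rval1 lval2:rval2', key_rvalue=True)
# -> {'rval1': 'lval1', 'rval2': 'lval2'}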
@ -283,25 +310,25 @@ def parse_bridge_mappings(mappings):
def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of bridge:port mappings.
Mappings must be a space-delimited list of port:bridge mappings.
Returns dict of the form {bridge:port}.
Returns dict of the form {port:bridge} where port may be a mac address or
interface name.
"""
_mappings = parse_mappings(mappings)
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
# proposed for <port> since it may be a mac address which will differ
# across units, thus allowing first-known-good to be chosen.
_mappings = parse_mappings(mappings, key_rvalue=True)
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {default_bridge: mappings.split()[0]}
bridges = _mappings.keys()
ports = _mappings.values()
if len(set(bridges)) != len(bridges):
raise Exception("It is not allowed to have more than one port "
"configured on the same bridge")
_mappings = {mappings.split()[0]: default_bridge}
ports = _mappings.keys()
if len(set(ports)) != len(ports):
raise Exception("It is not allowed to have the same port configured "
"on more than one bridge")

View File

@ -1,5 +1,3 @@
#!/usr/bin/python
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
@ -24,6 +22,7 @@ import subprocess
import json
import os
import sys
import re
import six
import yaml
@ -69,7 +68,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
@ -118,6 +116,34 @@ SWIFT_CODENAMES = OrderedDict([
('2.3.0', 'liberty'),
])
# >= Liberty version->codename mapping
PACKAGE_CODENAMES = {
'nova-common': OrderedDict([
('12.0.0', 'liberty'),
]),
'neutron-common': OrderedDict([
('7.0.0', 'liberty'),
]),
'cinder-common': OrderedDict([
('7.0.0', 'liberty'),
]),
'keystone': OrderedDict([
('8.0.0', 'liberty'),
]),
'horizon-common': OrderedDict([
('8.0.0', 'liberty'),
]),
'ceilometer-common': OrderedDict([
('5.0.0', 'liberty'),
]),
'heat-common': OrderedDict([
('5.0.0', 'liberty'),
]),
'glance-common': OrderedDict([
('11.0.0', 'liberty'),
]),
}
DEFAULT_LOOPBACK_SIZE = '5G'
@ -167,9 +193,9 @@ def get_os_codename_version(vers):
error_out(e)
def get_os_version_codename(codename):
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
'''Determine OpenStack version number from codename.'''
for k, v in six.iteritems(OPENSTACK_CODENAMES):
for k, v in six.iteritems(version_map):
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
@ -201,20 +227,31 @@ def get_os_codename_package(package, fatal=True):
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
if match:
vers = match.group(0)
try:
if 'swift' in pkg.name:
swift_vers = vers[:5]
if swift_vers not in SWIFT_CODENAMES:
# Deal with 1.10.0 upward
swift_vers = vers[:6]
return SWIFT_CODENAMES[swift_vers]
else:
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
# >= Liberty independent project versions
if (package in PACKAGE_CODENAMES and
vers in PACKAGE_CODENAMES[package]):
return PACKAGE_CODENAMES[package][vers]
else:
# < Liberty co-ordinated project versions
try:
if 'swift' in pkg.name:
swift_vers = vers[:5]
if swift_vers not in SWIFT_CODENAMES:
# Deal with 1.10.0 upward
swift_vers = vers[:6]
return SWIFT_CODENAMES[swift_vers]
else:
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
if not fatal:
return None
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg, fatal=True):
@ -392,7 +429,11 @@ def openstack_upgrade_available(package):
import apt_pkg as apt
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src)
if "swift" in package:
codename = get_os_codename_install_source(src)
available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
else:
available_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(available_vers, cur_vers) == 1

View File

@ -43,9 +43,10 @@ def zap_disk(block_device):
:param block_device: str: Full path of block device to clean.
'''
# https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
call(['sgdisk', '--zap-all', '--mbrtogpt',
'--clear', block_device])
call(['sgdisk', '--zap-all', '--', block_device])
call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
dev_end = check_output(['blockdev', '--getsz',
block_device]).decode('UTF-8')
gpt_end = int(dev_end.split()[0]) - 100

View File

@ -34,23 +34,6 @@ import errno
import tempfile
from subprocess import CalledProcessError
try:
from charmhelpers.cli import cmdline
except ImportError as e:
# due to the anti-pattern of partially synching charmhelpers directly
# into charms, it's possible that charmhelpers.cli is not available;
# if that's the case, they don't really care about using the cli anyway,
# so mock it out
if str(e) == 'No module named cli':
class cmdline(object):
@classmethod
def subcommand(cls, *args, **kwargs):
def _wrap(func):
return func
return _wrap
else:
raise
import six
if not six.PY3:
from UserDict import UserDict
@ -91,6 +74,7 @@ def cached(func):
res = func(*args, **kwargs)
cache[key] = res
return res
wrapper._wrapped = func
return wrapper
@ -190,7 +174,6 @@ def relation_type():
return os.environ.get('JUJU_RELATION', None)
@cmdline.subcommand()
@cached
def relation_id(relation_name=None, service_or_unit=None):
"""The relation ID for the current or a specified relation"""
@ -216,13 +199,11 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
@cmdline.subcommand()
def service_name():
"""The name service group this unit belongs to"""
return local_unit().split('/')[0]
@cmdline.subcommand()
@cached
def remote_service_name(relid=None):
"""The remote service name for a given relation-id (or the current relation)"""
@ -786,21 +767,23 @@ def status_set(workload_state, message):
def status_get():
"""Retrieve the previously set juju workload state
"""Retrieve the previously set juju workload state and message
If the status-get command is not found then assume this is juju < 1.23 and
return 'unknown', ""
If the status-set command is not found then assume this is juju < 1.23 and
return 'unknown'
"""
cmd = ['status-get']
cmd = ['status-get', "--format=json", "--include-data"]
try:
raw_status = subprocess.check_output(cmd, universal_newlines=True)
status = raw_status.rstrip()
return status
raw_status = subprocess.check_output(cmd)
except OSError as e:
if e.errno == errno.ENOENT:
return 'unknown'
return ('unknown', "")
else:
raise
else:
status = json.loads(raw_status.decode("UTF-8"))
return (status["status"], status["message"])
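
For illustration (not part of the diff), a hook that imports status_get and log from charmhelpers.core.hookenv would now consume the returned tuple like this:

state, message = status_get()
if state == 'unknown':
    # juju < 1.23: status-get is not available
    log('workload status is not supported by this juju version')
else:
    log('workload status: {} ({})'.format(state, message))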
def translate_exc(from_exc, to_exc):

View File

@ -72,7 +72,7 @@ def service_pause(service_name, init_dir=None):
stopped = service_stop(service_name)
# XXX: Support systemd too
override_path = os.path.join(
init_dir, '{}.conf.override'.format(service_name))
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
return stopped
@ -86,7 +86,7 @@ def service_resume(service_name, init_dir=None):
if init_dir is None:
init_dir = "/etc/init"
override_path = os.path.join(
init_dir, '{}.conf.override'.format(service_name))
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
started = service_start(service_name)
@ -148,6 +148,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
return user_info
def user_exists(username):
"""Check if a user exists"""
try:
pwd.getpwnam(username)
user_exists = True
except KeyError:
user_exists = False
return user_exists
def add_group(group_name, system_group=False):
"""Add a group to the system"""
try:
@ -280,6 +290,17 @@ def mounts():
return system_mounts
def fstab_mount(mountpoint):
"""Mount filesystem using fstab"""
cmd_args = ['mount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error mounting {}\n{}'.format(mountpoint, e.output))
return False
return True
def file_hash(path, hash_type='md5'):
"""
Generate a hash checksum of the contents of 'path' or None if not found.
@ -396,25 +417,80 @@ def pwgen(length=None):
return(''.join(random_chars))
def list_nics(nic_type):
def is_phy_iface(interface):
"""Returns True if interface is not virtual, otherwise False."""
if interface:
sys_net = '/sys/class/net'
if os.path.isdir(sys_net):
for iface in glob.glob(os.path.join(sys_net, '*')):
if '/virtual/' in os.path.realpath(iface):
continue
if interface == os.path.basename(iface):
return True
return False
def get_bond_master(interface):
"""Returns bond master if interface is bond slave otherwise None.
NOTE: the provided interface is expected to be physical
"""
if interface:
iface_path = '/sys/class/net/%s' % (interface)
if os.path.exists(iface_path):
if '/virtual/' in os.path.realpath(iface_path):
return None
master = os.path.join(iface_path, 'master')
if os.path.exists(master):
master = os.path.realpath(master)
# make sure it is a bond master
if os.path.exists(os.path.join(master, 'bonding')):
return os.path.basename(master)
return None
def list_nics(nic_type=None):
'''Return a list of nics of given type(s)'''
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
if nic_type:
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).decode('UTF-8')
ip_output = ip_output.split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
matched = re.search('.*: (' + int_type +
r'[0-9]+\.[0-9]+)@.*', line)
if matched:
iface = matched.groups()[0]
else:
iface = line.split()[1].replace(":", "")
if iface not in interfaces:
interfaces.append(iface)
else:
cmd = ['ip', 'a']
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line for line in ip_output if line)
ip_output = (line.strip() for line in ip_output if line)
key = re.compile('^[0-9]+:\s+(.+):')
for line in ip_output:
if line.split()[1].startswith(int_type):
matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
if matched:
interface = matched.groups()[0]
else:
interface = line.split()[1].replace(":", "")
interfaces.append(interface)
matched = re.search(key, line)
if matched:
iface = matched.group(1)
iface = iface.partition("@")[0]
if iface not in interfaces:
interfaces.append(iface)
return interfaces
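
For illustration, the three helpers above combine as in the NeutronPortContext change earlier in this diff (sketch only, not part of the commit):

physical_nics = []
for nic in list_nics():
    # Skip virtual interfaces; bond masters are picked up via their slaves
    if not is_phy_iface(nic):
        continue
    master = get_bond_master(nic)
    nic = master or nic
    if nic not in physical_nics:
        physical_nics.append(nic)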

View File

@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import yaml
from charmhelpers.core import fstab
from charmhelpers.core import sysctl
from charmhelpers.core.host import (
add_group,
add_user_to_group,
fstab_mount,
mkdir,
)
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
max_map_count=65536, mnt_point='/run/hugepages/kvm',
pagesize='2MB', mount=True):
"""Enable hugepages on system.
Args:
user (str) -- Username to allow access to hugepages
group (str) -- Group name to own hugepages
nr_hugepages (int) -- Number of pages to reserve
max_map_count (int) -- Number of Virtual Memory Areas a process can own
mnt_point (str) -- Directory to mount hugepages on
pagesize (str) -- Size of hugepages
mount (bool) -- Whether to mount hugepages
"""
group_info = add_group(group)
gid = group_info.gr_gid
add_user_to_group(user, group)
sysctl_settings = {
'vm.nr_hugepages': nr_hugepages,
'vm.max_map_count': max_map_count,
'vm.hugetlb_shm_group': gid,
}
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
lfstab = fstab.Fstab()
fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
if fstab_entry:
lfstab.remove_entry(fstab_entry)
entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
lfstab.add_entry(entry)
if mount:
fstab_mount(mnt_point)
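
A usage sketch (matching the install_hugepages call added further down in this commit; the nr_hugepages value here is arbitrary):

hugepage_support(
    'nova',                          # user that needs access to the pages
    mnt_point='/run/hugepages/kvm',
    group='root',
    nr_hugepages=1024,
    mount=False,                     # write the fstab entry only; mount separately
)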

View File

@ -16,7 +16,9 @@
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import templating
from charmhelpers.core.services.base import ManagerCallback
@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback):
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
:param str target: The target to write the rendered template to
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
:param partial on_change_action: functools partial to be executed when
rendered file changes
"""
def __init__(self, source, target,
owner='root', group='root', perms=0o444):
owner='root', group='root', perms=0o444,
on_change_action=None):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
self.on_change_action = on_change_action
def __call__(self, manager, service_name, event_name):
pre_checksum = ''
if self.on_change_action and os.path.isfile(self.target):
pre_checksum = host.file_hash(self.target)
service = manager.get_service(service_name)
context = {}
for ctx in service.get('required_data', []):
context.update(ctx)
templating.render(self.source, self.target, context,
self.owner, self.group, self.perms)
if self.on_change_action:
if pre_checksum == host.file_hash(self.target):
hookenv.log(
'No change detected: {}'.format(self.target),
hookenv.DEBUG)
else:
self.on_change_action()
# Convenience aliases for templates
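
For illustration (not part of the commit), a charm using the services framework could wire the new hook up roughly like this, assuming TemplateCallback lives in charmhelpers.core.services.helpers and using charmhelpers.core.host.service_restart:

from functools import partial

from charmhelpers.core.host import service_restart
from charmhelpers.core.services.helpers import TemplateCallback

haproxy_template = TemplateCallback(
    source='haproxy.cfg',
    target='/etc/haproxy/haproxy.cfg',
    on_change_action=partial(service_restart, 'haproxy'),
)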

View File

@ -90,6 +90,14 @@ CLOUD_ARCHIVE_POCKETS = {
'kilo/proposed': 'trusty-proposed/kilo',
'trusty-kilo/proposed': 'trusty-proposed/kilo',
'trusty-proposed/kilo': 'trusty-proposed/kilo',
# Liberty
'liberty': 'trusty-updates/liberty',
'trusty-liberty': 'trusty-updates/liberty',
'trusty-liberty/updates': 'trusty-updates/liberty',
'trusty-updates/liberty': 'trusty-updates/liberty',
'liberty/proposed': 'trusty-proposed/liberty',
'trusty-liberty/proposed': 'trusty-proposed/liberty',
'trusty-proposed/liberty': 'trusty-proposed/liberty',
}
# The order of this list is very important. Handlers should be listed in from

View File

@ -124,6 +124,9 @@ class NovaComputeLibvirtContext(context.OSContextGenerator):
if config('disk-cachemodes'):
ctxt['disk_cachemodes'] = config('disk-cachemodes')
if config('hugepages'):
ctxt['hugepages'] = True
ctxt['host_uuid'] = '%s' % uuid.uuid4()
return ctxt

View File

@ -65,6 +65,7 @@ from nova_compute_utils import (
get_topics,
assert_charm_supports_ipv6,
manage_ovs,
install_hugepages,
)
from charmhelpers.contrib.network.ip import (
@ -137,6 +138,9 @@ def config_changed():
if is_relation_made("nrpe-external-master"):
update_nrpe_config()
if config('hugepages'):
install_hugepages()
CONFIGS.write_all()
@ -325,6 +329,8 @@ def relation_broken():
@hooks.hook('upgrade-charm')
def upgrade_charm():
# NOTE: ensure psutil install for hugepages configuration
apt_install(filter_installed_packages(['python-psutil']))
for r_id in relation_ids('amqp'):
amqp_joined(relation_id=r_id)

View File

@ -1,6 +1,7 @@
import os
import shutil
import pwd
import subprocess
from base64 import b64decode
from copy import deepcopy
@ -60,6 +61,12 @@ from charmhelpers.contrib.python.packages import (
pip_install,
)
from charmhelpers.core.hugepage import hugepage_support
from charmhelpers.core.host import (
fstab_mount,
rsync,
)
from nova_compute_context import (
CloudComputeContext,
MetadataServiceContext,
@ -82,6 +89,7 @@ BASE_PACKAGES = [
'genisoimage', # was missing as a package dependency until raring.
'librbd1', # bug 1440953
'python-six',
'python-psutil',
]
BASE_GIT_PACKAGES = [
@ -177,7 +185,7 @@ BASE_RESOURCE_MAP = {
LIBVIRT_RESOURCE_MAP = {
QEMU_CONF: {
'services': ['libvirt-bin'],
'contexts': [],
'contexts': [NovaComputeLibvirtContext()],
},
LIBVIRTD_CONF: {
'services': ['libvirt-bin'],
@ -808,3 +816,36 @@ def git_post_install(projects_yaml):
apt_update()
apt_install(LATE_GIT_PACKAGES, fatal=True)
def install_hugepages():
""" Configure hugepages """
hugepage_config = config('hugepages')
if hugepage_config:
# TODO: defaults to 2M - this should probably be configurable
# and support multiple pool sizes - e.g. 2M and 1G.
hugepage_size = 2048
if hugepage_config.endswith('%'):
import psutil
mem = psutil.virtual_memory()
hugepage_config_pct = hugepage_config.strip('%')
hugepage_multiplier = float(hugepage_config_pct) / 100
hugepages = int((mem.total * hugepage_multiplier) / hugepage_size)
else:
hugepages = int(hugepage_config)
mnt_point = '/run/hugepages/kvm'
hugepage_support(
'nova',
mnt_point=mnt_point,
group='root',
nr_hugepages=hugepages,
mount=False,
)
if subprocess.call(['mountpoint', mnt_point]):
fstab_mount(mnt_point)
rsync(
charm_dir() + '/files/qemu-hugefsdir',
'/etc/init.d/qemu-hugefsdir'
)
subprocess.check_call('/etc/init.d/qemu-hugefsdir')
subprocess.check_call(['update-rc.d', 'qemu-hugefsdir', 'defaults'])

View File

@ -85,6 +85,11 @@ security_group_api = neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if neutron_plugin and neutron_plugin == 'plumgrid' -%}
security_group_api=neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}

View File

@ -90,6 +90,11 @@ security_group_api = neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if neutron_plugin and neutron_plugin == 'plumgrid' -%}
security_group_api=neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}

View File

@ -72,6 +72,11 @@ security_group_api = neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if neutron_plugin and neutron_plugin == 'plumgrid' -%}
security_group_api=neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if network_manager != 'neutron' and network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}

View File

@ -14,17 +14,23 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import ConfigParser
import distro_info
import io
import json
import logging
import os
import re
import six
import subprocess
import sys
import time
import urlparse
import amulet
import distro_info
import six
from six.moves import configparser
if six.PY3:
from urllib import parse as urlparse
else:
import urlparse
class AmuletUtils(object):
@ -142,19 +148,23 @@ class AmuletUtils(object):
for service_name in services_list:
if (self.ubuntu_releases.index(release) >= systemd_switch or
service_name == "rabbitmq-server"):
# init is systemd
service_name in ['rabbitmq-server', 'apache2']):
# init is systemd (or regular sysv)
cmd = 'sudo service {} status'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0 and "start/running" in output
output, code = sentry_unit.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
if not service_running:
return u"command `{}` returned {} {}".format(
cmd, output, str(code))
return None
def _get_config(self, unit, filename):
@ -164,7 +174,7 @@ class AmuletUtils(object):
# NOTE(beisner): by default, ConfigParser does not handle options
# with no value, such as the flags used in the mysql my.cnf file.
# https://bugs.python.org/issue7005
config = ConfigParser.ConfigParser(allow_no_value=True)
config = configparser.ConfigParser(allow_no_value=True)
config.readfp(io.StringIO(file_contents))
return config
@ -450,15 +460,20 @@ class AmuletUtils(object):
cmd, code, output))
return None
def get_process_id_list(self, sentry_unit, process_name):
def get_process_id_list(self, sentry_unit, process_name,
expect_success=True):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:param sentry_unit: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
"""
cmd = 'pidof {}'.format(process_name)
cmd = 'pidof -x {}'.format(process_name)
if not expect_success:
cmd += " || exit 0 && exit 1"
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
@ -467,14 +482,23 @@ class AmuletUtils(object):
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output).split()
def get_unit_process_ids(self, unit_processes):
def get_unit_process_ids(self, unit_processes, expect_success=True):
"""Construct a dict containing unit sentries, process names, and
process IDs."""
process IDs.
:param unit_processes: A dictionary of Amulet sentry instance
to list of process names.
:param expect_success: if False expect the processes to not be
running, raise if they are.
:returns: Dictionary of Amulet sentry instance to dictionary
of process names to PIDs.
"""
pid_dict = {}
for sentry_unit, process_list in unit_processes.iteritems():
for sentry_unit, process_list in six.iteritems(unit_processes):
pid_dict[sentry_unit] = {}
for process in process_list:
pids = self.get_process_id_list(sentry_unit, process)
pids = self.get_process_id_list(
sentry_unit, process, expect_success=expect_success)
pid_dict[sentry_unit].update({process: pids})
return pid_dict
@ -488,7 +512,7 @@ class AmuletUtils(object):
return ('Unit count mismatch. expected, actual: {}, '
'{} '.format(len(expected), len(actual)))
for (e_sentry, e_proc_names) in expected.iteritems():
for (e_sentry, e_proc_names) in six.iteritems(expected):
e_sentry_name = e_sentry.info['unit_name']
if e_sentry in actual.keys():
a_proc_names = actual[e_sentry]
@ -507,11 +531,23 @@ class AmuletUtils(object):
'{}'.format(e_proc_name, a_proc_name))
a_pids_length = len(a_pids)
if e_pids_length != a_pids_length:
return ('PID count mismatch. {} ({}) expected, actual: '
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
e_pids_length, a_pids_length,
a_pids))
# If expected is not bool, ensure PID quantities match
if not isinstance(e_pids_length, bool) and \
a_pids_length != e_pids_length:
return fail_msg
# If expected is bool True, ensure 1 or more PIDs exist
elif isinstance(e_pids_length, bool) and \
e_pids_length is True and a_pids_length < 1:
return fail_msg
# If expected is bool False, ensure 0 PIDs exist
elif isinstance(e_pids_length, bool) and \
e_pids_length is False and a_pids_length != 0:
return fail_msg
else:
self.log.debug('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name, e_proc_name,
@ -531,3 +567,30 @@ class AmuletUtils(object):
return 'Dicts within list are not identical'
return None
def run_action(self, unit_sentry, action,
_check_output=subprocess.check_output):
"""Run the named action on a given unit sentry.
_check_output parameter is used for dependency injection.
@return action_id.
"""
unit_id = unit_sentry.info["unit_name"]
command = ["juju", "action", "do", "--format=json", unit_id, action]
self.log.info("Running command: %s\n" % " ".join(command))
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
action_id = data[u'Action queued with id']
return action_id
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
"""Wait for a given action, returning if it completed or not.
_check_output parameter is used for dependency injection.
"""
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
action_id]
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
return data.get(u"status") == "completed"
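
For illustration (not part of the diff), an amulet test could use the two helpers together; the 'pause' action name is just an example:

# u is an AmuletUtils instance, sentry is an amulet sentry unit
action_id = u.run_action(sentry, 'pause')
assert u.wait_on_action(action_id), 'pause action did not complete'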

View File

@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb']
base_charms = ['mysql', 'mongodb', 'nrpe']
if self.series in ['precise', 'trusty']:
base_series = self.series
@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
'ceph-osd', 'ceph-radosgw']
# Most OpenStack subordinate charms do not expose an origin option
# as that is controlled by the principal.
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
if self.openstack:
for svc in services:

View File

@ -57,6 +57,7 @@ TO_PATCH = [
# socket
'gethostname',
'create_sysctl',
'install_hugepages',
]

View File

@ -30,6 +30,10 @@ TO_PATCH = [
'add_user_to_group',
'MetadataServiceContext',
'lsb_release',
'charm_dir',
'hugepage_support',
'rsync',
'fstab_mount',
]
OVS_PKGS = [
@ -54,6 +58,7 @@ class NovaComputeUtilsTests(CharmTestCase):
def setUp(self):
super(NovaComputeUtilsTests, self).setUp(utils, TO_PATCH)
self.config.side_effect = self.test_config.get
self.charm_dir.return_value = 'mycharm'
@patch.object(utils, 'enable_nova_metadata')
@patch.object(utils, 'network_manager')
@ -708,3 +713,53 @@ class NovaComputeUtilsTests(CharmTestCase):
'iputils-arping', 'kpartx', 'kvm', 'netcat', 'open-iscsi',
'parted', 'python-libvirt', 'qemu', 'qemu-system',
'qemu-utils', 'vlan', 'xen-system-amd64'], fatal=True)
@patch('psutil.virtual_memory')
@patch('subprocess.check_call')
@patch('subprocess.call')
def test_install_hugepages(self, _call, _check_call, _virt_mem):
class mem(object):
def __init__(self):
self.total = 10000000
self.test_config.set('hugepages', '10%')
_virt_mem.side_effect = mem
_call.return_value = 1
utils.install_hugepages()
self.hugepage_support.assert_called_with(
'nova',
mnt_point='/run/hugepages/kvm',
group='root',
nr_hugepages=488,
mount=False,
)
check_call_calls = [
call('/etc/init.d/qemu-hugefsdir'),
call(['update-rc.d', 'qemu-hugefsdir', 'defaults']),
]
_check_call.assert_has_calls(check_call_calls)
self.fstab_mount.assert_called_with('/run/hugepages/kvm')
@patch('psutil.virtual_memory')
@patch('subprocess.check_call')
@patch('subprocess.call')
def test_install_hugepages_explicit_size(self, _call, _check_call,
_virt_mem):
self.test_config.set('hugepages', '2048')
utils.install_hugepages()
self.hugepage_support.assert_called_with(
'nova',
mnt_point='/run/hugepages/kvm',
group='root',
nr_hugepages=2048,
mount=False,
)
@patch('psutil.virtual_memory')
@patch('subprocess.check_call')
@patch('subprocess.call')
def test_install_hugepages_already_mounted(self, _call, _check_call,
_virt_mem):
self.test_config.set('hugepages', '2048')
_call.return_value = 0
utils.install_hugepages()
self.assertEqual(self.fstab_mount.call_args_list, [])