Add nova.conf for mitaka

James Page 2016-02-09 15:35:44 +00:00
parent 9893ce4525
commit 37522aa13c
8 changed files with 174 additions and 76 deletions

View File

@@ -121,11 +121,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which should use the source config option
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                      'ceph-osd', 'ceph-radosgw']
+                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
-                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
+                     'cinder-backup']

         if self.openstack:
             for svc in services:

View File

@@ -90,6 +90,12 @@ from charmhelpers.contrib.network.ip import (
 from charmhelpers.contrib.openstack.utils import get_host_ip
 from charmhelpers.core.unitdata import kv

+try:
+    import psutil
+except ImportError:
+    apt_install('python-psutil', fatal=True)
+    import psutil
+
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 ADDRESS_TYPES = ['admin', 'internal', 'public']

@@ -1258,13 +1264,11 @@ class WorkerConfigContext(OSContextGenerator):

     @property
     def num_cpus(self):
-        try:
-            from psutil import NUM_CPUS
-        except ImportError:
-            apt_install('python-psutil', fatal=True)
-            from psutil import NUM_CPUS
-        return NUM_CPUS
+        # NOTE: use cpu_count if present (16.04 support)
+        if hasattr(psutil, 'cpu_count'):
+            return psutil.cpu_count()
+        else:
+            return psutil.NUM_CPUS

     def __call__(self):
         multiplier = config('worker-multiplier') or 0

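A standalone sketch (assuming python-psutil is installed) of the probe the new num_cpus property performs: newer psutil releases, such as the one shipped on Ubuntu 16.04, added cpu_count() and deprecated (then removed) the old NUM_CPUS constant, hence the hasattr check.

    # Sketch only: prefer the newer psutil API, fall back to the legacy constant.
    import psutil

    def num_cpus():
        if hasattr(psutil, 'cpu_count'):
            return psutil.cpu_count()   # psutil >= 2.0 (e.g. Ubuntu 16.04)
        return psutil.NUM_CPUS          # older psutil releases

    print(num_cpus())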
View File

@@ -50,7 +50,7 @@ def determine_dkms_package():
     if kernel_version() >= (3, 13):
         return []
     else:
-        return ['openvswitch-datapath-dkms']
+        return [headers_package(), 'openvswitch-datapath-dkms']


 # legacy
@@ -70,7 +70,7 @@ def quantum_plugins():
                                         relation_prefix='neutron',
                                         ssl_dir=QUANTUM_CONF_DIR)],
             'services': ['quantum-plugin-openvswitch-agent'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['quantum-plugin-openvswitch-agent']],
             'server_packages': ['quantum-server',
                                 'quantum-plugin-openvswitch'],
@@ -111,7 +111,7 @@ def neutron_plugins():
                                         relation_prefix='neutron',
                                         ssl_dir=NEUTRON_CONF_DIR)],
             'services': ['neutron-plugin-openvswitch-agent'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['neutron-plugin-openvswitch-agent']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-openvswitch'],
@@ -155,7 +155,7 @@ def neutron_plugins():
                                         relation_prefix='neutron',
                                         ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['neutron-plugin-cisco']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-cisco'],
@@ -174,7 +174,7 @@ def neutron_plugins():
                          'neutron-dhcp-agent',
                          'nova-api-metadata',
                          'etcd'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['calico-compute',
                           'bird',
                           'neutron-dhcp-agent',
@@ -219,7 +219,7 @@ def neutron_plugins():
                                         relation_prefix='neutron',
                                         ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
-            'packages': [[headers_package()] + determine_dkms_package()],
+            'packages': [determine_dkms_package()],
             'server_packages': ['neutron-server',
                                 'python-neutron-plugin-midonet'],
             'server_services': ['neutron-server']
@@ -233,6 +233,14 @@ def neutron_plugins():
                                      'neutron-plugin-ml2']
         # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
         plugins['nvp'] = plugins['nsx']
+    if release >= 'kilo':
+        plugins['midonet']['driver'] = (
+            'neutron.plugins.midonet.plugin.MidonetPluginV2')
+    if release >= 'liberty':
+        midonet_origin = config('midonet-origin')
+        if midonet_origin is not None and midonet_origin[4:5] == '1':
+            plugins['midonet']['driver'] = (
+                'midonet.neutron.plugin_v1.MidonetPluginV2')
     return plugins

View File

@@ -103,29 +103,28 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2016.1', 'mitaka'),
 ])

-# The ugly duckling
+# The ugly duckling - must list releases oldest to newest
 SWIFT_CODENAMES = OrderedDict([
-    ('1.4.3', 'diablo'),
-    ('1.4.8', 'essex'),
-    ('1.7.4', 'folsom'),
-    ('1.8.0', 'grizzly'),
-    ('1.7.7', 'grizzly'),
-    ('1.7.6', 'grizzly'),
-    ('1.10.0', 'havana'),
-    ('1.9.1', 'havana'),
-    ('1.9.0', 'havana'),
-    ('1.13.1', 'icehouse'),
-    ('1.13.0', 'icehouse'),
-    ('1.12.0', 'icehouse'),
-    ('1.11.0', 'icehouse'),
-    ('2.0.0', 'juno'),
-    ('2.1.0', 'juno'),
-    ('2.2.0', 'juno'),
-    ('2.2.1', 'kilo'),
-    ('2.2.2', 'kilo'),
-    ('2.3.0', 'liberty'),
-    ('2.4.0', 'liberty'),
-    ('2.5.0', 'liberty'),
+    ('diablo',
+        ['1.4.3']),
+    ('essex',
+        ['1.4.8']),
+    ('folsom',
+        ['1.7.4']),
+    ('grizzly',
+        ['1.7.6', '1.7.7', '1.8.0']),
+    ('havana',
+        ['1.9.0', '1.9.1', '1.10.0']),
+    ('icehouse',
+        ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
+    ('juno',
+        ['2.0.0', '2.1.0', '2.2.0']),
+    ('kilo',
+        ['2.2.1', '2.2.2']),
+    ('liberty',
+        ['2.3.0', '2.4.0', '2.5.0']),
+    ('mitaka',
+        ['2.5.0']),
 ])

 # >= Liberty version->codename mapping
@@ -227,6 +226,33 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
     error_out(e)


+def get_os_version_codename_swift(codename):
+    '''Determine OpenStack version number of swift from codename.'''
+    for k, v in six.iteritems(SWIFT_CODENAMES):
+        if k == codename:
+            return v[-1]
+    e = 'Could not derive swift version for '\
+        'codename: %s' % codename
+    error_out(e)
+
+
+def get_swift_codename(version):
+    '''Determine OpenStack codename that corresponds to swift version.'''
+    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+    if len(codenames) > 1:
+        # If more than one release codename contains this version we determine
+        # the actual codename based on the highest available install source.
+        for codename in reversed(codenames):
+            releases = UBUNTU_OPENSTACK_RELEASE
+            release = [k for k, v in six.iteritems(releases) if codename in v]
+            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+            if codename in ret or release[0] in ret:
+                return codename
+    elif len(codenames) == 1:
+        return codenames[0]
+    return None
+
+
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
     import apt_pkg as apt

@@ -270,7 +296,7 @@ def get_os_codename_package(package, fatal=True):
     # < Liberty co-ordinated project versions
     try:
         if 'swift' in pkg.name:
-            return SWIFT_CODENAMES[vers]
+            return get_swift_codename(vers)
         else:
             return OPENSTACK_CODENAMES[vers]
     except KeyError:

@@ -289,9 +315,11 @@ def get_os_version_package(pkg, fatal=True):
     if 'swift' in pkg:
         vers_map = SWIFT_CODENAMES
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
     else:
         vers_map = OPENSTACK_CODENAMES
-    for version, cname in six.iteritems(vers_map):
-        if cname == codename:
-            return version
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version

@@ -460,11 +488,16 @@ def openstack_upgrade_available(package):
     cur_vers = get_os_version_package(package)
     if "swift" in package:
         codename = get_os_codename_install_source(src)
-        available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
+        avail_vers = get_os_version_codename_swift(codename)
     else:
-        available_vers = get_os_version_install_source(src)
+        avail_vers = get_os_version_install_source(src)
     apt.init()
-    return apt.version_compare(available_vers, cur_vers) == 1
+    if "swift" in package:
+        major_cur_vers = cur_vers.split('.', 1)[0]
+        major_avail_vers = avail_vers.split('.', 1)[0]
+        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
+        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
+    return apt.version_compare(avail_vers, cur_vers) == 1


 def ensure_block_device(block_device):

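A self-contained sketch of why SWIFT_CODENAMES had to become a codename-to-versions mapping: one swift version ('2.5.0') now appears under both liberty and mitaka, so the codename must be resolved from the version lists rather than a flat version-keyed dict. The in-tree get_swift_codename additionally consults `apt-cache policy` to break ties; that part is omitted here, and only a subset of the table is shown.

    from collections import OrderedDict

    SWIFT_CODENAMES = OrderedDict([
        ('kilo', ['2.2.1', '2.2.2']),
        ('liberty', ['2.3.0', '2.4.0', '2.5.0']),
        ('mitaka', ['2.5.0']),
    ])

    def swift_codenames_for(version):
        """Return every release codename whose version list contains `version`."""
        return [name for name, versions in SWIFT_CODENAMES.items()
                if version in versions]

    print(swift_codenames_for('2.4.0'))  # ['liberty']
    print(swift_codenames_for('2.5.0'))  # ['liberty', 'mitaka'] - ambiguous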
View File

@@ -138,7 +138,8 @@ def service_running(service_name):
     except subprocess.CalledProcessError:
         return False
     else:
-        if ("start/running" in output or "is running" in output):
+        if ("start/running" in output or "is running" in output or
+                "up and running" in output):
             return True
         else:
             return False

@@ -160,13 +161,13 @@ SYSTEMD_SYSTEM = '/run/systemd/system'

 def init_is_systemd():
+    """Return True if the host system uses systemd, False otherwise."""
     return os.path.isdir(SYSTEMD_SYSTEM)


 def adduser(username, password=None, shell='/bin/bash', system_user=False,
             primary_group=None, secondary_groups=None):
-    """
-    Add a user to the system.
+    """Add a user to the system.

     Will log but otherwise succeed if the user already exists.

@@ -174,7 +175,7 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
     :param str password: Password for user; if ``None``, create a system user
     :param str shell: The default shell for the user
     :param bool system_user: Whether to create a login or system user
-    :param str primary_group: Primary group for user; defaults to their username
+    :param str primary_group: Primary group for user; defaults to username
     :param list secondary_groups: Optional list of additional groups

     :returns: The password database entry struct, as returned by `pwd.getpwnam`

@@ -300,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):

 def fstab_remove(mp):
-    """Remove the given mountpoint entry from /etc/fstab
-    """
+    """Remove the given mountpoint entry from /etc/fstab"""
     return Fstab.remove_by_mountpoint(mp)


 def fstab_add(dev, mp, fs, options=None):
-    """Adds the given device entry to the /etc/fstab file
-    """
+    """Adds the given device entry to the /etc/fstab file"""
     return Fstab.add(dev, mp, fs, options=options)

@@ -363,8 +362,7 @@ def fstab_mount(mountpoint):

 def file_hash(path, hash_type='md5'):
-    """
-    Generate a hash checksum of the contents of 'path' or None if not found.
+    """Generate a hash checksum of the contents of 'path' or None if not found.

     :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
                           such as md5, sha1, sha256, sha512, etc.

@@ -379,10 +377,9 @@ def file_hash(path, hash_type='md5'):

 def path_hash(path):
-    """
-    Generate a hash checksum of all files matching 'path'. Standard wildcards
-    like '*' and '?' are supported, see documentation for the 'glob' module for
-    more information.
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.

     :return: dict: A { filename: hash } dictionary for all matched files.
             Empty if none found.

@@ -394,8 +391,7 @@ def path_hash(path):

 def check_hash(path, checksum, hash_type='md5'):
-    """
-    Validate a file using a cryptographic checksum.
+    """Validate a file using a cryptographic checksum.

     :param str checksum: Value of the checksum used to validate the file.
     :param str hash_type: Hash algorithm used to generate `checksum`.

@@ -410,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'):

 class ChecksumError(ValueError):
+    """A class derived from Value error to indicate the checksum failed."""
     pass

@@ -515,7 +512,7 @@ def get_bond_master(interface):

 def list_nics(nic_type=None):
-    '''Return a list of nics of given type(s)'''
+    """Return a list of nics of given type(s)"""
     if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:

@@ -557,12 +554,13 @@ def list_nics(nic_type=None):

 def set_nic_mtu(nic, mtu):
-    '''Set MTU on a network interface'''
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
     cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
     subprocess.check_call(cmd)


 def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""

@@ -574,6 +572,7 @@ def get_nic_mtu(nic):

 def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""

@@ -584,7 +583,7 @@ def get_nic_hwaddr(nic):

 def cmp_pkgrevno(package, revno, pkgcache=None):
-    '''Compare supplied revno with the revno of the installed package
+    """Compare supplied revno with the revno of the installed package

     * 1 => Installed revno is greater than supplied arg
     * 0 => Installed revno is the same as supplied arg

@@ -593,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     This function imports apt_cache function from charmhelpers.fetch if
     the pkgcache argument is None. Be sure to add charmhelpers.fetch if
     you call this function, or pass an apt_pkg.Cache() instance.
-    '''
+    """
     import apt_pkg
     if not pkgcache:
         from charmhelpers.fetch import apt_cache

@@ -603,19 +602,27 @@ def cmp_pkgrevno(package, revno, pkgcache=None):

 @contextmanager
-def chdir(d):
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specificed directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
     cur = os.getcwd()
     try:
-        yield os.chdir(d)
+        yield os.chdir(directory)
     finally:
         os.chdir(cur)


 def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """
-    Recursively change user and group ownership of files and directories
+    """Recursively change user and group ownership of files and directories
     in given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
     :param bool follow_links: Also Chown links if True
     :param bool chowntopdir: Also chown path itself if True
     """

@@ -639,15 +646,23 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):

 def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    """
     chownr(path, owner, group, follow_links=False)


 def get_total_ram():
-    '''The total amount of system RAM in bytes.
+    """The total amount of system RAM in bytes.

     This is what is reported by the OS, and may be overcommitted when
     there are multiple containers hosted on the same machine.
-    '''
+    """
     with open('/proc/meminfo', 'r') as f:
         for line in f.readlines():
             if line:

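As a small usage illustration of the reworked chdir() helper above, here is a self-contained sketch with the same logic outside charmhelpers; the '/tmp' path is just an example.

    import os
    from contextlib import contextmanager

    @contextmanager
    def chdir(directory):
        """Temporarily switch the working directory for the enclosed block."""
        cur = os.getcwd()
        try:
            yield os.chdir(directory)
        finally:
            os.chdir(cur)

    with chdir('/tmp'):
        print(os.getcwd())   # /tmp (or its resolved path) inside the block
    print(os.getcwd())       # original directory restored afterwards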
View File

@@ -15,7 +15,7 @@
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

 import os
-from subprocess import check_call
+from subprocess import check_call, CalledProcessError
 from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource,

@@ -63,6 +63,8 @@ class GitUrlFetchHandler(BaseFetchHandler):
                                     branch_name)
         try:
             self.clone(source, dest_dir, branch, depth)
+        except CalledProcessError as e:
+            raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return dest_dir

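A minimal, hypothetical sketch of the error path this hunk adds: a failing `git clone` now surfaces to the caller as UnhandledSource instead of leaking CalledProcessError. The UnhandledSource class and fetch_git function below are stand-ins, not the charmhelpers implementation.

    from subprocess import check_call, CalledProcessError

    class UnhandledSource(Exception):
        """Stand-in for charmhelpers.fetch.UnhandledSource."""

    def fetch_git(source, dest_dir):
        try:
            check_call(['git', 'clone', source, dest_dir])
        except CalledProcessError as e:
            raise UnhandledSource(e)        # clone failure wrapped here
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir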
View File

@@ -0,0 +1,34 @@
+# kilo
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+[DEFAULT]
+logdir=/var/log/nova
+state_path=/var/lib/nova
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose= {{ verbose }}
+use_syslog = {{ use_syslog }}
+api_paste_config=/etc/nova/api-paste.ini
+enabled_apis=metadata
+multi_host=True
+
+# Access to neutron API services
+network_api_class=nova.network.neutronv2.api.API
+{% include "section-zeromq" %}
+
+[neutron]
+url={{ quantum_url }}
+auth_plugin=password
+project_name={{ service_tenant }}
+username={{ service_username }}
+password={{ service_password }}
+auth_url={{ service_protocol }}://{{ keystone_host }}:{{ service_port }}
+region={{ region }}
+service_metadata_proxy=True
+metadata_proxy_shared_secret={{ shared_secret }}
+
+{% include "section-rabbitmq-oslo" %}
+
+[oslo_concurrency]
+lock_path=/var/lock/nova

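To see how the new template behaves, here is a hypothetical rendering sketch using plain Jinja2 with dummy values. The template path, the empty stubs for the shared section-zeromq and section-rabbitmq-oslo fragments, and every context value are assumptions; the charm itself renders its templates through charmhelpers' OpenStack templating rather than an ad-hoc loader like this.

    from jinja2 import Environment, DictLoader

    NOVA_CONF = open('templates/mitaka/nova.conf').read()  # assumed path

    env = Environment(loader=DictLoader({
        'nova.conf': NOVA_CONF,
        'section-zeromq': '',          # stub; the real fragment ships with the charm
        'section-rabbitmq-oslo': '',   # stub; the real fragment ships with the charm
    }))

    context = {                        # dummy values for illustration only
        'verbose': True,
        'use_syslog': False,
        'quantum_url': 'http://10.0.0.10:9696',
        'service_tenant': 'services',
        'service_username': 'quantum',
        'service_password': 'secret',
        'service_protocol': 'http',
        'keystone_host': '10.0.0.11',
        'service_port': 5000,
        'region': 'RegionOne',
        'shared_secret': 'not-a-real-secret',
    }

    print(env.get_template('nova.conf').render(**context))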
View File

@@ -121,11 +121,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which should use the source config option
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                      'ceph-osd', 'ceph-radosgw']
+                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
-                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
+                     'cinder-backup']

         if self.openstack:
             for svc in services: