Add systemd init support for deploy from source

systemd is used instead of upstart by default since Ubuntu 15.10
(Wily).  This adds systemd init file support for nova services
that are deployed from source.

Change-Id: I7d031e86853a3fb8b91501dc6bbd7f5f1b67701d
This commit is contained in:
Corey Bryant 2016-06-23 20:40:35 +00:00
parent 76f4183dff
commit faaf51e7ba
15 changed files with 384 additions and 99 deletions

View File

@ -19,14 +19,8 @@ options:
Specifies a default OpenStack release name, or a YAML dictionary
listing the git repositories to install from.
When openstack-origin-git is specified, openstack-specific
packages will be installed from source rather than from the
nova-compute charm's openstack-origin repository.
The default Openstack release name may be one of the following, where
the corresponding OpenStack github branch will be used:
* icehouse
* kilo
* liberty
* mitaka
* master
@ -50,10 +44,7 @@ options:
- {name: neutron,
repository: 'git://github.com/openstack/neutron',
branch: master}
Note that the installed config files will be determined based on
the OpenStack release of the nova-compute charm's openstack-origin
option.
release: master
rabbit-user:
default: neutron
type: string

View File

@ -71,7 +71,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
base_charms = {
'mysql': ['precise', 'trusty'],
'mongodb': ['precise', 'trusty'],
'nrpe': ['precise', 'trusty'],
'nrpe': ['precise', 'trusty', 'wily', 'xenial'],
}
for svc in other_services:
@ -112,7 +112,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Charms which should use the source config option
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw', 'ceph-mon']
'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',

View File

@ -57,6 +57,7 @@ from charmhelpers.core.host import (
mkdir,
write_file,
pwgen,
lsb_release,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
@ -1195,7 +1196,10 @@ class WorkerConfigContext(OSContextGenerator):
def __call__(self):
multiplier = config('worker-multiplier') or 0
ctxt = {"workers": self.num_cpus * multiplier}
count = int(self.num_cpus * multiplier)
if multiplier > 0 and count == 0:
count = 1
ctxt = {"workers": count}
return ctxt
@ -1436,7 +1440,8 @@ class AppArmorContext(OSContextGenerator):
:return ctxt: Dictionary of the apparmor profile or None
"""
if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
ctxt = {'aa_profile_mode': config('aa-profile-mode')}
ctxt = {'aa_profile_mode': config('aa-profile-mode'),
'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
else:
ctxt = None
return ctxt

View File

@ -220,7 +220,6 @@ GIT_DEFAULT_REPOS = {
}
GIT_DEFAULT_BRANCHES = {
'kilo': 'stable/kilo',
'liberty': 'stable/liberty',
'mitaka': 'stable/mitaka',
'master': 'master',
@ -413,7 +412,8 @@ def os_release(package, base='essex'):
global os_rel
if os_rel:
return os_rel
os_rel = (get_os_codename_package(package, fatal=False) or
os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
return os_rel
@ -719,7 +719,24 @@ def git_install_requested():
return config('openstack-origin-git') is not None
requirements_dir = None
def git_os_codename_install_source(projects_yaml):
    """
    Return the OpenStack codename of the release being installed from
    source, or None when a git-based install is not requested.

    :param projects_yaml: YAML string that is either a bare default
        release name (e.g. 'mitaka' or 'master') or a dictionary of git
        repositories optionally carrying a 'release' key.
    :returns: str release codename (e.g. 'mitaka'), or None when the
        release cannot be determined.
    """
    if not git_install_requested():
        return None

    projects = _git_yaml_load(projects_yaml)

    if isinstance(projects, dict):
        # A full repository dictionary carries an explicit 'release' key.
        # Using .get() (rather than `'release' in projects` followed by
        # indexing) also avoids a substring match + TypeError when the
        # loaded YAML is an unexpected scalar.
        release = projects.get('release')
    elif projects in GIT_DEFAULT_BRANCHES:
        # A bare default release name maps directly to a codename.
        # Membership on the dict itself (not .keys()) avoids a py3
        # TypeError for unhashable values.
        release = projects
    else:
        release = None

    if release == 'master':
        # 'master' tracks the next development release.
        return 'newton'
    return release
def git_default_repos(projects_yaml):
@ -740,12 +757,6 @@ def git_default_repos(projects_yaml):
}
repos = [repo]
# NOTE(coreycb): This is a temp work-around until the requirements
# repo moves from stable/kilo branch to kilo-eol tag. The core
# repos have already done this.
if default == 'kilo':
branch = 'kilo-eol'
# neutron-* and nova-* charms require some additional repos
if service in ['neutron-api', 'neutron-gateway',
'neutron-openvswitch']:
@ -778,7 +789,7 @@ def git_default_repos(projects_yaml):
}
repos.append(repo)
return yaml.dump(dict(repositories=repos))
return yaml.dump(dict(repositories=repos, release=default))
return projects_yaml
@ -793,6 +804,9 @@ def _git_yaml_load(projects_yaml):
return yaml.load(projects_yaml)
requirements_dir = None
def git_clone_and_install(projects_yaml, core_project):
"""
Clone/install all specified OpenStack repositories.
@ -856,6 +870,10 @@ def git_clone_and_install(projects_yaml, core_project):
# upper-constraints didn't exist until after icehouse
if not os.path.isfile(constraints):
constraints = None
# use constraints unless project yaml sets use_constraints to false
if 'use_constraints' in projects.keys():
if not projects['use_constraints']:
constraints = None
else:
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
@ -882,6 +900,8 @@ def _git_validate_projects_yaml(projects, core_project):
if projects['repositories'][-1]['name'] != core_project:
error_out('{} git repo must be specified last'.format(core_project))
_git_ensure_key_exists('release', projects)
def _git_ensure_key_exists(key, keys):
"""

View File

@ -21,9 +21,10 @@
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import bisect
import errno
import hashlib
import math
import six
import os
@ -76,8 +77,16 @@ log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
# The number of placement groups per OSD to target for placement group
# calculations. This number is chosen as 100 due to the ceph PG Calc
# documentation recommending to choose 100 for clusters which are not
# expected to increase in the foreseeable future. Since the majority of the
# calculations are done on deployment, target the case of non-expanding
# clusters as the default.
DEFAULT_PGS_PER_OSD_TARGET = 100
DEFAULT_POOL_WEIGHT = 10.0
LEGACY_PG_COUNT = 200
def validator(value, valid_type, valid_range=None):
@ -184,42 +193,106 @@ class Pool(object):
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
def get_pgs(self, pool_size):
"""
:param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
erasure coded pools
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
"""Return the number of placement groups to use when creating the pool.
Returns the number of placement groups which should be specified when
creating the pool. This is based upon the calculation guidelines
provided by the Ceph Placement Group Calculator (located online at
http://ceph.com/pgcalc/).
The number of placement groups are calculated using the following:
(Target PGs per OSD) * (OSD #) * (%Data)
----------------------------------------
(Pool size)
Per the upstream guidelines, the OSD # should really be considered
based on the number of OSDs which are eligible to be selected by the
pool. Since the pool creation doesn't specify any of CRUSH set rules,
the default rule will be dependent upon the type of pool being
created (replicated or erasure).
This code makes no attempt to determine the number of OSDs which can be
selected for the specific rule, rather it is left to the user to tune
in the form of 'expected-osd-count' config option.
:param pool_size: int. pool_size is either the number of replicas for
replicated pools or the K+M sum for erasure coded pools
:param percent_data: float. the percentage of data that is expected to
be contained in the pool for the specific OSD set. Default value
is to assume 10% of the data is for this pool, which is a
relatively low % of the data but allows for the pg_num to be
increased. NOTE: the default is primarily to handle the scenario
where related charms requiring pools has not been upgraded to
include an update to indicate their relative usage of the pools.
:return: int. The number of pgs to use.
"""
# Note: This calculation follows the approach that is provided
# by the Ceph PG Calculator located at http://ceph.com/pgcalc/.
validator(value=pool_size, valid_type=int)
# Ensure that percent data is set to something - even with a default
# it can be set to None, which would wreak havoc below.
if percent_data is None:
percent_data = DEFAULT_POOL_WEIGHT
# If the expected-osd-count is specified, then use the max between
# the expected-osd-count and the actual osd_count
osd_list = get_osds(self.service)
if not osd_list:
expected = config('expected-osd-count') or 0
if osd_list:
osd_count = max(expected, len(osd_list))
# Log a message to provide some insight if the calculations claim
# to be off because someone is setting the expected count and
# there are more OSDs in reality. Try to make a proper guess
# based upon the cluster itself.
if expected and osd_count != expected:
log("Found more OSDs than provided expected count. "
"Using the actual count instead", INFO)
elif expected:
# Use the expected-osd-count in older ceph versions to allow for
# a more accurate pg calculations
osd_count = expected
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
return 200
return LEGACY_PG_COUNT
osd_list_length = len(osd_list)
# Calculate based on Ceph best practices
if osd_list_length < 5:
return 128
elif 5 < osd_list_length < 10:
return 512
elif 10 < osd_list_length < 50:
return 4096
percent_data /= 100.0
target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
# The CRUSH algorithm has a slight optimization for placement groups
# with powers of 2 so find the nearest power of 2. If the nearest
# power of 2 is more than 25% below the original value, the next
# highest value is used. To do this, find the nearest power of 2 such
# that 2^n <= num_pg, check to see if its within the 25% tolerance.
exponent = math.floor(math.log(num_pg, 2))
nearest = 2 ** exponent
if (num_pg - nearest) > (num_pg * 0.25):
# Choose the next highest power of 2 since the nearest is more
# than 25% below the original value.
return int(nearest * 2)
else:
estimate = (osd_list_length * 100) / pool_size
# Return the next nearest power of 2
index = bisect.bisect_right(powers_of_two, estimate)
return powers_of_two[index]
return int(nearest)
class ReplicatedPool(Pool):
def __init__(self, service, name, pg_num=None, replicas=2):
def __init__(self, service, name, pg_num=None, replicas=2,
percent_data=10.0):
super(ReplicatedPool, self).__init__(service=service, name=name)
self.replicas = replicas
if pg_num is None:
self.pg_num = self.get_pgs(self.replicas)
if pg_num:
# Since the number of placement groups were specified, ensure
# that there aren't too many created.
max_pgs = self.get_pgs(self.replicas, 100.0)
self.pg_num = min(pg_num, max_pgs)
else:
self.pg_num = pg_num
self.pg_num = self.get_pgs(self.replicas, percent_data)
def create(self):
if not pool_exists(self.service, self.name):
@ -238,30 +311,39 @@ class ReplicatedPool(Pool):
# Default jerasure erasure coded pool
class ErasurePool(Pool):
def __init__(self, service, name, erasure_code_profile="default"):
def __init__(self, service, name, erasure_code_profile="default",
percent_data=10.0):
super(ErasurePool, self).__init__(service=service, name=name)
self.erasure_code_profile = erasure_code_profile
self.percent_data = percent_data
def create(self):
if not pool_exists(self.service, self.name):
# Try to find the erasure profile information so we can properly size the pgs
erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
# Try to find the erasure profile information in order to properly
# size the number of placement groups. The size of an erasure
# coded placement group is calculated as k+m.
erasure_profile = get_erasure_profile(self.service,
self.erasure_code_profile)
# Check for errors
if erasure_profile is None:
log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
level=ERROR)
raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
msg = ("Failed to discover erasure profile named "
"{}".format(self.erasure_code_profile))
log(msg, level=ERROR)
raise PoolCreationError(msg)
if 'k' not in erasure_profile or 'm' not in erasure_profile:
# Error
log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
level=ERROR)
raise PoolCreationError(
message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
msg = ("Unable to find k (data chunks) or m (coding chunks) "
"in erasure profile {}".format(erasure_profile))
log(msg, level=ERROR)
raise PoolCreationError(msg)
pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
k = int(erasure_profile['k'])
m = int(erasure_profile['m'])
pgs = self.get_pgs(k + m, self.percent_data)
# Create it
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(pgs), str(pgs),
'erasure', self.erasure_code_profile]
try:
check_call(cmd)
@ -955,16 +1037,22 @@ class CephBrokerRq(object):
self.request_id = str(uuid.uuid1())
self.ops = []
def add_op_create_pool(self, name, replica_count=3, pg_num=None):
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None):
"""Adds an operation to create a pool.
@param pg_num setting: optional setting. If not provided, this value
will be calculated by the broker based on how many OSDs are in the
cluster at the time of creation. Note that, if provided, this value
will be capped at the current available maximum.
@param weight: the percentage of data the pool makes up
"""
if pg_num and weight:
raise ValueError('pg_num and weight are mutually exclusive')
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count, 'pg_num': pg_num})
'replicas': replica_count, 'pg_num': pg_num,
'weight': weight})
def set_ops(self, ops):
"""Set request ops to provided value.
@ -982,7 +1070,7 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in ['replicas', 'name', 'op', 'pg_num']:
for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:

View File

@ -174,7 +174,7 @@ def init_is_systemd():
def adduser(username, password=None, shell='/bin/bash', system_user=False,
primary_group=None, secondary_groups=None, uid=None):
primary_group=None, secondary_groups=None, uid=None, home_dir=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
@ -186,6 +186,7 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:param int uid: UID for user being created
:param str home_dir: Home directory for user
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
@ -200,6 +201,8 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
cmd = ['useradd']
if uid:
cmd.extend(['--uid', str(uid)])
if home_dir:
cmd.extend(['--home', str(home_dir)])
if system_user or password is None:
cmd.append('--system')
else:

View File

@ -22,11 +22,12 @@ from copy import deepcopy
from charmhelpers.contrib.openstack import context, templating
from charmhelpers.contrib.openstack.utils import (
git_install_requested,
git_clone_and_install,
git_default_repos,
git_src_dir,
git_generate_systemd_init_files,
git_install_requested,
git_pip_venv_dir,
git_src_dir,
pause_unit,
resume_unit,
make_assess_status_func,
@ -43,6 +44,7 @@ from charmhelpers.contrib.network.ovs import (
full_restart,
)
from charmhelpers.core.hookenv import (
charm_dir,
config,
status_set,
)
@ -59,7 +61,9 @@ from charmhelpers.core.host import (
adduser,
add_group,
add_user_to_group,
lsb_release,
mkdir,
service,
service_restart,
service_running,
write_file,
@ -89,6 +93,7 @@ BASE_GIT_PACKAGES = [
'libxml2-dev',
'libxslt1-dev',
'libyaml-dev',
'openstack-pkg-tools',
'openvswitch-switch',
'python-dev',
'python-pip',
@ -103,6 +108,8 @@ GIT_PACKAGE_BLACKLIST = [
'neutron-server',
'neutron-plugin-openvswitch',
'neutron-plugin-openvswitch-agent',
'neutron-openvswitch',
'neutron-openvswitch-agent',
'neutron-openvswitch-agent',
]
@ -505,31 +512,61 @@ def git_post_install(projects_yaml):
perms=0o440)
bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
neutron_ovs_agent_context = {
'service_description': 'Neutron OpenvSwitch Plugin Agent',
'charm_name': 'neutron-openvswitch',
'process_name': 'neutron-openvswitch-agent',
'executable_name': os.path.join(bin_dir, 'neutron-openvswitch-agent'),
'cleanup_process_name': 'neutron-ovs-cleanup',
'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'log_file': '/var/log/neutron/openvswitch-agent.log',
}
# Use systemd init units/scripts from ubuntu wily onward
if lsb_release()['DISTRIB_RELEASE'] >= '15.10':
templates_dir = os.path.join(charm_dir(), 'templates/git')
daemons = ['neutron-openvswitch-agent', 'neutron-ovs-cleanup']
for daemon in daemons:
neutron_ovs_context = {
'daemon_path': os.path.join(bin_dir, daemon),
}
filename = daemon
if daemon == 'neutron-openvswitch-agent':
if os_release('neutron-common') < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
template_file = 'git/{}.init.in.template'.format(filename)
init_in_file = '{}.init.in'.format(filename)
render(template_file, os.path.join(templates_dir, init_in_file),
neutron_ovs_context, perms=0o644)
git_generate_systemd_init_files(templates_dir)
neutron_ovs_cleanup_context = {
'service_description': 'Neutron OpenvSwitch Cleanup',
'charm_name': 'neutron-openvswitch',
'process_name': 'neutron-ovs-cleanup',
'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
'log_file': '/var/log/neutron/ovs-cleanup.log',
}
for daemon in daemons:
filename = daemon
if daemon == 'neutron-openvswitch-agent':
if os_release('neutron-common') < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
service('enable', filename)
else:
neutron_ovs_agent_context = {
'service_description': 'Neutron OpenvSwitch Plugin Agent',
'charm_name': 'neutron-openvswitch',
'process_name': 'neutron-openvswitch-agent',
'executable_name': os.path.join(bin_dir,
'neutron-openvswitch-agent'),
'cleanup_process_name': 'neutron-ovs-cleanup',
'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'log_file': '/var/log/neutron/openvswitch-agent.log',
}
# NOTE(coreycb): Needs systemd support
render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
'/etc/init/neutron-plugin-openvswitch-agent.conf',
neutron_ovs_agent_context, perms=0o644)
render('git/upstart/neutron-ovs-cleanup.upstart',
'/etc/init/neutron-ovs-cleanup.conf',
neutron_ovs_cleanup_context, perms=0o644)
neutron_ovs_cleanup_context = {
'service_description': 'Neutron OpenvSwitch Cleanup',
'charm_name': 'neutron-openvswitch',
'process_name': 'neutron-ovs-cleanup',
'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
'log_file': '/var/log/neutron/ovs-cleanup.log',
}
if os_release('neutron-common') < 'mitaka':
render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
'/etc/init/neutron-plugin-openvswitch-agent.conf',
neutron_ovs_agent_context, perms=0o644)
else:
render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
'/etc/init/neutron-openvswitch-agent.conf',
neutron_ovs_agent_context, perms=0o644)
render('git/upstart/neutron-ovs-cleanup.upstart',
'/etc/init/neutron-ovs-cleanup.conf',
neutron_ovs_cleanup_context, perms=0o644)
if not is_unit_paused_set():
service_restart('neutron-plugin-openvswitch-agent')

View File

@ -0,0 +1,20 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: neutron-openvswitch-agent
# Required-Start: $network $local_fs $remote_fs $syslog
# Required-Stop: $remote_fs openvswitch-switch
# Should-Start: mysql postgresql rabbitmq-server keystone neutron-ovs-cleanup
# Should-Stop: mysql postgresql rabbitmq-server keystone
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Neutron Open vSwitch Agent
# Description: Open vSwitch agent for OpenStack Neutron ML2 plugin
### END INIT INFO
# Authors: Julien Danjou <acid@debian.org>, Thomas Goirand <zigo@debian.org>
DESC="Openstack Neutron Open vSwitch Agent"
PROJECT_NAME=neutron
NAME=${PROJECT_NAME}-openvswitch-agent
DAEMON={{ daemon_path }}
DAEMON_ARGS="--config-file=/etc/neutron/plugins/ml2/openvswitch_agent.ini"

View File

@ -0,0 +1,19 @@
[Unit]
Description=Openstack Neutron Open vSwitch Plugin Agent
After=mysql.service postgresql.service rabbitmq-server.service keystone.service
Requires=neutron-ovs-cleanup.service
[Service]
User=neutron
Group=neutron
WorkingDirectory=/var/lib/neutron
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /var/lock/neutron /var/log/neutron /var/lib/neutron
ExecStartPre=/bin/chown neutron:neutron /var/lock/neutron /var/log/neutron /var/lib/neutron
ExecStart=/etc/init.d/neutron-openvswitch-agent systemd-start
Restart=on-failure
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,19 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: neutron-ovs-cleanup
# Required-Start: $network $local_fs $remote_fs $syslog openvswitch-switch
# Required-Stop: $remote_fs
# Should-Start:
# Should-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Neutron OVS Cleanup
# Description: OpenvSwitch Cleanup for OpenStack Neutron
### END INIT INFO
# Authors: James Page <james.page@ubuntu.com>
DESC="OpenStack Neutron OVS cleanup"
PROJECT_NAME=neutron
NAME=${PROJECT_NAME}-ovs-cleanup
DAEMON={{ daemon_path }}

View File

@ -0,0 +1,17 @@
[Unit]
Description=OpenStack Neutron OVS cleanup
[Service]
Type=oneshot
User=neutron
Group=neutron
WorkingDirectory=/var/lib/neutron
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /var/lock/neutron /var/log/neutron /var/lib/neutron
ExecStartPre=/bin/chown neutron:neutron /var/lock/neutron /var/log/neutron /var/lib/neutron
ExecStart=/etc/init.d/neutron-ovs-cleanup systemd-start
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,20 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: neutron-plugin-openvswitch-agent
# Required-Start: $network $local_fs $remote_fs $syslog
# Required-Stop: $remote_fs openvswitch-switch
# Should-Start: mysql postgresql rabbitmq-server keystone neutron-ovs-cleanup
# Should-Stop: mysql postgresql rabbitmq-server keystone
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Neutron Open vSwitch Agent
# Description: Open vSwitch agent for OpenStack Neutron ML2 plugin
### END INIT INFO
# Authors: Julien Danjou <acid@debian.org>, Thomas Goirand <zigo@debian.org>
DESC="Openstack Neutron Open vSwitch Agent"
PROJECT_NAME=neutron
NAME=${PROJECT_NAME}-plugin-openvswitch-agent
DAEMON={{ daemon_path }}
DAEMON_ARGS="--config-file=/etc/neutron/plugins/ml2/openvswitch_agent.ini"

View File

@ -0,0 +1,19 @@
[Unit]
Description=Openstack Neutron Open vSwitch Plugin Agent
After=mysql.service postgresql.service rabbitmq-server.service keystone.service
Requires=neutron-ovs-cleanup.service
[Service]
User=neutron
Group=neutron
WorkingDirectory=/var/lib/neutron
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /var/lock/neutron /var/log/neutron /var/lib/neutron
ExecStartPre=/bin/chown neutron:neutron /var/lock/neutron /var/log/neutron /var/lib/neutron
ExecStart=/etc/init.d/neutron-plugin-openvswitch-agent systemd-start
Restart=on-failure
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target

View File

@ -71,7 +71,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
base_charms = {
'mysql': ['precise', 'trusty'],
'mongodb': ['precise', 'trusty'],
'nrpe': ['precise', 'trusty'],
'nrpe': ['precise', 'trusty', 'wily', 'xenial'],
}
for svc in other_services:
@ -112,7 +112,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Charms which should use the source config option
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw', 'ceph-mon']
'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',

View File

@ -37,8 +37,12 @@ TO_PATCH = [
'config',
'os_release',
'filter_installed_packages',
'git_src_dir',
'lsb_release',
'neutron_plugin_attribute',
'full_restart',
'render',
'service',
'service_restart',
'service_running',
'ExternalPortContext',
@ -430,18 +434,17 @@ class TestNeutronOVSUtils(CharmTestCase):
]
self.assertEquals(write_file.call_args_list, expected)
@patch.object(nutils, 'git_src_dir')
@patch.object(nutils, 'service_restart')
@patch.object(nutils, 'render')
@patch('os.listdir')
@patch('os.path.join')
@patch('os.path.exists')
@patch('os.symlink')
@patch('shutil.copytree')
@patch('shutil.rmtree')
def test_git_post_install(self, rmtree, copytree, symlink, exists, join,
render, service_restart, git_src_dir):
def test_git_post_install_upstart(self, rmtree, copytree, symlink, exists,
join, listdir):
projects_yaml = openstack_origin_git
join.return_value = 'joined-string'
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.04'}
nutils.git_post_install(projects_yaml)
expected = [
call('joined-string', '/etc/neutron'),
@ -479,11 +482,35 @@ class TestNeutronOVSUtils(CharmTestCase):
'/etc/init/neutron-ovs-cleanup.conf',
neutron_ovs_cleanup_context, perms=0o644),
]
self.assertEquals(render.call_args_list, expected)
self.assertEquals(self.render.call_args_list, expected)
expected = [
call('neutron-plugin-openvswitch-agent'),
]
self.assertEquals(service_restart.call_args_list, expected)
self.assertEquals(self.service_restart.call_args_list, expected)
@patch('os.listdir')
@patch('os.path.join')
@patch('os.path.exists')
@patch('os.symlink')
@patch('shutil.copytree')
@patch('shutil.rmtree')
def test_git_post_install_systemd(self, rmtree, copytree, symlink, exists,
join, listdir):
projects_yaml = openstack_origin_git
join.return_value = 'joined-string'
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.10'}
nutils.git_post_install(projects_yaml)
expected = [
call('git/neutron_sudoers', '/etc/sudoers.d/neutron_sudoers',
{}, perms=288),
call('git/neutron-plugin-openvswitch-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-ovs-cleanup.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420)
]
self.assertEquals(self.render.call_args_list, expected)
def test_assess_status(self):
with patch.object(nutils, 'assess_status_func') as asf: