Remove deploy from source support

Drop support for deployment from Git repositories, as deprecated
in the 17.02 charm release.  This feature is unmaintained and has
no known users.

Change-Id: Ib954ddd1fb63d409af77949d8e76a6d6da8f2cde
James Page 2018-01-10 12:01:08 +00:00
parent 46faae4ff8
commit fe9633856b
29 changed files with 225 additions and 1389 deletions

View File

@@ -1,5 +1,3 @@
git-reinstall:
description: Reinstall neutron-openvswitch from the openstack-origin-git repositories.
pause:
description: Pause the neutron-openvswitch unit. This action will stop neutron-openvswitch services.
resume:

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
sys.path.append('hooks/')
from charmhelpers.contrib.openstack.utils import (
git_install_requested,
)
from charmhelpers.core.hookenv import (
action_set,
action_fail,
config,
)
from neutron_ovs_utils import (
git_install,
)
from neutron_ovs_hooks import (
config_changed,
)
def git_reinstall():
"""Reinstall from source and restart services.
If the openstack-origin-git config option was used to install openstack
from source git repositories, then this action can be used to reinstall
from updated git repositories, followed by a restart of services."""
if not git_install_requested():
action_fail('openstack-origin-git is not configured')
return
try:
git_install(config('openstack-origin-git'))
config_changed()
except:
action_set({'traceback': traceback.format_exc()})
action_fail('git-reinstall resulted in an unexpected error')
if __name__ == '__main__':
git_reinstall()

View File

@@ -12,40 +12,6 @@ options:
default: False
description: |
Setting this to True will allow supporting services to log to syslog.
openstack-origin-git:
type: string
default:
description: |
Specifies a default OpenStack release name, or a YAML dictionary
listing the git repositories to install from.
.
The default Openstack release name may be one of the following, where
the corresponding OpenStack github branch will be used:
* liberty
* mitaka
* newton
* master
.
The YAML must minimally include requirements, neutron-fwaas,
neutron-lbaas, neutron-vpnaas, and neutron repositories, and may
also include repositories for other dependencies:
repositories:
- {name: requirements,
repository: 'git://github.com/openstack/requirements',
branch: master}
- {name: neutron-fwaas,
repository: 'git://github.com/openstack/neutron-fwaas',
branch: master}
- {name: neutron-lbaas,
repository: 'git://github.com/openstack/neutron-lbaas',
branch: master}
- {name: neutron-vpnaas,
repository: 'git://github.com/openstack/neutron-vpnaas',
branch: master}
- {name: neutron,
repository: 'git://github.com/openstack/neutron',
branch: master}
release: master
rabbit-user:
type: string
default: neutron

View File

@@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
for pool in df['pools']:
if pool['id'] == pool_id:
pool_name = pool['name']
obj_count = pool['stats']['objects']
kb_used = pool['stats']['kb_used']
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))

View File

@@ -97,10 +97,10 @@ from charmhelpers.contrib.network.ip import (
from charmhelpers.contrib.openstack.utils import (
config_flags_parser,
get_host_ip,
git_determine_usr_bin,
git_determine_python_path,
enable_memcache,
snap_install_requested,
CompareOpenStackReleases,
os_release,
)
from charmhelpers.core.unitdata import kv
@@ -332,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
self.rel_name = rel_name
self.interfaces = [self.rel_name]
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
def _setup_pki_cache(self):
if self.service and self.service_user:
# This is required for pki token signing if we don't want /tmp to
# be used.
@@ -345,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
mkdir(path=cachedir, owner=self.service_user,
group=self.service_user, perms=0o700)
return cachedir
return None
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
cachedir = self._setup_pki_cache()
if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
@@ -383,6 +389,62 @@ class IdentityServiceContext(OSContextGenerator):
return {}
class IdentityCredentialsContext(IdentityServiceContext):
'''Context for identity-credentials interface type'''
def __init__(self,
service=None,
service_user=None,
rel_name='identity-credentials'):
super(IdentityCredentialsContext, self).__init__(service,
service_user,
rel_name)
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
cachedir = self._setup_pki_cache()
if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
credentials_host = rdata.get('credentials_host')
credentials_host = (
format_ipv6_addr(credentials_host) or credentials_host
)
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
svc_protocol = rdata.get('credentials_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({
'service_port': rdata.get('credentials_port'),
'service_host': credentials_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('credentials_project'),
'admin_tenant_id': rdata.get('credentials_project_id'),
'admin_user': rdata.get('credentials_username'),
'admin_password': rdata.get('credentials_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'api_version': api_version
})
if float(api_version) > 2:
ctxt.update({'admin_domain_name':
rdata.get('domain')})
if self.context_complete(ctxt):
return ctxt
return {}
class AMQPContext(OSContextGenerator):
def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
@@ -1321,8 +1383,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
"public_processes": int(math.ceil(self.public_process_weight *
total_processes)),
"threads": 1,
"usr_bin": git_determine_usr_bin(),
"python_path": git_determine_python_path(),
}
return ctxt
@@ -1566,8 +1626,18 @@ class InternalEndpointContext(OSContextGenerator):
endpoints by default so this allows admins to optionally use internal
endpoints.
"""
def __init__(self, ost_rel_check_pkg_name):
self.ost_rel_check_pkg_name = ost_rel_check_pkg_name
def __call__(self):
return {'use_internal_endpoints': config('use-internal-endpoints')}
ctxt = {'use_internal_endpoints': config('use-internal-endpoints')}
rel = os_release(self.ost_rel_check_pkg_name, base='icehouse')
if CompareOpenStackReleases(rel) >= 'pike':
ctxt['volume_api_version'] = '3'
else:
ctxt['volume_api_version'] = '2'
return ctxt
class AppArmorContext(OSContextGenerator):
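For charms consuming these helpers, the new IdentityCredentialsContext and the reworked InternalEndpointContext are registered like any other OSContextGenerator. A minimal sketch, assuming a renderer for a hypothetical /etc/neutron/neutron.conf (the file path, service names and release below are illustrative, not taken from this commit):

from charmhelpers.contrib.openstack import context, templating

# Hypothetical example: names, paths and release are placeholders.
configs = templating.OSConfigRenderer(templates_dir='templates',
                                      openstack_release='pike')
configs.register('/etc/neutron/neutron.conf', [
    # Pulls credentials from an identity-credentials relation.
    context.IdentityCredentialsContext(service='neutron',
                                       service_user='neutron'),
    # Now takes a package name to resolve the release and pick the
    # Cinder volume API version (3 from pike onwards, otherwise 2).
    context.InternalEndpointContext('neutron-common'),
])
configs.write_all()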

View File

@@ -17,22 +17,22 @@ defaults
{%- if haproxy_queue_timeout %}
timeout queue {{ haproxy_queue_timeout }}
{%- else %}
timeout queue 5000
timeout queue 9000
{%- endif %}
{%- if haproxy_connect_timeout %}
timeout connect {{ haproxy_connect_timeout }}
{%- else %}
timeout connect 5000
timeout connect 9000
{%- endif %}
{%- if haproxy_client_timeout %}
timeout client {{ haproxy_client_timeout }}
{%- else %}
timeout client 30000
timeout client 90000
{%- endif %}
{%- if haproxy_server_timeout %}
timeout server {{ haproxy_server_timeout }}
{%- else %}
timeout server 30000
timeout server 90000
{%- endif %}
listen stats

View File

@@ -23,7 +23,6 @@ import sys
import re
import itertools
import functools
import shutil
import six
import traceback
@@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
relation_set,
service_name,
status_set,
hook_name,
application_version_set,
@@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
port_has_listener,
)
from charmhelpers.contrib.python.packages import (
pip_create_virtualenv,
pip_install,
)
from charmhelpers.core.host import (
lsb_release,
mounts,
@@ -84,7 +77,6 @@ from charmhelpers.core.host import (
)
from charmhelpers.fetch import (
apt_cache,
install_remote,
import_key as fetch_import_key,
add_source as fetch_add_source,
SourceConfigError,
@@ -278,27 +270,6 @@ PACKAGE_CODENAMES = {
]),
}
GIT_DEFAULT_REPOS = {
'requirements': 'git://github.com/openstack/requirements',
'cinder': 'git://github.com/openstack/cinder',
'glance': 'git://github.com/openstack/glance',
'horizon': 'git://github.com/openstack/horizon',
'keystone': 'git://github.com/openstack/keystone',
'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
'neutron': 'git://github.com/openstack/neutron',
'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
'nova': 'git://github.com/openstack/nova',
}
GIT_DEFAULT_BRANCHES = {
'liberty': 'stable/liberty',
'mitaka': 'stable/mitaka',
'newton': 'stable/newton',
'master': 'master',
}
DEFAULT_LOOPBACK_SIZE = '5G'
@@ -392,6 +363,8 @@ def get_swift_codename(version):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
if six.PY3:
ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
@@ -528,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
if _os_rel:
return _os_rel
_os_rel = (
git_os_codename_install_source(config('openstack-origin-git')) or
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
@@ -769,417 +741,6 @@ def os_requires_version(ostack_release, pkg):
return wrap
def git_install_requested():
"""
Returns true if openstack-origin-git is specified.
"""
return config('openstack-origin-git') is not None
def git_os_codename_install_source(projects_yaml):
"""
Returns OpenStack codename of release being installed from source.
"""
if git_install_requested():
projects = _git_yaml_load(projects_yaml)
if projects in GIT_DEFAULT_BRANCHES.keys():
if projects == 'master':
return 'ocata'
return projects
if 'release' in projects:
if projects['release'] == 'master':
return 'ocata'
return projects['release']
return None
def git_default_repos(projects_yaml):
"""
Returns default repos if a default openstack-origin-git value is specified.
"""
service = service_name()
core_project = service
for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
if projects_yaml == default:
# add the requirements repo first
repo = {
'name': 'requirements',
'repository': GIT_DEFAULT_REPOS['requirements'],
'branch': branch,
}
repos = [repo]
# neutron-* and nova-* charms require some additional repos
if service in ['neutron-api', 'neutron-gateway',
'neutron-openvswitch']:
core_project = 'neutron'
if service == 'neutron-api':
repo = {
'name': 'networking-hyperv',
'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
'branch': branch,
}
repos.append(repo)
for project in ['neutron-fwaas', 'neutron-lbaas',
'neutron-vpnaas', 'nova']:
repo = {
'name': project,
'repository': GIT_DEFAULT_REPOS[project],
'branch': branch,
}
repos.append(repo)
elif service in ['nova-cloud-controller', 'nova-compute']:
core_project = 'nova'
repo = {
'name': 'neutron',
'repository': GIT_DEFAULT_REPOS['neutron'],
'branch': branch,
}
repos.append(repo)
elif service == 'openstack-dashboard':
core_project = 'horizon'
# finally add the current service's core project repo
repo = {
'name': core_project,
'repository': GIT_DEFAULT_REPOS[core_project],
'branch': branch,
}
repos.append(repo)
return yaml.dump(dict(repositories=repos, release=default))
return projects_yaml
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.
"""
if not projects_yaml:
return None
return yaml.load(projects_yaml)
requirements_dir = None
def git_clone_and_install(projects_yaml, core_project):
"""
Clone/install all specified OpenStack repositories.
The expected format of projects_yaml is:
repositories:
- {name: keystone,
repository: 'git://git.openstack.org/openstack/keystone.git',
branch: 'stable/icehouse'}
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements.git',
branch: 'stable/icehouse'}
directory: /mnt/openstack-git
http_proxy: squid-proxy-url
https_proxy: squid-proxy-url
The directory, http_proxy, and https_proxy keys are optional.
"""
global requirements_dir
parent_dir = '/mnt/openstack-git'
http_proxy = None
projects = _git_yaml_load(projects_yaml)
_git_validate_projects_yaml(projects, core_project)
old_environ = dict(os.environ)
if 'http_proxy' in projects.keys():
http_proxy = projects['http_proxy']
os.environ['http_proxy'] = projects['http_proxy']
if 'https_proxy' in projects.keys():
os.environ['https_proxy'] = projects['https_proxy']
if 'directory' in projects.keys():
parent_dir = projects['directory']
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
# Upgrade setuptools and pip from default virtualenv versions. The default
# versions in trusty break master OpenStack branch deployments.
for p in ['pip', 'setuptools']:
pip_install(p, upgrade=True, proxy=http_proxy,
venv=os.path.join(parent_dir, 'venv'))
constraints = None
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
depth = '1'
if 'depth' in p.keys():
depth = p['depth']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=False)
requirements_dir = repo_dir
constraints = os.path.join(repo_dir, "upper-constraints.txt")
# upper-constraints didn't exist until after icehouse
if not os.path.isfile(constraints):
constraints = None
# use constraints unless project yaml sets use_constraints to false
if 'use_constraints' in projects.keys():
if not projects['use_constraints']:
constraints = None
else:
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=True,
constraints=constraints)
os.environ = old_environ
def _git_validate_projects_yaml(projects, core_project):
"""
Validate the projects yaml.
"""
_git_ensure_key_exists('repositories', projects)
for project in projects['repositories']:
_git_ensure_key_exists('name', project.keys())
_git_ensure_key_exists('repository', project.keys())
_git_ensure_key_exists('branch', project.keys())
if projects['repositories'][0]['name'] != 'requirements':
error_out('{} git repo must be specified first'.format('requirements'))
if projects['repositories'][-1]['name'] != core_project:
error_out('{} git repo must be specified last'.format(core_project))
_git_ensure_key_exists('release', projects)
def _git_ensure_key_exists(key, keys):
"""
Ensure that key exists in keys.
"""
if key not in keys:
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
update_requirements, constraints=None):
"""
Clone and install a single git repository.
"""
if not os.path.exists(parent_dir):
juju_log('Directory already exists at {}. '
'No need to create directory.'.format(parent_dir))
os.mkdir(parent_dir)
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(
repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv')
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(venv, repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
if http_proxy:
pip_install(repo_dir, proxy=http_proxy, venv=venv,
constraints=constraints)
else:
pip_install(repo_dir, venv=venv, constraints=constraints)
return repo_dir
def _git_update_requirements(venv, package_dir, reqs_dir):
"""
Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt.
"""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
python = os.path.join(venv, 'bin/python')
cmd = [python, 'update.py', package_dir]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from "
"global-requirements.txt".format(package))
os.chdir(orig_dir)
def git_pip_venv_dir(projects_yaml):
"""
Return the pip virtualenv path.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
return os.path.join(parent_dir, 'venv')
def git_src_dir(projects_yaml, project):
"""
Return the directory where the specified project's source is located.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
for p in projects['repositories']:
if p['name'] == project:
return os.path.join(parent_dir, os.path.basename(p['repository']))
return None
def git_yaml_value(projects_yaml, key):
"""
Return the value in projects_yaml for the specified key.
"""
projects = _git_yaml_load(projects_yaml)
if key in projects.keys():
return projects[key]
return None
def git_generate_systemd_init_files(templates_dir):
"""
Generate systemd init files.
Generates and installs systemd init units and script files based on the
*.init.in files contained in the templates_dir directory.
This code is based on the openstack-pkg-tools package and its init
script generation, which is used by the OpenStack packages.
"""
for f in os.listdir(templates_dir):
# Create the init script and systemd unit file from the template
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
init_source = os.path.join(templates_dir, init_file)
service_source = os.path.join(templates_dir, service_file)
init_dest = os.path.join('/etc/init.d', init_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
template = ('/usr/share/openstack-pkg-tools/'
'init-script-template')
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(init_dest):
os.remove(init_dest)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(init_source, init_dest)
shutil.copyfile(service_source, service_dest)
os.chmod(init_dest, 0o755)
for f in os.listdir(templates_dir):
# If there's a service.in file, use it instead of the generated one
if f.endswith(".service.in"):
service_in_file = f
service_file = f[:-3]
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(service_in_source, service_source)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
for f in os.listdir(templates_dir):
# Generate the systemd unit if there's no existing .service.in
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_in_file = "{}.service.in".format(init_file)
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
if not os.path.exists(service_in_source):
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
def git_determine_usr_bin():
"""Return the /usr/bin path for Apache2 config.
The /usr/bin path will be located in the virtualenv if the charm
is configured to deploy from source.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
else:
return '/usr/bin'
def git_determine_python_path():
"""Return the python-path for Apache2 config.
Returns 'None' unless the charm is configured to deploy from source,
in which case the path of the virtualenv's site-packages is returned.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml),
'lib/python2.7/site-packages')
else:
return None
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@@ -1613,27 +1174,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""
ret = False
if git_install_requested():
action_set({'outcome': 'installed from source, skipped upgrade.'})
else:
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'no upgrade available.'})
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
else:
action_set({'outcome': 'no upgrade available.'})
return ret
@@ -2043,14 +1601,25 @@ def token_cache_pkgs(source=None, release=None):
def update_json_file(filename, items):
"""Updates the json `filename` with a given dict.
:param filename: json filename (i.e.: /etc/glance/policy.json)
:param filename: path to json file (e.g. /etc/glance/policy.json)
:param items: dict of items to update
"""
if not items:
return
with open(filename) as fd:
policy = json.load(fd)
# Compare before and after and if nothing has changed don't write the file
# since that could cause unnecessary service restarts.
before = json.dumps(policy, indent=4, sort_keys=True)
policy.update(items)
after = json.dumps(policy, indent=4, sort_keys=True)
if before == after:
return
with open(filename, "w") as fd:
fd.write(json.dumps(policy, indent=4))
fd.write(after)
@cached
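With the git short-circuit gone, do_action_openstack_upgrade only checks for an available package upgrade and the action-managed-upgrade flag. A hedged sketch of how a charm's openstack-upgrade action could wire it up; the charm-side callback and configs helper named here are hypothetical stand-ins, not code shown in this diff:

import sys

sys.path.append('hooks/')

from charmhelpers.contrib.openstack.utils import do_action_openstack_upgrade

# Hypothetical charm-specific pieces; the real names live in the charm,
# not in charmhelpers.
from neutron_ovs_utils import do_openstack_upgrade, register_configs


def openstack_upgrade():
    """Run a package-based upgrade if one is available and permitted."""
    # Returns True only when an upgrade was actually performed.
    return do_action_openstack_upgrade('neutron-common',
                                       do_openstack_upgrade,
                                       register_configs())


if __name__ == '__main__':
    openstack_upgrade()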

View File

@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
if valid_type is six.string_types:
if isinstance(value, six.string_types):
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
@@ -377,12 +377,12 @@ def get_mon_map(service):
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
log("Unable to parse mon_status json: {}. Error: {}"
.format(mon_status, str(v)))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
log("mon_status command failed with message: {}"
.format(str(e)))
raise
@@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value):
:param value:
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
str(value).lower()]
try:
check_call(cmd)
except CalledProcessError:
@@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
version = ceph_version()
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
'ruleset_failure_domain=' + failure_domain]
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
# failure_domain changed in luminous
if version and version >= '12.0.0':
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
# Add plugin specific information
if locality is not None:
# For local erasure codes
@@ -1064,14 +1073,24 @@ class CephBrokerRq(object):
self.ops = []
def add_op_request_access_to_group(self, name, namespace=None,
permission=None, key_name=None):
permission=None, key_name=None,
object_prefix_permissions=None):
"""
Adds the requested permissions to the current service's Ceph key,
allowing the key to access only the specified pools
allowing the key to access only the specified pools or
object prefixes. object_prefix_permissions should be a dictionary
keyed on the permission with the corresponding value being a list
of prefixes to apply that permission to.
{
'rwx': ['prefix1', 'prefix2'],
'class-read': ['prefix3']}
"""
self.ops.append({'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace, 'name': key_name or service_name(),
'group-permission': permission})
self.ops.append({
'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace,
'name': key_name or service_name(),
'group-permission': permission,
'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
@@ -1107,7 +1126,10 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
for key in [
'replicas', 'name', 'op', 'pg_num', 'weight',
'group', 'group-namespace', 'group-permission',
'object-prefix-permissions']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:
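The broker request API gains per-object-prefix permissions. A minimal sketch of a request that uses it, assuming the module path charmhelpers.contrib.storage.linux.ceph; the pool, group and prefix names are made up for illustration:

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
# Create a pool and place it in a named group.
rq.add_op_create_pool(name='gnocchi', replica_count=3, group='metrics')
# Grant this unit's key rwx on the group, plus class-read limited to
# objects whose names start with the given prefixes.
rq.add_op_request_access_to_group(
    name='metrics',
    permission='rwx',
    object_prefix_permissions={'class-read': ['rbd_id.', 'rbd_header.']})

The finished request is normally handed off via the same module's send_request_if_needed() on the ceph client relation.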

View File

@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from subprocess import (
CalledProcessError,
check_call,
@@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device):
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
def list_logical_volumes(select_criteria=None, path_mode=False):
'''
List logical volumes
:param select_criteria: str: Limit list to those volumes matching this
criteria (see 'lvs -S help' for more details)
:param path_mode: bool: return logical volume name in 'vg/lv' format, this
format is required for some commands like lvextend
:returns: [str]: List of logical volumes
'''
lv_display_attr = 'lv_name'
if path_mode:
# Parsing output logic relies on the column order
lv_display_attr = 'vg_name,' + lv_display_attr
cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
if select_criteria:
cmd.extend(['--select', select_criteria])
lvs = []
for lv in check_output(cmd).decode('UTF-8').splitlines():
if not lv:
continue
if path_mode:
lvs.append('/'.join(lv.strip().split()))
else:
lvs.append(lv.strip())
return lvs
list_thin_logical_volume_pools = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^t')
list_thin_logical_volumes = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^V')
def extend_logical_volume_by_device(lv_name, block_device):
'''
Extends the size of logical volume lv_name by the amount of free space on
physical volume block_device.
:param lv_name: str: name of logical volume to be extended (vg/lv format)
:param block_device: str: name of block_device to be allocated to lv_name
'''
cmd = ['lvextend', lv_name, block_device]
check_call(cmd)
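A short usage sketch for the new LVM helpers, assuming this is charmhelpers' contrib.storage.linux.lvm module; the volume group and device names are examples only:

from charmhelpers.contrib.storage.linux.lvm import (
    extend_logical_volume_by_device,
    list_logical_volumes,
    list_thin_logical_volume_pools,
)

# All logical volumes, in 'vg/lv' form so they can be fed to lvextend.
all_lvs = list_logical_volumes(path_mode=True)

# Thin pools only (lv_attr starting with 't'), via the partials above.
thin_pools = list_thin_logical_volume_pools(path_mode=True)

# Example: grow the first thin pool onto a freshly added physical volume.
if thin_pools:
    extend_logical_volume_by_device(thin_pools[0], '/dev/vdb')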

View File

@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
'bionic',
)

View File

@@ -19,8 +19,6 @@ import sys
from copy import deepcopy
from charmhelpers.contrib.openstack.utils import (
config_value_changed,
git_install_requested,
pausable_restart_on_change as restart_on_change,
)
@@ -40,7 +38,6 @@ from neutron_ovs_utils import (
OVS_DEFAULT,
configure_ovs,
configure_sriov,
git_install,
get_shared_secret,
register_configs,
restart_map,
@@ -59,7 +56,6 @@ CONFIGS = register_configs()
@hooks.hook()
def install():
install_packages()
git_install(config('openstack-origin-git'))
# NOTE(wolsen): Do NOT add restart_on_change decorator without consideration
@@ -88,9 +84,6 @@ def upgrade_charm():
@restart_on_change(restart_map())
def config_changed():
install_packages()
if git_install_requested():
if config_value_changed('openstack-origin-git'):
git_install(config('openstack-origin-git'))
configure_ovs()
CONFIGS.write_all()

View File

@@ -13,7 +13,6 @@
# limitations under the License.
import os
import shutil
from itertools import chain
import subprocess
@@ -22,12 +21,6 @@ from copy import deepcopy
from charmhelpers.contrib.openstack import context, templating
from charmhelpers.contrib.openstack.utils import (
git_clone_and_install,
git_default_repos,
git_generate_systemd_init_files,
git_install_requested,
git_pip_venv_dir,
git_src_dir,
pause_unit,
resume_unit,
make_assess_status_func,
@@ -47,7 +40,6 @@ from charmhelpers.contrib.network.ovs import (
full_restart,
)
from charmhelpers.core.hookenv import (
charm_dir,
config,
status_set,
log,
@@ -63,20 +55,13 @@ from charmhelpers.contrib.openstack.context import (
WorkerConfigContext,
)
from charmhelpers.core.host import (
adduser,
add_group,
add_user_to_group,
lsb_release,
mkdir,
service,
service_restart,
service_running,
write_file,
CompareHostReleases,
)
from charmhelpers.core.templating import render
from charmhelpers.fetch import (
apt_install,
apt_purge,
@@ -96,33 +81,6 @@ REQUIRED_INTERFACES = {
'messaging': ['amqp', 'zeromq-configuration'],
}
BASE_GIT_PACKAGES = [
'ipset',
'libffi-dev',
'libssl-dev',
'libxml2-dev',
'libxslt1-dev',
'libyaml-dev',
'openstack-pkg-tools',
'openvswitch-switch',
'python-dev',
'python-pip',
'python-setuptools',
'zlib1g-dev',
]
# ubuntu packages that should not be installed when deploying from git
GIT_PACKAGE_BLACKLIST = [
'neutron-l3-agent',
'neutron-metadata-agent',
'neutron-server',
'neutron-plugin-openvswitch',
'neutron-plugin-openvswitch-agent',
'neutron-openvswitch',
'neutron-openvswitch-agent',
'neutron-openvswitch-agent',
]
VERSION_PACKAGE = 'neutron-common'
NOVA_CONF_DIR = "/etc/nova"
NEUTRON_DHCP_AGENT_CONF = "/etc/neutron/dhcp_agent.ini"
@@ -289,13 +247,6 @@ def determine_packages():
pkgs.extend(DHCP_PACKAGES)
pkgs.extend(METADATA_PACKAGES)
if git_install_requested():
pkgs.extend(BASE_GIT_PACKAGES)
# don't include packages that will be installed from git
for p in GIT_PACKAGE_BLACKLIST:
if p in pkgs:
pkgs.remove(p)
cmp_release = CompareOpenStackReleases(
os_release('neutron-common', base='icehouse'))
if cmp_release >= 'mitaka' and 'neutron-plugin-openvswitch-agent' in pkgs:
@@ -617,134 +568,6 @@ def enable_local_dhcp():
return config('enable-local-dhcp-and-metadata')
def git_install(projects_yaml):
"""Perform setup, and install git repos specified in yaml parameter."""
status_set('maintenance', 'running git install')
if git_install_requested():
git_pre_install()
projects_yaml = git_default_repos(projects_yaml)
git_clone_and_install(projects_yaml, core_project='neutron')
git_post_install(projects_yaml)
def git_pre_install():
"""Perform pre-install setup."""
dirs = [
'/var/lib/neutron',
'/var/lib/neutron/lock',
'/var/log/neutron',
]
logs = [
'/var/log/neutron/server.log',
]
adduser('neutron', shell='/bin/bash', system_user=True)
add_group('neutron', system_group=True)
add_user_to_group('neutron', 'neutron')
for d in dirs:
mkdir(d, owner='neutron', group='neutron', perms=0o755, force=False)
for l in logs:
write_file(l, '', owner='neutron', group='neutron', perms=0o600)
def git_post_install(projects_yaml):
"""Perform post-install setup."""
src_etc = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
configs = [
{'src': src_etc,
'dest': '/etc/neutron'},
{'src': os.path.join(src_etc, 'neutron/plugins'),
'dest': '/etc/neutron/plugins'},
{'src': os.path.join(src_etc, 'neutron/rootwrap.d'),
'dest': '/etc/neutron/rootwrap.d'},
]
for c in configs:
if os.path.exists(c['dest']):
shutil.rmtree(c['dest'])
shutil.copytree(c['src'], c['dest'])
# NOTE(coreycb): Need to find better solution than bin symlinks.
symlinks = [
{'src': os.path.join(git_pip_venv_dir(projects_yaml),
'bin/neutron-rootwrap'),
'link': '/usr/local/bin/neutron-rootwrap'},
]
for s in symlinks:
if os.path.lexists(s['link']):
os.remove(s['link'])
os.symlink(s['src'], s['link'])
render('git/neutron_sudoers', '/etc/sudoers.d/neutron_sudoers', {},
perms=0o440)
bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
cmp_os_release = CompareOpenStackReleases(os_release('neutron-common'))
# Use systemd init units/scripts from ubuntu wily onward
_release = lsb_release()['DISTRIB_CODENAME']
if CompareHostReleases(_release) >= 'wily':
templates_dir = os.path.join(charm_dir(), 'templates/git')
daemons = ['neutron-openvswitch-agent', 'neutron-ovs-cleanup']
for daemon in daemons:
neutron_ovs_context = {
'daemon_path': os.path.join(bin_dir, daemon),
}
filename = daemon
if daemon == 'neutron-openvswitch-agent':
if cmp_os_release < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
template_file = 'git/{}.init.in.template'.format(filename)
init_in_file = '{}.init.in'.format(filename)
render(template_file, os.path.join(templates_dir, init_in_file),
neutron_ovs_context, perms=0o644)
git_generate_systemd_init_files(templates_dir)
for daemon in daemons:
filename = daemon
if daemon == 'neutron-openvswitch-agent':
if cmp_os_release < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
service('enable', filename)
else:
neutron_ovs_agent_context = {
'service_description': 'Neutron OpenvSwitch Plugin Agent',
'charm_name': 'neutron-openvswitch',
'process_name': 'neutron-openvswitch-agent',
'executable_name': os.path.join(bin_dir,
'neutron-openvswitch-agent'),
'cleanup_process_name': 'neutron-ovs-cleanup',
'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'log_file': '/var/log/neutron/openvswitch-agent.log',
}
neutron_ovs_cleanup_context = {
'service_description': 'Neutron OpenvSwitch Cleanup',
'charm_name': 'neutron-openvswitch',
'process_name': 'neutron-ovs-cleanup',
'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
'log_file': '/var/log/neutron/ovs-cleanup.log',
}
if cmp_os_release < 'mitaka':
render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
'/etc/init/neutron-plugin-openvswitch-agent.conf',
neutron_ovs_agent_context, perms=0o644)
else:
render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
'/etc/init/neutron-openvswitch-agent.conf',
neutron_ovs_agent_context, perms=0o644)
render('git/upstart/neutron-ovs-cleanup.upstart',
'/etc/init/neutron-ovs-cleanup.conf',
neutron_ovs_cleanup_context, perms=0o644)
if not is_unit_paused_set():
service_restart('neutron-plugin-openvswitch-agent')
def assess_status(configs):
"""Assess status of current unit
Decides what the state of the unit should be based on the current

View File

@@ -1,20 +0,0 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: neutron-openvswitch-agent
# Required-Start: $network $local_fs $remote_fs $syslog
# Required-Stop: $remote_fs openvswitch-switch
# Should-Start: mysql postgresql rabbitmq-server keystone neutron-ovs-cleanup
# Should-Stop: mysql postgresql rabbitmq-server keystone
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Neutron Open vSwitch Agent
# Description: Open vSwitch agent for OpenStack Neutron ML2 plugin
### END INIT INFO