Move to pkg deploy and tidy up

Liam Young 2019-03-01 11:12:18 +00:00
parent 76dde64622
commit cc8be69c24
13 changed files with 139 additions and 1383 deletions

View File

@@ -1,21 +1,13 @@
import collections
import socket
import subprocess
import tempfile
import charmhelpers.core.hookenv as hookenv
import charms_openstack.charm
import charms_openstack.ip as os_ip
import charmhelpers.contrib.openstack.utils as ch_os_utils
# import charms_openstack.sdn.odl as odl
# import charms_openstack.sdn.ovs as ovs
MASAKARI_WSGI_CONF = '/etc/apache2/sites-enabled/masakari-api.conf'
charms_openstack.charm.use_defaults('charm.default-select-release')
class MasakariCharm(charms_openstack.charm.HAOpenStackCharm):
# Internal name of charm
@@ -25,14 +17,7 @@ class MasakariCharm(charms_openstack.charm.HAOpenStackCharm):
release = 'rocky'
# List of packages to install for this charm
packages = ['apache2', 'python-apt',
'cinder-common', 'python3-oslo.policy', 'python3-pymysql',
'python3-keystoneauth1', 'python3-oslo.db',
'python3-oslo.service', 'python3-oslo.middleware',
'python3-oslo.messaging', 'python3-oslo.versionedobjects',
'python3-novaclient', 'python3-keystonemiddleware',
'python3-taskflow', 'libapache2-mod-wsgi-py3',
'python3-microversion-parse']
packages = ['masakari-api', 'masakari-engine', 'python-apt']
api_ports = {
'masakari': {
@@ -42,6 +27,7 @@ class MasakariCharm(charms_openstack.charm.HAOpenStackCharm):
}
}
group = 'masakari'
service_type = 'masakari'
default_service = 'masakari'
services = ['haproxy', 'apache2', 'masakari-engine']
@@ -55,17 +41,18 @@ class MasakariCharm(charms_openstack.charm.HAOpenStackCharm):
ha_resources = ['vips', 'haproxy', 'dnsha']
release_pkg = 'cinder-common'
release_pkg = 'masakari'
package_codenames = {
'masakari-common': collections.OrderedDict([
('2', 'mitaka'),
('3', 'newton'),
('4', 'ocata'),
('5', 'pike'),
('6', 'rocky'),
]),
}
sync_cmd = ['masakari-manage', '--config-file',
'/etc/masakari/masakari.conf', 'db', 'sync']
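The package_codenames table above maps a package's major version to an OpenStack release name. A minimal sketch of how such a mapping resolves a release (not part of this commit; codename_for is a hypothetical helper):

import collections

package_codenames = {
    'masakari-common': collections.OrderedDict([
        ('2', 'mitaka'), ('3', 'newton'), ('4', 'ocata'),
        ('5', 'pike'), ('6', 'rocky'),
    ]),
}

def codename_for(package, version):
    # Look up the release by the package's major version,
    # e.g. '6.0.0' -> 'rocky'.
    return package_codenames[package].get(version.split('.')[0])

assert codename_for('masakari-common', '6.0.0') == 'rocky'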
@@ -88,22 +75,3 @@ class MasakariCharm(charms_openstack.charm.HAOpenStackCharm):
@property
def internal_url(self):
return super().internal_url + "/v1/%(tenant_id)s"
# XXX THIS IS A TEMPORARY WORKAROUND AND SHOULD NOT BE INCLUDED IN
# ANY DEPLOYMENTS OTHER THAN POCs
def install(self):
super(MasakariCharm, self).install()
os_release = ch_os_utils.get_os_codename_package('cinder-common')
with tempfile.TemporaryDirectory() as tmpdirname:
git_dir = '{}/masakari'.format(tmpdirname)
subprocess.check_call([
'git', 'clone', '-b', 'stable/{}'.format(os_release),
'https://github.com/openstack/masakari.git', git_dir])
subprocess.check_call([
'sudo', 'python3', 'setup.py', 'install'], cwd=git_dir)
subprocess.check_call(['mkdir', '-p', '/var/lock/masakari', '/var/log/masakari', '/var/lib/masakari'])
subprocess.check_call(['cp', 'templates/masakari-engine.service', '/lib/systemd/system'])
subprocess.check_call(['cp', 'templates/wsgi.py', '/usr/local/lib/python3.6/dist-packages/masakari/api/openstack/wsgi.py'])
subprocess.check_call(['systemctl', 'daemon-reload'])
subprocess.check_call(['systemctl', 'start', 'masakari-engine'])
subprocess.check_call(['cp', 'templates/api-paste.ini', '/etc/masakari/'])
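With the POC git-install workaround above removed, installation falls back to the charm's default package path. Roughly what that amounts to, as an illustrative sketch using charmhelpers (the HAOpenStackCharm base class drives the real install):

import charmhelpers.fetch as fetch

# Install the packages declared on the charm class; fatal=True raises
# on failure instead of carrying on with a half-installed service.
fetch.apt_install(['masakari-api', 'masakari-engine', 'python-apt'],
                  fatal=True)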

View File

@@ -11,13 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import charms_openstack.charm as charm
import charms.reactive as reactive
# This charm's library contains all of the handler code associated with
# sdn_charm
import charm.openstack.masakari as masakari # noqa
charm.use_defaults(
@@ -29,6 +26,7 @@ charm.use_defaults(
'config.changed',
'update-status')
@reactive.when('shared-db.available')
@reactive.when('identity-service.available')
@reactive.when('amqp.available')
@@ -40,15 +38,16 @@ def render_config(*args):
# charm_class.upgrade_if_available(args)
charm_class.render_with_interfaces(args)
charm_class.assess_status()
subprocess.check_call(['chgrp', '-R', 'ubuntu', '/etc/masakari'])
reactive.set_state('config.rendered')
# db_sync checks if sync has been done so rerunning is a noop
@reactive.when('config.rendered')
def init_db():
with charm.provide_charm_instance() as charm_class:
charm_class.db_sync()
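As the comment above says, db_sync only performs the migration once, so re-running the handler is a no-op. A minimal sketch of that guard (flag name hypothetical, mirroring how charms_openstack tracks sync state):

import subprocess
import charms.reactive as reactive

def db_sync(sync_cmd):
    # Run the schema migration once; the flag makes reruns no-ops.
    if not reactive.is_state('db.synced'):
        subprocess.check_call(sync_cmd)
        reactive.set_state('db.synced')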
@reactive.when('ha.connected')
def cluster_connected(hacluster):
"""Configure HA resources in corosync"""

View File

@@ -1,45 +0,0 @@
[composite:masakari_api]
use = call:masakari.api.urlmap:urlmap_factory
/: apiversions
/v1: masakari_api_v1
[composite:masakari_api_v1]
use = call:masakari.api.auth:pipeline_factory_v1
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit authtoken keystonecontext osapi_masakari_app_v1
# filters
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = masakari
[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
[filter:request_id]
paste.filter_factory = oslo_middleware:RequestId.factory
[filter:faultwrap]
paste.filter_factory = masakari.api.openstack:FaultWrapper.factory
[filter:sizelimit]
paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
[filter:keystonecontext]
paste.filter_factory = masakari.api.auth:MasakariKeystoneContext.factory
[filter:noauth2]
paste.filter_factory = masakari.api.auth:NoAuthMiddleware.factory
# apps
[app:osapi_masakari_app_v1]
paste.app_factory = masakari.api.openstack.ha:APIRouterV1.factory
[pipeline:apiversions]
pipeline = faultwrap http_proxy_to_wsgi apiversionsapp
[app:apiversionsapp]
paste.app_factory = masakari.api.openstack.ha.versions:Versions.factory
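This hand-carried api-paste.ini is dropped in favour of the copy shipped by the masakari-api package. For reference, PasteDeploy loads a pipeline like the one above by its composite name; a sketch (config path assumed to be the packaged default):

from paste.deploy import loadapp

# 'masakari_api' selects [composite:masakari_api], which routes / to the
# version listing app and /v1 through the keystone auth pipeline.
app = loadapp('config:/etc/masakari/api-paste.ini', name='masakari_api')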

View File

@@ -1,14 +1,37 @@
Listen {{ options.service_listen_info.masakari.public_port }}
<VirtualHost *:{{ options.service_listen_info.masakari.public_port }}>
WSGIDaemonProcess masakari-api user=ubuntu group=ubuntu processes={{ options.wsgi_worker_context.processes }} threads=10 display-name=%{GROUP}
WSGIProcessGroup masakari-api
WSGIScriptAlias / /usr/local/bin/masakari-wsgi
WSGIScriptAlias / /usr/bin/masakari-wsgi
WSGIDaemonProcess masakari processes={{ options.wsgi_worker_context.processes }} threads=1 user=masakari group=masakari display-name=%{GROUP}
WSGIProcessGroup masakari
WSGIApplicationGroup %{GLOBAL}
<Directory /usr/local/bin>
Require all granted
</Directory>
ErrorLogFormat "%{cu}t %M"
WSGIPassAuthorization On
LimitRequestBody 114688
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/masakari_error.log
CustomLog /var/log/apache2/masakari_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
Alias /instance-ha /usr/bin/masakari-wsgi
<Location /instance-ha>
SetHandler wsgi-script
Options +ExecCGI
WSGIProcessGroup masakari
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
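The charm renders this template with its options context; here is a standalone sketch of what the placeholders expand to, with sample values (the real render goes through charms_openstack's adapter classes, not an ad-hoc namespace):

import types
import jinja2

options = types.SimpleNamespace(
    service_listen_info=types.SimpleNamespace(
        masakari=types.SimpleNamespace(public_port=15868)),
    wsgi_worker_context=types.SimpleNamespace(processes=4))

print(jinja2.Template(
    'Listen {{ options.service_listen_info.masakari.public_port }}\n'
    'WSGIDaemonProcess masakari '
    'processes={{ options.wsgi_worker_context.processes }} threads=1'
).render(options=options))
# Listen 15868
# WSGIDaemonProcess masakari processes=4 threads=1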

View File

@@ -1,25 +0,0 @@
[Unit]
Description=OpenStack Masakari Scheduler
After=postgresql.service mysql.service keystone.service rabbitmq-server.service ntp.service
[Service]
User=ubuntu
Group=ubuntu
Type=simple
WorkingDirectory=/var/lib/masakari
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /var/lock/masakari /var/log/masakari /var/lib/masakari /etc/masakari
ExecStartPre=/bin/chown ubuntu:ubuntu /var/lock/masakari /var/lib/masakari
ExecStartPre=/bin/chown ubuntu:adm /var/log/masakari
ExecStartPre=/bin/chgrp -R ubuntu /etc/masakari
ExecStart=/usr/local/bin/masakari-engine --log-file /var/log/masakari/masakari_engine.log
Restart=on-failure
LimitNOFILE=65535
TimeoutStopSec=15
[Install]
WantedBy=multi-user.target
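This hand-rolled unit (running the engine as the ubuntu user out of /usr/local) is superseded by the unit shipped in the masakari-engine deb. A quick post-deploy sanity check might look like this sketch:

import subprocess

# is-active exits non-zero (raising CalledProcessError) if the packaged
# unit is not running the engine.
subprocess.check_call(['systemctl', 'is-active', 'masakari-engine'])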

File diff suppressed because it is too large

View File

@@ -146,7 +146,7 @@ applications:
series: bionic
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
openstack-origin: cloud:bionic-rocky/proposed
ceph-mon:
charm: ceph-mon
num_units: 3

View File

@@ -19,11 +19,8 @@
import logging
import tenacity
import time
import uuid
#from keystoneauth1.identity.generic import password as ks_password
from openstack import connection
from openstack import exceptions
import zaza.model
import zaza.charm_tests.test_utils as test_utils
@@ -54,7 +51,8 @@ class MasakariTest(test_utils.OpenStackBaseTest):
region_name='RegionOne')
cls.masakari_client = conn.instance_ha
def launch_instance(self, instance_key, use_boot_volume=False, vm_name=None):
def launch_instance(self, instance_key, use_boot_volume=False,
vm_name=None):
"""Launch an instance.
:param instance_key: Key to collect associated config data with.
@@ -71,13 +69,12 @@ class MasakariTest(test_utils.OpenStackBaseTest):
if use_boot_volume:
bdmv2 = [{
'boot_index': '0',
'uuid': image.id,
'source_type': 'image',
'volume_size': flavor.disk,
'destination_type': 'volume',
'delete_on_termination': True,
}]
'boot_index': '0',
'uuid': image.id,
'source_type': 'image',
'volume_size': flavor.disk,
'destination_type': 'volume',
'delete_on_termination': True}]
image = None
# Launch instance.
@@ -122,98 +119,112 @@ class MasakariTest(test_utils.OpenStackBaseTest):
# password=None,
# privkey=openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME))
def configure(self):
try:
self.masakari_client.create_segment(
name='seg1',
recovery_method='auto',
service_type='COMPUTE')
hypervisors = self.nova_client.hypervisors.list()
segment_ids = [s.uuid for s in self.masakari_client.segments()] * len(hypervisors)
for hypervisor in hypervisors:
target_segment = segment_ids.pop()
hostname = hypervisor.hypervisor_hostname.split('.')[0]
self.masakari_client.create_host(
name=hostname,
segment_id=target_segment,
recovery_method='auto',
control_attributes='SSH',
type='COMPUTE')
except:
pass
try:
self.masakari_client.create_segment(
name='seg1',
recovery_method='auto',
service_type='COMPUTE')
hypervisors = self.nova_client.hypervisors.list()
segment_ids = [s.uuid for s in self.masakari_client.segments()]
segment_ids = segment_ids * len(hypervisors)
for hypervisor in hypervisors:
target_segment = segment_ids.pop()
hostname = hypervisor.hypervisor_hostname.split('.')[0]
self.masakari_client.create_host(
name=hostname,
segment_id=target_segment,
recovery_method='auto',
control_attributes='SSH',
type='COMPUTE')
except:
pass
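The host-to-segment assignment above repeats the segment id list so that one id can be popped per hypervisor, i.e. a round-robin spread of hosts across segments. Equivalent in effect (illustrative ids):

import itertools

segments = ['seg1-uuid', 'seg2-uuid']
hypervisors = ['node-1', 'node-2', 'node-3']
for hv, seg in zip(hypervisors, itertools.cycle(segments)):
    print('{} -> {}'.format(hv, seg))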
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
reraise=True, stop=tenacity.stop_after_attempt(80))
def wait_for_server_migration(self, vm_name, original_hypervisor):
server = self.nova_client.servers.find(name=vm_name)
current_hypervisor = getattr(server, 'OS-EXT-SRV-ATTR:host')
logging.info('{} is on {} in state {}'.format(vm_name, current_hypervisor, server.status))
assert (original_hypervisor != current_hypervisor and server.status == 'ACTIVE')
logging.info('SUCCESS {} has migrated to {}'.format(vm_name, current_hypervisor))
logging.info('{} is on {} in state {}'.format(
vm_name,
current_hypervisor,
server.status))
assert (original_hypervisor != current_hypervisor and
server.status == 'ACTIVE')
logging.info('SUCCESS {} has migrated to {}'.format(
vm_name,
current_hypervisor))
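tenacity retries wait_for_server_migration on any exception, so the assert doubles as the retry condition: the check is re-polled with exponential backoff (capped at 60s) for up to 80 attempts before the final AssertionError is re-raised. The same pattern as a self-contained sketch:

import tenacity

state = {'polls': 0}

@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
                reraise=True, stop=tenacity.stop_after_attempt(80))
def wait_until_done():
    state['polls'] += 1
    assert state['polls'] >= 3  # fails, and is retried, until the 3rd poll

wait_until_done()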
def svc_control(self, unit_name, action, services):
logging.info('{} {} on {}'.format(action.title(), services, unit_name))
cmds = []
for svc in services:
cmds.append("systemctl {} {}".format(action, svc))
zaza.model.run_on_unit(
unit_name, command=';'.join(cmds),
model_name=self.model_name)
logging.info('{} {} on {}'.format(action.title(), services, unit_name))
cmds = []
for svc in services:
cmds.append("systemctl {} {}".format(action, svc))
zaza.model.run_on_unit(
unit_name, command=';'.join(cmds),
model_name=self.model_name)
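svc_control batches the systemctl calls into a single shell command per unit; for a stop of the cluster services the string handed to run_on_unit is:

services = ['corosync', 'pacemaker', 'nova-compute']
';'.join('systemctl stop {}'.format(s) for s in services)
# 'systemctl stop corosync;systemctl stop pacemaker;systemctl stop nova-compute'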
def enable_the_things(self):
logging.info("Enabling all the things")
# Start corosync et al
for u in zaza.model.get_units(application_name='nova-compute'):
self.svc_control(u.entity_id, 'start', ['corosync', 'pacemaker', 'nova-compute'])
logging.info("Enabling all the things")
# Start corosync et al
for u in zaza.model.get_units(application_name='nova-compute'):
self.svc_control(
u.entity_id,
'start',
['corosync', 'pacemaker', 'nova-compute'])
# Enable nova-compute in nova
for svc in self.nova_client.services.list():
if svc.status == 'disabled':
logging.info("Enabling {} on {}".format(svc.binary, svc.host))
self.nova_client.services.enable(svc.host, svc.binary)
# Enable nova-compute in masakari
for segment in self.masakari_client.segments():
for host in self.masakari_client.hosts(segment_id=segment.uuid):
if host.on_maintenance:
logging.info("Removing maintenance mode from masakari host {}".format(host.uuid))
self.masakari_client.update_host(
host.uuid,
segment_id=segment.uuid,
**{'on_maintenance': False})
# Enable nova-compute in nova
for svc in self.nova_client.services.list():
if svc.status == 'disabled':
logging.info("Enabling {} on {}".format(svc.binary, svc.host))
self.nova_client.services.enable(svc.host, svc.binary)
# Enable nova-compute in masakari
for segment in self.masakari_client.segments():
for host in self.masakari_client.hosts(segment_id=segment.uuid):
if host.on_maintenance:
logging.info("Removing maintenance mode from masakari "
"host {}".format(host.uuid))
self.masakari_client.update_host(
host.uuid,
segment_id=segment.uuid,
**{'on_maintenance': False})
def test_instance_failover(self):
self.configure()
# Launch guest
lts = 'bionic'
vm_name = 'zaza_test_instance_failover'
try:
server = self.nova_client.servers.find(name=vm_name)
logging.info('Found existing guest')
except:
logging.info('Launching new guest')
self.launch_instance('bionic', use_boot_volume=True, vm_name=vm_name)
server = self.nova_client.servers.find(name=vm_name)
logging.info('Finding hosting hypervisor')
server = self.nova_client.servers.find(name=vm_name)
current_hypervisor = getattr(server, 'OS-EXT-SRV-ATTR:host')
self.configure()
# Launch guest
vm_name = 'zaza_test_instance_failover'
try:
server = self.nova_client.servers.find(name=vm_name)
logging.info('Found existing guest')
except:
logging.info('Launching new guest')
self.launch_instance(
'bionic',
use_boot_volume=True,
vm_name=vm_name)
server = self.nova_client.servers.find(name=vm_name)
logging.info('Finding hosting hypervisor')
server = self.nova_client.servers.find(name=vm_name)
current_hypervisor = getattr(server, 'OS-EXT-SRV-ATTR:host')
# Simulate compute node shutdown
logging.info('Simulate compute node shutdown')
server = self.nova_client.servers.find(name=vm_name)
guest_hypervisor = getattr(server, 'OS-EXT-SRV-ATTR:host')
hypervisor_machine_number = guest_hypervisor.split('-')[-1]
unit_name = [u.entity_id
for u in zaza.model.get_units(application_name='nova-compute')
if u.data['machine-id'] == hypervisor_machine_number][0]
logging.info('Simulate compute node shutdown')
server = self.nova_client.servers.find(name=vm_name)
guest_hypervisor = getattr(server, 'OS-EXT-SRV-ATTR:host')
hypervisor_machine_number = guest_hypervisor.split('-')[-1]
unit_name = [
u.entity_id
for u in zaza.model.get_units(application_name='nova-compute')
if u.data['machine-id'] == hypervisor_machine_number][0]
# Simulate shutdown
self.svc_control(unit_name, 'stop', ['corosync', 'pacemaker', 'nova-compute'])
# Simulate shutdown
self.svc_control(
unit_name,
'stop',
['corosync', 'pacemaker', 'nova-compute'])
# Wait for instance move
self.wait_for_server_migration(vm_name, current_hypervisor)
# Wait for instance move
self.wait_for_server_migration(vm_name, current_hypervisor)
# Bring things back
self.enable_the_things()
# Bring things back
self.enable_the_things()

View File

@@ -43,4 +43,4 @@ sys.modules['charmhelpers.fetch'] = charmhelpers.fetch
sys.modules['charmhelpers.cli'] = charmhelpers.cli
sys.modules['charmhelpers.contrib.hahelpers'] = charmhelpers.contrib.hahelpers
sys.modules['charmhelpers.contrib.hahelpers.cluster'] = (
charmhelpers.contrib.hahelpers.cluster)
charmhelpers.contrib.hahelpers.cluster)

View File

@@ -1,47 +0,0 @@
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
import charm.openstack.sdn_charm as sdn_charm
class Helper(unittest.TestCase):
def setUp(self):
self._patches = {}
self._patches_start = {}
def tearDown(self):
for k, v in self._patches.items():
v.stop()
setattr(self, k, None)
self._patches = None
self._patches_start = None
def patch(self, obj, attr, return_value=None, **kwargs):
mocked = mock.patch.object(obj, attr, **kwargs)
self._patches[attr] = mocked
started = mocked.start()
started.return_value = return_value
self._patches_start[attr] = started
setattr(self, attr, started)
class TestSDNCharm(Helper):

View File

@@ -1,43 +0,0 @@
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import mock
import reactive.sdn_charm_handlers as handlers
import charms_openstack.test_utils as test_utils
class TestRegisteredHooks(test_utils.TestRegisteredHooks):
def test_hooks(self):
defaults = [
'charm.installed',
'config.changed',
'update-status']
hook_set = {
'when': {
},
'when_not': {
}
}
# test that the hooks were registered via the
# reactive.barbican_handlers
self.registered_hooks_test_helper(handlers, hook_set, defaults)
class TestSDNCharmHandles(test_utils.PatchHelper):