Working cut

This commit is contained in:
Liam Young
2021-09-07 10:00:52 +00:00
parent 2b3f1a75af
commit aa17ce0827
20 changed files with 1554 additions and 241 deletions

2
.gitignore vendored
View File

@@ -6,3 +6,5 @@ build/
.coverage
__pycache__/
*.py[cod]
.stestr
**/*.swp

3
.stestr.conf Normal file
View File

@@ -0,0 +1,3 @@
[DEFAULT]
test_path=./unit_tests
top_dir=./

View File

@@ -1,25 +1,26 @@
# openstack-loadbalancer
# Overview
## Description
The openstack-loadbalancer charm deploys a loadbalancer that can load balance
traffic over a number of units of a service. The charm supports using vips
across the loadbalancer units to provide HA.
TODO: Describe your charm in a few paragraphs of Markdown
# Usage
## Usage
## Configuration
TODO: Provide high-level usage, such as required config or relations
See file `config.yaml` for the full list of options, along with their
descriptions and default values.
## Deployment
## Developing
Use the vip charm config option to specify the vips to be used by the
loadbalancer, normally one vip per network space that the charm is bound to.
Create and activate a virtualenv with the development requirements:
juju deploy -n 3 openstack-loadbalancer
juju config openstack-loadbalancer vip="10.0.0.100 10.10.0.100 10.20.0.100"
juju deploy hacluster
juju relate openstack-loadbalancer:ha hacluster:ha
virtualenv -p python3 venv
source venv/bin/activate
pip install -r requirements-dev.txt
Then relate the charm to a service that requires a loadbalancer
## Testing
The Python operator framework includes a very nice harness for testing
operator behaviour without full deployment. Just `run_tests`:
./run_tests
juju add-relation openstack-loadbalancer:loadbalancer ceph-dashboard:loadbalancer

View File

@@ -1,11 +1,5 @@
# Copyright 2021 Ubuntu
# See LICENSE file for licensing details.
#
# TEMPLATE-TODO: change this example to suit your needs.
# If you don't need a config, you can remove the file entirely.
# It ties in to the example _on_config_changed handler in src/charm.py
#
# Learn more about config at: https://juju.is/docs/sdk/config
options:
vip:

View File

@@ -10,6 +10,8 @@ tags:
- openstack
extra-bindings:
public:
admin:
internal:
series:
- focal
- groovy

View File

@@ -1,3 +0,0 @@
-r requirements.txt
coverage
flake8

View File

@@ -1,17 +0,0 @@
#!/bin/sh -e
# Copyright 2021 Ubuntu
# See LICENSE file for licensing details.
if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
. venv/bin/activate
fi
if [ -z "$PYTHONPATH" ]; then
export PYTHONPATH="lib:src"
else
export PYTHONPATH="lib:src:$PYTHONPATH"
fi
flake8
coverage run --source=src -m unittest -v "$@"
coverage report -m

View File

@@ -4,22 +4,18 @@
#
# Learn more at: https://juju.is/docs/sdk
"""Charm the service.
Refer to the following post for a quick-start guide that will help you
develop a new k8s charm using the Operator Framework:
https://discourse.charmhub.io/t/4208
"""
import collections
import jinja2
import json
import ipaddress
import logging
import os
import interface_api_endpoints
import interface_hacluster.ops_ha_interface as ops_ha_interface
import subprocess
from pathlib import Path
from ops.charm import CharmBase
import charmhelpers.core.host as ch_host
import charmhelpers.core.templating as ch_templating
import ops_openstack.adapters
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus
@@ -27,120 +23,129 @@ from ops.model import ActiveStatus
import ops_openstack.core
logger = logging.getLogger(__name__)
# defaults
# log global
# option log-health-checks
# timeout connect 5s
# timeout client 50s
# timeout server 450s
# frontend dashboard_front
# mode http
# bind *:80
# option httplog
# redirect scheme https code 301 if !{ ssl_fc }
def reload_service(service_name: str) -> None:
    """Reload a running service via systemd.

    Used as a restart function so config changes can be applied without
    dropping existing connections (reload rather than restart).

    :param service_name: Name of service to reload
    :type service_name: str
    :raises: subprocess.CalledProcessError if systemctl fails
    """
    subprocess.check_call(['systemctl', 'reload', service_name])
# backend dashboard_back_ssl
# mode tcp
# option httpchk GET /
# http-check expect status 200
# server x <HOST>:<PORT> check-ssl check verify none
# server y <HOST>:<PORT> check-ssl check verify none
# server z <HOST>:<PORT> check-ssl check verify none
HAPROXY_TEMPLATE = """
defaults
log global
option log-health-checks
timeout connect 5s
timeout client 50s
timeout server 450s
class LoadbalancerAdapter(
ops_openstack.adapters.OpenStackOperRelationAdapter):
"""Adapter for Loadbalanceer interface."""
{% for service, service_config in endpoints.items() %}
frontend {{service}}_front
mode tcp
bind *:{{service_config.frontend_port}}
option tcplog
default_backend {{service}}_back
@property
def endpoints(self):
"""List of registered endpoints.
:returns: List of endpoint dicts
:rtype: str
"""
endpoint_data = self.relation.get_loadbalancer_requests()['endpoints']
_endpoints = {
service.replace("-", "_"): config
for service, config in endpoint_data.items()}
return _endpoints
class OpenstackLoadbalancerAdapters(
ops_openstack.adapters.OpenStackRelationAdapters):
"""Collection of relation adapters."""
relation_adapters = {
'loadbalancer': LoadbalancerAdapter,
}
backend {{service}}_back
mode tcp
option httpchk GET /
http-check expect status 200
{% for unit in service_config.members %}
server {{ unit.unit_name }} {{ unit.backend_ip }}:{{ unit.backend_port }} check-ssl check verify none
{% endfor %}
{% endfor %}
"""
class OpenstackLoadbalancerCharm(ops_openstack.core.OSBaseCharm):
"""Charm the service."""
PACKAGES = ['haproxy']
HAPROXY_CONF = Path('/etc/haproxy/haproxy.cfg')
HAPROXY_SERVICE = 'haproxy'
RESTART_MAP = {
str(HAPROXY_CONF): [HAPROXY_SERVICE]}
RFUNCS = {
HAPROXY_SERVICE: reload_service}
_stored = StoredState()
def __init__(self, *args):
"""Setup interfaces and observers"""
super().__init__(*args)
self.api_eps = interface_api_endpoints.APIEndpointsProvides(self)
self.adapters = OpenstackLoadbalancerAdapters((self.api_eps,), self)
self.ha = ops_ha_interface.HAServiceRequires(self, 'ha')
self.framework.observe(self.api_eps.on.ep_ready, self._configure_haproxy)
self.framework.observe(
self.api_eps.on.ep_requested,
self._process_ep_requests)
self.framework.observe(self.ha.on.ha_ready, self._configure_hacluster)
self.unit.status = ActiveStatus()
self._stored.is_started = True
def _get_config_from_relation(self):
app_config_keys = ['check-type', 'frontend-port']
unit_config_keys = ['backend-ip', 'backend-port']
app_data = {
'endpoints': {}}
for relation in self.model.relations['loadbalancer']:
unit = list(relation.units)[0]
for ep in json.loads(relation.data[unit].get('endpoints', '[]')):
service_type = ep['service-type'].replace('-', '_')
app_data['endpoints'][service_type] = {
'members': []}
for config in app_config_keys:
app_data['endpoints'][service_type][config.replace('-', '_')] = ep[config]
unit_data = {}
for relation in self.model.relations['loadbalancer']:
for unit in relation.units:
eps = json.loads(relation.data[unit].get('endpoints', '[]'))
for service in app_data['endpoints'].keys():
unit_config = {}
for ep in eps:
if ep['service-type'].replace('-', '_') == service:
unit_config = ep
unit_config['unit_name'] = unit.name.replace('/', '_')
unit_config = {k.replace('-', '_'):v for k,v in unit_config.items()}
app_data['endpoints'][service]['members'].append(unit_config)
return app_data
def _get_binding_subnet_map(self):
bindings = {}
for binding_name in self.meta.extra_bindings.keys():
network = self.model.get_binding(binding_name).network
bindings[binding_name] = [i.subnet for i in network.interfaces]
return bindings
def _get_haproxy_config(self):
"""Generate squid.conf contents."""
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader())
jinja_template = jinja_env.from_string(HAPROXY_TEMPLATE)
ctxt = {}
ctxt.update(self._get_config_from_relation())
ctxt = {k.replace('-', '_'): v for k, v in ctxt.items()}
return jinja_template.render(**ctxt)
@property
def vips(self):
return self.config.get('vip').split()
def _configure_hacluster(self, event):
for vip in self.config.get('vip').split():
def _get_space_vip_mapping(self):
bindings = {}
for binding_name, subnets in self._get_binding_subnet_map().items():
bindings[binding_name] = [
vip
for subnet in subnets
for vip in self.vips
if ipaddress.ip_address(vip) in subnet]
return bindings
def _send_loadbalancer_response(self):
# May do tls termination in future
protocol = 'http'
for binding, vips in self._get_space_vip_mapping().items():
eps = self.api_eps.get_loadbalancer_requests()['endpoints']
for name, data in eps.items():
self.api_eps.loadbalancer_ready(
name,
binding,
vips,
data['frontend_port'], # Requested port is honoured atm
protocol)
self.api_eps.advertise_loadbalancers()
def _configure_hacluster(self, _):
vip_config = self.config.get('vip')
if not vip_config:
logging.warn("Cannot setup vips, vip config missing")
return
for vip in vip_config.split():
self.ha.add_vip(self.model.app.name, vip)
self.ha.add_init_service(self.model.app.name, 'haproxy')
self.ha.bind_resources()
def _configure_haproxy(self, event):
with open('/etc/haproxy/haproxy.cfg', 'w') as f:
contents = self._get_haproxy_config()
f.write(contents)
self._stored.is_started = True
subprocess.check_call(['systemctl', 'restart', 'haproxy'])
def _configure_haproxy(self):
@ch_host.restart_on_change(self.RESTART_MAP,
restart_functions=self.RFUNCS)
def _render_configs():
for config_file in self.RESTART_MAP.keys():
ch_templating.render(
os.path.basename(config_file),
config_file,
self.adapters)
logging.info("Rendering config")
_render_configs()
def _process_ep_requests(self, event):
self._configure_haproxy()
self._send_loadbalancer_response()
if __name__ == "__main__":
main(OpenstackLoadbalancerCharm)

View File

@@ -1,9 +1,25 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import json
import logging
import socket
from ops.charm import CharmBase, RelationEvent
from ops.framework import (
StoredState,
EventBase,
@@ -11,41 +27,157 @@ from ops.framework import (
EventSource,
Object)
class EnpointDataEvent(EventBase):
# Keys each unit publishes for its backend (per-unit data bag).
UNIT_DATA_KEYS = ['backend-port', 'backend-ip']
# Keys the application publishes for the frontend (app data bag).
APP_DATA_KEYS = ['frontend-port', 'check-type']
# Key identifying which service an endpoint entry belongs to.
SERVICE_NAME_KEY = 'service-name'
# Network-space bindings a loadbalancer frontend can be advertised on.
PUBLIC_SPACE = "public"
ADMIN_SPACE = "admin"
INTERNAL_SPACE = "internal"
class EndpointRelationReadyEvent(EventBase):
pass
class APIEndpointsEvents(ObjectEvents):
ep_ready = EventSource(EnpointDataEvent)
# openstack-loadbalancer: provides
# ceph-dashboard:requires
class EndpointRequestsEvent(EventBase):
pass
class EndpointConfiguredEvent(EventBase):
pass
class APIEndpointsEvents(ObjectEvents):
    """Custom events emitted by the api-endpoints interface."""

    # Relation joined: requirer may now publish its loadbalancer request.
    ep_relation_ready = EventSource(EndpointRelationReadyEvent)
    # Provider saw a new or updated loadbalancer request.
    ep_requested = EventSource(EndpointRequestsEvent)
    # Requirer received a complete response from the loadbalancer.
    ep_configured = EventSource(EndpointConfiguredEvent)
class APIEndpointsRequires(Object):
on = APIEndpointsEvents()
_stored = StoredState()
def __init__(self, charm, relation_name, config_dict):
def __init__(self, charm: CharmBase, relation_name: str) -> None:
"""Initialise class
:param charm: The charm using this interface.
:param relation_name: Model alias map to store
"""
super().__init__(charm, relation_name)
self.config_dict = config_dict
self.charm = charm
self.relation_name = relation_name
self.framework.observe(charm.on[self.relation_name].relation_changed, self._on_relation_changed)
self.framework.observe(
charm.on[self.relation_name].relation_changed,
self._on_relation_changed)
self.framework.observe(
charm.on[self.relation_name].relation_joined,
self._on_relation_joined)
def _on_relation_changed(self, event):
"""Handle the relation-changed event."""
# `self.unit` isn't available here, so use `self.model.unit`.
# for key in self.config_dict:
# event.relation.data[self.model.app][key] = str(self.config_dict[key])
event.relation.data[self.model.app]['endpoints'] = str(self.config_dict['endpoints'])
def _on_relation_joined(self, event: RelationEvent) -> None:
"""Handle relation joined event
def update_config(self, config_dict):
"""Allow for updates to relation."""
self.config_dict = config_dict
relation = self.model.get_relation(self.relation_name)
if relation:
# for key in self.config_dict:
# relation.data[self.model.app][key] = str(self.config_dict[key])
relation.data[self.model.app]['endpoints'] = str(self.config_dict['endpoints'])
:param event: Event triggering action
"""
self.on.ep_relation_ready.emit()
def _on_relation_changed(self, event: RelationEvent) -> None:
"""Handle relation changed event
:param event: Event triggering action
"""
self._process_response()
def _update_relation_data(self, relation_data: dict,
                          service: dict) -> list:
    """Update or add service to requests.

    The endpoints are a list of dicts for both app data and unit data.
    This method updates an entry in the list if it already exists or adds
    a new one if it does not (matching on 'service-name').

    :param relation_data: Relation data dict; its 'endpoints' key holds a
                          JSON-encoded list of endpoint dicts.
    :param service: Service data to insert or replace.
    :returns: Updated list of endpoint dicts.
    :rtype: list
    """
    # Drop any existing entry for this service, then append the new one.
    endpoints = [e
                 for e in json.loads(relation_data.get('endpoints', '[]'))
                 if e['service-name'] != service['service-name']]
    endpoints.append(service)
    return endpoints
def request_loadbalancer(self, service_name: str, lb_port: int,
                         backend_port: int, backend_ip: str,
                         lb_check_type: str = 'http') -> None:
    """Send request for loadbalancer.

    :param service_name: Name of service
    :param lb_port: Port the loadbalancer should bind to.
    :param backend_port: Port backend is bound to.
    :param backend_ip: IP address backend is listening on.
    :param lb_check_type: Check type the loadbalancer should use for the
                          backend health check (defaults to 'http').
    """
    # Backend details are per-unit; frontend settings are app-wide.
    unit_data = {
        'service-name': service_name,
        'backend-port': backend_port,
        'backend-ip': backend_ip}
    app_data = {
        'service-name': service_name,
        'frontend-port': lb_port,
        'check-type': lb_check_type}
    for relation in self.model.relations[self.relation_name]:
        # Only the leader may write the application data bag.
        if self.model.unit.is_leader():
            relation.data[self.model.app]['endpoints'] = json.dumps(
                self._update_relation_data(
                    relation.data[self.model.app],
                    app_data),
                sort_keys=True)
        relation.data[self.model.unit]['endpoints'] = json.dumps(
            self._update_relation_data(
                relation.data[self.model.unit],
                unit_data),
            sort_keys=True)
def get_frontend_data(self) -> dict:
    """Get the details of the loadbalancers that have been created.

    Construct a dictionary of created listeners.

    :returns: Decoded 'frontends' data from the relation, or None when
              there is no relation or no response has been published yet.
    """
    if not self.model.relations[self.relation_name]:
        return
    data = None
    for relation in self.model.relations[self.relation_name]:
        # NOTE(review): if several relations exist only the last one's
        # 'frontends' value is kept — confirm this is intended.
        data = relation.data[relation.app].get('frontends')
    if data:
        data = json.loads(data)
    return data
def _process_response(self) -> None:
    """Check for a complete response from loadbalancer.

    Emits ep_configured once the provider has published frontend data.
    """
    if self.get_frontend_data():
        self.on.ep_configured.emit()
def get_lb_endpoint(self, service_name: str, binding: str):
    """Return the loadbalancer details on a given binding.

    :param service_name: Name of service
    :param binding: Network-space binding to look the frontend up on
                    (one of the *_SPACE constants).
    :returns: Frontend dict for the service on that binding, or None if
              no (matching) response is available yet.
    """
    endpoint = None
    lb_endpoints = self.get_frontend_data()
    if lb_endpoints:
        endpoint = lb_endpoints.get(service_name, {}).get(binding)
    return endpoint

# Convenience accessors with the binding pinned to each standard space.
get_lb_public_endpoint = functools.partialmethod(
    get_lb_endpoint,
    binding=PUBLIC_SPACE)
get_lb_internal_endpoint = functools.partialmethod(
    get_lb_endpoint,
    binding=INTERNAL_SPACE)
get_lb_admin_endpoint = functools.partialmethod(
    get_lb_endpoint,
    binding=ADMIN_SPACE)
class APIEndpointsProvides(Object):
@@ -53,19 +185,105 @@ class APIEndpointsProvides(Object):
on = APIEndpointsEvents()
_stored = StoredState()
def __init__(self, charm):
super().__init__(charm, "loadbalancer")
# Observe the relation-changed hook event and bind
# self.on_relation_changed() to handle the event.
self.framework.observe(charm.on["loadbalancer"].relation_changed, self._on_relation_changed)
def __init__(self, charm: str,
relation_name: str = 'loadbalancer') -> None:
"""Initialise class
:param charm: The charm using this interface.
:param relation_name: Model alias map to store
"""
super().__init__(charm, relation_name)
self.relation_name = relation_name
self.framework.observe(
charm.on["loadbalancer"].relation_changed,
self._on_relation_changed)
self.charm = charm
self.service_listeners = collections.defaultdict(dict)
def _on_relation_changed(self, event):
"""Handle a change to the ingress relation.
def _on_relation_changed(self, event: RelationEvent) -> None:
"""Handle relation changed event
Confirm we have the fields we expect to receive."""
# `self.unit` isn't available here, so use `self.model.unit`.
:param event: Event triggering action
"""
self.on.ep_requested.emit()
def _get_frontends(self) -> dict:
    """Get a dict of requested loadbalancers.

    Examine the application data bag across all relations to construct
    a dictionary of all requested loadbalancers and their settings.

    :returns: {'endpoints': {service_name: {<APP_DATA_KEYS, dashes
              converted to underscores>: value}}}
    """
    ep_data = collections.defaultdict(dict)
    for relation in self.model.relations[self.relation_name]:
        endpoints = json.loads(
            relation.data[relation.app].get('endpoints', '[]'))
        for ep in endpoints:
            for config in APP_DATA_KEYS:
                # Dashes are converted for use as template variables.
                _config_key = config.replace('-', '_')
                ep_data[ep[SERVICE_NAME_KEY]][_config_key] = ep[config]
    return {'endpoints': ep_data}
def _get_backends(self) -> dict:
    """Get a dict of registered backends.

    Examine the unit data bag across all relations to construct
    a dictionary of all registered backends for a service.

    :returns: Mapping of service-name -> list of member dicts, each
              carrying 'unit_name' plus the UNIT_DATA_KEYS (dashes
              converted to underscores).
    """
    members = collections.defaultdict(list)
    # Use self.relation_name rather than the hard-coded 'loadbalancer'
    # so this stays consistent with _get_frontends() and honours the
    # name the interface was constructed with.
    for relation in self.model.relations[self.relation_name]:
        # Sort units so the generated member list is stable across hooks.
        units = sorted(
            [u for u in relation.units],
            key=lambda unit: unit.name)
        for unit in units:
            # Juju unit names contain '/', unusable in server names.
            unit_name = unit.name.replace('/', '_')
            eps = json.loads(relation.data[unit].get('endpoints', '[]'))
            for ep in eps:
                member_data = {
                    'unit_name': unit_name}
                for config in UNIT_DATA_KEYS:
                    _config_key = config.replace('-', '_')
                    member_data[_config_key] = ep[config]
                members[ep['service-name']].append(member_data)
    return members
def get_loadbalancer_requests(self) -> dict:
    """Return dict of loadbalancer requests.

    Match loadbalancer requests with advertised backends.

    :returns: Frontend request data with a 'members' list of registered
              backends attached to each requested endpoint.
    """
    ep_data = self._get_frontends()
    for ep, members in self._get_backends().items():
        # Only attach members to services that made a frontend request.
        if ep_data['endpoints'].get(ep):
            ep_data['endpoints'][ep]['members'] = members
    return ep_data
def _get_requested_service_names(self, relation) -> list:
    """A list of loadbalancer service name requests for a relation.

    :param relation: Relation whose application data bag is inspected.
    :returns: List of 'service-name' values from the requests.
    """
    requests = json.loads(
        relation.data[relation.app].get('endpoints', '[]'))
    return [e['service-name'] for e in requests]
def loadbalancer_ready(self, service_name: str, space: str, ips: list,
                       port: int, protocol: str) -> None:
    """Register a loadbalancer as ready.

    Stored locally only; nothing is sent to requesters until
    advertise_loadbalancers() is called.

    :param service_name: Name of the service the listener fronts.
    :param space: Network-space binding the listener serves.
    :param ips: IPs (VIPs) the listener is reachable on.
    :param port: Port the listener is bound to.
    :param protocol: Protocol served (e.g. 'http').
    """
    self.service_listeners[service_name][space] = {
        'ip': ips,
        'port': port,
        'protocol': protocol}
def advertise_loadbalancers(self) -> None:
    """Advertise loadbalancers as ready down the requesting relations.

    Tell requesters whether their requested loadbalancers are ready by
    publishing the registered listeners under the 'frontends' key of the
    application data bag. Only the leader may write application data.

    NOTE: the previous revision called ``self.on.ep_ready.emit()`` here,
    but ``ep_ready`` is not defined on APIEndpointsEvents (only
    ep_relation_ready / ep_requested / ep_configured are), so the call
    would raise AttributeError; it has been removed.
    """
    if not self.model.unit.is_leader():
        logging.info("Not sending response, not leader")
        return
    for relation in self.model.relations[self.relation_name]:
        _listeners = {}
        for service_name in self._get_requested_service_names(relation):
            # None for services whose listener was never registered.
            _listeners[service_name] = self.service_listeners.get(
                service_name)
        relation.data[self.model.app]['frontends'] = json.dumps(
            _listeners,
            sort_keys=True)

31
templates/haproxy.cfg Normal file
View File

@@ -0,0 +1,31 @@
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
stats timeout 30s
user haproxy
group haproxy
daemon
defaults
log global
option log-health-checks
timeout connect 5s
timeout client 50s
timeout server 450s
{% for service, service_config in loadbalancer.endpoints.items() %}
frontend {{service}}_front
mode tcp
bind *:{{service_config.frontend_port}}
option tcplog
default_backend {{service}}_back
backend {{service}}_back
mode tcp
option httpchk GET /
http-check expect status 200
{%- for unit in service_config.members %}
server {{ unit.unit_name }} {{ unit.backend_ip }}:{{ unit.backend_port }} check-ssl check verify none
{%- endfor %}
{% endfor %}

17
test-requirements.txt Normal file
View File

@@ -0,0 +1,17 @@
# This file is managed centrally. If you find the need to modify this as a
# one-off, please don't. Instead, consult #openstack-charms and ask about
# requirements management in charms via bot-control. Thank you.
charm-tools>=2.4.4
coverage>=3.6
mock>=1.2
flake8>=2.2.4,<=2.4.1
pyflakes==2.1.1
stestr>=2.2.0
requests>=2.18.4
psutil
# oslo.i18n dropped py35 support
oslo.i18n<4.0.0
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
pytz # workaround for 14.04 pip/tox
pyudev # for ceph-* charm unit tests (not mocked?)

50
tests/bundles/focal.yaml Normal file
View File

@@ -0,0 +1,50 @@
local_overlay_enabled: False
series: focal
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
vault:
num_units: 1
charm: cs:~openstack-charmers-next/vault
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
constraints: mem=3072M
num_units: 3
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
ceph-dashboard:
charm: ../../ceph-dashboard.charm
options:
public-hostname: 'ceph-dashboard.zaza.local'
openstack-loadbalancer:
charm: ../../openstack-loadbalancer.charm
num_units: 3
hacluster:
charm: cs:~openstack-charmers-next/hacluster
options:
cluster_count: 3
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'vault:shared-db'
- 'vault-mysql-router:shared-db'
- - 'vault-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'ceph-dashboard:dashboard'
- 'ceph-mon:dashboard'
- - 'ceph-dashboard:certificates'
- 'vault:certificates'
- - 'openstack-loadbalancer:loadbalancer'
- 'ceph-dashboard:loadbalancer'
- - 'openstack-loadbalancer:ha'
- 'hacluster:ha'

View File

@@ -0,0 +1,4 @@
applications:
openstack-loadbalancer:
options:
vip: '{{ TEST_VIP00 }}'

View File

@@ -0,0 +1,3 @@
applications:
ceph-dashboard:
charm: ../../ceph-dashboard.charm

View File

@@ -1,66 +0,0 @@
# Copyright 2021 Ubuntu
# See LICENSE file for licensing details.
#
# Learn more about testing at: https://juju.is/docs/sdk/testing
import unittest
from unittest.mock import Mock
from charm import OpenstackLoadbalancerCharm
from ops.model import ActiveStatus
from ops.testing import Harness
class TestCharm(unittest.TestCase):
def setUp(self):
self.harness = Harness(OpenstackLoadbalancerCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
def test_config_changed(self):
self.assertEqual(list(self.harness.charm._stored.things), [])
self.harness.update_config({"thing": "foo"})
self.assertEqual(list(self.harness.charm._stored.things), ["foo"])
def test_action(self):
# the harness doesn't (yet!) help much with actions themselves
action_event = Mock(params={"fail": ""})
self.harness.charm._on_fortune_action(action_event)
self.assertTrue(action_event.set_results.called)
def test_action_fail(self):
action_event = Mock(params={"fail": "fail this"})
self.harness.charm._on_fortune_action(action_event)
self.assertEqual(action_event.fail.call_args, [("fail this",)])
def test_httpbin_pebble_ready(self):
# Check the initial Pebble plan is empty
initial_plan = self.harness.get_container_pebble_plan("httpbin")
self.assertEqual(initial_plan.to_yaml(), "{}\n")
# Expected plan after Pebble ready with default config
expected_plan = {
"services": {
"httpbin": {
"override": "replace",
"summary": "httpbin",
"command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
"startup": "enabled",
"environment": {"thing": "🎁"},
}
},
}
# Get the httpbin container from the model
container = self.harness.model.unit.get_container("httpbin")
# Emit the PebbleReadyEvent carrying the httpbin container
self.harness.charm.on.httpbin_pebble_ready.emit(container)
# Get the plan now we've run PebbleReady
updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
# Check we've got the plan we expected
self.assertEqual(expected_plan, updated_plan)
# Check the service was started
service = self.harness.model.unit.get_container("httpbin").get_service("httpbin")
self.assertTrue(service.is_running())
# Ensure we set an ActiveStatus with no message
self.assertEqual(self.harness.model.unit.status, ActiveStatus())

15
tests/tests.yaml Normal file
View File

@@ -0,0 +1,15 @@
charm_name: ceph-dashboard
gate_bundles:
- focal
smoke_bundles:
- focal
configure:
- zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation
- zaza.openstack.charm_tests.ceph.dashboard.setup.check_dashboard_cert
target_deploy_status:
ceph-dashboard:
workload-status: blocked
workload-status-message-regex: "No certificates found|Charm config option|Unit is ready"
vault:
workload-status: blocked
workload-status-message-prefix: Vault needs to be initialized

24
unit_tests/__init__.py Normal file
View File

@@ -0,0 +1,24 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stub out optional runtime dependencies so unit tests can import charm
# code without them being installed.
import sys
import mock
# Mock out secrets to make py35 happy.
sys.modules['secrets'] = mock.MagicMock()
# Tenacity decorators need to be mocked before import
tenacity = mock.MagicMock()
# Make @tenacity.retry(...) a no-op pass-through decorator.
tenacity.retry.side_effect = lambda *args, **kwargs: lambda x: x
sys.modules['tenacity'] = tenacity

View File

@@ -0,0 +1,116 @@
import json
# Canned provider response: per-service, per-binding frontend details as a
# loadbalancer publishes them under the 'frontends' relation key.
loadbalancer_data = {
    'ceph-dashboard': {
        'admin': {
            'ip': ['10.20.0.101'],
            'port': 8443,
            'protocol': 'http'},
        'internal': {
            'ip': ['10.30.0.101'],
            'port': 8443,
            'protocol': 'http'},
        'public': {
            'ip': ['10.10.0.101'],
            'port': 8443,
            'protocol': 'http'}}}
def add_loadbalancer_relation(harness):
    """Relate the charm under test to a 'service-loadbalancer' provider.

    Adds one remote unit with an ingress address set.

    :param harness: Test harness to add the relation to.
    :returns: The new relation id.
    """
    relation_id = harness.add_relation(
        'loadbalancer',
        'service-loadbalancer')
    harness.add_relation_unit(
        relation_id,
        'service-loadbalancer/0')
    harness.update_relation_data(
        relation_id,
        'service-loadbalancer/0',
        {'ingress-address': '10.0.0.3'})
    return relation_id
def add_loadbalancer_response(harness, rel_id):
    """Publish the canned loadbalancer response on an existing relation.

    :param harness: Test harness holding the relation.
    :param rel_id: Id of the loadbalancer relation to respond on.
    """
    payload = {'frontends': json.dumps(loadbalancer_data)}
    harness.update_relation_data(rel_id, 'service-loadbalancer', payload)
def add_requesting_dash_relation(harness):
    """Add a loadbalancer relation from a requesting 'ceph-dashboard' app.

    Two remote units each register backends for two services
    ('ceph-dashboard' and 'ceph-api'); the application data bag carries
    the corresponding frontend requests.

    :param harness: Test harness to add the relation to.
    :returns: The new relation id.
    """
    rel_id = harness.add_relation('loadbalancer', 'ceph-dashboard')
    harness.add_relation_unit(
        rel_id,
        'ceph-dashboard/0')
    harness.add_relation_unit(
        rel_id,
        'ceph-dashboard/1')
    # Unit 0 backend registrations.
    harness.update_relation_data(
        rel_id,
        'ceph-dashboard/0',
        {
            'endpoints': json.dumps([
                {
                    'service-name': 'ceph-dashboard',
                    'backend-port': 8443,
                    'backend-ip': '10.0.0.10'},
                {
                    'service-name': 'ceph-api',
                    'backend-port': 9443,
                    'backend-ip': '10.0.0.10'}])})
    # Unit 1 backend registrations.
    harness.update_relation_data(
        rel_id,
        'ceph-dashboard/1',
        {
            'endpoints': json.dumps([
                {
                    'service-name': 'ceph-dashboard',
                    'backend-port': 8443,
                    'backend-ip': '10.0.0.11'},
                {
                    'service-name': 'ceph-api',
                    'backend-port': 9443,
                    'backend-ip': '10.0.0.11'}])})
    # Application-level frontend requests.
    harness.update_relation_data(
        rel_id,
        'ceph-dashboard',
        {
            'endpoints': json.dumps([
                {
                    'service-name': 'ceph-dashboard',
                    'frontend-port': 8443,
                    'check-type': 'https'},
                {
                    'service-name': 'ceph-api',
                    'frontend-port': 9443,
                    'check-type': 'https'}])})
    return rel_id
def add_requesting_glance_relation(harness):
    """Add a loadbalancer relation from a requesting 'glance' app.

    Registers one unit backend and the app-level frontend request for
    the 'glance-api' service.

    :param harness: Test harness to add the relation to.
    :returns: The new relation id.
    """
    rel_id = harness.add_relation('loadbalancer', 'glance')
    harness.add_relation_unit(rel_id, 'glance/0')
    unit_endpoints = [{
        'service-name': 'glance-api',
        'backend-port': 9292,
        'backend-ip': '10.0.0.50'}]
    harness.update_relation_data(
        rel_id,
        'glance/0',
        {'endpoints': json.dumps(unit_endpoints)})
    app_endpoints = [{
        'service-name': 'glance-api',
        'frontend-port': 9292,
        'check-type': 'http'}]
    harness.update_relation_data(
        rel_id,
        'glance',
        {'endpoints': json.dumps(app_endpoints)})
    return rel_id

View File

@@ -0,0 +1,611 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import unittest
import sys
sys.path.append('lib') # noqa
sys.path.append('src') # noqa
from ops.testing import Harness
from ops.charm import CharmBase
import interface_api_endpoints
from unit_tests.manage_test_relations import (
add_loadbalancer_relation,
add_loadbalancer_response,
add_requesting_dash_relation,
add_requesting_glance_relation,
loadbalancer_data,
)
class TestAPIEndpointsRequires(unittest.TestCase):
class MyCharm(CharmBase):
    """Minimal requirer charm used to exercise APIEndpointsRequires."""

    def __init__(self, *args):
        super().__init__(*args)
        # Event class names are recorded so tests can assert on firing.
        self.seen_events = []
        self.ingress = interface_api_endpoints.APIEndpointsRequires(
            self,
            'loadbalancer')
        self.framework.observe(
            self.ingress.on.ep_requested,
            self._log_event)
        self.framework.observe(
            self.ingress.on.ep_configured,
            self._log_event)
        self.framework.observe(
            self.ingress.on.ep_relation_ready,
            self._register_ep)

    def _log_event(self, event):
        # Record the event type name for later assertions.
        self.seen_events.append(type(event).__name__)

    def _register_ep(self, event):
        # NOTE(review): the event name is recorded twice here (once via
        # _log_event, once directly) — confirm whether intentional.
        self._log_event(event)
        self.seen_events.append(type(event).__name__)
        # Request a loadbalancer as soon as the relation is ready.
        self.ingress.request_loadbalancer(
            'ceph-dashboard',
            8443,
            8443,
            '10.0.0.10',
            lb_check_type='https')
def setUp(self):
    """Build a Harness around MyCharm with a loadbalancer requires slot.

    The commented-out copies of loadbalancer_data /
    add_loadbalancer_relation / add_loadbalancer_response that used to
    live here duplicated the helpers now imported from
    unit_tests.manage_test_relations and have been removed.
    """
    super().setUp()
    self.harness = Harness(
        self.MyCharm,
        meta='''
name: my-charm
requires:
  loadbalancer:
    interface: api-endpoints
'''
    )
    # Expected endpoint request issued by MyCharm._register_ep.
    self.eps = [{
        'service-name': 'ceph-dashboard',
        'frontend-port': 8443,
        'backend-port': 8443,
        'backend-ip': '10.0.0.10',
        'check-type': 'https'}]
def test_init(self):
    """The interface stores the relation name it was constructed with."""
    self.harness.begin()
    self.assertEqual(
        self.harness.charm.ingress.relation_name,
        'loadbalancer')
def test__on_relation_changed(self):
self.harness.begin()
self.harness.set_leader()
rel_id = add_loadbalancer_relation(self.harness)
unit_rel_data = self.harness.get_relation_data(
rel_id,
'my-charm/0')
app_rel_data = self.harness.get_relation_data(
rel_id,
'my-charm')
self.assertEqual(
json.loads(unit_rel_data['endpoints']),
[{
'service-name': 'ceph-dashboard',
'backend-port': 8443,
'backend-ip': '10.0.0.10'}])
self.assertEqual(
json.loads(app_rel_data['endpoints']),
[{
'service-name': 'ceph-dashboard',
'frontend-port': 8443,
'check-type': 'https'}])
def test_register_second_ep(self):
self.harness.begin()
self.harness.set_leader()
rel_id = add_loadbalancer_relation(self.harness)
new_eps = copy.deepcopy(self.eps)
new_eps.append({
'service-name': 'ceph-api',
'frontend-port': 9443,
'backend-port': 9443,
'backend-ip': '10.0.0.10',
'check-type': 'https'})
self.harness.charm.ingress.request_loadbalancer(
'ceph-api',
9443,
9443,
'10.0.0.10',
lb_check_type='https')
unit_rel_data = self.harness.get_relation_data(
rel_id,
'my-charm/0')
app_rel_data = self.harness.get_relation_data(
rel_id,
'my-charm')
self.assertEqual(
json.loads(unit_rel_data['endpoints']),
[
{
'service-name': 'ceph-dashboard',
'backend-port': 8443,
'backend-ip': '10.0.0.10'},
{
'service-name': 'ceph-api',
'backend-port': 9443,
'backend-ip': '10.0.0.10'}])
self.assertEqual(
json.loads(app_rel_data['endpoints']),
[
{
'service-name': 'ceph-dashboard',
'frontend-port': 8443,
'check-type': 'https'},
{
'service-name': 'ceph-api',
'frontend-port': 9443,
'check-type': 'https'}])
def test_update_existing_request(self):
self.harness.begin()
self.harness.set_leader()
rel_id = add_loadbalancer_relation(self.harness)
self.harness.charm.ingress.request_loadbalancer(
'ceph-dashboard',
9443,
9443,
'10.0.0.20',
lb_check_type='http')
unit_rel_data = self.harness.get_relation_data(
rel_id,
'my-charm/0')
app_rel_data = self.harness.get_relation_data(
rel_id,
'my-charm')
self.assertEqual(
json.loads(unit_rel_data['endpoints']),
[
{
'service-name': 'ceph-dashboard',
'backend-port': 9443,
'backend-ip': '10.0.0.20'}])
self.assertEqual(
json.loads(app_rel_data['endpoints']),
[
{
'service-name': 'ceph-dashboard',
'frontend-port': 9443,
'check-type': 'http'}])
def test_get_frontend_data(self):
self.harness.begin()
self.harness.set_leader()
rel_id = add_loadbalancer_relation(self.harness)
add_loadbalancer_response(self.harness, rel_id)
self.assertEqual(
self.harness.charm.ingress.get_frontend_data(),
loadbalancer_data)
def test__process_response(self):
self.harness.begin()
self.harness.set_leader()
rel_id = add_loadbalancer_relation(self.harness)
self.assertNotIn(
'EndpointConfiguredEvent',
self.harness.charm.seen_events)
add_loadbalancer_response(self.harness, rel_id)
self.assertIn(
'EndpointConfiguredEvent',
self.harness.charm.seen_events)
def test_retrieving_endpoints(self):
self.harness.begin()
self.harness.set_leader()
rel_id = add_loadbalancer_relation(self.harness)
self.assertIsNone(
self.harness.charm.ingress.get_lb_endpoint(
'ceph-dashboard',
'public'))
add_loadbalancer_response(self.harness, rel_id)
self.assertEqual(
self.harness.charm.ingress.get_lb_endpoint(
'ceph-dashboard',
'public'),
{
'ip': ['10.10.0.101'],
'port': 8443,
'protocol': 'http'})
self.assertEqual(
self.harness.charm.ingress.get_lb_public_endpoint(
'ceph-dashboard'),
{
'ip': ['10.10.0.101'],
'port': 8443,
'protocol': 'http'})
self.assertEqual(
self.harness.charm.ingress.get_lb_internal_endpoint(
'ceph-dashboard'),
{
'ip': ['10.30.0.101'],
'port': 8443,
'protocol': 'http'})
self.assertEqual(
self.harness.charm.ingress.get_lb_admin_endpoint(
'ceph-dashboard'),
{
'ip': ['10.20.0.101'],
'port': 8443,
'protocol': 'http'})
class TestAPIEndpointsProvides(unittest.TestCase):
    """Tests for the provider side of the api-endpoints interface.

    The relation-setup helpers (add_requesting_dash_relation,
    add_requesting_glance_relation) live in
    unit_tests.manage_test_relations; the dead commented-out copies that
    used to shadow them here have been removed.
    """

    class MyCharm(CharmBase):
        """Minimal charm that provides the 'loadbalancer' relation."""

        def __init__(self, *args):
            super().__init__(*args)
            self.seen_events = []
            self.api_eps = interface_api_endpoints.APIEndpointsProvides(self)
            self.framework.observe(
                self.api_eps.on.ep_requested,
                self._log_event)
            self.framework.observe(
                self.api_eps.on.ep_configured,
                self._log_event)

        def _log_event(self, event):
            """Record the class name of every observed event."""
            self.seen_events.append(type(event).__name__)

    def setUp(self):
        """Create a Harness around MyCharm with a provides relation."""
        super().setUp()
        self.harness = Harness(
            self.MyCharm,
            meta='''
name: my-charm
provides:
  loadbalancer:
    interface: api-endpoints
'''
        )

    def test_on_changed(self):
        """A requirer joining the relation emits EndpointRequestsEvent."""
        self.harness.begin()
        # No events expected before any relation exists.
        self.assertEqual(
            self.harness.charm.seen_events,
            [])
        rel_id = self.harness.add_relation('loadbalancer', 'ceph-dashboard')
        self.harness.add_relation_unit(
            rel_id,
            'ceph-dashboard/0')
        self.harness.update_relation_data(
            rel_id,
            'ceph-dashboard/0',
            {'ingress-address': '10.0.0.3'})
        self.assertIn(
            'EndpointRequestsEvent',
            self.harness.charm.seen_events)

    def test__get_frontends(self):
        """_get_frontends merges frontend requests from every relation."""
        self.harness.begin()
        add_requesting_dash_relation(self.harness)
        add_requesting_glance_relation(self.harness)
        self.assertEqual(
            self.harness.charm.api_eps._get_frontends(),
            {
                'endpoints': {
                    'ceph-dashboard': {
                        'check_type': 'https',
                        'frontend_port': 8443},
                    'ceph-api': {
                        'check_type': 'https',
                        'frontend_port': 9443},
                    'glance-api': {
                        'check_type': 'http',
                        'frontend_port': 9292}}})

    def test__get_backends(self):
        """_get_backends collects per-unit backend members by service."""
        self.harness.begin()
        add_requesting_dash_relation(self.harness)
        add_requesting_glance_relation(self.harness)
        self.assertEqual(
            self.harness.charm.api_eps._get_backends(),
            {
                'ceph-dashboard': [
                    {
                        'unit_name': 'ceph-dashboard_0',
                        'backend_ip': '10.0.0.10',
                        'backend_port': 8443},
                    {
                        'unit_name': 'ceph-dashboard_1',
                        'backend_ip': '10.0.0.11',
                        'backend_port': 8443}],
                'ceph-api': [
                    {
                        'unit_name': 'ceph-dashboard_0',
                        'backend_ip': '10.0.0.10',
                        'backend_port': 9443},
                    {
                        'unit_name': 'ceph-dashboard_1',
                        'backend_ip': '10.0.0.11',
                        'backend_port': 9443}],
                'glance-api': [
                    {
                        'unit_name': 'glance_0',
                        'backend_ip': '10.0.0.50',
                        'backend_port': 9292}]})

    def test_get_loadbalancer_requests(self):
        """get_loadbalancer_requests combines frontends with their members."""
        self.harness.begin()
        add_requesting_dash_relation(self.harness)
        add_requesting_glance_relation(self.harness)
        self.assertEqual(
            self.harness.charm.api_eps.get_loadbalancer_requests(),
            {
                'endpoints': {
                    'ceph-dashboard': {
                        'check_type': 'https',
                        'frontend_port': 8443,
                        'members': [
                            {
                                'backend_ip': '10.0.0.10',
                                'backend_port': 8443,
                                'unit_name': 'ceph-dashboard_0'},
                            {
                                'backend_ip': '10.0.0.11',
                                'backend_port': 8443,
                                'unit_name': 'ceph-dashboard_1'}]},
                    'ceph-api': {
                        'check_type': 'https',
                        'frontend_port': 9443,
                        'members': [
                            {
                                'backend_ip': '10.0.0.10',
                                'backend_port': 9443,
                                'unit_name': 'ceph-dashboard_0'},
                            {
                                'backend_ip': '10.0.0.11',
                                'backend_port': 9443,
                                'unit_name': 'ceph-dashboard_1'}]},
                    'glance-api': {
                        'check_type': 'http',
                        'frontend_port': 9292,
                        'members': [
                            {
                                'backend_ip': '10.0.0.50',
                                'backend_port': 9292,
                                'unit_name': 'glance_0'}]}}})

    def test_send_loadbalancer_response(self):
        """advertise_loadbalancers publishes each requirer's frontends.

        The original called loadbalancer_ready nine times with hand-written
        argument lists; the service/space combinations are now driven from
        two tables, preserving the original call order (per service:
        admin, internal, public).
        """
        self.harness.begin()
        self.harness.set_leader()
        dash_rel_id = add_requesting_dash_relation(self.harness)
        glance_rel_id = add_requesting_glance_relation(self.harness)
        service_ports = {
            'ceph-dashboard': 8443,
            'ceph-api': 9443,
            'glance-api': 9292}
        space_ips = {
            'admin': ['10.20.0.101'],
            'internal': ['10.30.0.101'],
            'public': ['10.10.0.101']}
        # Mark every service ready on every network space.
        for service_name, port in service_ports.items():
            for space, ips in space_ips.items():
                self.harness.charm.api_eps.loadbalancer_ready(
                    service_name,
                    space,
                    ips,
                    port,
                    'http')
        self.harness.charm.api_eps.advertise_loadbalancers()

        def _expected_frontends(port):
            """Per-space frontend dict for a service on *port*."""
            return {
                space: {'ip': ips, 'port': port, 'protocol': 'http'}
                for space, ips in space_ips.items()}

        dash_rel_data = self.harness.get_relation_data(
            dash_rel_id,
            'my-charm')
        self.assertEqual(
            json.loads(dash_rel_data['frontends']),
            {
                'ceph-dashboard': _expected_frontends(8443),
                'ceph-api': _expected_frontends(9443)})
        glance_rel_data = self.harness.get_relation_data(
            glance_rel_id,
            'my-charm')
        self.assertEqual(
            json.loads(glance_rel_data['frontends']),
            {'glance-api': _expected_frontends(9292)})

View File

@@ -0,0 +1,303 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import json
import re
import sys
import unittest
sys.path.append('lib') # noqa
sys.path.append('src') # noqa
from mock import patch
from ops.testing import Harness, _TestingModelBackend
from ops import framework, model
import charm
from unit_tests.manage_test_relations import (
add_requesting_dash_relation,
add_requesting_glance_relation,
)
class CharmTestCase(unittest.TestCase):
    """TestCase that patches a list of attributes on a target object.

    Subclasses call setUp(obj, patches); each name in *patches* is patched
    on *obj* and the started mock is bound to the same name on the test
    case. All patches are undone automatically at cleanup time.
    """

    def setUp(self, obj, patches):
        """Record the target and patch every requested attribute."""
        super().setUp()
        self.patches = patches
        self.obj = obj
        self.patch_all()

    def patch(self, method):
        """Patch *method* on the target object and return the started mock."""
        patcher = patch.object(self.obj, method)
        started = patcher.start()
        self.addCleanup(patcher.stop)
        return started

    def patch_all(self):
        """Bind a mock for each name in self.patches onto this test case."""
        for name in self.patches:
            setattr(self, name, self.patch(name))
class _OpenstackLoadbalancerCharm(charm.OpenstackLoadbalancerCharm):
    """Charm subclass for tests that pins the bind address.

    Overriding _get_bind_ip avoids whatever network lookup the real charm
    performs, so the harness never touches the host's networking.
    """

    def _get_bind_ip(self):
        """Return a fixed, predictable bind IP for assertions."""
        return '10.0.0.10'
class TestOpenstackLoadbalancerCharmBase(CharmTestCase):
    """Tests for OpenstackLoadbalancerCharm via a network-stubbed Harness."""

    # Names patched out of the charm module for every test.
    PATCHES = [
        'ch_host',
        'subprocess',
    ]

    def setUp(self):
        """Patch charm-module dependencies and build the test harness."""
        super().setUp(charm, self.PATCHES)
        self.harness = self.get_harness()

    def get_harness(self):
        """Return a Harness for the charm with network_get stubbed.

        The metadata declares the public/admin/internal extra-bindings,
        provides the loadbalancer relation and requires hacluster over a
        container-scoped 'ha' relation. The testing backend is replaced so
        network_get returns static per-binding address data.
        """
        initial_config = {}
        _harness = Harness(
            _OpenstackLoadbalancerCharm,
            meta='''
name: my-charm
extra-bindings:
  public:
  admin:
  internal:
provides:
  loadbalancer:
    interface: api-endpoints
requires:
  ha:
    interface: hacluster
    scope: container
'''
        )
        # BEGIN: Workaround until network_get is implemented
        class _TestingOPSModelBackend(_TestingModelBackend):
            def network_get(self, endpoint_name, relation_id=None):
                """Return static binding data for each extra-binding."""
                # Note the deliberate mapping: the 'admin' binding sits on
                # 10.10.0.0/24, 'public' on 10.20.0.0/24 and 'internal' on
                # 10.30.0.0/24; the assertions below depend on this.
                network_data = {
                    'admin': {
                        'bind-addresses': [{
                            'interface-name': 'eth1',
                            'addresses': [{
                                'cidr': '10.10.0.0/24',
                                'value': '10.10.0.10'}]}],
                        'ingress-addresses': ['10.10.0.10'],
                        'egress-subnets': ['10.10.0.0/24']},
                    'public': {
                        'bind-addresses': [{
                            'interface-name': 'eth2',
                            'addresses': [{
                                'cidr': '10.20.0.0/24',
                                'value': '10.20.0.10'}]}],
                        'ingress-addresses': ['10.20.0.10'],
                        'egress-subnets': ['10.20.0.0/24']},
                    'internal': {
                        'bind-addresses': [{
                            'interface-name': 'eth3',
                            'addresses': [{
                                'cidr': '10.30.0.0/24',
                                'value': '10.30.0.10'}]}],
                        'ingress-addresses': ['10.30.0.10'],
                        'egress-subnets': ['10.30.0.0/24']}}
                return network_data[endpoint_name]
        # Rebuild the harness internals around the patched backend.  This
        # reaches into ops private attributes, so the statement order
        # matters: backend first, then model, then framework.
        _harness._backend = _TestingOPSModelBackend(
            _harness._unit_name, _harness._meta)
        _harness._model = model.Model(
            _harness._meta,
            _harness._backend)
        _harness._framework = framework.Framework(
            ":memory:",
            _harness._charm_dir,
            _harness._meta,
            _harness._model)
        # END Workaround
        _harness.update_config(initial_config)
        return _harness

    def test_init(self):
        """The charm marks itself started on init."""
        self.harness.begin()
        self.assertTrue(self.harness.charm._stored.is_started)

    def test__get_binding_subnet_map(self):
        """Each binding maps to the subnet the stub backend reports."""
        self.harness.begin()
        self.assertEqual(
            self.harness.charm._get_binding_subnet_map(),
            {
                'admin': [ipaddress.IPv4Network('10.10.0.0/24')],
                'internal': [ipaddress.IPv4Network('10.30.0.0/24')],
                'public': [ipaddress.IPv4Network('10.20.0.0/24')]})

    def test_vips(self):
        """The space-separated vip config option is split into a list."""
        self.harness.begin()
        self.harness.update_config({
            'vip': '10.10.0.100 10.20.0.100 10.30.0.100'})
        self.assertEqual(
            self.harness.charm.vips,
            ['10.10.0.100', '10.20.0.100', '10.30.0.100'])

    def test__get_space_vip_mapping(self):
        """Each vip is assigned to the binding whose subnet contains it."""
        self.harness.begin()
        self.harness.update_config({
            'vip': '10.10.0.100 10.20.0.100 10.30.0.100'})
        self.assertEqual(
            self.harness.charm._get_space_vip_mapping(),
            {
                'admin': ['10.10.0.100'],
                'internal': ['10.30.0.100'],
                'public': ['10.20.0.100']})

    def test__send_loadbalancer_response(self):
        """Frontends advertised to requirers carry the per-space vips."""
        self.harness.begin()
        self.harness.set_leader()
        self.harness.update_config({
            'vip': '10.10.0.100 10.20.0.100 10.30.0.100'})
        dash_rel_id = add_requesting_dash_relation(self.harness)
        glance_rel_id = add_requesting_glance_relation(self.harness)
        self.harness.charm._send_loadbalancer_response()
        glance_rel_data = self.harness.get_relation_data(
            glance_rel_id,
            'my-charm')
        dash_rel_data = self.harness.get_relation_data(
            dash_rel_id,
            'my-charm')
        self.assertEqual(
            json.loads(glance_rel_data['frontends']),
            {
                'glance-api': {
                    'admin': {
                        'ip': ['10.10.0.100'],
                        'port': 9292,
                        'protocol': 'http'},
                    'internal': {
                        'ip': ['10.30.0.100'],
                        'port': 9292,
                        'protocol': 'http'},
                    'public': {
                        'ip': ['10.20.0.100'],
                        'port': 9292,
                        'protocol': 'http'}}})
        self.assertEqual(
            json.loads(dash_rel_data['frontends']),
            {
                'ceph-dashboard': {
                    'admin': {
                        'ip': ['10.10.0.100'],
                        'port': 8443,
                        'protocol': 'http'},
                    'internal': {
                        'ip': ['10.30.0.100'],
                        'port': 8443,
                        'protocol': 'http'},
                    'public': {
                        'ip': ['10.20.0.100'],
                        'port': 8443,
                        'protocol': 'http'}},
                'ceph-api': {
                    'admin': {
                        'ip': ['10.10.0.100'],
                        'port': 9443,
                        'protocol': 'http'},
                    'internal': {
                        'ip': ['10.30.0.100'],
                        'port': 9443,
                        'protocol': 'http'},
                    'public': {
                        'ip': ['10.20.0.100'],
                        'port': 9443,
                        'protocol': 'http'}}})

    def test__configure_hacluster(self):
        """hacluster relation data describes the haproxy clone and vips."""
        self.harness.begin()
        self.harness.set_leader()
        self.harness.update_config({
            'vip': '10.10.0.100 10.20.0.100 10.30.0.100'})
        rel_id = self.harness.add_relation(
            'ha',
            'hacluster')
        self.harness.add_relation_unit(
            rel_id,
            'hacluster/0')
        self.harness.charm._configure_hacluster(None)
        rel_data = self.harness.get_relation_data(
            rel_id,
            'my-charm/0')
        self.assertEqual(
            json.loads(rel_data['json_clones']),
            {'cl_res_my_charm_haproxy': 'res_my_charm_haproxy'})
        self.assertEqual(
            json.loads(rel_data['json_init_services']),
            ['haproxy'])
        self.assertEqual(
            json.loads(rel_data['json_resources'])['res_my_charm_haproxy'],
            'lsb:haproxy')
        # NOTE(review): the clone/haproxy names use underscores
        # ('res_my_charm_...') but this regex expects a hyphen
        # ('res_my-charm_...') in the vip resource names — confirm against
        # the charm's resource-naming code.
        vip_resources = {
            k: v
            for k, v in json.loads(rel_data['json_resources']).items()
            if re.match('res_my-charm_.*vip$', k)}
        self.assertEqual(len(vip_resources), 3)
        self.assertTrue(all(
            [v == 'ocf:heartbeat:IPaddr2' for v in vip_resources.values()]))

    def test_LoadbalancerAdapter(self):
        """The adapter exposes endpoints keyed by underscored service name."""
        self.harness.begin()
        self.harness.set_leader()
        self.harness.update_config({
            'vip': '10.10.0.100 10.20.0.100 10.30.0.100'})
        add_requesting_dash_relation(self.harness)
        add_requesting_glance_relation(self.harness)
        self.assertEqual(
            self.harness.charm.adapters.loadbalancer.endpoints,
            {
                'ceph_dashboard': {
                    'frontend_port': 8443,
                    'check_type': 'https',
                    'members': [
                        {
                            'unit_name': 'ceph-dashboard_0',
                            'backend_port': 8443,
                            'backend_ip': '10.0.0.10'},
                        {
                            'unit_name': 'ceph-dashboard_1',
                            'backend_port': 8443,
                            'backend_ip': '10.0.0.11'}]},
                'ceph_api': {
                    'frontend_port': 9443,
                    'check_type': 'https',
                    'members': [
                        {
                            'unit_name': 'ceph-dashboard_0',
                            'backend_port': 9443,
                            'backend_ip': '10.0.0.10'},
                        {
                            'unit_name': 'ceph-dashboard_1',
                            'backend_port': 9443,
                            'backend_ip': '10.0.0.11'}]},
                'glance_api': {
                    'frontend_port': 9292,
                    'check_type': 'http',
                    'members': [
                        {
                            'unit_name': 'glance_0',
                            'backend_port': 9292,
                            'backend_ip': '10.0.0.50'}]}})