treewide: Repository archived.

To signify the move of development to GitHub, the tip of the default
branch is removed and a README.md is left in place to point to the
new location.

Change-Id: I9a540e0fa29b9ec239b75acde74a0779dd787a8c
Signed-off-by: Martin Kalcok <martin.kalcok@canonical.com>
This commit is contained in:
Martin Kalcok
2025-11-25 16:48:46 +01:00
parent 0ba27e64f3
commit 204eff0d9a
59 changed files with 15 additions and 5535 deletions

View File

@@ -1,3 +0,0 @@
[DEFAULT]
test_path=./unit_tests
top_dir=./

View File

@@ -1,36 +0,0 @@
sudo: true
dist: xenial
language: python
install:
- pip install tox-travis
matrix:
include:
- name: "Python 3.6"
python: 3.6
env: ENV=pep8,py3
- name: "Python 3.7"
python: 3.7
env: ENV=pep8,py3
- name: "Functional test"
env: ENV=func-smoke
script:
- if [ $ENV = 'func-smoke' ]; then
sudo apt update;
sudo apt install -y distro-info;
sudo apt remove -y --purge lxd lxd-client;
sudo snap install lxd;
sudo snap install juju --classic;
sudo sh -c 'echo PATH=/snap/bin:$PATH >> /etc/environment';
sudo lxd waitready;
sudo lxd init --auto;
sudo usermod -a -G lxd travis;
sudo su travis -c 'juju bootstrap --no-gui localhost';
echo "export PATH=$PATH;cd $(pwd)" > $HOME/saved_path;
sudo su - travis -c "source $HOME/saved_path; tox -e build";
sudo su - travis -c "source $HOME/saved_path; tox -c build/builds/ovn-central/tox.ini -e $ENV -- --log DEBUG";
else
tox -c tox.ini -e $ENV;
fi
- if [ $ENV = 'func-smoke' ]; then
sudo su travis -c 'juju status -m $(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/)';
fi

View File

@@ -1,4 +1,3 @@
- project:
templates:
- openstack-python3-charm-jobs
- openstack-cover-jobs

202
LICENSE
View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1 +0,0 @@
src/README.md

6
README.md Normal file
View File

@@ -0,0 +1,6 @@
# Repository archived
Development of the `ovn-central` charm has been moved to GitHub under
[canonical/charm-ovn-central](https://github.com/canonical/charm-ovn-central).
Content of stable branches will remain preserved here as well, but new development
is expected to happen on GitHub.

View File

@@ -1,4 +0,0 @@
libffi-dev [platform:dpkg]
libpq-dev [platform:dpkg]
libxml2-dev [platform:dpkg]
libxslt1-dev [platform:dpkg]

View File

@@ -1,37 +0,0 @@
type: charm
parts:
charm:
source: src/
plugin: reactive
reactive-charm-build-arguments:
- --binary-wheels-from-source
- -v
build-packages:
- git
- python3-dev
- libffi-dev
- libssl-dev
- rustc
- cargo
build-snaps:
- charm/latest/edge
build-environment:
- CHARM_INTERFACES_DIR: /root/project/interfaces/
- CHARM_LAYERS_DIR: /root/project/layers/
- MAKEFLAGS: -j$(nproc)
base: ubuntu@24.04
platforms:
amd64:
build-on: amd64
build-for: amd64
arm64:
build-on: arm64
build-for: arm64
ppc64el:
build-on: ppc64el
build-for: ppc64el
s390x:
build-on: s390x
build-for: s390x

View File

@@ -1 +0,0 @@
src/metadata.yaml

View File

@@ -1,9 +0,0 @@
- project:
templates:
- charm-unit-jobs-py310
- charm-functional-jobs
vars:
needs_charm_build: true
charm_build_name: ovn-central
build_type: charmcraft
charmcraft_channel: 3.x/beta

View File

@@ -1,56 +0,0 @@
[MAIN]
jobs=0
ignore=.git
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=pylint.extensions.no_self_use
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
[FORMAT]
max-line-length=79
[REPORTS]
#reports=yes
score=yes
[MESSAGES CONTROL]
disable=
fixme,
invalid-name,
no-value-for-parameter,
pointless-statement,
missing-module-docstring,
missing-class-docstring,
missing-function-docstring,
too-many-arguments,
too-many-locals,
too-many-branches,
too-many-instance-attributes,
too-many-ancestors,
too-many-public-methods,
too-many-lines,
too-many-nested-blocks,
too-many-statements,
protected-access,
super-init-not-called,
useless-object-inheritance,
unidiomatic-typecheck,
unsubscriptable-object,
inconsistent-return-statements,
attribute-defined-outside-init,
too-few-public-methods,
abstract-method,
no-self-use,
broad-except,
unnecessary-lambda,
arguments-differ,
broad-exception-raised,
unspecified-encoding,
consider-using-f-string,
consider-using-with,
consider-using-dict-items,
unused-private-member,

View File

@@ -1,5 +0,0 @@
# This file is used to trigger rebuilds
# when dependencies of the charm change,
# but nothing in the charm needs to.
# simply change the uuid to something new
4c8fa5e0-c1d4-40f0-809b-d4fb5f412c2b

View File

@@ -1,13 +0,0 @@
#!/bin/bash
# Rename the charmcraft build artifact (<name>_<arch...>.charm) to the plain
# <name>.charm filename expected by the test jobs. The charm name is read
# from the charm_build_name key in osci.yaml.
charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}')
echo "renaming ${charm}_*.charm to ${charm}.charm"
echo -n "pwd: "
pwd
ls -al
echo "Removing bad downloaded charm maybe?"
if [[ -e "${charm}.charm" ]];
then
    rm "${charm}.charm"
fi
echo "Renaming charm here."
# Quote the variable expansion (the glob itself must stay unquoted so the
# shell expands it); this keeps the command safe if the name ever gains
# whitespace or glob characters.
mv "${charm}"_*.charm "${charm}.charm"

View File

@@ -1,19 +0,0 @@
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# NOTE(lourot): This might look like a duplication of test-requirements.txt but
# some tox targets use only test-requirements.txt whereas charm-build uses only
# requirements.txt
# NOTE: newer versions of cryptography require a Rust compiler to build,
# see
# * https://github.com/openstack-charmers/zaza/issues/421
# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html
#
cryptography<3.4
git+https://github.com/juju/charm-tools.git
simplejson

View File

@@ -1,7 +0,0 @@
# Overview
This charm is developed as part of the OpenStack Charms project, and as such you
should refer to the [OpenStack Charm Development Guide](https://github.com/openstack/charm-guide) for details on how
to contribute to this charm.
You can find its source code here: <https://github.com/openstack/charm-ovn>.

View File

@@ -1,94 +0,0 @@
# Overview
The ovn-central charm provides the Northbound and Southbound OVSDB Databases
and the Open Virtual Network (OVN) central control daemon (`ovn-northd`). It is
used in conjunction with either the [ovn-chassis][ovn-chassis-charm]
subordinate charm or the [ovn-dedicated-chassis][ovn-dedicated-chassis-charm]
principal charm.
> **Note**: The OVN charms are supported starting with OpenStack Train.
# Usage
The [OpenStack Base bundle][openstack-base-bundle] gives an example of how you
can deploy OpenStack and OVN with [Vault][vault-charm] to automate certificate
lifecycle management.
OVN makes use of Public Key Infrastructure (PKI) to authenticate and authorize
control plane communication. The charm therefore requires a Certificate
Authority to be present in the model as represented by the `certificates`
relation.
Refer to [Open Virtual Network (OVN)][cdg-ovn] in the [OpenStack Charms
Deployment Guide][cdg] for details, including deployment steps.
> **Note**: The ovn-central charm requires a minimum of three units to operate.
## Network spaces
This charm supports the use of Juju network spaces.
By binding the `ovsdb`, `ovsdb-cms` and `ovsdb-peer` endpoints you can
influence which interface will be used for communication with consumers of the
Southbound DB, Cloud Management Systems (CMS) and cluster internal
communication.
juju deploy -n 3 --series focal \
--bind "''=oam-space ovsdb=data-space" \
ovn-central
## OVN RBAC and securing the OVN services
The charm enables [RBAC][ovn-rbac] in the OVN Southbound database by default.
The RBAC feature enforces authorization of individual chassis connecting to the
database, and also restricts database operations.
In the event of an individual chassis being compromised, RBAC will make it more
difficult to leverage database access for compromising other parts of the
network.
> **Note**: Due to how RBAC is implemented in [ovsdb-server][ovsdb-server]
the charm opens up a separate listener at port 16642 for connections from
[ovn-northd][ovn-northd].
The charm automatically enables the firewall and will allow traffic from its
cluster peers to port 6641, 6643, 6644 and 16642. CMS clients will be allowed
to talk to port 6641.
Anyone will be allowed to connect to port 6642.
## Deferred service events
Operational or maintenance procedures applied to a cloud often lead to the
restarting of various OpenStack services and/or the calling of certain charm
hooks. Although normal, such events can be undesirable due to the service
interruptions they can cause.
The deferred service events feature provides the operator the choice of
preventing these service restarts and hook calls from occurring, which can then
be resolved at a more opportune time.
See the [Deferred service events][cdg-deferred-service-events] page in the
[OpenStack Charms Deployment Guide][cdg] for an in-depth treatment of this
feature.
# Bugs
Please report bugs on [Launchpad][lp-ovn-central].
For general questions please refer to the [OpenStack Charm Guide][cg].
<!-- LINKS -->
[cg]: https://docs.openstack.org/charm-guide/latest/
[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/
[cdg-ovn]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ovn.html
[ovn-rbac]: https://github.com/ovn-org/ovn/blob/master/Documentation/topics/role-based-access-control.rst
[ovsdb-server]: https://github.com/openvswitch/ovs/blob/master/Documentation/ref/ovsdb-server.7.rst#413-transact
[ovn-northd]: https://manpages.ubuntu.com/manpages/eoan/en/man8/ovn-northd.8.html
[lp-ovn-central]: https://bugs.launchpad.net/charm-ovn-central/+filebug
[openstack-base-bundle]: https://github.com/openstack-charmers/openstack-bundles/blob/master/development/openstack-base-bionic-ussuri-ovn/bundle.yaml
[vault-charm]: https://jaas.ai/vault
[ovn-chassis-charm]: https://jaas.ai/ovn-chassis
[ovn-dedicated-chassis-charm]: https://jaas.ai/ovn-dedicated-chassis
[cdg-deferred-service-events]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/deferred-events.html

View File

@@ -1,67 +0,0 @@
restart-services:
description: |
Restarts services this charm manages.
params:
deferred-only:
type: boolean
default: false
description: |
Restart all deferred services.
services:
type: string
default: ""
description: |
List of services to restart.
run-hooks:
type: boolean
default: true
description: |
Run any hooks which have been deferred.
run-deferred-hooks:
description: |
Run deferrable hooks and restart services.
.
NOTE: Service will be restarted as needed irrespective of enable-auto-restarts
show-deferred-events:
description: |
Show the outstanding restarts
cluster-status:
description: |
Show status of an OVN cluster. Action result will contain two keys,
"ovnsb" and "ovnnb", each of these keys will contain yaml structure with data
from "ovn-appctl cluster/status" command representing status of Southbound and
Northbound clusters. Additional "unit_map" key is included in each cluster status
that pairs server IDs of cluster members with unit IDs on which these servers run.
In case the action finds servers in cluster that are not associated with any known
unit, the "unit_map" will also include key "UNKNOWN" with list of these
disassociated servers.
cluster-kick:
description: |
Request removal of a server from the cluster. This action is equivalent to running
"ovn-appctl cluster/kick" command and can be run on any unit connected to the
cluster. This action takes ID of a server in southbound or northbound cluster
(or both) as an argument. At least one of these arguments must be specified. To get
the list of servers (and their IDs) connected to the cluster, user can run
"cluster-status" action.
params:
sb-server-id:
type:
- string
- number
default: ""
description: |
ID of a server to kick from Southbound cluster
nb-server-id:
type:
- string
- number
default: ""
description: |
ID of a server to kick from Northbound cluster
i-really-mean-it:
type: boolean
description: |
Confirmation by user to really perform this destructive action
required:
- i-really-mean-it

View File

@@ -1 +0,0 @@
cluster.py

View File

@@ -1 +0,0 @@
cluster.py

View File

@@ -1,249 +0,0 @@
#!/usr/bin/env python3
# Copyright 2022 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import yaml
import subprocess
# Load modules from $CHARM_DIR/lib
sys.path.append("lib")
from charms.layer import basic
basic.bootstrap_charm_deps()
import charms_openstack.bus
import charms_openstack.charm
import charms.reactive as reactive
import charmhelpers.core as ch_core
import charmhelpers.contrib.network.ovs.ovn as ch_ovn
import charmhelpers.contrib.network.ip as ch_ip
charms_openstack.bus.discover()
class StatusParsingException(Exception):
    """Raised when OVN cluster status output has an unexpected format."""
def _url_to_ip(cluster_url):
    """Extract the IP address portion of an OVN cluster member URL.

    OVN cluster member URLs look like "ssl:10.0.0.1:6644" (protocol, address
    and port separated by colons). Everything between the first and the last
    colon is treated as the address, which keeps IPv6 addresses (which
    themselves contain colons) intact.

    :raises StatusParsingException: If cluster_url does not contain a valid
        IP address.
    :param cluster_url: OVN server url. Like "ssl:10.0.0.1:6644".
    :type cluster_url: str
    :return: Parsed out IP address
    :rtype: str
    """
    # Drop the protocol (first field) and port (last field); re-joining the
    # remainder with ":" preserves IPv6 internal colons and is a no-op for
    # the single-field IPv4 case.
    ip_str = ":".join(cluster_url.split(":")[1:-1])
    if not ch_ip.is_ip(ip_str):
        raise StatusParsingException(
            "Failed to parse OVN cluster status. Cluster member address "
            "has unexpected format: {}".format(cluster_url)
        )
    return ip_str
def _format_cluster_status(raw_cluster_status, cluster_ip_map):
    """Reformat cluster status into dict.

    Resulting dictionary also includes mapping between cluster servers and
    juju units.

    Parameter cluster_ip_map is a dictionary with juju unit IDs as a key and
    their respective IP addresses as a value. Example:

        {"ovn-central/0": "10.0.0.1", "ovn-central/1": "10.0.0.2"}

    :raises StatusParsingException: In case the parsing of a cluster status
        fails.
    :param raw_cluster_status: Cluster status object
    :type raw_cluster_status: ch_ovn.OVNClusterStatus
    :param cluster_ip_map: mapping between juju units and their IPs in the
        cluster.
    :type cluster_ip_map: dict
    :return: Cluster status in the form of dictionary
    :rtype: dict
    """
    mapped_servers = {}
    unknown_servers = []
    # Map unit name to each server in the Servers field; servers whose
    # address matches no known unit are collected separately.
    for server_id, server_url in raw_cluster_status.servers:
        member_address = _url_to_ip(server_url)
        for unit, ip in cluster_ip_map.items():
            if member_address == ip:
                mapped_servers[unit] = server_id
                break
        else:
            # for/else: loop finished without a break, i.e. no unit matched.
            unknown_servers.append(server_id)
    cluster = raw_cluster_status.to_yaml()
    if unknown_servers:
        mapped_servers["UNKNOWN"] = unknown_servers
    cluster["unit_map"] = mapped_servers
    return cluster
def _cluster_ip_map():
    """Produce mapping between units and their IPs.

    This function selects an IP bound to the ovsdb-peer endpoint.
    Example output: {"ovn-central/0": "10.0.0.1", ...}

    :return: Mapping of unit names to their ovsdb-peer addresses.
    :rtype: dict
    """
    # Existence of ovsdb-peer relation is guaranteed by check in the main
    # function.
    ovsdb_peers = reactive.endpoint_from_flag("ovsdb-peer.available")
    # Seed the map with this unit; peers are added from relation data below.
    unit_map = {
        ch_core.hookenv.local_unit(): ovsdb_peers.cluster_local_addr,
    }
    for relation in ovsdb_peers.relations:
        for unit in relation.units:
            try:
                unit_map[unit.unit_name] = unit.received.get(
                    "bound-address", "")
            except ValueError:
                # Peer has not published usable relation data yet; skip it.
                pass
    return unit_map
def _kick_server(cluster, server_id):
    """Perform ovn-appctl cluster/kick to remove server from selected cluster.

    :raises:
        subprocess.CalledProcessError: If subprocess command execution fails.
        ValueError: If cluster parameter doesn't have an expected value.
    :param cluster: Cluster from which the server should be kicked. Available
        options are "northbound" or "southbound"
    :type cluster: str
    :param server_id: short ID of a server to be kicked
    :type server_id: str
    :return: None
    """
    # Map the user-facing cluster name to the ovn-appctl target and the
    # OVSDB schema name used by the cluster/kick command.
    targets = {
        "southbound": ("ovnsb_db", "OVN_Southbound"),
        "northbound": ("ovnnb_db", "OVN_Northbound"),
    }
    try:
        target, schema = targets[cluster.lower()]
    except KeyError:
        raise ValueError(
            "Unexpected value of 'cluster' parameter: '{}'".format(cluster)
        )
    ch_ovn.ovn_appctl(target, ("cluster/kick", schema, server_id))
def cluster_status():
    """Implementation of a "cluster-status" action.

    Collects "cluster/status" output for the Southbound and Northbound
    database clusters, annotates each with a server-to-unit mapping and
    publishes the results in the action output keys "ovnsb" and "ovnnb".
    """
    with charms_openstack.charm.provide_charm_instance() as charm_instance:
        # Southbound first, Northbound second; insertion order is kept by
        # the dict and drives the order of the action_set calls below.
        statuses = {
            "ovnsb": charm_instance.cluster_status("ovnsb_db"),
            "ovnnb": charm_instance.cluster_status("ovnnb_db"),
        }
    try:
        unit_ip_map = _cluster_ip_map()
        formatted = {
            key: _format_cluster_status(status, unit_ip_map)
            for key, status in statuses.items()
        }
    except StatusParsingException as exc:
        ch_core.hookenv.action_fail(str(exc))
        return
    for key, cluster in formatted.items():
        ch_core.hookenv.action_set(
            {key: yaml.safe_dump(cluster, sort_keys=False)}
        )
def cluster_kick():
    """Implementation of a "cluster-kick" action.

    Reads the "sb-server-id" and "nb-server-id" action parameters and
    requests removal of the given server(s) from the Southbound and/or
    Northbound cluster. At least one server ID must be supplied.
    """
    sb_server_id = str(ch_core.hookenv.action_get("sb-server-id"))
    nb_server_id = str(ch_core.hookenv.action_get("nb-server-id"))
    if not (sb_server_id or nb_server_id):
        ch_core.hookenv.action_fail(
            "At least one server ID to kick must be specified."
        )
        return
    # (action result key, human-readable cluster name, server ID to kick);
    # Southbound is processed first, matching the parameter order above.
    requests = (
        ("ovnsb", "Southbound", sb_server_id),
        ("ovnnb", "Northbound", nb_server_id),
    )
    for result_key, cluster_name, server_id in requests:
        if not server_id:
            continue
        try:
            _kick_server(cluster_name.lower(), server_id)
            ch_core.hookenv.action_set(
                {result_key: "requested kick of {}".format(server_id)}
            )
        except subprocess.CalledProcessError as exc:
            ch_core.hookenv.action_fail(
                "Failed to kick {} cluster member "
                "{}: {}".format(cluster_name, server_id, exc.output)
            )
# Map action names to their implementations; the action name is derived from
# the basename of the script used to invoke this file.
ACTIONS = {"cluster-status": cluster_status, "cluster-kick": cluster_kick}


def main(args):
    """Dispatch to the action implementation named by args[0].

    :param args: sys.argv of the invoked action script.
    :type args: List[str]
    :return: None on success, or an error string for unknown actions.
    """
    ch_core.hookenv._run_atstart()
    # Abort action if this unit is not in a cluster.
    if reactive.endpoint_from_flag("ovsdb-peer.available") is None:
        ch_core.hookenv.action_fail("Unit is not part of an OVN cluster.")
        return
    action = ACTIONS.get(os.path.basename(args[0]))
    if action is None:
        # Returning early here intentionally skips _run_atexit.
        return "Action %s undefined" % os.path.basename(args[0])
    try:
        action()
    except Exception as e:
        ch_core.hookenv.action_fail(str(e))
    ch_core.hookenv._run_atexit()


if __name__ == "__main__":
    sys.exit(main(sys.argv))

View File

@@ -1,131 +0,0 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Load modules from $CHARM_DIR/lib
sys.path.append('lib')
from charms.layer import basic
basic.bootstrap_charm_deps()
import charmhelpers.contrib.openstack.deferred_events as deferred_events
import charmhelpers.contrib.openstack.utils as os_utils
import charmhelpers.core.hookenv as hookenv
import charms_openstack.bus
import charms_openstack.charm
import charms.reactive as reactive
charms_openstack.bus.discover()
def handle_package_updates():
    """Ensure OVN DB services are restarted after a package update.

    Walks the deferred event queue and, when a deferred 'Package update'
    event for an ovn-central service is found, triggers a restart of the
    OVN services managed by this charm.
    """
    _svcs = ['ovn-central', 'ovn-northd']
    with charms_openstack.charm.provide_charm_instance() as charm_instance:
        # Include the dedicated nb/sb ovsdb-server services only when this
        # charm revision actually manages them.
        for _svc in ['ovn-ovsdb-server-nb', 'ovn-ovsdb-server-sb']:
            if _svc in charm_instance.services:
                _svcs.append(_svc)
    for event in deferred_events.get_deferred_events():
        if (event.reason == 'Package update' and
                event.service.startswith('ovn-central')):
            os_utils.restart_services_action(services=_svcs)
def restart_services(args):
    """Restart services managed by this charm.

    Exactly one of the "deferred-only" and "services" action parameters must
    be supplied; restarts either all deferred services or the explicitly
    listed ones, then re-assesses charm status.

    :param args: Unused
    :type args: List[str]
    """
    deferred_only = hookenv.action_get("deferred-only")
    services = hookenv.action_get("services").split()
    # The two modes are mutually exclusive and one of them is required.
    if deferred_only:
        if services:
            hookenv.action_fail("Cannot set deferred-only and services")
            return
        handle_package_updates()
        os_utils.restart_services_action(deferred_only=True)
    elif services:
        os_utils.restart_services_action(services=services)
    else:
        hookenv.action_fail("Please specify deferred-only or services")
        return
    with charms_openstack.charm.provide_charm_instance() as charm_instance:
        charm_instance._assess_status()
def show_deferred_events(args):
    """Publish the currently deferred events as action output.

    Delegates entirely to the charm-helpers action helper.

    :param args: Unused
    :type args: List[str]
    """
    os_utils.show_deferred_events_action_helper()
def run_deferred_hooks(args):
    """Run deferred hooks.

    Replays hook work that was recorded as deferred: re-runs install and/or
    re-renders configuration and reconfigures OVS, depending on which
    methods were deferred, then re-assesses workload status.

    :param args: Unused
    :type args: List[str]
    """
    deferred_methods = deferred_events.get_deferred_hooks()
    ovsdb = reactive.endpoint_from_flag('ovsdb.available')
    with charms_openstack.charm.provide_charm_instance() as charm_instance:
        if ('install' in deferred_methods or
                'configure_ovs' in deferred_methods):
            charm_instance.install(check_deferred_events=False)
        if 'configure_ovs' in deferred_methods:
            # NOTE(review): the 'nova-compute.connected'/'amqp.connected'
            # optional interfaces and the configure_ovs call look inherited
            # from a chassis-style charm's action implementation — confirm
            # they apply to this charm.
            charm_instance.render_with_interfaces(
                charms_openstack.charm.optional_interfaces(
                    (ovsdb,),
                    'nova-compute.connected',
                    'amqp.connected'))
            charm_instance.configure_ovs(
                ','.join(ovsdb.db_sb_connection_strs),
                reactive.is_flag_set('config.changed.disable-mlockall'),
                check_deferred_events=False)
        charm_instance._assess_status()
# Actions to function mapping, to allow for illegal python action names that
# can map to a python function.
ACTIONS = {
    "restart-services": restart_services,
    "show-deferred-events": show_deferred_events,
    "run-deferred-hooks": run_deferred_hooks
}


def main(args):
    """Dispatch a Juju action to its Python implementation.

    :param args: sys.argv from the action entry point; the basename of
        args[0] selects the action to run.
    :type args: List[str]
    :returns: An error string when the action is unknown, otherwise None.
    """
    hookenv._run_atstart()
    action_name = os.path.basename(args[0])
    try:
        action = ACTIONS[action_name]
    except KeyError:
        # The returned string is passed to sys.exit() by the caller, which
        # prints it and exits non-zero.
        return "Action %s undefined" % action_name
    else:
        try:
            action(args)
        except Exception as e:
            hookenv.action_fail(str(e))
    hookenv._run_atexit()


if __name__ == "__main__":
    sys.exit(main(sys.argv))

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# The shared action implementations live in the actions/ directory.
sys.path.append('actions')
import os_deferred_event_actions

if __name__ == "__main__":
    # Delegate to the shared dispatcher; the script name selects the action.
    sys.exit(os_deferred_event_actions.main(sys.argv))

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# The shared action implementations live in the actions/ directory.
sys.path.append('actions')
import os_deferred_event_actions

if __name__ == "__main__":
    # Delegate to the shared dispatcher; the script name selects the action.
    sys.exit(os_deferred_event_actions.main(sys.argv))

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# The shared action implementations live in the actions/ directory.
sys.path.append('actions')
import os_deferred_event_actions

if __name__ == "__main__":
    # Delegate to the shared dispatcher; the script name selects the action.
    sys.exit(os_deferred_event_actions.main(sys.argv))

View File

@@ -1,99 +0,0 @@
options:
source:
default: caracal
type: string
description: |
Repository from which to install packages.
May be one of the following:
distro (default)
ppa:somecustom/ppa (PPA name must include UCA OpenStack Release name)
deb url sources entry|key id
or a supported Ubuntu Cloud Archive pocket.
Supported Ubuntu Cloud Archive pockets include:
cloud:xenial-pike
cloud:xenial-queens
cloud:bionic-rocky
Note that updating this setting to a source that is known to
provide a later version of OVN will trigger a software
upgrade.
ovn-source:
default: ''
type: string
description: |
Overlay repository from which to install OVS+OVN.
The default for this configuration option is determined at charm
runtime.
When charm is deployed into a fresh environment on Ubuntu
20.04 (Focal Fossa), the default will be 'cloud:focal-ovn-22.03'.
When charm is upgraded or deployed into a fresh environment
on a different series the default will be to not use the
overlay repository.
To disable the overlay repository, set this option to 'distro'.
Note that updating this setting to a source that is known to
provide a later version of OVN will trigger a software
upgrade.
ovsdb-server-election-timer:
default: 4
type: int
description: |
Raft leader election timeout in seconds. The charm allows a value between
1 and 60 seconds.
.
The Open vSwitch ovsdb-server default of 1 second may not be sufficient
for a loaded cluster where the database server may be too busy serving
requests to respond to elections in time.
.
Using a higher value will increase the time to discover a real failure,
but you must weigh that against the risk of spurious leader flapping and
the unwanted churn that entails.
.
NOTE: The ovsdb-server will refuse to decrease or increase the value of
this timer more than 2x the current value. The charm will compensate for
this and decrease / increase the timer in increments, but care should be
taken to not decrease / increase the value too much in one operation.
ovsdb-server-inactivity-probe:
default: 60
type: int
description: |
Maximum number of seconds of idle time on connection to client before
sending an inactivity probe message.
The Open vSwitch ovsdb-server default of 5 seconds may not be sufficient
depending on type and load of the CMS you want to connect to OVN.
nagios_context:
default: "juju"
type: string
description: |
A string that will be prepended to instance name to set the host name
in nagios. So for instance the hostname would be something like:
juju-myservice-0
If you're running multiple environments with the same services in them
this allows you to differentiate between them.
nagios_servicegroups:
default: ""
type: string
description: |
Comma separated list of nagios servicegroups for the service checks.
enable-auto-restarts:
type: boolean
default: True
description: |
Allow the charm and packages to restart services automatically when
required.
ovn-exporter-channel:
type: string
default: stable
description: >-
The snap channel to install the prometheus-ovn-exporter from. Setting
this option to an empty string will result in the snap not being
installed or removed if it has already been installed.

View File

@@ -1,6 +0,0 @@
Format: http://dep.debian.net/deps/dep5/
Files: *
Copyright: Copyright 2018, Canonical Ltd
License: Apache-2.0

View File

View File

@@ -1,44 +0,0 @@
#!/usr/bin/env python3
# Copyright (C) 2023 Canonical
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from datetime import datetime, timedelta
NAGIOS_PLUGIN_DATA = '/usr/local/lib/nagios/juju_charm_plugin_data'
if __name__ == "__main__":
    # Report the status persisted by the companion cert-check script.
    output_path = os.path.join(NAGIOS_PLUGIN_DATA, 'ovn_cert_status.json')
    if os.path.exists(output_path):
        with open(output_path) as fd:
            try:
                status = json.loads(fd.read())
                ts = datetime.strptime(status['last_updated'],
                                       "%Y-%m-%d %H:%M:%S")
                # A result older than a day means the producer stopped
                # running; treat that as a failure rather than OK.
                if datetime.now() - ts > timedelta(days=1):
                    print("ovn cert check status is more than 24 hours old "
                          "(last_updated={})".format(status['last_updated']))
                    sys.exit(1)
                print(status['message'])
                sys.exit(status['exit_code'])
            except (ValueError, KeyError):
                # Malformed or incomplete status file. Exit UNKNOWN (3);
                # the original fell through to sys.exit(0) here, reporting
                # OK for garbage input, and did not handle missing keys.
                print("invalid check output")
                sys.exit(3)
    else:
        print("no info available")
    sys.exit(0)

View File

@@ -1,109 +0,0 @@
#!/usr/bin/env python3
# Copyright (C) 2023 Canonical
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from datetime import datetime
from cryptography.hazmat.backends import default_backend
from cryptography import x509
NAGIOS_PLUGIN_DATA = '/usr/local/lib/nagios/juju_charm_plugin_data'
UNKNOWN = 3
CRITICAL = 2
WARN = 1
SUCCESS = 0
CERT_EXPIRY_CRITICAL_LIMIT = 30
CERT_EXPIRY_WARN_LIMIT = 60
class SSLCertificate(object):
    """Thin wrapper around a PEM-encoded x509 certificate file on disk."""

    def __init__(self, path):
        """
        :param path: Path to a PEM-encoded x509 certificate.
        :type path: str
        """
        self.path = path

    @property
    def cert(self):
        """Raw PEM bytes of the certificate."""
        with open(self.path, "rb") as fd:
            return fd.read()

    @property
    def expiry_date(self):
        """Expiry (notAfter) of the certificate as a naive UTC datetime."""
        cert = x509.load_pem_x509_certificate(self.cert, default_backend())
        return cert.not_valid_after

    @property
    def days_remaining(self):
        """Whole days until the certificate expires (<= 0 when expired)."""
        # not_valid_after is a naive datetime expressed in UTC, so compare
        # against utcnow(); the original used datetime.now(), which skews
        # the result by the local timezone offset.
        return int((self.expiry_date - datetime.utcnow()).days)
def check_ovn_certs():
    """Check the OVN certificates and persist a status file for Nagios.

    Verifies that the host and CA certificates exist, are readable and are
    not expired or close to expiry, then writes the result (message, exit
    code and timestamp) as JSON for the companion check script to report.
    """
    output_path = os.path.join(NAGIOS_PLUGIN_DATA, 'ovn_cert_status.json')
    if not os.path.isdir(NAGIOS_PLUGIN_DATA):
        os.makedirs(NAGIOS_PLUGIN_DATA)

    exit_code = SUCCESS
    for cert in ['/etc/ovn/cert_host', '/etc/ovn/ovn-central.crt']:
        if not os.path.exists(cert):
            message = "cert '{}' does not exist.".format(cert)
            exit_code = CRITICAL
            break

        if not os.access(cert, os.R_OK):
            message = "cert '{}' is not readable.".format(cert)
            exit_code = CRITICAL
            break

        try:
            remaining_days = SSLCertificate(cert).days_remaining
            if remaining_days <= 0:
                message = "{}: cert has expired.".format(cert)
                exit_code = CRITICAL
                break

            if remaining_days < CERT_EXPIRY_CRITICAL_LIMIT:
                message = ("{}: cert will expire in {} days".
                           format(cert, remaining_days))
                exit_code = CRITICAL
                break

            if remaining_days < CERT_EXPIRY_WARN_LIMIT:
                message = ("{}: cert will expire in {} days".
                           format(cert, remaining_days))
                exit_code = WARN
                break
        except Exception as exc:
            message = "failed to check cert '{}': {}".format(cert, str(exc))
            exit_code = UNKNOWN
            # Stop here; without this break the loop's else-clause would
            # overwrite the UNKNOWN result with "all certs healthy".
            break
    else:
        # Loop completed without break: every cert passed every check.
        message = "all certs healthy"
        exit_code = SUCCESS

    with open(output_path, 'w') as fd:
        fd.write(json.dumps({
            'message': message,
            'exit_code': exit_code,
            # Zero-padded so the reader's strptime("%Y-%m-%d %H:%M:%S")
            # always parses it.
            'last_updated': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        }))
    # World-readable so the (unprivileged) nrpe check can read the result.
    # The original passed decimal 644 (== 0o1204), an unintended mode.
    os.chmod(output_path, 0o644)


if __name__ == "__main__":
    check_ovn_certs()

View File

@@ -1,191 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="266.66666"
height="266.66666"
viewBox="0 0 70.555554 70.555555"
version="1.1"
id="svg3931"
inkscape:version="0.92.4 (5da689c313, 2019-01-14)"
sodipodi:docname="icon.svg">
<defs
id="defs3925">
<inkscape:path-effect
is_visible="true"
id="path-effect3028"
effect="spiro" />
<inkscape:path-effect
is_visible="true"
id="path-effect4724"
effect="spiro" />
<inkscape:path-effect
is_visible="true"
id="path-effect4720"
effect="spiro" />
<marker
style="overflow:visible"
id="Arrow1Lend"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend">
<path
inkscape:connector-curvature="0"
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
id="path3917" />
</marker>
<inkscape:path-effect
is_visible="true"
id="path-effect3908"
effect="spiro" />
<inkscape:path-effect
is_visible="true"
id="path-effect4724-8"
effect="spiro" />
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.4"
inkscape:cx="887.07689"
inkscape:cy="18.723436"
inkscape:document-units="mm"
inkscape:current-layer="layer1"
showgrid="false"
units="px"
inkscape:window-width="3770"
inkscape:window-height="2096"
inkscape:window-x="70"
inkscape:window-y="27"
inkscape:window-maximized="1" />
<metadata
id="metadata3928">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(0,-226.44443)">
<g
id="g4132"
transform="matrix(0.13712291,0,0,0.13712291,6.9978915,233.42295)">
<g
transform="translate(-116.86196,-400.24359)"
style="display:inline"
id="layer1-6"
inkscape:label="Base">
<rect
ry="19.369202"
rx="19.369202"
y="410.75735"
x="129.53154"
height="382.54175"
width="382.54175"
id="rect2987"
style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none" />
<rect
ry="17.49905"
rx="23.46236"
y="492.37717"
x="173.8145"
height="219.83182"
width="294.74588"
id="rect2989"
style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" />
<flowRoot
transform="translate(488.47522,401.2293)"
style="font-style:normal;font-weight:normal;line-height:0.01%;font-family:'Bitstream Vera Sans';letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none"
id="flowRoot3004"
xml:space="preserve"><flowRegion
id="flowRegion3006"><rect
style="fill:#ffffff"
y="314.19586"
x="-229.67079"
height="123.70679"
width="149.3353"
id="rect3008" /></flowRegion><flowPara
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:60px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:Sans;text-align:start;writing-mode:lr-tb;text-anchor:start"
id="flowPara3010">OVN</flowPara></flowRoot> </g>
<g
transform="translate(-116.86196,-400.24359)"
style="display:inline"
inkscape:label="Knobs &amp; Text"
id="layer2">
<circle
transform="translate(-11.541922,-28.674478)"
id="path3808"
style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
cx="190.08102"
cy="482.84048"
r="19.116308" />
<circle
transform="translate(44.364264,-28.674478)"
id="path3808-7"
style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
cx="190.08102"
cy="482.84048"
r="19.116308" />
<circle
transform="translate(130.72139,-28.674478)"
id="path3808-7-3"
style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
cx="190.08102"
cy="482.84048"
r="19.116308" />
<circle
transform="translate(220.55891,-28.674478)"
id="path3808-4"
style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
cx="190.08102"
cy="482.84048"
r="19.116308" />
<circle
transform="translate(276.46511,-28.674478)"
id="path3808-7-36"
style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
cx="190.08102"
cy="482.84048"
r="19.116308" />
<path
sodipodi:nodetypes="cccccccc"
inkscape:connector-curvature="0"
inkscape:original-d="m 275.17737,567.87881 86.37835,0.73292 v -22.7212 l 41.01417,33.71537 -41.01417,32.98241 v -20.5224 h -86.99979 z"
inkscape:path-effect="#path-effect4724"
id="path4722"
d="m 275.17737,567.87881 86.37835,0.73292 v -22.7212 l 41.01417,33.71537 -41.01417,32.98241 v -20.5224 h -86.99979 z"
style="display:inline;fill:#008000;fill-opacity:1;stroke:none" />
<path
sodipodi:nodetypes="cccccccc"
inkscape:connector-curvature="0"
inkscape:original-d="m 361.10949,622.65312 -86.37833,0.73294 v -22.72122 l -41.01417,33.71537 41.01417,32.98243 v -20.52241 h 86.99976 z"
inkscape:path-effect="#path-effect4724-8"
id="path4722-1"
d="m 361.10949,622.65312 -86.37833,0.73294 v -22.72122 l -41.01417,33.71537 41.01417,32.98243 v -20.52241 h 86.99976 z"
style="display:inline;fill:#008000;fill-opacity:1;stroke:none" />
</g>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 6.7 KiB

View File

@@ -1,38 +0,0 @@
includes:
- layer:openstack
- layer:leadership
- layer:coordinator
- layer:snap
- interface:nrpe-external-master
- interface:ovsdb
- interface:ovsdb-cluster
- interface:ovsdb-cms
- interface:prometheus-scrape
options:
basic:
use_venv: True
include_system_packages: False
snap:
prometheus-ovn-exporter:
connect:
- ['prometheus-ovn-exporter:network-bind', ':network-bind']
- ['prometheus-ovn-exporter:kernel-module-observe', ':kernel-module-observe']
- ['prometheus-ovn-exporter:netlink-audit', ':netlink-audit']
- ['prometheus-ovn-exporter:log-observe', ':log-observe']
- ['prometheus-ovn-exporter:network-observe', ':network-observe']
- ['prometheus-ovn-exporter:openvswitch', ':openvswitch']
- ['prometheus-ovn-exporter:system-observe', ':system-observe']
# NOTE(dmitriis): uncomment those once the snap upload is approved.
# - ['prometheus-ovn-exporter:etc-openvswitch', ':system-files']
# - ['prometheus-ovn-exporter:run-openvswitch', ':system-files']
# - ['prometheus-ovn-exporter:run-ovn', ':system-files']
repo: https://opendev.org/x/charm-ovn-central
config:
deletes:
- debug
- ssl_ca
- ssl_cert
- ssl_key
- use-internal-endpoints
- use-syslog
- verbose

View File

@@ -1,13 +0,0 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -1,13 +0,0 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -1,13 +0,0 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

File diff suppressed because it is too large Load Diff

View File

@@ -1,2 +0,0 @@
config:
linux.kernel_modules: ip_tables,ip6_tables

View File

@@ -1,45 +0,0 @@
name: ovn-central
summary: Open Virtual Network for Open vSwitch
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Principal charm that deploys ovn-northd, the OVN central control daemon,
and ovsdb-server, the Open vSwitch Database (OVSDB).
The ovn-northd daemon is responsible for translating the high-level OVN
configuration into logical configuration consumable by daemons such as
ovn-controller.
The ovn-northd process talks to OVN Northbound- and Southbound- databases.
The ovsdb-server exposes endpoints over relations implemented by the ovsdb
interface.
The charm supports clustering of the OVSDB, you must have a odd number of
units for this to work. Note that write performance decreases as you
increase the number of units.
Running multiple ovn-northd daemons is supported and they will operate in
active/passive mode. The daemon uses a locking feature in the OVSDB to
automatically choose a single active instance.
docs: https://discourse.charmhub.io/t/ovn-central-docs-index/10550
tags:
- networking
series:
- jammy
- mantic
subordinate: false
provides:
ovsdb:
interface: ovsdb
ovsdb-cms:
interface: ovsdb-cms
ovsdb-server:
interface: ovsdb-cluster
nrpe-external-master:
interface: nrpe-external-master
scope: container
metrics-endpoint:
interface: prometheus_scrape
peers:
ovsdb-peer:
interface: ovsdb-cluster

View File

@@ -1,13 +0,0 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -1,378 +0,0 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import charms.reactive as reactive
import charms.leadership as leadership
import charms.coordinator as coordinator
import charms_openstack.bus
import charms_openstack.charm as charm
from charmhelpers.core import hookenv
charms_openstack.bus.discover()
# Use the charms.openstack defaults for common states and hooks.
# Note: 'charm.installed' and 'certificates.available' are deliberately not
# enabled here; they are enabled conditionally by handlers in this file.
charm.use_defaults(
    'config.changed',
    'update-status',
    'upgrade-charm',
)
@reactive.when_none('charm.installed', 'leadership.set.install_stamp')
@reactive.when('leadership.is_leader')
def stamp_fresh_deployment():
    """Stamp the deployment with leader setting, fresh deployment.

    This is used to determine whether this application is a fresh or upgraded
    deployment which influence the default of the `ovn-source` configuration
    option.
    """
    # 2203 presumably refers to the OVN 22.03 release — confirm against the
    # charm's release notes.
    leadership.leader_set(install_stamp=2203)
@reactive.when_none('is-update-status-hook',
                    'leadership.set.install_stamp',
                    'leadership.set.upgrade_stamp')
@reactive.when('charm.installed',
               'leadership.is_leader')
def stamp_upgraded_deployment():
    """Stamp the deployment with leader setting, upgrade.

    This is needed so that the units of this application can safely enable
    the default install hook.
    """
    # Runs only when 'charm.installed' is already set but no stamp exists,
    # i.e. a pre-existing deployment upgrading to a stamp-aware charm.
    leadership.leader_set(upgrade_stamp=2203)
@reactive.when_none('charm.installed', 'is-update-status-hook')
@reactive.when_any('leadership.set.install_stamp',
                   'leadership.set.upgrade_stamp')
def enable_install():
    """Enable the default install hook once a deployment stamp exists."""
    charm.use_defaults('charm.installed')
    # These flags will be set on initial install. We use these flags to ensure
    # not performing certain actions during coordinated payload upgrades, but
    # we don't want these provisions to interfere with initial clustering.
    reactive.clear_flag('config.changed.source')
    reactive.clear_flag('config.changed.ovn-source')
@reactive.when_none('is-update-status-hook', 'charm.firewall_initialized')
def initialize_firewall():
    """Do one-time initialization of firewall."""
    with charm.provide_charm_instance() as ovn_charm:
        ovn_charm.initialize_firewall()
    # The guard flag ensures this handler only ever runs once per unit.
    reactive.set_flag('charm.firewall_initialized')
@reactive.when_none('is-update-status-hook',
                    'leadership.set.nb_cid',
                    'leadership.set.sb_cid',
                    'coordinator.granted.upgrade',
                    'coordinator.requested.upgrade',
                    'config.changed.source',
                    'config.changed.ovn-source')
@reactive.when('config.rendered',
               'certificates.connected',
               'certificates.available',
               'leadership.is_leader',
               'ovsdb-peer.connected',)
def announce_leader_ready():
    """Announce leader is ready.

    At this point ovn-ctl has taken care of initialization of OVSDB databases
    and OVSDB servers for the Northbound- and Southbound- databases are
    running.

    Signal to our peers that they should render configurations and start their
    database processes.
    """
    # although this is done in the interface, explicitly do it in the same
    # breath as updating the leader settings as our peers will immediately
    # look for it
    ovsdb_peer = reactive.endpoint_from_flag('ovsdb-peer.connected')
    ovsdb_peer.publish_cluster_local_addr()

    ovsdb = reactive.endpoint_from_name('ovsdb')
    with charm.provide_charm_instance() as ovn_charm:
        # Create and configure listeners
        ovn_charm.configure_ovn(
            ovsdb_peer.db_nb_port,
            ovsdb.db_sb_port,
            ovsdb_peer.db_sb_admin_port)
        nb_status = ovn_charm.cluster_status('ovnnb_db')
        sb_status = ovn_charm.cluster_status('ovnsb_db')
        # Publishing the cluster IDs is the readiness signal; other
        # handlers gate on 'leadership.set.nb_cid'/'leadership.set.sb_cid'.
        leadership.leader_set({
            'ready': True,
            'nb_cid': str(nb_status.cluster_id),
            'sb_cid': str(sb_status.cluster_id),
        })
@reactive.when_none('is-update-status-hook',
                    'leadership.set.nb_cid',
                    'leadership.set.sb_cid',
                    'coordinator.granted.upgrade',
                    'coordinator.requested.upgrade')
@reactive.when('charm.installed', 'leadership.is_leader',
               'ovsdb-peer.connected')
def initialize_ovsdbs():
    """Render configuration and bring up the OVSDB servers on the leader."""
    ovsdb_peer = reactive.endpoint_from_flag('ovsdb-peer.connected')
    with charm.provide_charm_instance() as ovn_charm:
        # On the leader the ``/etc/default/ovn-central`` file is rendered
        # without configuration for the cluster remote address. This leads
        # ``ovn-ctl`` on the path to initializing a new cluster if the
        # database file does not already exist.
        ovn_charm.render_with_interfaces([ovsdb_peer])
        if ovn_charm.enable_services():
            # belated enablement of default certificates handler due to the
            # ``ovsdb-server`` processes must have finished database
            # initialization and be running prior to configuring TLS
            charm.use_defaults('certificates.available')
            reactive.set_flag('config.rendered')
        ovn_charm.assess_status()
@reactive.when_none('is-update-status-hook', 'leadership.is_leader')
@reactive.when('charm.installed')
def enable_default_certificates():
    """Enable the default certificates handler on non-leader units."""
    # belated enablement of default certificates handler due to the
    # ``ovsdb-server`` processes must have finished database
    # initialization and be running prior to configuring TLS
    charm.use_defaults('certificates.available')
@reactive.when_none('is-update-status-hook', 'endpoint.ovsdb-peer.departed')
@reactive.when('ovsdb-peer.available')
def configure_firewall():
    """Restrict the OVN DB ports to cluster peers and CMS clients."""
    ovsdb_peer = reactive.endpoint_from_flag('ovsdb-peer.available')
    ovsdb_cms = reactive.endpoint_from_flag('ovsdb-cms.connected')
    with charm.provide_charm_instance() as ovn_charm:
        ovn_charm.configure_firewall({
            # Cluster-internal ports: only reachable from peer units.
            (ovsdb_peer.db_nb_port,
             ovsdb_peer.db_sb_admin_port,
             ovsdb_peer.db_sb_cluster_port,
             ovsdb_peer.db_nb_cluster_port,):
            ovsdb_peer.cluster_remote_addrs,
            # NOTE(fnordahl): Tactical workaround for LP: #1864640
            (ovsdb_peer.db_nb_port,
             ovsdb_peer.db_sb_admin_port,):
            ovsdb_cms.client_remote_addrs if ovsdb_cms else None,
        })
        ovn_charm.assess_status()
@reactive.when_none('is-update-status-hook')
@reactive.when('ovsdb-peer.available',
               'leadership.set.nb_cid',
               'leadership.set.sb_cid',
               'certificates.connected',
               'certificates.available')
def publish_addr_to_clients():
    """Publish this unit's cluster-local address to connected clients."""
    ovsdb_peer = reactive.endpoint_from_flag('ovsdb-peer.available')
    local_addr = ovsdb_peer.cluster_local_addr
    # Both the ovsdb and ovsdb-cms relations get the same address; either
    # endpoint may be absent.
    client_endpoints = (reactive.endpoint_from_flag('ovsdb.connected'),
                        reactive.endpoint_from_flag('ovsdb-cms.connected'))
    for endpoint in client_endpoints:
        if endpoint:
            endpoint.publish_cluster_local_addr(local_addr)
@reactive.when_none('is-update-status-hook')
@reactive.when('ovsdb-peer.available')
@reactive.when_any('config.changed.source', 'config.changed.ovn-source')
def maybe_request_upgrade():
    """Acquire the coordinator lock before a package/schema upgrade."""
    # The ovn-ctl script in the ovn-common package does schema upgrade based
    # on non-presence of a value to `--db-nb-cluster-remote-addr` in
    # /etc/default/ovn-central. This is the case for the charm leader.
    #
    # The charm leader will perform DB schema upgrade as part of the package
    # upgrade, and in order to succeed with that we must ensure the other
    # units does not perform the package upgrade simultaneously.
    #
    # The coordinator library is based on leader storage and the leader will
    # always be the first one to get the lock.
    coordinator.acquire('upgrade')
@reactive.when_none('is-update-status-hook')
@reactive.when('ovsdb-peer.available', 'coordinator.granted.upgrade')
def maybe_do_upgrade():
    """Perform the package upgrade once the coordinator lock is granted."""
    ovsdb_peer = reactive.endpoint_from_flag('ovsdb-peer.available')
    with charm.provide_charm_instance() as ovn_charm:
        ovn_charm.upgrade_if_available([ovsdb_peer])
        ovn_charm.assess_status()
@reactive.when_none('is-update-status-hook',
                    'coordinator.granted.upgrade',
                    'coordinator.requested.upgrade',
                    'config.changed.source',
                    'config.changed.ovn-source',
                    'endpoint.ovsdb-peer.departed')
@reactive.when('ovsdb-peer.available',
               'leadership.set.nb_cid',
               'leadership.set.sb_cid',
               'certificates.connected',
               'certificates.available')
def render():
    """Render configuration, join the NB/SB clusters and start services."""
    ovsdb = reactive.endpoint_from_name('ovsdb')
    ovsdb_peer = reactive.endpoint_from_flag('ovsdb-peer.available')
    with charm.provide_charm_instance() as ovn_charm:
        ovn_charm.render_with_interfaces([ovsdb_peer])
        # NOTE: The upstream ctl scripts currently do not support passing
        # multiple connection strings to the ``ovsdb-tool join-cluster``
        # command.
        #
        # This makes it harder to bootstrap a cluster in the event
        # one of the units are not available. Thus the charm performs the
        # ``join-cluster`` command explicitly before handing off to the
        # upstream scripts.
        #
        # Replace this with functionality in ``ovn-ctl`` when support has been
        # added upstream.
        ovn_charm.join_cluster('ovnnb_db.db', 'OVN_Northbound',
                               ovsdb_peer.db_connection_strs(
                                   (ovsdb_peer.cluster_local_addr,),
                                   ovsdb_peer.db_nb_cluster_port),
                               ovsdb_peer.db_connection_strs(
                                   ovsdb_peer.cluster_remote_addrs,
                                   ovsdb_peer.db_nb_cluster_port))
        ovn_charm.join_cluster('ovnsb_db.db', 'OVN_Southbound',
                               ovsdb_peer.db_connection_strs(
                                   (ovsdb_peer.cluster_local_addr,),
                                   ovsdb_peer.db_sb_cluster_port),
                               ovsdb_peer.db_connection_strs(
                                   ovsdb_peer.cluster_remote_addrs,
                                   ovsdb_peer.db_sb_cluster_port))
        if ovn_charm.enable_services():
            # Handle any post deploy configuration changes impacting listeners
            ovn_charm.configure_ovn(
                ovsdb_peer.db_nb_port,
                ovsdb.db_sb_port,
                ovsdb_peer.db_sb_admin_port)
            reactive.set_flag('config.rendered')
        ovn_charm.assess_status()
@reactive.when_none('charm.paused', 'is-update-status-hook')
@reactive.when('config.rendered')
@reactive.when_any('config.changed.nagios_context',
                   'config.changed.nagios_servicegroups',
                   'endpoint.nrpe-external-master.changed',
                   'nrpe-external-master.available')
def configure_nrpe():
    """Handle config-changed for NRPE options."""
    # Re-render NRPE checks whenever nagios config or the relation changes.
    with charm.provide_charm_instance() as charm_instance:
        charm_instance.render_nrpe()
@reactive.when_not('is-update-status-hook')
def configure_deferred_restarts():
    """Set up deferred-restart handling on the charm instance."""
    with charm.provide_charm_instance() as charm_instance:
        charm_instance.configure_deferred_restarts()
@reactive.when_none('is-update-status-hook')
@reactive.when_any('config.changed.ovn-exporter-channel',
                   'snap.installed.prometheus-ovn-exporter')
def reassess_exporter():
    """Re-evaluate the prometheus-ovn-exporter snap state on the charm."""
    with charm.provide_charm_instance() as instance:
        instance.assess_exporter()
@reactive.when_none('is-update-status-hook')
@reactive.when('charm.installed',
               'metrics-endpoint.available',
               'snap.installed.prometheus-ovn-exporter')
def handle_metrics_endpoint():
    """Expose the OVN exporter scrape job on the metrics-endpoint relation."""
    metrics_endpoint = reactive.endpoint_from_flag(
        'metrics-endpoint.available')
    job_name = 'ovn-exporter'
    # '*' lets Prometheus substitute each target's address; 9476 is
    # presumably the exporter snap's listen port — confirm snap default.
    metrics_endpoint.expose_job(
        job_name,
        static_configs=[{"targets": ["*:9476"]}])
@reactive.when_none('is-update-status-hook')
@reactive.when('charm.installed', 'metrics-endpoint.available')
@reactive.when_not('snap.installed.prometheus-ovn-exporter')
def maybe_clear_metrics_endpoint():
    """Clear the metrics endpoint state if the exporter isn't installed.

    An operator may choose not to install the ovs exporter which needs
    to be reflected if a relation to prometheus is present to avoid
    scrape errors.
    """
    job_name = 'ovn-exporter'
    # Only withdraw a job that was actually exposed earlier.
    if reactive.is_flag_set(f'metrics-endpoint.exposed.{job_name}'):
        endpoint = reactive.endpoint_from_flag('metrics-endpoint.available')
        endpoint.clear_job(job_name)
@reactive.when('endpoint.ovsdb-peer.departed')
def handle_cluster_downscale():
    """Handle OVN cluster's downscaling when a unit is removed.

    There are two branches of code in this function. If it's executed on
    the unit that is being removed, it should trigger the "cluster/leave"
    command. If, on the other hand, this code is executed on a unit that's
    remaining, it should wait until the departing unit has sent out the
    "cluster/leave" command before reconfiguring the firewall and closing
    off ports.
    """
    if reactive.is_flag_set("ovsdb-peer.left_cluster"):
        # Departing unit already left cluster on a previous invocation of
        # this handler; nothing left to do.
        hookenv.log("Servers already left the cluster.", hookenv.INFO)
        return
    departing_unit = hookenv.departing_unit()
    is_departing_unit = hookenv.local_unit() == departing_unit
    if is_departing_unit:
        # Departing unit must attempt to gracefully leave OVN cluster.
        with charm.provide_charm_instance() as ovn:
            ovn.leave_cluster()
        # Record success so re-invocations take the early return above.
        reactive.set_flag("ovsdb-peer.left_cluster")
    else:
        # Unit that remains in cluster should wait for departing unit to
        # gracefully leave cluster before reconfiguring firewall.
        peers = reactive.endpoint_from_name("ovsdb-peer")
        # Address the departing peer published on the relation; used to
        # watch for its server disappearing from the cluster.
        remote_unit_ip = peers.all_departed_units[
            departing_unit
        ].received["bound-address"]
        with charm.provide_charm_instance() as ovn:
            departed = ovn.wait_for_server_leave(remote_unit_ip)
        if departed:
            hookenv.log(
                "Departing unit {} successfully disconnected from "
                "cluster.".format(departing_unit),
                hookenv.INFO
            )
        else:
            # Best-effort only: log a warning and proceed; the operator
            # can remove stragglers with the `cluster-kick` action.
            hookenv.log(
                "Departing unit {} failed to remove itself from cluster. "
                "Please use action `cluster-kick` to remove straggling "
                "servers from OVN cluster.".format(departing_unit),
                hookenv.WARNING
            )
        configure_firewall()

View File

@@ -1,5 +0,0 @@
--ovnnb-db={{ ovsdb_peer.db_nb_connection_strs|join(',') }}
--ovnsb-db={{ ovsdb_peer.db_sb_connection_strs|join(',') }}
-c {{ options.ovn_cert }}
-C {{ options.ovn_ca_cert }}
-p {{ options.ovn_key }}

View File

@@ -1,41 +0,0 @@
# This is a systemd EnvironmentFile as documented in systemd.exec(5)
#
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
# Configuration managed by ovn-central charm
###############################################################################
# OVN_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example,
# a suitable place to specify --ovn-northd-wrapper=valgrind.
# NOTE(fnordahl): Cluster join is directed towards all peers by a direct call
# to `ovsdb-tool join-cluster` by the charm.
#
# That will create a database file on disk with the required information and
# the `ovn-ctl` script will not touch it.
#
# The `ovn-ctl` `db-nb-cluster-remote-addr` and `db-sb-cluster-remote-addr`
# configuration options only take one remote and one must be provided for
# correct startup, but the values in the on-disk database file will be used by
# `ovsdb-server`.
OVN_CTL_OPTS=--db-nb-file=/var/lib/openvswitch/ovnnb_db.db \
--db-nb-cluster-local-addr={{ ovsdb_peer.cluster_local_addr }} \
--db-nb-cluster-local-port={{ ovsdb_peer.db_nb_cluster_port }} \
--db-nb-cluster-local-proto=ssl \
--ovn-nb-db-ssl-key={{ options.ovn_key }} \
--ovn-nb-db-ssl-cert={{ options.ovn_cert }} \
--ovn-nb-db-ssl-ca-cert={{ options.ovn_ca_cert }} \
--db-nb-cluster-remote-addr={{ ovsdb_peer.cluster_remote_addrs | first }} \
--db-nb-cluster-remote-port={{ ovsdb_peer.db_nb_cluster_port }} \
--db-nb-cluster-remote-proto=ssl \
--db-sb-file=/var/lib/openvswitch/ovnsb_db.db \
--db-sb-cluster-local-addr={{ ovsdb_peer.cluster_local_addr }} \
--db-sb-cluster-local-port={{ ovsdb_peer.db_sb_cluster_port }} \
--db-sb-cluster-local-proto=ssl \
--ovn-sb-db-ssl-key={{ options.ovn_key }} \
--ovn-sb-db-ssl-cert={{ options.ovn_cert }} \
--ovn-sb-db-ssl-ca-cert={{ options.ovn_ca_cert }} \
--db-sb-cluster-remote-addr={{ ovsdb_peer.cluster_remote_addrs | first }} \
--db-sb-cluster-remote-port={{ ovsdb_peer.db_sb_cluster_port }} \
--db-sb-cluster-remote-proto=ssl

View File

@@ -1,22 +0,0 @@
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
# Configuration managed by ovn-central charm
###############################################################################
[Unit]
Description=Open Virtual Network central components
After=network.target
Requires=network.target
Wants=ovn-northd.service
# Facilitate spread placement of the DBs if someone should choose to do that
Wants=ovn-nb-ovsdb.service
Wants=ovn-sb-ovsdb.service
[Service]
Type=oneshot
ExecStart=/bin/true
ExecStop=/bin/true
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View File

@@ -1,20 +0,0 @@
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
# Configuration managed by ovn-central charm
###############################################################################
[Unit]
Description=Open vSwitch database server for OVN Northbound database
After=network.target
PartOf=ovn-central.service
DefaultDependencies=no
[Service]
EnvironmentFile=-/etc/default/ovn-central
Type=forking
PIDFile=/var/run/openvswitch/ovnnb_db.pid
ExecStart=/usr/share/openvswitch/scripts/ovn-ctl start_nb_ovsdb $OVN_CTL_OPTS
ExecStop=/usr/share/openvswitch/scripts/ovn-ctl stop_nb_ovsdb
Restart=on-failure
LimitNOFILE=65535
TimeoutStopSec=15

View File

@@ -1,20 +0,0 @@
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
# Configuration managed by ovn-central charm
###############################################################################
[Unit]
Description=Open Virtual Network central control daemon
After=network.target ovn-nb-ovsdb.service ovn-sb-ovsdb.service
PartOf=ovn-central.service
DefaultDependencies=no
[Service]
EnvironmentFile=-/etc/default/ovn-central
Type=forking
PIDFile=/var/run/openvswitch/ovn-northd.pid
ExecStart=/usr/share/openvswitch/scripts/ovn-ctl start_northd --ovn-manage-ovsdb=no $OVN_CTL_OPTS
ExecStop=/usr/share/openvswitch/scripts/ovn-ctl stop_northd
Restart=on-failure
LimitNOFILE=65535
TimeoutStopSec=15

View File

@@ -1,20 +0,0 @@
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
# Configuration managed by ovn-central charm
###############################################################################
[Unit]
Description=Open vSwitch database server for OVN Southbound database
After=network.target
PartOf=ovn-central.service
DefaultDependencies=no
[Service]
EnvironmentFile=-/etc/default/ovn-central
Type=forking
PIDFile=/var/run/openvswitch/ovnsb_db.pid
ExecStart=/usr/share/openvswitch/scripts/ovn-ctl start_sb_ovsdb $OVN_CTL_OPTS
ExecStop=/usr/share/openvswitch/scripts/ovn-ctl stop_sb_ovsdb
Restart=on-failure
LimitNOFILE=65535
TimeoutStopSec=15

View File

@@ -1,40 +0,0 @@
# This is a systemd EnvironmentFile as documented in systemd.exec(5)
#
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
# Configuration managed by ovn-central charm
###############################################################################
# OVN_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example,
# a suitable place to specify --ovn-northd-wrapper=valgrind.
# NOTE(fnordahl): Cluster join is directed towards all peers by a direct call
# to `ovsdb-tool join-cluster` by the charm.
#
# That will create a database file on disk with the required information and
# the `ovn-ctl` script will not touch it.
#
# The `ovn-ctl` `db-nb-cluster-remote-addr` and `db-sb-cluster-remote-addr`
# configuration options only take one remote and one must be provided for
# correct startup, but the values in the on-disk database file will be used by
# `ovsdb-server`.
OVN_CTL_OPTS= \
--db-nb-cluster-local-addr={{ ovsdb_peer.cluster_local_addr }} \
--db-nb-cluster-local-port={{ ovsdb_peer.db_nb_cluster_port }} \
--db-nb-cluster-local-proto=ssl \
--ovn-nb-db-ssl-key={{ options.ovn_key }} \
--ovn-nb-db-ssl-cert={{ options.ovn_cert }} \
--ovn-nb-db-ssl-ca-cert={{ options.ovn_ca_cert }} \
--db-nb-cluster-remote-addr={{ ovsdb_peer.cluster_remote_addrs | first if not options.is_charm_leader else '' }} \
--db-nb-cluster-remote-port={{ ovsdb_peer.db_nb_cluster_port }} \
--db-nb-cluster-remote-proto=ssl \
--db-sb-cluster-local-addr={{ ovsdb_peer.cluster_local_addr }} \
--db-sb-cluster-local-port={{ ovsdb_peer.db_sb_cluster_port }} \
--db-sb-cluster-local-proto=ssl \
--ovn-sb-db-ssl-key={{ options.ovn_key }} \
--ovn-sb-db-ssl-cert={{ options.ovn_cert }} \
--ovn-sb-db-ssl-ca-cert={{ options.ovn_ca_cert }} \
--db-sb-cluster-remote-addr={{ ovsdb_peer.cluster_remote_addrs | first if not options.is_charm_leader else '' }} \
--db-sb-cluster-remote-port={{ ovsdb_peer.db_sb_cluster_port }} \
--db-sb-cluster-remote-proto=ssl

View File

@@ -1,9 +0,0 @@
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# Functional Test Requirements (let Zaza's dependencies solve all dependencies here!)
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack

View File

@@ -1,24 +0,0 @@
variables:
openstack-origin: &openstack-origin cloud:jammy-bobcat
local_overlay_enabled: False
series: jammy
applications:
vault:
charm: ch:vault
num_units: 1
channel: latest/edge
ovn-central:
charm: ../../../ovn-central_ubuntu-22.04-amd64.charm
num_units: 3
options:
source: *openstack-origin
relations:
- - 'ovn-central:certificates'
- 'vault:certificates'

View File

@@ -1,24 +0,0 @@
variables:
openstack-origin: &openstack-origin distro
local_overlay_enabled: False
series: mantic
applications:
vault:
charm: ch:vault
num_units: 1
channel: latest/edge
ovn-central:
charm: ../../../ovn-central_ubuntu-23.10-amd64.charm
num_units: 3
options:
source: *openstack-origin
relations:
- - 'ovn-central:certificates'
- 'vault:certificates'

View File

@@ -1,24 +0,0 @@
variables:
openstack-origin: &openstack-origin caracal
local_overlay_enabled: False
series: noble
applications:
vault:
charm: ch:vault
num_units: 1
channel: latest/edge
ovn-central:
charm: ../../../ovn-central_amd64.charm
num_units: 3
options:
source: *openstack-origin
relations:
- - 'ovn-central:certificates'
- 'vault:certificates'

View File

@@ -1,32 +0,0 @@
charm_name: ovn-central
gate_bundles:
- noble-caracal
smoke_bundles:
- noble-caracal
dev_bundles:
- noble-caracal
target_deploy_status:
ovn-central:
workload-status: waiting
workload-status-message-prefix: "'ovsdb-peer' incomplete, 'certificates' awaiting server certificate data"
vault:
workload-status: blocked
workload-status-message-prefix: Vault needs to be initialized
nrpe:
workload-status: blocked
workload-status-message-prefix: "Nagios server not configured or related"
# Note that full end to end tests are performed with OVN in the
# neutron-api-plugin-ovn and octavia charm gates
configure:
- zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation
tests:
- zaza.openstack.charm_tests.ovn.tests.OVNCentralDeferredRestartTest
- zaza.openstack.charm_tests.ovn.tests.CentralCharmOperationTest
- zaza.openstack.charm_tests.ovn.tests.OVNCentralDownscaleTests

View File

@@ -1,55 +0,0 @@
# Source charm (with zaza): ./src/tox.ini
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of tox.ini for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
[tox]
envlist = pep8
# NOTE: Avoid build/test env pollution by not enabling sitepackages.
sitepackages = False
# NOTE: Avoid false positives by not skipping missing interpreters.
skip_missing_interpreters = False
[testenv]
# We use tox mainly for virtual environment management for test requirements
# and do not install the charm code as a Python package into that environment.
# Ref: https://tox.wiki/en/latest/config.html#skip_install
skip_install = True
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
allowlist_externals = juju
passenv =
HOME
TERM
CS_*
OS_*
TEST_*
deps = -r{toxinidir}/test-requirements.txt
[testenv:pep8]
basepython = python3
commands = charm-proof
[testenv:func-noop]
basepython = python3
commands =
functest-run-suite --help
[testenv:func]
basepython = python3
commands =
functest-run-suite --keep-model
[testenv:func-smoke]
basepython = python3
commands =
functest-run-suite --keep-model --smoke
[testenv:func-target]
basepython = python3
commands =
functest-run-suite --keep-model --bundle {posargs}
[testenv:venv]
commands = {posargs}

View File

@@ -1,10 +0,0 @@
dnspython
psutil
poetry-core
six
git+https://github.com/wolsen/charms.reactive.git@fix-entry-points#egg=charms.reactive
git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack
git+https://github.com/juju/charm-helpers.git#egg=charmhelpers

View File

@@ -1,37 +0,0 @@
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here.
stestr>=2.2.0
# Dependency of stestr. Workaround for
# https://github.com/mtreinish/stestr/issues/145
cliff<3.0.0
requests>=2.18.4
charms.reactive
mock>=1.2
nose>=1.3.7
coverage>=3.6
git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack
#
# Revisit for removal / mock improvement:
#
# NOTE(lourot): newer versions of cryptography require a Rust compiler to build,
# see
# * https://github.com/openstack-charmers/zaza/issues/421
# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html
#
netifaces # vault
psycopg2-binary # vault
tenacity # vault
pbr==5.6.0 # vault
cryptography<3.4 # vault, keystone-saml-mellon
lxml # keystone-saml-mellon
hvac # vault, barbican-vault
psutil # cinder-lvm

92
tox.ini
View File

@@ -1,9 +1,5 @@
# Source charm: ./tox.ini
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of tox.ini for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
# Placeholder tox.ini to satisfy CI on final push that
# archives the repository.
[tox]
envlist = pep8,pylint,py3
# NOTE: Avoid build/test env pollution by not enabling sitepackages.
@@ -12,98 +8,28 @@ sitepackages = False
skip_missing_interpreters = False
[testenv]
# We use tox mainly for virtual environment management for test requirements
# and do not install the charm code as a Python package into that environment.
# Ref: https://tox.wiki/en/latest/config.html#skip_install
skip_install = True
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
TERM=linux
CHARM_LAYERS_DIR={toxinidir}/layers
CHARM_INTERFACES_DIR={toxinidir}/interfaces
JUJU_REPOSITORY={toxinidir}/build
passenv =
no_proxy
http_proxy
https_proxy
CHARM_INTERFACES_DIR
CHARM_LAYERS_DIR
JUJU_REPOSITORY
allowlist_externals =
charmcraft
bash
tox
deps =
-r{toxinidir}/requirements.txt
[testenv:build]
basepython = python3
commands =
charmcraft clean
charmcraft -v pack
[testenv:add-build-lock-file]
basepython = python3
commands =
charm-build --log-level DEBUG --write-lock-file -o {toxinidir}/build/builds src {posargs}
allowlist_externals=true
[testenv:py3]
basepython = python3
deps = -r{toxinidir}/test-requirements.txt
commands = stestr run --slowest {posargs}
commands = true
[testenv:py310]
basepython = python3.10
deps = -r{toxinidir}/test-requirements.txt
commands = stestr run --slowest {posargs}
commands = true
[testenv:pep8]
basepython = python3
deps = flake8==7.1.1
git+https://github.com/juju/charm-tools.git
commands = flake8 {posargs} src unit_tests
commands = true
# This is added for manual testing and is not run by the gate.
[testenv:pylint]
deps =
pylint==2.17.4
cryptography
commands = pylint -v --rcfile={toxinidir}/pylintrc \
{toxinidir}/src/files/nagios/nrpe_check_ovn_certs.py \
{toxinidir}/src/files/scripts/check_ovn_certs.py
basepython = python3
commands = true
[testenv:cover]
# Technique based heavily upon
# https://github.com/openstack/nova/blob/master/tox.ini
basepython = python3
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
setenv =
{[testenv]setenv}
PYTHON=coverage run
commands =
coverage erase
stestr run --slowest {posargs}
coverage combine
coverage html -d cover
coverage xml -o cover/coverage.xml
coverage report
[coverage:run]
branch = True
concurrency = multiprocessing
parallel = True
source =
.
omit =
.tox/*
*/charmhelpers/*
unit_tests/*
commands = true
[testenv:venv]
basepython = python3
commands = {posargs}
[flake8]
# E402 ignore necessary for path append before sys module import in actions
ignore = E402,W503,W504

View File

@@ -1,86 +0,0 @@
# Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make the charm source and its bundled libraries importable for unit tests.
import sys
sys.path.append('src')
sys.path.append('src/lib')
# Mock out charmhelpers so that we can test without it.
import charms_openstack.test_mocks # noqa
charms_openstack.test_mocks.mock_charmhelpers()
import mock
class _fake_decorator(object):
def __init__(self, *args):
pass
def __call__(self, f):
return f
# Stub out charmhelpers submodules not covered by mock_charmhelpers().
sys.modules['charmhelpers.contrib.network.ovs'] = mock.MagicMock()
sys.modules['charmhelpers.contrib.network.ovs.ovn'] = mock.MagicMock()
sys.modules['charmhelpers.contrib.network.ovs.ovsdb'] = mock.MagicMock()
sys.modules['charmhelpers.contrib.charmsupport.nrpe'] = mock.MagicMock()
sys.modules[
    'charmhelpers.contrib.openstack.deferred_events'] = mock.MagicMock()
# Build a fake ``charms`` namespace; decorators must be the no-op
# _fake_decorator so that handlers import cleanly and stay callable.
charms = mock.MagicMock()
sys.modules['charms'] = charms
charms.leadership = mock.MagicMock()
sys.modules['charms.leadership'] = charms.leadership
charms.coordinator = mock.MagicMock()
sys.modules['charms.coordinator'] = charms.coordinator
charms.reactive = mock.MagicMock()
charms.reactive.when = _fake_decorator
charms.reactive.when_all = _fake_decorator
charms.reactive.when_any = _fake_decorator
charms.reactive.when_not = _fake_decorator
charms.reactive.when_none = _fake_decorator
charms.reactive.when_not_all = _fake_decorator
charms.reactive.not_unless = _fake_decorator
charms.reactive.when_file_changed = _fake_decorator
charms.reactive.collect_metrics = _fake_decorator
charms.reactive.meter_status_changed = _fake_decorator
charms.reactive.only_once = _fake_decorator
charms.reactive.hook = _fake_decorator
charms.reactive.bus = mock.MagicMock()
charms.reactive.flags = mock.MagicMock()
charms.reactive.relations = mock.MagicMock()
sys.modules['charms.reactive'] = charms.reactive
sys.modules['charms.reactive.bus'] = charms.reactive.bus
# Fix: register decorators under their own module name instead of
# overwriting the 'charms.reactive.bus' entry (copy-paste bug).
sys.modules['charms.reactive.decorators'] = charms.reactive.decorators
sys.modules['charms.reactive.flags'] = charms.reactive.flags
sys.modules['charms.reactive.relations'] = charms.reactive.relations
charms.ovn = mock.MagicMock()
sys.modules['charms.ovn'] = charms.ovn
charms.ovn_charm = mock.MagicMock()
# Fix: register the ovn_charm mock itself, not the charms.ovn mock.
sys.modules['charms.ovn_charm'] = charms.ovn_charm
charms.layer = mock.MagicMock()
sys.modules['charms.layer'] = charms.layer
# Third-party client libraries referenced by the charm code under test.
keystoneauth1 = mock.MagicMock()
sys.modules['keystoneauth1'] = keystoneauth1
netaddr = mock.MagicMock()
sys.modules['netaddr'] = netaddr
neutronclient = mock.MagicMock()
sys.modules['neutronclient'] = neutronclient
sys.modules['neutronclient.v2_0'] = neutronclient.v2_0
neutron_lib = mock.MagicMock()
sys.modules['neutron_lib'] = neutron_lib
sys.modules['neutron_lib.constants'] = neutron_lib.constants
novaclient = mock.MagicMock()
sys.modules['novaclient'] = novaclient
View File

@@ -1,551 +0,0 @@
# Copyright 2022 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from unittest import TestCase
from unittest.mock import MagicMock, patch, call
import yaml
import actions.cluster as cluster_actions
class ClusterActionTests(TestCase):
# Static fixture mapping Juju unit names to the server ID and address
# ('ssl:<ip>:<port>') each unit's cluster member reports.
UNIT_MAPPING = {
    "ovn-central/0": {"id": "aa11", "address": "ssl:10.0.0.1:6644"},
    "ovn-central/1": {"id": "bb22", "address": "ssl:10.0.0.2:6644"},
    "ovn-central/2": {"id": "cc33", "address": "ssl:10.0.0.3:6644"},
}
@property
def servers(self):
    """Return list of tuples representing servers in cluster.

    Derived from self.UNIT_MAPPING; mirrors the shape of the
    OVNClusterStatus.servers attribute.

    :rtype: List[Tuple(str, str)]
    """
    return [
        (info["id"], info["address"])
        for info in self.UNIT_MAPPING.values()
    ]
@property
def unit_ip_map(self):
    """Return mapping between unit names and their IPs.

    Addresses in self.UNIT_MAPPING look like 'ssl:<ip>:<port>'; the
    middle component is the IP.

    :rtype: Dict[str, str]
    """
    return {
        unit: data["address"].split(":")[1]
        for unit, data in self.UNIT_MAPPING.items()
    }
@property
def unit_id_map(self):
    """Return mapping between unit names and their server IDs.

    Derived from self.UNIT_MAPPING.

    :rtype: Dict[str, str]
    """
    return {
        unit: data["id"]
        for unit, data in self.UNIT_MAPPING.items()
    }
def setUp(self):
    """Set up and automatically clean up frequently used mocks."""
    super().setUp()
    # Patch the hookenv action helpers and the appctl wrapper for the
    # duration of each test.
    mocks = [
        patch.object(cluster_actions.ch_core.hookenv, "action_get"),
        patch.object(cluster_actions.ch_core.hookenv, "action_set"),
        patch.object(cluster_actions.ch_core.hookenv, "action_fail"),
        patch.object(cluster_actions.ch_ovn, "ovn_appctl"),
    ]
    for mock in mocks:
        mock.start()
        self.addCleanup(mock.stop)
    # Mock actions mapped in the cluster.py otherwise they'd refer
    # to non-mocked functions.
    self.mapped_action_cluster_kick = MagicMock()
    self.mapped_action_cluster_status = MagicMock()
    cluster_actions.ACTIONS[
        "cluster-kick"
    ] = self.mapped_action_cluster_kick
    cluster_actions.ACTIONS[
        "cluster-status"
    ] = self.mapped_action_cluster_status
def test_url_to_ip(self):
    """Test function that parses IPs out of server URLs."""
    valid_ipv4 = "10.0.0.1"
    valid_ipv6 = "2001:db8:3333:4444:5555:6666:7777:8888"
    invalid_addr = "foo"
    url = "ssl:{}:6644"
    # Parse valid IPv4
    ipv4 = cluster_actions._url_to_ip(url.format(valid_ipv4))
    self.assertEqual(ipv4, valid_ipv4)
    # Parse valid IPv6
    ipv6 = cluster_actions._url_to_ip(url.format(valid_ipv6))
    self.assertEqual(ipv6, valid_ipv6)
    # Parse invalid url; ch_ip.is_ip is presumably a module-level mock
    # here so its return value can be forced -- TODO confirm against
    # unit_tests/__init__.py mocking.
    cluster_actions.ch_ip.is_ip.return_value = False
    with self.assertRaises(cluster_actions.StatusParsingException):
        cluster_actions._url_to_ip(url.format(invalid_addr))
@patch.object(cluster_actions.ch_ovn, 'OVNClusterStatus')
def test_format_cluster_status(self, mock_cluster_status):
    """Test turning OVNClusterStatus into dict.

    Resulting dict also contains additional info mapping cluster servers
    to the juju units.
    """
    sample_data = {"cluster_id": "11aa", "servers": self.servers}
    mock_cluster_status.to_yaml.return_value = sample_data
    mock_cluster_status.servers = self.servers
    cluster_status = cluster_actions._format_cluster_status(
        mock_cluster_status, self.unit_ip_map
    )
    # Compare resulting dict with expected data: original to_yaml output
    # plus the extra "unit_map" key.
    expected_status = sample_data.copy()
    expected_status["unit_map"] = self.unit_id_map
    self.assertEqual(cluster_status, expected_status)
@patch.object(cluster_actions.ch_ovn, 'OVNClusterStatus')
def test_format_cluster_status_missing_server(self, mock_cluster_status):
    """Test turning OVNClusterStatus into dict with a missing server.

    This use-case happens when OVN cluster reports a server that does not
    run on an active ovn-central unit. For example, if the server ran on a
    unit that was destroyed and did not leave cluster gracefully. In such
    case, the resulting status lists this server under the "UNKNOWN" unit.
    """
    # Add a server whose IP does not match any known unit.
    missing_server_id = "ff99"
    missing_server_ip = "10.0.0.99"
    missing_server_url = "ssl:{}:6644".format(missing_server_ip)
    servers = self.servers.copy()
    servers.append((missing_server_id, missing_server_url))
    sample_data = {"cluster_id": "11aa", "servers": servers}
    mock_cluster_status.to_yaml.return_value = sample_data
    mock_cluster_status.servers = servers
    cluster_status = cluster_actions._format_cluster_status(
        mock_cluster_status, self.unit_ip_map
    )
    # Compare resulting dict with expected data; the unmatched server ID
    # is expected under the "UNKNOWN" key of the unit map.
    expected_status = sample_data.copy()
    expected_status["unit_map"] = self.unit_id_map
    expected_status["unit_map"]["UNKNOWN"] = [missing_server_id]
    self.assertEqual(cluster_status, expected_status)
@patch.object(cluster_actions.ch_ovn, 'OVNClusterStatus')
@patch.object(cluster_actions, "_url_to_ip")
def test_format_cluster_parsing_failure(
    self,
    mock_url_to_ip,
    mock_cluster_status
):
    """Test failure to parse status with _format_cluster_status().

    A StatusParsingException from _url_to_ip must propagate.
    """
    sample_data = {"cluster_id": "11aa", "servers": self.servers}
    mock_cluster_status.to_yaml.return_value = sample_data
    mock_cluster_status.servers = self.servers
    mock_url_to_ip.side_effect = cluster_actions.StatusParsingException
    with self.assertRaises(cluster_actions.StatusParsingException):
        cluster_actions._format_cluster_status(
            mock_cluster_status, self.unit_ip_map
        )
@patch.object(cluster_actions.reactive, "endpoint_from_flag")
@patch.object(cluster_actions.ch_core.hookenv, "local_unit")
def test_cluster_ip_map(self, mock_local_unit, mock_endpoint_from_flag):
    """Test generating map of unit names and their IPs."""
    expected_map = {}
    remote_unit_data = deepcopy(self.UNIT_MAPPING)
    remote_units = []
    # The local unit is not present on the relation as a remote unit;
    # its IP comes from endpoint.cluster_local_addr instead.
    local_unit_name = "ovn-central/0"
    local_unit_data = remote_unit_data.pop(local_unit_name)
    # Fabricate remote relation units publishing "bound-address".
    for unit_name, data in remote_unit_data.items():
        _, ip, _ = data["address"].split(":")
        unit = MagicMock()
        unit.unit_name = unit_name
        unit.received = {"bound-address": ip}
        remote_units.append(unit)
        expected_map[unit_name] = ip
    _, local_unit_ip, _ = local_unit_data["address"].split(":")
    expected_map[local_unit_name] = local_unit_ip
    endpoint = MagicMock()
    relation = MagicMock()
    relation.units = remote_units
    endpoint.relations = [relation]
    endpoint.cluster_local_addr = local_unit_ip
    mock_local_unit.return_value = local_unit_name
    mock_endpoint_from_flag.return_value = endpoint
    unit_mapping = cluster_actions._cluster_ip_map()
    self.assertEqual(unit_mapping, expected_map)
def test_kick_server_success(self):
    """Test successfully kicking server from cluster.

    _kick_server() is expected to run 'cluster/kick' via ovn_appctl
    against the matching database (ovnsb_db/ovnnb_db).
    """
    server_id = "aa11"
    expected_sb_call = (
        "ovnsb_db",
        ("cluster/kick", "OVN_Southbound", server_id)
    )
    expected_nb_call = (
        "ovnnb_db",
        ("cluster/kick", "OVN_Northbound", server_id)
    )
    # test kick from Southbound cluster
    cluster_actions._kick_server("southbound", server_id)
    cluster_actions.ch_ovn.ovn_appctl.assert_called_once_with(
        *expected_sb_call
    )
    # Reset mock
    cluster_actions.ch_ovn.ovn_appctl.reset_mock()
    # test kick from Northbound cluster
    cluster_actions._kick_server("northbound", server_id)
    cluster_actions.ch_ovn.ovn_appctl.assert_called_once_with(
        *expected_nb_call
    )
def test_kick_server_unknown_cluster(self):
    """Kicking a server from an unrecognized cluster raises ValueError.

    _kick_server() accepts only "southbound" or "northbound" for the
    'cluster' parameter; anything else must fail.
    """
    self.assertRaises(
        ValueError, cluster_actions._kick_server, "foo", "11aa"
    )
@patch.object(
    cluster_actions.charms_openstack.charm, "provide_charm_instance"
)
@patch.object(cluster_actions, "_cluster_ip_map")
@patch.object(cluster_actions, "_format_cluster_status")
def test_cluster_status(
    self, format_cluster_mock, cluster_map_mock, provide_instance_mock
):
    """Test cluster-status action implementation.

    Covers both the success path (status of each database is set as
    yaml-serialized action output) and the failure path (a status
    parsing error fails the action without setting output).
    """
    sb_raw_status = "Southbound status"
    nb_raw_status = "Northbound status"
    charm_instance = MagicMock()
    # cluster_status() is expected to query Southbound first, then
    # Northbound.
    charm_instance.cluster_status.side_effect = [
        sb_raw_status,
        nb_raw_status,
    ]
    provide_instance_mock.return_value = charm_instance
    ip_map = {"ovn-central/0": "10.0.0.0"}
    cluster_map_mock.return_value = ip_map
    sb_cluster_status = {"Southbound": "status"}
    nb_cluster_status = {"Northbound": "status"}
    format_cluster_mock.side_effect = [
        sb_cluster_status,
        nb_cluster_status,
    ]
    # Test successfully generating cluster status
    cluster_actions.cluster_status()
    # NOTE: use safe_dump for both expectations; the original used plain
    # yaml.dump for the Northbound value which was inconsistent (output
    # is identical for plain dicts).
    expected_calls = [
        call(
            {
                "ovnsb": yaml.safe_dump(
                    sb_cluster_status, sort_keys=False
                )
            }
        ),
        call(
            {
                "ovnnb": yaml.safe_dump(
                    nb_cluster_status, sort_keys=False
                )
            }
        ),
    ]
    cluster_actions.ch_core.hookenv.action_set.assert_has_calls(
        expected_calls)
    # Fixed typo: 'asser_not_called' silently did nothing because
    # MagicMock auto-creates unknown attributes.
    cluster_actions.ch_core.hookenv.action_fail.assert_not_called()
    # Reset mocks
    cluster_actions.ch_core.hookenv.action_set.reset_mock()
    # Test failure to generate cluster status
    msg = "parsing failed"
    format_cluster_mock.side_effect = (
        cluster_actions.StatusParsingException(msg)
    )
    cluster_actions.cluster_status()
    cluster_actions.ch_core.hookenv.action_set.assert_not_called()
    cluster_actions.ch_core.hookenv.action_fail.assert_called_once_with(
        msg
    )
@patch.object(cluster_actions, "_kick_server")
def test_cluster_kick_no_server(self, kick_server_mock):
    """Test running cluster-kick action without providing any server ID.

    The action must fail with an explanatory message and kick nothing.
    """
    cluster_actions.ch_core.hookenv.action_get.return_value = ""
    err = "At least one server ID to kick must be specified."
    cluster_actions.cluster_kick()
    cluster_actions.ch_core.hookenv.action_fail.assert_called_once_with(
        err
    )
    cluster_actions.ch_core.hookenv.action_set.assert_not_called()
    kick_server_mock.assert_not_called()
@patch.object(cluster_actions, "_kick_server")
def test_cluster_kick_sb_server(self, kick_server_mock):
    """Test kicking single Southbound server from cluster."""
    sb_id = "11aa"
    nb_id = ""
    expected_msg = {"ovnsb": "requested kick of {}".format(sb_id)}
    # Test successfully kicking server from Southbound cluster.
    # action_get is consulted twice: first for the SB ID, then NB ID.
    cluster_actions.ch_core.hookenv.action_get.side_effect = [
        sb_id,
        nb_id,
    ]
    cluster_actions.cluster_kick()
    cluster_actions.ch_core.hookenv.action_fail.assert_not_called()
    cluster_actions.ch_core.hookenv.action_set.assert_called_once_with(
        expected_msg
    )
    kick_server_mock.assert_called_once_with("southbound", sb_id)
    # Reset mocks
    cluster_actions.ch_core.hookenv.action_set.reset_mock()
    cluster_actions.ch_core.hookenv.action_fail.reset_mock()
    kick_server_mock.reset_mock()
    cluster_actions.ch_core.hookenv.action_get.side_effect = [
        sb_id,
        nb_id,
    ]
    # Test failure to kick server from Southbound cluster
    process_output = "Failed to kick server"
    exception = cluster_actions.subprocess.CalledProcessError(
        -1, "/usr/sbin/ovs-appctl", process_output
    )
    kick_server_mock.side_effect = exception
    err = "Failed to kick Southbound cluster member {}: {}".format(
        sb_id, process_output
    )
    cluster_actions.cluster_kick()
    cluster_actions.ch_core.hookenv.action_set.assert_not_called()
    cluster_actions.ch_core.hookenv.action_fail.assert_called_once_with(
        err
    )
    kick_server_mock.assert_called_once_with("southbound", sb_id)
@patch.object(cluster_actions, "_kick_server")
def test_cluster_kick_nb_server(self, kick_server_mock):
    """Test kicking single Northbound server from cluster."""
    sb_id = ""
    nb_id = "22bb"
    expected_msg = {"ovnnb": "requested kick of {}".format(nb_id)}
    # Test successfully kicking server from Northbound cluster.
    # action_get is consumed twice (SB id first, then NB id).
    cluster_actions.ch_core.hookenv.action_get.side_effect = [
        sb_id,
        nb_id,
    ]
    cluster_actions.cluster_kick()
    cluster_actions.ch_core.hookenv.action_fail.assert_not_called()
    cluster_actions.ch_core.hookenv.action_set.assert_called_once_with(
        expected_msg
    )
    kick_server_mock.assert_called_once_with("northbound", nb_id)
    # Reset mocks and re-arm action_get for the failure scenario.
    cluster_actions.ch_core.hookenv.action_set.reset_mock()
    cluster_actions.ch_core.hookenv.action_fail.reset_mock()
    kick_server_mock.reset_mock()
    cluster_actions.ch_core.hookenv.action_get.side_effect = [
        sb_id,
        nb_id,
    ]
    # Test failure to kick server from Northbound cluster: the
    # CalledProcessError raised by _kick_server is reported via action_fail.
    process_output = "Failed to kick server"
    exception = cluster_actions.subprocess.CalledProcessError(
        -1, "/usr/sbin/ovs-appctl", process_output
    )
    kick_server_mock.side_effect = exception
    err = "Failed to kick Northbound cluster member {}: {}".format(
        nb_id, process_output
    )
    cluster_actions.cluster_kick()
    cluster_actions.ch_core.hookenv.action_set.assert_not_called()
    cluster_actions.ch_core.hookenv.action_fail.assert_called_once_with(
        err
    )
    kick_server_mock.assert_called_once_with("northbound", nb_id)
@patch.object(cluster_actions, "_kick_server")
def test_cluster_kick_both_server(self, kick_server_mock):
    """Test kicking Southbound and Northbound servers from cluster.

    Covers the success path (one action_set call per cluster) and the
    failure path (one action_fail call per cluster) when server IDs are
    supplied for both the Southbound and the Northbound database.
    """
    sb_id = "11bb"
    nb_id = "22bb"
    expected_func_set_calls = [
        call({"ovnsb": "requested kick of {}".format(sb_id)}),
        call({"ovnnb": "requested kick of {}".format(nb_id)}),
    ]
    kick_commands = [
        call("southbound", sb_id),
        call("northbound", nb_id),
    ]
    # Test successfully kicking servers from Northbound and Southbound
    # cluster
    cluster_actions.ch_core.hookenv.action_get.side_effect = [
        sb_id,
        nb_id,
    ]
    cluster_actions.cluster_kick()
    cluster_actions.ch_core.hookenv.action_fail.assert_not_called()
    cluster_actions.ch_core.hookenv.action_set.assert_has_calls(
        expected_func_set_calls
    )
    kick_server_mock.assert_has_calls(kick_commands)
    # Reset mocks.
    # Fix: reset the mock actually exercised by this test; the previous
    # revision reset ch_ovn.ovn_appctl (unused here), leaving stale call
    # records on kick_server_mock between the two phases.
    cluster_actions.ch_core.hookenv.action_set.reset_mock()
    cluster_actions.ch_core.hookenv.action_fail.reset_mock()
    kick_server_mock.reset_mock()
    cluster_actions.ch_core.hookenv.action_get.side_effect = [
        sb_id,
        nb_id,
    ]
    # Test failure to kick servers from Northbound and Southbound
    # clusters
    process_output = "Failed to kick server"
    exception = cluster_actions.subprocess.CalledProcessError(
        -1, "/usr/sbin/ovs-appctl", process_output
    )
    kick_server_mock.side_effect = exception
    errors = [
        call(
            "Failed to kick Southbound cluster member {}: {}".format(
                sb_id, process_output
            )
        ),
        call(
            "Failed to kick Northbound cluster member {}: {}".format(
                nb_id, process_output
            )
        ),
    ]
    cluster_actions.cluster_kick()
    cluster_actions.ch_core.hookenv.action_set.assert_not_called()
    cluster_actions.ch_core.hookenv.action_fail.assert_has_calls(errors)
    kick_server_mock.assert_has_calls(kick_commands)
@patch.object(cluster_actions.reactive, "endpoint_from_flag")
def test_main_no_cluster(self, endpoint):
    """Test refusal to run action if unit is not in cluster."""
    # No ovsdb-peer endpoint means the unit has not joined an OVN cluster.
    endpoint.return_value = None
    err = "Unit is not part of an OVN cluster."
    cluster_actions.main([])
    cluster_actions.ch_core.hookenv.action_fail.assert_called_once_with(
        err
    )
    # Neither mapped action should have been dispatched.
    self.mapped_action_cluster_kick.assert_not_called()
    self.mapped_action_cluster_status.assert_not_called()
@patch.object(cluster_actions.reactive, "endpoint_from_flag")
def test_main_unknown_action(self, endpoint):
    """Test executing unknown action from main function."""
    endpoint.return_value = MagicMock()
    action = "unknown-action"
    # main() derives the action name from the basename of argv[0].
    action_path = (
        "/var/lib/juju/agents/unit-ovn-central-0/charm/actions/" + action
    )
    err = "Action {} undefined".format(action)
    result = cluster_actions.main([action_path])
    # For undefined actions main() returns the error message.
    self.assertEqual(result, err)
    self.mapped_action_cluster_kick.assert_not_called()
    self.mapped_action_cluster_status.assert_not_called()
@patch.object(cluster_actions.reactive, "endpoint_from_flag")
def test_main_cluster_kick(self, endpoint):
    """Test executing cluster-kick action from main function."""
    endpoint.return_value = MagicMock()
    # main() resolves the action to run from the basename of argv[0].
    argv = [
        "/var/lib/juju/agents/unit-ovn-central-0/charm/actions/cluster-kick"
    ]
    cluster_actions.main(argv)
    cluster_actions.ch_core.hookenv.action_fail.assert_not_called()
    self.mapped_action_cluster_kick.assert_called_once_with()
@patch.object(cluster_actions.reactive, "endpoint_from_flag")
def test_main_cluster_status(self, endpoint):
    """Test executing cluster-status action from main function."""
    endpoint.return_value = MagicMock()
    # main() resolves the action to run from the basename of argv[0].
    argv = [
        "/var/lib/juju/agents/unit-ovn-central-0/charm/actions/"
        "cluster-status"
    ]
    cluster_actions.main(argv)
    cluster_actions.ch_core.hookenv.action_fail.assert_not_called()
    self.mapped_action_cluster_status.assert_called_once_with()

View File

@@ -1,140 +0,0 @@
# Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock as mock
import actions.os_deferred_event_actions as os_deferred_event_actions
import charms_openstack.test_utils as test_utils
class TestOSDeferredEventActions(test_utils.PatchHelper):
    """Unit tests for the shared OpenStack deferred-event actions."""

    def setUp(self):
        super().setUp()
        # action_get reads values from a per-test dict of action params.
        self.patch_object(os_deferred_event_actions.hookenv, 'action_get')
        self.action_config = {}
        self.action_get.side_effect = lambda x: self.action_config.get(x)
        self.patch_object(os_deferred_event_actions.hookenv, 'action_fail')
        self.patch_object(
            os_deferred_event_actions.charms_openstack.charm,
            'provide_charm_instance')
        # provide_charm_instance is a context manager; make it yield a mock
        # charm instance the actions operate on.
        self.charm_instance = mock.MagicMock()
        self.provide_charm_instance.return_value.__enter__.return_value = \
            self.charm_instance

    def test_restart_services(self):
        """restart-services requires exactly one of deferred-only/services."""
        self.patch_object(
            os_deferred_event_actions.os_utils,
            'restart_services_action')
        # deferred-only alone: restart only services with deferred events.
        self.action_config = {
            'deferred-only': True,
            'services': ''}
        os_deferred_event_actions.restart_services(['restart-services'])
        self.charm_instance._assess_status.assert_called_once_with()
        self.restart_services_action.assert_called_once_with(
            deferred_only=True)
        self.charm_instance.reset_mock()
        self.restart_services_action.reset_mock()
        # Explicit service list alone: restart just those services.
        self.action_config = {
            'deferred-only': False,
            'services': 'svcA svcB'}
        os_deferred_event_actions.restart_services(['restart-services'])
        self.charm_instance._assess_status.assert_called_once_with()
        self.restart_services_action.assert_called_once_with(
            services=['svcA', 'svcB'])
        self.charm_instance.reset_mock()
        self.restart_services_action.reset_mock()
        # Both set: the action must fail.
        self.action_config = {
            'deferred-only': True,
            'services': 'svcA svcB'}
        os_deferred_event_actions.restart_services(['restart-services'])
        self.action_fail.assert_called_once_with(
            'Cannot set deferred-only and services')
        self.charm_instance.reset_mock()
        self.restart_services_action.reset_mock()
        self.action_fail.reset_mock()
        # Neither set: the action must fail.
        self.action_config = {
            'deferred-only': False,
            'services': ''}
        os_deferred_event_actions.restart_services(['restart-services'])
        self.action_fail.assert_called_once_with(
            'Please specify deferred-only or services')

    def test_show_deferred_events(self):
        """show-deferred-events delegates to the os_utils helper."""
        self.patch_object(
            os_deferred_event_actions.os_utils,
            'show_deferred_events_action_helper')
        os_deferred_event_actions.show_deferred_events(
            ['show-deferred-events'])
        self.show_deferred_events_action_helper.assert_called_once_with()

    def test_run_deferred_hooks(self):
        """run-deferred-hooks replays only the hooks that were deferred."""
        self.patch_object(
            os_deferred_event_actions.deferred_events,
            'get_deferred_hooks')
        self.patch_object(
            os_deferred_event_actions.reactive,
            'endpoint_from_flag')
        self.patch_object(
            os_deferred_event_actions.reactive,
            'is_flag_set')
        self.patch_object(
            os_deferred_event_actions.charms_openstack.charm,
            'optional_interfaces')
        interfaces_mock = mock.MagicMock()
        self.optional_interfaces.return_value = interfaces_mock
        self.is_flag_set.return_value = True
        ovsdb_available = mock.MagicMock()
        ovsdb_available.db_sb_connection_strs = ['constrA', 'connstrB']
        self.endpoint_from_flag.return_value = ovsdb_available
        # Only 'install' deferred: configure_ovs/render must not run.
        self.get_deferred_hooks.return_value = ['install']
        os_deferred_event_actions.run_deferred_hooks(['run-deferred-hooks'])
        self.charm_instance.install.assert_called_once_with(
            check_deferred_events=False)
        self.assertFalse(self.charm_instance.configure_ovs.called)
        self.assertFalse(
            self.charm_instance.render_with_interfaces.called)
        self.charm_instance._assess_status.assert_called_once_with()
        self.charm_instance.reset_mock()
        # 'install' and 'configure_ovs' deferred: both are replayed and
        # templates re-rendered with the optional interfaces.
        self.get_deferred_hooks.return_value = ['install', 'configure_ovs']
        os_deferred_event_actions.run_deferred_hooks(['run-deferred-hooks'])
        self.charm_instance.install.assert_called_once_with(
            check_deferred_events=False)
        self.charm_instance.render_with_interfaces.assert_called_once_with(
            interfaces_mock)
        self.charm_instance.configure_ovs.assert_called_once_with(
            'constrA,connstrB',
            True,
            check_deferred_events=False)
        self.charm_instance._assess_status.assert_called_once_with()
        self.charm_instance.reset_mock()
        # Nothing deferred: only a status re-assessment happens.
        self.get_deferred_hooks.return_value = []
        os_deferred_event_actions.run_deferred_hooks(['run-deferred-hooks'])
        # Fix: the previous assertion inspected the auto-created child mock
        # `install.configure_ovs`, which is never called, so the check was
        # vacuous; assert on `install` itself instead.
        self.assertFalse(self.charm_instance.install.called)
        self.assertFalse(self.charm_instance.configure_ovs.called)
        self.assertFalse(self.charm_instance.render_with_interfaces.called)
        self.charm_instance._assess_status.assert_called_once_with()

View File

@@ -1,861 +0,0 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import collections
import io
import tempfile
import unittest.mock as mock
import charms_openstack.test_utils as test_utils
import charm.openstack.ovn_central as ovn_central
class Helper(test_utils.PatchHelper):
    """Shared fixture building a Ussuri charm instance with patched release."""

    def setUp(self):
        super().setUp()
        self.patch_release(ovn_central.UssuriOVNCentralCharm.release)
        self.patch_object(
            ovn_central.charms_openstack.adapters, 'config_property')
        self.target = ovn_central.UssuriOVNCentralCharm()

    def patch_target(self, attr, return_value=None):
        """Patch ``attr`` on the charm under test and expose it on self.

        The started mock is registered in the PatchHelper bookkeeping
        dicts so it is stopped automatically on tearDown.
        """
        patcher = mock.patch.object(self.target, attr)
        self._patches[attr] = patcher
        patched = patcher.start()
        patched.return_value = return_value
        self._patches_start[attr] = patched
        setattr(self, attr, patched)
class TestOVNCentralConfigurationAdapter(test_utils.PatchHelper):
    """Tests for the configuration adapter's derived properties."""

    def setUp(self):
        super().setUp()
        self.charm_instance = mock.MagicMock()
        self.charm_instance.ovn_sysconfdir.return_value = '/etc/path'
        self.target = ovn_central.OVNCentralConfigurationAdapter(
            charm_instance=self.charm_instance)

    def test__ovn_source(self):
        """_ovn_source depends on config, install-time flag and release."""
        self.patch_object(ovn_central.reactive, 'is_flag_set',
                          return_value=True)
        self.patch_object(ovn_central.ch_core.host, 'lsb_release',
                          return_value={'DISTRIB_CODENAME': 'focal'})
        # User has supplied a ovn-source config.
        # hookenv.config is patched manually (start/stop) because the
        # adapter reads config at construction time.
        m = mock.patch.object(ovn_central.ch_core.hookenv, 'config',
                              return_value={'ovn-source': 'fake-source'})
        m.start()
        self.target = ovn_central.OVNCentralConfigurationAdapter(
            charm_instance=self.charm_instance)
        m.stop()
        setattr(self, 'config', None)
        self.assertEqual('fake-source', self.target._ovn_source)
        # User has not supplied a ovn-source config, charm was installed at
        # this version on focal
        m = mock.patch.object(ovn_central.ch_core.hookenv, 'config',
                              return_value={'ovn-source': ''})
        m.start()
        self.target = ovn_central.OVNCentralConfigurationAdapter(
            charm_instance=self.charm_instance)
        m.stop()
        setattr(self, 'config', None)
        self.assertEqual('cloud:focal-ovn-22.03', self.target._ovn_source)
        # User has not supplied a ovn-source config, charm was upgraded
        self.is_flag_set.return_value = False
        self.assertEqual('', self.target._ovn_source)
        # User has not supplied a ovn-source config, charm was installed at
        # this version on jammy
        self.is_flag_set.return_value = True
        self.lsb_release.return_value = {'DISTRIB_CODENAME': 'jammy'}
        self.assertEqual('', self.target._ovn_source)
class TestOVNCentralCharm(Helper):
class FakeClusterStatus(object):
    # Minimal stand-in for a cluster status object; the tests below only
    # consult the is_cluster_leader attribute.
    def __init__(self, is_cluster_leader=None):
        self.is_cluster_leader = is_cluster_leader
def test_install_train(self):
    """Install on Train masks the OVS-provided services as well."""
    self.patch_release(ovn_central.TrainOVNCentralCharm.release)
    self.patch_object(ovn_central.ch_core.hookenv, 'config',
                      return_value={'ovn-source': ''})
    self.target = ovn_central.TrainOVNCentralCharm()
    self.patch_object(ovn_central.charms_openstack.charm.OpenStackCharm,
                      'install')
    self.patch_object(ovn_central.os.path, 'islink')
    self.islink.return_value = False
    self.patch_object(ovn_central.os, 'symlink')
    self.patch_target('configure_sources')
    self.patch_object(ovn_central.os, 'mkdir')
    self.target.install()
    # Services are masked by symlinking their unit files to /dev/null;
    # first the existing link is probed, then the symlink is created.
    calls = []
    for service in ('openvswitch-switch', 'ovs-vswitchd', 'ovsdb-server',
                    self.target.services[0],):
        calls.append(
            mock.call('/etc/systemd/system/{}.service'.format(service)))
    self.islink.assert_has_calls(calls)
    calls = []
    for service in ('openvswitch-switch', 'ovs-vswitchd', 'ovsdb-server',
                    self.target.services[0],):
        calls.append(
            mock.call('/dev/null',
                      '/etc/systemd/system/{}.service'.format(service)))
    self.symlink.assert_has_calls(calls)
    self.install.assert_called_once_with()
def test_install(self):
    """Install masks the charm's own OVN ovsdb services before packages."""
    self.patch_object(ovn_central.charms_openstack.charm.OpenStackCharm,
                      'install')
    self.patch_object(ovn_central.os.path, 'islink')
    self.islink.return_value = False
    self.patch_object(ovn_central.os, 'symlink')
    self.patch_target('configure_sources')
    self.patch_object(ovn_central.os, 'mkdir')
    self.patch_object(ovn_central.reactive, 'is_flag_set')
    # Not a charm upgrade, so masking is expected.
    self.is_flag_set.return_value = False
    self.target.install()
    calls = []
    for service in (self.target.services[0],
                    'ovn-ovsdb-server-nb',
                    'ovn-ovsdb-server-sb',):
        calls.append(
            mock.call('/etc/systemd/system/{}.service'.format(service)))
    self.islink.assert_has_calls(calls)
    calls = []
    for service in (self.target.services[0], 'ovn-ovsdb-server-nb',
                    'ovn-ovsdb-server-sb',):
        calls.append(
            mock.call('/dev/null',
                      '/etc/systemd/system/{}.service'.format(service)))
    self.symlink.assert_has_calls(calls)
    self.install.assert_called_once_with()
    self.configure_sources.assert_called_once_with()
def test_install_during_upgrade(self):
    """Test that services are not masked during charm upgrade.

    install() handler is also called during charm-upgrade handling
    and in such case, services should not be masked. Otherwise, it
    results in upgrade failures.
    """
    self.patch_object(ovn_central.charms_openstack.charm.OpenStackCharm,
                      'install')
    self.patch_object(ovn_central.os.path, 'islink')
    self.islink.return_value = False
    self.patch_object(ovn_central.os, 'symlink')
    self.patch_target('configure_sources')
    self.patch_object(ovn_central.os, 'mkdir')
    self.patch_object(ovn_central.reactive, 'is_flag_set')
    # Flag set -> install() runs in the context of a charm upgrade.
    self.is_flag_set.return_value = True
    self.target.install()
    # Assert that services were not masked
    self.islink.assert_not_called()
    self.symlink.assert_not_called()
    self.install.assert_called_once_with()
    self.configure_sources.assert_called_once_with()
def test_configure_ovn_source(self):
    """A derived _ovn_source adds the archive without re-running source."""
    self.patch_target('configure_source')
    self.patch_object(ovn_central.ch_core.hookenv, 'config',
                      return_value={'source': 'fake-source',
                                    'ovn-source': ''})
    self.patch_object(ovn_central.OVNCentralConfigurationAdapter,
                      '_ovn_source',
                      new=mock.PropertyMock())
    self._ovn_source.return_value = 'cloud:focal-ovn-22.03'
    self.patch_object(ovn_central.ch_fetch, 'add_source')
    self.patch_object(ovn_central.ch_fetch, 'apt_update')
    self.target.configure_ovn_source()
    # Only the OVN UCA pocket is added; configure_source is untouched.
    self.add_source.assert_called_once_with('cloud:focal-ovn-22.03')
    self.assertFalse(self.configure_source.called)
def test_states_to_check(self):
    """states_to_check() maps required flags to workload status messages."""
    self.maxDiff = None
    expect = collections.OrderedDict([
        ('ovsdb-peer', [
            ('ovsdb-peer.connected',
             'blocked',
             'Charm requires peers to operate, add more units. A minimum '
             'of 3 is required for HA'),
            ('ovsdb-peer.available',
             'waiting',
             "'ovsdb-peer' incomplete")]),
        ('certificates', [
            ('certificates.available', 'blocked',
             "'certificates' missing"),
            ('certificates.server.certs.available',
             'waiting',
             "'certificates' awaiting server certificate data")]),
    ])
    self.assertDictEqual(self.target.states_to_check(), expect)
def test__default_port_list(self):
    """The default OVSDB listener ports are NB 6641 and SB 6642."""
    expected_ports = [6641, 6642]
    self.assertEqual(expected_ports, self.target._default_port_list())
def test_ports_to_check(self):
    """ports_to_check() delegates to _default_port_list()."""
    port_list_mock = mock.MagicMock()
    self.target._default_port_list = port_list_mock
    self.target.ports_to_check()
    port_list_mock.assert_called_once_with()
def test_cluster_status_mesage(self):
    """Status message reflects DB leadership and northd activity.

    NOTE(review): method name carries a typo ('mesage'); kept as-is so
    the externally visible test identifier is unchanged.
    """
    self.patch_target('cluster_status')
    self.patch_target('is_northd_active')
    # Leader of neither DB, northd inactive: empty message.
    self.cluster_status.side_effect = [
        self.FakeClusterStatus(False),
        self.FakeClusterStatus(False),
    ]
    self.is_northd_active.return_value = False
    self.assertEqual(
        self.target.cluster_status_message(), '')
    # cluster_status is queried once per database (NB first, then SB).
    self.cluster_status.assert_has_calls([
        mock.call('ovnnb_db'),
        mock.call('ovnsb_db'),
    ])
    self.cluster_status.side_effect = [
        self.FakeClusterStatus(True),
        self.FakeClusterStatus(False),
    ]
    self.assertEqual(
        self.target.cluster_status_message(),
        'leader: ovnnb_db')
    self.cluster_status.side_effect = [
        self.FakeClusterStatus(True),
        self.FakeClusterStatus(True),
    ]
    self.assertEqual(
        self.target.cluster_status_message(),
        'leader: ovnnb_db, ovnsb_db')
    # northd active is appended to whatever leadership message exists.
    self.cluster_status.side_effect = [
        self.FakeClusterStatus(False),
        self.FakeClusterStatus(False),
    ]
    self.is_northd_active.return_value = True
    self.assertEqual(
        self.target.cluster_status_message(),
        'northd: active')
    self.cluster_status.side_effect = [
        self.FakeClusterStatus(True),
        self.FakeClusterStatus(False),
    ]
    self.assertEqual(
        self.target.cluster_status_message(),
        'leader: ovnnb_db northd: active')
    self.cluster_status.side_effect = [
        self.FakeClusterStatus(True),
        self.FakeClusterStatus(True),
    ]
    self.assertEqual(
        self.target.cluster_status_message(),
        'leader: ovnnb_db, ovnsb_db northd: active')
def test_enable_services(self):
    """Services are resumed only when the unit is not paused."""
    self.patch_object(ovn_central.ch_core.host, 'service_resume')
    self.target.check_if_paused = mock.MagicMock()
    # While paused, enable_services() must not resume anything.
    self.target.check_if_paused.return_value = ('status', 'message')
    self.target.enable_services()
    self.target.check_if_paused.assert_called_once_with()
    self.assertFalse(self.service_resume.called)
    # Not paused: every charm service is resumed.
    self.target.check_if_paused.return_value = (None, None)
    self.target.enable_services()
    calls = []
    for service in self.target.services:
        calls.append(mock.call(service))
    self.service_resume.assert_has_calls(calls)
def test_run(self):
    """run() wraps subprocess.run with captured, checked, text output."""
    self.patch_object(ovn_central.subprocess, 'run')
    self.patch_object(ovn_central.ch_core.hookenv, 'log')
    self.target.run('some', 'args')
    # Arguments are passed through as a tuple; stderr is folded into
    # stdout and a non-zero exit raises (check=True).
    self.run.assert_called_once_with(
        ('some', 'args'),
        stdout=ovn_central.subprocess.PIPE,
        stderr=ovn_central.subprocess.STDOUT,
        check=True,
        universal_newlines=True)
def test_join_cluster(self):
    """join_cluster() invokes ovsdb-tool with local and remote addresses."""
    self.patch_target('run')
    db_file = '/a/db.file'
    schema = 'aSchema'
    local_addrs = ['ssl:a.b.c.d:1234']
    remote_addrs = ['ssl:e.f.g.h:1234', 'ssl:i.j.k.l:1234']
    self.target.join_cluster(db_file, schema, local_addrs, remote_addrs)
    # Address lists are flattened into individual CLI arguments.
    self.run.assert_called_once_with(
        'ovsdb-tool', 'join-cluster', db_file, schema,
        'ssl:a.b.c.d:1234', 'ssl:e.f.g.h:1234', 'ssl:i.j.k.l:1234')
def test_configure_tls(self):
    """configure_tls() writes the CA bundle and installs the server cert."""
    self.patch_target('get_certs_and_keys')
    self.get_certs_and_keys.return_value = [{
        'cert': 'fakecert',
        'key': 'fakekey',
        'cn': 'fakecn',
        'ca': 'fakeca',
        'chain': 'fakechain',
    }]
    # Intercept the write of the combined CA + chain bundle.
    with mock.patch('builtins.open', create=True) as mocked_open:
        mocked_file = mock.MagicMock(spec=io.FileIO)
        mocked_open.return_value = mocked_file
        self.target.configure_cert = mock.MagicMock()
        self.target.configure_tls()
        mocked_open.assert_called_once_with(
            '/etc/ovn/ovn-central.crt', 'w')
        mocked_file.__enter__().write.assert_called_once_with(
            'fakeca\nfakechain')
        # The cert/key pair is installed under the fixed CN 'host'.
        self.target.configure_cert.assert_called_once_with(
            '/etc/ovn',
            'fakecert',
            'fakekey',
            cn='host')
def test_configure_ovn_listener(self):
    """Listeners are created/updated only on the cluster leader."""
    self.patch_object(ovn_central.ch_ovsdb, 'SimpleOVSDB')
    self.patch_target('run')
    port_map = {6641: {'inactivity_probe': 42},
                6642: {'role': 'ovn-controller'}}
    self.patch_target('cluster_status')
    cluster_status = self.FakeClusterStatus()
    self.cluster_status.return_value = cluster_status
    # Non-leader: no DB access at all.
    cluster_status.is_cluster_leader = False
    self.target.configure_ovn_listener('nb', port_map)
    self.assertFalse(self.SimpleOVSDB.called)
    cluster_status.is_cluster_leader = True
    ovsdb = mock.MagicMock()
    # For each port the connection table is probed twice: first empty
    # (triggering creation via ovn-nbctl), then returning the new row so
    # its settings can be applied.
    ovsdb.connection.find.side_effect = [
        [],
        [{'_uuid': 'fake-uuid'}],
        [],
        [{'_uuid': 'fake-uuid'}],
    ]
    self.SimpleOVSDB.return_value = ovsdb
    self.target.configure_ovn_listener('nb', port_map)
    self.run.assert_has_calls([
        mock.call('ovn-nbctl', '--', '--id=@connection', 'create',
                  'connection', 'target="pssl:6641"', '--', 'add',
                  'NB_Global', '.', 'connections', '@connection'),
        mock.call('ovn-nbctl', '--', '--id=@connection', 'create',
                  'connection', 'target="pssl:6642"', '--', 'add',
                  'NB_Global', '.', 'connections', '@connection'),
    ])
    ovsdb.connection.set.assert_has_calls([
        mock.call('fake-uuid', 'inactivity_probe', 42),
        mock.call('fake-uuid', 'role', 'ovn-controller')
    ])
def test_validate_config(self):
    """Election timer config is valid only inside [min, max] inclusive."""
    self.patch_target('config')
    self.config.__getitem__.return_value = self.target.min_election_timer
    self.assertEqual(self.target.validate_config(), (None, None))
    self.config.__getitem__.return_value = self.target.max_election_timer
    self.assertEqual(self.target.validate_config(), (None, None))
    # One below the minimum / one above the maximum -> blocked status.
    self.config.__getitem__.return_value = (
        self.target.min_election_timer - 1)
    self.assertEqual(self.target.validate_config(), ('blocked', mock.ANY))
    self.config.__getitem__.return_value = (
        self.target.max_election_timer + 1)
    self.assertEqual(self.target.validate_config(), ('blocked', mock.ANY))
def test_configure_ovsdb_election_timer(self):
    """Election timer is adjusted stepwise towards the requested value.

    The OVSDB RAFT implementation only accepts at most a doubling (or
    halving) of the election timer per change, so the charm converges in
    steps; the fake ovn_appctl mirrors the applied value back through
    the fake cluster status.
    """
    # Only 'nb' and 'sb' are valid database designations.
    with self.assertRaises(ValueError):
        self.target.configure_ovsdb_election_timer('aDb', 42)
    self.patch_target('cluster_status')
    self.patch_object(ovn_central.time, 'sleep')
    # Shared mutable state between the fake status and fake appctl.
    _election_timer = 1000

    class FakeClusterStatus(object):

        def __init__(self):
            self.is_cluster_leader = True

        @property
        def election_timer(self):
            # Reads the value most recently applied by fake_ovn_appctl.
            nonlocal _election_timer
            return _election_timer

    def fake_ovn_appctl(db, cmd, **kwargs):
        # Record the timer value passed to cluster/change-election-timer.
        nonlocal _election_timer
        _election_timer = int(cmd[2])

    cluster_status = FakeClusterStatus()
    self.cluster_status.return_value = cluster_status
    self.patch_object(ovn_central.ch_ovn, 'ovn_appctl')
    self.ovn_appctl.side_effect = fake_ovn_appctl
    # Raising 1s -> 42s doubles each step: 2, 4, 8, 16, 32, then 42.
    self.target.configure_ovsdb_election_timer('sb', 42)
    self.ovn_appctl.assert_has_calls([
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '2000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '4000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '8000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '16000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '32000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '42000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False)
    ])
    # Lowering 42s -> 1s halves each step: 21, 10.5, 5.25, 2.625,
    # 1.312, then 1.
    _election_timer = 42000
    self.ovn_appctl.reset_mock()
    self.target.configure_ovsdb_election_timer('sb', 1)
    self.ovn_appctl.assert_has_calls([
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '21000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '10500'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '5250'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '2625'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '1312'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
        mock.call(
            'ovnsb_db',
            ('cluster/change-election-timer', 'OVN_Southbound', '1000'),
            rundir='/var/run/ovn',
            use_ovs_appctl=False),
    ])
def test_configure_ovn(self):
    """configure_ovn() fans out to listener and election timer config."""
    self.patch_target('config')
    self.config.__getitem__.return_value = 42
    self.patch_target('configure_ovn_listener')
    self.patch_target('configure_ovsdb_election_timer')
    self.target.configure_ovn(1, 2, 3)
    self.config.__getitem__.assert_has_calls([
        mock.call('ovsdb-server-inactivity-probe'),
        mock.call('ovsdb-server-election-timer'),
    ])
    # Inactivity probe config (seconds) is applied in milliseconds; the
    # second SB port is restricted to the ovn-controller role.
    self.configure_ovn_listener.assert_has_calls([
        mock.call('nb', {1: {'inactivity_probe': 42000}}),
        mock.call('sb', {2: {'role': 'ovn-controller',
                             'inactivity_probe': 42000}}),
        mock.call('sb', {3: {'inactivity_probe': 42000}}),
    ])
    self.configure_ovsdb_election_timer.assert_has_calls([
        mock.call('nb', 42),
        mock.call('sb', 42),
    ])
def test_initialize_firewall(self):
    """Firewall init enables ufw with allow-by-default policies."""
    self.patch_object(ovn_central, 'ch_ufw')
    self.target.initialize_firewall()
    self.ch_ufw.enable.assert_called_once_with()
    expected_policies = [
        mock.call('allow', 'incoming'),
        mock.call('allow', 'outgoing'),
        mock.call('allow', 'routed'),
    ]
    self.ch_ufw.default_policy.assert_has_calls(expected_policies)
def test_configure_firewall(self):
    """configure_firewall() builds per-port rules and prunes stale ones.

    The argument maps tuples of ports to tuples of allowed source
    addresses (or None for no allowed peers); pre-existing charm-tagged
    'allow in' rules that were not regenerated are deleted by index.
    """
    self.patch_object(ovn_central, 'ch_ufw')
    # Existing state: rule 42 is a stale charm-managed allow that must
    # be removed; rule 51 is the charm-managed default reject.
    self.ch_ufw.status.return_value = [
        (42, {
            'action': 'allow in',
            'from': 'q.r.s.t',
            'comment': 'charm-ovn-central'}),
        (51, {
            'action': 'reject in',
            'from': 'any',
            'comment': 'charm-ovn-central'}),
    ]
    self.target.configure_firewall({
        (1, 2, 3, 4,): ('a.b.c.d', 'e.f.g.h',),
        (1, 2,): ('i.j.k.l', 'm.n.o.p',),
    })
    # Default reject rules are (re)asserted for every configured port.
    self.ch_ufw.modify_access.assert_has_calls([
        mock.call(src=None, dst='any', port=1,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
        mock.call(src=None, dst='any', port=2,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
        mock.call(src=None, dst='any', port=3,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
        mock.call(src=None, dst='any', port=4,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
    ], any_order=True)
    # Allow rules are prepended per (address, port) combination.
    self.ch_ufw.modify_access.assert_has_calls([
        mock.call('a.b.c.d', port=1, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=1, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('a.b.c.d', port=2, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=2, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('a.b.c.d', port=3, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=3, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('a.b.c.d', port=4, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=4, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('i.j.k.l', port=1, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('m.n.o.p', port=1, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('i.j.k.l', port=2, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('m.n.o.p', port=2, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
    ], any_order=True)
    # The stale allow rule (index 42) is deleted.
    self.ch_ufw.modify_access.assert_has_calls([
        mock.call(None, dst=None, action='delete', index=42)
    ])
    self.ch_ufw.reset_mock()
    # Second pass: a port group with no allowed addresses (None) still
    # gets its reject rules, but no allow rules.
    self.target.configure_firewall({
        (1, 2, 3, 4,): ('a.b.c.d', 'e.f.g.h',),
        (1, 2, 5,): None,
    })
    self.ch_ufw.modify_access.assert_has_calls([
        mock.call(src=None, dst='any', port=1,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
        mock.call(src=None, dst='any', port=2,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
        mock.call(src=None, dst='any', port=3,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
        mock.call(src=None, dst='any', port=4,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
        mock.call(src=None, dst='any', port=5,
                  proto='tcp', action='reject',
                  comment='charm-ovn-central'),
    ], any_order=True)
    self.ch_ufw.modify_access.assert_has_calls([
        mock.call('a.b.c.d', port=1, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=1, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('a.b.c.d', port=2, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=2, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('a.b.c.d', port=3, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=3, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('a.b.c.d', port=4, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
        mock.call('e.f.g.h', port=4, proto='tcp', action='allow',
                  prepend=True, comment='charm-ovn-central'),
    ], any_order=True)
    self.ch_ufw.modify_access.assert_has_calls([
        mock.call(None, dst=None, action='delete', index=42)
    ])
def test_render_nrpe(self):
    """render_nrpe() registers init service checks and writes NRPE config."""
    with tempfile.TemporaryDirectory() as dtmp:
        # nrpe helpers derive paths from CHARM_DIR; point it at a tmpdir.
        os.environ['CHARM_DIR'] = dtmp
        self.patch_object(ovn_central.nrpe, 'NRPE')
        self.patch_object(ovn_central.nrpe, 'add_init_service_checks')
        self.target.render_nrpe()
        # Note that this list is valid for Ussuri
        # NOTE(review): the expected call is expressed as
        # mock.call().add_init_service_checks(...) on the patched
        # function itself — unusual shape; presumably matching the
        # recorded call chain. Verify against the charm code if this
        # test is resurrected.
        self.add_init_service_checks.assert_has_calls([
            mock.call().add_init_service_checks(
                mock.ANY,
                ['ovn-northd', 'ovn-ovsdb-server-nb',
                 'ovn-ovsdb-server-sb'],
                mock.ANY
            ),
        ])
        self.NRPE.assert_has_calls([
            mock.call().write(),
        ])
def test_configure_deferred_restarts(self):
    """With auto-restarts enabled, the policy-rc.d shim is installed."""
    self.patch_object(
        ovn_central.ch_core.hookenv,
        'config',
        return_value={'enable-auto-restarts': True})
    self.patch_object(
        ovn_central.ch_core.hookenv,
        'service_name',
        return_value='myapp')
    self.patch_object(
        ovn_central.deferred_events,
        'configure_deferred_restarts')
    self.patch_object(ovn_central.os, 'chmod')
    self.target.configure_deferred_restarts()
    self.configure_deferred_restarts.assert_called_once()
    # Order of the service list is not significant; compare sorted.
    self.assertEqual(
        sorted(self.configure_deferred_restarts.call_args.args[0]),
        sorted(['ovn-central', 'ovn-ovsdb-server-nb', 'ovn-northd',
                'ovn-ovsdb-server-sb']))
    # 493 == 0o755: the policy-rc.d shim must be executable.
    self.chmod.assert_called_once_with(
        '/var/lib/charm/myapp/policy-rc.d',
        493)
def test_configure_deferred_restarts_unsupported(self):
    """Without an 'enable-auto-restarts' config key nothing is set up."""
    self.patch_object(
        ovn_central.ch_core.hookenv,
        'config',
        return_value={})
    self.patch_object(
        ovn_central.deferred_events,
        'configure_deferred_restarts')
    self.target.configure_deferred_restarts()
    self.configure_deferred_restarts.assert_not_called()
def test_assess_exporter_no_channel_installed(self):
    """Empty channel while the snap is installed: the snap is removed."""
    self.patch_object(
        ovn_central.ch_core.hookenv,
        'config',
        return_value={'ovn-exporter-channel': ''})
    self.patch_object(ovn_central.snap, 'is_installed')
    self.patch_object(ovn_central.snap, 'install')
    self.patch_object(ovn_central.snap, 'remove')
    self.patch_object(ovn_central.snap, 'refresh')
    self.is_installed.return_value = True
    self.target.assess_exporter()
    self.remove.assert_called_once_with('prometheus-ovn-exporter')
    self.install.assert_not_called()
    self.refresh.assert_not_called()
def test_assess_exporter_no_channel_not_installed(self):
    """Empty channel and snap absent: assess_exporter is a no-op."""
    self.patch_object(
        ovn_central.ch_core.hookenv,
        'config',
        return_value={'ovn-exporter-channel': ''})
    # Patch the whole snap management surface in one sweep.
    for snap_fn in ('is_installed', 'install', 'remove', 'refresh'):
        self.patch_object(ovn_central.snap, snap_fn)
    self.is_installed.return_value = False
    self.target.assess_exporter()
    self.install.assert_not_called()
    self.refresh.assert_not_called()
    self.remove.assert_not_called()
def test_assess_exporter_fresh_install(self):
self.patch_object(
ovn_central.ch_core.hookenv,
'config',
return_value={'ovn-exporter-channel': 'stable'})
self.patch_object(ovn_central.snap, 'is_installed')
self.patch_object(ovn_central.snap, 'install')
self.patch_object(ovn_central.snap, 'remove')
self.patch_object(ovn_central.snap, 'refresh')
self.is_installed.return_value = False
self.target.assess_exporter()
self.install.assert_called_once_with(
'prometheus-ovn-exporter',
channel='stable')
self.remove.assert_not_called()
self.refresh.assert_not_called()
def test_assess_exporter_refresh(self):
self.patch_object(
ovn_central.ch_core.hookenv,
'config',
return_value={'ovn-exporter-channel': 'stable'})
self.patch_object(ovn_central.snap, 'is_installed')
self.patch_object(ovn_central.snap, 'install')
self.patch_object(ovn_central.snap, 'remove')
self.patch_object(ovn_central.snap, 'refresh')
self.is_installed.return_value = True
self.target.assess_exporter()
self.refresh.assert_called_once_with(
'prometheus-ovn-exporter',
channel='stable')
self.install.assert_not_called()
self.remove.assert_not_called()
def test_cluster_leave_ok(self):
"""Test successfully leaving OVN cluster."""
self.patch_object(
ovn_central.ch_ovn,
'ovn_appctl'
)
expected_calls = [
mock.call("ovnsb_db", ("cluster/leave", "OVN_Southbound")),
mock.call("ovnnb_db", ("cluster/leave", "OVN_Northbound")),
]
self.target.leave_cluster()
ovn_central.ch_ovn.ovn_appctl.assert_has_calls(expected_calls)
def test_cluster_leave_fail(self):
"""Test failure during leaving of OVN cluster."""
self.patch_object(
ovn_central.ch_ovn,
'ovn_appctl'
)
self.patch_object(
ovn_central.ch_core.hookenv,
'log'
)
expected_err = ovn_central.subprocess.CalledProcessError(1, "foo")
ovn_central.ch_ovn.ovn_appctl.side_effect = expected_err
error_msg = (
"Failed to leave {} cluster. You can use 'cluster-kick' juju "
"action on remaining units to remove lingering cluster members."
)
expected_ovn_calls = [
mock.call("ovnsb_db", ("cluster/leave", "OVN_Southbound")),
mock.call("ovnnb_db", ("cluster/leave", "OVN_Northbound")),
]
expected_log_calls = [
mock.call(
error_msg.format("Southbound"),
ovn_central.ch_core.hookenv.ERROR
),
mock.call(
error_msg.format("Northbound"),
ovn_central.ch_core.hookenv.ERROR
),
]
self.target.leave_cluster()
ovn_central.ch_ovn.ovn_appctl.assert_has_calls(expected_ovn_calls)
ovn_central.ch_core.hookenv.log.assert_has_calls(expected_log_calls,
any_order=True)
def test_server_in_cluster(self):
"""Test detection of server in cluster."""
ipv4_in_cluster = "10.0.0.10"
ipv6_in_cluster = "2001:db8:3333:4444:5555:6666:7777:8888"
not_in_cluster = "10.0.0.1"
servers = [
("aa11", "ssl:{}:6644".format(ipv4_in_cluster)),
("bb22", "ssl:{}:6644".format(ipv6_in_cluster)),
("cc33", "ssl:10.0.0.12:6644"),
]
cluster_status = self.FakeClusterStatus(is_cluster_leader=True)
cluster_status.servers = servers
# Find expected IPv4 address in server list
self.assertTrue(
self.target.is_server_in_cluster(ipv4_in_cluster, cluster_status)
)
# Find expected IPv6 address in server list
self.assertTrue(
self.target.is_server_in_cluster(ipv6_in_cluster, cluster_status)
)
# Don't find unexpected IP in server list
self.assertFalse(
self.target.is_server_in_cluster(not_in_cluster, cluster_status)
)
def test_wait_for_server_leave_fail(self):
"""Test waiting until server leaves cluster.
This test verifies scenario when server does not leave cluster
before timeout.
"""
self.patch_object(ovn_central.time, "sleep")
self.patch_target("is_server_in_cluster", return_value=True)
self.patch_target("cluster_status")
timeout = 30
expected_retries = 6
expected_calls = []
for i in range(expected_retries):
expected_calls.append(mock.call("ovnsb_db"))
expected_calls.append(mock.call("ovnnb_db"))
result = self.target.wait_for_server_leave("10.0.0.1", timeout)
self.assertFalse(result)
self.target.cluster_status.assert_has_calls(expected_calls)
def test_wait_for_server_leave_true(self):
"""Test waiting until server leaves cluster.
This test verifies scenario when server successfully leaves
cluster during the timeout period.
"""
self.patch_object(ovn_central.time, "sleep")
self.patch_target("is_server_in_cluster", return_value=False)
self.patch_target("cluster_status")
timeout = 30
expected_calls = [
mock.call("ovnsb_db"),
mock.call("ovnnb_db"),
]
result = self.target.wait_for_server_leave("10.0.0.1", timeout)
self.assertTrue(result)
self.target.cluster_status.assert_has_calls(expected_calls)

View File

@@ -1,374 +0,0 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import reactive.ovn_central_handlers as handlers
import charms_openstack.test_utils as test_utils
class TestRegisteredHooks(test_utils.TestRegisteredHooks):
    """Verify reactive handler registration for the ovn-central charm."""

    def test_hooks(self):
        """Assert each handler is registered with the expected flag guards.

        ``hook_set`` maps each reactive decorator name to a mapping of
        handler name -> flags that must appear in that decorator for the
        handler.
        """
        # Handlers registered by charms.openstack defaults rather than by
        # this charm's handler module.
        defaults = [
            'config.changed',
            'charm.default-select-release',
            'update-status',
            'upgrade-charm',
        ]
        hook_set = {
            'when_none': {
                'announce_leader_ready': ('is-update-status-hook',
                                          'leadership.set.nb_cid',
                                          'leadership.set.sb_cid',
                                          'coordinator.granted.upgrade',
                                          'coordinator.requested.upgrade',
                                          'config.changed.source',
                                          'config.changed.ovn-source'),
                'configure_firewall': ('is-update-status-hook',
                                       'endpoint.ovsdb-peer.departed'),
                'enable_default_certificates': ('is-update-status-hook',
                                                'leadership.is_leader',),
                'initialize_firewall': ('is-update-status-hook',
                                        'charm.firewall_initialized',),
                'initialize_ovsdbs': ('is-update-status-hook',
                                      'leadership.set.nb_cid',
                                      'leadership.set.sb_cid',
                                      'coordinator.granted.upgrade',
                                      'coordinator.requested.upgrade'),
                'maybe_do_upgrade': ('is-update-status-hook',),
                'maybe_request_upgrade': ('is-update-status-hook',),
                'publish_addr_to_clients': ('is-update-status-hook',),
                'render': ('is-update-status-hook',
                           'coordinator.granted.upgrade',
                           'coordinator.requested.upgrade',
                           'config.changed.source',
                           'config.changed.ovn-source',
                           'endpoint.ovsdb-peer.departed'),
                'configure_nrpe': ('charm.paused', 'is-update-status-hook',),
                'stamp_fresh_deployment': ('charm.installed',
                                           'leadership.set.install_stamp'),
                'stamp_upgraded_deployment': ('is-update-status-hook',
                                              'leadership.set.install_stamp',
                                              'leadership.set.upgrade_stamp'),
                'enable_install': ('charm.installed', 'is-update-status-hook'),
                'reassess_exporter': ('is-update-status-hook',),
                'maybe_clear_metrics_endpoint': ('is-update-status-hook',),
                'handle_metrics_endpoint': ('is-update-status-hook',),
            },
            'when': {
                'announce_leader_ready': ('config.rendered',
                                          'certificates.connected',
                                          'certificates.available',
                                          'leadership.is_leader',
                                          'ovsdb-peer.connected',),
                'certificates_in_config_tls': ('config.rendered',
                                               'config.changed',),
                'configure_firewall': ('ovsdb-peer.available',),
                'enable_default_certificates': ('charm.installed',),
                'initialize_ovsdbs': ('charm.installed',
                                      'leadership.is_leader',
                                      'ovsdb-peer.connected',),
                'maybe_do_upgrade': ('ovsdb-peer.available',
                                     'coordinator.granted.upgrade',),
                'maybe_request_upgrade': ('ovsdb-peer.available',),
                'publish_addr_to_clients': ('ovsdb-peer.available',
                                            'leadership.set.nb_cid',
                                            'leadership.set.sb_cid',
                                            'certificates.connected',
                                            'certificates.available',),
                'render': ('ovsdb-peer.available',
                           'leadership.set.nb_cid',
                           'leadership.set.sb_cid',
                           'certificates.connected',
                           'certificates.available',),
                'configure_nrpe': ('config.rendered',),
                'stamp_fresh_deployment': ('leadership.is_leader',),
                'stamp_upgraded_deployment': ('charm.installed',
                                              'leadership.is_leader'),
                'handle_metrics_endpoint': (
                    'charm.installed',
                    'metrics-endpoint.available',
                    'snap.installed.prometheus-ovn-exporter',
                ),
                'reassess_exporter': (
                    'charm.installed',
                ),
                'maybe_clear_metrics_endpoint': (
                    'charm.installed',
                    'metrics-endpoint.available',
                ),
                'handle_cluster_downscale': ('endpoint.ovsdb-peer.departed',),
            },
            'when_any': {
                'configure_nrpe': ('config.changed.nagios_context',
                                   'config.changed.nagios_servicegroups',
                                   'endpoint.nrpe-external-master.changed',
                                   'nrpe-external-master.available',),
                'enable_install': ('leadership.set.install_stamp',
                                   'leadership.set.upgrade_stamp'),
                'maybe_request_upgrade': ('config.changed.source',
                                          'config.changed.ovn-source'),
                'reassess_exporter': (
                    'config.changed.ovn-exporter-channel',
                    'snap.installed.prometheus-ovn-exporter'),
            },
            'when_not': {
                'configure_deferred_restarts': ('is-update-status-hook',),
                'maybe_clear_metrics_endpoint': (
                    'snap.installed.prometheus-ovn-exporter',
                ),
            },
            'hook': {
                'leave_cluster': ('certificates-relation-broken',),
            },
        }
        # test that the hooks were registered via the
        # reactive.ovn_handlers
        self.registered_hooks_test_helper(handlers, hook_set, defaults)
class TestOvnCentralHandlers(test_utils.PatchHelper):
def setUp(self):
super().setUp()
self.target = mock.MagicMock()
self.patch_object(handlers.charm, 'provide_charm_instance',
new=mock.MagicMock())
self.provide_charm_instance().__enter__.return_value = \
self.target
self.provide_charm_instance().__exit__.return_value = None
def test_initialize_firewall(self):
self.patch_object(handlers.reactive, 'set_flag')
handlers.initialize_firewall()
self.target.initialize_firewall.assert_called_once_with()
self.set_flag.assert_called_once_with('charm.firewall_initialized')
def test_announce_leader_ready(self):
self.patch_object(handlers.reactive, 'endpoint_from_name')
self.patch_object(handlers.reactive, 'endpoint_from_flag')
self.patch_object(handlers.leadership, 'leader_set')
ovsdb = mock.MagicMock()
self.endpoint_from_name.return_value = ovsdb
ovsdb_peer = mock.MagicMock()
self.endpoint_from_flag.return_value = ovsdb_peer
cluster_status = mock.MagicMock()
cluster_status.cluster_id = 'fake-uuid'
self.target.cluster_status.return_value = cluster_status
handlers.announce_leader_ready()
ovsdb_peer.publish_cluster_local_addr.assert_called_once_with()
self.target.configure_ovn.assert_called_once_with(
ovsdb_peer.db_nb_port,
ovsdb.db_sb_port,
ovsdb_peer.db_sb_admin_port)
self.leader_set.assert_called_once_with(
{
'ready': True,
'nb_cid': 'fake-uuid',
'sb_cid': 'fake-uuid',
})
def test_initialize_ovsdbs(self):
self.patch_object(handlers.reactive, 'endpoint_from_flag')
self.patch_object(handlers.charm, 'use_defaults')
self.patch_object(handlers.reactive, 'set_flag')
ovsdb_peer = mock.MagicMock()
self.endpoint_from_flag.return_value = ovsdb_peer
handlers.initialize_ovsdbs()
self.target.render_with_interfaces.assert_called_once_with(
[ovsdb_peer])
self.target.enable_services.assert_called_once_with()
self.use_defaults.assert_called_once_with('certificates.available')
self.set_flag.assert_called_once_with('config.rendered')
self.target.assess_status()
def test_enable_default_certificates(self):
self.patch_object(handlers.charm, 'use_defaults')
handlers.enable_default_certificates()
self.use_defaults.assert_called_once_with('certificates.available')
    def test_configure_firewall(self):
        """Firewall rules cover peers and, when present, CMS clients.

        Two scenarios: first without an ovsdb-cms endpoint (client-facing
        ports map to None), then with one (client-facing ports map to the
        CMS remote addresses).
        """
        self.patch_object(handlers.reactive, 'endpoint_from_flag')
        ovsdb_peer = mock.MagicMock()
        # First scenario: no ovsdb-cms endpoint connected.
        self.endpoint_from_flag.side_effect = (ovsdb_peer, None)
        handlers.configure_firewall()
        self.endpoint_from_flag.assert_has_calls([
            mock.call('ovsdb-peer.available'),
            mock.call('ovsdb-cms.connected'),
        ])
        # Keys are tuples of ports; values are the allowed remote addrs.
        self.target.configure_firewall.assert_called_once_with({
            (ovsdb_peer.db_nb_port,
             ovsdb_peer.db_sb_admin_port,
             ovsdb_peer.db_sb_cluster_port,
             ovsdb_peer.db_nb_cluster_port,):
                ovsdb_peer.cluster_remote_addrs,
            (ovsdb_peer.db_nb_port,
             ovsdb_peer.db_sb_admin_port,): None,
        })
        self.target.assess_status.assert_called_once_with()
        self.target.configure_firewall.reset_mock()
        # Second scenario: ovsdb-cms endpoint provides client addresses.
        ovsdb_cms = mock.MagicMock()
        self.endpoint_from_flag.side_effect = (ovsdb_peer, ovsdb_cms)
        handlers.configure_firewall()
        self.target.configure_firewall.assert_called_once_with({
            (ovsdb_peer.db_nb_port,
             ovsdb_peer.db_sb_admin_port,
             ovsdb_peer.db_sb_cluster_port,
             ovsdb_peer.db_nb_cluster_port,):
                ovsdb_peer.cluster_remote_addrs,
            (ovsdb_peer.db_nb_port,
             ovsdb_peer.db_sb_admin_port,): ovsdb_cms.client_remote_addrs,
        })
def test_publish_addr_to_clients(self):
self.patch_object(handlers.reactive, 'endpoint_from_flag')
ovsdb_peer = mock.MagicMock()
ovsdb_peer.cluster_local_addr = mock.PropertyMock().return_value = (
'a.b.c.d')
ovsdb = mock.MagicMock()
ovsdb_cms = mock.MagicMock()
self.endpoint_from_flag.side_effect = [ovsdb_peer, ovsdb, ovsdb_cms]
handlers.publish_addr_to_clients()
ovsdb.publish_cluster_local_addr.assert_called_once_with('a.b.c.d')
ovsdb_cms.publish_cluster_local_addr.assert_called_once_with('a.b.c.d')
    def test_render(self):
        """Render config, join both DB clusters and flag rendered config.

        Runs the handler twice: first with services not yet enabled (no
        'config.rendered' flag expected), then with services enabled.
        """
        self.patch_object(handlers.reactive, 'endpoint_from_name')
        self.patch_object(handlers.reactive, 'endpoint_from_flag')
        self.patch_object(handlers.reactive, 'set_flag')
        ovsdb_peer = mock.MagicMock()
        # re-using the same connection strings for both NB and SB DBs here,
        # the implementation detail is unit tested in the interface
        connection_strs = ('ssl:a.b.c.d:1234',
                          'ssl:e.f.g.h:1234',
                          'ssl:i.j.k.l:1234',)
        ovsdb_peer.db_connection_strs.return_value = connection_strs
        self.endpoint_from_flag.return_value = ovsdb_peer
        # First pass: services not enabled -> flag must not be set.
        self.target.enable_services.return_value = False
        handlers.render()
        self.endpoint_from_flag.assert_called_once_with('ovsdb-peer.available')
        self.target.render_with_interfaces.assert_called_once_with(
            [ovsdb_peer])
        self.target.join_cluster.assert_has_calls([
            mock.call('ovnnb_db.db',
                      'OVN_Northbound',
                      connection_strs,
                      connection_strs),
            mock.call('ovnsb_db.db',
                      'OVN_Southbound',
                      connection_strs,
                      connection_strs),
        ])
        self.target.assess_status.assert_called_once_with()
        # Second pass: services enabled -> 'config.rendered' must be set.
        self.target.enable_services.return_value = True
        handlers.render()
        self.set_flag.assert_called_once_with('config.rendered')
def test_handle_cluster_downscale_leaving(self):
"""Test actions during departure of a peer unit.
This scenario tests actions of a unit that is departing the cluster.
"""
self.patch_object(handlers.reactive, 'is_flag_set')
self.is_flag_set.side_effect = [False, True]
self.patch_object(handlers.reactive, 'set_flag')
unit_name = 'ovn-central/3'
self.patch_object(
handlers.hookenv,
'departing_unit',
return_value=unit_name
)
self.patch_object(
handlers.hookenv,
'local_unit',
return_value=unit_name
)
handlers.handle_cluster_downscale()
self.target.leave_cluster.assert_called_once_with()
self.set_flag.assert_called_once_with('ovsdb-peer.left_cluster')
# subsequent calls do not trigger leave_cluster_calls()
handlers.handle_cluster_downscale()
self.target.leave_cluster.assert_called_once_with()
# unit that is leaving does not attempt to wait for remote
# unit to leave cluster.
self.target.wait_for_server_leave.assert_not_called()
    def test_handle_cluster_downscale_not_leaving(self):
        """Test actions during departure of a peer unit.

        This scenario tests actions of a unit whose peer is departing the
        cluster: the remaining unit waits for the peer's server to leave
        and logs success or failure accordingly.
        """
        self.patch_object(handlers.reactive, 'is_flag_set', return_value=False)
        self.patch_object(handlers.reactive, 'endpoint_from_name')
        self.patch_object(handlers.reactive, 'set_flag')
        self.patch_object(handlers, 'configure_firewall')
        self.patch_object(handlers.hookenv, 'log')
        local_unit_name = 'ovn-central/0'
        departing_unit_name = 'ovn-central/3'
        departing_unit_ip = '10.0.0.10'
        departing_unit = mock.MagicMock()
        # Address the departing peer published on the relation.
        departing_unit.received = {'bound-address': departing_unit_ip}
        self.patch_object(
            handlers.hookenv,
            'departing_unit',
            return_value=departing_unit_name
        )
        self.patch_object(
            handlers.hookenv,
            'local_unit',
            return_value=local_unit_name
        )
        ovsdb_peer = mock.MagicMock()
        ovsdb_peer.all_departed_units = {departing_unit_name: departing_unit}
        self.endpoint_from_name.return_value = ovsdb_peer
        ok_msg = ("Departing unit {} successfully disconnected from "
                  "cluster.".format(departing_unit_name)
                  )
        fail_msg = (
            "Departing unit {} failed to remove itself from cluster. "
            "Please use action `cluster-kick` to remove straggling "
            "servers from OVN cluster.".format(departing_unit_name)
        )
        # Test departing unit successfully leaving
        self.target.wait_for_server_leave.return_value = True
        handlers.handle_cluster_downscale()
        self.target.wait_for_server_leave.assert_called_once_with(
            departing_unit_ip
        )
        self.configure_firewall.assert_called_once_with()
        self.log.assert_called_once_with(ok_msg, handlers.hookenv.INFO)
        # Reset mocks
        self.target.wait_for_server_leave.reset_mock()
        self.configure_firewall.reset_mock()
        self.log.reset_mock()
        # Test departing unit failed to leave
        self.target.wait_for_server_leave.return_value = False
        handlers.handle_cluster_downscale()
        self.target.wait_for_server_leave.assert_called_once_with(
            departing_unit_ip
        )
        self.configure_firewall.assert_called_once_with()
        self.log.assert_called_once_with(fail_msg, handlers.hookenv.WARNING)