Merge 'next' in and fix lint
commit 90e8f8a852
@@ -6,6 +6,7 @@ include:
    - contrib.openstack
    - contrib.hahelpers
    - contrib.network
    - contrib.python.packages
    - contrib.storage.linux
    - payload.execd
    - contrib.charmsupport
config.yaml (19 changes)
@@ -8,6 +8,7 @@ options:
      .
      ovs - OpenVSwitch
      nvp|nsx - Nicira NVP/VMware NSX
      n1kv - Cisco N1kv
  ext-port:
    type: string
    default:
@@ -16,6 +17,12 @@ options:
      traffic to the external public network. Valid values are either MAC
      addresses (in which case only MAC addresses for interfaces without an IP
      address already assigned will be used), or interfaces (eth0)
  data-port:
    type: string
    default:
    description: |
      The data port will be added to br-data and will allow usage of flat or VLAN
      network types with Neutron.
  openstack-origin:
    type: string
    default: distro
@@ -83,6 +90,12 @@ options:
      within the cloud. This is useful in deployments where its not
      possible to increase MTU on switches and physical servers to
      accomodate the packet overhead of using GRE tunnels.
  enable-l3-agent:
    type: boolean
    default: True
    description: |
      Optional configuration to support use of linux router
      Note that this is used only for Cisco n1kv plugin.
  database-user:
    default: nova
    type: string
@@ -112,3 +125,9 @@ options:
      .
      This network will be used for tenant network traffic in overlay
      networks.
  sysctl:
    type: string
    default:
    description: |
      YAML-formatted associative array of sysctl key/value pairs to be set
      persistently e.g. '{ kernel.pid_max : 4194303 }'.
@@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys

try:
    import six  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # flake8: noqa

try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa
@@ -13,9 +13,10 @@ clustering-related helpers.

import subprocess
import os

from socket import gethostname as get_unit_hostname

import six

from charmhelpers.core.hookenv import (
    log,
    relation_ids,
@@ -77,7 +78,7 @@ def is_crm_leader(resource):
        "show", resource
    ]
    try:
        status = subprocess.check_output(cmd)
        status = subprocess.check_output(cmd).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
    else:
@@ -150,34 +151,42 @@ def https():
    return False


def determine_api_port(public_port):
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    i = 0
    if len(peer_units()) > 0 or is_clustered():
    if singlenode_mode:
        i += 1
    elif len(peer_units()) > 0 or is_clustered():
        i += 1
    if https():
        i += 1
    return public_port - (i * 10)


def determine_apache_port(public_port):
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    i = 0
    if len(peer_units()) > 0 or is_clustered():
    if singlenode_mode:
        i += 1
    elif len(peer_units()) > 0 or is_clustered():
        i += 1
    return public_port - (i * 10)

@@ -197,7 +206,7 @@ def get_hacluster_config():
    for setting in settings:
        conf[setting] = config_get(setting)
    missing = []
    [missing.append(s) for s, v in conf.iteritems() if v is None]
    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
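
Note: determine_api_port() and determine_apache_port() above shift the advertised port down by 10 for each layer sitting in front of the API (haproxy, plus apache when HTTPS is in use); the new singlenode_mode flag forces the haproxy offset even when only one unit is deployed. A minimal standalone sketch of that arithmetic, with 9696 used only as an assumed example public port:

    # Sketch of the offset logic in determine_api_port(); 9696 is an assumed example port.
    def api_port(public_port, clustered=False, singlenode_mode=False, https=False):
        i = 0
        if singlenode_mode or clustered:
            i += 1      # haproxy takes the public port; the API moves down by 10
        if https:
            i += 1      # apache terminates TLS in front of haproxy; move down again
        return public_port - (i * 10)

    print(api_port(9696))                                    # 9696
    print(api_port(9696, singlenode_mode=True))              # 9686
    print(api_port(9696, singlenode_mode=True, https=True))  # 9676
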
@@ -1,15 +1,12 @@
import glob
import re
import subprocess
import sys

from functools import partial

from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    WARNING,
    ERROR,
    log
)

@@ -34,31 +31,28 @@ def _validate_cidr(network):
                         network)


def no_ip_found_error_out(network):
    errmsg = ("No IP address found in network: %s" % network)
    raise ValueError(errmsg)


def get_address_in_network(network, fallback=None, fatal=False):
    """
    Get an IPv4 or IPv6 address within the network from the host.
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).

    """

    def not_found_error_out():
        log("No IP address found in network: %s" % network,
            level=ERROR)
        sys.exit(1)

    if network is None:
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            if fatal:
                not_found_error_out()
            else:
                return None
        return None

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
@@ -70,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)

    if network.version == 6 and netifaces.AF_INET6 in addresses:
        for addr in addresses[netifaces.AF_INET6]:
            if not addr['addr'].startswith('fe80'):
@@ -82,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False):
        return fallback

    if fatal:
        not_found_error_out()
        no_ip_found_error_out(network)

    return None


def is_ipv6(address):
    '''Determine whether provided address is IPv6 or not'''
    """Determine whether provided address is IPv6 or not."""
    try:
        address = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False
    else:
        return address.version == 6

    return address.version == 6


def is_address_in_network(network, address):
@@ -113,11 +108,13 @@ def is_address_in_network(network, address):
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    if address in network:
        return True
    else:
@@ -147,6 +144,7 @@ def _get_for_address(address, key):
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
@@ -160,41 +158,42 @@ def _get_for_address(address, key):
                        return str(cidr).split('/')[1]
                    else:
                        return addr[key]

    return None


get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')


def format_ipv6_addr(address):
    """
    IPv6 needs to be wrapped with [] in url link to parse correctly.
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    if is_ipv6(address):
        address = "[%s]" % address
    else:
        log("Not a valid ipv6 address: %s" % address, level=WARNING)
        address = None
        return "[%s]" % address

    return address
    return None


def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """
    Return the assigned IP address for a given interface, if any, or [].
    """
    """Return the assigned IP address for a given interface, if any."""
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]

    if not exc_list:
        exc_list = []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception('Unknown inet type ' + str(inet_type))
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
@@ -202,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
        for _iface in interfaces:
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)

        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)

        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("%s not found " % (iface))
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []

        else:
            ifaces = [iface]

@@ -221,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))
    return addresses

    return sorted(addresses)


get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')

@@ -241,6 +246,7 @@ def get_iface_from_addr(addr):
            raw = re.match(ll_key, _addr)
            if raw:
                _addr = raw.group(1)

            if _addr == addr:
                log("Address '%s' is configured on iface '%s'" %
                    (addr, iface))
@@ -251,8 +257,9 @@ def get_iface_from_addr(addr):


def sniff_iface(f):
    """If no iface provided, inject net iface inferred from unit private
    address.
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
@@ -295,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd)
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
@@ -317,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
            return addrs

    if fatal:
        raise Exception("Interface '%s' doesn't have a scope global "
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []


def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of bridges on the system or []
    """
    b_rgex = vnic_dir + '/*/bridge'
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
    """Return a list of bridges on the system."""
    b_regex = "%s/*/bridge" % vnic_dir
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]


def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of nics comprising a given bridge on the system or []
    """
    brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
    """Return a list of nics comprising a given bridge on the system."""
    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_regex)]


def is_bridge_member(nic):
    """
    Check if a given nic is a member of a bridge
    """
    """Check if a given nic is a member of a bridge."""
    for bridge in get_bridges():
        if nic in get_bridge_nics(bridge):
            return True

    return False
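
Note: with the change above, format_ipv6_addr() now returns the bracketed address only for valid IPv6 input and None otherwise (logging a WARNING). A small standalone sketch of the same check, assuming python-netaddr is installed; the sample addresses are illustrative only:

    import netaddr

    def bracket_if_ipv6(address):
        # Mirrors the new format_ipv6_addr() behaviour: wrap IPv6 in '[]', else None.
        try:
            if netaddr.IPAddress(address).version == 6:
                return "[%s]" % address
        except netaddr.AddrFormatError:
            pass  # probably a hostname, not an address at all
        return None

    print(bracket_if_ipv6('2001:db8::1'))   # [2001:db8::1]
    print(bracket_if_ipv6('192.168.1.10'))  # None
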
hooks/charmhelpers/contrib/network/ufw.py (new file, 189 lines)
@@ -0,0 +1,189 @@
"""
This module contains helpers to add and remove ufw rules.

Examples:

- open SSH port for subnet 10.0.3.0/24:

    >>> from charmhelpers.contrib.network import ufw
    >>> ufw.enable()
    >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')

- open service by name as defined in /etc/services:

    >>> from charmhelpers.contrib.network import ufw
    >>> ufw.enable()
    >>> ufw.service('ssh', 'open')

- close service by port number:

    >>> from charmhelpers.contrib.network import ufw
    >>> ufw.enable()
    >>> ufw.service('4949', 'close')  # munin
"""

__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"

import re
import os
import subprocess
from charmhelpers.core import hookenv


def is_enabled():
    """
    Check if `ufw` is enabled

    :returns: True if ufw is enabled
    """
    output = subprocess.check_output(['ufw', 'status'],
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})

    m = re.findall(r'^Status: active\n', output, re.M)

    return len(m) >= 1


def enable():
    """
    Enable ufw

    :returns: True if ufw is successfully enabled
    """
    if is_enabled():
        return True

    output = subprocess.check_output(['ufw', 'enable'],
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})

    m = re.findall('^Firewall is active and enabled on system startup\n',
                   output, re.M)
    hookenv.log(output, level='DEBUG')

    if len(m) == 0:
        hookenv.log("ufw couldn't be enabled", level='WARN')
        return False
    else:
        hookenv.log("ufw enabled", level='INFO')
        return True


def disable():
    """
    Disable ufw

    :returns: True if ufw is successfully disabled
    """
    if not is_enabled():
        return True

    output = subprocess.check_output(['ufw', 'disable'],
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})

    m = re.findall(r'^Firewall stopped and disabled on system startup\n',
                   output, re.M)
    hookenv.log(output, level='DEBUG')

    if len(m) == 0:
        hookenv.log("ufw couldn't be disabled", level='WARN')
        return False
    else:
        hookenv.log("ufw disabled", level='INFO')
        return True


def modify_access(src, dst='any', port=None, proto=None, action='allow'):
    """
    Grant access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    :param action: `allow` or `delete`
    """
    if not is_enabled():
        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
        return

    if action == 'delete':
        cmd = ['ufw', 'delete', 'allow']
    else:
        cmd = ['ufw', action]

    if src is not None:
        cmd += ['from', src]

    if dst is not None:
        cmd += ['to', dst]

    if port is not None:
        cmd += ['port', port]

    if proto is not None:
        cmd += ['proto', proto]

    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    (stdout, stderr) = p.communicate()

    hookenv.log(stdout, level='INFO')

    if p.returncode != 0:
        hookenv.log(stderr, level='ERROR')
        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
                                                              p.returncode),
                    level='ERROR')


def grant_access(src, dst='any', port=None, proto=None):
    """
    Grant access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    """
    return modify_access(src, dst=dst, port=port, proto=proto, action='allow')


def revoke_access(src, dst='any', port=None, proto=None):
    """
    Revoke access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    """
    return modify_access(src, dst=dst, port=port, proto=proto, action='delete')


def service(name, action):
    """
    Open/close access to a service

    :param name: could be a service name defined in `/etc/services` or a port
                 number.
    :param action: `open` or `close`
    """
    if action == 'open':
        subprocess.check_output(['ufw', 'allow', name])
    elif action == 'close':
        subprocess.check_output(['ufw', 'delete', 'allow', name])
    else:
        raise Exception(("'{}' not supported, use 'allow' "
                         "or 'delete'").format(action))
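
Note: modify_access() above simply assembles a ufw command line, runs it and logs the output. A standalone sketch of that assembly, using the subnet from the module docstring example:

    def build_ufw_cmd(src, dst='any', port=None, proto=None, action='allow'):
        # Mirrors the command built by modify_access() above.
        cmd = ['ufw', 'delete', 'allow'] if action == 'delete' else ['ufw', action]
        if src is not None:
            cmd += ['from', src]
        if dst is not None:
            cmd += ['to', dst]
        if port is not None:
            cmd += ['port', port]
        if proto is not None:
            cmd += ['proto', proto]
        return ' '.join(cmd)

    print(build_ufw_cmd('10.0.3.0/24', port='22', proto='tcp'))
    # ufw allow from 10.0.3.0/24 to any port 22 proto tcp
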
@@ -1,3 +1,4 @@
import six
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)
@@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _get_openstack_release(self):
@@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client

import six

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
)
@@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
           expected service catalog endpoints.
           """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
        for k, v in six.iteritems(expected):
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
File diff suppressed because it is too large
@@ -2,21 +2,19 @@ from charmhelpers.core.hookenv import (
    config,
    unit_get,
)

from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    is_address_in_network,
    is_ipv6,
    get_ipv6_addr,
)

from charmhelpers.contrib.hahelpers.cluster import is_clustered

PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

_address_map = {
ADDRESS_MAP = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address'
@@ -33,16 +31,14 @@ _address_map = {


def canonical_url(configs, endpoint_type=PUBLIC):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :configs OSTemplateRenderer: A config tempating object to inspect for
                                 a complete https context.
    :endpoint_type str: The endpoint type to resolve.

    :returns str: Base URL for services on the current service unit.
    '''
    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :param returns: str base URL for services on the current service unit.
    """
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
@@ -53,27 +49,45 @@ def canonical_url(configs, endpoint_type=PUBLIC):


def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoing type
    """
    resolved_address = None
    if is_clustered():
        if config(_address_map[endpoint_type]['config']) is None:
            # Assume vip is simple and pass back directly
            resolved_address = config('vip')
    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    clustered = is_clustered()
    if clustered:
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            resolved_address = vips[0]
        else:
            for vip in config('vip').split():
                if is_address_in_network(
                        config(_address_map[endpoint_type]['config']),
                        vip):
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
        resolved_address = get_address_in_network(
            config(_address_map[endpoint_type]['config']), fallback_addr)
            fallback_addr = unit_get(net_fallback)

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    else:
        return resolved_address
    raise ValueError("Unable to resolve a suitable IP address based on "
                     "charm state and configuration. (net_type=%s, "
                     "clustered=%s)" % (net_type, clustered))

    return resolved_address
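
Note: the reworked resolve_address() above splits the configured vips and picks the one that falls inside the network configured for the requested endpoint type. A standalone sketch of that selection, assuming python-netaddr is available; the vip and network values are illustrative assumptions, not charm defaults:

    import netaddr

    vips = '10.5.0.100 192.168.21.100'.split()   # assumed 'vip' config value
    net_addr = '10.5.0.0/24'                     # assumed 'os-public-network' value

    resolved = None
    for vip in vips:
        if netaddr.IPAddress(vip) in netaddr.IPNetwork(net_addr):
            resolved = vip
            break

    print(resolved)  # 10.5.0.100 - the vip that sits on the public network
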
@@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
    """Ensures correct linux-headers for running kernel are installed,
    for building DKMS package"""
    kver = check_output(['uname', '-r']).strip()
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
    return 'linux-headers-%s' % kver

QUANTUM_CONF_DIR = '/etc/quantum'
@@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum'

def kernel_version():
    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
    kver = check_output(['uname', '-r']).strip()
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
    kver = kver.split('.')
    return (int(kver[0]), int(kver[1]))

@@ -138,10 +138,25 @@ def neutron_plugins():
                                  relation_prefix='neutron',
                                  ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [['neutron-plugin-cisco']],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-cisco']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-cisco'],
            'server_services': ['neutron-server']
        },
        'Calico': {
            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['calico-compute', 'bird', 'neutron-dhcp-agent']],
            'server_packages': ['neutron-server', 'calico-control'],
            'server_services': ['neutron-server']
        }
    }
    if release >= 'icehouse':
@@ -162,7 +177,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        log('Error: Network manager does not support plugins.')
        log("Network manager '%s' does not support plugins." % (manager),
            level=ERROR)
        raise Exception

    try:
@@ -1,13 +1,13 @@
import os

from charmhelpers.fetch import apt_install
import six

from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    log,
    ERROR,
    INFO
)

from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES

try:
@@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release):
    order by OpenStack release.
    """
    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                 for rel in OPENSTACK_CODENAMES.itervalues()]
                 for rel in six.itervalues(OPENSTACK_CODENAMES)]

    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
@@ -258,7 +258,7 @@ class OSConfigRenderer(object):
        """
        Write out all registered config files.
        """
        [self.write(k) for k in self.templates.iterkeys()]
        [self.write(k) for k in six.iterkeys(self.templates)]

    def set_release(self, openstack_release):
        """
@@ -275,5 +275,5 @@ class OSConfigRenderer(object):
        '''
        interfaces = []
        [interfaces.extend(i.complete_contexts())
         for i in self.templates.itervalues()]
         for i in six.itervalues(self.templates)]
        return interfaces
@@ -2,6 +2,7 @@

# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from functools import wraps

import subprocess
import json
@@ -9,11 +10,13 @@ import os
import socket
import sys

import six
import yaml

from charmhelpers.core.hookenv import (
    config,
    log as juju_log,
    charm_dir,
    ERROR,
    INFO,
    relation_ids,
    relation_set
@@ -30,7 +33,8 @@ from charmhelpers.contrib.network.ip import (
)

from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device

@@ -112,7 +116,7 @@ def get_os_codename_install_source(src):

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        for k, v in OPENSTACK_CODENAMES.iteritems():
        for k, v in six.iteritems(OPENSTACK_CODENAMES):
            if v in src:
                return v

@@ -133,7 +137,7 @@ def get_os_codename_version(vers):

def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    for k, v in OPENSTACK_CODENAMES.iteritems():
    for k, v in six.iteritems(OPENSTACK_CODENAMES):
        if v == codename:
            return k
    e = 'Could not derive OpenStack version for '\
@@ -193,7 +197,7 @@ def get_os_version_package(pkg, fatal=True):
    else:
        vers_map = OPENSTACK_CODENAMES

    for version, cname in vers_map.iteritems():
    for version, cname in six.iteritems(vers_map):
        if cname == codename:
            return version
    # e = "Could not determine OpenStack version for package: %s" % pkg
@@ -317,7 +321,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
        rc_script.write(
            "#!/bin/bash\n")
        [rc_script.write('export %s=%s\n' % (u, p))
         for u, p in env_vars.iteritems() if u != "script_path"]
         for u, p in six.iteritems(env_vars) if u != "script_path"]


def openstack_upgrade_available(package):
@@ -350,8 +354,8 @@ def ensure_block_device(block_device):
    '''
    _none = ['None', 'none', None]
    if (block_device in _none):
        error_out('prepare_storage(): Missing required input: '
                  'block_device=%s.' % block_device, level=ERROR)
        error_out('prepare_storage(): Missing required input: block_device=%s.'
                  % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
@@ -367,8 +371,7 @@ def ensure_block_device(block_device):
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev,
                  level=ERROR)
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev

@@ -417,7 +420,7 @@ def ns_query(address):

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, basestring):
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None
@@ -468,6 +471,14 @@ def get_hostname(address, fqdn=True):
        return result.split('.')[0]


def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
    mm_map = {}
    if os.path.isfile(mm_file):
        with open(mm_file, 'r') as f:
            mm_map = json.load(f)
    return mm_map


def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    hosts = get_ipv6_addr(dynamic_only=False)
@@ -477,10 +488,132 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        keys = kwargs.keys()
        for key in keys:
        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
            del kwargs[key]

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)


def os_requires_version(ostack_release, pkg):
    """
    Decorator for hook to specify minimum supported release
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args):
            if os_release(pkg) < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            f(*args)
        return wrapped_f
    return wrap


def git_install_requested():
    """Returns true if openstack-origin-git is specified."""
    return config('openstack-origin-git') != "None"


requirements_dir = None


def git_clone_and_install(file_name, core_project):
    """Clone/install all OpenStack repos specified in yaml config file."""
    global requirements_dir

    if file_name == "None":
        return

    yaml_file = os.path.join(charm_dir(), file_name)

    # clone/install the requirements project first
    installed = _git_clone_and_install_subset(yaml_file,
                                              whitelist=['requirements'])
    if 'requirements' not in installed:
        error_out('requirements git repository must be specified')

    # clone/install all other projects except requirements and the core project
    blacklist = ['requirements', core_project]
    _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
                                  update_requirements=True)

    # clone/install the core project
    whitelist = [core_project]
    installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
                                              update_requirements=True)
    if core_project not in installed:
        error_out('{} git repository must be specified'.format(core_project))


def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
                                  update_requirements=False):
    """Clone/install subset of OpenStack repos specified in yaml config file."""
    global requirements_dir
    installed = []

    with open(yaml_file, 'r') as fd:
        projects = yaml.load(fd)
        for proj, val in projects.items():
            # The project subset is chosen based on the following 3 rules:
            # 1) If project is in blacklist, we don't clone/install it, period.
            # 2) If whitelist is empty, we clone/install everything else.
            # 3) If whitelist is not empty, we clone/install everything in the
            #    whitelist.
            if proj in blacklist:
                continue
            if whitelist and proj not in whitelist:
                continue
            repo = val['repository']
            branch = val['branch']
            repo_dir = _git_clone_and_install_single(repo, branch,
                                                     update_requirements)
            if proj == 'requirements':
                requirements_dir = repo_dir
            installed.append(proj)
    return installed


def _git_clone_and_install_single(repo, branch, update_requirements=False):
    """Clone and install a single git repository."""
    dest_parent_dir = "/mnt/openstack-git/"
    dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))

    if not os.path.exists(dest_parent_dir):
        juju_log('Host dir not mounted at {}. '
                 'Creating directory there instead.'.format(dest_parent_dir))
        os.mkdir(dest_parent_dir)

    if not os.path.exists(dest_dir):
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
        repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
    else:
        repo_dir = dest_dir

    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    pip_install(repo_dir)

    return repo_dir


def _git_update_requirements(package_dir, reqs_dir):
    """Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt."""
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    cmd = "python update.py {}".format(package_dir)
    try:
        subprocess.check_call(cmd.split(' '))
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from global-requirements.txt".format(package))
    os.chdir(orig_dir)
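
Note: _git_clone_and_install_subset() above expects the yaml file named by openstack-origin-git to map project names to 'repository' and 'branch' keys, with a 'requirements' entry that is always cloned first. An illustrative sketch of that structure as the parsed dict the code iterates over; the repository URLs and branch names are assumptions, not charm defaults:

    # Illustrative shape of yaml.load(fd) for the openstack-origin-git file.
    projects = {
        'requirements': {'repository': 'https://github.com/openstack/requirements',
                         'branch': 'stable/icehouse'},
        'neutron': {'repository': 'https://github.com/openstack/neutron',
                    'branch': 'stable/icehouse'},
    }

    for proj, val in projects.items():
        print(proj, val['repository'], val['branch'])
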
hooks/charmhelpers/contrib/python/__init__.py (new empty file)
hooks/charmhelpers/contrib/python/packages.py (new file, 77 lines)
@@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8

__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"

from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log

try:
    from pip import main as pip_execute
except ImportError:
    apt_update()
    apt_install('python-pip')
    from pip import main as pip_execute


def parse_options(given, available):
    """Given a set of options, check if available"""
    for key, value in sorted(given.items()):
        if key in available:
            yield "--{0}={1}".format(key, value)


def pip_install_requirements(requirements, **options):
    """Install a requirements file """
    command = ["install"]

    available_options = ('proxy', 'src', 'log', )
    for option in parse_options(options, available_options):
        command.append(option)

    command.append("-r {0}".format(requirements))
    log("Installing from file: {} with options: {}".format(requirements,
                                                            command))
    pip_execute(command)


def pip_install(package, fatal=False, **options):
    """Install a python package"""
    command = ["install"]

    available_options = ('proxy', 'src', 'log', "index-url", )
    for option in parse_options(options, available_options):
        command.append(option)

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Installing {} package with options: {}".format(package,
                                                        command))
    pip_execute(command)


def pip_uninstall(package, **options):
    """Uninstall a python package"""
    command = ["uninstall", "-q", "-y"]

    available_options = ('proxy', 'log', )
    for option in parse_options(options, available_options):
        command.append(option)

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Uninstalling {} package with options: {}".format(package,
                                                          command))
    pip_execute(command)


def pip_list():
    """Returns the list of current python installed packages
    """
    return pip_execute(["list"])
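
Note: the new pip helpers accept a package (or list of packages) plus a small set of whitelisted keyword options that parse_options() turns into --key=value flags for pip. Illustrative usage; the package names and proxy URL are assumptions for the example only:

    from charmhelpers.contrib.python.packages import pip_install, pip_uninstall

    pip_install('netaddr')                                              # single package
    pip_install(['pbr', 'six'], proxy='http://proxy.example.com:3128')  # list + option
    pip_uninstall('netaddr')
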
@@ -16,19 +16,18 @@ import time
from subprocess import (
    check_call,
    check_output,
    CalledProcessError
    CalledProcessError,
)

from charmhelpers.core.hookenv import (
    relation_get,
    relation_ids,
    related_units,
    log,
    DEBUG,
    INFO,
    WARNING,
    ERROR
    ERROR,
)

from charmhelpers.core.host import (
    mount,
    mounts,
@@ -37,7 +36,6 @@ from charmhelpers.core.host import (
    service_running,
    umount,
)

from charmhelpers.fetch import (
    apt_install,
)
@@ -56,99 +54,85 @@ CEPH_CONF = """[global]


def install():
    ''' Basic Ceph client installation '''
    """Basic Ceph client installation."""
    ceph_dir = "/etc/ceph"
    if not os.path.exists(ceph_dir):
        os.mkdir(ceph_dir)

    apt_install('ceph-common', fatal=True)


def rbd_exists(service, pool, rbd_img):
    ''' Check to see if a RADOS block device exists '''
    """Check to see if a RADOS block device exists."""
    try:
        out = check_output(['rbd', 'list', '--id', service,
                            '--pool', pool])
        out = check_output(['rbd', 'list', '--id',
                            service, '--pool', pool]).decode('UTF-8')
    except CalledProcessError:
        return False
    else:
        return rbd_img in out

    return rbd_img in out


def create_rbd_image(service, pool, image, sizemb):
    ''' Create a new RADOS block device '''
    cmd = [
        'rbd',
        'create',
        image,
        '--size',
        str(sizemb),
        '--id',
        service,
        '--pool',
        pool
    ]
    """Create a new RADOS block device."""
    cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
           '--pool', pool]
    check_call(cmd)


def pool_exists(service, name):
    ''' Check to see if a RADOS pool already exists '''
    """Check to see if a RADOS pool already exists."""
    try:
        out = check_output(['rados', '--id', service, 'lspools'])
        out = check_output(['rados', '--id', service,
                            'lspools']).decode('UTF-8')
    except CalledProcessError:
        return False
    else:
        return name in out

    return name in out


def get_osds(service):
    '''
    Return a list of all Ceph Object Storage Daemons
    currently in the cluster
    '''
    """Return a list of all Ceph Object Storage Daemons currently in the
    cluster.
    """
    version = ceph_version()
    if version and version >= '0.56':
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls', '--format=json']))
    else:
        return None
                                        'osd', 'ls',
                                        '--format=json']).decode('UTF-8'))

    return None


def create_pool(service, name, replicas=2):
    ''' Create a new RADOS pool '''
def create_pool(service, name, replicas=3):
    """Create a new RADOS pool."""
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return

    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 / replicas)
        pgnum = (len(osds) * 100 // replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'create',
        name, str(pgnum)
    ]

    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
    check_call(cmd)
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'set', name,
        'size', str(replicas)
    ]

    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
           str(replicas)]
    check_call(cmd)


def delete_pool(service, name):
    ''' Delete a RADOS pool from ceph '''
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'delete',
        name, '--yes-i-really-really-mean-it'
    ]
    """Delete a RADOS pool from ceph."""
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
           '--yes-i-really-really-mean-it']
    check_call(cmd)


@@ -161,44 +145,43 @@ def _keyring_path(service):


def create_keyring(service, key):
    ''' Create a new Ceph keyring containing key'''
    """Create a new Ceph keyring containing key."""
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
        log('Ceph keyring exists at %s.' % keyring, level=WARNING)
        return
    cmd = [
        'ceph-authtool',
        keyring,
        '--create-keyring',
        '--name=client.{}'.format(service),
        '--add-key={}'.format(key)
    ]

    cmd = ['ceph-authtool', keyring, '--create-keyring',
           '--name=client.{}'.format(service), '--add-key={}'.format(key)]
    check_call(cmd)
    log('ceph: Created new ring at %s.' % keyring, level=INFO)
    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)


def create_key_file(service, key):
    ''' Create a file containing key '''
    """Create a file containing key."""
    keyfile = _keyfile_path(service)
    if os.path.exists(keyfile):
        log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
        log('Keyfile exists at %s.' % keyfile, level=WARNING)
        return

    with open(keyfile, 'w') as fd:
        fd.write(key)
    log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)

    log('Created new keyfile at %s.' % keyfile, level=INFO)


def get_ceph_nodes():
    ''' Query named relation 'ceph' to detemine current nodes '''
    """Query named relation 'ceph' to determine current nodes."""
    hosts = []
    for r_id in relation_ids('ceph'):
        for unit in related_units(r_id):
            hosts.append(relation_get('private-address', unit=unit, rid=r_id))

    return hosts


def configure(service, key, auth, use_syslog):
    ''' Perform basic configuration of Ceph '''
    """Perform basic configuration of Ceph."""
    create_keyring(service, key)
    create_key_file(service, key)
    hosts = get_ceph_nodes()
@@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog):


def image_mapped(name):
    ''' Determine whether a RADOS block device is mapped locally '''
    """Determine whether a RADOS block device is mapped locally."""
    try:
        out = check_output(['rbd', 'showmapped'])
        out = check_output(['rbd', 'showmapped']).decode('UTF-8')
    except CalledProcessError:
        return False
    else:
        return name in out

    return name in out


def map_block_storage(service, pool, image):
    ''' Map a RADOS block device for local use '''
    """Map a RADOS block device for local use."""
    cmd = [
        'rbd',
        'map',
@@ -235,31 +218,32 @@ def map_block_storage(service, pool, image):


def filesystem_mounted(fs):
    ''' Determine whether a filesytems is already mounted '''
    """Determine whether a filesytems is already mounted."""
    return fs in [f for f, m in mounts()]


def make_filesystem(blk_device, fstype='ext4', timeout=10):
    ''' Make a new filesystem on the specified block device '''
    """Make a new filesystem on the specified block device."""
    count = 0
    e_noent = os.errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('ceph: gave up waiting on block device %s' % blk_device,
            log('Gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        log('ceph: waiting for block device %s to appear' % blk_device,
            level=INFO)

        log('Waiting for block device %s to appear' % blk_device,
            level=DEBUG)
        count += 1
        time.sleep(1)
    else:
        log('ceph: Formatting block device %s as filesystem %s.' %
        log('Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])


def place_data_on_block_device(blk_device, data_src_dst):
    ''' Migrate data in data_src_dst to blk_device and then remount '''
    """Migrate data in data_src_dst to blk_device and then remount."""
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
@@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst):

# TODO: re-use
def modprobe(module):
    ''' Load a kernel module and configure for auto-load on reboot '''
    log('ceph: Loading kernel module', level=INFO)
    """Load a kernel module and configure for auto-load on reboot."""
    log('Loading kernel module', level=INFO)
    cmd = ['modprobe', module]
    check_call(cmd)
    with open('/etc/modules', 'r+') as modules:
@@ -289,7 +273,7 @@ def modprobe(module):


def copy_files(src, dst, symlinks=False, ignore=None):
    ''' Copy files from src to dst '''
    """Copy files from src to dst."""
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
@@ -300,9 +284,9 @@ def copy_files(src, dst, symlinks=False, ignore=None):


def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[]):
    """
    NOTE: This function must only be called from a single service unit for
                        blk_device, fstype, system_services=[],
                        replicas=3):
    """NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
@@ -316,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('ceph: Creating new pool {}.'.format(pool))
        create_pool(service, pool)
        log('Creating new pool {}.'.format(pool), level=INFO)
        create_pool(service, pool, replicas=replicas)

    if not rbd_exists(service, pool, rbd_img):
        log('ceph: Creating RBD image ({}).'.format(rbd_img))
        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
            level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
@@ -339,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,

    for svc in system_services:
        if service_running(svc):
            log('ceph: Stopping services {} prior to migrating data.'
                .format(svc))
            log('Stopping services {} prior to migrating data.'
                .format(svc), level=DEBUG)
            service_stop(svc)

    place_data_on_block_device(blk_device, mount_point)

    for svc in system_services:
        log('ceph: Starting service {} after migrating data.'
            .format(svc))
        log('Starting service {} after migrating data.'
            .format(svc), level=DEBUG)
        service_start(svc)


def ensure_ceph_keyring(service, user=None, group=None):
    '''
    Ensures a ceph keyring is created for a named service
    and optionally ensures user and group ownership.
    """Ensures a ceph keyring is created for a named service and optionally
    ensures user and group ownership.

    Returns False if no ceph key is available in relation state.
    '''
    """
    key = None
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break

    if not key:
        return False

    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])

    return True


def ceph_version():
    ''' Retrieve the local version of ceph '''
    """Retrieve the local version of ceph."""
    if os.path.exists('/usr/bin/ceph'):
        cmd = ['ceph', '-v']
        output = check_output(cmd)
        output = check_output(cmd).decode('US-ASCII')
        output = output.split()
        if len(output) > 3:
            return output[2]
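
Note: create_pool() now defaults to replicas=3 and uses integer division (//) so the placement-group count stays an int under Python 3. A worked sketch of that calculation, assuming a cluster where get_osds() reports 3 OSDs:

    osds = [0, 1, 2]     # assumed result of get_osds(service)
    replicas = 3
    pgnum = (len(osds) * 100 // replicas) if osds else 200
    print(pgnum)         # 100; the 200 fallback only applies to old ceph without 'osd ls'
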
@@ -1,12 +1,12 @@

import os
import re

from subprocess import (
    check_call,
    check_output,
)

import six


##################################################
# loopback device helpers.
@@ -37,7 +37,7 @@ def create_loopback(file_path):
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    for d, f in loopback_devices().iteritems():
    for d, f in six.iteritems(loopback_devices()):
        if f == file_path:
            return d

@@ -51,7 +51,7 @@ def ensure_loopback_device(path, size):

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for d, f in loopback_devices().iteritems():
    for d, f in six.iteritems(loopback_devices()):
        if f == path:
            return d
@@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device):
    vg = None
    pvd = check_output(['pvdisplay', block_device]).splitlines()
    for l in pvd:
        l = l.decode('UTF-8')
        if l.strip().startswith('VG Name'):
            vg = ' '.join(l.strip().split()[2:])
    return vg
@@ -30,7 +30,8 @@ def zap_disk(block_device):
    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
    call(['sgdisk', '--zap-all', '--mbrtogpt',
          '--clear', block_device])
    dev_end = check_output(['blockdev', '--getsz', block_device])
    dev_end = check_output(['blockdev', '--getsz',
                            block_device]).decode('UTF-8')
    gpt_end = int(dev_end.split()[0]) - 100
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=1M', 'count=1'])
@@ -47,7 +48,7 @@ def is_device_mounted(device):
    it doesn't.
    '''
    is_partition = bool(re.search(r".*[0-9]+\b", device))
    out = check_output(['mount'])
    out = check_output(['mount']).decode('UTF-8')
    if is_partition:
        return bool(re.search(device + r"\b", out))
    return bool(re.search(device + r"[0-9]+\b", out))
@ -3,10 +3,11 @@

__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'

import io
import os


class Fstab(file):
class Fstab(io.FileIO):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """
@ -24,8 +25,8 @@ class Fstab(file):
                options = "defaults"

            self.options = options
            self.d = d
            self.p = p
            self.d = int(d)
            self.p = int(p)

        def __eq__(self, o):
            return str(self) == str(o)
@ -45,7 +46,7 @@ class Fstab(file):
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')
        super(Fstab, self).__init__(self._path, 'rb+')

    def _hydrate_entry(self, line):
        # NOTE: use split with no arguments to split on any
@ -58,8 +59,9 @@ class Fstab(file):
    def entries(self):
        self.seek(0)
        for line in self.readlines():
            line = line.decode('us-ascii')
            try:
                if not line.startswith("#"):
                if line.strip() and not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                pass
@ -75,14 +77,14 @@ class Fstab(file):
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write(str(entry) + '\n')
        self.write((str(entry) + '\n').encode('us-ascii'))
        self.truncate()
        return entry

    def remove_entry(self, entry):
        self.seek(0)

        lines = self.readlines()
        lines = [l.decode('us-ascii') for l in self.readlines()]

        found = False
        for index, line in enumerate(lines):
@ -97,7 +99,7 @@ class Fstab(file):
            lines.remove(line)

        self.seek(0)
        self.write(''.join(lines))
        self.write(''.join(lines).encode('us-ascii'))
        self.truncate()
        return True
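The Fstab hunks above swap the Python 2-only `file` base class for `io.FileIO`, which reads and writes bytes; that is why the commit adds the `encode('us-ascii')`/`decode('us-ascii')` calls. A minimal sketch of that pattern, illustrative only and using a scratch path rather than /etc/fstab:

# Illustrative sketch only: io.FileIO deals in bytes, so text must be
# encoded on write and decoded on read, mirroring the hunks above.
import io

path = '/tmp/fstab-example'  # scratch file for the example, not /etc/fstab
with io.FileIO(path, 'w') as f:
    f.write('/dev/sda1 / ext4 defaults 0 1\n'.encode('us-ascii'))

with io.FileIO(path, 'r') as f:
    entries = [line.decode('us-ascii') for line in f.readlines()]
print(entries)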
@ -9,9 +9,14 @@ import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError

import six
if not six.PY3:
    from UserDict import UserDict
else:
    from collections import UserDict

CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@ -63,16 +68,18 @@ def log(message, level=None):
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    subprocess.call(command)


class Serializable(UserDict.IterableUserDict):
class Serializable(UserDict):
    """Wrapper, an object that can be serialized to yaml or json"""

    def __init__(self, obj):
        # wrap the object
        UserDict.IterableUserDict.__init__(self)
        UserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
@ -214,6 +221,12 @@ class Config(dict):
        except KeyError:
            return (self._prev_dict or {})[key]

    def keys(self):
        prev_keys = []
        if self._prev_dict is not None:
            prev_keys = self._prev_dict.keys()
        return list(set(prev_keys + list(dict.keys(self))))

    def load_previous(self, path=None):
        """Load previous copy of config from disk.

@ -263,7 +276,7 @@ class Config(dict):

        """
        if self._prev_dict:
            for k, v in self._prev_dict.iteritems():
            for k, v in six.iteritems(self._prev_dict):
                if k not in self:
                    self[k] = v
        with open(self.path, 'w') as f:
@ -278,7 +291,8 @@ def config(scope=None):
        config_cmd_line.append(scope)
    config_cmd_line.append('--format=json')
    try:
        config_data = json.loads(subprocess.check_output(config_cmd_line))
        config_data = json.loads(
            subprocess.check_output(config_cmd_line).decode('UTF-8'))
        if scope is not None:
            return config_data
        return Config(config_data)
@ -297,10 +311,10 @@ def relation_get(attribute=None, unit=None, rid=None):
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args))
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError, e:
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
@ -312,7 +326,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
    relation_cmd_line = ['relation-set']
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    for k, v in (relation_settings.items() + kwargs.items()):
    for k, v in (list(relation_settings.items()) + list(kwargs.items())):
        if v is None:
            relation_cmd_line.append('{}='.format(k))
        else:
@ -329,7 +343,8 @@ def relation_ids(reltype=None):
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
        return json.loads(subprocess.check_output(relid_cmd_line)) or []
        return json.loads(
            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
    return []


@ -340,7 +355,8 @@ def related_units(relid=None):
    units_cmd_line = ['relation-list', '--format=json']
    if relid is not None:
        units_cmd_line.extend(('-r', relid))
    return json.loads(subprocess.check_output(units_cmd_line)) or []
    return json.loads(
        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []


@cached
@ -379,21 +395,31 @@ def relations_of_type(reltype=None):
    return relation_data


@cached
def metadata():
    """Get the current charm metadata.yaml contents as a python object"""
    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
        return yaml.safe_load(md)


@cached
def relation_types():
    """Get a list of relation types supported by this charm"""
    charmdir = os.environ.get('CHARM_DIR', '')
    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
    md = yaml.safe_load(mdf)
    rel_types = []
    md = metadata()
    for key in ('provides', 'requires', 'peers'):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    mdf.close()
    return rel_types


@cached
def charm_name():
    """Get the name of the current charm as is specified on metadata.yaml"""
    return metadata().get('name')


@cached
def relations():
    """Get a nested dictionary of relation data for all related units"""
@ -449,7 +475,7 @@ def unit_get(attribute):
    """Get the unit ID for the remote unit"""
    _args = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(_args))
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
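A recurring change in the hookenv hunks above is appending `.decode('UTF-8')` to `subprocess.check_output()`: on Python 3 the call returns bytes, which must be decoded before `json.loads()` or other string handling, while on Python 2 decoding ASCII output is a harmless no-op. A minimal sketch of the pattern, illustrative only (`relation-ids` is a Juju hook tool, so this runs only inside a hook):

# Illustrative sketch only: decode check_output() bytes before json.loads(),
# the pattern applied throughout the hunks above.
import json
import subprocess

def relation_ids_example(reltype=None):
    cmd = ['relation-ids', '--format=json']
    if reltype is not None:
        cmd.append(reltype)
    raw = subprocess.check_output(cmd)  # bytes on Python 3, str on Python 2
    return json.loads(raw.decode('UTF-8')) or []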
@ -13,13 +13,13 @@ import random
import string
import subprocess
import hashlib
import shutil
from contextlib import contextmanager

from collections import OrderedDict

from hookenv import log
from fstab import Fstab
import six

from .hookenv import log
from .fstab import Fstab


def service_start(service_name):
@ -55,7 +55,9 @@ def service(action, service_name):
def service_running(service):
    """Determine whether a system service is running"""
    try:
        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
        output = subprocess.check_output(
            ['service', service, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
    else:
@ -68,7 +70,9 @@ def service_running(service):
def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
        subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        return 'unrecognized service' not in e.output
    else:
@ -97,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
    return user_info


def add_group(group_name, system_group=False):
    """Add a group to the system"""
    try:
        group_info = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
    except KeyError:
        log('creating group {0}'.format(group_name))
        cmd = ['addgroup']
        if system_group:
            cmd.append('--system')
        else:
            cmd.extend([
                '--group',
            ])
        cmd.append(group_name)
        subprocess.check_call(cmd)
        group_info = grp.getgrnam(group_name)
    return group_info


def add_user_to_group(username, group):
    """Add a user to a group"""
    cmd = [
@ -116,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None):
    cmd.append(from_path)
    cmd.append(to_path)
    log(" ".join(cmd))
    return subprocess.check_output(cmd).strip()
    return subprocess.check_output(cmd).decode('UTF-8').strip()


def symlink(source, destination):
@ -131,7 +155,7 @@ def symlink(source, destination):
    subprocess.check_call(cmd)


def mkdir(path, owner='root', group='root', perms=0555, force=False):
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
    """Create a directory"""
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
@ -147,7 +171,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False):
    os.chown(realpath, uid, gid)


def write_file(path, content, owner='root', group='root', perms=0444):
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a string"""
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
    uid = pwd.getpwnam(owner).pw_uid
@ -178,7 +202,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError, e:
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False
@ -192,7 +216,7 @@ def umount(mountpoint, persist=False):
    cmd_args = ['umount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError, e:
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False
@ -219,8 +243,8 @@ def file_hash(path, hash_type='md5'):
    """
    if os.path.exists(path):
        h = getattr(hashlib, hash_type)()
        with open(path, 'r') as source:
            h.update(source.read())  # IGNORE:E1101 - it does have update
        with open(path, 'rb') as source:
            h.update(source.read())
        return h.hexdigest()
    else:
        return None
@ -298,7 +322,7 @@ def pwgen(length=None):
    if length is None:
        length = random.choice(range(35, 45))
    alphanumeric_chars = [
        l for l in (string.letters + string.digits)
        l for l in (string.ascii_letters + string.digits)
        if l not in 'l0QD1vAEIOUaeiou']
    random_chars = [
        random.choice(alphanumeric_chars) for _ in range(length)]
@ -307,14 +331,14 @@ def pwgen(length=None):

def list_nics(nic_type):
    '''Return a list of nics of given type(s)'''
    if isinstance(nic_type, basestring):
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type
    interfaces = []
    for int_type in int_types:
        cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
        ip_output = subprocess.check_output(cmd).split('\n')
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        ip_output = (line for line in ip_output if line)
        for line in ip_output:
            if line.split()[1].startswith(int_type):
@ -336,7 +360,7 @@ def set_nic_mtu(nic, mtu):

def get_nic_mtu(nic):
    cmd = ['ip', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd).split('\n')
    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
    mtu = ""
    for line in ip_output:
        words = line.split()
@ -347,7 +371,7 @@ def get_nic_mtu(nic):

def get_nic_hwaddr(nic):
    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd)
    ip_output = subprocess.check_output(cmd).decode('UTF-8')
    hwaddr = ""
    words = ip_output.split()
    if 'link/ether' in words:
@ -364,8 +388,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None):

    '''
    import apt_pkg
    from charmhelpers.fetch import apt_cache
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
@ -1,2 +1,2 @@
from .base import *
from .helpers import *
from .base import *  # NOQA
from .helpers import *  # NOQA

@ -196,7 +196,7 @@ class StoredContext(dict):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            os.fchmod(file_stream.fileno(), 0600)
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
@ -211,15 +211,19 @@ class StoredContext(dict):

class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready action.
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`

    :param str source: The template source file, relative to `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target, owner='root', group='root', perms=0444):
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444):
        self.source = source
        self.target = target
        self.owner = owner

@ -4,7 +4,8 @@ from charmhelpers.core import host
from charmhelpers.core import hookenv


def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None):
    """
    Render a template.

@ -47,5 +48,5 @@ def render(source, target, context, owner='root', group='root', perms=0444, temp
        level=hookenv.ERROR)
        raise e
    content = template.render(context)
    host.mkdir(os.path.dirname(target))
    host.mkdir(os.path.dirname(target), owner, group)
    host.write_file(target, content, owner, group, perms)
@ -5,10 +5,6 @@ from yaml import safe_load
from charmhelpers.core.host import (
    lsb_release
)
from urlparse import (
    urlparse,
    urlunparse,
)
import subprocess
from charmhelpers.core.hookenv import (
    config,
@ -16,6 +12,12 @@ from charmhelpers.core.hookenv import (
)
import os

import six
if six.PY3:
    from urllib.parse import urlparse, urlunparse
else:
    from urlparse import urlparse, urlunparse


CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@ -72,6 +74,7 @@ CLOUD_ARCHIVE_POCKETS = {
FETCH_HANDLERS = (
    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)

APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False):
    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, basestring):
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
@ -181,7 +184,7 @@ def apt_update(fatal=False):
def apt_purge(packages, fatal=False):
    """Purge one or more packages"""
    cmd = ['apt-get', '--assume-yes', 'purge']
    if isinstance(packages, basestring):
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False):
def apt_hold(packages, fatal=False):
    """Hold one or more packages"""
    cmd = ['apt-mark', 'hold']
    if isinstance(packages, basestring):
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
@ -218,6 +221,7 @@ def add_source(source, key=None):
    pocket for the release.
    'cloud:' may be used to activate official cloud archive pockets,
    such as 'cloud:icehouse'
    'distro' may be used as a noop

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
@ -251,12 +255,14 @@ def add_source(source, key=None):
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    elif source == 'distro':
        pass
    else:
        raise SourceConfigError("Unknown source: {!r}".format(source))
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            with NamedTemporaryFile() as key_file:
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
@ -293,14 +299,14 @@ def configure_sources(update=False,
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    if isinstance(sources, basestring):
    if isinstance(sources, six.string_types):
        sources = [sources]

    if keys is None:
        for source in sources:
            add_source(source, None)
    else:
        if isinstance(keys, basestring):
        if isinstance(keys, six.string_types):
            keys = [keys]

        if len(sources) != len(keys):
@ -397,7 +403,7 @@ def _run_apt_command(cmd, fatal=False):
    while result is None or result == APT_NO_LOCK:
        try:
            result = subprocess.check_call(cmd, env=env)
        except subprocess.CalledProcessError, e:
        except subprocess.CalledProcessError as e:
            retry_count = retry_count + 1
            if retry_count > APT_NO_LOCK_RETRY_COUNT:
                raise
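The fetch hunks above replace the module-level `urlparse` import with a `six.PY3` switch, and the same idiom recurs in the archiveurl and bzrurl changes that follow: pick the stdlib location once at import time, then use a single name everywhere. A minimal sketch, illustrative only:

# Illustrative sketch only: conditional import so the same code runs on
# Python 2 and Python 3, as in the hunks above.
import six

if six.PY3:
    from urllib.parse import urlparse
else:
    from urlparse import urlparse

print(urlparse('http://example.com/archive.tar.gz#sha1=abcd').fragment)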
@ -1,8 +1,23 @@
import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib
import re

import six
if six.PY3:
    from urllib.request import (
        build_opener, install_opener, urlopen, urlretrieve,
        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
    )
    from urllib.parse import urlparse, urlunparse, parse_qs
    from urllib.error import URLError
else:
    from urllib import urlretrieve
    from urllib2 import (
        build_opener, install_opener, urlopen,
        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
        URLError
    )
    from urlparse import urlparse, urlunparse, parse_qs

from charmhelpers.fetch import (
    BaseFetchHandler,
@ -15,6 +30,24 @@ from charmhelpers.payload.archive import (
from charmhelpers.core.host import mkdir, check_hash


def splituser(host):
    '''urllib.splituser(), but six's support of this seems broken'''
    _userprog = re.compile('^(.*)@(.*)$')
    match = _userprog.match(host)
    if match:
        return match.group(1, 2)
    return None, host


def splitpasswd(user):
    '''urllib.splitpasswd(), but six's support of this is missing'''
    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
    match = _passwdprog.match(user)
    if match:
        return match.group(1, 2)
    return user, None


class ArchiveUrlFetchHandler(BaseFetchHandler):
    """
    Handler to download archive files from arbitrary URLs.
@ -42,20 +75,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
        """
        # propogate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
        proto, netloc, path, params, query, fragment = urlparse(source)
        if proto in ('http', 'https'):
            auth, barehost = urllib2.splituser(netloc)
            auth, barehost = splituser(netloc)
            if auth is not None:
                source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
                username, password = urllib2.splitpasswd(auth)
                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
                source = urlunparse((proto, barehost, path, params, query, fragment))
                username, password = splitpasswd(auth)
                passman = HTTPPasswordMgrWithDefaultRealm()
                # Realm is set to None in add_password to force the username and password
                # to be used whatever the realm
                passman.add_password(None, source, username, password)
                authhandler = urllib2.HTTPBasicAuthHandler(passman)
                opener = urllib2.build_opener(authhandler)
                urllib2.install_opener(opener)
        response = urllib2.urlopen(source)
                authhandler = HTTPBasicAuthHandler(passman)
                opener = build_opener(authhandler)
                install_opener(opener)
        response = urlopen(source)
        try:
            with open(dest, 'w') as dest_file:
                dest_file.write(response.read())
@ -91,17 +124,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0755)
            mkdir(dest_dir, perms=0o755)
        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
        try:
            self.download(source, dld_file)
        except urllib2.URLError as e:
        except URLError as e:
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        options = urlparse.parse_qs(url_parts.fragment)
        options = parse_qs(url_parts.fragment)
        for key, value in options.items():
            if key in hashlib.algorithms:
            if not six.PY3:
                algorithms = hashlib.algorithms
            else:
                algorithms = hashlib.algorithms_available
            if key in algorithms:
                check_hash(dld_file, value, key)
        if checksum:
            check_hash(dld_file, checksum, hash_type)

@ -5,6 +5,10 @@ from charmhelpers.fetch import (
)
from charmhelpers.core.host import mkdir

import six
if six.PY3:
    raise ImportError('bzrlib does not support Python3')

try:
    from bzrlib.branch import Branch
except ImportError:
@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0755)
            mkdir(dest_dir, perms=0o755)
        try:
            self.branch(source, dest_dir)
        except OSError as e:
hooks/charmhelpers/fetch/giturl.py (new file, 51 lines)
@ -0,0 +1,51 @@
import os
from charmhelpers.fetch import (
    BaseFetchHandler,
    UnhandledSource
)
from charmhelpers.core.host import mkdir

import six
if six.PY3:
    raise ImportError('GitPython does not support Python 3')

try:
    from git import Repo
except ImportError:
    from charmhelpers.fetch import apt_install
    apt_install("python-git")
    from git import Repo


class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs"""
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        # TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git'):
            return False
        else:
            return True

    def clone(self, source, dest, branch):
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        repo = Repo.clone_from(source, dest)
        repo.git.checkout(branch)

    def install(self, source, branch="master", dest=None):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.clone(source, dest_dir, branch)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
@ -50,6 +50,8 @@ NEUTRON_ML2_PLUGIN = \
    "neutron.plugins.ml2.plugin.Ml2Plugin"
NEUTRON_NVP_PLUGIN = \
    "neutron.plugins.nicira.nicira_nvp_plugin.NeutronPlugin.NvpPluginV2"
NEUTRON_N1KV_PLUGIN = \
    "neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2"
NEUTRON_NSX_PLUGIN = "vmware"

NEUTRON = 'neutron'
@ -65,16 +67,18 @@ def networking_name():

OVS = 'ovs'
NVP = 'nvp'
N1KV = 'n1kv'
NSX = 'nsx'

CORE_PLUGIN = {
    QUANTUM: {
        OVS: QUANTUM_OVS_PLUGIN,
        NVP: QUANTUM_NVP_PLUGIN
        NVP: QUANTUM_NVP_PLUGIN,
    },
    NEUTRON: {
        OVS: NEUTRON_OVS_PLUGIN,
        NVP: NEUTRON_NVP_PLUGIN,
        N1KV: NEUTRON_N1KV_PLUGIN,
        NSX: NEUTRON_NSX_PLUGIN
    },
}
@ -166,13 +170,16 @@ class L3AgentContext(OSContextGenerator):

        if config('external-network-id'):
            ctxt['ext_net_id'] = config('external-network-id')

        if config('plugin'):
            ctxt['plugin'] = config('plugin')
        return ctxt


class ExternalPortContext(OSContextGenerator):
class NeutronPortContext(OSContextGenerator):

    def __call__(self):
        if not config('ext-port'):
    def _resolve_port(self, config_key):
        if not config(config_key):
            return None
        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
@ -183,7 +190,7 @@ class ExternalPortContext(OSContextGenerator):
                get_ipv6_addr(iface=nic, fatal=False)
            hwaddr_to_ip[hwaddr] = addresses
        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        for entry in config('ext-port').split():
        for entry in config(config_key).split():
            entry = entry.strip()
            if re.match(mac_regex, entry):
                if entry in hwaddr_to_nic and len(hwaddr_to_ip[entry]) == 0:
@ -192,15 +199,35 @@ class ExternalPortContext(OSContextGenerator):
                    continue
                # Entry is a MAC address for a valid interface that doesn't
                # have an IP address assigned yet.
                return {"ext_port": hwaddr_to_nic[entry]}
                return hwaddr_to_nic[entry]
            else:
                # If the passed entry is not a MAC address, assume it's a valid
                # interface, and that the user put it there on purpose (we can
                # trust it to be the real external network).
                return {"ext_port": entry}
                return entry
        return None


class ExternalPortContext(NeutronPortContext):

    def __call__(self):
        port = self._resolve_port('ext-port')
        if port:
            return {"ext_port": port}
        else:
            return None


class DataPortContext(NeutronPortContext):

    def __call__(self):
        port = self._resolve_port('data-port')
        if port:
            return {"data_port": port}
        else:
            return None


class QuantumGatewayContext(OSContextGenerator):

    def __call__(self):
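The refactor above moves the MAC-or-interface resolution that ExternalPortContext used to do inline into a shared `NeutronPortContext._resolve_port()`, so ExternalPortContext and the new DataPortContext each only name their config key and context variable. A hypothetical further subclass would follow the same shape (sketch only; `TunnelPortContext` and the `tunnel-port` key are invented for illustration and are not part of this commit):

# Hypothetical sketch only: another port context built on the shared
# _resolve_port() helper introduced above; 'tunnel-port' is an invented key.
from quantum_contexts import NeutronPortContext  # assumes hooks/ on sys.path


class TunnelPortContext(NeutronPortContext):

    def __call__(self):
        port = self._resolve_port('tunnel-port')
        if port:
            return {"tunnel_port": port}
        return None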
@ -19,6 +19,7 @@ from charmhelpers.fetch import (
    apt_update,
    apt_install,
    filter_installed_packages,
    apt_purge,
)
from charmhelpers.core.host import (
    restart_on_change,
@ -35,6 +36,7 @@ from charmhelpers.contrib.openstack.utils import (
    openstack_upgrade_available,
)
from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.core.sysctl import create as create_sysctl

from charmhelpers.contrib.charmsupport.nrpe import NRPE

@ -66,6 +68,7 @@ def install():
        src = 'cloud:precise-folsom'
    configure_installation_source(src)
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade
    if valid_plugin():
        apt_install(filter_installed_packages(get_early_packages()),
                    fatal=True)
@ -83,6 +86,11 @@ def config_changed():
        if openstack_upgrade_available(get_common_package()):
            CONFIGS = do_openstack_upgrade()
    update_nrpe_config()

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-quantum-gateway.conf')

    # Re-run joined hooks as config might have changed
    for r_id in relation_ids('shared-db'):
        db_joined(relation_id=r_id)
@ -98,6 +106,11 @@ def config_changed():
    else:
        log('Please provide a valid plugin config', level=ERROR)
        sys.exit(1)
    if config('plugin') == 'n1kv':
        if config('enable-l3-agent'):
            apt_install(filter_installed_packages('neutron-l3-agent'))
        else:
            apt_purge('neutron-l3-agent')


@hooks.hook('upgrade-charm')
@ -193,6 +206,10 @@ def cluster_departed():
            ' failed nodes with nvp|nsx',
            level=WARNING)
        return
    if config('plugin') == 'n1kv':
        log('Unable to re-assign agent resources for failed nodes with n1kv',
            level=WARNING)
        return
    if eligible_leader(None):
        reassign_agent_resources()
        CONFIGS.write_all()
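The config-changed hunk above feeds the new `sysctl` option, a YAML mapping of sysctl settings, to `create_sysctl()`, which persists it under /etc/sysctl.d. Roughly, that amounts to the following (illustrative sketch only, not the charmhelpers implementation; it writes to a scratch path and skips applying the values):

# Illustrative sketch only: render a YAML sysctl mapping to key=value lines,
# approximating what the sysctl option added above is turned into.
import yaml

def render_sysctl(sysctl_yaml, path='/tmp/50-quantum-gateway.conf'):
    settings = yaml.safe_load(sysctl_yaml) or {}
    with open(path, 'w') as out:
        for key, value in settings.items():
            out.write('{}={}\n'.format(key, value))

render_sysctl('{ kernel.max_pid: "1337" }')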
@ -39,13 +39,14 @@ from charmhelpers.contrib.openstack.context import (
import charmhelpers.contrib.openstack.templating as templating
from charmhelpers.contrib.openstack.neutron import headers_package
from quantum_contexts import (
    CORE_PLUGIN, OVS, NVP, NSX,
    CORE_PLUGIN, OVS, NVP, NSX, N1KV,
    NEUTRON, QUANTUM,
    networking_name,
    QuantumGatewayContext,
    NetworkServiceContext,
    L3AgentContext,
    ExternalPortContext,
    DataPortContext,
    remap_plugin
)

@ -63,7 +64,7 @@ QUANTUM_NVP_PLUGIN_CONF = \
    "/etc/quantum/plugins/nicira/nvp.ini"
QUANTUM_PLUGIN_CONF = {
    OVS: QUANTUM_OVS_PLUGIN_CONF,
    NVP: QUANTUM_NVP_PLUGIN_CONF
    NVP: QUANTUM_NVP_PLUGIN_CONF,
}

NEUTRON_CONF_DIR = '/etc/neutron'
@ -120,6 +121,15 @@ NEUTRON_GATEWAY_PKGS = {
        'python-psycopg2',
        'python-oslo.config',  # Force upgrade
        "nova-api-metadata"
    ],
    N1KV: [
        "neutron-plugin-cisco",
        "neutron-dhcp-agent",
        "python-mysqldb",
        "python-psycopg2",
        "nova-api-metadata",
        "neutron-common",
        "neutron-l3-agent"
    ]
}
NEUTRON_GATEWAY_PKGS[NSX] = NEUTRON_GATEWAY_PKGS[NVP]
@ -129,6 +139,12 @@ GATEWAY_PKGS = {
    NEUTRON: NEUTRON_GATEWAY_PKGS,
}

EARLY_PACKAGES = {
    OVS: ['openvswitch-datapath-dkms'],
    NVP: [],
    N1KV: []
}


def get_early_packages():
    '''Return a list of package for pre-install based on configured plugin'''
@ -326,6 +342,24 @@ NEUTRON_NVP_CONFIG_FILES = {
}
NEUTRON_NVP_CONFIG_FILES.update(NEUTRON_SHARED_CONFIG_FILES)

NEUTRON_N1KV_CONFIG_FILES = {
    NEUTRON_CONF: {
        'hook_contexts': [context.AMQPContext(ssl_dir=NEUTRON_CONF_DIR),
                          QuantumGatewayContext(),
                          SyslogContext()],
        'services': ['neutron-l3-agent',
                     'neutron-dhcp-agent',
                     'neutron-metadata-agent']
    },
    NEUTRON_L3_AGENT_CONF: {
        'hook_contexts': [NetworkServiceContext(),
                          L3AgentContext(),
                          QuantumGatewayContext()],
        'services': ['neutron-l3-agent']
    },
}
NEUTRON_N1KV_CONFIG_FILES.update(NEUTRON_SHARED_CONFIG_FILES)

CONFIG_FILES = {
    QUANTUM: {
        NVP: QUANTUM_NVP_CONFIG_FILES,
@ -335,6 +369,7 @@ CONFIG_FILES = {
        NSX: NEUTRON_NVP_CONFIG_FILES,
        NVP: NEUTRON_NVP_CONFIG_FILES,
        OVS: NEUTRON_OVS_CONFIG_FILES,
        N1KV: NEUTRON_N1KV_CONFIG_FILES,
    },
}

@ -402,16 +437,9 @@ def restart_map():
    return _map


def services():
    ''' Returns a list of services associate with this charm '''
    _services = []
    for v in restart_map().values():
        _services = _services + v
    return list(set(_services))


INT_BRIDGE = "br-int"
EXT_BRIDGE = "br-ex"
DATA_BRIDGE = 'br-data'

DHCP_AGENT = "DHCP Agent"
L3_AGENT = "L3 Agent"
@ -541,5 +569,11 @@ def configure_ovs():
        add_bridge(INT_BRIDGE)
        add_bridge(EXT_BRIDGE)
        ext_port_ctx = ExternalPortContext()()
        if ext_port_ctx is not None and ext_port_ctx['ext_port']:
        if ext_port_ctx and ext_port_ctx['ext_port']:
            add_bridge_port(EXT_BRIDGE, ext_port_ctx['ext_port'])

        add_bridge(DATA_BRIDGE)
        data_port_ctx = DataPortContext()()
        if data_port_ctx and data_port_ctx['data_port']:
            add_bridge_port(DATA_BRIDGE, data_port_ctx['data_port'],
                            promisc=True)
@ -7,7 +7,7 @@ state_path = /var/lib/neutron
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
ovs_use_veth = True

{% if instance_mtu -%}
dnsmasq_config_file = /etc/neutron/dnsmasq.conf
{% endif -%}
@ -15,3 +15,13 @@ dnsmasq_config_file = /etc/neutron/dnsmasq.conf
enable_metadata_network = True
enable_isolated_metadata = True
{% endif -%}

{% if plugin == 'n1kv' %}
enable_metadata_network = True
enable_isolated_metadata = True
resync_interval = 30
use_namespaces = True
dhcp_lease_time=3600
{% else %}
ovs_use_veth = True
{% endif %}

@ -10,8 +10,15 @@ admin_tenant_name = {{ service_tenant }}
admin_user = {{ service_username }}
admin_password = {{ service_password }}
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
ovs_use_veth = True
handle_internal_only_routers = {{ handle_internal_only_router }}
{% if plugin == 'n1kv' %}
l3_agent_manager = neutron.agent.l3_agent.L3NATAgentWithStateReport
external_network_bridge = br-int
ovs_use_veth = False
use_namespaces = True
{% else %}
ovs_use_veth = True
{% endif %}
{% if ext_net_id -%}
gateway_external_network_id = {{ ext_net_id }}
{% endif -%}

@ -3,18 +3,30 @@
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[ml2]
type_drivers = gre,vxlan
tenant_network_types = gre,vxlan
type_drivers = gre,vxlan,vlan,flat
tenant_network_types = gre,vxlan,vlan,flat
mechanism_drivers = openvswitch,l2population

[ml2_type_gre]
tunnel_id_ranges = 1:1000

[ml2_type_vxlan]
vni_ranges = 1001:2000

[ml2_type_vlan]
network_vlan_ranges = physnet1:1000:2000

[ml2_type_flat]
flat_networks = physnet1

[ovs]
enable_tunneling = True
local_ip = {{ local_ip }}
bridge_mappings = physnet1:br-data

[agent]
tunnel_types = {{ overlay_network_type }}
l2_population = {{ l2_population }}

[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

@ -358,8 +358,8 @@ class QuantumGatewayBasicDeployment(OpenStackAmuletDeployment):
        quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')
        expected = {
            'ml2': {
                'type_drivers': 'gre,vxlan',
                'tenant_network_types': 'gre,vxlan',
                'type_drivers': 'gre,vxlan,vlan,flat',
                'tenant_network_types': 'gre,vxlan,vlan,flat',
                'mechanism_drivers': 'openvswitch,l2population'
            },
            'ml2_type_gre': {
@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys

try:
    import six  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # flake8: noqa

try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa
@ -1,6 +1,6 @@
import amulet

import os
import six


class AmuletDeployment(object):
@ -52,12 +52,12 @@ class AmuletDeployment(object):

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.iteritems():
        for k, v in six.iteritems(relations):
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _deploy(self):

@ -5,6 +5,8 @@ import re
import sys
import time

import six


class AmuletUtils(object):
    """Amulet utilities.
@ -58,7 +60,7 @@ class AmuletUtils(object):
        Verify the specified services are running on the corresponding
        service units.
        """
        for k, v in commands.iteritems():
        for k, v in six.iteritems(commands):
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
@ -100,11 +102,11 @@ class AmuletUtils(object):
        longs, or can be a function that evaluate a variable and returns a
        bool.
        """
        for k, v in expected.iteritems():
        for k, v in six.iteritems(expected):
            if k in actual:
                if (isinstance(v, basestring) or
                if (isinstance(v, six.string_types) or
                        isinstance(v, bool) or
                        isinstance(v, (int, long))):
                        isinstance(v, six.integer_types)):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                elif not v(actual[k]):
@ -1,3 +1,4 @@
import six
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)
@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _get_openstack_release(self):

@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client

import six

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
)
@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
        expected service catalog endpoints.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
        for k, v in six.iteritems(expected):
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
@ -117,11 +117,11 @@ class TestNetworkServiceContext(_TestQuantumContext):
        }


class TestExternalPortContext(CharmTestCase):
class TestNeutronPortContext(CharmTestCase):

    def setUp(self):
        super(TestExternalPortContext, self).setUp(quantum_contexts,
                                                   TO_PATCH)
        super(TestNeutronPortContext, self).setUp(quantum_contexts,
                                                  TO_PATCH)
        self.machine_macs = {
            'eth0': 'fe:c5:ce:8e:2b:00',
            'eth1': 'fe:c5:ce:8e:2b:01',
@ -174,6 +174,11 @@ class TestExternalPortContext(CharmTestCase):
        self.assertEquals(quantum_contexts.ExternalPortContext()(),
                          {'ext_port': 'eth2'})

    def test_data_port_eth(self):
        self.config.return_value = 'eth1010'
        self.assertEquals(quantum_contexts.DataPortContext()(),
                          {'data_port': 'eth1010'})


class TestL3AgentContext(CharmTestCase):

@ -187,7 +192,8 @@ class TestL3AgentContext(CharmTestCase):
        self.test_config.set('external-network-id', '')
        self.eligible_leader.return_value = False
        self.assertEquals(quantum_contexts.L3AgentContext()(),
                          {'handle_internal_only_router': False})
                          {'handle_internal_only_router': False,
                           'plugin': 'ovs'})

    def test_hior_leader(self):
        self.test_config.set('run-internal-router', 'leader')
@ -195,7 +201,8 @@ class TestL3AgentContext(CharmTestCase):
        self.eligible_leader.return_value = True
        self.assertEquals(quantum_contexts.L3AgentContext()(),
                          {'handle_internal_only_router': True,
                           'ext_net_id': 'netid'})
                           'ext_net_id': 'netid',
                           'plugin': 'ovs'})

    def test_hior_all(self):
        self.test_config.set('run-internal-router', 'all')
@ -203,7 +210,8 @@ class TestL3AgentContext(CharmTestCase):
        self.eligible_leader.return_value = True
        self.assertEquals(quantum_contexts.L3AgentContext()(),
                          {'handle_internal_only_router': True,
                           'ext_net_id': 'netid'})
                           'ext_net_id': 'netid',
                           'plugin': 'ovs'})


class TestQuantumGatewayContext(CharmTestCase):
@ -19,6 +19,7 @@ TO_PATCH = [
    'valid_plugin',
    'apt_update',
    'apt_install',
    'apt_purge',
    'filter_installed_packages',
    'get_early_packages',
    'get_packages',
@ -39,7 +40,8 @@ TO_PATCH = [
    'lsb_release',
    'stop_services',
    'b64decode',
    'is_relation_made'
    'is_relation_made',
    'create_sysctl',
]


@ -97,6 +99,7 @@ class TestQuantumHooks(CharmTestCase):
    def test_config_changed(self):
        def mock_relids(rel):
            return ['relid']
        self.test_config.set('sysctl', '{ kernel.max_pid: "1337"}')
        self.openstack_upgrade_available.return_value = True
        self.valid_plugin.return_value = True
        self.relation_ids.side_effect = mock_relids
@ -111,6 +114,7 @@ class TestQuantumHooks(CharmTestCase):
        self.assertTrue(_pgsql_db_joined.called)
        self.assertTrue(_amqp_joined.called)
        self.assertTrue(_amqp_nova_joined.called)
        self.create_sysctl.assert_called()

    def test_config_changed_upgrade(self):
        self.openstack_upgrade_available.return_value = True
@ -119,6 +123,17 @@ class TestQuantumHooks(CharmTestCase):
        self.assertTrue(self.do_openstack_upgrade.called)
        self.assertTrue(self.configure_ovs.called)

    def test_config_changed_n1kv(self):
        self.openstack_upgrade_available.return_value = False
        self.valid_plugin.return_value = True
        self.filter_installed_packages.side_effect = lambda p: p
        self.test_config.set('plugin', 'n1kv')
        self._call_hook('config-changed')
        self.apt_install.assert_called_with('neutron-l3-agent')
        self.test_config.set('enable-l3-agent', False)
        self._call_hook('config-changed')
        self.apt_purge.assert_called_with('neutron-l3-agent')

    @patch('sys.exit')
    def test_config_changed_invalid_plugin(self, _exit):
        self.valid_plugin.return_value = False
@ -36,6 +36,7 @@ TO_PATCH = [
    'service_running',
    'NetworkServiceContext',
    'ExternalPortContext',
    'DataPortContext',
    'unit_private_ip',
    'relations_of_type',
    'service_stop',
@ -148,13 +149,33 @@ class TestQuantumUtils(CharmTestCase):
        self.test_config.set('ext-port', 'eth0')
        self.ExternalPortContext.return_value = \
            DummyExternalPortContext(return_value={'ext_port': 'eth0'})
        self.DataPortContext.return_value = \
            DummyExternalPortContext(return_value=None)
        quantum_utils.configure_ovs()
        self.add_bridge.assert_has_calls([
            call('br-int'),
            call('br-ex')
            call('br-ex'),
            call('br-data')
        ])
        self.add_bridge_port.assert_called_with('br-ex', 'eth0')

    def test_configure_ovs_ovs_data_port(self):
        self.config.side_effect = self.test_config.get
        self.test_config.set('plugin', 'ovs')
        self.test_config.set('data-port', 'eth0')
        self.ExternalPortContext.return_value = \
            DummyExternalPortContext(return_value=None)
        self.DataPortContext.return_value = \
            DummyExternalPortContext(return_value={'data_port': 'eth0'})
        quantum_utils.configure_ovs()
        self.add_bridge.assert_has_calls([
            call('br-int'),
            call('br-ex'),
            call('br-data')
        ])
        self.add_bridge_port.assert_called_with('br-data', 'eth0',
                                                promisc=True)

    def test_do_openstack_upgrade(self):
        self.config.side_effect = self.test_config.get
        self.is_relation_made.return_value = False