Pre-freeze 'make sync'

Change-Id: Ie02501d44ac279fd5ec40c29c3655a1a4daddf0e
parent 03c44248b4
commit 677a31b95e
@@ -28,6 +28,7 @@ from charmhelpers.core.host import (
     service
 )


 BRIDGE_TEMPLATE = """\
 # This veth pair is required when neutron data-port is mapped to an existing linux bridge. lp:1635067

@ -83,36 +84,177 @@ def get_bridges_and_ports_map():
|
|||||||
return {b: get_bridge_ports(b) for b in get_bridges()}
|
return {b: get_bridge_ports(b) for b in get_bridges()}
|
||||||
|
|
||||||
|
|
||||||
def add_bridge(name, datapath_type=None):
|
def _dict_to_vsctl_set(data, table, entity):
|
||||||
''' Add the named bridge to openvswitch '''
|
"""Helper that takes dictionary and provides ``ovs-vsctl set`` commands
|
||||||
|
|
||||||
|
:param data: Additional data to attach to interface
|
||||||
|
The keys in the data dictionary map directly to column names in the
|
||||||
|
OpenvSwitch table specified as defined in DB-SCHEMA [0] referenced in
|
||||||
|
RFC 7047 [1]
|
||||||
|
|
||||||
|
There are some established conventions for keys in the external-ids
|
||||||
|
column of various tables, consult the OVS Integration Guide [2] for
|
||||||
|
more details.
|
||||||
|
|
||||||
|
NOTE(fnordahl): Technically the ``external-ids`` column is called
|
||||||
|
``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
|
||||||
|
behaviour of transforming dashes to underscores for us [3] so we can
|
||||||
|
have a more pleasant data structure.
|
||||||
|
|
||||||
|
0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
|
||||||
|
1: https://tools.ietf.org/html/rfc7047
|
||||||
|
2: http://docs.openvswitch.org/en/latest/topics/integration/
|
||||||
|
3: https://github.com/openvswitch/ovs/blob/
|
||||||
|
20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
|
||||||
|
lib/db-ctl-base.c#L189-L215
|
||||||
|
:type data: Optional[Dict[str,Union[str,Dict[str,str]]]]
|
||||||
|
:param table: Name of table to operate on
|
||||||
|
:type table: str
|
||||||
|
:param entity: Name of entity to operate on
|
||||||
|
:type entity: str
|
||||||
|
:returns: '--' separated ``ovs-vsctl set`` commands
|
||||||
|
:rtype: Iterator[Tuple[str, str, str, str, str]]
|
||||||
|
"""
|
||||||
|
for (k, v) in data.items():
|
||||||
|
if isinstance(v, dict):
|
||||||
|
entries = {
|
||||||
|
'{}:{}'.format(k, dk): dv for (dk, dv) in v.items()}
|
||||||
|
else:
|
||||||
|
entries = {k: v}
|
||||||
|
for (colk, colv) in entries.items():
|
||||||
|
yield ('--', 'set', table, entity, '{}={}'.format(colk, colv))
|
||||||
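Note: a quick way to see what ``_dict_to_vsctl_set`` yields is to expand a small
dictionary by hand; the bridge name and values below are made up purely for
illustration:

    # Illustrative only; mirrors the generator above with hypothetical input.
    brdata = {
        'datapath-type': 'netdev',
        'external-ids': {'charm-name': 'neutron-openvswitch'},
    }
    for setcmd in _dict_to_vsctl_set(brdata, 'bridge', 'br-ex'):
        print(setcmd)
    # ('--', 'set', 'bridge', 'br-ex', 'datapath-type=netdev')
    # ('--', 'set', 'bridge', 'br-ex', 'external-ids:charm-name=neutron-openvswitch')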
|
|
||||||
|
|
||||||
|
def add_bridge(name, datapath_type=None, brdata=None, exclusive=False):
|
||||||
|
"""Add the named bridge to openvswitch and set/update bridge data for it
|
||||||
|
|
||||||
|
:param name: Name of bridge to create
|
||||||
|
:type name: str
|
||||||
|
:param datapath_type: Add datapath_type to bridge (DEPRECATED, use brdata)
|
||||||
|
:type datapath_type: Optional[str]
|
||||||
|
:param brdata: Additional data to attach to bridge
|
||||||
|
The keys in the brdata dictionary map directly to column names in the
|
||||||
|
OpenvSwitch bridge table as defined in DB-SCHEMA [0] referenced in
|
||||||
|
RFC 7047 [1]
|
||||||
|
|
||||||
|
There are some established conventions for keys in the external-ids
|
||||||
|
column of various tables, consult the OVS Integration Guide [2] for
|
||||||
|
more details.
|
||||||
|
|
||||||
|
NOTE(fnordahl): Technically the ``external-ids`` column is called
|
||||||
|
``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
|
||||||
|
behaviour of transforming dashes to underscores for us [3] so we can
|
||||||
|
have a more pleasant data structure.
|
||||||
|
|
||||||
|
0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
|
||||||
|
1: https://tools.ietf.org/html/rfc7047
|
||||||
|
2: http://docs.openvswitch.org/en/latest/topics/integration/
|
||||||
|
3: https://github.com/openvswitch/ovs/blob/
|
||||||
|
20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
|
||||||
|
lib/db-ctl-base.c#L189-L215
|
||||||
|
:type brdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
|
||||||
|
:param exclusive: If True, raise exception if bridge exists
|
||||||
|
:type exclusive: bool
|
||||||
|
:raises: subprocess.CalledProcessError
|
||||||
|
"""
|
||||||
log('Creating bridge {}'.format(name))
|
log('Creating bridge {}'.format(name))
|
||||||
cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
|
cmd = ['ovs-vsctl', '--']
|
||||||
|
if not exclusive:
|
||||||
|
cmd.append('--may-exist')
|
||||||
|
cmd.extend(('add-br', name))
|
||||||
|
if brdata:
|
||||||
|
for setcmd in _dict_to_vsctl_set(brdata, 'bridge', name):
|
||||||
|
cmd.extend(setcmd)
|
||||||
if datapath_type is not None:
|
if datapath_type is not None:
|
||||||
|
log('DEPRECATION WARNING: add_bridge called with datapath_type, '
|
||||||
|
'please use the brdata keyword argument instead.')
|
||||||
cmd += ['--', 'set', 'bridge', name,
|
cmd += ['--', 'set', 'bridge', name,
|
||||||
'datapath_type={}'.format(datapath_type)]
|
'datapath_type={}'.format(datapath_type)]
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
def del_bridge(name):
|
def del_bridge(name):
|
||||||
''' Delete the named bridge from openvswitch '''
|
"""Delete the named bridge from openvswitch
|
||||||
|
|
||||||
|
:param name: Name of bridge to remove
|
||||||
|
:type name: str
|
||||||
|
:raises: subprocess.CalledProcessError
|
||||||
|
"""
|
||||||
log('Deleting bridge {}'.format(name))
|
log('Deleting bridge {}'.format(name))
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
|
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
|
||||||
|
|
||||||
|
|
||||||
def add_bridge_port(name, port, promisc=False):
|
def add_bridge_port(name, port, promisc=False, ifdata=None, exclusive=False,
|
||||||
''' Add a port to the named openvswitch bridge '''
|
linkup=True, portdata=None):
|
||||||
|
"""Add port to bridge and optionally set/update interface data for it
|
||||||
|
|
||||||
|
:param name: Name of bridge to attach port to
|
||||||
|
:type name: str
|
||||||
|
:param port: Name of port as represented in netdev
|
||||||
|
:type port: str
|
||||||
|
:param promisc: Whether to set promiscuous mode on interface
|
||||||
|
True=on, False=off, None leave untouched
|
||||||
|
:type promisc: Optional[bool]
|
||||||
|
:param ifdata: Additional data to attach to interface
|
||||||
|
The keys in the ifdata dictionary map directly to column names in the
|
||||||
|
OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in
|
||||||
|
RFC 7047 [1]
|
||||||
|
|
||||||
|
There are some established conventions for keys in the external-ids
|
||||||
|
column of various tables, consult the OVS Integration Guide [2] for
|
||||||
|
more details.
|
||||||
|
|
||||||
|
NOTE(fnordahl): Technically the ``external-ids`` column is called
|
||||||
|
``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
|
||||||
|
behaviour of transforming dashes to underscores for us [3] so we can
|
||||||
|
have a more pleasant data structure.
|
||||||
|
|
||||||
|
0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
|
||||||
|
1: https://tools.ietf.org/html/rfc7047
|
||||||
|
2: http://docs.openvswitch.org/en/latest/topics/integration/
|
||||||
|
3: https://github.com/openvswitch/ovs/blob/
|
||||||
|
20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
|
||||||
|
lib/db-ctl-base.c#L189-L215
|
||||||
|
:type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
|
||||||
|
:param exclusive: If True, raise exception if port exists
|
||||||
|
:type exclusive: bool
|
||||||
|
:param linkup: Bring link up
|
||||||
|
:type linkup: bool
|
||||||
|
:param portdata: Additional data to attach to port. Similar to ifdata.
|
||||||
|
:type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
|
||||||
|
:raises: subprocess.CalledProcessError
|
||||||
|
"""
|
||||||
|
cmd = ['ovs-vsctl', '--']
|
||||||
|
if not exclusive:
|
||||||
|
cmd.append('--may-exist')
|
||||||
|
cmd.extend(('add-port', name, port))
|
||||||
|
for ovs_table, data in (('Interface', ifdata), ('Port', portdata)):
|
||||||
|
if data:
|
||||||
|
for setcmd in _dict_to_vsctl_set(data, ovs_table, port):
|
||||||
|
cmd.extend(setcmd)
|
||||||
|
|
||||||
log('Adding port {} to bridge {}'.format(port, name))
|
log('Adding port {} to bridge {}'.format(port, name))
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
|
subprocess.check_call(cmd)
|
||||||
name, port])
|
if linkup:
|
||||||
subprocess.check_call(["ip", "link", "set", port, "up"])
|
# This is mostly a workaround for CI environments; in the real world
|
||||||
|
# the bare metal provider would most likely have configured and brought
|
||||||
|
# up the link for us.
|
||||||
|
subprocess.check_call(["ip", "link", "set", port, "up"])
|
||||||
if promisc:
|
if promisc:
|
||||||
subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
|
subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
|
||||||
else:
|
elif promisc is False:
|
||||||
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
|
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
|
||||||
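Note: a minimal usage sketch for the extended ``add_bridge_port`` signature; the
bridge, port and column values are hypothetical:

    # exclusive=False (the default) keeps the call idempotent via --may-exist,
    # linkup=True additionally runs 'ip link set eth2 up'.
    add_bridge_port(
        'br-data', 'eth2',
        ifdata={'external-ids': {'charm': 'managed'}},
        portdata={'external-ids': {'charm': 'managed'}},
        linkup=True)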
|
|
||||||
|
|
||||||
def del_bridge_port(name, port):
|
def del_bridge_port(name, port):
|
||||||
''' Delete a port from the named openvswitch bridge '''
|
"""Delete a port from the named openvswitch bridge
|
||||||
|
|
||||||
|
:param name: Name of bridge to remove port from
|
||||||
|
:type name: str
|
||||||
|
:param port: Name of port to remove
|
||||||
|
:type port: str
|
||||||
|
:raises: subprocess.CalledProcessError
|
||||||
|
"""
|
||||||
log('Deleting port {} from bridge {}'.format(port, name))
|
log('Deleting port {} from bridge {}'.format(port, name))
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
|
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
|
||||||
name, port])
|
name, port])
|
||||||
@ -120,12 +262,88 @@ def del_bridge_port(name, port):
|
|||||||
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
|
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
|
||||||
|
|
||||||
|
|
||||||
def add_ovsbridge_linuxbridge(name, bridge):
|
def add_bridge_bond(bridge, port, interfaces, portdata=None, ifdatamap=None,
|
||||||
''' Add linux bridge to the named openvswitch bridge
|
exclusive=False):
|
||||||
|
"""Add bonded port in bridge from interfaces.
|
||||||
|
|
||||||
|
:param bridge: Name of bridge to add bonded port to
|
||||||
|
:type bridge: str
|
||||||
|
:param port: Name of created port
|
||||||
|
:type port: str
|
||||||
|
:param interfaces: Underlying interfaces that make up the bonded port
|
||||||
|
:type interfaces: Iterator[str]
|
||||||
|
:param portdata: Additional data to attach to the created bond port
|
||||||
|
See _dict_to_vsctl_set() for detailed description.
|
||||||
|
Example:
|
||||||
|
{
|
||||||
|
'bond-mode': 'balance-tcp',
|
||||||
|
'lacp': 'active',
|
||||||
|
'other-config': {
|
||||||
|
'lacp-time': 'fast',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
:type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
|
||||||
|
:param ifdatamap: Map of data to attach to created bond interfaces
|
||||||
|
See _dict_to_vsctl_set() for detailed description.
|
||||||
|
Example:
|
||||||
|
{
|
||||||
|
'eth0': {
|
||||||
|
'type': 'dpdk',
|
||||||
|
'mtu-request': '9000',
|
||||||
|
'options': {
|
||||||
|
'dpdk-devargs': '0000:01:00.0',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
:type ifdatamap: Optional[Dict[str,Dict[str,Union[str,Dict[str,str]]]]]
|
||||||
|
:param exclusive: If True, raise exception if port exists
|
||||||
|
:type exclusive: bool
|
||||||
|
:raises: subprocess.CalledProcessError
|
||||||
|
"""
|
||||||
|
cmd = ['ovs-vsctl', '--']
|
||||||
|
if not exclusive:
|
||||||
|
cmd.append('--may-exist')
|
||||||
|
cmd.extend(('add-bond', bridge, port))
|
||||||
|
cmd.extend(interfaces)
|
||||||
|
if portdata:
|
||||||
|
for setcmd in _dict_to_vsctl_set(portdata, 'port', port):
|
||||||
|
cmd.extend(setcmd)
|
||||||
|
if ifdatamap:
|
||||||
|
for ifname, ifdata in ifdatamap.items():
|
||||||
|
for setcmd in _dict_to_vsctl_set(ifdata, 'Interface', ifname):
|
||||||
|
cmd.extend(setcmd)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
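Note: pulling the docstring examples together, an ``add_bridge_bond`` call could
look like the sketch below; bridge, bond, interface names and PCI addresses are
illustrative only:

    add_bridge_bond(
        'br-ex', 'dpdk-bond0', ['eth0', 'eth1'],
        portdata={
            'bond-mode': 'balance-tcp',
            'lacp': 'active',
            'other-config': {'lacp-time': 'fast'},
        },
        ifdatamap={
            'eth0': {'type': 'dpdk', 'mtu-request': '9000',
                     'options': {'dpdk-devargs': '0000:01:00.0'}},
            'eth1': {'type': 'dpdk', 'mtu-request': '9000',
                     'options': {'dpdk-devargs': '0000:02:00.0'}},
        })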
|
|
||||||
|
|
||||||
|
def add_ovsbridge_linuxbridge(name, bridge, ifdata=None):
|
||||||
|
"""Add linux bridge to the named openvswitch bridge
|
||||||
|
|
||||||
:param name: Name of ovs bridge to be added to Linux bridge
|
:param name: Name of ovs bridge to be added to Linux bridge
|
||||||
|
:type name: str
|
||||||
:param bridge: Name of Linux bridge to be added to ovs bridge
|
:param bridge: Name of Linux bridge to be added to ovs bridge
|
||||||
:returns: True if veth is added between ovs bridge and linux bridge,
|
:type name: str
|
||||||
False otherwise'''
|
:param ifdata: Additional data to attach to interface
|
||||||
|
The keys in the ifdata dictionary map directly to column names in the
|
||||||
|
OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in
|
||||||
|
RFC 7047 [1]
|
||||||
|
|
||||||
|
There are some established conventions for keys in the external-ids
|
||||||
|
column of various tables, consult the OVS Integration Guide [2] for
|
||||||
|
more details.
|
||||||
|
|
||||||
|
NOTE(fnordahl): Technically the ``external-ids`` column is called
|
||||||
|
``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
|
||||||
|
behaviour of transforming dashes to underscores for us [3] so we can
|
||||||
|
have a more pleasant data structure.
|
||||||
|
|
||||||
|
0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
|
||||||
|
1: https://tools.ietf.org/html/rfc7047
|
||||||
|
2: http://docs.openvswitch.org/en/latest/topics/integration/
|
||||||
|
3: https://github.com/openvswitch/ovs/blob/
|
||||||
|
20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
|
||||||
|
lib/db-ctl-base.c#L189-L215
|
||||||
|
:type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
|
||||||
|
"""
|
||||||
try:
|
try:
|
||||||
import netifaces
|
import netifaces
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@@ -177,7 +395,7 @@ def add_ovsbridge_linuxbridge(name, bridge):
             bridge=bridge))

     subprocess.check_call(["ifup", linuxbridge_port])
-    add_bridge_port(name, linuxbridge_port)
+    add_bridge_port(name, linuxbridge_port, ifdata=ifdata)


 def is_linuxbridge_interface(port):
@@ -303,3 +521,21 @@ def port_to_br(port):
         ).decode('UTF-8').strip()
     except subprocess.CalledProcessError:
         return None
+
+
+def ovs_appctl(target, args):
+    """Run `ovs-appctl` for target with args and return output.
+
+    :param target: Name of daemon to contact. Unless target begins with '/',
+                   `ovs-appctl` looks for a pidfile and will build the path to
+                   a /var/run/openvswitch/target.pid.ctl for you.
+    :type target: str
+    :param args: Command and arguments to pass to `ovs-appctl`
+    :type args: Tuple[str, ...]
+    :returns: Output from command
+    :rtype: str
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ovs-appctl', '-t', target]
+    cmd.extend(args)
+    return subprocess.check_output(cmd, universal_newlines=True)
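Note: a short usage sketch for ``ovs_appctl``; the daemon and command shown are
just one plausible combination:

    # Returns the command output as text, raises CalledProcessError on failure.
    output = ovs_appctl('ovs-vswitchd', ('coverage/show',))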
hooks/charmhelpers/contrib/network/ovs/ovn.py (new file, 230 lines)
@@ -0,0 +1,230 @@
|
# Copyright 2019 Canonical Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from . import utils
|
||||||
|
|
||||||
|
|
||||||
|
OVN_RUNDIR = '/var/run/ovn'
|
||||||
|
OVN_SYSCONFDIR = '/etc/ovn'
|
||||||
|
|
||||||
|
|
||||||
|
def ovn_appctl(target, args, rundir=None, use_ovs_appctl=False):
|
||||||
|
"""Run ovn/ovs-appctl for target with args and return output.
|
||||||
|
|
||||||
|
:param target: Name of daemon to contact. Unless target begins with '/',
|
||||||
|
`ovn-appctl` looks for a pidfile and will build the path to
|
||||||
|
a /var/run/ovn/target.pid.ctl for you.
|
||||||
|
:type target: str
|
||||||
|
:param args: Command and arguments to pass to `ovn-appctl`
|
||||||
|
:type args: Tuple[str, ...]
|
||||||
|
:param rundir: Override path to sockets
|
||||||
|
:type rundir: Optional[str]
|
||||||
|
:param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03,
|
||||||
|
set this to True to use ``ovs-appctl`` instead.
|
||||||
|
:type use_ovs_appctl: bool
|
||||||
|
:returns: Output from command
|
||||||
|
:rtype: str
|
||||||
|
:raises: subprocess.CalledProcessError
|
||||||
|
"""
|
||||||
|
# NOTE(fnordahl): The ovsdb-server processes for the OVN databases use a
|
||||||
|
# non-standard naming scheme for their daemon control socket and we need
|
||||||
|
# to pass the full path to the socket.
|
||||||
|
if target in ('ovnnb_db', 'ovnsb_db',):
|
||||||
|
target = os.path.join(rundir or OVN_RUNDIR, target + '.ctl')
|
||||||
|
|
||||||
|
if use_ovs_appctl:
|
||||||
|
tool = 'ovs-appctl'
|
||||||
|
else:
|
||||||
|
tool = 'ovn-appctl'
|
||||||
|
|
||||||
|
return utils._run(tool, '-t', target, *args)
|
||||||
|
|
||||||
|
|
||||||
|
class OVNClusterStatus(object):
|
||||||
|
|
||||||
|
def __init__(self, name, cluster_id, server_id, address, status, role,
|
||||||
|
term, leader, vote, election_timer, log,
|
||||||
|
entries_not_yet_committed, entries_not_yet_applied,
|
||||||
|
connections, servers):
|
||||||
|
"""Initialize and populate OVNClusterStatus object.
|
||||||
|
|
||||||
|
Use class initializer so we can define types in a compatible manner.
|
||||||
|
|
||||||
|
:param name: Name of schema used for database
|
||||||
|
:type name: str
|
||||||
|
:param cluster_id: UUID of cluster
|
||||||
|
:type cluster_id: uuid.UUID
|
||||||
|
:param server_id: UUID of server
|
||||||
|
:type server_id: uuid.UUID
|
||||||
|
:param address: OVSDB connection method
|
||||||
|
:type address: str
|
||||||
|
:param status: Status text
|
||||||
|
:type status: str
|
||||||
|
:param role: Role of server
|
||||||
|
:type role: str
|
||||||
|
:param term: Election term
|
||||||
|
:type term: int
|
||||||
|
:param leader: Short form UUID of leader
|
||||||
|
:type leader: str
|
||||||
|
:param vote: Vote
|
||||||
|
:type vote: str
|
||||||
|
:param election_timer: Current value of election timer
|
||||||
|
:type election_timer: int
|
||||||
|
:param log: Log
|
||||||
|
:type log: str
|
||||||
|
:param entries_not_yet_committed: Entries not yet committed
|
||||||
|
:type entries_not_yet_committed: int
|
||||||
|
:param entries_not_yet_applied: Entries not yet applied
|
||||||
|
:type entries_not_yet_applied: int
|
||||||
|
:param connections: Connections
|
||||||
|
:type connections: str
|
||||||
|
:param servers: Servers in the cluster
|
||||||
|
[('0ea6', 'ssl:192.0.2.42:6643')]
|
||||||
|
:type servers: List[Tuple[str,str]]
|
||||||
|
"""
|
||||||
|
self.name = name
|
||||||
|
self.cluster_id = cluster_id
|
||||||
|
self.server_id = server_id
|
||||||
|
self.address = address
|
||||||
|
self.status = status
|
||||||
|
self.role = role
|
||||||
|
self.term = term
|
||||||
|
self.leader = leader
|
||||||
|
self.vote = vote
|
||||||
|
self.election_timer = election_timer
|
||||||
|
self.log = log
|
||||||
|
self.entries_not_yet_committed = entries_not_yet_committed
|
||||||
|
self.entries_not_yet_applied = entries_not_yet_applied
|
||||||
|
self.connections = connections
|
||||||
|
self.servers = servers
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
return (
|
||||||
|
self.name == other.name and
|
||||||
|
self.cluster_id == other.cluster_id and
|
||||||
|
self.server_id == other.server_id and
|
||||||
|
self.address == other.address and
|
||||||
|
self.status == other.status and
|
||||||
|
self.role == other.role and
|
||||||
|
self.term == other.term and
|
||||||
|
self.leader == other.leader and
|
||||||
|
self.vote == other.vote and
|
||||||
|
self.election_timer == other.election_timer and
|
||||||
|
self.log == other.log and
|
||||||
|
self.entries_not_yet_committed == other.entries_not_yet_committed and
|
||||||
|
self.entries_not_yet_applied == other.entries_not_yet_applied and
|
||||||
|
self.connections == other.connections and
|
||||||
|
self.servers == other.servers)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_cluster_leader(self):
|
||||||
|
"""Retrieve status information from clustered OVSDB.
|
||||||
|
|
||||||
|
:returns: Whether target is cluster leader
|
||||||
|
:rtype: bool
|
||||||
|
"""
|
||||||
|
return self.leader == 'self'
|
||||||
|
|
||||||
|
|
||||||
|
def cluster_status(target, schema=None, use_ovs_appctl=False):
|
||||||
|
"""Retrieve status information from clustered OVSDB.
|
||||||
|
|
||||||
|
:param target: Usually one of 'ovsdb-server', 'ovnnb_db', 'ovnsb_db', can
|
||||||
|
also be full path to control socket.
|
||||||
|
:type target: str
|
||||||
|
:param schema: Database schema name, deduced from target if not provided
|
||||||
|
:type schema: Optional[str]
|
||||||
|
:param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03,
|
||||||
|
set this to True to use ``ovs-appctl`` instead.
|
||||||
|
:type use_ovs_appctl: bool
|
||||||
|
:returns: cluster status data object
|
||||||
|
:rtype: OVNClusterStatus
|
||||||
|
:raises: subprocess.CalledProcessError, KeyError, RuntimeError
|
||||||
|
"""
|
||||||
|
schema_map = {
|
||||||
|
'ovnnb_db': 'OVN_Northbound',
|
||||||
|
'ovnsb_db': 'OVN_Southbound',
|
||||||
|
}
|
||||||
|
if schema and schema not in schema_map.keys():
|
||||||
|
raise RuntimeError('Unknown schema provided: "{}"'.format(schema))
|
||||||
|
|
||||||
|
status = {}
|
||||||
|
k = ''
|
||||||
|
for line in ovn_appctl(target, 'cluster/status',
|
||||||
|
schema or schema_map[target],
|
||||||
|
use_ovs_appctl=use_ovs_appctl).splitlines():
|
||||||
|
if k and line.startswith(' '):
|
||||||
|
# there is no key, which means this is an instance of a multi-line/
|
||||||
|
# multi-value item, populate the List which is already stored under
|
||||||
|
# the key.
|
||||||
|
if k == 'servers':
|
||||||
|
status[k].append(
|
||||||
|
tuple(line.replace(')', '').lstrip().split()[0:4:3]))
|
||||||
|
else:
|
||||||
|
status[k].append(line.lstrip())
|
||||||
|
elif ':' in line:
|
||||||
|
# this is a line with a key
|
||||||
|
k, v = line.split(':', 1)
|
||||||
|
k = k.lower()
|
||||||
|
k = k.replace(' ', '_')
|
||||||
|
if v:
|
||||||
|
# this is a line with both key and value
|
||||||
|
if k in ('cluster_id', 'server_id',):
|
||||||
|
v = v.replace('(', '')
|
||||||
|
v = v.replace(')', '')
|
||||||
|
status[k] = tuple(v.split())
|
||||||
|
else:
|
||||||
|
status[k] = v.lstrip()
|
||||||
|
else:
|
||||||
|
# this is a line with only key which means a multi-line/
|
||||||
|
# multi-value item. Store key as List which will be
|
||||||
|
# populated on subsequent iterations.
|
||||||
|
status[k] = []
|
||||||
|
return OVNClusterStatus(
|
||||||
|
status['name'],
|
||||||
|
uuid.UUID(status['cluster_id'][1]),
|
||||||
|
uuid.UUID(status['server_id'][1]),
|
||||||
|
status['address'],
|
||||||
|
status['status'],
|
||||||
|
status['role'],
|
||||||
|
int(status['term']),
|
||||||
|
status['leader'],
|
||||||
|
status['vote'],
|
||||||
|
int(status['election_timer']),
|
||||||
|
status['log'],
|
||||||
|
int(status['entries_not_yet_committed']),
|
||||||
|
int(status['entries_not_yet_applied']),
|
||||||
|
status['connections'],
|
||||||
|
status['servers'])
|
||||||
|
|
||||||
|
|
||||||
|
def is_northd_active():
|
||||||
|
"""Query `ovn-northd` for active status.
|
||||||
|
|
||||||
|
Note that the active status information for ovn-northd is available for
|
||||||
|
OVN 20.03 and onward.
|
||||||
|
|
||||||
|
:returns: True if local `ovn-northd` instance is active, False otherwise
|
||||||
|
:rtype: bool
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
for line in ovn_appctl('ovn-northd', 'status').splitlines():
|
||||||
|
if line.startswith('Status:') and 'active' in line:
|
||||||
|
return True
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
pass
|
||||||
|
return False
|
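Note: a brief usage sketch for the helpers above, assuming a clustered OVN
Northbound database is running on the local unit:

    nb_status = cluster_status('ovnnb_db')
    if nb_status.is_cluster_leader:
        print('this unit leads cluster', nb_status.cluster_id)
    if is_northd_active():
        print('local ovn-northd is the active instance')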
hooks/charmhelpers/contrib/network/ovs/ovsdb.py (new file, 206 lines)
@@ -0,0 +1,206 @@
|
# Copyright 2019 Canonical Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import json
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from . import utils
|
||||||
|
|
||||||
|
|
||||||
|
class SimpleOVSDB(object):
|
||||||
|
"""Simple interface to OVSDB through the use of command line tools.
|
||||||
|
|
||||||
|
OVS and OVN are managed through a set of databases. These databases have
|
||||||
|
similar command line tools to manage them. We make use of the similarity
|
||||||
|
to provide a generic class that can be used to manage them.
|
||||||
|
|
||||||
|
The OpenvSwitch project does provide a Python API, but on the surface it
|
||||||
|
appears to be a bit too involved for our simple use case.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
sbdb = SimpleOVSDB('ovn-sbctl')
|
||||||
|
for chs in sbdb.chassis:
|
||||||
|
print(chs)
|
||||||
|
|
||||||
|
ovsdb = SimpleOVSDB('ovs-vsctl')
|
||||||
|
for br in ovsdb.bridge:
|
||||||
|
if br['name'] == 'br-test':
|
||||||
|
ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed')
|
||||||
|
"""
|
||||||
|
|
||||||
|
# For validation we keep a complete map of currently known good tool and
|
||||||
|
# table combinations. This requires maintenance down the line whenever
|
||||||
|
# upstream adds things that downstream wants, and the cost of maintaining
|
||||||
|
# that will most likely be lower than the cost of finding the needle in
|
||||||
|
# the haystack whenever downstream code misspells something.
|
||||||
|
_tool_table_map = {
|
||||||
|
'ovs-vsctl': (
|
||||||
|
'autoattach',
|
||||||
|
'bridge',
|
||||||
|
'ct_timeout_policy',
|
||||||
|
'ct_zone',
|
||||||
|
'controller',
|
||||||
|
'datapath',
|
||||||
|
'flow_sample_collector_set',
|
||||||
|
'flow_table',
|
||||||
|
'ipfix',
|
||||||
|
'interface',
|
||||||
|
'manager',
|
||||||
|
'mirror',
|
||||||
|
'netflow',
|
||||||
|
'open_vswitch',
|
||||||
|
'port',
|
||||||
|
'qos',
|
||||||
|
'queue',
|
||||||
|
'ssl',
|
||||||
|
'sflow',
|
||||||
|
),
|
||||||
|
'ovn-nbctl': (
|
||||||
|
'acl',
|
||||||
|
'address_set',
|
||||||
|
'connection',
|
||||||
|
'dhcp_options',
|
||||||
|
'dns',
|
||||||
|
'forwarding_group',
|
||||||
|
'gateway_chassis',
|
||||||
|
'ha_chassis',
|
||||||
|
'ha_chassis_group',
|
||||||
|
'load_balancer',
|
||||||
|
'load_balancer_health_check',
|
||||||
|
'logical_router',
|
||||||
|
'logical_router_policy',
|
||||||
|
'logical_router_port',
|
||||||
|
'logical_router_static_route',
|
||||||
|
'logical_switch',
|
||||||
|
'logical_switch_port',
|
||||||
|
'meter',
|
||||||
|
'meter_band',
|
||||||
|
'nat',
|
||||||
|
'nb_global',
|
||||||
|
'port_group',
|
||||||
|
'qos',
|
||||||
|
'ssl',
|
||||||
|
),
|
||||||
|
'ovn-sbctl': (
|
||||||
|
'address_set',
|
||||||
|
'chassis',
|
||||||
|
'connection',
|
||||||
|
'controller_event',
|
||||||
|
'dhcp_options',
|
||||||
|
'dhcpv6_options',
|
||||||
|
'dns',
|
||||||
|
'datapath_binding',
|
||||||
|
'encap',
|
||||||
|
'gateway_chassis',
|
||||||
|
'ha_chassis',
|
||||||
|
'ha_chassis_group',
|
||||||
|
'igmp_group',
|
||||||
|
'ip_multicast',
|
||||||
|
'logical_flow',
|
||||||
|
'mac_binding',
|
||||||
|
'meter',
|
||||||
|
'meter_band',
|
||||||
|
'multicast_group',
|
||||||
|
'port_binding',
|
||||||
|
'port_group',
|
||||||
|
'rbac_permission',
|
||||||
|
'rbac_role',
|
||||||
|
'sb_global',
|
||||||
|
'ssl',
|
||||||
|
'service_monitor',
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, tool):
|
||||||
|
"""SimpleOVSDB constructor.
|
||||||
|
|
||||||
|
:param tool: Which tool with database commands to operate on.
|
||||||
|
Usually one of `ovs-vsctl`, `ovn-nbctl`, `ovn-sbctl`
|
||||||
|
:type tool: str
|
||||||
|
"""
|
||||||
|
if tool not in self._tool_table_map:
|
||||||
|
raise RuntimeError(
|
||||||
|
'tool must be one of "{}"'.format(self._tool_table_map.keys()))
|
||||||
|
self._tool = tool
|
||||||
|
|
||||||
|
def __getattr__(self, table):
|
||||||
|
if table not in self._tool_table_map[self._tool]:
|
||||||
|
raise AttributeError(
|
||||||
|
'table "{}" not known for use with "{}"'
|
||||||
|
.format(table, self._tool))
|
||||||
|
return self.Table(self._tool, table)
|
||||||
|
|
||||||
|
class Table(object):
|
||||||
|
"""Methods to interact with contents of OVSDB tables.
|
||||||
|
|
||||||
|
NOTE: At the time of this writing ``find`` is the only command
|
||||||
|
line argument to OVSDB manipulating tools that actually supports
|
||||||
|
JSON output.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, tool, table):
|
||||||
|
"""SimpleOVSDBTable constructor.
|
||||||
|
|
||||||
|
:param table: Which table to operate on
|
||||||
|
:type table: str
|
||||||
|
"""
|
||||||
|
self._tool = tool
|
||||||
|
self._table = table
|
||||||
|
|
||||||
|
def _find_tbl(self, condition=None):
|
||||||
|
"""Run and parse output of OVSDB `find` command.
|
||||||
|
|
||||||
|
:param condition: An optional RFC 7047 5.1 match condition
|
||||||
|
:type condition: Optional[str]
|
||||||
|
:returns: Dictionary with data
|
||||||
|
:rtype: Dict[str, any]
|
||||||
|
"""
|
||||||
|
# When using json formatted output to OVS commands Internal OVSDB
|
||||||
|
# notation may occur that require further deserializing.
|
||||||
|
# Reference: https://tools.ietf.org/html/rfc7047#section-5.1
|
||||||
|
ovs_type_cb_map = {
|
||||||
|
'uuid': uuid.UUID,
|
||||||
|
# FIXME sets also appear to sometimes contain type/value tuples
|
||||||
|
'set': list,
|
||||||
|
'map': dict,
|
||||||
|
}
|
||||||
|
cmd = [self._tool, '-f', 'json', 'find', self._table]
|
||||||
|
if condition:
|
||||||
|
cmd.append(condition)
|
||||||
|
output = utils._run(*cmd)
|
||||||
|
data = json.loads(output)
|
||||||
|
for row in data['data']:
|
||||||
|
values = []
|
||||||
|
for col in row:
|
||||||
|
if isinstance(col, list):
|
||||||
|
f = ovs_type_cb_map.get(col[0], str)
|
||||||
|
values.append(f(col[1]))
|
||||||
|
else:
|
||||||
|
values.append(col)
|
||||||
|
yield dict(zip(data['headings'], values))
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
return self._find_tbl()
|
||||||
|
|
||||||
|
def clear(self, rec, col):
|
||||||
|
utils._run(self._tool, 'clear', self._table, rec, col)
|
||||||
|
|
||||||
|
def find(self, condition):
|
||||||
|
return self._find_tbl(condition=condition)
|
||||||
|
|
||||||
|
def remove(self, rec, col, value):
|
||||||
|
utils._run(self._tool, 'remove', self._table, rec, col, value)
|
||||||
|
|
||||||
|
def set(self, rec, col, value):
|
||||||
|
utils._run(self._tool, 'set', self._table, rec,
|
||||||
|
'{}={}'.format(col, value))
|
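Note: building on the class docstring, ``find`` takes an RFC 7047 style match
condition; the bridge filter below is illustrative only:

    ovsdb = SimpleOVSDB('ovs-vsctl')
    for br in ovsdb.bridge.find('datapath_type=netdev'):
        print(br['name'], br['datapath_type'])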
hooks/charmhelpers/contrib/network/ovs/utils.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import subprocess
+
+
+def _run(*args):
+    """Run a process, check result, capture decoded output from STDOUT.
+
+    :param args: Command and arguments to run
+    :type args: Tuple[str, ...]
+    :returns: Information about the completed process
+    :rtype: str
+    :raises subprocess.CalledProcessError
+    """
+    return subprocess.check_output(args, universal_newlines=True)
@@ -13,13 +13,17 @@
 # limitations under the License.

 import collections
+import copy
+import enum
 import glob
+import hashlib
 import json
 import math
 import os
 import re
 import socket
 import time

 from base64 import b64decode
 from subprocess import check_call, CalledProcessError

@@ -50,7 +54,8 @@ from charmhelpers.core.hookenv import (
     INFO,
     ERROR,
     status_set,
-    network_get_primary_address
+    network_get_primary_address,
+    WARNING,
 )

 from charmhelpers.core.sysctl import create as sysctl_create
@@ -110,6 +115,13 @@ from charmhelpers.contrib.openstack.utils import (
 )
 from charmhelpers.core.unitdata import kv

+try:
+    from sriov_netplan_shim import pci
+except ImportError:
+    # The use of the function and contexts that require the pci module is
+    # optional.
+    pass
+
 try:
     import psutil
 except ImportError:
@@ -263,6 +275,12 @@ class SharedDBContext(OSContextGenerator):
                     'database_password': rdata.get(password_setting),
                     'database_type': 'mysql+pymysql'
                 }
+                # Port is being introduced with LP Bug #1876188
+                # but it is not currently required and may not be set in all
+                # cases, particularly in classic charms.
+                port = rdata.get('db_port')
+                if port:
+                    ctxt['database_port'] = port
                 if CompareOpenStackReleases(rel) < 'queens':
                     ctxt['database_type'] = 'mysql'
                 if self.context_complete(ctxt):
@@ -2396,3 +2414,734 @@ class DHCPAgentContext(OSContextGenerator):
             return False
         else:
             return _config
|
|
||||||
|
|
||||||
|
EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac'])
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_pci_from_mapping_config(config_key):
|
||||||
|
"""Resolve local PCI devices from MAC addresses in mapping config.
|
||||||
|
|
||||||
|
Note that this function keeps record of mac->PCI address lookups
|
||||||
|
in the local unit db as the devices will disappear from the system
|
||||||
|
once bound.
|
||||||
|
|
||||||
|
:param config_key: Configuration option key to parse data from
|
||||||
|
:type config_key: str
|
||||||
|
:returns: PCI device address to Tuple(entity, mac) map
|
||||||
|
:rtype: collections.OrderedDict[str,Tuple[str,str]]
|
||||||
|
"""
|
||||||
|
devices = pci.PCINetDevices()
|
||||||
|
resolved_devices = collections.OrderedDict()
|
||||||
|
db = kv()
|
||||||
|
# Note that ``parse_data_port_mappings`` returns Dict regardless of input
|
||||||
|
for mac, entity in parse_data_port_mappings(config(config_key)).items():
|
||||||
|
pcidev = devices.get_device_from_mac(mac)
|
||||||
|
if pcidev:
|
||||||
|
# NOTE: store mac->pci allocation as post binding
|
||||||
|
# it disappears from PCIDevices.
|
||||||
|
db.set(mac, pcidev.pci_address)
|
||||||
|
db.flush()
|
||||||
|
|
||||||
|
pci_address = db.get(mac)
|
||||||
|
if pci_address:
|
||||||
|
resolved_devices[pci_address] = EntityMac(entity, mac)
|
||||||
|
|
||||||
|
return resolved_devices
|
||||||
|
|
||||||
|
|
||||||
|
class DPDKDeviceContext(OSContextGenerator):
|
||||||
|
|
||||||
|
def __init__(self, driver_key=None, bridges_key=None, bonds_key=None):
|
||||||
|
"""Initialize DPDKDeviceContext.
|
||||||
|
|
||||||
|
:param driver_key: Key to use when retrieving driver config.
|
||||||
|
:type driver_key: str
|
||||||
|
:param bridges_key: Key to use when retrieving bridge config.
|
||||||
|
:type bridges_key: str
|
||||||
|
:param bonds_key: Key to use when retrieving bonds config.
|
||||||
|
:type bonds_key: str
|
||||||
|
"""
|
||||||
|
self.driver_key = driver_key or 'dpdk-driver'
|
||||||
|
self.bridges_key = bridges_key or 'data-port'
|
||||||
|
self.bonds_key = bonds_key or 'dpdk-bond-mappings'
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
"""Populate context.
|
||||||
|
|
||||||
|
:returns: context
|
||||||
|
:rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]]
|
||||||
|
"""
|
||||||
|
driver = config(self.driver_key)
|
||||||
|
if driver is None:
|
||||||
|
return {}
|
||||||
|
# Resolve PCI devices for both directly used devices (_bridges)
|
||||||
|
# and devices for use in dpdk bonds (_bonds)
|
||||||
|
pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
|
||||||
|
pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
|
||||||
|
return {'devices': pci_devices,
|
||||||
|
'driver': driver}
|
||||||
|
|
||||||
|
|
||||||
|
class OVSDPDKDeviceContext(OSContextGenerator):
|
||||||
|
|
||||||
|
def __init__(self, bridges_key=None, bonds_key=None):
|
||||||
|
"""Initialize OVSDPDKDeviceContext.
|
||||||
|
|
||||||
|
:param bridges_key: Key to use when retrieving bridge config.
|
||||||
|
:type bridges_key: str
|
||||||
|
:param bonds_key: Key to use when retrieving bonds config.
|
||||||
|
:type bonds_key: str
|
||||||
|
"""
|
||||||
|
self.bridges_key = bridges_key or 'data-port'
|
||||||
|
self.bonds_key = bonds_key or 'dpdk-bond-mappings'
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _parse_cpu_list(cpulist):
|
||||||
|
"""Parses a linux cpulist for a numa node
|
||||||
|
|
||||||
|
:returns: list of cores
|
||||||
|
:rtype: List[int]
|
||||||
|
"""
|
||||||
|
cores = []
|
||||||
|
ranges = cpulist.split(',')
|
||||||
|
for cpu_range in ranges:
|
||||||
|
if "-" in cpu_range:
|
||||||
|
cpu_min_max = cpu_range.split('-')
|
||||||
|
cores += range(int(cpu_min_max[0]),
|
||||||
|
int(cpu_min_max[1]) + 1)
|
||||||
|
else:
|
||||||
|
cores.append(int(cpu_range))
|
||||||
|
return cores
|
||||||
|
|
||||||
|
def _numa_node_cores(self):
|
||||||
|
"""Get map of numa node -> cpu core
|
||||||
|
|
||||||
|
:returns: map of numa node -> cpu core
|
||||||
|
:rtype: Dict[str,List[int]]
|
||||||
|
"""
|
||||||
|
nodes = {}
|
||||||
|
node_regex = '/sys/devices/system/node/node*'
|
||||||
|
for node in glob.glob(node_regex):
|
||||||
|
index = node.lstrip('/sys/devices/system/node/node')
|
||||||
|
with open(os.path.join(node, 'cpulist')) as cpulist:
|
||||||
|
nodes[index] = self._parse_cpu_list(cpulist.read().strip())
|
||||||
|
return nodes
|
||||||
|
|
||||||
|
def cpu_mask(self):
|
||||||
|
"""Get hex formatted CPU mask
|
||||||
|
|
||||||
|
The mask is based on using the first config:dpdk-socket-cores
|
||||||
|
cores of each NUMA node in the unit.
|
||||||
|
:returns: hex formatted CPU mask
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
num_cores = config('dpdk-socket-cores')
|
||||||
|
mask = 0
|
||||||
|
for cores in self._numa_node_cores().values():
|
||||||
|
for core in cores[:num_cores]:
|
||||||
|
mask = mask | 1 << core
|
||||||
|
return format(mask, '#04x')
|
||||||
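Note: to make the mask arithmetic concrete, the same computation is inlined
below for a hypothetical machine with two NUMA nodes and dpdk-socket-cores=1:

    numa_cores = {'0': [0, 1, 2, 3], '1': [4, 5, 6, 7]}  # hypothetical layout
    num_cores = 1                                        # dpdk-socket-cores
    mask = 0
    for cores in numa_cores.values():
        for core in cores[:num_cores]:
            mask |= 1 << core
    print(format(mask, '#04x'))  # 0x11: core 0 on node 0, core 4 on node 1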
|
|
||||||
|
def socket_memory(self):
|
||||||
|
"""Formatted list of socket memory configuration per NUMA node
|
||||||
|
|
||||||
|
:returns: socket memory configuration per NUMA node
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
sm_size = config('dpdk-socket-memory')
|
||||||
|
node_regex = '/sys/devices/system/node/node*'
|
||||||
|
mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
|
||||||
|
if mem_list:
|
||||||
|
return ','.join(mem_list)
|
||||||
|
else:
|
||||||
|
return str(sm_size)
|
||||||
|
|
||||||
|
def devices(self):
|
||||||
|
"""List of PCI devices for use by DPDK
|
||||||
|
|
||||||
|
:returns: List of PCI devices for use by DPDK
|
||||||
|
:rtype: collections.OrderedDict[str,str]
|
||||||
|
"""
|
||||||
|
pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
|
||||||
|
pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
|
||||||
|
return pci_devices
|
||||||
|
|
||||||
|
def _formatted_whitelist(self, flag):
|
||||||
|
"""Flag formatted list of devices to whitelist
|
||||||
|
|
||||||
|
:param flag: flag format to use
|
||||||
|
:type flag: str
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
whitelist = []
|
||||||
|
for device in self.devices():
|
||||||
|
whitelist.append(flag.format(device=device))
|
||||||
|
return ' '.join(whitelist)
|
||||||
|
|
||||||
|
def device_whitelist(self):
|
||||||
|
"""Formatted list of devices to whitelist for dpdk
|
||||||
|
|
||||||
|
using the old style '-w' flag
|
||||||
|
|
||||||
|
:returns: devices to whitelist prefixed by '-w '
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
return self._formatted_whitelist('-w {device}')
|
||||||
|
|
||||||
|
def pci_whitelist(self):
|
||||||
|
"""Formatted list of devices to whitelist for dpdk
|
||||||
|
|
||||||
|
using the new style '--pci-whitelist' flag
|
||||||
|
|
||||||
|
:returns: devices to whitelist prefixed by '--pci-whitelist '
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
return self._formatted_whitelist('--pci-whitelist {device}')
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
"""Populate context.
|
||||||
|
|
||||||
|
:returns: context
|
||||||
|
:rtype: Dict[str,Union[bool,str]]
|
||||||
|
"""
|
||||||
|
ctxt = {}
|
||||||
|
whitelist = self.device_whitelist()
|
||||||
|
if whitelist:
|
||||||
|
ctxt['dpdk_enabled'] = config('enable-dpdk')
|
||||||
|
ctxt['device_whitelist'] = self.device_whitelist()
|
||||||
|
ctxt['socket_memory'] = self.socket_memory()
|
||||||
|
ctxt['cpu_mask'] = self.cpu_mask()
|
||||||
|
return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class BridgePortInterfaceMap(object):
|
||||||
|
"""Build a map of bridge ports and interaces from charm configuration.
|
||||||
|
|
||||||
|
NOTE: the handling of this detail in the charm is pre-deprecated.
|
||||||
|
|
||||||
|
The long term goal is for network connectivity detail to be modelled in
|
||||||
|
the server provisioning layer (such as MAAS) which in turn will provide
|
||||||
|
a Netplan YAML description that will be used to drive Open vSwitch.
|
||||||
|
|
||||||
|
Until we get to that reality the charm will need to configure this
|
||||||
|
detail based on application level configuration options.
|
||||||
|
|
||||||
|
There is an established way of mapping interfaces to ports and bridges
|
||||||
|
in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we
|
||||||
|
will carry that forward.
|
||||||
|
|
||||||
|
The relationship between bridge, port and interface(s).
|
||||||
|
+--------+
|
||||||
|
| bridge |
|
||||||
|
+--------+
|
||||||
|
|
|
||||||
|
+----------------+
|
||||||
|
| port aka. bond |
|
||||||
|
+----------------+
|
||||||
|
| |
|
||||||
|
+-+ +-+
|
||||||
|
|i| |i|
|
||||||
|
|n| |n|
|
||||||
|
|t| |t|
|
||||||
|
|0| |N|
|
||||||
|
+-+ +-+
|
||||||
|
"""
|
||||||
|
class interface_type(enum.Enum):
|
||||||
|
"""Supported interface types.
|
||||||
|
|
||||||
|
Supported interface types can be found in the ``iface_types`` column
|
||||||
|
in the ``Open_vSwitch`` table on a running system.
|
||||||
|
"""
|
||||||
|
dpdk = 'dpdk'
|
||||||
|
internal = 'internal'
|
||||||
|
system = 'system'
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
"""Return string representation of value.
|
||||||
|
|
||||||
|
:returns: string representation of value.
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
return self.value
|
||||||
|
|
||||||
|
def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None,
|
||||||
|
global_mtu=None):
|
||||||
|
"""Initialize map.
|
||||||
|
|
||||||
|
:param bridges_key: Name of bridge:interface/port map config key
|
||||||
|
(default: 'data-port')
|
||||||
|
:type bridges_key: Optional[str]
|
||||||
|
:param bonds_key: Name of port-name:interface map config key
|
||||||
|
(default: 'dpdk-bond-mappings')
|
||||||
|
:type bonds_key: Optional[str]
|
||||||
|
:param enable_dpdk_key: Name of DPDK toggle config key
|
||||||
|
(default: 'enable-dpdk')
|
||||||
|
:type enable_dpdk_key: Optional[str]
|
||||||
|
:param global_mtu: Set a MTU on all interfaces at map initialization.
|
||||||
|
|
||||||
|
The default is to have Open vSwitch get this from the underlying
|
||||||
|
interface as set up by bare metal provisioning.
|
||||||
|
|
||||||
|
Note that you can augment the MTU on an individual interface basis
|
||||||
|
like this:
|
||||||
|
|
||||||
|
ifdatamap = bpi.get_ifdatamap(bridge, port)
|
||||||
|
ifdatamap = {
|
||||||
|
port: {
|
||||||
|
**ifdata,
|
||||||
|
**{'mtu-request': my_individual_mtu_map[port]},
|
||||||
|
}
|
||||||
|
for port, ifdata in ifdatamap.items()
|
||||||
|
}
|
||||||
|
:type global_mtu: Optional[int]
|
||||||
|
"""
|
||||||
|
bridges_key = bridges_key or 'data-port'
|
||||||
|
bonds_key = bonds_key or 'dpdk-bond-mappings'
|
||||||
|
enable_dpdk_key = enable_dpdk_key or 'enable-dpdk'
|
||||||
|
self._map = collections.defaultdict(
|
||||||
|
lambda: collections.defaultdict(dict))
|
||||||
|
self._ifname_mac_map = collections.defaultdict(list)
|
||||||
|
self._mac_ifname_map = {}
|
||||||
|
self._mac_pci_address_map = {}
|
||||||
|
|
||||||
|
# First we iterate over the list of physical interfaces visible to the
|
||||||
|
# system and update interface name to mac and mac to interface name map
|
||||||
|
for ifname in list_nics():
|
||||||
|
if not is_phy_iface(ifname):
|
||||||
|
continue
|
||||||
|
mac = get_nic_hwaddr(ifname)
|
||||||
|
self._ifname_mac_map[ifname] = [mac]
|
||||||
|
self._mac_ifname_map[mac] = ifname
|
||||||
|
|
||||||
|
# In light of the pre-deprecation notice in the docstring of this
|
||||||
|
# class we will expose the ability to configure OVS bonds as a
|
||||||
|
# DPDK-only feature, but generally use the data structures internally.
|
||||||
|
if config(enable_dpdk_key):
|
||||||
|
# resolve PCI address of interfaces listed in the bridges and bonds
|
||||||
|
# charm configuration options. Note that for already bound
|
||||||
|
# interfaces the helper will retrieve MAC address from the unit
|
||||||
|
# KV store as the information is no longer available in sysfs.
|
||||||
|
_pci_bridge_mac = resolve_pci_from_mapping_config(
|
||||||
|
bridges_key)
|
||||||
|
_pci_bond_mac = resolve_pci_from_mapping_config(
|
||||||
|
bonds_key)
|
||||||
|
|
||||||
|
for pci_address, bridge_mac in _pci_bridge_mac.items():
|
||||||
|
if bridge_mac.mac in self._mac_ifname_map:
|
||||||
|
# if we already have the interface name in our map it is
|
||||||
|
# visible to the system and therefore not bound to DPDK
|
||||||
|
continue
|
||||||
|
ifname = 'dpdk-{}'.format(
|
||||||
|
hashlib.sha1(
|
||||||
|
pci_address.encode('UTF-8')).hexdigest()[:7])
|
||||||
|
self._ifname_mac_map[ifname] = [bridge_mac.mac]
|
||||||
|
self._mac_ifname_map[bridge_mac.mac] = ifname
|
||||||
|
self._mac_pci_address_map[bridge_mac.mac] = pci_address
|
||||||
|
|
||||||
|
for pci_address, bond_mac in _pci_bond_mac.items():
|
||||||
|
# for bonds we want to be able to get a list of macs from
|
||||||
|
# the bond name and also get at the interface name made up
|
||||||
|
# of the hash of the PCI address
|
||||||
|
ifname = 'dpdk-{}'.format(
|
||||||
|
hashlib.sha1(
|
||||||
|
pci_address.encode('UTF-8')).hexdigest()[:7])
|
||||||
|
self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac)
|
||||||
|
self._mac_ifname_map[bond_mac.mac] = ifname
|
||||||
|
self._mac_pci_address_map[bond_mac.mac] = pci_address
|
||||||
|
|
||||||
|
config_bridges = config(bridges_key) or ''
|
||||||
|
for bridge, ifname_or_mac in (
|
||||||
|
pair.split(':', 1)
|
||||||
|
for pair in config_bridges.split()):
|
||||||
|
if ':' in ifname_or_mac:
|
||||||
|
try:
|
||||||
|
ifname = self.ifname_from_mac(ifname_or_mac)
|
||||||
|
except KeyError:
|
||||||
|
# The interface is destined for a different unit in the
|
||||||
|
# deployment.
|
||||||
|
continue
|
||||||
|
macs = [ifname_or_mac]
|
||||||
|
else:
|
||||||
|
ifname = ifname_or_mac
|
||||||
|
macs = self.macs_from_ifname(ifname_or_mac)
|
||||||
|
|
||||||
|
portname = ifname
|
||||||
|
for mac in macs:
|
||||||
|
try:
|
||||||
|
pci_address = self.pci_address_from_mac(mac)
|
||||||
|
iftype = self.interface_type.dpdk
|
||||||
|
ifname = self.ifname_from_mac(mac)
|
||||||
|
except KeyError:
|
||||||
|
pci_address = None
|
||||||
|
iftype = self.interface_type.system
|
||||||
|
|
||||||
|
self.add_interface(
|
||||||
|
bridge, portname, ifname, iftype, pci_address, global_mtu)
|
||||||
|
|
||||||
|
def __getitem__(self, key):
|
||||||
|
"""Provide a Dict-like interface, get value of item.
|
||||||
|
|
||||||
|
:param key: Key to look up value from.
|
||||||
|
:type key: any
|
||||||
|
:returns: Value
|
||||||
|
:rtype: any
|
||||||
|
"""
|
||||||
|
return self._map.__getitem__(key)
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
"""Provide a Dict-like interface, iterate over keys.
|
||||||
|
|
||||||
|
:returns: Iterator
|
||||||
|
:rtype: Iterator[any]
|
||||||
|
"""
|
||||||
|
return self._map.__iter__()
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
"""Provide a Dict-like interface, measure the length of internal map.
|
||||||
|
|
||||||
|
:returns: Length
|
||||||
|
:rtype: int
|
||||||
|
"""
|
||||||
|
return len(self._map)
|
||||||
|
|
||||||
|
def items(self):
|
||||||
|
"""Provide a Dict-like interface, iterate over items.
|
||||||
|
|
||||||
|
:returns: Key Value pairs
|
||||||
|
:rtype: Iterator[any, any]
|
||||||
|
"""
|
||||||
|
return self._map.items()
|
||||||
|
|
||||||
|
def keys(self):
|
||||||
|
"""Provide a Dict-like interface, iterate over keys.
|
||||||
|
|
||||||
|
:returns: Iterator
|
||||||
|
:rtype: Iterator[any]
|
||||||
|
"""
|
||||||
|
return self._map.keys()
|
||||||
|
|
||||||
|
def ifname_from_mac(self, mac):
|
||||||
|
"""
|
||||||
|
:returns: Name of interface
|
||||||
|
:rtype: str
|
||||||
|
:raises: KeyError
|
||||||
|
"""
|
||||||
|
return (get_bond_master(self._mac_ifname_map[mac]) or
|
||||||
|
self._mac_ifname_map[mac])
|
||||||
|
|
||||||
|
def macs_from_ifname(self, ifname):
|
||||||
|
"""
|
||||||
|
:returns: List of hardware address (MAC) of interface
|
||||||
|
:rtype: List[str]
|
||||||
|
:raises: KeyError
|
||||||
|
"""
|
||||||
|
return self._ifname_mac_map[ifname]
|
||||||
|
|
||||||
|
def pci_address_from_mac(self, mac):
|
||||||
|
"""
|
||||||
|
:param mac: Hardware address (MAC) of interface
|
||||||
|
:type mac: str
|
||||||
|
:returns: PCI address of device associated with mac
|
||||||
|
:rtype: str
|
||||||
|
:raises: KeyError
|
||||||
|
"""
|
||||||
|
return self._mac_pci_address_map[mac]
|
||||||
|
|
||||||
|
def add_interface(self, bridge, port, ifname, iftype,
|
||||||
|
pci_address, mtu_request):
|
||||||
|
"""Add an interface to the map.
|
||||||
|
|
||||||
|
:param bridge: Name of bridge on which the bond will be added
|
||||||
|
:type bridge: str
|
||||||
|
:param port: Name of port which will represent the bond on bridge
|
||||||
|
:type port: str
|
||||||
|
:param ifname: Name of interface that will make up the bonded port
|
||||||
|
:type ifname: str
|
||||||
|
:param iftype: Type of interface
|
||||||
|
:type iftype: BridgeBondMap.interface_type
|
||||||
|
:param pci_address: PCI address of interface
|
||||||
|
:type pci_address: Optional[str]
|
||||||
|
:param mtu_request: MTU to request for interface
|
||||||
|
:type mtu_request: Optional[int]
|
||||||
|
"""
|
||||||
|
self._map[bridge][port][ifname] = {
|
||||||
|
'type': str(iftype),
|
||||||
|
}
|
||||||
|
if pci_address:
|
||||||
|
self._map[bridge][port][ifname].update({
|
||||||
|
'pci-address': pci_address,
|
||||||
|
})
|
||||||
|
if mtu_request is not None:
|
||||||
|
self._map[bridge][port][ifname].update({
|
||||||
|
'mtu-request': str(mtu_request)
|
||||||
|
})
|
||||||
|
|
||||||
|
def get_ifdatamap(self, bridge, port):
|
||||||
|
"""Get structure suitable for charmhelpers.contrib.network.ovs helpers.
|
||||||
|
|
||||||
|
:param bridge: Name of bridge on which the port will be added
|
||||||
|
:type bridge: str
|
||||||
|
:param port: Name of port which will represent one or more interfaces
|
||||||
|
:type port: str
|
||||||
|
"""
|
||||||
|
for _bridge, _ports in self.items():
|
||||||
|
for _port, _interfaces in _ports.items():
|
||||||
|
if _bridge == bridge and _port == port:
|
||||||
|
ifdatamap = {}
|
||||||
|
for name, data in _interfaces.items():
|
||||||
|
ifdatamap.update({
|
||||||
|
name: {
|
||||||
|
'type': data['type'],
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if data.get('mtu-request') is not None:
|
||||||
|
ifdatamap[name].update({
|
||||||
|
'mtu_request': data['mtu-request'],
|
||||||
|
})
|
||||||
|
if data.get('pci-address'):
|
||||||
|
ifdatamap[name].update({
|
||||||
|
'options': {
|
||||||
|
'dpdk-devargs': data['pci-address'],
|
||||||
|
},
|
||||||
|
})
|
||||||
|
return ifdatamap
|
||||||
|
|
||||||
|
|
||||||
|
class BondConfig(object):
    """Container and helpers for bond configuration options.

    Data is put into a dictionary and a convenient config get interface is
    provided.
    """

    DEFAULT_LACP_CONFIG = {
        'mode': 'balance-tcp',
        'lacp': 'active',
        'lacp-time': 'fast'
    }
    ALL_BONDS = 'ALL_BONDS'

    BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp']
    BOND_LACP = ['active', 'passive', 'off']
    BOND_LACP_TIME = ['fast', 'slow']

    def __init__(self, config_key=None):
        """Parse specified configuration option.

        :param config_key: Configuration key to retrieve data from
                           (default: ``dpdk-bond-config``)
        :type config_key: Optional[str]
        """
        self.config_key = config_key or 'dpdk-bond-config'

        self.lacp_config = {
            self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG)
        }

        lacp_config = config(self.config_key)
        if lacp_config:
            lacp_config_map = lacp_config.split()
            for entry in lacp_config_map:
                bond, entry = entry.partition(':')[0:3:2]
                if not bond:
                    bond = self.ALL_BONDS

                mode, entry = entry.partition(':')[0:3:2]
                if not mode:
                    mode = self.DEFAULT_LACP_CONFIG['mode']
                assert mode in self.BOND_MODES, \
                    "Bond mode {} is invalid".format(mode)

                lacp, entry = entry.partition(':')[0:3:2]
                if not lacp:
                    lacp = self.DEFAULT_LACP_CONFIG['lacp']
                assert lacp in self.BOND_LACP, \
                    "Bond lacp {} is invalid".format(lacp)

                lacp_time, entry = entry.partition(':')[0:3:2]
                if not lacp_time:
                    lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time']
                assert lacp_time in self.BOND_LACP_TIME, \
                    "Bond lacp-time {} is invalid".format(lacp_time)

                self.lacp_config[bond] = {
                    'mode': mode,
                    'lacp': lacp,
                    'lacp-time': lacp_time
                }

    def get_bond_config(self, bond):
        """Get the LACP configuration for a bond

        :param bond: the bond name
        :return: a dictionary with the configuration of the bond
        :rtype: Dict[str,Dict[str,str]]
        """
        return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS])

    def get_ovs_portdata(self, bond):
        """Get structure suitable for charmhelpers.contrib.network.ovs helpers.

        :param bond: the bond name
        :return: a dictionary with the configuration of the bond
        :rtype: Dict[str,Union[str,Dict[str,str]]]
        """
        bond_config = self.get_bond_config(bond)
        return {
            'bond_mode': bond_config['mode'],
            'lacp': bond_config['lacp'],
            'other_config': {
                'lacp-time': bond_config['lacp-time'],
            },
        }
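
# NOTE: illustrative example, not part of this commit.  With the charm config
# option 'dpdk-bond-config' set to
# ":balance-slb:off dpdk-bond1:active-backup:active:slow", the parser above
# yields a default entry plus a per-bond override:
#
#     bc = BondConfig()
#     bc.get_bond_config('dpdk-bond0')
#     # => {'mode': 'balance-slb', 'lacp': 'off', 'lacp-time': 'fast'}
#     bc.get_ovs_portdata('dpdk-bond1')
#     # => {'bond_mode': 'active-backup', 'lacp': 'active',
#     #     'other_config': {'lacp-time': 'slow'}}
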
class SRIOVContext(OSContextGenerator):
    """Provide context for configuring SR-IOV devices."""

    class sriov_config_mode(enum.Enum):
        """Mode in which SR-IOV is configured.

        The configuration option identified by the ``numvfs_key`` parameter
        is overloaded and defines in which mode the charm should interpret
        the other SR-IOV-related configuration options.
        """
        auto = 'auto'
        blanket = 'blanket'
        explicit = 'explicit'

    def _determine_numvfs(self, device, sriov_numvfs):
        """Determine number of Virtual Functions (VFs) configured for device.

        :param device: Object describing a PCI Network interface card (NIC).
        :type device: sriov_netplan_shim.pci.PCINetDevice
        :param sriov_numvfs: Number of VFs requested for blanket configuration.
        :type sriov_numvfs: int
        :returns: Number of VFs to configure for device
        :rtype: Optional[int]
        """

        def _get_capped_numvfs(requested):
            """Get a number of VFs that does not exceed individual card limits.

            Depending on make and model of NIC the number of VFs supported
            varies.  Requesting more VFs than a card supports would be a
            fatal error, so cap the requested number at the total number of
            VFs each individual card supports.

            :param requested: Number of VFs requested
            :type requested: int
            :returns: Number of VFs allowed
            :rtype: int
            """
            actual = min(int(requested), int(device.sriov_totalvfs))
            if actual < int(requested):
                log('Requested VFs ({}) too high for device {}. Falling back '
                    'to value supported by device: {}'
                    .format(requested, device.interface_name,
                            device.sriov_totalvfs),
                    level=WARNING)
            return actual

        if self._sriov_config_mode == self.sriov_config_mode.auto:
            # auto-mode
            #
            # If device mapping configuration is present, return information
            # on cards with mapping.
            #
            # If no device mapping configuration is present, return
            # information for all cards.
            #
            # The maximum number of VFs supported by card will be used.
            if (self._sriov_mapped_devices and
                    device.interface_name not in self._sriov_mapped_devices):
                log('SR-IOV configured in auto mode: No device mapping for {}'
                    .format(device.interface_name),
                    level=DEBUG)
                return
            return _get_capped_numvfs(device.sriov_totalvfs)
        elif self._sriov_config_mode == self.sriov_config_mode.blanket:
            # blanket-mode
            #
            # User has specified a number of VFs that should apply to all
            # cards with support for VFs.
            return _get_capped_numvfs(sriov_numvfs)
        elif self._sriov_config_mode == self.sriov_config_mode.explicit:
            # explicit-mode
            #
            # User has given a list of interface names and associated number
            # of VFs
            if device.interface_name not in self._sriov_config_devices:
                log('SR-IOV configured in explicit mode: No device:numvfs '
                    'pair for device {}, skipping.'
                    .format(device.interface_name),
                    level=DEBUG)
                return
            return _get_capped_numvfs(
                self._sriov_config_devices[device.interface_name])
        else:
            raise RuntimeError('This should not be reached')

    def __init__(self, numvfs_key=None, device_mappings_key=None):
        """Initialize map from PCI devices and configuration options.

        :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs')
        :type numvfs_key: Optional[str]
        :param device_mappings_key: Config key for device mappings
                                    (default: 'sriov-device-mappings')
        :type device_mappings_key: Optional[str]
        :raises: RuntimeError
        """
        numvfs_key = numvfs_key or 'sriov-numvfs'
        device_mappings_key = device_mappings_key or 'sriov-device-mappings'

        devices = pci.PCINetDevices()
        charm_config = config()
        sriov_numvfs = charm_config.get(numvfs_key) or ''
        sriov_device_mappings = charm_config.get(device_mappings_key) or ''

        # create list of devices from sriov_device_mappings config option
        self._sriov_mapped_devices = [
            pair.split(':', 1)[1]
            for pair in sriov_device_mappings.split()
        ]

        # create map of device:numvfs from sriov_numvfs config option
        self._sriov_config_devices = {
            ifname: numvfs for ifname, numvfs in (
                pair.split(':', 1) for pair in sriov_numvfs.split()
                if ':' in sriov_numvfs)
        }

        # determine configuration mode from contents of sriov_numvfs
        if sriov_numvfs == 'auto':
            self._sriov_config_mode = self.sriov_config_mode.auto
        elif sriov_numvfs.isdigit():
            self._sriov_config_mode = self.sriov_config_mode.blanket
        elif ':' in sriov_numvfs:
            self._sriov_config_mode = self.sriov_config_mode.explicit
        else:
            raise RuntimeError('Unable to determine mode of SR-IOV '
                               'configuration.')

        self._map = {
            device.interface_name: self._determine_numvfs(device, sriov_numvfs)
            for device in devices.pci_devices
            if device.sriov and
            self._determine_numvfs(device, sriov_numvfs) is not None
        }

    def __call__(self):
        """Provide SR-IOV context.

        :returns: Map interface name: min(configured, max) virtual functions.
        Example:
            {
                'eth0': 16,
                'eth1': 32,
                'eth2': 64,
            }
        :rtype: Dict[str,int]
        """
        return self._map
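
Not part of the commit, but as a reading aid: the overloaded sriov-numvfs option selects the mode above roughly as follows (interface names and counts are illustrative):

    # sriov-numvfs: "auto"            -> sriov_config_mode.auto (all VFs per mapped card)
    # sriov-numvfs: "32"              -> sriov_config_mode.blanket (32 VFs per card)
    # sriov-numvfs: "eth0:16 eth1:32" -> sriov_config_mode.explicit
    SRIOVContext()()
    # => {'eth0': 16, 'eth1': 32}   (each value capped at the card's sriov_totalvfs)
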
@@ -13,7 +13,7 @@
 # limitations under the License.

 # Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
+from collections import OrderedDict, namedtuple
 from functools import wraps

 import subprocess
@@ -36,15 +36,20 @@ from charmhelpers.contrib.network import ip
 from charmhelpers.core import unitdata

 from charmhelpers.core.hookenv import (
+    WORKLOAD_STATES,
     action_fail,
     action_set,
     config,
+    expected_peer_units,
+    expected_related_units,
     log as juju_log,
     charm_dir,
     INFO,
     ERROR,
+    metadata,
     related_units,
     relation_get,
+    relation_id,
     relation_ids,
     relation_set,
     status_set,
@@ -53,6 +58,7 @@ from charmhelpers.core.hookenv import (
     cached,
     leader_set,
     leader_get,
+    local_unit,
 )

 from charmhelpers.core.strutils import (
@@ -108,6 +114,10 @@ from charmhelpers.contrib.openstack.policyd import (
     POLICYD_CONFIG_NAME,
 )

+from charmhelpers.contrib.openstack.ha.utils import (
+    expect_ha,
+)
+
 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

@@ -1810,6 +1820,16 @@ def os_application_version_set(package):
     application_version_set(application_version)


+def os_application_status_set(check_function):
+    """Run the supplied function and set the application status accordingly.
+
+    :param check_function: Function to run to get app states and messages.
+    :type check_function: function
+    """
+    state, message = check_function()
+    status_set(state, message, application=True)
+
+
 def enable_memcache(source=None, release=None, package=None):
     """Determine if memcache should be enabled on the local unit

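
Not part of the commit: a minimal sketch of how a charm would be expected to drive the new helper, assuming it defines its own check function returning a (state, message) pair (names below are hypothetical):

    def check_application_ready():
        # charm-specific checks would go here
        return 'active', 'Application Ready'

    # runs the check and calls status-set --application with the result
    os_application_status_set(check_application_ready)
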
@@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None):
                             'WARN')
                         pass
     return True in notifications
+
+
+@cached
+def container_scoped_relations():
+    """Get all the container scoped relations
+
+    :returns: List of relation names
+    :rtype: List
+    """
+    md = metadata()
+    relations = []
+    for relation_type in ('provides', 'requires', 'peers'):
+        for relation in md.get(relation_type, []):
+            if md[relation_type][relation].get('scope') == 'container':
+                relations.append(relation)
+    return relations
+
+
+def is_db_ready(use_current_context=False, rel_name=None):
+    """Check remote database is ready to be used.
+
+    Database relations are expected to provide a list of 'allowed' units to
+    confirm that the database is ready for use by those units.
+
+    If db relation has provided this information and local unit is a member,
+    returns True otherwise False.
+
+    :param use_current_context: Whether to limit checks to current hook
+                                context.
+    :type use_current_context: bool
+    :param rel_name: Name of relation to check
+    :type rel_name: string
+    :returns: Whether remote db is ready.
+    :rtype: bool
+    :raises: Exception
+    """
+    key = 'allowed_units'
+
+    rel_name = rel_name or 'shared-db'
+    this_unit = local_unit()
+
+    if use_current_context:
+        if relation_id() in relation_ids(rel_name):
+            rids_units = [(None, None)]
+        else:
+            raise Exception("use_current_context=True but not in {} "
+                            "rel hook contexts (currently in {})."
+                            .format(rel_name, relation_id()))
+    else:
+        rids_units = [(r_id, u)
+                      for r_id in relation_ids(rel_name)
+                      for u in related_units(r_id)]
+
+    for rid, unit in rids_units:
+        allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
+        if allowed_units and this_unit in allowed_units.split():
+            juju_log("This unit ({}) is in allowed unit list from {}".format(
+                this_unit,
+                unit), 'DEBUG')
+            return True
+
+    juju_log("This unit was not found in any allowed unit list")
+    return False
+
+
+def is_expected_scale(peer_relation_name='cluster'):
+    """Query juju goal-state to determine whether our peer- and dependency-
+    relations are at the expected scale.
+
+    Useful for deferring per unit per relation housekeeping work until we are
+    ready to complete it successfully and without unnecessary repetition.
+
+    Always returns True if version of juju used does not support goal-state.
+
+    :param peer_relation_name: Name of peer relation
+    :type rel_name: string
+    :returns: True or False
+    :rtype: bool
+    """
+    def _get_relation_id(rel_type):
+        return next((rid for rid in relation_ids(reltype=rel_type)), None)
+
+    Relation = namedtuple('Relation', 'rel_type rel_id')
+    peer_rid = _get_relation_id(peer_relation_name)
+    # Units with no peers should still have a peer relation.
+    if not peer_rid:
+        juju_log('Not at expected scale, no peer relation found', 'DEBUG')
+        return False
+    expected_relations = [
+        Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
+    if expect_ha():
+        expected_relations.append(
+            Relation(
+                rel_type='ha',
+                rel_id=_get_relation_id('ha')))
+    juju_log(
+        'Checking scale of {} relations'.format(
+            ','.join([r.rel_type for r in expected_relations])),
+        'DEBUG')
+    try:
+        if (len(related_units(relid=peer_rid)) <
+                len(list(expected_peer_units()))):
+            return False
+        for rel in expected_relations:
+            if not rel.rel_id:
+                juju_log(
+                    'Expected to find {} relation, but it is missing'.format(
+                        rel.rel_type),
+                    'DEBUG')
+                return False
+            # Goal state returns every unit even for container scoped
+            # relations but the charm only ever has a relation with
+            # the local unit.
+            if rel.rel_type in container_scoped_relations():
+                expected_count = 1
+            else:
+                expected_count = len(
+                    list(expected_related_units(reltype=rel.rel_type)))
+            if len(related_units(relid=rel.rel_id)) < expected_count:
+                juju_log(
+                    ('Not at expected scale, not enough units on {} '
+                     'relation'.format(rel.rel_type)),
+                    'DEBUG')
+                return False
+    except NotImplementedError:
+        return True
+    juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
+    return True
+
+
+def get_peer_key(unit_name):
+    """Get the peer key for this unit.
+
+    The peer key is the key a unit uses to publish its status down the peer
+    relation.
+
+    :param unit_name: Name of unit
+    :type unit_name: string
+    :returns: Peer key for given unit
+    :rtype: string
+    """
+    return 'unit-state-{}'.format(unit_name.replace('/', '-'))
+
+
+UNIT_READY = 'READY'
+UNIT_NOTREADY = 'NOTREADY'
+UNIT_UNKNOWN = 'UNKNOWN'
+UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
+
+
+def inform_peers_unit_state(state, relation_name='cluster'):
+    """Inform peers of the state of this unit.
+
+    :param state: State of unit to publish
+    :type state: string
+    :param relation_name: Name of relation to publish state on
+    :type relation_name: string
+    """
+    if state not in UNIT_STATES:
+        raise ValueError(
+            "Setting invalid state {} for unit".format(state))
+    for r_id in relation_ids(relation_name):
+        relation_set(relation_id=r_id,
+                     relation_settings={
+                         get_peer_key(local_unit()): state})
+
+
+def get_peers_unit_state(relation_name='cluster'):
+    """Get the state of all peers.
+
+    :param relation_name: Name of relation to check peers on.
+    :type relation_name: string
+    :returns: Unit states keyed on unit name.
+    :rtype: dict
+    :raises: ValueError
+    """
+    r_ids = relation_ids(relation_name)
+    rids_units = [(r, u) for r in r_ids for u in related_units(r)]
+    unit_states = {}
+    for r_id, unit in rids_units:
+        settings = relation_get(unit=unit, rid=r_id)
+        unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
+        if unit_states[unit] not in UNIT_STATES:
+            raise ValueError(
+                "Unit in unknown state {}".format(unit_states[unit]))
+    return unit_states
+
+
+def are_peers_ready(relation_name='cluster'):
+    """Check if all peers are ready.
+
+    :param relation_name: Name of relation to check peers on.
+    :type relation_name: string
+    :returns: Whether all units are ready.
+    :rtype: bool
+    """
+    unit_states = get_peers_unit_state(relation_name)
+    return all(v == UNIT_READY for v in unit_states.values())
+
+
+def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
+    """Inform peers if this unit is ready.
+
+    The check function should return a tuple (state, message). A state
+    of 'READY' indicates the unit is READY.
+
+    :param check_unit_ready_func: Function to run to check readiness
+    :type check_unit_ready_func: function
+    :param relation_name: Name of relation to check peers on.
+    :type relation_name: string
+    """
+    unit_ready, msg = check_unit_ready_func()
+    if unit_ready:
+        state = UNIT_READY
+    else:
+        state = UNIT_NOTREADY
+    juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
+    inform_peers_unit_state(state, relation_name)
+
+
+def check_api_unit_ready(check_db_ready=True):
+    """Check if this unit is ready.
+
+    :param check_db_ready: Include checks of database readiness.
+    :type check_db_ready: bool
+    :returns: Whether unit state is ready and status message
+    :rtype: (bool, str)
+    """
+    unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready)
+    return unit_state == WORKLOAD_STATES.ACTIVE, msg
+
+
+def get_api_unit_status(check_db_ready=True):
+    """Return a workload status and message for this unit.
+
+    :param check_db_ready: Include checks of database readiness.
+    :type check_db_ready: bool
+    :returns: Workload state and message
+    :rtype: (bool, str)
+    """
+    unit_state = WORKLOAD_STATES.ACTIVE
+    msg = 'Unit is ready'
+    if is_db_maintenance_mode():
+        unit_state = WORKLOAD_STATES.MAINTENANCE
+        msg = 'Database in maintenance mode.'
+    elif is_unit_paused_set():
+        unit_state = WORKLOAD_STATES.BLOCKED
+        msg = 'Unit paused.'
+    elif check_db_ready and not is_db_ready():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Allowed_units list provided but this unit not present'
+    elif not is_db_initialised():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Database not initialised'
+    elif not is_expected_scale():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Charm and its dependencies not yet at expected scale'
+    juju_log(msg, 'DEBUG')
+    return unit_state, msg
+
+
+def check_api_application_ready():
+    """Check if this application is ready.
+
+    :returns: Whether application state is ready and status message
+    :rtype: (bool, str)
+    """
+    app_state, msg = get_api_application_status()
+    return app_state == WORKLOAD_STATES.ACTIVE, msg
+
+
+def get_api_application_status():
+    """Return a workload status and message for this application.
+
+    :returns: Workload state and message
+    :rtype: (bool, str)
+    """
+    app_state, msg = get_api_unit_status()
+    if app_state == WORKLOAD_STATES.ACTIVE:
+        if are_peers_ready():
+            return WORKLOAD_STATES.ACTIVE, 'Application Ready'
+        else:
+            return WORKLOAD_STATES.WAITING, 'Some units are not ready'
+    return app_state, msg
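
Not part of the commit: a minimal sketch of how an API charm is expected to tie these helpers together in a status or assess hook. check_api_unit_ready() consults is_db_ready(), is_db_initialised() and is_expected_scale() before the unit advertises READY to its peers:

    inform_peers_if_ready(check_api_unit_ready)
    os_application_status_set(check_api_application_ready)
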
@@ -140,9 +140,16 @@ def vault_relation_complete(backend=None):
     :ptype backend: string
     :returns: whether the relation to vault is complete
     :rtype: bool"""
-    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
-    vault_kv()
-    return vault_kv.complete
+    try:
+        import hvac
+    except ImportError:
+        return False
+    try:
+        vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+        vault_kv()
+        return vault_kv.complete
+    except hvac.exceptions.InvalidRequest:
+        return False


 # TODO: contrib a high level unwrap method to hvac that works
@@ -156,7 +163,16 @@ def retrieve_secret_id(url, token):
     :returns: secret_id to use for Vault Access
     :rtype: str"""
     import hvac
-    client = hvac.Client(url=url, token=token)
+    try:
+        # hvac 0.10.1 changed default adapter to JSONAdapter
+        client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request)
+    except AttributeError:
+        # hvac < 0.6.2 doesn't have adapter but uses the same response interface
+        client = hvac.Client(url=url, token=token)
+    else:
+        # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
+        if not isinstance(client.adapter, hvac.adapters.Request):
+            client.adapter = hvac.adapters.Request(base_uri=url, token=token)
     response = client._post('/v1/sys/wrapping/unwrap')
     if response.status_code == 200:
         data = response.json()
@@ -22,6 +22,7 @@
 #  Adam Gandelman <adamg@ubuntu.com>
 #

+import collections
 import errno
 import hashlib
 import math
@@ -93,6 +94,88 @@ LEGACY_PG_COUNT = 200
 DEFAULT_MINIMUM_PGS = 2


+class OsdPostUpgradeError(Exception):
+    """Error class for OSD post-upgrade operations."""
+    pass
+
+
+class OSDSettingConflict(Exception):
+    """Error class for conflicting osd setting requests."""
+    pass
+
+
+class OSDSettingNotAllowed(Exception):
+    """Error class for a disallowed setting."""
+    pass
+
+
+OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed)
+
+OSD_SETTING_WHITELIST = [
+    'osd heartbeat grace',
+    'osd heartbeat interval',
+]
+
+
+def _order_dict_by_key(rdict):
+    """Convert a dictionary into an OrderedDict sorted by key.
+
+    :param rdict: Dictionary to be ordered.
+    :type rdict: dict
+    :returns: Ordered Dictionary.
+    :rtype: collections.OrderedDict
+    """
+    return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
+
+
+def get_osd_settings(relation_name):
+    """Consolidate requested osd settings from all clients.
+
+    Consolidate requested osd settings from all clients. Check that the
+    requested setting is on the whitelist and it does not conflict with
+    any other requested settings.
+
+    :returns: Dictionary of settings
+    :rtype: dict
+
+    :raises: OSDSettingNotAllowed
+    :raises: OSDSettingConflict
+    """
+    rel_ids = relation_ids(relation_name)
+    osd_settings = {}
+    for relid in rel_ids:
+        for unit in related_units(relid):
+            unit_settings = relation_get('osd-settings', unit, relid) or '{}'
+            unit_settings = json.loads(unit_settings)
+            for key, value in unit_settings.items():
+                if key not in OSD_SETTING_WHITELIST:
+                    msg = 'Illegal settings "{}"'.format(key)
+                    raise OSDSettingNotAllowed(msg)
+                if key in osd_settings:
+                    if osd_settings[key] != unit_settings[key]:
+                        msg = 'Conflicting settings for "{}"'.format(key)
+                        raise OSDSettingConflict(msg)
+                else:
+                    osd_settings[key] = value
+    return _order_dict_by_key(osd_settings)
+
+
+def send_osd_settings():
+    """Pass on requested OSD settings to osd units."""
+    try:
+        settings = get_osd_settings('client')
+    except OSD_SETTING_EXCEPTIONS as e:
+        # There is a problem with the settings, not passing them on. Update
+        # status will notify the user.
+        log(e, level=ERROR)
+        return
+    data = {
+        'osd-settings': json.dumps(settings, sort_keys=True)}
+    for relid in relation_ids('osd'):
+        relation_set(relation_id=relid,
+                     relation_settings=data)
+
+
 def validator(value, valid_type, valid_range=None):
     """
     Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
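
Not part of the commit: a sketch of the relation payload these helpers consume, using only whitelisted keys (values are illustrative):

    # On a client unit:
    relation_set(relation_settings={
        'osd-settings': json.dumps({'osd heartbeat grace': 20,
                                    'osd heartbeat interval': 5})})
    # On the mon, get_osd_settings('client') merges and sorts the requests,
    # and send_osd_settings() republishes the result on the 'osd' relation.
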
@@ -1635,5 +1718,67 @@ class CephConfContext(object):
                 continue

             ceph_conf[key] = conf[key]

         return ceph_conf
+
+
+class CephOSDConfContext(CephConfContext):
+    """Ceph config (ceph.conf) context.
+
+    Consolidates settings from config-flags via CephConfContext with
+    settings provided by the mons. The config-flag values are preserved in
+    conf['osd'], settings from the mons which do not clash with config-flag
+    settings are in conf['osd_from_client'] and finally settings which do
+    clash are in conf['osd_from_client_conflict']. Rather than silently drop
+    the conflicting settings they are provided in the context so they can be
+    rendered commented out to give some visibility to the admin.
+    """
+
+    def __init__(self, permitted_sections=None):
+        super(CephOSDConfContext, self).__init__(
+            permitted_sections=permitted_sections)
+        try:
+            self.settings_from_mons = get_osd_settings('mon')
+        except OSDSettingConflict:
+            log(
+                "OSD settings from mons are inconsistent, ignoring them",
+                level=WARNING)
+            self.settings_from_mons = {}
+
+    def filter_osd_from_mon_settings(self):
+        """Filter settings from client relation against config-flags.
+
+        :returns: A tuple (
+            config-flag values,
+            client settings which do not conflict with config-flag values,
+            client settings which conflict with config-flag values)
+        :rtype: (OrderedDict, OrderedDict, OrderedDict)
+        """
+        ceph_conf = super(CephOSDConfContext, self).__call__()
+        conflicting_entries = {}
+        clear_entries = {}
+        for key, value in self.settings_from_mons.items():
+            if key in ceph_conf.get('osd', {}):
+                if ceph_conf['osd'][key] != value:
+                    conflicting_entries[key] = value
+            else:
+                clear_entries[key] = value
+        clear_entries = _order_dict_by_key(clear_entries)
+        conflicting_entries = _order_dict_by_key(conflicting_entries)
+        return ceph_conf, clear_entries, conflicting_entries
+
+    def __call__(self):
+        """Construct OSD config context.
+
+        Standard context with two additional special keys.
+            osd_from_client_conflict: client settings which conflict with
+                config-flag values
+            osd_from_client: settings which do not conflict with config-flag
+                values
+
+        :returns: OSD config context dict.
+        :rtype: dict
+        """
+        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
+        conf['osd_from_client_conflict'] = osd_conflict
+        conf['osd_from_client'] = osd_clear
+        return conf
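
Not part of the commit, purely illustrative of the split performed above: with config-flags pinning 'osd heartbeat grace' to 20 while the mons request 'osd heartbeat grace' of 25 and 'osd heartbeat interval' of 5, the rendered context would carry:

    # conf['osd']                      == {'osd heartbeat grace': 20}
    # conf['osd_from_client']          == {'osd heartbeat interval': 5}
    # conf['osd_from_client_conflict'] == {'osd heartbeat grace': 25}
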
@@ -32,6 +32,10 @@ def loopback_devices():

         /dev/loop0: [0807]:961814 (/tmp/my.img)

+    or:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
+
     :returns: dict: a dict mapping {loopback_dev: backing_file}
     '''
     loopbacks = {}
@@ -39,9 +43,9 @@ def loopback_devices():
     output = check_output(cmd)
     if six.PY3:
         output = output.decode('utf-8')
-    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
+    devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
     for dev, _, f in devs:
-        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
+        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
     return loopbacks

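
Not part of the commit: a quick worked example of why both changes are needed for backing files that have been deleted:

    line = '/dev/loop0: [0807]:961814 (/tmp/my.img (deleted))'
    line.strip().split(' ')      # 4 fields -> breaks the 3-tuple unpacking above
    dev, _, f = line.strip().split(' ', 2)
    re.search(r'\((.+)\)', f).groups()[0]
    # => '/tmp/my.img (deleted)'
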
@@ -21,6 +21,7 @@
 from __future__ import print_function
 import copy
 from distutils.version import LooseVersion
+from enum import Enum
 from functools import wraps
 from collections import namedtuple
 import glob
@@ -57,6 +58,14 @@ RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
                  'This may not be compatible with software you are '
                  'running in your shell.')

+
+class WORKLOAD_STATES(Enum):
+    ACTIVE = 'active'
+    BLOCKED = 'blocked'
+    MAINTENANCE = 'maintenance'
+    WAITING = 'waiting'
+
+
 cache = {}

@@ -1088,22 +1097,33 @@ def function_tag():
     return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()


-def status_set(workload_state, message):
+def status_set(workload_state, message, application=False):
     """Set the workload state with a message

     Use status-set to set the workload state with a message which is visible
     to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message unstead.
+    assume this is juju < 1.23 and juju-log the message instead.

-    workload_state -- valid juju workload state.
+    workload_state -- valid juju workload state. str or WORKLOAD_STATES
     message -- status update message
+    application -- Whether this is an application state set
     """
-    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
-    if workload_state not in valid_states:
-        raise ValueError(
-            '{!r} is not a valid workload state'.format(workload_state)
-        )
-    cmd = ['status-set', workload_state, message]
+    bad_state_msg = '{!r} is not a valid workload state'
+
+    if isinstance(workload_state, str):
+        try:
+            # Convert string to enum.
+            workload_state = WORKLOAD_STATES[workload_state.upper()]
+        except KeyError:
+            raise ValueError(bad_state_msg.format(workload_state))
+
+    if workload_state not in WORKLOAD_STATES:
+        raise ValueError(bad_state_msg.format(workload_state))
+
+    cmd = ['status-set']
+    if application:
+        cmd.append('--application')
+    cmd.extend([workload_state.value, message])
     try:
         ret = subprocess.call(cmd)
         if ret == 0:
@@ -1111,7 +1131,7 @@ def status_set(workload_state, message):
     except OSError as e:
         if e.errno != errno.ENOENT:
             raise
-    log_message = 'status-set failed: {} {}'.format(workload_state,
+    log_message = 'status-set failed: {} {}'.format(workload_state.value,
                                                     message)
     log(log_message, level='INFO')

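
Not part of the commit: after this change both spellings are accepted, and application-level status goes through the same helper:

    status_set(WORKLOAD_STATES.ACTIVE, 'Unit is ready')
    status_set('waiting', 'Incomplete relations', application=True)
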
@@ -1526,13 +1546,13 @@ def env_proxy_settings(selected_settings=None):
     """Get proxy settings from process environment variables.

     Get charm proxy settings from environment variables that correspond to
-    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
-    see lp:1782236) in a format suitable for passing to an application that
-    reacts to proxy settings passed as environment variables. Some applications
-    support lowercase or uppercase notation (e.g. curl), some support only
-    lowercase (e.g. wget), there are also subjectively rare cases of only
-    uppercase notation support. no_proxy CIDR and wildcard support also varies
-    between runtimes and applications as there is no enforced standard.
+    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2, see
+    lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
+    application that reacts to proxy settings passed as environment variables.
+    Some applications support lowercase or uppercase notation (e.g. curl), some
+    support only lowercase (e.g. wget), there are also subjectively rare cases
+    of only uppercase notation support. no_proxy CIDR and wildcard support also
+    varies between runtimes and applications as there is no enforced standard.

     Some applications may connect to multiple destinations and expose config
     options that would affect only proxy settings for a specific destination
@@ -17,14 +17,17 @@

 import yaml

-from subprocess import check_call
+from subprocess import check_call, CalledProcessError

 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
     ERROR,
+    WARNING,
 )

+from charmhelpers.core.host import is_container
+
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'

@@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False):
     if ignore:
         call.append("-e")

-    check_call(call)
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        if is_container():
+            log("Error setting some sysctl keys in this container: {}".format(e.output),
+                level=WARNING)
+        else:
+            raise e
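
Not part of the commit: a minimal sketch of the effect inside a container, assuming a charm calls the helper as usual:

    create(yaml.dump({'net.ipv4.tcp_keepalive_time': 60}),
           '/etc/sysctl.d/50-charm.conf')
    # On bare metal a failing 'sysctl -p' still raises CalledProcessError;
    # inside a container it is downgraded to a WARNING log entry.
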