Replace python-neutronclient with openstacksdk
python-neutronclient has been deprecated, and Octavia has already removed it
in the dependent change below. The following changes on the
ovn-octavia-provider side are in line with the changes in Octavia itself:

- Replaced code that uses the deprecated `python-neutronclient` library with
  code that uses `openstacksdk`, and removed `python-neutronclient` as a
  dependency.
- Marked certain configuration options related to Keystone authentication as
  deprecated for removal. In future releases the authentication options need
  to be added to the [neutron] section of the configuration.

Note: after [1], some calls to the neutron test_db_base_plugin_v2 module take
a new 'as_admin' parameter that needs to be included in the calls from the
ovn-provider functional tests. Squashed with patch [2] to solve the cross
dependency.

[1] https://review.opendev.org/c/openstack/neutron/+/879827
[2] https://review.opendev.org/c/openstack/ovn-octavia-provider/+/882715

Depends-On: https://review.opendev.org/c/openstack/octavia/+/866327
Change-Id: I985b24e4a6db962b1e73eeae69a8c96f4b0760ae
parent b80bf850d9
commit 220d8c8581
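
For context (not part of the commit): the sketch below illustrates the pattern
this change adopts, assuming an Octavia configuration file whose [neutron]
section now carries the keystoneauth options (auth_url, username, password,
project_name, auth_type, cafile, valid_interfaces, endpoint_override).
keystoneauth1 builds the session and openstacksdk's network proxy replaces the
python-neutronclient calls; the config path and subnet ID below are
placeholders, not values taken from this change.

    # Minimal sketch, assuming the [neutron] section of the Octavia API
    # configuration holds the keystoneauth credentials.
    from keystoneauth1 import loading as ks_loading
    import openstack
    from oslo_config import cfg

    CONF = cfg.CONF
    ks_loading.register_auth_conf_options(CONF, 'neutron')
    ks_loading.register_session_conf_options(CONF, 'neutron')
    # Placeholder path; point this at the real Octavia API configuration.
    CONF([], default_config_files=['/etc/octavia/octavia.conf'])

    auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron')
    session = ks_loading.load_session_from_conf_options(
        CONF, 'neutron', auth=auth)

    # The SDK network proxy replaces neutronclient; it raises
    # openstack.exceptions.ResourceNotFound / HttpException instead of the
    # old neutronclient exception classes.
    network = openstack.connection.Connection(session=session).network
    subnet = network.get_subnet('11111111-2222-3333-4444-555555555555')
    print(subnet.cidr, subnet.network_id)

This proxy object is what get_neutron_client() returns in the diff below, so
callers use SDK methods such as get_subnet(), get_port(), ports(),
find_port(), create_port() and delete_port(), and catch openstack.exceptions
rather than neutronclient exceptions.
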
@@ -22,7 +22,6 @@ disable=
dangerous-default-value,
fixme,
global-statement,
no-init,
protected-access,
redefined-builtin,
redefined-outer-name,
@@ -32,10 +31,8 @@ disable=
unused-variable,
useless-super-delegation,
# "C" Coding convention violations
bad-continuation,
invalid-name,
len-as-condition,
misplaced-comparison-constant,
missing-docstring,
superfluous-parens,
ungrouped-imports,
@@ -43,7 +40,6 @@ disable=
# "R" Refactor recommendations
duplicate-code,
no-else-return,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
@@ -96,8 +92,6 @@ max-line-length=79
additional-builtins=

[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=

[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
@@ -112,4 +106,3 @@ ignored-modules=six.moves,_MovedItems
[REPORTS]
# Tells whether to display a full report or only the messages
reports=no
@@ -10,11 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.

from keystoneauth1 import exceptions as ks_exceptions
from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import client as neutron_client

from octavia_lib.api.drivers import exceptions as driver_exceptions
import openstack
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
@@ -25,8 +25,6 @@ from ovn_octavia_provider.i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

NEUTRON_VERSION = '2.0'


class KeystoneSession():

@@ -35,8 +33,6 @@ class KeystoneSession():
self._auth = None

self.section = section
ks_loading.register_auth_conf_options(cfg.CONF, self.section)
ks_loading.register_session_conf_options(cfg.CONF, self.section)

@property
def session(self):
@@ -52,8 +48,57 @@ class KeystoneSession():
@property
def auth(self):
if not self._auth:
self._auth = ks_loading.load_auth_from_conf_options(
cfg.CONF, self.section)
try:
self._auth = ks_loading.load_auth_from_conf_options(
cfg.CONF, self.section)
except ks_exceptions.auth_plugins.MissingRequiredOptions as e:
if self.section == constants.SERVICE_AUTH:
raise e
# NOTE(gthiemonge): MissingRequiredOptions is raised: there is
# one or more missing auth options in the config file. It may
# be due to the migration from python-neutronclient to
# openstacksdk.
# With neutronclient, most of the auth settings were in
# [service_auth] with a few overrides in [neutron],
# but with openstacksdk, we have all the auth settings in the
# [neutron] section. In order to support smooth upgrades, in
# case those options are missing, we override the undefined
# options with the existing settings from [service_auth].

# This code should be removed when all the deployment tools set
# the correct options in [neutron]

# The config options are lazily registered/loaded by keystone,
# it means that we cannot get/set them before invoking
# 'load_auth_from_conf_options' on 'service_auth'.
ks_loading.load_auth_from_conf_options(
cfg.CONF, constants.SERVICE_AUTH)

config = getattr(cfg.CONF, self.section)
for opt in config:
# For each option in the [neutron] section, get its setting
# location, if the location is 'opt_default' or
# 'set_default', it means that the option is not configured
# in the config file, it should be replaced with the one
# from [service_auth]
loc = cfg.CONF.get_location(opt, self.section)
if not loc or loc.location in (cfg.Locations.opt_default,
cfg.Locations.set_default):
if hasattr(cfg.CONF.service_auth, opt):
cur_value = getattr(config, opt)
value = getattr(cfg.CONF.service_auth, opt)
if value != cur_value:
log_value = (value if opt != "password"
else "<hidden>")
LOG.debug("Overriding [%s].%s with '%s'",
self.section, opt, log_value)
cfg.CONF.set_override(opt, value, self.section)

# Now we can call load_auth_from_conf_options for this specific
# service with the newly defined options.
self._auth = ks_loading.load_auth_from_conf_options(
cfg.CONF, self.section)

return self._auth


@@ -68,34 +113,18 @@ class Singleton(type):


class NeutronAuth(metaclass=Singleton):
def __init__(self, region, service_name=None, endpoint=None,
endpoint_type='publicURL', insecure=False,
ca_cert=None):
"""Create neutron client object.

:param region: The region of the service
:param service_name: The name of the neutron service in the catalog
:param endpoint: The endpoint of the service
:param endpoint_type: The endpoint_type of the service
:param insecure: Turn off certificate validation
:param ca_cert: CA Cert file path
:return: a Neutron Client object.
:raises Exception: if the client cannot be created
"""
ksession = KeystoneSession()
kwargs = {'region_name': region,
'session': ksession.session,
'endpoint_type': endpoint_type,
'insecure': insecure}
if service_name:
kwargs['service_name'] = service_name
if endpoint:
kwargs['endpoint_override'] = endpoint
if ca_cert:
kwargs['ca_cert'] = ca_cert
def __init__(self):
"""Create neutron client object."""
try:
self.neutron_client = neutron_client.Client(
NEUTRON_VERSION, **kwargs)
ksession = KeystoneSession('neutron')

kwargs = {}
if CONF.neutron.endpoint_override:
kwargs['network_endpoint_override'] = (
CONF.neutron.endpoint_override)

self.network_proxy = openstack.connection.Connection(
session=ksession.session, **kwargs).network
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Error creating Neutron client.")
@@ -103,16 +132,9 @@ class NeutronAuth(metaclass=Singleton):

def get_neutron_client():
try:
return NeutronAuth(
endpoint=CONF.neutron.endpoint,
region=CONF.neutron.region_name,
endpoint_type=CONF.neutron.endpoint_type,
service_name=CONF.neutron.service_name,
insecure=CONF.neutron.insecure,
ca_cert=CONF.neutron.ca_certificates_file,
).neutron_client
except n_exc.NeutronClientException as e:
msg = _('Cannot inialize Neutron Client. Exception: %s. '
return NeutronAuth().network_proxy
except Exception as e:
msg = _('Cannot initialize OpenStackSDK. Exception: %s. '
'Please verify Neutron service configuration '
'in Octavia API configuration.') % e
raise driver_exceptions.DriverError(
@@ -79,39 +79,93 @@ ovn_opts = [
]

neutron_opts = [
cfg.StrOpt('service_name',
help=_('The name of the neutron service in the '
'keystone catalog')),
cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint '
'in the keystone catalog.')),
cfg.StrOpt('region_name',
help=_('Region in Identity service catalog to use for '
'communication with the OpenStack services.')),
cfg.StrOpt('endpoint_type', default='publicURL',
help=_('Endpoint interface in identity service to use')),
'in the keystone catalog.'),
deprecated_for_removal=True,
deprecated_reason=_('The endpoint_override option defined by '
'keystoneauth1 is the new name for this '
'option.'),
deprecated_since='Antelope'),
cfg.StrOpt('endpoint_type', help=_('Endpoint interface in identity '
'service to use'),
deprecated_for_removal=True,
deprecated_reason=_('This option was replaced by the '
'valid_interfaces option defined by '
'keystoneauth.'),
deprecated_since='Antelope'),
cfg.StrOpt('ca_certificates_file',
help=_('CA certificates file path')),
cfg.BoolOpt('insecure',
default=False,
help=_('Disable certificate validation on SSL connections ')),
help=_('CA certificates file path'),
deprecated_for_removal=True,
deprecated_reason=_('The cafile option defined by '
'keystoneauth1 is the new name for this '
'option.'),
deprecated_since='Antelope'),
]


def handle_neutron_deprecations():
# Apply neutron deprecated options to their new setting if needed

# Basicaly: if the value of the deprecated option is not the default:
# * convert it to a valid "new" value if needed
# * set it as the default for the new option
# Thus [neutron].<new_option> has an higher precedence than
# [neutron].<deprecated_option>
loc = cfg.CONF.get_location('endpoint', 'neutron')
if loc and loc.location != cfg.Locations.opt_default:
cfg.CONF.set_default('endpoint_override', cfg.CONF.neutron.endpoint,
'neutron')

loc = cfg.CONF.get_location('endpoint_type', 'neutron')
if loc and loc.location != cfg.Locations.opt_default:
endpoint_type = cfg.CONF.neutron.endpoint_type.replace('URL', '')
cfg.CONF.set_default('valid_interfaces', [endpoint_type],
'neutron')

loc = cfg.CONF.get_location('ca_certificates_file', 'neutron')
if loc and loc.location != cfg.Locations.opt_default:
cfg.CONF.set_default('cafile', cfg.CONF.neutron.ca_certificates_file,
'neutron')


def register_opts():
# NOTE (froyo): just to not try to re-register options already done
# by Neutron, specially in test scope, that will get a DuplicateOptError
missing_opts = ovn_opts
missing_ovn_opts = ovn_opts
try:
neutron_registered_opts = [opt for opt in cfg.CONF.ovn]
missing_opts = [opt for opt in ovn_opts
if opt.name not in neutron_registered_opts]
missing_ovn_opts = [opt for opt in ovn_opts
if opt.name not in neutron_registered_opts]
except cfg.NoSuchOptError:
LOG.info('Not found any opts under group ovn registered by Neutron')

cfg.CONF.register_opts(missing_opts, group='ovn')
cfg.CONF.register_opts(neutron_opts, group='neutron')
# Do the same for neutron options that have been already registered by
# Octavia
missing_neutron_opts = neutron_opts
try:
neutron_registered_opts = [opt for opt in cfg.CONF.neutron]
missing_neutron_opts = [opt for opt in neutron_opts
if opt.name not in neutron_registered_opts]
except cfg.NoSuchOptError:
LOG.info('Not found any opts under group neutron')

cfg.CONF.register_opts(missing_ovn_opts, group='ovn')
cfg.CONF.register_opts(missing_neutron_opts, group='neutron')
ks_loading.register_auth_conf_options(cfg.CONF, 'service_auth')
ks_loading.register_session_conf_options(cfg.CONF, 'service_auth')
ks_loading.register_adapter_conf_options(cfg.CONF, 'service_auth',
include_deprecated=False)

ks_loading.register_auth_conf_options(cfg.CONF, 'neutron')
ks_loading.register_session_conf_options(cfg.CONF, 'neutron')
ks_loading.register_adapter_conf_options(cfg.CONF, 'neutron',
include_deprecated=False)

# Override default auth_type for plugins with the default from service_auth
auth_type = cfg.CONF.service_auth.auth_type
cfg.CONF.set_default('auth_type', auth_type, 'neutron')

handle_neutron_deprecations()


def list_opts():
@@ -464,15 +464,18 @@ class OvnProviderDriver(driver_base.ProviderDriver):
# TODO(gthiemonge): implement additional_vip_dicts
try:
port = self._ovn_helper.create_vip_port(
project_id, lb_id, vip_dict)['port']
project_id, lb_id, vip_dict)
vip_dict[constants.VIP_PORT_ID] = port['id']
vip_dict[constants.VIP_ADDRESS] = (
port['fixed_ips'][0]['ip_address'])
except Exception as e:
kwargs = {}
if hasattr(e, 'message'):
kwargs = {'user_fault_string': e.message,
'operator_fault_string': e.message}
for attr in ('details', 'message'):
if hasattr(e, attr):
value = getattr(e, attr)
kwargs = {'user_fault_string': value,
'operator_fault_string': value}
break
raise driver_exceptions.DriverError(
**kwargs)
return vip_dict, []
@ -18,13 +18,12 @@ import re
|
||||
import threading
|
||||
|
||||
import netaddr
|
||||
from neutron_lib.api.definitions import provider_net
|
||||
from neutron_lib import constants as n_const
|
||||
from neutronclient.common import exceptions as n_exc
|
||||
from octavia_lib.api.drivers import data_models as o_datamodels
|
||||
from octavia_lib.api.drivers import driver_lib as o_driver_lib
|
||||
from octavia_lib.api.drivers import exceptions as driver_exceptions
|
||||
from octavia_lib.common import constants
|
||||
import openstack
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
@ -136,18 +135,17 @@ class OvnProviderHelper():
|
||||
col=col, match=key) from e
|
||||
|
||||
def _create_hm_port(self, network_id, subnet_id, project_id):
|
||||
port = {'port': {'name': ovn_const.LB_HM_PORT_PREFIX + str(subnet_id),
|
||||
'network_id': network_id,
|
||||
'fixed_ips': [{'subnet_id': subnet_id}],
|
||||
'admin_state_up': True,
|
||||
'port_security_enabled': False,
|
||||
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
|
||||
'project_id': project_id}}
|
||||
port = {'name': ovn_const.LB_HM_PORT_PREFIX + str(subnet_id),
|
||||
'network_id': network_id,
|
||||
'fixed_ips': [{'subnet_id': subnet_id}],
|
||||
'admin_state_up': True,
|
||||
'port_security_enabled': False,
|
||||
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
|
||||
'project_id': project_id}
|
||||
neutron_client = clients.get_neutron_client()
|
||||
try:
|
||||
hm_port = neutron_client.create_port(port)
|
||||
return hm_port['port'] if hm_port['port'] else None
|
||||
except n_exc.NeutronClientException:
|
||||
return neutron_client.create_port(**port)
|
||||
except openstack.exceptions.HttpException:
|
||||
# NOTE (froyo): whatever other exception as e.g. Timeout
|
||||
# we should try to ensure no leftover port remains
|
||||
self._clean_up_hm_port(subnet_id)
|
||||
@ -159,39 +157,39 @@ class OvnProviderHelper():
|
||||
neutron_client = clients.get_neutron_client()
|
||||
hm_port_ip = None
|
||||
|
||||
hm_checks_port = self._neutron_list_ports(neutron_client, **{
|
||||
'name': f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}'})
|
||||
if hm_checks_port['ports']:
|
||||
# NOTE(froyo): Just to cover the case that we have more than one
|
||||
# hm-port created by a race condition on create_hm_port and we need
|
||||
# to ensure no leftover ports remains
|
||||
for hm_port in hm_checks_port['ports']:
|
||||
for fixed_ip in hm_port.get('fixed_ips', []):
|
||||
if fixed_ip['subnet_id'] == subnet_id:
|
||||
hm_port_ip = fixed_ip['ip_address']
|
||||
hm_checks_port = self._neutron_list_ports(
|
||||
neutron_client,
|
||||
name=f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}')
|
||||
# NOTE(froyo): Just to cover the case that we have more than one
|
||||
# hm-port created by a race condition on create_hm_port and we need
|
||||
# to ensure no leftover ports remains
|
||||
for hm_port in hm_checks_port:
|
||||
for fixed_ip in hm_port.fixed_ips:
|
||||
if fixed_ip['subnet_id'] == subnet_id:
|
||||
hm_port_ip = fixed_ip['ip_address']
|
||||
|
||||
if hm_port_ip:
|
||||
lbs = self.ovn_nbdb_api.db_find_rows(
|
||||
'Load_Balancer', ('health_check', '!=', [])).execute()
|
||||
for lb in lbs:
|
||||
for k, v in lb.ip_port_mappings.items():
|
||||
if hm_port_ip in v:
|
||||
return
|
||||
# Not found any other health monitor using the hm port
|
||||
self.delete_port(hm_port['id'])
|
||||
if hm_port_ip:
|
||||
lbs = self.ovn_nbdb_api.db_find_rows(
|
||||
'Load_Balancer', ('health_check', '!=', [])).execute()
|
||||
for lb in lbs:
|
||||
for k, v in lb.ip_port_mappings.items():
|
||||
if hm_port_ip in v:
|
||||
return
|
||||
# Not found any other health monitor using the hm port
|
||||
self.delete_port(hm_port.id)
|
||||
|
||||
def _ensure_hm_ovn_port(self, network_id, subnet_id, project_id):
|
||||
# We will use a dedicated port for this, so we should find the one
|
||||
# related to the network id, if not found, create a new one and use it.
|
||||
|
||||
neutron_client = clients.get_neutron_client()
|
||||
hm_checks_port = self._neutron_list_ports(neutron_client, **{
|
||||
'network_id': network_id,
|
||||
'name': f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}'})
|
||||
if hm_checks_port['ports']:
|
||||
return hm_checks_port['ports'][0]
|
||||
else:
|
||||
return self._create_hm_port(network_id, subnet_id, project_id)
|
||||
hm_checks_port = self._neutron_find_port(
|
||||
neutron_client,
|
||||
network_id=network_id,
|
||||
name_or_id=f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}')
|
||||
if hm_checks_port:
|
||||
return hm_checks_port
|
||||
return self._create_hm_port(network_id, subnet_id, project_id)
|
||||
|
||||
def _get_nw_router_info_on_interface_event(self, lrp):
|
||||
"""Get the Router and Network information on an interface event
|
||||
@ -442,12 +440,22 @@ class OvnProviderHelper():
|
||||
return self._find_ovn_lbs(lb_id, protocol=protocol)
|
||||
|
||||
@tenacity.retry(
|
||||
retry=tenacity.retry_if_exception_type(n_exc.NeutronClientException),
|
||||
retry=tenacity.retry_if_exception_type(
|
||||
openstack.exceptions.HttpException),
|
||||
wait=tenacity.wait_exponential(),
|
||||
stop=tenacity.stop_after_delay(10),
|
||||
reraise=True)
|
||||
def _neutron_list_ports(self, neutron_client, **params):
|
||||
return neutron_client.list_ports(**params)
|
||||
return neutron_client.ports(**params)
|
||||
|
||||
@tenacity.retry(
|
||||
retry=tenacity.retry_if_exception_type(
|
||||
openstack.exceptions.HttpException),
|
||||
wait=tenacity.wait_exponential(),
|
||||
stop=tenacity.stop_after_delay(10),
|
||||
reraise=True)
|
||||
def _neutron_find_port(self, neutron_client, **params):
|
||||
return neutron_client.find_port(**params)
|
||||
|
||||
def _find_ovn_lbs(self, lb_id, protocol=None):
|
||||
"""Find the Loadbalancers in OVN with the given lb_id as its name
|
||||
@ -573,9 +581,9 @@ class OvnProviderHelper():
|
||||
if lb and lb.vip_subnet_id:
|
||||
neutron_client = clients.get_neutron_client()
|
||||
try:
|
||||
subnet = neutron_client.show_subnet(lb.vip_subnet_id)
|
||||
vip_subnet_cidr = subnet['subnet']['cidr']
|
||||
except n_exc.NotFound:
|
||||
subnet = neutron_client.get_subnet(lb.vip_subnet_id)
|
||||
vip_subnet_cidr = subnet.cidr
|
||||
except openstack.exceptions.ResourceNotFound:
|
||||
LOG.warning('Subnet %s not found while trying to '
|
||||
'fetch its data.', lb.vip_subnet_id)
|
||||
return None, None
|
||||
@ -619,9 +627,9 @@ class OvnProviderHelper():
|
||||
else:
|
||||
neutron_client = clients.get_neutron_client()
|
||||
try:
|
||||
subnet = neutron_client.show_subnet(subnet_id)
|
||||
ls_name = utils.ovn_name(subnet['subnet']['network_id'])
|
||||
except n_exc.NotFound:
|
||||
subnet = neutron_client.get_subnet(subnet_id)
|
||||
ls_name = utils.ovn_name(subnet.network_id)
|
||||
except openstack.exceptions.ResourceNotFound:
|
||||
LOG.warning('Subnet %s not found while trying to '
|
||||
'fetch its data.', subnet_id)
|
||||
ls_name = None
|
||||
@ -974,43 +982,44 @@ class OvnProviderHelper():
|
||||
|
||||
def lb_create(self, loadbalancer, protocol=None):
|
||||
port = None
|
||||
subnet = {}
|
||||
subnet = None
|
||||
try:
|
||||
neutron_client = clients.get_neutron_client()
|
||||
if loadbalancer.get(constants.VIP_PORT_ID):
|
||||
# In case we don't have vip_network_id
|
||||
port = neutron_client.show_port(
|
||||
loadbalancer[constants.VIP_PORT_ID])['port']
|
||||
for ip in port['fixed_ips']:
|
||||
port = neutron_client.get_port(
|
||||
loadbalancer[constants.VIP_PORT_ID])
|
||||
for ip in port.fixed_ips:
|
||||
if ip['ip_address'] == loadbalancer[constants.VIP_ADDRESS]:
|
||||
subnet = neutron_client.show_subnet(
|
||||
ip['subnet_id'])['subnet']
|
||||
subnet = neutron_client.get_subnet(
|
||||
ip['subnet_id'])
|
||||
break
|
||||
elif (loadbalancer.get(constants.VIP_NETWORK_ID) and
|
||||
loadbalancer.get(constants.VIP_ADDRESS)):
|
||||
ports = self._neutron_list_ports(neutron_client, **{
|
||||
'network_id': loadbalancer[constants.VIP_NETWORK_ID]})
|
||||
for p in ports['ports']:
|
||||
for ip in p['fixed_ips']:
|
||||
ports = self._neutron_list_ports(
|
||||
neutron_client,
|
||||
network_id=loadbalancer[constants.VIP_NETWORK_ID])
|
||||
for p in ports:
|
||||
for ip in p.fixed_ips:
|
||||
if ip['ip_address'] == loadbalancer[
|
||||
constants.VIP_ADDRESS]:
|
||||
port = p
|
||||
subnet = neutron_client.show_subnet(
|
||||
ip['subnet_id'])['subnet']
|
||||
subnet = neutron_client.get_subnet(
|
||||
ip['subnet_id'])
|
||||
break
|
||||
except Exception:
|
||||
LOG.error('Cannot get info from neutron client')
|
||||
LOG.exception(ovn_const.EXCEPTION_MSG, "creation of loadbalancer")
|
||||
# Any Exception set the status to ERROR
|
||||
if isinstance(port, dict):
|
||||
if port:
|
||||
try:
|
||||
self.delete_port(port.get('id'))
|
||||
self.delete_port(port.id)
|
||||
LOG.warning("Deleting the VIP port %s since LB went into "
|
||||
"ERROR state", str(port.get('id')))
|
||||
"ERROR state", str(port.id))
|
||||
except Exception:
|
||||
LOG.exception("Error deleting the VIP port %s upon "
|
||||
"loadbalancer %s creation failure",
|
||||
str(port.get('id')),
|
||||
str(port.id),
|
||||
str(loadbalancer[constants.ID]))
|
||||
status = {
|
||||
constants.LOADBALANCERS: [
|
||||
@ -1026,7 +1035,7 @@ class OvnProviderHelper():
|
||||
external_ids = {
|
||||
ovn_const.LB_EXT_IDS_VIP_KEY: loadbalancer[constants.VIP_ADDRESS],
|
||||
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY:
|
||||
loadbalancer.get(constants.VIP_PORT_ID) or port['id'],
|
||||
loadbalancer.get(constants.VIP_PORT_ID) or port.id,
|
||||
'enabled': str(loadbalancer[constants.ADMIN_STATE_UP])}
|
||||
# In case vip_fip was passed - use it.
|
||||
vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
|
||||
@ -1055,18 +1064,17 @@ class OvnProviderHelper():
|
||||
|
||||
# NOTE(ltomasbo): If the VIP is on a provider network, it does
|
||||
# not need to be associated to its LS
|
||||
network = neutron_client.show_network(
|
||||
port['network_id'])['network']
|
||||
if not network.get(provider_net.PHYSICAL_NETWORK, False):
|
||||
network = neutron_client.get_network(port.network_id)
|
||||
if not network.provider_physical_network:
|
||||
# NOTE(froyo): This is the association of the lb to the VIP ls
|
||||
# so this is executed right away
|
||||
self._update_lb_to_ls_association(
|
||||
ovn_lb, network_id=port['network_id'],
|
||||
ovn_lb, network_id=port.network_id,
|
||||
associate=True, update_ls_ref=True)
|
||||
ls_name = utils.ovn_name(port['network_id'])
|
||||
ls_name = utils.ovn_name(port.network_id)
|
||||
ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
|
||||
check_error=True)
|
||||
ovn_lr = self._find_lr_of_ls(ovn_ls, subnet.get('gateway_ip'))
|
||||
ovn_lr = self._find_lr_of_ls(ovn_ls, subnet.gateway_ip)
|
||||
if ovn_lr:
|
||||
try:
|
||||
# NOTE(froyo): This is the association of the lb to the
|
||||
@ -1119,15 +1127,15 @@ class OvnProviderHelper():
|
||||
except Exception:
|
||||
LOG.exception(ovn_const.EXCEPTION_MSG, "creation of loadbalancer")
|
||||
# Any Exception set the status to ERROR
|
||||
if isinstance(port, dict):
|
||||
if port:
|
||||
try:
|
||||
self.delete_port(port.get('id'))
|
||||
self.delete_port(port.id)
|
||||
LOG.warning("Deleting the VIP port %s since LB went into "
|
||||
"ERROR state", str(port.get('id')))
|
||||
"ERROR state", str(port.id))
|
||||
except Exception:
|
||||
LOG.exception("Error deleting the VIP port %s upon "
|
||||
"loadbalancer %s creation failure",
|
||||
str(port.get('id')),
|
||||
str(port.id),
|
||||
str(loadbalancer[constants.ID]))
|
||||
status = {
|
||||
constants.LOADBALANCERS: [
|
||||
@ -1876,13 +1884,13 @@ class OvnProviderHelper():
|
||||
neutron_client = clients.get_neutron_client()
|
||||
ovn_lr = None
|
||||
try:
|
||||
subnet = neutron_client.show_subnet(subnet_id)
|
||||
ls_name = utils.ovn_name(subnet['subnet']['network_id'])
|
||||
subnet = neutron_client.get_subnet(subnet_id)
|
||||
ls_name = utils.ovn_name(subnet.network_id)
|
||||
ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
|
||||
check_error=True)
|
||||
ovn_lr = self._find_lr_of_ls(
|
||||
ovn_ls, subnet['subnet'].get('gateway_ip'))
|
||||
except n_exc.NotFound:
|
||||
ovn_ls, subnet.gateway_ip)
|
||||
except openstack.exceptions.ResourceNotFound:
|
||||
pass
|
||||
except idlutils.RowNotFound:
|
||||
pass
|
||||
@ -2147,49 +2155,48 @@ class OvnProviderHelper():
|
||||
return meminf.split('_')[1]
|
||||
|
||||
def create_vip_port(self, project_id, lb_id, vip_d):
|
||||
port = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id),
|
||||
'network_id': vip_d[constants.VIP_NETWORK_ID],
|
||||
'fixed_ips': [{'subnet_id': vip_d['vip_subnet_id']}],
|
||||
'admin_state_up': True,
|
||||
'project_id': project_id}}
|
||||
port = {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id),
|
||||
'network_id': vip_d[constants.VIP_NETWORK_ID],
|
||||
'fixed_ips': [{'subnet_id': vip_d['vip_subnet_id']}],
|
||||
'admin_state_up': True,
|
||||
'project_id': project_id}
|
||||
try:
|
||||
port['port']['fixed_ips'][0]['ip_address'] = (
|
||||
port['fixed_ips'][0]['ip_address'] = (
|
||||
vip_d[constants.VIP_ADDRESS])
|
||||
except KeyError:
|
||||
pass
|
||||
neutron_client = clients.get_neutron_client()
|
||||
try:
|
||||
return neutron_client.create_port(port)
|
||||
except n_exc.IpAddressAlreadyAllocatedClient as e:
|
||||
return neutron_client.create_port(**port)
|
||||
except openstack.exceptions.ConflictException as e:
|
||||
# Sometimes the VIP is already created (race-conditions)
|
||||
# Lets get the it from Neutron API.
|
||||
ports = self._neutron_list_ports(neutron_client, **{
|
||||
'network_id': vip_d[constants.VIP_NETWORK_ID],
|
||||
'name': f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}'})
|
||||
if not ports['ports']:
|
||||
port = self._neutron_find_port(
|
||||
neutron_client,
|
||||
network_id=vip_d[constants.VIP_NETWORK_ID],
|
||||
name_or_id=f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}')
|
||||
if not port:
|
||||
LOG.error('Cannot create/get LoadBalancer VIP port with '
|
||||
'fixed IP: %s', vip_d[constants.VIP_ADDRESS])
|
||||
raise e
|
||||
# there should only be one port returned
|
||||
port = ports['ports'][0]
|
||||
LOG.debug('VIP Port already exists, uuid: %s', port['id'])
|
||||
return {'port': port}
|
||||
except n_exc.NeutronClientException as e:
|
||||
LOG.debug('VIP Port already exists, uuid: %s', port.id)
|
||||
return port
|
||||
except openstack.exceptions.HttpException as e:
|
||||
# NOTE (froyo): whatever other exception as e.g. Timeout
|
||||
# we should try to ensure no leftover port remains
|
||||
ports = self._neutron_list_ports(neutron_client, **{
|
||||
'network_id': vip_d[constants.VIP_NETWORK_ID],
|
||||
'name': f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}'})
|
||||
if ports['ports']:
|
||||
port = ports['ports'][0]
|
||||
port = self._neutron_find_port(
|
||||
neutron_client,
|
||||
network_id=vip_d[constants.VIP_NETWORK_ID],
|
||||
name_or_id=f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}')
|
||||
if port:
|
||||
LOG.debug('Leftover port %s has been found. Trying to '
|
||||
'delete it', port['id'])
|
||||
self.delete_port(port['id'])
|
||||
'delete it', port.id)
|
||||
self.delete_port(port.id)
|
||||
raise e
|
||||
|
||||
@tenacity.retry(
|
||||
retry=tenacity.retry_if_exception_type(
|
||||
n_exc.NeutronClientException),
|
||||
openstack.exceptions.HttpException),
|
||||
wait=tenacity.wait_exponential(max=75),
|
||||
stop=tenacity.stop_after_attempt(15),
|
||||
reraise=True)
|
||||
@ -2197,7 +2204,7 @@ class OvnProviderHelper():
|
||||
neutron_client = clients.get_neutron_client()
|
||||
try:
|
||||
neutron_client.delete_port(port_id)
|
||||
except n_exc.PortNotFoundClient:
|
||||
except openstack.exceptions.ResourceNotFound:
|
||||
LOG.warning("Port %s could not be found. Please "
|
||||
"check Neutron logs. Perhaps port "
|
||||
"was already deleted.", port_id)
|
||||
@ -2261,9 +2268,9 @@ class OvnProviderHelper():
|
||||
# Find out if member has FIP assigned.
|
||||
neutron_client = clients.get_neutron_client()
|
||||
try:
|
||||
subnet = neutron_client.show_subnet(info['subnet_id'])
|
||||
ls_name = utils.ovn_name(subnet['subnet']['network_id'])
|
||||
except n_exc.NotFound:
|
||||
subnet = neutron_client.get_subnet(info['subnet_id'])
|
||||
ls_name = utils.ovn_name(subnet.network_id)
|
||||
except openstack.exceptions.ResourceNotFound:
|
||||
LOG.exception('Subnet %s not found while trying to '
|
||||
'fetch its data.', info['subnet_id'])
|
||||
return
|
||||
@ -2318,15 +2325,14 @@ class OvnProviderHelper():
|
||||
# We should call neutron API to do 'empty' update of the FIP.
|
||||
# It will bump revision number and do recomputation of the FIP.
|
||||
try:
|
||||
fip_info = neutron_client.show_floatingip(
|
||||
fip_info = neutron_client.get_ip(
|
||||
fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY])
|
||||
empty_update = {
|
||||
"floatingip": {
|
||||
'description': fip_info['floatingip']['description']}}
|
||||
neutron_client.update_floatingip(
|
||||
'description': fip_info['description']}
|
||||
neutron_client.update_ip(
|
||||
fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY],
|
||||
empty_update)
|
||||
except n_exc.NotFound:
|
||||
**empty_update)
|
||||
except openstack.exceptions.ResourceNotFound:
|
||||
LOG.warning('Member %(member)s FIP %(fip)s not found in '
|
||||
'Neutron. Cannot update it.',
|
||||
{'member': info['id'],
|
||||
@ -2335,12 +2341,12 @@ class OvnProviderHelper():
|
||||
def _get_member_lsp(self, member_ip, member_subnet_id):
|
||||
neutron_client = clients.get_neutron_client()
|
||||
try:
|
||||
member_subnet = neutron_client.show_subnet(member_subnet_id)
|
||||
except n_exc.NotFound:
|
||||
member_subnet = neutron_client.get_subnet(member_subnet_id)
|
||||
except openstack.exceptions.ResourceNotFound:
|
||||
LOG.exception('Subnet %s not found while trying to '
|
||||
'fetch its data.', member_subnet_id)
|
||||
return
|
||||
ls_name = utils.ovn_name(member_subnet['subnet']['network_id'])
|
||||
ls_name = utils.ovn_name(member_subnet.network_id)
|
||||
try:
|
||||
ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name)
|
||||
except idlutils.RowNotFound:
|
||||
|
@ -57,38 +57,43 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
|
||||
self.fake_neutron_client = mock.MagicMock()
|
||||
clients.get_neutron_client = mock.MagicMock()
|
||||
clients.get_neutron_client.return_value = self.fake_neutron_client
|
||||
self.fake_neutron_client.show_network = self._mock_show_network
|
||||
self.fake_neutron_client.show_subnet = self._mock_show_subnet
|
||||
self.fake_neutron_client.list_ports = self._mock_list_ports
|
||||
self.fake_neutron_client.show_port = self._mock_show_port
|
||||
self.fake_neutron_client.get_network = self._mock_get_network
|
||||
self.fake_neutron_client.get_subnet = self._mock_get_subnet
|
||||
self.fake_neutron_client.ports = self._mock_ports
|
||||
self.fake_neutron_client.get_port = self._mock_get_port
|
||||
self.fake_neutron_client.delete_port.return_value = True
|
||||
self._local_net_cache = {}
|
||||
self._local_cidr_cache = {}
|
||||
self._local_port_cache = {'ports': []}
|
||||
self.core_plugin = directory.get_plugin()
|
||||
|
||||
def _mock_show_network(self, network_id):
|
||||
network = {}
|
||||
network['id'] = network_id
|
||||
network['provider:physical_network'] = None
|
||||
return {'network': network}
|
||||
def _port_dict_to_mock(self, port_dict):
|
||||
port = mock.Mock(**port_dict)
|
||||
return port
|
||||
|
||||
def _mock_show_subnet(self, subnet_id):
|
||||
subnet = {}
|
||||
subnet['network_id'] = self._local_net_cache[subnet_id]
|
||||
subnet['cidr'] = self._local_cidr_cache[subnet_id]
|
||||
return {'subnet': subnet}
|
||||
def _mock_get_network(self, network_id):
|
||||
network = mock.Mock()
|
||||
network.id = network_id
|
||||
network.provider_physical_network = None
|
||||
return network
|
||||
|
||||
def _mock_list_ports(self, **kwargs):
|
||||
return self._local_port_cache
|
||||
def _mock_get_subnet(self, subnet_id):
|
||||
subnet = mock.Mock()
|
||||
subnet.network_id = self._local_net_cache[subnet_id]
|
||||
subnet.cidr = self._local_cidr_cache[subnet_id]
|
||||
subnet.gateway_ip = None
|
||||
return subnet
|
||||
|
||||
def _mock_show_port(self, port_id):
|
||||
def _mock_ports(self, **kwargs):
|
||||
return self._local_port_cache['ports']
|
||||
|
||||
def _mock_get_port(self, port_id):
|
||||
for port in self._local_port_cache['ports']:
|
||||
if port['id'] == port_id:
|
||||
return {'port': port}
|
||||
if port.id == port_id:
|
||||
return port
|
||||
|
||||
def _create_provider_network(self):
|
||||
e1 = self._make_network(self.fmt, 'e1', True,
|
||||
e1 = self._make_network(self.fmt, 'e1', True, True,
|
||||
arg_list=('router:external',
|
||||
'provider:network_type',
|
||||
'provider:physical_network'),
|
||||
@ -276,7 +281,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
|
||||
if router_id:
|
||||
self.l3_plugin.add_router_interface(
|
||||
self.context, router_id, {'subnet_id': subnet['id']})
|
||||
self._local_port_cache['ports'].append(port['port'])
|
||||
self._local_port_cache['ports'].append(
|
||||
self._port_dict_to_mock(port['port']))
|
||||
vip_port_address = port['port']['fixed_ips'][0]['ip_address']
|
||||
return (n1['network']['id'], subnet['id'], vip_port_address,
|
||||
port['port']['id'])
|
||||
@ -616,13 +622,13 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
|
||||
if m.subnet_id:
|
||||
# Need to get the network_id.
|
||||
for port in self._local_port_cache['ports']:
|
||||
for fixed_ip in port['fixed_ips']:
|
||||
for fixed_ip in port.fixed_ips:
|
||||
if fixed_ip['subnet_id'] == m.subnet_id:
|
||||
ex = external_ids[
|
||||
ovn_const.LB_EXT_IDS_LS_REFS_KEY]
|
||||
act = ex.get(
|
||||
'neutron-%s' % port['network_id'], 0)
|
||||
ex['neutron-%s' % port['network_id']] = act + 1
|
||||
'neutron-%s' % port.network_id, 0)
|
||||
ex['neutron-%s' % port.network_id] = act + 1
|
||||
break
|
||||
if p.healthmonitor:
|
||||
member_status[m.member_id] = o_constants.ONLINE
|
||||
|
@ -13,82 +13,117 @@
|
||||
#
|
||||
from unittest import mock
|
||||
|
||||
from keystoneauth1 import exceptions as ks_exceptions
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as oslo_fixture
|
||||
from oslotest import base
|
||||
|
||||
from ovn_octavia_provider.common import clients
|
||||
from ovn_octavia_provider.common import config
|
||||
|
||||
|
||||
class TestKeystoneSession(base.BaseTestCase):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
config.register_opts()
|
||||
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
|
||||
|
||||
@mock.patch(
|
||||
'keystoneauth1.loading.register_auth_conf_options')
|
||||
@mock.patch(
|
||||
'keystoneauth1.loading.register_session_conf_options')
|
||||
def test_init(self, kl_rs, kl_ra):
|
||||
clients.KeystoneSession()
|
||||
kl_ra.assert_called_once_with(mock.ANY, 'service_auth')
|
||||
kl_rs.assert_called_once_with(mock.ANY, 'service_auth')
|
||||
'keystoneauth1.loading.load_auth_from_conf_options')
|
||||
def test_auth(self, kl_auth):
|
||||
missing_options = [mock.Mock(dest='username')]
|
||||
auth = mock.Mock()
|
||||
|
||||
# service_auth with missing option
|
||||
kl_auth.side_effect = [
|
||||
ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options)
|
||||
]
|
||||
|
||||
ksession = clients.KeystoneSession()
|
||||
self.assertRaises(
|
||||
ks_exceptions.auth_plugins.MissingRequiredOptions,
|
||||
lambda: ksession.auth)
|
||||
|
||||
# neutron with missing option, missing option also in service_auth
|
||||
kl_auth.reset_mock()
|
||||
kl_auth.side_effect = [
|
||||
ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options),
|
||||
auth,
|
||||
ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options),
|
||||
]
|
||||
|
||||
ksession = clients.KeystoneSession('neutron')
|
||||
self.assertRaises(
|
||||
ks_exceptions.auth_plugins.MissingRequiredOptions,
|
||||
lambda: ksession.auth)
|
||||
|
||||
# neutron with missing option, it is copied from service_auth
|
||||
kl_auth.reset_mock()
|
||||
kl_auth.side_effect = [
|
||||
ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options),
|
||||
auth,
|
||||
auth,
|
||||
]
|
||||
|
||||
self.conf.config(group='service_auth',
|
||||
endpoint_override='foo')
|
||||
|
||||
ksession = clients.KeystoneSession('neutron')
|
||||
self.assertEqual(ksession.auth, auth)
|
||||
self.assertEqual(cfg.CONF.neutron.endpoint_override, 'foo')
|
||||
|
||||
@mock.patch(
|
||||
'keystoneauth1.loading.load_session_from_conf_options')
|
||||
def test_cached_session(self, kl):
|
||||
ksession = clients.KeystoneSession()
|
||||
@mock.patch(
|
||||
'keystoneauth1.loading.load_auth_from_conf_options')
|
||||
def test_cached_session(self, kl_auth, kl_session):
|
||||
ksession = clients.KeystoneSession('neutron')
|
||||
self.assertIs(
|
||||
ksession.session,
|
||||
ksession.session)
|
||||
kl.assert_called_once_with(
|
||||
mock.ANY, 'service_auth', auth=ksession.auth)
|
||||
kl_session.assert_called_once_with(
|
||||
mock.ANY, 'neutron', auth=ksession.auth)
|
||||
|
||||
@mock.patch(
|
||||
'keystoneauth1.loading.load_auth_from_conf_options')
|
||||
def test_cached_auth(self, kl):
|
||||
ksession = clients.KeystoneSession()
|
||||
ksession = clients.KeystoneSession('neutron')
|
||||
self.assertIs(
|
||||
ksession.auth,
|
||||
ksession.auth)
|
||||
kl.assert_called_once_with(mock.ANY, 'service_auth')
|
||||
kl.assert_called_once_with(mock.ANY, 'neutron')
|
||||
|
||||
|
||||
class TestNeutronAuth(base.BaseTestCase):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
config.register_opts()
|
||||
self.mock_client = mock.patch(
|
||||
'neutronclient.neutron.client.Client').start()
|
||||
self.client_args = {
|
||||
'endpoint': 'foo_endpoint',
|
||||
'region': 'foo_region',
|
||||
'endpoint_type': 'foo_endpoint_type',
|
||||
'service_name': 'foo_service_name',
|
||||
'insecure': 'foo_insecure',
|
||||
'ca_cert': 'foo_ca_cert'}
|
||||
'openstack.connection.Connection').start()
|
||||
clients.Singleton._instances = {}
|
||||
|
||||
@mock.patch.object(clients, 'KeystoneSession')
|
||||
def test_init(self, mock_ks):
|
||||
clients.NeutronAuth(**self.client_args)
|
||||
clients.NeutronAuth()
|
||||
self.mock_client.assert_called_once_with(
|
||||
'2.0',
|
||||
endpoint_override=self.client_args['endpoint'],
|
||||
region_name=self.client_args['region'],
|
||||
endpoint_type=self.client_args['endpoint_type'],
|
||||
service_name=self.client_args['service_name'],
|
||||
insecure=self.client_args['insecure'],
|
||||
ca_cert=self.client_args['ca_cert'],
|
||||
session=mock_ks().session)
|
||||
|
||||
def test_singleton(self):
|
||||
c1 = clients.NeutronAuth(**self.client_args)
|
||||
c2 = clients.NeutronAuth(**self.client_args)
|
||||
c1 = clients.NeutronAuth()
|
||||
c2 = clients.NeutronAuth()
|
||||
self.assertIs(c1, c2)
|
||||
|
||||
def test_singleton_exception(self):
|
||||
mock_client = mock.Mock()
|
||||
mock_client.network_proxy = 'foo'
|
||||
with mock.patch(
|
||||
'neutronclient.neutron.client.Client',
|
||||
side_effect=[RuntimeError, 'foo', 'foo']) as n_cli:
|
||||
'openstack.connection.Connection',
|
||||
side_effect=[RuntimeError,
|
||||
mock_client, mock_client]) as os_sdk:
|
||||
self.assertRaises(
|
||||
RuntimeError,
|
||||
clients.NeutronAuth,
|
||||
**self.client_args)
|
||||
c2 = clients.NeutronAuth(**self.client_args)
|
||||
c3 = clients.NeutronAuth(**self.client_args)
|
||||
clients.NeutronAuth)
|
||||
c2 = clients.NeutronAuth()
|
||||
c3 = clients.NeutronAuth()
|
||||
self.assertIs(c2, c3)
|
||||
self.assertEqual(n_cli._mock_call_count, 2)
|
||||
self.assertEqual(os_sdk._mock_call_count, 2)
|
||||
|
@ -86,6 +86,14 @@ class FakeResource(dict):
|
||||
self._add_details(info)
|
||||
|
||||
|
||||
class FakeOpenStackSDKResource(FakeResource):
|
||||
def __getattr__(self, attr):
|
||||
if attr in self._info:
|
||||
return self._info[attr]
|
||||
else:
|
||||
raise AttributeError("No such attribute '{}'".format(attr))
|
||||
|
||||
|
||||
class FakeOvsdbRow(FakeResource):
|
||||
"""Fake one or more OVSDB rows."""
|
||||
|
||||
@ -165,8 +173,8 @@ class FakeSubnet():
|
||||
# Overwrite default attributes.
|
||||
subnet_attrs.update(attrs)
|
||||
|
||||
return FakeResource(info=copy.deepcopy(subnet_attrs),
|
||||
loaded=True)
|
||||
return FakeOpenStackSDKResource(info=copy.deepcopy(subnet_attrs),
|
||||
loaded=True)
|
||||
|
||||
|
||||
class FakeOVNPort():
|
||||
@ -289,8 +297,8 @@ class FakePort():
|
||||
# Overwrite default attributes.
|
||||
port_attrs.update(attrs)
|
||||
|
||||
return FakeResource(info=copy.deepcopy(port_attrs),
|
||||
loaded=True)
|
||||
return FakeOpenStackSDKResource(info=copy.deepcopy(port_attrs),
|
||||
loaded=True)
|
||||
|
||||
|
||||
class FakeLB(data_models.LoadBalancer):
|
||||
|
@ -11,15 +11,16 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import collections
|
||||
import copy
|
||||
from unittest import mock
|
||||
|
||||
from neutron_lib.api.definitions import provider_net
|
||||
from neutron_lib import constants as n_const
|
||||
from neutronclient.common import exceptions as n_exc
|
||||
from octavia_lib.api.drivers import data_models
|
||||
from octavia_lib.api.drivers import exceptions
|
||||
from octavia_lib.common import constants
|
||||
import openstack
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
from ovsdbapp.backend.ovs_idl import idlutils
|
||||
@ -32,6 +33,9 @@ from ovn_octavia_provider import helper as ovn_helper
|
||||
from ovn_octavia_provider.tests.unit import base as ovn_base
|
||||
from ovn_octavia_provider.tests.unit import fakes
|
||||
|
||||
Port = collections.namedtuple('Port', 'id, name, network_id, fixed_ips',
|
||||
defaults=[None, '', None, []])
|
||||
|
||||
|
||||
class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
|
||||
@ -55,11 +59,11 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
'cascade': False,
|
||||
'vip_network_id': self.vip_network_id,
|
||||
'admin_state_up': False}
|
||||
self.ports = {'ports': [{
|
||||
'fixed_ips': [{'ip_address': self.vip_address,
|
||||
'subnet_id': uuidutils.generate_uuid()}],
|
||||
'network_id': self.vip_network_id,
|
||||
'id': self.port1_id}]}
|
||||
self.ports = [Port(
|
||||
fixed_ips=[{'ip_address': self.vip_address,
|
||||
'subnet_id': uuidutils.generate_uuid()}],
|
||||
network_id=self.vip_network_id,
|
||||
id=self.port1_id)]
|
||||
self.pool = {'id': self.pool_id,
|
||||
'loadbalancer_id': self.loadbalancer_id,
|
||||
'listener_id': self.listener_id,
|
||||
@ -504,8 +508,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test__get_subnet_from_pool(self, net_cli):
|
||||
net_cli.return_value.show_subnet.return_value = {
|
||||
'subnet': {'cidr': '10.22.33.0/24'}}
|
||||
net_cli.return_value.get_subnet.return_value = (
|
||||
fakes.FakeSubnet.create_one_subnet(
|
||||
attrs={'cidr': '10.22.33.0/24'}))
|
||||
|
||||
f = self.helper._get_subnet_from_pool
|
||||
|
||||
@ -656,7 +661,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_lb_create_disabled(self, net_cli):
|
||||
self.lb['admin_state_up'] = False
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
status = self.helper.lb_create(self.lb)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
@ -674,7 +679,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_lb_create_enabled(self, net_cli):
|
||||
self.lb['admin_state_up'] = True
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
status = self.helper.lb_create(self.lb)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
@ -699,7 +704,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self._update_lb_to_ls_association.stop()
|
||||
self.lb['admin_state_up'] = True
|
||||
f_lr.return_value = self.router
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
self.helper._update_lb_to_lr_association.side_effect = [
|
||||
idlutils.RowNotFound]
|
||||
status = self.helper.lb_create(self.lb)
|
||||
@ -726,7 +731,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_lb_create_selection_fields_not_supported(self, net_cli):
|
||||
self.lb['admin_state_up'] = True
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
self.helper._are_selection_fields_supported = (
|
||||
mock.Mock(return_value=False))
|
||||
status = self.helper.lb_create(self.lb)
|
||||
@ -745,9 +750,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_lb_create_selection_fields_not_supported_algo(self, net_cli):
|
||||
self.lb['admin_state_up'] = True
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
net_cli.return_value.show_subnet.return_value = {
|
||||
'subnet': mock.MagicMock()}
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
net_cli.return_value.get_subnet.return_value = mock.MagicMock()
|
||||
self.pool['lb_algoritm'] = 'foo'
|
||||
status = self.helper.lb_create(self.lb)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
@ -776,11 +780,11 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.lb['protocol'] = protocol
|
||||
self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo'
|
||||
self.lb[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = '{\"neutron-foo\": 1}'
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
fake_network = {'id': self.lb['vip_network_id'],
|
||||
provider_net.PHYSICAL_NETWORK: provider}
|
||||
net_cli.return_value.show_network.return_value = {
|
||||
'network': fake_network}
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
fake_network = mock.MagicMock()
|
||||
fake_network.id = self.lb['vip_network_id']
|
||||
fake_network.provider_physical_network = provider
|
||||
net_cli.return_value.get_network.return_value = fake_network
|
||||
|
||||
status = self.helper.lb_create(self.lb, protocol=protocol)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
@ -819,8 +823,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_lb_create_neutron_client_exception(self, net_cli):
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
net_cli.return_value.show_subnet.side_effect = [n_exc.NotFound]
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
net_cli.return_value.get_subnet.side_effect = [
|
||||
openstack.exceptions.ResourceNotFound]
|
||||
status = self.helper.lb_create(self.lb)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
@ -831,13 +836,13 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
|
||||
def test_lb_create_exception(self, del_port, net_cli):
|
||||
self.helper._find_ovn_lbs.side_effect = [RuntimeError]
|
||||
net_cli.return_value.list_ports.return_value = self.ports
|
||||
net_cli.return_value.ports.return_value = self.ports
|
||||
status = self.helper.lb_create(self.lb)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
del_port.assert_called_once_with(self.ports.get('ports')[0]['id'])
|
||||
del_port.assert_called_once_with(self.ports[0].id)
|
||||
del_port.side_effect = [Exception]
|
||||
status = self.helper.lb_create(self.lb)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
@ -1676,7 +1681,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_member_create(self, net_cli):
|
||||
net_cli.return_value.show_subnet.side_effect = [
|
||||
net_cli.return_value.get_subnet.side_effect = [
|
||||
idlutils.RowNotFound, idlutils.RowNotFound]
|
||||
self.ovn_lb.external_ids = mock.MagicMock()
|
||||
status = self.helper.member_create(self.member)
|
||||
@ -1702,7 +1707,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_member_create_lb_add_from_lr(self, net_cli, f_lr, folbpi):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
|
||||
net_cli.return_value.get_subnet.return_value = fake_subnet
|
||||
f_lr.return_value = self.router
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
folbpi.return_value = (pool_key, self.ovn_lb)
|
||||
@ -1719,10 +1724,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_member_create_lb_add_from_lr_no_ls(self, net_cli, f_lr, f_ls):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
|
||||
net_cli.return_value.get_subnet.return_value = fake_subnet
|
||||
self.ovn_lb.external_ids = mock.MagicMock()
|
||||
(self.helper.ovn_nbdb_api.ls_get.return_value.
|
||||
execute.side_effect) = [n_exc.NotFound]
|
||||
execute.side_effect) = [openstack.exceptions.ResourceNotFound]
|
||||
status = self.helper.member_create(self.member)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
@ -1751,7 +1756,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_member_create_lb_add_from_lr_retry(self, net_cli, f_lr, folbpi):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
|
||||
net_cli.return_value.get_subnet.return_value = fake_subnet
|
||||
f_lr.return_value = self.router
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
folbpi.return_value = (pool_key, self.ovn_lb)
|
||||
@ -1761,7 +1766,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
status = self.helper.member_create(self.member)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
f_lr.assert_called_once_with(self.network, fake_subnet['gateway_ip'])
|
||||
f_lr.assert_called_once_with(self.network, fake_subnet.gateway_ip)
|
||||
self.helper._update_lb_to_lr_association.assert_called_once_with(
|
||||
self.ovn_lb, self.router)
|
||||
self.helper._update_lb_to_lr_association_by_step \
|
||||
@ -1771,7 +1776,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test_member_create_listener(self, net_cli):
|
||||
net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound]
|
||||
net_cli.return_value.get_subnet.side_effect = [idlutils.RowNotFound]
|
||||
self.ovn_lb.external_ids = mock.MagicMock()
|
||||
self.helper._get_pool_listeners.return_value = ['listener1']
|
||||
status = self.helper.member_create(self.member)
|
||||
@ -2779,8 +2784,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
attrs={'id': 'foo_subnet_id',
|
||||
'name': 'foo_subnet_name',
|
||||
'network_id': 'foo_network_id'})
|
||||
net_cli.return_value.show_subnet.return_value = {
|
||||
'subnet': subnet}
|
||||
net_cli.return_value.get_subnet.return_value = subnet
|
||||
self.helper._update_lb_to_ls_association(
|
||||
self.ref_lb1, subnet_id=subnet.id,
|
||||
associate=True, update_ls_ref=True)
|
||||
@ -2921,11 +2925,12 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
def test__update_lb_to_ls_association_network_dis_net_not_found(
|
||||
self, net_cli):
|
||||
net_cli.return_value.show_subnet.side_effect = n_exc.NotFound
|
||||
net_cli.return_value.get_subnet.side_effect = (
|
||||
openstack.exceptions.ResourceNotFound)
|
||||
self._update_lb_to_ls_association.stop()
|
||||
self._get_lb_to_ls_association_commands.stop()
|
||||
(self.helper.ovn_nbdb_api.ls_get.return_value.execute.
|
||||
return_value) = self.network
|
||||
return_value) = self.network
|
||||
self.helper._update_lb_to_ls_association(
|
||||
self.ref_lb1, subnet_id='foo',
|
||||
associate=False, update_ls_ref=True)
|
||||
@ -3043,7 +3048,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_delete_port_not_found(self, net_cli):
net_cli.return_value.delete_port.side_effect = (
[n_exc.PortNotFoundClient])
[openstack.exceptions.ResourceNotFound])
self.helper.delete_port('foo')

@mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
@ -3300,7 +3305,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
lb.external_ids = external_ids
self.mock_find_lb_pool_key.return_value = lb
self.helper.handle_member_dvr(info)
net_cli.show_subnet.assert_not_called()
net_cli.get_subnet.assert_not_called()
self.helper.ovn_nbdb_api.db_clear.assert_not_called()

@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@ -3335,7 +3340,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'neutron:vip_fip': '11.11.11.11'}
lb.external_ids = external_ids
self.mock_find_lb_pool_key.return_value = lb
net_cli.return_value.show_subnet.side_effect = [n_exc.NotFound]
net_cli.return_value.get_subnet.side_effect = [
openstack.exceptions.ResourceNotFound]
self.helper.handle_member_dvr(info)
self.helper.ovn_nbdb_api.db_clear.assert_not_called()

@ -3367,10 +3373,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'subnet_id': fake_port['fixed_ips'][0]['subnet_id'],
'action': action}
member_subnet = fakes.FakeSubnet.create_one_subnet()
member_subnet['id'] = self.member_subnet_id
member_subnet['network_id'] = 'foo'
net_cli.return_value.show_subnet.return_value = {
'subnet': member_subnet}
member_subnet.update({'id': self.member_subnet_id})
member_subnet.update({'network_id': 'foo'})
net_cli.return_value.get_subnet.return_value = member_subnet
fake_lsp = fakes.FakeOVNPort.from_neutron_port(
fake_port)
fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
@ -3384,11 +3389,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'external_ip': '22.22.22.22',
'external_ids': {
ovn_const.OVN_FIP_EXT_ID_KEY: 'fip_id'}})
fip_info = {
'floatingip': {
'description': 'bar'}}
net_cli.return_value.show_floatingip.return_value = fip_info
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
fip_info = {'description': 'bar'}
net_cli.return_value.get_ip.return_value = fip_info
self.helper.ovn_nbdb_api.db_find_rows.return_value. \
execute.return_value = [fake_nat]
external_ids = {
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '11.11.11.11'}
@ -3408,11 +3411,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
mock.ANY]
self.helper.ovn_nbdb_api.assert_has_calls(calls)
else:
(net_cli.return_value.show_floatingip.
(net_cli.return_value.get_ip.
assert_called_once_with('fip_id'))
(net_cli.return_value.update_floatingip.
assert_called_once_with('fip_id', {
'floatingip': {'description': 'bar'}}))
(net_cli.return_value.update_ip.
assert_called_once_with('fip_id', description='bar'))
self.helper.ovn_nbdb_api.db_clear.assert_not_called()

@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@ -3440,85 +3442,81 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):

def test_create_vip_port_vip_selected(self):
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id),
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id'],
'ip_address':'10.1.10.1'}],
'network_id': self.vip_dict['vip_network_id'],
'admin_state_up': True,
'project_id': self.project_id}}
'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id),
'fixed_ips': [{
'subnet_id': self.vip_dict['vip_subnet_id'],
'ip_address': '10.1.10.1'}],
'network_id': self.vip_dict['vip_network_id'],
'admin_state_up': True,
'project_id': self.project_id}
with mock.patch.object(clients, 'get_neutron_client') as net_cli:
self.vip_dict['vip_address'] = '10.1.10.1'
self.helper.create_vip_port(self.project_id,
self.loadbalancer_id,
self.vip_dict)
expected_call = [
mock.call().create_port(expected_dict)]
mock.call().create_port(**expected_dict)]
net_cli.assert_has_calls(expected_call)

def test_create_vip_port_vip_not_selected(self):
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id),
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id']}],
'network_id': self.vip_dict['vip_network_id'],
'admin_state_up': True,
'project_id': self.project_id}}
'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id),
'fixed_ips': [{
'subnet_id': self.vip_dict['vip_subnet_id']}],
'network_id': self.vip_dict['vip_network_id'],
'admin_state_up': True,
'project_id': self.project_id}
with mock.patch.object(clients, 'get_neutron_client') as net_cli:
self.helper.create_vip_port(self.project_id,
self.loadbalancer_id,
self.vip_dict)
expected_call = [
mock.call().create_port(expected_dict)]
mock.call().create_port(**expected_dict)]
net_cli.assert_has_calls(expected_call)

@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_create_vip_port_vip_selected_already_exist(self, net_cli):
net_cli.return_value.create_port.side_effect = [
n_exc.IpAddressAlreadyAllocatedClient]
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': 'ovn-lb-vip-' + self.loadbalancer_id,
'id': self.loadbalancer_id}]}
openstack.exceptions.ConflictException]
net_cli.return_value.find_port.return_value = (
Port(name='ovn-lb-vip-' + self.loadbalancer_id,
id=self.loadbalancer_id))
self.vip_dict['vip_address'] = '10.1.10.1'
ret = self.helper.create_vip_port(
self.project_id,
self.loadbalancer_id,
self.vip_dict)
expected = {
'port': {
'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id),
'id': self.loadbalancer_id}}
self.assertDictEqual(expected, ret)
self.assertEqual(
'%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id), ret.name)
self.assertEqual(self.loadbalancer_id, ret.id)
expected_call = [
mock.call().list_ports(
mock.call().find_port(
network_id='%s' % self.vip_dict['vip_network_id'],
name='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id))]
name_or_id='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id))]
net_cli.assert_has_calls(expected_call)

@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_create_vip_port_vip_selected_other_allocation_exist(
self, net_cli):
net_cli.return_value.create_port.side_effect = [
n_exc.IpAddressAlreadyAllocatedClient]
net_cli.return_value.list_ports.return_value = {
'ports': []}
openstack.exceptions.ConflictException]
net_cli.return_value.find_port.return_value = None
self.vip_dict['vip_address'] = '10.1.10.1'
self.assertRaises(
n_exc.IpAddressAlreadyAllocatedClient,
openstack.exceptions.ConflictException,
self.helper.create_vip_port,
self.project_id,
self.loadbalancer_id,
self.vip_dict)
expected_call = [
mock.call().list_ports(
mock.call().find_port(
network_id='%s' % self.vip_dict['vip_network_id'],
name='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id))]
name_or_id='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id))]
net_cli.assert_has_calls(expected_call)
self.helper._update_status_to_octavia.assert_not_called()

@ -3527,26 +3525,46 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test_create_vip_port_vip_neutron_client_other_exception(
self, del_port, net_cli):
net_cli.return_value.create_port.side_effect = [
n_exc.NeutronClientException]
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': 'ovn-lb-vip-' + self.loadbalancer_id,
'id': self.loadbalancer_id}]}
openstack.exceptions.HttpException]
net_cli.return_value.find_port.return_value = Port(
name='ovn-lb-vip-' + self.loadbalancer_id,
id=self.loadbalancer_id)
self.assertRaises(
n_exc.NeutronClientException,
openstack.exceptions.HttpException,
self.helper.create_vip_port,
self.project_id,
self.loadbalancer_id,
self.vip_dict)
expected_call = [
mock.call().list_ports(
mock.call().find_port(
network_id='%s' % self.vip_dict['vip_network_id'],
name='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id))]
name_or_id='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id))]
net_cli.assert_has_calls(expected_call)
del_port.assert_called_once_with(self.loadbalancer_id)
self.helper._update_status_to_octavia.assert_not_called()

# Same but find_port returns None
net_cli.return_value.create_port.side_effect = [
openstack.exceptions.HttpException]
net_cli.return_value.find_port.return_value = None
net_cli.reset_mock()
del_port.reset_mock()
self.assertRaises(
openstack.exceptions.HttpException,
self.helper.create_vip_port,
self.project_id,
self.loadbalancer_id,
self.vip_dict)
expected_call = [
mock.call().find_port(
network_id='%s' % self.vip_dict['vip_network_id'],
name_or_id='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.loadbalancer_id))]
net_cli.assert_has_calls(expected_call)
del_port.assert_not_called()
self.helper._update_status_to_octavia.assert_not_called()

def test_get_pool_member_id(self):
ret = self.helper.get_pool_member_id(
self.pool_id, mem_addr_port='192.168.2.149:1010')
@ -3642,7 +3660,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_hm_lb.protocol = [protocol]
folbpi.return_value = (pool_key, self.ovn_hm_lb)
uhm.return_value = True
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
net_cli.return_value.get_subnet.return_value = {'subnet': fake_subnet}
if not fip:
del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY]
status = self.helper.hm_create(self.health_monitor)
@ -3795,7 +3813,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
pool_key = 'pool_%s' % self.pool_id
self.ovn_hm_lb.external_ids[pool_key] = self.member_line
folbpi.return_value = (pool_key, self.ovn_hm_lb)
net_cli.return_value.show_subnet.side_effect = [n_exc.NotFound]
net_cli.return_value.get_subnet.side_effect = [
openstack.exceptions.ResourceNotFound]
status = self.helper.hm_create(self.health_monitor)
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
constants.ACTIVE)
@ -3815,15 +3834,14 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'pool_id': self.pool_id,
'admin_state_up': True,
'old_admin_state_up': True}
member_line = (
'member_%s_%s:%s_%s' %
(member['id'], member['address'],
member['protocol_port'], member['subnet_id']))
member_line = ('member_%s_%s:%s_%s' %
(member['id'], member['address'],
member['protocol_port'], member['subnet_id']))
pool_key = 'pool_%s' % self.pool_id
self.ovn_hm_lb.external_ids[pool_key] = member_line
folbpi.return_value = (pool_key, self.ovn_hm_lb)
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
net_cli.return_value.list_ports.return_value = {'ports': []}
net_cli.return_value.get_subnet.return_value = fake_subnet
net_cli.return_value.ports.return_value = iter(())
fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port)
fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
@ -3856,7 +3874,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
pool_key = 'pool_%s' % self.pool_id
self.ovn_hm_lb.external_ids[pool_key] = member_line
folbpi.return_value = (pool_key, self.ovn_hm_lb)
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
net_cli.return_value.get_subnet.return_value = fake_subnet
fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port)
fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
@ -4290,6 +4308,44 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
status = self.helper.hm_update_event(info)
return status

@mock.patch.object(ovn_helper.OvnProviderHelper, '_create_hm_port')
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test__ensure_hm_ovn_port(self, mock_get_neutron_client,
mock_create_hm_port):

mock_neutron_client = mock_get_neutron_client.return_value
mock_find_port = mock_neutron_client.find_port
mock_find_port.return_value = Port(id='fake_id')

self.helper._ensure_hm_ovn_port('network_id', 'subnet_id',
'project_id')

mock_find_port.assert_called_once_with(
network_id='network_id',
name_or_id='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
'subnet_id'))
mock_create_hm_port.assert_not_called()

@mock.patch.object(ovn_helper.OvnProviderHelper, '_create_hm_port')
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test__ensure_hm_ovn_port_create_port(self,
mock_get_neutron_client,
mock_create_hm_port):

mock_neutron_client = mock_get_neutron_client.return_value
mock_find_port = mock_neutron_client.find_port
mock_find_port.return_value = None

self.helper._ensure_hm_ovn_port('network_id', 'subnet_id',
'project_id')

mock_find_port.assert_called_once_with(
network_id='network_id',
name_or_id='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
'subnet_id'))
mock_create_hm_port.assert_called_with('network_id', 'subnet_id',
'project_id')

def _update_external_ids_member_status(self, lb, member_id, member_status):
status = constants.ONLINE
if member_status == 'offline':
@ -4354,23 +4410,23 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):

def test__create_hm_port(self):
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}}
'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}
with mock.patch.object(clients, 'get_neutron_client') as net_cli:
hm_port = self.helper._create_hm_port(
self.vip_dict['vip_network_id'],
self.vip_dict['vip_subnet_id'],
self.project_id)
expected_call = [
mock.call().create_port(expected_dict)]
mock.call().create_port(**expected_dict)]
net_cli.assert_has_calls(expected_call)
self.assertIsNotNone(hm_port)

@ -4378,29 +4434,28 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test__create_hm_port_neutron_client_exception(
self, net_cli):
net_cli.return_value.create_port.side_effect = [
n_exc.NeutronClientException]
net_cli.return_value.list_ports.return_value = {
'ports': []}
openstack.exceptions.HttpException]
net_cli.return_value.ports.return_value = []
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}}
'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}
hm_port = self.helper._create_hm_port(
self.vip_dict['vip_network_id'],
self.vip_dict['vip_subnet_id'],
self.project_id)
expected_call = [
mock.call(),
mock.call().create_port(expected_dict),
mock.call().create_port(**expected_dict),
mock.call(),
mock.call().list_ports(
mock.call().ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
@ -4411,30 +4466,29 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test__create_hm_port_neutron_client_exception_clean_up_hm_port(
self, del_hm_port, net_cli):
net_cli.return_value.create_port.side_effect = [
n_exc.NeutronClientException]
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'id': 'fake_uuid'}]}
openstack.exceptions.HttpException]
net_cli.return_value.ports.return_value = [
Port(name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
id='fake_uuid')]
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{
'subnet_id': self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}}
'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{
'subnet_id': self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}
hm_port = self.helper._create_hm_port(
self.vip_dict['vip_network_id'],
self.vip_dict['vip_subnet_id'],
self.project_id)
expected_call = [
mock.call(),
mock.call().create_port(expected_dict)]
mock.call().create_port(**expected_dict)]
net_cli.assert_has_calls(expected_call)
del_hm_port.assert_called_once_with(self.vip_dict['vip_subnet_id'])
self.assertIsNone(hm_port)
@ -4442,19 +4496,18 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test__clean_up_hm_port(self, del_port, net_cli):
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'id': 'fake_uuid',
'fixed_ips': [{'subnet_id': 'another_subnet_id',
'ip_address': '10.1.2.3'},
{'subnet_id': self.vip_dict['vip_subnet_id'],
'ip_address': '10.0.0.3'}]}]}
net_cli.return_value.ports.return_value = [
Port(name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
id='fake_uuid',
fixed_ips=[{'subnet_id': 'another_subnet_id',
'ip_address': '10.1.2.3'},
{'subnet_id': self.vip_dict['vip_subnet_id'],
'ip_address': '10.0.0.3'}])]
self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id'])
expected_call = [
mock.call(),
mock.call().list_ports(
mock.call().ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
@ -4463,15 +4516,14 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test__clean_up_hm_port_in_use(self, del_port, net_cli):
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'id': 'fake_uuid',
'fixed_ips': [{'subnet_id': 'another_subnet_id',
'ip_address': '10.1.2.3'},
{'subnet_id': self.vip_dict['vip_subnet_id'],
'ip_address': '10.0.0.3'}]}]}
net_cli.return_value.ports.return_value = [
Port(name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
id='fake_uuid',
fixed_ips=[{'subnet_id': 'another_subnet_id',
'ip_address': '10.1.2.3'},
{'subnet_id': self.vip_dict['vip_subnet_id'],
'ip_address': '10.0.0.3'}])]
fake_lb_unrelated = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
'ip_port_mappings': {'10.1.2.4': 'fake_member_lgp:10.1.2.3'}})
@ -4484,7 +4536,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id'])
expected_call = [
mock.call(),
mock.call().list_ports(
mock.call().ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
@ -4493,12 +4545,11 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test__clean_up_hm_port_not_found(self, del_port, net_cli):
net_cli.return_value.list_ports.return_value = {
'ports': []}
net_cli.return_value.ports.return_value = []
self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id'])
expected_call = [
mock.call(),
mock.call().list_ports(
mock.call().ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
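
The test changes above follow a consistent mapping from python-neutronclient calls to the openstacksdk network proxy. A minimal sketch of that mapping, assuming a connection built with openstack.connect() and placeholder IDs ('subnet-id', 'net-id', 'fip-id', 'port-id' and the 'devstack' cloud name are illustrative, not values used by the provider, which obtains its client via ovn_octavia_provider.common.clients.get_neutron_client):

import openstack

# Assumes a clouds.yaml entry named 'devstack'; the provider builds its
# connection from the Keystone session configured in the [neutron] section.
conn = openstack.connect(cloud='devstack')

# show_subnet(id)['subnet'] -> get_subnet(id); results are objects, not dicts.
subnet = conn.network.get_subnet('subnet-id')
print(subnet.gateway_ip)

# list_ports(...)['ports'] -> ports(...) generator, or find_port(name_or_id=...).
hm_ports = list(conn.network.ports(name='ovn-lb-hm-subnet-id'))
vip_port = conn.network.find_port(name_or_id='ovn-lb-vip-lb-id',
                                  network_id='net-id')

# create_port({'port': {...}}) -> create_port(**attrs).
port = conn.network.create_port(name='ovn-lb-vip-lb-id', network_id='net-id',
                                admin_state_up=True)

# show_floatingip / update_floatingip -> get_ip / update_ip.
fip = conn.network.get_ip('fip-id')
conn.network.update_ip(fip, description='bar')

# neutronclient exceptions -> openstack.exceptions (ResourceNotFound,
# ConflictException, HttpException).
try:
    conn.network.delete_port('port-id', ignore_missing=False)
except openstack.exceptions.ResourceNotFound:
    pass
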
@ -0,0 +1,26 @@
---
upgrade:
  - |
    Authentication settings for Neutron should now be added directly to the
    [neutron] section of the configuration. The exact settings depend on the
    `auth_type` used. Refer to
    https://docs.openstack.org/keystoneauth/latest/plugin-options.html
    for a list of possible options.
deprecations:
  - |
    As part of the effort to replace the deprecated `python-neutronclient`
    package in Octavia, the following options in the [neutron] section of the
    Octavia configuration file have been marked as deprecated for removal:
    `endpoint` is replaced by the `endpoint_override` option,
    `endpoint_type` is replaced by the `valid_interfaces` option,
    and `ca_certificates_file` is replaced by the `cafile` option.
    In a future release `ovn-octavia-provider` will no longer take the
    authentication settings from the [service_auth] section as a fallback.
    It will require them to be in the [neutron] section.
other:
  - |
    Replaced code that uses the deprecated `python-neutronclient` library with
    code that uses `openstacksdk` and removed `python-neutronclient` as a
    dependency.
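
As a companion to the upgrade and deprecation notes above, a rough sketch of how the [neutron] options can be consumed on the client side, assuming the standard keystoneauth password plugin; the config snippet in the comment, the config-file path, and the oslo/keystoneauth wiring are illustrative and not the provider's exact code path:

from keystoneauth1 import loading as ks_loading
import openstack
from oslo_config import cfg

CONF = cfg.CONF

# Illustrative octavia.conf snippet (placeholder values):
#   [neutron]
#   auth_type = password
#   auth_url = http://keystone.example.test/identity
#   username = octavia
#   password = secret
#   project_name = service
#   user_domain_name = Default
#   project_domain_name = Default
#   endpoint_override = http://neutron.example.test:9696  # replaces 'endpoint'
#   valid_interfaces = internal                           # replaces 'endpoint_type'
#   cafile = /etc/ssl/certs/ca.pem                        # replaces 'ca_certificates_file'

# Register the standard keystoneauth options under [neutron] and build a
# session from them.
ks_loading.register_auth_conf_options(CONF, 'neutron')
ks_loading.register_session_conf_options(CONF, 'neutron')
ks_loading.register_adapter_conf_options(CONF, 'neutron')
CONF(['--config-file', '/etc/octavia/octavia.conf'])

auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron')
session = ks_loading.load_session_from_conf_options(CONF, 'neutron', auth=auth)

# An openstacksdk connection on top of that session replaces the old
# neutronclient.neutron.client.Client('2.0', ...) object.
conn = openstack.connection.Connection(session=session)
print([s.name for s in conn.network.subnets()])
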
@ -9,6 +9,7 @@
keystoneauth1>=3.14.0 # Apache-2.0
netaddr>=0.7.18 # BSD
neutron-lib>=2.16.0 # Apache-2.0
openstacksdk>=0.103.0 # Apache-2.0
oslo.config>=8.0.0 # Apache-2.0
oslo.log>=4.3.0 # Apache-2.0
oslo.messaging>=12.4.0 # Apache-2.0
@ -20,4 +21,3 @@ pbr>=4.0.0 # Apache-2.0
SQLAlchemy>=1.4.23 # MIT
tenacity>=6.0.0 # Apache-2.0
octavia-lib>=2.2.0 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
tox.ini
@ -72,9 +72,9 @@ setenv =
commands =
stestr run --no-subunit-trace {posargs}
coverage combine
coverage report --fail-under=92 --skip-covered
coverage html -d cover
coverage xml -o cover/coverage.xml
coverage report --fail-under=92 --skip-covered

[testenv:docs]
envdir = {toxworkdir}/docs