Add option to tag Octavia resources created by us
This patch extends If75028e17d13ec62fb414fa9797ee7ac02d948d1 with Octavia resource tagging.

Change-Id: I0a2f89007994fbd7696b0f487affc1b7b643be74
Implements: blueprint kuryr-resources-tagging
This commit is contained in:
parent
21a0a2673c
commit
bee4997f4e
@ -46,17 +46,44 @@ _L7_POLICY_ACT_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL'
|
||||
# Intervals (seconds) for load balancer status polling: 'fast' is the
# default; 'slow' (will be used for LB creation) polling.
_LB_STS_POLL_FAST_INTERVAL = 1
_LB_STS_POLL_SLOW_INTERVAL = 3

# Minimum Octavia API version, as a (major, minor) tuple, that supports
# resource tags (Octavia 2.5, i.e. Stein).
_OCTAVIA_TAGGING_VERSION = (2, 5)
|
||||
|
||||
|
||||
class LBaaSv2Driver(base.LBaaSDriver):
|
||||
"""LBaaSv2Driver implements LBaaSDriver for Neutron LBaaSv2 API."""
|
||||
|
||||
def __init__(self):
    """Initialize the driver and detect Octavia tagging support.

    Octavia API 2.5 (Stein) introduced resource tags.  When the deployed
    Octavia is older, the requested tags are instead put into the
    ``description`` field of the resources that have one (see
    ``_add_tags``).
    """
    super(LBaaSv2Driver, self).__init__()

    self._octavia_tags = False
    # Check if Octavia API supports tagging.
    lbaas = clients.get_loadbalancer_client()
    v = lbaas.get_api_major_version()
    if v >= _OCTAVIA_TAGGING_VERSION:
        LOG.info('Octavia supports resource tags.')
        self._octavia_tags = True
    else:
        # NOTE(review): this warning fires even when
        # [neutron_defaults]resource_tags is unset — confirm whether it
        # should be guarded by the config option.
        # Fixed missing space after 'Kuryr' (message previously rendered
        # as "Kuryrwill").
        LOG.warning('[neutron_defaults]resource_tags is set, but Octavia '
                    'API %d.%d does not support resource tagging. Kuryr '
                    'will put requested tags in the description field of '
                    'Octavia resources.', *v)
|
||||
|
||||
def get_service_loadbalancer_name(self, namespace, svc_name):
    """Return the load balancer name for a Service.

    The name is ``<namespace>/<svc_name>``.
    """
    return '{}/{}'.format(namespace, svc_name)
|
||||
|
||||
def get_loadbalancer_pool_name(self, loadbalancer, namespace, svc_name):
    """Return the pool name for a Service.

    The name is ``<loadbalancer.name>/<namespace>/<svc_name>``.
    """
    return '{}/{}/{}'.format(loadbalancer.name, namespace, svc_name)
|
||||
|
||||
def _add_tags(self, resource, req):
    """Add configured resource tags to an Octavia create request.

    If Octavia supports tagging, tags go into the ``tags`` field of the
    request.  Otherwise they are joined into the ``description`` field,
    but only for the resource types that have one (load balancers,
    listeners, pools and L7 policies).

    :param resource: resource type name, e.g. 'loadbalancer'
    :param req: request dict to be modified in place
    """
    tags = CONF.neutron_defaults.resource_tags
    if not tags:
        return
    if self._octavia_tags:
        req['tags'] = tags
    elif resource in ('loadbalancer', 'listener', 'pool', 'l7policy'):
        # Pre-2.5 Octavia: fall back to the description field.
        req['description'] = ','.join(tags)
|
||||
|
||||
def ensure_loadbalancer(self, name, project_id, subnet_id, ip,
|
||||
security_groups_ids=None, service_type=None,
|
||||
provider=None):
|
||||
@ -484,9 +511,25 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
||||
|
||||
return None
|
||||
|
||||
def _post_lb_resource(self, path, resource, request):
    """POST a creation request for an Octavia resource.

    :param path: API path of the resource collection, e.g. 'loadbalancers'
    :param resource: singular resource key, e.g. 'loadbalancer'
    :param request: dict forming the request body
    :returns: dict describing the created resource
    :raises: HTTP error raised from the response when creation fails
    """
    # FIXME(dulek): openstacksdk doesn't support Octavia tags until version
    #               0.24.0 (Stein+). At the moment our dependency is
    #               >=0.13.0, because we want Kuryr to support multiple
    #               OpenStack versions also in terms of dependencies (think
    #               building container images from various distros or
    #               running Kuryr on older OS-es). Once 0.24.0 is fairly
    #               stable and available, we can raise the requirement and
    #               use lbaas.create_*() directly. Until then we manually
    #               send POST request.
    lbaas = clients.get_loadbalancer_client()
    response = lbaas.post(path, json={resource: request})
    if not response.ok:
        LOG.error('Error when creating %s: %s', resource, response.text)
        response.raise_for_status()
    return response.json()[resource]
|
||||
|
||||
def _create_loadbalancer(self, loadbalancer):
|
||||
request = {
|
||||
'name': loadbalancer.name,
|
||||
'project_id': loadbalancer.project_id,
|
||||
@ -497,7 +540,11 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
||||
if loadbalancer.provider is not None:
|
||||
request['provider'] = loadbalancer.provider
|
||||
|
||||
response = lbaas.create_load_balancer(**request)
|
||||
self._add_tags('loadbalancer', request)
|
||||
|
||||
response = self._post_lb_resource('loadbalancers', 'loadbalancer',
|
||||
request)
|
||||
|
||||
loadbalancer.id = response['id']
|
||||
loadbalancer.port_id = self._get_vip_port(loadbalancer).get("id")
|
||||
if (loadbalancer.provider is not None and
|
||||
@ -531,13 +578,15 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
||||
return loadbalancer
|
||||
|
||||
def _create_listener(self, listener):
    """Create an Octavia listener and store its id on *listener*.

    :param listener: LBaaSListener object describing the listener
    :returns: the same object with ``id`` set from the API response
    """
    request = {
        'name': listener.name,
        'project_id': listener.project_id,
        'loadbalancer_id': listener.loadbalancer_id,
        'protocol': listener.protocol,
        'protocol_port': listener.port,
    }
    self._add_tags('listener', request)
    response = self._post_lb_resource('listeners', 'listener', request)
    listener.id = response['id']
    return listener
|
||||
|
||||
@ -561,14 +610,16 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
||||
def _create_pool(self, pool):
    """Create an Octavia pool and store its id on *pool*.

    :param pool: LBaaSPool object describing the pool
    :returns: the same object with ``id`` set from the API response
    """
    # TODO(ivc): make lb_algorithm configurable
    lb_algorithm = 'ROUND_ROBIN'
    request = {
        'name': pool.name,
        'project_id': pool.project_id,
        'listener_id': pool.listener_id,
        'loadbalancer_id': pool.loadbalancer_id,
        'protocol': pool.protocol,
        'lb_algorithm': lb_algorithm,
    }
    self._add_tags('pool', request)
    response = self._post_lb_resource('pools', 'pool', request)
    pool.id = response['id']
    return pool
|
||||
|
||||
@ -596,14 +647,16 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
||||
return self._find_pool(pool, by_listener=False)
|
||||
|
||||
def _create_member(self, member):
    """Create an Octavia pool member and store its id on *member*.

    :param member: LBaaSMember object describing the member
    :returns: the same object with ``id`` set from the API response
    """
    request = {
        'name': member.name,
        'project_id': member.project_id,
        'subnet_id': member.subnet_id,
        # member.ip may be a netaddr object; the API expects a string.
        'address': str(member.ip),
        'protocol_port': member.port,
    }
    self._add_tags('member', request)
    response = self._post_lb_resource('pools/%s/members' % member.pool_id,
                                      'member', request)
    member.id = response['id']
    return member
|
||||
|
||||
@ -792,13 +845,15 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
||||
l7_policy.id)
|
||||
|
||||
def _create_l7_policy(self, l7_policy):
    """Create an Octavia L7 policy and store its id on *l7_policy*.

    The policy always uses the REDIRECT_TO_POOL action.

    :param l7_policy: LBaaSL7Policy object describing the policy
    :returns: the same object with ``id`` set from the API response
    """
    request = {
        'action': _L7_POLICY_ACT_REDIRECT_TO_POOL,
        'listener_id': l7_policy.listener_id,
        'name': l7_policy.name,
        'project_id': l7_policy.project_id,
        'redirect_pool_id': l7_policy.redirect_pool_id,
    }
    self._add_tags('l7policy', request)
    response = self._post_lb_resource('l7policies', 'l7policy', request)
    l7_policy.id = response['id']
    return l7_policy
|
||||
|
||||
@ -826,12 +881,14 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
||||
self._find_l7_rule)
|
||||
|
||||
def _create_l7_rule(self, l7_rule):
    """Create an Octavia L7 rule and store its id on *l7_rule*.

    :param l7_rule: LBaaSL7Rule object describing the rule
    :returns: the same object with ``id`` set from the API response
    """
    request = {
        'compare_type': l7_rule.compare_type,
        'type': l7_rule.type,
        'value': l7_rule.value,
    }
    self._add_tags('rule', request)
    response = self._post_lb_resource(
        'l7policies/%s/rules' % l7_rule.l7policy_id, 'rule', request)
    l7_rule.id = response['id']
    return l7_rule
|
||||
|
||||
|
@ -17,6 +17,7 @@ import mock
|
||||
|
||||
from neutronclient.common import exceptions as n_exc
|
||||
from openstack import exceptions as o_exc
|
||||
from oslo_config import cfg
|
||||
|
||||
from kuryr_kubernetes.controller.drivers import lbaasv2 as d_lbaasv2
|
||||
from kuryr_kubernetes import exceptions as k_exc
|
||||
@ -24,8 +25,55 @@ from kuryr_kubernetes.objects import lbaas as obj_lbaas
|
||||
from kuryr_kubernetes.tests import base as test_base
|
||||
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class TestLBaaSv2Driver(test_base.TestCase):
|
||||
def test_add_tags(self):
    """Tags configured and Octavia >= 2.5: tags go into 'tags'."""
    CONF.set_override('resource_tags', ['foo'], group='neutron_defaults')
    self.addCleanup(CONF.clear_override, 'resource_tags',
                    group='neutron_defaults')
    lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
    lbaas.get_api_major_version.return_value = (2, 5)
    d = d_lbaasv2.LBaaSv2Driver()
    req = {}
    d._add_tags('loadbalancer', req)
    self.assertEqual({'tags': ['foo']}, req)
|
||||
|
||||
def test_add_tags_no_tag(self):
    """No tags configured: the request is left untouched."""
    lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
    lbaas.get_api_major_version.return_value = (2, 5)
    d = d_lbaasv2.LBaaSv2Driver()
    req = {}
    d._add_tags('loadbalancer', req)
    self.assertEqual({}, req)
|
||||
|
||||
def test_add_tags_no_support(self):
    """Octavia < 2.5: tags land in 'description' for resources that
    have one."""
    CONF.set_override('resource_tags', ['foo'], group='neutron_defaults')
    self.addCleanup(CONF.clear_override, 'resource_tags',
                    group='neutron_defaults')
    lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
    lbaas.get_api_major_version.return_value = (2, 4)
    d = d_lbaasv2.LBaaSv2Driver()
    for res in ('loadbalancer', 'listener', 'pool', 'l7policy'):
        req = {}
        d._add_tags(res, req)
        self.assertEqual({'description': 'foo'}, req,
                         'No description added to resource %s' % res)
|
||||
|
||||
def test_add_tags_no_support_resource_no_description(self):
    """Octavia < 2.5: resources without a description field get
    nothing."""
    CONF.set_override('resource_tags', ['foo'], group='neutron_defaults')
    self.addCleanup(CONF.clear_override, 'resource_tags',
                    group='neutron_defaults')
    lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
    lbaas.get_api_major_version.return_value = (2, 4)
    d = d_lbaasv2.LBaaSv2Driver()
    for res in ('member', 'rule'):
        req = {}
        d._add_tags(res, req)
        self.assertEqual({}, req, 'Unnecessary description added to '
                         'resource %s' % res)
|
||||
|
||||
def test_ensure_loadbalancer(self):
|
||||
neutron = self.useFixture(k_fix.MockNeutronClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
@ -240,7 +288,6 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
member.id, member.pool_id)
|
||||
|
||||
def test_create_loadbalancer(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
loadbalancer = obj_lbaas.LBaaSLoadBalancer(
|
||||
@ -255,18 +302,18 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'vip_subnet_id': loadbalancer.subnet_id,
|
||||
}
|
||||
resp = {'id': loadbalancer_id, 'provider': 'haproxy'}
|
||||
lbaas.create_load_balancer.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
m_driver._get_vip_port.return_value = {'id': mock.sentinel.port_id}
|
||||
|
||||
ret = cls._create_loadbalancer(m_driver, loadbalancer)
|
||||
lbaas.create_load_balancer.assert_called_once_with(**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with('loadbalancers',
|
||||
'loadbalancer', req)
|
||||
for attr in loadbalancer.obj_fields:
|
||||
self.assertEqual(getattr(loadbalancer, attr),
|
||||
getattr(ret, attr))
|
||||
self.assertEqual(loadbalancer_id, ret.id)
|
||||
|
||||
def test_create_loadbalancer_provider_defined(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
loadbalancer = obj_lbaas.LBaaSLoadBalancer(
|
||||
@ -283,18 +330,18 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'provider': loadbalancer.provider,
|
||||
}
|
||||
resp = {'id': loadbalancer_id, 'provider': 'amphora'}
|
||||
lbaas.create_load_balancer.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
m_driver._get_vip_port.return_value = {'id': mock.sentinel.port_id}
|
||||
|
||||
ret = cls._create_loadbalancer(m_driver, loadbalancer)
|
||||
lbaas.create_load_balancer.assert_called_once_with(**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with('loadbalancers',
|
||||
'loadbalancer', req)
|
||||
for attr in loadbalancer.obj_fields:
|
||||
self.assertEqual(getattr(loadbalancer, attr),
|
||||
getattr(ret, attr))
|
||||
self.assertEqual(loadbalancer_id, ret.id)
|
||||
|
||||
def test_create_loadbalancer_provider_mismatch(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
loadbalancer = obj_lbaas.LBaaSLoadBalancer(
|
||||
@ -311,11 +358,12 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'provider': loadbalancer.provider,
|
||||
}
|
||||
resp = {'id': loadbalancer_id, 'provider': 'haproxy'}
|
||||
lbaas.create_load_balancer.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
m_driver._get_vip_port.return_value = {'id': mock.sentinel.port_id}
|
||||
|
||||
ret = cls._create_loadbalancer(m_driver, loadbalancer)
|
||||
lbaas.create_load_balancer.assert_called_once_with(**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with('loadbalancers',
|
||||
'loadbalancer', req)
|
||||
self.assertIsNone(ret)
|
||||
|
||||
def test_find_loadbalancer(self):
|
||||
@ -388,7 +436,6 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
m_driver.release_loadbalancer.assert_called_once()
|
||||
|
||||
def test_create_listener(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
listener = obj_lbaas.LBaaSListener(
|
||||
@ -398,14 +445,15 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
req = {
|
||||
'name': listener.name,
|
||||
'project_id': listener.project_id,
|
||||
'load_balancer_id': listener.loadbalancer_id,
|
||||
'loadbalancer_id': listener.loadbalancer_id,
|
||||
'protocol': listener.protocol,
|
||||
'protocol_port': listener.port}
|
||||
resp = {'id': listener_id}
|
||||
lbaas.create_listener.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
|
||||
ret = cls._create_listener(m_driver, listener)
|
||||
lbaas.create_listener.assert_called_once_with(**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with('listeners',
|
||||
'listener', req)
|
||||
for attr in listener.obj_fields:
|
||||
self.assertEqual(getattr(listener, attr),
|
||||
getattr(ret, attr))
|
||||
@ -453,7 +501,6 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
self.assertIsNone(ret)
|
||||
|
||||
def test_create_pool(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
lb_algorithm = 'ROUND_ROBIN'
|
||||
@ -470,17 +517,17 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'protocol': pool.protocol,
|
||||
'lb_algorithm': lb_algorithm}
|
||||
resp = {'id': pool_id}
|
||||
lbaas.create_pool.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
|
||||
ret = cls._create_pool(m_driver, pool)
|
||||
lbaas.create_pool.assert_called_once_with(**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with('pools', 'pool',
|
||||
req)
|
||||
for attr in pool.obj_fields:
|
||||
self.assertEqual(getattr(pool, attr),
|
||||
getattr(ret, attr))
|
||||
self.assertEqual(pool_id, ret.id)
|
||||
|
||||
def test_create_pool_conflict(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
lb_algorithm = 'ROUND_ROBIN'
|
||||
@ -495,11 +542,12 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'loadbalancer_id': pool.loadbalancer_id,
|
||||
'protocol': pool.protocol,
|
||||
'lb_algorithm': lb_algorithm}
|
||||
lbaas.create_pool.side_effect = n_exc.StateInvalidClient
|
||||
m_driver._post_lb_resource.side_effect = n_exc.StateInvalidClient
|
||||
|
||||
self.assertRaises(n_exc.StateInvalidClient, cls._create_pool, m_driver,
|
||||
pool)
|
||||
lbaas.create_pool.assert_called_once_with(**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with('pools', 'pool',
|
||||
req)
|
||||
|
||||
def test_find_pool_by_listener(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
@ -545,7 +593,6 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
self.assertIsNone(ret)
|
||||
|
||||
def test_create_member(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
member = obj_lbaas.LBaaSMember(
|
||||
@ -560,11 +607,11 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'address': str(member.ip),
|
||||
'protocol_port': member.port}
|
||||
resp = {'id': member_id}
|
||||
lbaas.create_member.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
|
||||
ret = cls._create_member(m_driver, member)
|
||||
lbaas.create_member.assert_called_once_with(
|
||||
member.pool_id, **req)
|
||||
m_driver._post_lb_resource.assert_called_once_with(
|
||||
'pools/%s/members' % member.pool_id, 'member', req)
|
||||
for attr in member.obj_fields:
|
||||
self.assertEqual(getattr(member, attr),
|
||||
getattr(ret, attr))
|
||||
@ -969,7 +1016,6 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
l7_policy.id)
|
||||
|
||||
def test_create_l7policy(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
|
||||
@ -987,10 +1033,11 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'project_id': l7_policy.project_id,
|
||||
'redirect_pool_id': l7_policy.redirect_pool_id}
|
||||
resp = {'id': l7policy_id}
|
||||
lbaas.create_l7_policy.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
|
||||
ret = cls._create_l7_policy(m_driver, l7_policy)
|
||||
lbaas.create_l7_policy.assert_called_once_with(**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with('l7policies',
|
||||
'l7policy', req)
|
||||
for attr in l7_policy.obj_fields:
|
||||
self.assertEqual(getattr(l7_policy, attr),
|
||||
getattr(ret, attr))
|
||||
@ -1086,7 +1133,6 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
l7_rule.id, l7_rule.l7policy_id)
|
||||
|
||||
def test_create_l7_rule(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
|
||||
@ -1103,11 +1149,11 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
||||
'value': l7_rule.value}
|
||||
|
||||
resp = {'id': l7_rule_id}
|
||||
lbaas.create_l7_rule.return_value = resp
|
||||
m_driver._post_lb_resource.return_value = resp
|
||||
|
||||
ret = cls._create_l7_rule(m_driver, l7_rule)
|
||||
lbaas.create_l7_rule.assert_called_once_with(l7_rule.l7policy_id,
|
||||
**req)
|
||||
m_driver._post_lb_resource.assert_called_once_with(
|
||||
'l7policies/%s/rules' % l7_rule.l7policy_id, 'rule', req)
|
||||
for attr in l7_rule.obj_fields:
|
||||
self.assertEqual(getattr(l7_rule, attr),
|
||||
getattr(ret, attr))
|
||||
|
@ -58,17 +58,21 @@ class TestPodLabelHandler(test_base.TestCase):
|
||||
@mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
|
||||
@mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
|
||||
@mock.patch.object(drivers.PodProjectDriver, 'get_instance')
|
||||
@mock.patch.object(drivers.LBaaSDriver, 'get_instance')
def test_init(self, m_get_lbaas_driver, m_get_project_driver,
              m_get_sg_driver, m_get_vif_pool_driver):
    """PodLabelHandler picks up all four drivers on construction.

    Decorators apply bottom-up, so the LBaaS driver mock (the innermost,
    added here) is the first mock argument after self.
    """
    project_driver = mock.sentinel.project_driver
    sg_driver = mock.sentinel.sg_driver
    lbaas_driver = mock.sentinel.lbaas_driver
    vif_pool_driver = mock.Mock(spec=drivers.VIFPoolDriver)
    m_get_lbaas_driver.return_value = lbaas_driver
    m_get_project_driver.return_value = project_driver
    m_get_sg_driver.return_value = sg_driver
    m_get_vif_pool_driver.return_value = vif_pool_driver

    handler = p_label.PodLabelHandler()

    self.assertEqual(lbaas_driver, handler._drv_lbaas)
    self.assertEqual(project_driver, handler._drv_project)
    self.assertEqual(sg_driver, handler._drv_sg)
    self.assertEqual(vif_pool_driver, handler._drv_vif_pool)
||||
|
11
releasenotes/notes/add-tagging-ce56231f58bf7ad0.yaml
Normal file
11
releasenotes/notes/add-tagging-ce56231f58bf7ad0.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
---
features:
  - |
    Added the possibility to ensure all OpenStack resources created by Kuryr
    are tagged. For Neutron resources the regular ``tags`` field is used. If
    Octavia supports tagging (from Octavia API 2.5, i.e. Stein), the ``tags``
    field is used as well; otherwise tags are put in the ``description``
    field. All this is controlled by the ``[neutron_defaults]resource_tags``
    config option, which can hold a list of tags to be put on resources. This
    feature is useful for correctly identifying any leftovers in OpenStack
    after the Kubernetes cluster that Kuryr was serving gets deleted.
|
Loading…
Reference in New Issue
Block a user