Move from Neutron client to OpenStackSDK.

OpenStack SDK is a framework that provides a consistent and complete
interface for the OpenStack cloud. As we already use it for the Octavia
integration, in this patch we propose to use it for other parts of
Kuryr as well.

Implements: blueprint switch-to-openstacksdk
Change-Id: Ia87bf68bdade3e6f7f752d013f4bdb29bfa5d444
This commit is contained in:
Roman Dobosz 2019-10-18 07:43:00 +00:00
parent 80f990fed4
commit 1b97158c89
11 changed files with 127 additions and 59 deletions

View File

@ -29,6 +29,10 @@ _OPENSTACKSDK = 'openstacksdk'
_POD_RESOURCES_CLIENT = 'pod-resources-client'
def get_network_client():
    """Return the OpenStack SDK network proxy used for Neutron API calls."""
    return _clients[_OPENSTACKSDK].network


def get_neutron_client():
    """Return the legacy python-neutronclient instance.

    NOTE(review): kept for callers not yet migrated to the SDK network
    client above.
    """
    return _clients[_NEUTRON_CLIENT]

View File

@ -188,17 +188,16 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
@MEMOIZE
def _check_quota(self, quota):
neutron = clients.get_neutron_client()
resources = {'subnet': neutron.list_subnets,
'network': neutron.list_networks,
'security_group': neutron.list_security_groups}
os_net = clients.get_network_client()
resources = {'subnets': os_net.subnets,
'networks': os_net.networks,
'security_groups': os_net.security_groups}
for resource, neutron_func in resources.items():
for resource, network_func in resources.items():
resource_quota = quota[resource]
resource_name = resource + 's'
if utils.has_limit(resource_quota):
if not utils.is_available(resource_name, resource_quota,
neutron_func):
if not utils.is_available(resource, resource_quota,
network_func):
return False
return True

View File

@ -141,11 +141,10 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
@MEMOIZE
def _check_quota(self, quota):
neutron = clients.get_neutron_client()
sg_quota = quota['security_group']
sg_func = neutron.list_security_groups
if utils.has_limit(sg_quota):
return utils.is_available('security_groups', sg_quota, sg_func)
os_net = clients.get_network_client()
if utils.has_limit(quota.security_groups):
return utils.is_available('security_groups', quota.security_groups,
os_net.security_groups)
return True
def _is_service_affected(self, service, affected_pods):

View File

@ -212,11 +212,9 @@ class VIFHandler(k8s_base.ResourceEventHandler):
@MEMOIZE
def is_ready(self, quota):
neutron = clients.get_neutron_client()
port_quota = quota['port']
port_func = neutron.list_ports
if utils.has_limit(port_quota):
return utils.is_available('ports', port_quota, port_func)
os_net = clients.get_network_client()
if utils.has_limit(quota.ports):
return utils.is_available('ports', quota.ports, os_net.ports)
return True
@staticmethod

View File

@ -59,9 +59,9 @@ class HealthServer(object):
self.headers = {'Connection': 'close'}
def _components_ready(self):
neutron = clients.get_neutron_client()
os_net = clients.get_network_client()
project_id = config.CONF.neutron_defaults.project
quota = neutron.show_quota(project_id).get('quota')
quota = os_net.get_quota(project_id)
for component in self._registry:
if not component.is_ready(quota):

View File

@ -342,6 +342,9 @@ class BaseVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('resource_tags',
tags,
group='neutron_defaults')
self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags',
group='neutron_defaults')
neutron.list_networks.return_value = {'networks': [{'id': net_id}]}
cls._cleanup_leftover_ports(m_driver)
@ -362,6 +365,8 @@ class BaseVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('resource_tags',
tags,
group='neutron_defaults')
self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags',
group='neutron_defaults')
neutron.list_networks.return_value = {'networks': []}
cls._cleanup_leftover_ports(m_driver)
@ -384,6 +389,9 @@ class BaseVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('resource_tags',
tags,
group='neutron_defaults')
self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags',
group='neutron_defaults')
neutron.list_networks.return_value = {'networks': [{'id': net_id}]}
cls._cleanup_leftover_ports(m_driver)
@ -404,6 +412,9 @@ class BaseVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('resource_tags',
tags,
group='neutron_defaults')
self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags',
group='neutron_defaults')
neutron.list_networks.return_value = {'networks': [{'id': net_id}]}
cls._cleanup_leftover_ports(m_driver)
@ -419,9 +430,6 @@ class BaseVIFPool(test_base.TestCase):
port_id = str(uuid.uuid4())
port = get_port_obj(port_id=port_id)
m_get_ports.return_value = [port]
oslo_cfg.CONF.set_override('resource_tags',
[],
group='neutron_defaults')
cls._cleanup_leftover_ports(m_driver)
neutron.list_networks.assert_not_called()
@ -437,9 +445,6 @@ class BaseVIFPool(test_base.TestCase):
port = get_port_obj(port_id=port_id)
port['binding:host_id'] = None
m_get_ports.return_value = [port]
oslo_cfg.CONF.set_override('resource_tags',
[],
group='neutron_defaults')
cls._cleanup_leftover_ports(m_driver)
neutron.list_networks.assert_not_called()

View File

@ -144,29 +144,29 @@ class TestHealthServer(base.TestCase):
@mock.patch.object(_TestHandler, 'is_ready')
def test__components_ready(self, m_status):
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.show_quota.return_value = get_quota_obj()
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.get_quota.return_value = get_quota_obj()
self.srv._registry = [_TestHandler()]
m_status.return_value = True
resp = self.srv._components_ready()
m_status.assert_called_once()
self.assertEqual(resp, True)
neutron.show_quota.assert_called_once()
self.assertIs(resp, True)
os_net.get_quota.assert_called_once()
@mock.patch.object(_TestHandler, 'is_ready')
def test__components_ready_error(self, m_status):
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.show_quota.return_value = get_quota_obj()
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.get_quota.return_value = get_quota_obj()
self.srv._registry = [_TestHandler()]
m_status.return_value = False
resp = self.srv._components_ready()
m_status.assert_called_once()
self.assertEqual(resp, False)
neutron.show_quota.assert_called_once()
self.assertIs(resp, False)
os_net.get_quota.assert_called_once()
@mock.patch.object(_TestHandler, 'is_alive')
def test_liveness(self, m_status):

View File

@ -41,3 +41,11 @@ class MockLBaaSClient(fixtures.Fixture):
self.useFixture(fixtures.MockPatch(
'kuryr_kubernetes.clients.get_loadbalancer_client',
lambda: self.client))
class MockNetworkClient(fixtures.Fixture):
    """Fixture replacing the OpenStack SDK network client with a Mock.

    Tests reach the mock through ``self.client`` after installing the
    fixture with ``useFixture``.
    """

    def _setUp(self):
        self.client = mock.Mock()
        patcher = fixtures.MockPatch(
            'kuryr_kubernetes.clients.get_network_client',
            lambda: self.client)
        self.useFixture(patcher)

View File

@ -31,6 +31,7 @@ class TestK8sClient(test_base.TestCase):
neutron_mock = mock.Mock()
openstacksdk_mock = mock.Mock()
openstacksdk_mock.load_balancer = mock.Mock()
openstacksdk_mock.network = mock.Mock()
k8s_dummy = object()
m_cfg.kubernetes.api_root = k8s_api_root
@ -46,3 +47,5 @@ class TestK8sClient(test_base.TestCase):
self.assertIs(openstacksdk_mock, clients.get_openstacksdk())
self.assertIs(openstacksdk_mock.load_balancer,
clients.get_loadbalancer_client())
self.assertIs(openstacksdk_mock.network,
clients.get_network_client())

View File

@ -13,6 +13,8 @@
# limitations under the License.
import mock
import munch
from openstack import exceptions as os_exc
from os_vif import objects
from oslo_config import cfg
@ -28,6 +30,7 @@ CONF = cfg.CONF
class TestUtils(test_base.TestCase):
@mock.patch('socket.gethostname')
def test_get_node_name(self, m_gethostname):
m_gethostname.return_value = 'foo'
@ -64,18 +67,18 @@ class TestUtils(test_base.TestCase):
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_network')
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_subnet')
def test_get_subnet(self, m_osv_subnet, m_osv_network):
neutron = self.useFixture(k_fix.MockNeutronClient()).client
os_net = self.useFixture(k_fix.MockNetworkClient()).client
subnet = mock.MagicMock()
network = mock.MagicMock()
subnet_id = mock.sentinel.subnet_id
network_id = mock.sentinel.network_id
neutron_subnet = {'network_id': network_id}
neutron_subnet = munch.Munch({'network_id': network_id})
neutron_network = mock.sentinel.neutron_network
neutron.show_subnet.return_value = {'subnet': neutron_subnet}
neutron.show_network.return_value = {'network': neutron_network}
os_net.get_subnet.return_value = neutron_subnet
os_net.get_network.return_value = neutron_network
m_osv_subnet.return_value = subnet
m_osv_network.return_value = network
@ -83,8 +86,8 @@ class TestUtils(test_base.TestCase):
ret = utils.get_subnet(subnet_id)
self.assertEqual(network, ret)
neutron.show_subnet.assert_called_once_with(subnet_id)
neutron.show_network.assert_called_once_with(network_id)
os_net.get_subnet.assert_called_once_with(subnet_id)
os_net.get_network.assert_called_once_with(network_id)
m_osv_subnet.assert_called_once_with(neutron_subnet)
m_osv_network.assert_called_once_with(neutron_network)
network.subnets.objects.append.assert_called_once_with(subnet)
@ -217,3 +220,55 @@ class TestUtils(test_base.TestCase):
ret = utils.has_port_changes(service, lbaas_spec)
self.assertFalse(ret)
def test_get_nodes_ips(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
ip1 = munch.Munch({'fixed_ips': [{'ip_address': '10.0.0.1'}],
'trunk_details': True})
ip2 = munch.Munch({'fixed_ips': [{'ip_address': '10.0.0.2'}],
'trunk_details': True})
ip3 = munch.Munch({'fixed_ips': [{'ip_address': '10.0.0.3'}],
'trunk_details': None})
ports = (p for p in [ip1, ip2, ip3])
os_net.ports.return_value = ports
trunk_ips = utils.get_nodes_ips()
os_net.ports.assert_called_once_with(status='ACTIVE')
self.assertEqual(trunk_ips, [ip1.fixed_ips[0]['ip_address'],
ip2.fixed_ips[0]['ip_address']])
def test_get_nodes_ips_tagged(self):
CONF.set_override('resource_tags', ['foo'], group='neutron_defaults')
self.addCleanup(CONF.clear_override, 'resource_tags',
group='neutron_defaults')
os_net = self.useFixture(k_fix.MockNetworkClient()).client
ip1 = munch.Munch({'fixed_ips': [{'ip_address': '10.0.0.1'}],
'trunk_details': True})
ip2 = munch.Munch({'fixed_ips': [{'ip_address': '10.0.0.2'}],
'trunk_details': False})
ports = (p for p in [ip1, ip2])
os_net.ports.return_value = ports
trunk_ips = utils.get_nodes_ips()
os_net.ports.assert_called_once_with(status='ACTIVE', tags=['foo'])
self.assertEqual(trunk_ips, [ip1.fixed_ips[0]['ip_address']])
def test_get_subnet_cidr(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
subnet_id = mock.sentinel.subnet_id
subnet = munch.Munch(cidr='10.0.0.0/24')
os_net.get_subnet.return_value = subnet
result = utils.get_subnet_cidr(subnet_id)
os_net.get_subnet.assert_called_once_with(subnet_id)
self.assertEqual(result, '10.0.0.0/24')
def test_get_subnet_cidr_no_such_subnet(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
subnet_id = mock.sentinel.subnet_id
os_net.get_subnet.side_effect = os_exc.ResourceNotFound
self.assertRaises(os_exc.ResourceNotFound, utils.get_subnet_cidr,
subnet_id)
os_net.get_subnet.assert_called_once_with(subnet_id)

View File

@ -16,7 +16,7 @@ import time
import requests
from neutronclient.common import exceptions as n_exc
from openstack import exceptions as os_exc
from os_vif import objects
from oslo_cache import core as cache
from oslo_config import cfg
@ -162,45 +162,42 @@ def get_leader_name():
def get_nodes_ips():
    """Get the IPs of the trunk ports associated to the deployment.

    :returns: list of first-fixed-IP addresses of every ACTIVE port that
              carries trunk details; filtered by the configured
              ``neutron_defaults.resource_tags`` when set.
    """
    trunk_ips = []
    os_net = clients.get_network_client()
    tags = CONF.neutron_defaults.resource_tags
    if tags:
        ports = os_net.ports(status='ACTIVE', tags=tags)
    else:
        # NOTE(ltomasbo): if tags are not used, assume all the trunk ports
        # are part of the kuryr deployment
        ports = os_net.ports(status='ACTIVE')
    for port in ports:
        if port.trunk_details:
            trunk_ips.append(port.fixed_ips[0]['ip_address'])
    return trunk_ips
@MEMOIZE
def get_subnet(subnet_id):
    """Return the os-vif network object for a subnet id, subnet attached.

    Results are memoized; the subnet and its network are fetched through
    the OpenStack SDK network client and converted via os_vif_util.
    """
    os_net = clients.get_network_client()

    n_subnet = os_net.get_subnet(subnet_id)
    n_network = os_net.get_network(n_subnet.network_id)

    subnet = os_vif_util.neutron_to_osvif_subnet(n_subnet)
    network = os_vif_util.neutron_to_osvif_network(n_network)

    network.subnets.objects.append(subnet)

    return network
@MEMOIZE
def get_subnet_cidr(subnet_id):
    """Return the CIDR of the given subnet (memoized).

    :raises openstack.exceptions.ResourceNotFound: when the subnet does
        not exist; logged before re-raising.
    """
    os_net = clients.get_network_client()
    try:
        subnet_obj = os_net.get_subnet(subnet_id)
    except os_exc.ResourceNotFound:
        LOG.exception("Subnet %s CIDR not found!", subnet_id)
        raise
    return subnet_obj.cidr
def extract_pod_annotation(annotation):
@ -222,8 +219,8 @@ def has_limit(quota):
return quota != NO_LIMIT
def is_available(resource, resource_quota, neutron_func):
qnt_resources = len(neutron_func().get(resource))
def is_available(resource, resource_quota, network_func):
qnt_resources = len(list(network_func()))
availability = resource_quota - qnt_resources
if availability <= 0:
LOG.error("Quota exceeded for resource: %s", resource)