From 9115ab2c6b21ee8f6a920ce673005f18e788e52e Mon Sep 17 00:00:00 2001
From: Kirill Zaitsev
Date: Mon, 5 Jun 2017 18:10:03 +0300
Subject: [PATCH] Allow passing multiple VIFs to CNI

This commit alters the format of the annotation set and read by the
controller and the CNI. Instead of a single VIF object it now holds a
dictionary that maps interface names to VIF objects. The controller
currently only sets 'eth0' as the default VIF. The CNI is altered
accordingly to read VIFs from the mapping and to add all of them to the
Pod's namespace.

This commit does not include any mechanism to actually request multiple
IPs/ports for a Pod; it rather lays the foundation for future commits
that will allow that.
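For illustration, the annotation value stored under K8S_ANNOTATION_VIF
changes roughly as follows (a sketch: the VIF primitives are abbreviated
here, the real values are full oslo.versionedobjects primitives):

    Before:  {"versioned_object.name": "VIFOpenVSwitch", ...}
    After:   {"eth0": {"versioned_object.name": "VIFOpenVSwitch", ...}}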
Related bp: kuryr-kubernetes-sriov-support
Targets bp: multi-vif-pods

Change-Id: Iaef928e7ab9dc0fce8b7e8fffb7b5a1f6b5ccb17
---
 kuryr_kubernetes/cni/api.py                        |   2 +-
 kuryr_kubernetes/cni/binding/base.py               |  12 +-
 kuryr_kubernetes/cni/daemon/service.py             |  26 ++--
 kuryr_kubernetes/cni/handlers.py                   | 147 +++++++++++++++---
 .../cni/plugins/k8s_cni_registry.py                |  40 +++--
 kuryr_kubernetes/constants.py                      |   2 +
 kuryr_kubernetes/controller/handlers/vif.py        |  81 ++++++----
 kuryr_kubernetes/tests/fake.py                     |  24 +++
 .../unit/cni/plugins/test_k8s_cni_registry.py      |  27 ++--
 .../unit/controller/handlers/test_vif.py           |  54 ++++---
 10 files changed, 296 insertions(+), 119 deletions(-)

diff --git a/kuryr_kubernetes/cni/api.py b/kuryr_kubernetes/cni/api.py
index d89f1a0c2..83af00803 100644
--- a/kuryr_kubernetes/cni/api.py
+++ b/kuryr_kubernetes/cni/api.py
@@ -100,7 +100,7 @@ class CNIRunner(object):
             cni_ip = result.setdefault("ip%s" % ip.version, {})
             cni_ip['ip'] = "%s/%s" % (ip, subnet.cidr.prefixlen)
 
-            if subnet.gateway:
+            if hasattr(subnet, 'gateway'):
                 cni_ip['gateway'] = str(subnet.gateway)
 
             if subnet.routes.objects:
diff --git a/kuryr_kubernetes/cni/binding/base.py b/kuryr_kubernetes/cni/binding/base.py
index af9b1db1e..3e058bb37 100644
--- a/kuryr_kubernetes/cni/binding/base.py
+++ b/kuryr_kubernetes/cni/binding/base.py
@@ -55,7 +55,7 @@ def _enable_ipv6(netns):
         pyroute2.netns.setns(self_ns_fd)
 
 
-def _configure_l3(vif, ifname, netns):
+def _configure_l3(vif, ifname, netns, is_default_gateway):
     with get_ipdb(netns) as ipdb:
         with ipdb.interfaces[ifname] as iface:
             for subnet in vif.network.subnets.objects:
@@ -70,21 +70,23 @@ def _configure_l3(vif, ifname, netns):
             for route in subnet.routes.objects:
                 routes.add(gateway=str(route.gateway),
                            dst=str(route.cidr)).commit()
-            if subnet.gateway:
+            if is_default_gateway and hasattr(subnet, 'gateway'):
                 routes.add(gateway=str(subnet.gateway),
                            dst='default').commit()
 
 
-def connect(vif, instance_info, ifname, netns=None, report_health=None):
+def connect(vif, instance_info, ifname, netns=None, report_health=None,
+            is_default_gateway=True):
     driver = _get_binding_driver(vif)
     if report_health:
         report_health(driver.is_healthy())
     os_vif.plug(vif, instance_info)
     driver.connect(vif, ifname, netns)
-    _configure_l3(vif, ifname, netns)
+    _configure_l3(vif, ifname, netns, is_default_gateway)
 
 
-def disconnect(vif, instance_info, ifname, netns=None, report_health=None):
+def disconnect(vif, instance_info, ifname, netns=None, report_health=None,
+               **kwargs):
     driver = _get_binding_driver(vif)
     if report_health:
         report_health(driver.is_healthy())
diff --git a/kuryr_kubernetes/cni/daemon/service.py b/kuryr_kubernetes/cni/daemon/service.py
index a32b8e254..b470b2799 100644
--- a/kuryr_kubernetes/cni/daemon/service.py
+++ b/kuryr_kubernetes/cni/daemon/service.py
@@ -209,24 +209,32 @@ class CNIDaemonWatcherService(cotyledon.Service):
                 self.healthy.value = False
             time.sleep(HEALTH_CHECKER_DELAY)
 
-    def on_done(self, pod, vif):
+    def on_done(self, pod, vifs):
         pod_name = utils.get_pod_unique_name(pod)
-        vif_dict = vif.obj_to_primitive()
+        vif_dict = {
+            ifname: vif.obj_to_primitive() for
+            ifname, vif in vifs.items()
+        }
         # NOTE(dulek): We need a lock when modifying shared self.registry dict
         # to prevent race conditions with other processes/threads.
         with lockutils.lock(pod_name, external=True):
             if pod_name not in self.registry:
-                self.registry[pod_name] = {'pod': pod, 'vif': vif_dict,
+                self.registry[pod_name] = {'pod': pod, 'vifs': vif_dict,
                                            'containerid': None}
             else:
                 # NOTE(dulek): Only update vif if its status changed, we don't
                 # need to care about other changes now.
-                old_vif = base.VersionedObject.obj_from_primitive(
-                    self.registry[pod_name]['vif'])
-                if old_vif.active != vif.active:
-                    pod_dict = self.registry[pod_name]
-                    pod_dict['vif'] = vif_dict
-                    self.registry[pod_name] = pod_dict
+                old_vifs = {
+                    ifname:
+                    base.VersionedObject.obj_from_primitive(vif_obj) for
+                    ifname, vif_obj in (
+                        self.registry[pod_name]['vifs'].items())
+                }
+                for iface in vifs.keys():
+                    if old_vifs[iface].active != vifs[iface].active:
+                        pod_dict = self.registry[pod_name]
+                        pod_dict['vifs'] = vif_dict
+                        self.registry[pod_name] = pod_dict
 
     def on_deleted(self, pod):
         pod_name = utils.get_pod_unique_name(pod)
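For reference, an entry in the daemon's shared registry now takes roughly
the following shape (a sketch: the pod and VIF primitives are abbreviated
and the pod name is invented):

    {'default/mypod': {'pod': {...},                # Kubernetes Pod dict
                       'vifs': {'eth0': {...}},     # primitives keyed by ifname
                       'containerid': 'abc123'}}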
diff --git a/kuryr_kubernetes/cni/handlers.py b/kuryr_kubernetes/cni/handlers.py
index 960088685..258b97517 100644
--- a/kuryr_kubernetes/cni/handlers.py
+++ b/kuryr_kubernetes/cni/handlers.py
@@ -35,29 +35,52 @@ class CNIHandlerBase(k8s_base.ResourceEventHandler):
     def __init__(self, cni, on_done):
         self._cni = cni
         self._callback = on_done
-        self._vif = None
+        self._vifs = {}
 
     def on_present(self, pod):
-        vif = self._get_vif(pod)
+        vifs = self._get_vifs(pod)
 
-        if vif:
-            self.on_vif(pod, vif)
+        for ifname, vif in vifs.items():
+            self.on_vif(pod, vif, ifname)
+
+        if self.should_callback(pod, vifs):
+            self.callback()
 
     @abc.abstractmethod
-    def on_vif(self, pod, vif):
+    def should_callback(self, pod, vifs):
+        """Called after all VIFs have been processed
+
+        Should determine if the CNI is ready to call the callback
+
+        :param pod: dict containing Kubernetes Pod object
+        :param vifs: dict mapping interface names to os_vif VIF objects
+        :returns: True/False
+        """
         raise NotImplementedError()
 
-    def _get_vif(self, pod):
+    @abc.abstractmethod
+    def callback(self):
+        """Called if should_callback returns True"""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def on_vif(self, pod, vif, ifname):
+        raise NotImplementedError()
+
+    def _get_vifs(self, pod):
         # TODO(ivc): same as VIFHandler._get_vif
         try:
             annotations = pod['metadata']['annotations']
-            vif_annotation = annotations[k_const.K8S_ANNOTATION_VIF]
+            vifs_annotation = annotations[k_const.K8S_ANNOTATION_VIF]
         except KeyError:
-            return None
-        vif_dict = jsonutils.loads(vif_annotation)
-        vif = obj_vif.vif.VIFBase.obj_from_primitive(vif_dict)
-        LOG.debug("Got VIF from annotation: %r", vif)
-        return vif
+            return {}
+        vifs_annotation = jsonutils.loads(vifs_annotation)
+        vifs_dict = {
+            ifname: obj_vif.vif.VIFBase.obj_from_primitive(vif)
+            for ifname, vif in vifs_annotation.items()
+        }
+        LOG.debug("Got VIFs from annotation: %r", vifs_dict)
+        return vifs_dict
 
     def _get_inst(self, pod):
         return obj_vif.instance_info.InstanceInfo(
@@ -69,25 +92,79 @@ class AddHandler(CNIHandlerBase):
     def __init__(self, cni, on_done):
         LOG.debug("AddHandler called with CNI env: %r", cni)
         super(AddHandler, self).__init__(cni, on_done)
-        self._vif = None
 
-    def on_vif(self, pod, vif):
-        if not self._vif:
-            self._vif = vif.obj_clone()
-            self._vif.active = True
-            b_base.connect(self._vif, self._get_inst(pod),
-                           self._cni.CNI_IFNAME, self._cni.CNI_NETNS)
+    def on_vif(self, pod, vif, ifname):
+        """Called once for every VIF of a Pod on every event.
 
-        if vif.active:
-            self._callback(vif)
+        If it is the first time we see this VIF, plug it in.
+
+        :param pod: dict containing Kubernetes Pod object
+        :param vif: os_vif VIF object
+        :param ifname: string, name of the interface inside the container
+        """
+        if ifname not in self._vifs:
+
+            self._vifs[ifname] = vif
+            _vif = vif.obj_clone()
+            _vif.active = True
+
+            # set eth0's gateway as default
+            is_default_gateway = (ifname == self._cni.CNI_IFNAME)
+            b_base.connect(_vif, self._get_inst(pod),
+                           ifname, self._cni.CNI_NETNS,
+                           is_default_gateway=is_default_gateway)
+
+    def should_callback(self, pod, vifs):
+        """Called after all VIFs have been processed
+
+        Determines if the CNI is ready to call the callback and stop
+        watching for more events. For AddHandler the callback should be
+        called if there is at least one VIF in the annotation and all the
+        VIFs received are marked active
+
+        :param pod: dict containing Kubernetes Pod object
+        :param vifs: dict mapping interface names to os_vif VIF objects
+        :returns: True/False
+        """
+        all_vifs_active = vifs and all(vif.active for vif in vifs.values())
+
+        if all_vifs_active:
+            if self._cni.CNI_IFNAME in self._vifs:
+                self.callback_vif = self._vifs[self._cni.CNI_IFNAME]
+            else:
+                self.callback_vif = list(self._vifs.values())[0]
+            LOG.debug("All VIFs are active, exiting. Will return %s",
+                      self.callback_vif)
+            return True
+        else:
+            LOG.debug("Waiting for all VIFs to become active")
+            return False
+
+    def callback(self):
+        self._callback(self.callback_vif)
 
 
 class DelHandler(CNIHandlerBase):
 
-    def on_vif(self, pod, vif):
+    def on_vif(self, pod, vif, ifname):
         b_base.disconnect(vif, self._get_inst(pod),
-                          self._cni.CNI_IFNAME, self._cni.CNI_NETNS)
-        self._callback(vif)
+                          ifname, self._cni.CNI_NETNS)
+
+    def should_callback(self, pod, vifs):
+        """Called after all VIFs have been processed
+
+        Calls the callback if there was at least one VIF in the Pod
+
+        :param pod: dict containing Kubernetes Pod object
+        :param vifs: dict mapping interface names to os_vif VIF objects
+        :returns: True/False
+        """
+        if vifs:
+            return True
+        return False
+
+    def callback(self):
+        self._callback(None)
 
 
 class CallbackHandler(CNIHandlerBase):
@@ -95,9 +172,29 @@ class CallbackHandler(CNIHandlerBase):
 
     def __init__(self, on_vif, on_del=None):
         super(CallbackHandler, self).__init__(None, on_vif)
        self._del_callback = on_del
+        self._pod = None
+        self._callback_vifs = None
 
-    def on_vif(self, pod, vif):
-        self._callback(pod, vif)
+    def on_vif(self, pod, vif, ifname):
+        pass
+
+    def should_callback(self, pod, vifs):
+        """Called after all VIFs have been processed
+
+        Calls the callback if there was at least one VIF in the Pod
+
+        :param pod: dict containing Kubernetes Pod object
+        :param vifs: dict mapping interface names to os_vif VIF objects
+        :returns: True/False
+        """
+        self._pod = pod
+        self._callback_vifs = vifs
+        if vifs:
+            return True
+        return False
+
+    def callback(self):
+        self._callback(self._pod, self._callback_vifs)
 
     def on_deleted(self, pod):
         LOG.debug("Got pod %s deletion event.", pod['metadata']['name'])
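To make the new CNIHandlerBase contract concrete, here is a minimal sketch
of a subclass (illustration only, not part of the patch; the class name is
invented):

    class LogOnlyHandler(CNIHandlerBase):
        """Toy handler: logs every VIF and fires its callback once any exist."""

        def on_vif(self, pod, vif, ifname):
            LOG.debug("Saw VIF %r for interface %s", vif, ifname)

        def should_callback(self, pod, vifs):
            # Ready as soon as the annotation carries at least one VIF.
            return bool(vifs)

        def callback(self):
            self._callback(None)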
diff --git a/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py b/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py
index e10e4daa6..ef19f7acc 100644
--- a/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py
+++ b/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py
@@ -22,6 +22,7 @@ from oslo_log import log as logging
 
 from kuryr_kubernetes.cni.binding import base as b_base
 from kuryr_kubernetes.cni.plugins import base as base_cni
+from kuryr_kubernetes import constants as k_const
 from kuryr_kubernetes import exceptions
 
 LOG = logging.getLogger(__name__)
@@ -47,7 +48,7 @@ class K8sCNIRegistryPlugin(base_cni.CNIPlugin):
                 'name': params.args.K8S_POD_NAME}
 
     def add(self, params):
-        vif = self._do_work(params, b_base.connect)
+        vifs = self._do_work(params, b_base.connect)
 
         pod_name = self._get_pod_name(params)
 
@@ -62,21 +63,26 @@ class K8sCNIRegistryPlugin(base_cni.CNIPlugin):
             LOG.debug('Saved containerid = %s for pod %s',
                       params.CNI_CONTAINERID, pod_name)
 
-        # Wait for VIF to become active.
+        # Wait for VIFs to become active.
         timeout = CONF.cni_daemon.vif_annotation_timeout
 
-        # Wait for timeout sec, 1 sec between tries, retry when vif not active.
+        # Wait for timeout sec, 1 sec between tries, retry while any VIF
+        # is not yet active.
         @retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY,
-                        retry_on_result=lambda x: not x.active)
+                        retry_on_result=lambda x: any(
+                            not vif.active for vif in x.values()))
         def wait_for_active(pod_name):
-            return base.VersionedObject.obj_from_primitive(
-                self.registry[pod_name]['vif'])
+            return {
+                ifname: base.VersionedObject.obj_from_primitive(vif_obj) for
+                ifname, vif_obj in self.registry[pod_name]['vifs'].items()
+            }
 
-        vif = wait_for_active(pod_name)
-        if not vif.active:
-            raise exceptions.ResourceNotReady(pod_name)
+        vifs = wait_for_active(pod_name)
+        for vif in vifs.values():
+            if not vif.active:
+                raise exceptions.ResourceNotReady(pod_name)
 
-        return vif
+        return vifs[k_const.DEFAULT_IFNAME]
 
     def delete(self, params):
         pod_name = self._get_pod_name(params)
@@ -114,13 +120,19 @@ class K8sCNIRegistryPlugin(base_cni.CNIPlugin):
         try:
             d = find()
             pod = d['pod']
-            vif = base.VersionedObject.obj_from_primitive(d['vif'])
+            vifs = {
+                ifname: base.VersionedObject.obj_from_primitive(vif_obj) for
+                ifname, vif_obj in d['vifs'].items()
+            }
         except KeyError:
             raise exceptions.ResourceNotReady(pod_name)
 
-        fn(vif, self._get_inst(pod), params.CNI_IFNAME, params.CNI_NETNS,
-           self.report_drivers_health)
-        return vif
+        for ifname, vif in vifs.items():
+            is_default_gateway = (ifname == params.CNI_IFNAME)
+            fn(vif, self._get_inst(pod), ifname, params.CNI_NETNS,
+               report_health=self.report_drivers_health,
+               is_default_gateway=is_default_gateway)
+        return vifs
 
     def _get_inst(self, pod):
         return obj_vif.instance_info.InstanceInfo(
diff --git a/kuryr_kubernetes/constants.py b/kuryr_kubernetes/constants.py
index a5e414063..e3bd3be46 100644
--- a/kuryr_kubernetes/constants.py
+++ b/kuryr_kubernetes/constants.py
@@ -43,3 +43,5 @@ VIF_POOL_POPULATE = '/populatePool'
 VIF_POOL_FREE = '/freePool'
 VIF_POOL_LIST = '/listPools'
 VIF_POOL_SHOW = '/showPool'
+
+DEFAULT_IFNAME = 'eth0'
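Both the registry and the annotation store VIFs as versioned-object
primitives, so each side round-trips through oslo.versionedobjects. A
minimal standalone sketch of that round-trip (not from the patch):

    import os_vif
    from os_vif.objects import vif as osv_vif
    from oslo_utils import uuidutils

    os_vif.objects.register_all()  # register VIF classes, done once at startup

    vif = osv_vif.VIFOpenVSwitch(id=uuidutils.generate_uuid(), active=True)
    primitive = vif.obj_to_primitive()  # JSON-serializable dict
    restored = osv_vif.VIFOpenVSwitch.obj_from_primitive(primitive)
    assert restored.active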
diff --git a/kuryr_kubernetes/controller/handlers/vif.py b/kuryr_kubernetes/controller/handlers/vif.py
index 45ec48820..9cc7f5c18 100644
--- a/kuryr_kubernetes/controller/handlers/vif.py
+++ b/kuryr_kubernetes/controller/handlers/vif.py
@@ -33,7 +33,7 @@ class VIFHandler(k8s_base.ResourceEventHandler):
     the CNI driver (that runs on 'kubelet' nodes) is responsible for providing
     networking to Kubernetes pods. `VIFHandler` relies on a set of drivers
     (which are responsible for managing Neutron resources) to define the VIF
-    object and pass it to the CNI driver in form of the Kubernetes pod
+    objects and pass them to the CNI driver in the form of the Kubernetes pod
     annotation.
     """
 
@@ -60,38 +60,54 @@ class VIFHandler(k8s_base.ResourceEventHandler):
             # where certain pods/namespaces/nodes can be managed by other
             # networking solutions/CNI drivers.
             return
+        vifs = self._get_vifs(pod)
 
-        vif = self._get_vif(pod)
+        if not vifs:
+            vifs = {}
 
-        if not vif:
             project_id = self._drv_project.get_project(pod)
             security_groups = self._drv_sg.get_security_groups(pod, project_id)
             subnets = self._drv_subnets.get_subnets(pod, project_id)
-            vif = self._drv_vif_pool.request_vif(pod, project_id, subnets,
-                                                 security_groups)
+
+            # NOTE(danil): There is currently no way to actually request
+            # multiple VIFs. However, we're packing the main 'eth0' VIF into
+            # a dict here to facilitate future work in this area
+            main_vif = self._drv_vif_pool.request_vif(
+                pod, project_id, subnets, security_groups)
+            vifs[constants.DEFAULT_IFNAME] = main_vif
+
             try:
-                self._set_vif(pod, vif)
+                self._set_vifs(pod, vifs)
             except k_exc.K8sClientException as ex:
                 LOG.debug("Failed to set annotation: %s", ex)
                 # FIXME(ivc): improve granularity of K8sClient exceptions:
                 # only resourceVersion conflict should be ignored
-                self._drv_vif_pool.release_vif(pod, vif, project_id,
-                                               security_groups)
-        elif not vif.active:
-            self._drv_vif_pool.activate_vif(pod, vif)
-            self._set_vif(pod, vif)
+                for ifname, vif in vifs.items():
+                    self._drv_for_vif(vif).release_vif(pod, vif, project_id,
+                                                       security_groups)
+        else:
+            changed = False
+            for ifname, vif in vifs.items():
+                if not vif.active:
+                    self._drv_for_vif(vif).activate_vif(pod, vif)
+                    changed = True
+            if changed:
+                self._set_vifs(pod, vifs)
 
     def on_deleted(self, pod):
         if self._is_host_network(pod):
             return
+        project_id = self._drv_project.get_project(pod)
+        security_groups = self._drv_sg.get_security_groups(pod, project_id)
 
-        vif = self._get_vif(pod)
+        vifs = self._get_vifs(pod)
+        for ifname, vif in vifs.items():
+            self._drv_for_vif(vif).release_vif(pod, vif, project_id,
+                                               security_groups)
 
-        if vif:
-            project_id = self._drv_project.get_project(pod)
-            security_groups = self._drv_sg.get_security_groups(pod, project_id)
-            self._drv_vif_pool.release_vif(pod, vif, project_id,
-                                           security_groups)
+    def _drv_for_vif(self, vif):
+        # TODO(danil): proper polymorphic dispatch is needed here
+        return self._drv_vif_pool
 
     @staticmethod
     def _is_host_network(pod):
@@ -106,29 +122,36 @@ class VIFHandler(k8s_base.ResourceEventHandler):
         except KeyError:
             return False
 
-    def _set_vif(self, pod, vif):
+    def _set_vifs(self, pod, vifs):
         # TODO(ivc): extract annotation interactions
-        if vif is None:
-            LOG.debug("Removing VIF annotation: %r", vif)
+        if not vifs:
+            LOG.debug("Removing VIFs annotation: %r", vifs)
             annotation = None
         else:
-            vif.obj_reset_changes(recursive=True)
-            LOG.debug("Setting VIF annotation: %r", vif)
-            annotation = jsonutils.dumps(vif.obj_to_primitive(),
+            vifs_dict = {}
+            for ifname, vif in vifs.items():
+                vif.obj_reset_changes(recursive=True)
+                vifs_dict[ifname] = vif.obj_to_primitive()
+
+            annotation = jsonutils.dumps(vifs_dict,
                                          sort_keys=True)
+            LOG.debug("Setting VIFs annotation: %r", annotation)
         k8s = clients.get_kubernetes_client()
         k8s.annotate(pod['metadata']['selfLink'],
                      {constants.K8S_ANNOTATION_VIF: annotation},
                      resource_version=pod['metadata']['resourceVersion'])
 
-    def _get_vif(self, pod):
+    def _get_vifs(self, pod):
         # TODO(ivc): same as '_set_vif'
         try:
             annotations = pod['metadata']['annotations']
             vif_annotation = annotations[constants.K8S_ANNOTATION_VIF]
         except KeyError:
-            return None
-        vif_dict = jsonutils.loads(vif_annotation)
-        vif = obj_vif.vif.VIFBase.obj_from_primitive(vif_dict)
-        LOG.debug("Got VIF from annotation: %r", vif)
-        return vif
+            return {}
+        vif_annotation = jsonutils.loads(vif_annotation)
+        vifs = {
+            ifname: obj_vif.vif.VIFBase.obj_from_primitive(vif_obj) for
+            ifname, vif_obj in vif_annotation.items()
+        }
+        LOG.debug("Got VIFs from annotation: %r", vifs)
+        return vifs
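The _drv_for_vif() TODO above anticipates dispatching each VIF to a driver
based on its type. A hypothetical sketch of where this could go (the SR-IOV
driver attribute is invented here; nothing in this patch provides it):

    def _drv_for_vif(self, vif):
        # Hypothetical future dispatch: hand direct (SR-IOV) VIFs to a
        # dedicated driver, keep the pool driver for everything else.
        if isinstance(vif, osv_vif.VIFDirect):
            return self._drv_sriov  # invented attribute, not in this patch
        return self._drv_vif_pool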
diff --git a/kuryr_kubernetes/tests/fake.py b/kuryr_kubernetes/tests/fake.py
index 5663f1c80..ffb84a3b7 100644
--- a/kuryr_kubernetes/tests/fake.py
+++ b/kuryr_kubernetes/tests/fake.py
@@ -59,3 +59,27 @@ def _fake_vif_string(dictionary=None):
         return jsonutils.dumps(dictionary)
     else:
         return jsonutils.dumps(_fake_vif_dict())
+
+
+def _fake_vifs(cls=osv_vif.VIFOpenVSwitch):
+    return {'eth0': _fake_vif(cls)}
+
+
+def _fake_vifs_dict(obj=None):
+    if obj:
+        return {
+            ifname: vif.obj_to_primitive() for
+            ifname, vif in obj.items()
+        }
+    else:
+        return {
+            ifname: vif.obj_to_primitive() for
+            ifname, vif in _fake_vifs().items()
+        }
+
+
+def _fake_vifs_string(dictionary=None):
+    if dictionary:
+        return jsonutils.dumps(dictionary)
+    else:
+        return jsonutils.dumps(_fake_vifs_dict())
diff --git a/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py b/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py
index 9e0829644..11c1d36f8 100644
--- a/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py
+++ b/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py
@@ -27,8 +27,8 @@ class TestK8sCNIRegistryPlugin(base.TestCase):
         super(TestK8sCNIRegistryPlugin, self).setUp()
         self.pod = {'metadata': {'name': 'foo', 'uid': 'bar',
                                  'namespace': 'default'}}
-        self.vif = fake._fake_vif_dict()
-        registry = {'default/foo': {'pod': self.pod, 'vif': self.vif,
+        self.vifs = fake._fake_vifs_dict()
+        registry = {'default/foo': {'pod': self.pod, 'vifs': self.vifs,
                                     'containerid': None}}
         healthy = mock.Mock()
         self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry, healthy)
@@ -43,7 +43,9 @@ class TestK8sCNIRegistryPlugin(base.TestCase):
         self.plugin.add(self.params)
 
         m_lock.assert_called_with('default/foo', external=True)
-        m_connect.assert_called_with(mock.ANY, mock.ANY, 'baz', 123, mock.ANY)
+        m_connect.assert_called_with(mock.ANY, mock.ANY, 'eth0', 123,
+                                     report_health=mock.ANY,
+                                     is_default_gateway=mock.ANY)
         self.assertEqual('cont_id',
                          self.plugin.registry['default/foo']['containerid'])
 
@@ -51,12 +53,13 @@ class TestK8sCNIRegistryPlugin(base.TestCase):
     def test_del_present(self, m_disconnect):
         self.plugin.delete(self.params)
 
-        m_disconnect.assert_called_with(mock.ANY, mock.ANY, 'baz', 123,
-                                        mock.ANY)
+        m_disconnect.assert_called_with(mock.ANY, mock.ANY, 'eth0', 123,
+                                        report_health=mock.ANY,
+                                        is_default_gateway=mock.ANY)
 
     @mock.patch('kuryr_kubernetes.cni.binding.base.disconnect')
     def test_del_wrong_container_id(self, m_disconnect):
-        registry = {'default/foo': {'pod': self.pod, 'vif': self.vif,
+        registry = {'default/foo': {'pod': self.pod, 'vifs': self.vifs,
                                     'containerid': 'different'}}
         healthy = mock.Mock()
         self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry, healthy)
@@ -69,9 +72,9 @@ class TestK8sCNIRegistryPlugin(base.TestCase):
     @mock.patch('kuryr_kubernetes.cni.binding.base.connect')
     def test_add_present_on_5_try(self, m_connect, m_lock):
         se = [KeyError] * 5
-        se.append({'pod': self.pod, 'vif': self.vif, 'containerid': None})
-        se.append({'pod': self.pod, 'vif': self.vif, 'containerid': None})
-        se.append({'pod': self.pod, 'vif': self.vif, 'containerid': None})
+        se.append({'pod': self.pod, 'vifs': self.vifs, 'containerid': None})
+        se.append({'pod': self.pod, 'vifs': self.vifs, 'containerid': None})
+        se.append({'pod': self.pod, 'vifs': self.vifs, 'containerid': None})
         m_getitem = mock.Mock(side_effect=se)
         m_setitem = mock.Mock()
         m_registry = mock.Mock(__getitem__=m_getitem, __setitem__=m_setitem)
@@ -81,9 +84,11 @@ class TestK8sCNIRegistryPlugin(base.TestCase):
 
         m_lock.assert_called_with('default/foo', external=True)
         m_setitem.assert_called_once_with('default/foo',
                                           {'pod': self.pod,
-                                           'vif': self.vif,
+                                           'vifs': self.vifs,
                                            'containerid': 'cont_id'})
-        m_connect.assert_called_with(mock.ANY, mock.ANY, 'baz', 123, mock.ANY)
+        m_connect.assert_called_with(mock.ANY, mock.ANY, 'eth0', 123,
+                                     report_health=mock.ANY,
+                                     is_default_gateway=mock.ANY)
 
     @mock.patch('time.sleep', mock.Mock())
     def test_add_not_present(self):
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py
index 02d4c7ebe..1d6757436 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py
@@ -33,6 +33,7 @@ class TestVIFHandler(test_base.TestCase):
         self._vif = mock.Mock()
         self._vif.active = True
         self._vif_serialized = mock.sentinel.vif_serialized
+        self._vifs = {k_const.DEFAULT_IFNAME: self._vif}
 
         self._pod_version = mock.sentinel.pod_version
         self._pod_link = mock.sentinel.pod_link
@@ -55,25 +56,28 @@ class TestVIFHandler(test_base.TestCase):
         self._get_project = self._handler._drv_project.get_project
         self._get_subnets = self._handler._drv_subnets.get_subnets
         self._get_security_groups = self._handler._drv_sg.get_security_groups
-        self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
+        self._set_vifs_driver = self._handler._drv_vif_pool.set_vif_driver
         self._request_vif = self._handler._drv_vif_pool.request_vif
         self._release_vif = self._handler._drv_vif_pool.release_vif
         self._activate_vif = self._handler._drv_vif_pool.activate_vif
-        self._get_vif = self._handler._get_vif
-        self._set_vif = self._handler._set_vif
+        self._get_vifs = self._handler._get_vifs
+        self._set_vifs = self._handler._set_vifs
         self._is_host_network = self._handler._is_host_network
         self._is_pending_node = self._handler._is_pending_node
 
         self._request_vif.return_value = self._vif
-        self._get_vif.return_value = self._vif
+        self._get_vifs.return_value = self._vifs
         self._is_host_network.return_value = False
         self._is_pending_node.return_value = True
         self._get_project.return_value = self._project_id
         self._get_subnets.return_value = self._subnets
         self._get_security_groups.return_value = self._security_groups
-        self._set_vif_driver.return_value = mock.Mock(
+        self._set_vifs_driver.return_value = mock.Mock(
             spec=drivers.PodVIFDriver)
 
+        self._handler._drv_for_vif = h_vif.VIFHandler._drv_for_vif.__get__(
+            self._handler)
+
     @mock.patch.object(drivers.VIFPoolDriver, 'set_vif_driver')
     @mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
     @mock.patch.object(drivers.PodVIFDriver, 'get_instance')
@@ -82,7 +86,7 @@ class TestVIFHandler(test_base.TestCase):
     @mock.patch.object(drivers.PodProjectDriver, 'get_instance')
     def test_init(self, m_get_project_driver, m_get_subnets_driver,
                   m_get_sg_driver, m_get_vif_driver, m_get_vif_pool_driver,
-                  m_set_vif_driver):
+                  m_set_vifs_driver):
         project_driver = mock.sentinel.project_driver
         subnets_driver = mock.sentinel.subnets_driver
         sg_driver = mock.sentinel.sg_driver
@@ -131,62 +135,62 @@ class TestVIFHandler(test_base.TestCase):
     def test_on_present(self):
         h_vif.VIFHandler.on_present(self._handler, self._pod)
 
-        self._get_vif.assert_called_once_with(self._pod)
+        self._get_vifs.assert_called_once_with(self._pod)
         self._request_vif.assert_not_called()
         self._activate_vif.assert_not_called()
-        self._set_vif.assert_not_called()
+        self._set_vifs.assert_not_called()
 
     def test_on_present_host_network(self):
         self._is_host_network.return_value = True
 
         h_vif.VIFHandler.on_present(self._handler, self._pod)
 
-        self._get_vif.assert_not_called()
+        self._get_vifs.assert_not_called()
         self._request_vif.assert_not_called()
         self._activate_vif.assert_not_called()
-        self._set_vif.assert_not_called()
+        self._set_vifs.assert_not_called()
 
     def test_on_present_not_pending(self):
         self._is_pending_node.return_value = False
 
         h_vif.VIFHandler.on_present(self._handler, self._pod)
 
-        self._get_vif.assert_not_called()
+        self._get_vifs.assert_not_called()
         self._request_vif.assert_not_called()
         self._activate_vif.assert_not_called()
-        self._set_vif.assert_not_called()
+        self._set_vifs.assert_not_called()
 
     def test_on_present_activate(self):
         self._vif.active = False
 
         h_vif.VIFHandler.on_present(self._handler, self._pod)
 
-        self._get_vif.assert_called_once_with(self._pod)
+        self._get_vifs.assert_called_once_with(self._pod)
         self._activate_vif.assert_called_once_with(self._pod, self._vif)
-        self._set_vif.assert_called_once_with(self._pod, self._vif)
+        self._set_vifs.assert_called_once_with(self._pod, self._vifs)
         self._request_vif.assert_not_called()
 
     def test_on_present_create(self):
-        self._get_vif.return_value = None
+        self._get_vifs.return_value = {}
 
         h_vif.VIFHandler.on_present(self._handler, self._pod)
 
-        self._get_vif.assert_called_once_with(self._pod)
+        self._get_vifs.assert_called_once_with(self._pod)
         self._request_vif.assert_called_once_with(
             self._pod, self._project_id, self._subnets,
             self._security_groups)
-        self._set_vif.assert_called_once_with(self._pod, self._vif)
+        self._set_vifs.assert_called_once_with(self._pod, self._vifs)
         self._activate_vif.assert_not_called()
 
     def test_on_present_rollback(self):
-        self._get_vif.return_value = None
-        self._set_vif.side_effect = k_exc.K8sClientException
+        self._get_vifs.return_value = {}
+        self._set_vifs.side_effect = k_exc.K8sClientException
 
         h_vif.VIFHandler.on_present(self._handler, self._pod)
 
-        self._get_vif.assert_called_once_with(self._pod)
+        self._get_vifs.assert_called_once_with(self._pod)
         self._request_vif.assert_called_once_with(
             self._pod, self._project_id, self._subnets,
             self._security_groups)
-        self._set_vif.assert_called_once_with(self._pod, self._vif)
+        self._set_vifs.assert_called_once_with(self._pod, self._vifs)
         self._release_vif.assert_called_once_with(self._pod, self._vif,
                                                   self._project_id,
                                                   self._security_groups)
@@ -195,7 +199,7 @@ class TestVIFHandler(test_base.TestCase):
     def test_on_deleted(self):
         h_vif.VIFHandler.on_deleted(self._handler, self._pod)
 
-        self._get_vif.assert_called_once_with(self._pod)
+        self._get_vifs.assert_called_once_with(self._pod)
         self._release_vif.assert_called_once_with(self._pod, self._vif,
                                                   self._project_id,
                                                   self._security_groups)
@@ -205,13 +209,13 @@ class TestVIFHandler(test_base.TestCase):
 
         h_vif.VIFHandler.on_deleted(self._handler, self._pod)
 
-        self._get_vif.assert_not_called()
+        self._get_vifs.assert_not_called()
         self._release_vif.assert_not_called()
 
     def test_on_deleted_no_annotation(self):
-        self._get_vif.return_value = None
+        self._get_vifs.return_value = {}
 
         h_vif.VIFHandler.on_deleted(self._handler, self._pod)
 
-        self._get_vif.assert_called_once_with(self._pod)
+        self._get_vifs.assert_called_once_with(self._pod)
         self._release_vif.assert_not_called()