From b0dfd35d76468986682a966c1fb609952fafc44f Mon Sep 17 00:00:00 2001
From: Yash Gupta
Date: Mon, 27 Aug 2018 17:43:27 +0900
Subject: [PATCH] Use same pool_driver for different pod_vif_drivers

New config option vif_pool_mapping is added and pools_vif_drivers is
deprecated. The new vif_pool_mapping is simply the inverted mapping of
pools_vif_drivers.

Also, with the scoping ability added in cb2d308f84, independent drv_pool
instances can be acquired from the base driver manager. Earlier, a single
instance of drv_pool was shared by all pod_drivers, which meant that, for
a given drv_pool, only the pod_driver passed last to
drv_pool.set_vif_driver was actually used.

Please see release notes for further details.

Related-Bug: 1747406
Change-Id: Id0137f6b1a78e7aa3e3d3de639a5e989f4fd408c
Signed-off-by: Yash Gupta
---
 doc/source/installation/ports-pool.rst        | 20 ++++++++--
 .../controller/drivers/vif_pool.py            | 38 +++++++++++++++++--
 ...eusable-pool-drivers-00e7fdc1f4738441.yaml | 31 +++++++++++++++
 3 files changed, 81 insertions(+), 8 deletions(-)
 create mode 100644 releasenotes/notes/reusable-pool-drivers-00e7fdc1f4738441.yaml

diff --git a/doc/source/installation/ports-pool.rst b/doc/source/installation/ports-pool.rst
index 3fa434e01..9a7b33623 100644
--- a/doc/source/installation/ports-pool.rst
+++ b/doc/source/installation/ports-pool.rst
@@ -101,10 +101,10 @@ different VIF drivers (e.g., neutron and nested-vlan).
 
 This new multi pool driver is the default pool driver used even if a different
 vif_pool_driver is set at the config option. However if the configuration
-about the mappings between the different pools and pod vif drivers is not
-provided at the pools_vif_drivers config option of vif_pool configuration
+about the mappings between the different pod vif and pool drivers is not
+provided at the vif_pool_mapping config option of vif_pool configuration
 section only one pool driver will be loaded -- using the standard
-vif_pool_driver and pod_vif_driver config options, i.e., using the one
+pod_vif_driver and vif_pool_driver config options, i.e., using the one
 selected at kuryr.conf options.
 
 To enable the option of having different pools depending on the node's pod
@@ -114,7 +114,7 @@ driver, e.g.:
 .. code-block:: ini
 
    [vif_pool]
-   pools_vif_drivers=nested:nested-vlan,neutron:neutron-vif
+   vif_pool_mapping=nested-vlan:nested,neutron-vif:neutron
 
 This will use a pool driver nested to handle the pods whose vif driver is
 nested-vlan, and a pool driver neutron to handle the pods whose vif driver is
@@ -123,5 +123,17 @@ will first read the node's annotation about pod_vif driver to use, e.g.,
 pod_vif: nested-vlan, and then use the corresponding pool driver -- which has
 the right pod-vif driver set.
 
+.. note::
+
+   Previously, the ``pools_vif_drivers`` configuration option provided
+   similar functionality, but it is now deprecated. It stored the inverse
+   mapping, pool_driver => pod_vif_driver, which made it impossible to
+   reuse a single pool driver (the dict key) for multiple pod_vif_drivers:
+
+   .. code-block:: ini
+
+      [vif_pool]
+      pools_vif_drivers=nested:nested-vlan,neutron:neutron-vif
+
 Note that if no annotation is set on a node, the default pod_vif_driver is
 used.
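As an illustration (not part of the patch): the note above says the old
pool_driver => pod_vif_driver direction cannot express one pool driver shared
by several pod_vif_drivers, because the pool driver is the dict key. A minimal
standalone Python sketch of that key collision; parse_dict_opt only mimics
oslo.config's DictOpt "key:value,key:value" format, and the sriov name is
illustrative:

    # Simplified stand-in for oslo.config DictOpt parsing ("k:v,k:v").
    def parse_dict_opt(raw):
        return dict(item.split(':', 1) for item in raw.split(','))

    # Deprecated direction: reusing 'nested' as a key collapses entries,
    # so the second pod_vif driver silently wins.
    print(parse_dict_opt('nested:nested-vlan,nested:sriov'))
    # -> {'nested': 'sriov'}

    # New vif_pool_mapping direction: pod_vif drivers are the keys, so
    # both can point at the same 'nested' pool driver.
    print(parse_dict_opt('nested-vlan:nested,sriov:nested'))
    # -> {'nested-vlan': 'nested', 'sriov': 'nested'}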
diff --git a/kuryr_kubernetes/controller/drivers/vif_pool.py b/kuryr_kubernetes/controller/drivers/vif_pool.py
index f6f54d712..63a910bd4 100644
--- a/kuryr_kubernetes/controller/drivers/vif_pool.py
+++ b/kuryr_kubernetes/controller/drivers/vif_pool.py
@@ -26,6 +26,7 @@ from oslo_cache import core as cache
 from oslo_concurrency import lockutils
 from oslo_config import cfg as oslo_cfg
 from oslo_log import log as logging
+from oslo_log import versionutils
 from oslo_serialization import jsonutils
 
 from kuryr_kubernetes import clients
@@ -61,6 +62,17 @@ vif_pool_driver_opts = [
                            "used. If not set, it will take them from the "
                            "kubernetes driver options for pool and pod "
                            "drivers respectively"),
-                     default={}),
+                     default={}, deprecated_for_removal=True,
+                     deprecated_since="Stein",
+                     deprecated_reason=_(
+                         "Mapping from pool->vif does not allow different "
+                         "vifs to use the same pool driver. "
+                         "Use vif_pool_mapping instead.")),
+    oslo_cfg.DictOpt('vif_pool_mapping',
+                     help=_("Dict with the pod driver and the corresponding "
+                            "pool driver to be used. If not set, it will take "
+                            "them from the kubernetes driver options for pool "
+                            "and pod drivers respectively"),
+                     default={}),
 ]
 
@@ -775,15 +787,16 @@ class MultiVIFPool(base.VIFPoolDriver):
 
     def set_vif_driver(self):
         self._vif_drvs = {}
-        pools_vif_drivers = oslo_cfg.CONF.vif_pool.pools_vif_drivers
-        if not pools_vif_drivers:
+        vif_pool_mapping = self._get_vif_pool_mapping()
+
+        if not vif_pool_mapping:
             pod_vif = oslo_cfg.CONF.kubernetes.pod_vif_driver
             drv_vif = base.PodVIFDriver.get_instance()
             drv_pool = base.VIFPoolDriver.get_instance()
             drv_pool.set_vif_driver(drv_vif)
             self._vif_drvs[pod_vif] = drv_pool
             return
-        for pool_driver, pod_driver in pools_vif_drivers.items():
+        for pod_driver, pool_driver in vif_pool_mapping.items():
             if not utils.check_suitable_multi_pool_driver_opt(pool_driver,
                                                               pod_driver):
                 LOG.error("The pool and pod driver selected are not "
@@ -792,7 +805,7 @@ class MultiVIFPool(base.VIFPoolDriver):
             drv_vif = base.PodVIFDriver.get_instance(
                 specific_driver=pod_driver)
             drv_pool = base.VIFPoolDriver.get_instance(
-                specific_driver=pool_driver)
+                specific_driver=pool_driver, scope='for:{}'.format(pod_driver))
             drv_pool.set_vif_driver(drv_vif)
             self._vif_drvs[pod_driver] = drv_pool
 
@@ -839,3 +852,20 @@ class MultiVIFPool(base.VIFPoolDriver):
     def _get_vif_drv_alias(self, vif):
         vif_type_name = type(vif).__name__
         return VIF_TYPE_TO_DRIVER_MAPPING[vif_type_name]
+
+    def _get_vif_pool_mapping(self):
+        vif_pool_mapping = oslo_cfg.CONF.vif_pool.vif_pool_mapping
+
+        if not vif_pool_mapping:
+            pools_vif_drivers = oslo_cfg.CONF.vif_pool.pools_vif_drivers
+
+            if pools_vif_drivers:
+                msg = ("Config option vif_pool.pools_vif_drivers is "
+                       "deprecated in favour of vif_pool.vif_pool_mapping, "
+                       "and will be removed in a future release")
+                versionutils.report_deprecated_feature(LOG, msg)
+
+            for pool_driver, pod_driver in pools_vif_drivers.items():
+                vif_pool_mapping[pod_driver] = pool_driver
+
+        return vif_pool_mapping
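As an illustration (not part of the patch): the scope='for:{}'.format(pod_driver)
argument above relies on the scoping support the commit message attributes to
cb2d308f84. A minimal sketch of the failure mode it fixes, using a toy driver
manager rather than kuryr's base.VIFPoolDriver:

    class PoolDriver(object):
        def set_vif_driver(self, drv):
            self.vif_driver = drv

    _instances = {}

    def get_instance(scope='default'):
        # One instance per scope; pre-patch, every caller shared 'default'.
        return _instances.setdefault(scope, PoolDriver())

    # Pre-patch behaviour: both pod_vif drivers get the same object, so
    # the second set_vif_driver() call overwrites the first.
    a = get_instance()
    b = get_instance()
    a.set_vif_driver('nested-vlan')
    b.set_vif_driver('neutron-vif')
    print(a.vif_driver)  # neutron-vif -- the nested-vlan setting is lost

    # Post-patch: per-pod_driver scopes yield independent pool instances.
    c = get_instance(scope='for:nested-vlan')
    d = get_instance(scope='for:neutron-vif')
    c.set_vif_driver('nested-vlan')
    d.set_vif_driver('neutron-vif')
    print(c.vif_driver, d.vif_driver)  # nested-vlan neutron-vif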
diff --git a/releasenotes/notes/reusable-pool-drivers-00e7fdc1f4738441.yaml b/releasenotes/notes/reusable-pool-drivers-00e7fdc1f4738441.yaml
new file mode 100644
index 000000000..c440c801c
--- /dev/null
+++ b/releasenotes/notes/reusable-pool-drivers-00e7fdc1f4738441.yaml
@@ -0,0 +1,31 @@
+---
+features:
+  - |
+    It is now possible to use the same pool_driver for different
+    pod_vif_drivers when using the MultiVIFPool driver.
+
+    A new config option ``vif_pool.vif_pool_mapping`` is introduced, which
+    is a dict/mapping from pod_vif_driver => pool_driver, so different
+    pod_vif_drivers can be configured to use the same pool_driver.
+
+    .. code-block:: ini
+
+       [vif_pool]
+       vif_pool_mapping=nested-vlan:nested,neutron-vif:neutron
+
+    Earlier, each instance of a pool_driver was mapped to a single
+    pod_driver, thus requiring a unique pool_driver for each pod_vif_driver.
+upgrade:
+  - |
+    If the ``vif_pool.pools_vif_drivers`` config option is used, the new
+    ``vif_pool.vif_pool_mapping`` option should be populated with the
+    inverted mapping of the present ``vif_pool.pools_vif_drivers`` value.
+deprecations:
+  - |
+    Configuration option ``vif_pool.pools_vif_drivers`` has been deprecated
+    in favour of ``vif_pool.vif_pool_mapping`` to allow reuse of
+    pool_drivers for different pod_vif_drivers.
+
+    If ``vif_pool_mapping`` is not configured, ``pools_vif_drivers`` will
+    continue to work for now, but it will be removed completely in a
+    future release.
\ No newline at end of file
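As an illustration (not part of the patch): the upgrade note asks operators to
populate vif_pool_mapping with the inverted pools_vif_drivers mapping, which
is also what _get_vif_pool_mapping() does at runtime for backward
compatibility. A small standalone sketch of that inversion, using the
documented example values; invert_pools_vif_drivers is a hypothetical helper:

    def invert_pools_vif_drivers(pools_vif_drivers):
        # {pool_driver: pod_vif_driver} -> {pod_vif_driver: pool_driver}
        return {pod_driver: pool_driver
                for pool_driver, pod_driver in pools_vif_drivers.items()}

    # pools_vif_drivers=nested:nested-vlan,neutron:neutron-vif becomes:
    old = {'nested': 'nested-vlan', 'neutron': 'neutron-vif'}
    print(invert_pools_vif_drivers(old))
    # -> {'nested-vlan': 'nested', 'neutron-vif': 'neutron'}
    # i.e. vif_pool_mapping=nested-vlan:nested,neutron-vif:neutron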