use trunk constants from neutron-lib

The trunk constants now live in neutron-lib. This patch consumes them
by removing neutron.services.trunk.constants and using them from
neutron-lib instead.

Depends-On: https://review.opendev.org/#/c/650372/

NeutronLibImpact

Change-Id: I4445c44c7e321d0fc35976d4d855c148bb9a3b18
This commit is contained in:
Boden R 2019-04-02 07:29:19 -06:00
parent 5d607a13ba
commit 88cca4cabe
18 changed files with 91 additions and 163 deletions

View File

@ -1,82 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: This module defines the string constants used by the trunk service
# plugin: trunk provisioning statuses, resource identifiers, and subport
# segmentation types. (These constants now also live in neutron-lib as
# neutron_lib.services.trunk.constants, with a TRUNK_/SEGMENTATION_TYPE_
# naming prefix.)

# Valid trunk statuses

# The trunk is happy, yay!
# A trunk remains in ACTIVE state when updates like name or admin_status_up
# occur. It goes back to ACTIVE state from other states (e.g. BUILD) when
# logical and physical resource provisioning has completed successfully. The
# attribute ADMIN_STATE_UP is not to be confused with STATUS: the former
# indicates whether a trunk can be managed. If a trunk has admin_state_up
# equal to false, the trunk plugin will reject any user request to manage
# the trunk resources (i.e. adding/removing sub-ports). ACTIVE_STATUS
# reflects the provisioning state of logical and physical resources associated
# with the trunk.
ACTIVE_STATUS = 'ACTIVE'

# A trunk is in DOWN state any time the logical and physical resources
# associated to a trunk are not in sync. This can happen in the following
# cases:
# a) A user has asked to create a trunk, or add(remove) subports to a
# trunk in ACTIVE state. In this case, the plugin has created/updated the
# logical resource, and the request has been passed along to a backend. The
# physical resources associated to the trunk are in the process of being
# (de)commissioned. While this happens, the logical and physical state are
# mismatching, albeit temporarily during subport operations, or until a user
# spawns a VM after a trunk creation.
# b) A system event, such as instance deletion, has led to the deprovisioning
# of the entire set of physical resources associated to the trunk. In this
# case, the logical resource exists but it has no physical resources
# associated with it, and the logical and physical state of the trunk are
# not matching.
DOWN_STATUS = 'DOWN'

# A driver/backend has acknowledged the server request: once the server
# notifies the driver/backend, a trunk is in BUILD state while the
# backend provisions the trunk resources.
BUILD_STATUS = 'BUILD'

# Should any temporary system failure occur during the provisioning process,
# a trunk is in DEGRADED state. This means that the trunk was only
# partially provisioned, and only a subset of the subports were added
# successfully to the trunk. The operation of removing/adding the faulty
# subports may be attempted as a recovery measure.
DEGRADED_STATUS = 'DEGRADED'

# Due to unforeseen circumstances, the user request has led to a conflict, and
# the trunk cannot be provisioned correctly for a subset of subports. For
# instance, a subport belonging to a network might not be compatible with
# the current trunk configuration, or the binding process leads to a persistent
# failure. Removing the 'offending' resource may be attempted as a recovery
# measure, but re-adding it to the trunk should lead to the same error
# condition. A trunk in ERROR status should be brought back to a sane status
# (i.e. any state except ERROR state) before attempting to add more subports,
# therefore requests of adding more subports must be rejected to avoid
# cascading errors.
ERROR_STATUS = 'ERROR'

# String literals for identifying trunk resources
PARENT_PORT = 'parent_port'
SUBPORTS = 'subports'
TRUNK = 'trunk'
TRUNK_PLUGIN = 'trunk_plugin'
# Device owner assigned to a port while it is bound as a trunk subport.
TRUNK_SUBPORT_OWNER = 'trunk:subport'

# String literals for segmentation types
VLAN = 'vlan'
# 'inherit' requests that the subport's segmentation details be derived
# from the network the subport belongs to (requires the provider extension).
INHERIT = 'inherit'

View File

@ -14,12 +14,12 @@
from neutron_lib.callbacks import events as local_events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources as local_resources
from neutron_lib.services.trunk import constants as t_const
from oslo_log import log as logging
import oslo_messaging
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.handlers import resources_rpc
from neutron.services.trunk import constants as t_const
from neutron.services.trunk.drivers.linuxbridge.agent import trunk_plumber
from neutron.services.trunk.rpc import agent as trunk_rpc
@ -104,7 +104,8 @@ class LinuxBridgeTrunkDriver(trunk_rpc.TrunkSkeleton):
self._tapi.bind_subports_to_host(context, trunk)
try:
self._plumber.ensure_trunk_subports(trunk)
self._tapi.set_trunk_status(context, trunk, t_const.ACTIVE_STATUS)
self._tapi.set_trunk_status(
context, trunk, t_const.TRUNK_ACTIVE_STATUS)
except Exception:
if not self._plumber.trunk_on_host(trunk):
LOG.debug("Trunk %s removed during wiring", trunk.port_id)
@ -112,7 +113,7 @@ class LinuxBridgeTrunkDriver(trunk_rpc.TrunkSkeleton):
# something broke
LOG.exception("Failure setting up subports for %s", trunk.port_id)
self._tapi.set_trunk_status(context, trunk,
t_const.DEGRADED_STATUS)
t_const.TRUNK_DEGRADED_STATUS)
class _TrunkAPI(object):

View File

@ -15,8 +15,8 @@ from oslo_log import log as logging
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.services.trunk import constants as trunk_consts
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk.drivers import base
LOG = logging.getLogger(__name__)
@ -26,7 +26,7 @@ SUPPORTED_INTERFACES = (
portbindings.VIF_TYPE_BRIDGE,
)
SUPPORTED_SEGMENTATION_TYPES = (
trunk_consts.VLAN,
trunk_consts.SEGMENTATION_TYPE_VLAN,
)

View File

@ -20,6 +20,7 @@ from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import context as n_context
from neutron_lib.services.trunk import constants
from oslo_concurrency import lockutils
from oslo_context import context as o_context
from oslo_log import log as logging
@ -32,7 +33,6 @@ from neutron.api.rpc.handlers import resources_rpc
from neutron.common import utils as common_utils
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as ovs_agent_constants
from neutron.services.trunk import constants
from neutron.services.trunk.drivers.openvswitch.agent import exceptions
from neutron.services.trunk.drivers.openvswitch.agent \
import trunk_manager as tman
@ -296,7 +296,7 @@ class OVSDBHandler(object):
# might cause troubles during deletion. Signal a DEGRADED status;
# if the user undo/redo the operation things may go back to
# normal.
return constants.DEGRADED_STATUS
return constants.TRUNK_DEGRADED_STATUS
LOG.debug("Added trunk: %s", trunk_id)
return self._get_current_status(subports, subport_ids)
@ -327,7 +327,7 @@ class OVSDBHandler(object):
# normal.
LOG.error("Failed to store metadata for trunk %(trunk_id)s: "
"%(reason)s", {'trunk_id': trunk_id, 'reason': e})
return constants.DEGRADED_STATUS
return constants.TRUNK_DEGRADED_STATUS
except exceptions.ParentPortNotFound as e:
# If a user deletes/migrates a VM and remove subports from a trunk
# in short sequence, there is a chance that we hit this spot in
@ -391,11 +391,12 @@ class OVSDBHandler(object):
'err': te})
# NOTE(status_police): Trunk couldn't be created so it ends in
# ERROR status and resync can fix that later.
self.report_trunk_status(ctx, trunk.id, constants.ERROR_STATUS)
self.report_trunk_status(
ctx, trunk.id, constants.TRUNK_ERROR_STATUS)
return
# We need to remove stale subports
unwire_status = constants.ACTIVE_STATUS
unwire_status = constants.TRUNK_ACTIVE_STATUS
if rewire:
old_subport_ids = self.get_connected_subports_for_trunk(trunk.id)
subports = {p['port_id'] for p in trunk.sub_ports}
@ -414,10 +415,10 @@ class OVSDBHandler(object):
trunk_bridge=trunk_br, parent_port=port)
if (unwire_status == wire_status and
wire_status == constants.ACTIVE_STATUS):
status = constants.ACTIVE_STATUS
wire_status == constants.TRUNK_ACTIVE_STATUS):
status = constants.TRUNK_ACTIVE_STATUS
else:
status = constants.DEGRADED_STATUS
status = constants.TRUNK_DEGRADED_STATUS
self.report_trunk_status(ctx, trunk.id, status)
def _set_trunk_metadata(self, trunk_bridge, port, trunk_id, subport_ids):
@ -476,9 +477,9 @@ class OVSDBHandler(object):
# a trunk_update_status to report the latest trunk status, but there
# can be exceptions (e.g. unwire_subports_for_trunk).
if len(expected_subports) != len(actual_subports):
return constants.DEGRADED_STATUS
return constants.TRUNK_DEGRADED_STATUS
else:
return constants.ACTIVE_STATUS
return constants.TRUNK_ACTIVE_STATUS
def _is_vm_connected(self, bridge):
"""True if an instance is connected to bridge, False otherwise."""

View File

@ -15,12 +15,12 @@ from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib.services.trunk import constants as trunk_consts
from oslo_config import cfg
from oslo_log import log as logging
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
constants as agent_consts)
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk.drivers import base
from neutron.services.trunk.drivers.openvswitch import utils
@ -34,7 +34,7 @@ SUPPORTED_INTERFACES = (
)
SUPPORTED_SEGMENTATION_TYPES = (
trunk_consts.VLAN,
trunk_consts.SEGMENTATION_TYPE_VLAN,
)
DRIVER = None

View File

@ -15,12 +15,12 @@
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
from neutron_lib.services.trunk import constants
import sqlalchemy as sa
from sqlalchemy import sql
from neutron.db import models_v2
from neutron.db import standard_attr
from neutron.services.trunk import constants
class Trunk(standard_attr.HasStandardAttributes, model_base.BASEV2,
@ -35,7 +35,8 @@ class Trunk(standard_attr.HasStandardAttributes, model_base.BASEV2,
nullable=False,
unique=True)
status = sa.Column(
sa.String(16), nullable=False, server_default=constants.ACTIVE_STATUS)
sa.String(16), nullable=False,
server_default=constants.TRUNK_ACTIVE_STATUS)
port = sa.orm.relationship(
models_v2.Port,

View File

@ -26,6 +26,7 @@ from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from neutron_lib.services.trunk import constants
from oslo_log import log as logging
from oslo_utils import uuidutils
@ -34,7 +35,6 @@ from neutron.db import db_base_plugin_common
from neutron.objects import base as objects_base
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import callbacks
from neutron.services.trunk import constants
from neutron.services.trunk import drivers
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import rules
@ -223,7 +223,7 @@ class TrunkPlugin(service_base.ServicePluginBase,
description=trunk_description,
project_id=trunk['tenant_id'],
port_id=trunk['port_id'],
status=constants.DOWN_STATUS,
status=constants.TRUNK_DOWN_STATUS,
sub_ports=sub_ports)
with db_api.autonested_transaction(context.session):
trunk_obj.create()
@ -308,10 +308,10 @@ class TrunkPlugin(service_base.ServicePluginBase,
# DOWN and thus can potentially overwrite an interleaving state
# change to ACTIVE. Eventually the driver should bring the status
# back to ACTIVE or ERROR.
if trunk.status == constants.ERROR_STATUS:
if trunk.status == constants.TRUNK_ERROR_STATUS:
raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
else:
trunk.update(status=constants.DOWN_STATUS)
trunk.update(status=constants.TRUNK_DOWN_STATUS)
for subport in subports:
obj = trunk_objects.SubPort(
@ -374,7 +374,7 @@ class TrunkPlugin(service_base.ServicePluginBase,
# Should a trunk be in DOWN or BUILD state (e.g. when dealing
# with multiple concurrent requests), the status is still forced
# to DOWN. See add_subports() for more details.
trunk.update(status=constants.DOWN_STATUS)
trunk.update(status=constants.TRUNK_DOWN_STATUS)
payload = callbacks.TrunkPayload(context, trunk_id,
current_trunk=trunk,
original_trunk=original_trunk,
@ -422,5 +422,6 @@ class TrunkPlugin(service_base.ServicePluginBase,
# NOTE(status_police) Trunk status goes to DOWN when the parent
# port is unbound. This means there are no more physical resources
# associated with the logical resource.
self.update_trunk(context, trunk_id,
{'trunk': {'status': constants.DOWN_STATUS}})
self.update_trunk(
context, trunk_id,
{'trunk': {'status': constants.TRUNK_DOWN_STATUS}})

View File

@ -18,6 +18,7 @@ from neutron_lib.api.definitions import portbindings
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from neutron_lib.services.trunk import constants as trunk_consts
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import oslo_messaging
@ -27,7 +28,6 @@ from neutron.api.rpc.callbacks.producer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk.rpc import constants
@ -119,7 +119,7 @@ class TrunkSkeleton(object):
# subport bindings. The trunk will stay in BUILD state until an
# attempt has been made to bind all subports passed here and the
# agent acknowledges the operation was successful.
trunk.update(status=trunk_consts.BUILD_STATUS)
trunk.update(status=trunk_consts.TRUNK_BUILD_STATUS)
for port_id in port_ids:
try:
@ -134,7 +134,7 @@ class TrunkSkeleton(object):
# NOTE(status_police) The subport binding has failed in a
# manner in which we cannot proceed and the user must take
# action to bring the trunk back to a sane state.
trunk.update(status=trunk_consts.ERROR_STATUS)
trunk.update(status=trunk_consts.TRUNK_ERROR_STATUS)
return []
except Exception as e:
msg = ("Failed to bind subport port %(port)s on trunk "
@ -142,7 +142,7 @@ class TrunkSkeleton(object):
LOG.error(msg, {'port': port_id, 'trunk': trunk.id, 'exc': e})
if len(port_ids) != len(updated_ports):
trunk.update(status=trunk_consts.DEGRADED_STATUS)
trunk.update(status=trunk_consts.TRUNK_DEGRADED_STATUS)
return updated_ports

View File

@ -22,10 +22,10 @@ from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.services.trunk import constants
from neutron._i18n import _
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import constants
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import utils
@ -193,7 +193,8 @@ class SubPortsValidator(object):
port_ids = {}
any_has_inherit = False
for i, s in enumerate(self.subports):
has_inherit = s.get('segmentation_type') == constants.INHERIT
has_inherit = (s.get('segmentation_type') ==
constants.SEGMENTATION_TYPE_INHERIT)
any_has_inherit |= has_inherit
port_ids[s['port_id']] = (
InheritIndex(index=i, has_inherit=has_inherit))
@ -202,7 +203,8 @@ class SubPortsValidator(object):
if (any_has_inherit and
not extensions.is_extension_supported(
core_plugin, provider.ALIAS)):
msg = _("Cannot accept segmentation type %s") % constants.INHERIT
msg = (_("Cannot accept segmentation type %s") %
constants.SEGMENTATION_TYPE_INHERIT)
raise n_exc.InvalidInput(error_message=msg)
ports = core_plugin.get_ports(context, filters={'id': port_ids})

View File

@ -13,9 +13,9 @@
# under the License.
from neutron_lib.plugins import utils as plugin_utils
from neutron_lib.services.trunk import constants as trunk_consts
from neutron._i18n import _
from neutron.services.trunk import constants as trunk_consts
# Base map of segmentation types supported with their respective validator
# functions. In multi-driver deployments all drivers must support the same
@ -23,7 +23,7 @@ from neutron.services.trunk import constants as trunk_consts
# and respective validator, however this is a configuration that may be
# supported only in single-driver deployments.
_supported = {
trunk_consts.VLAN: plugin_utils.is_valid_vlan_tag,
trunk_consts.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag,
}

View File

@ -14,7 +14,8 @@
import mock
from neutron.services.trunk import constants
from neutron_lib.services.trunk import constants
from neutron.services.trunk import plugin as trunk_plugin
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base

View File

@ -17,12 +17,12 @@ import itertools
import mock
from neutron_lib import exceptions as n_exc
from neutron_lib.services.trunk import constants
from oslo_db import exception as obj_exc
from oslo_utils import uuidutils
from neutron.objects.db import api as obj_db_api
from neutron.objects import trunk as t_obj
from neutron.services.trunk import constants
from neutron.services.trunk import exceptions as t_exc
from neutron.tests.unit.objects import test_base
from neutron.tests.unit import testlib_api
@ -157,9 +157,10 @@ class TrunkDbObjectTestCase(test_base.BaseDbObjectTestCase,
trunk = t_obj.Trunk(context=self.context,
admin_state_up=False,
port_id=self.db_objs[0]['port_id'],
status=constants.DOWN_STATUS)
status=constants.TRUNK_DOWN_STATUS)
trunk.create()
fields = {'admin_state_up': True, 'status': constants.ACTIVE_STATUS}
fields = {'admin_state_up': True,
'status': constants.TRUNK_ACTIVE_STATUS}
trunk.update(**fields)
trunk = t_obj.Trunk.get_object(self.context, id=trunk.id)

View File

@ -14,6 +14,7 @@
import mock
from neutron_lib.callbacks import events as cb_events
from neutron_lib.services.trunk import constants as t_const
import oslo_messaging
from oslo_utils import uuidutils
import testtools
@ -21,7 +22,6 @@ import testtools
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects import trunk
from neutron.services.trunk import constants as t_const
from neutron.services.trunk.drivers.linuxbridge.agent import driver
from neutron.services.trunk.drivers.linuxbridge.agent import trunk_plumber
from neutron.tests import base
@ -122,7 +122,7 @@ class LinuxBridgeTrunkDriverTestCase(base.BaseTestCase):
'ctx', self.trunk)
self.plumber.ensure_trunk_subports.assert_called_once_with(self.trunk)
self.tapi.set_trunk_status.assert_called_once_with(
'ctx', self.trunk, t_const.ACTIVE_STATUS)
'ctx', self.trunk, t_const.TRUNK_ACTIVE_STATUS)
def test_wire_trunk_not_on_host(self):
# trunk device not on host
@ -144,7 +144,7 @@ class LinuxBridgeTrunkDriverTestCase(base.BaseTestCase):
self.lbd.wire_trunk('ctx', self.trunk)
# degraded due to dataplane failure
self.tapi.set_trunk_status.assert_called_once_with(
'ctx', self.trunk, t_const.DEGRADED_STATUS)
'ctx', self.trunk, t_const.TRUNK_DEGRADED_STATUS)
class TrunkAPITestCase(base.BaseTestCase):

View File

@ -15,6 +15,7 @@
import mock
from neutron_lib.services.trunk import constants
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
@ -23,7 +24,6 @@ import testtools
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import utils
from neutron.objects import trunk as trunk_obj
from neutron.services.trunk import constants
from neutron.services.trunk.drivers.openvswitch.agent import exceptions
from neutron.services.trunk.drivers.openvswitch.agent import ovsdb_handler
from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager
@ -205,7 +205,7 @@ class TestOVSDBHandler(base.BaseTestCase):
status = self.ovsdb_handler.wire_subports_for_trunk(
None, self.trunk_id, self.fake_subports)
self.assertTrue(f.call_count)
self.assertEqual(constants.DEGRADED_STATUS, status)
self.assertEqual(constants.TRUNK_DEGRADED_STATUS, status)
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge')
def test_wire_subports_for_trunk_ovsdb_failure(self, br):
@ -215,7 +215,7 @@ class TestOVSDBHandler(base.BaseTestCase):
side_effect=RuntimeError):
status = self.ovsdb_handler.wire_subports_for_trunk(
None, self.trunk_id, self.fake_subports)
self.assertEqual(constants.DEGRADED_STATUS, status)
self.assertEqual(constants.TRUNK_DEGRADED_STATUS, status)
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge')
def test_unwire_subports_for_trunk_port_not_found(self, br):
@ -225,7 +225,7 @@ class TestOVSDBHandler(base.BaseTestCase):
side_effect=exceptions.ParentPortNotFound(bridge='foo_br')):
status = self.ovsdb_handler.unwire_subports_for_trunk(
self.trunk_id, ['subport_id'])
self.assertEqual(constants.ACTIVE_STATUS, status)
self.assertEqual(constants.TRUNK_ACTIVE_STATUS, status)
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge')
def test_unwire_subports_for_trunk_trunk_manager_failure(self, br):
@ -236,7 +236,7 @@ class TestOVSDBHandler(base.BaseTestCase):
status = self.ovsdb_handler.unwire_subports_for_trunk(
'foo_trunk_id', ['subport_id'])
self.assertTrue(f.call_count)
self.assertEqual(constants.DEGRADED_STATUS, status)
self.assertEqual(constants.TRUNK_DEGRADED_STATUS, status)
def test__wire_trunk_get_trunk_details_failure(self):
self.trunk_manager.get_port_uuid_from_external_ids.side_effect = (
@ -257,7 +257,7 @@ class TestOVSDBHandler(base.BaseTestCase):
self.ovsdb_handler._wire_trunk(mock.Mock(), self.fake_port)
trunk_rpc = self.ovsdb_handler.trunk_rpc
trunk_rpc.update_trunk_status.assert_called_once_with(
mock.ANY, mock.ANY, constants.ERROR_STATUS)
mock.ANY, mock.ANY, constants.TRUNK_ERROR_STATUS)
def test__wire_trunk_rewire_trunk_failure(self):
with mock.patch.object(self.ovsdb_handler,
@ -265,12 +265,12 @@ class TestOVSDBHandler(base.BaseTestCase):
mock.patch.object(self.ovsdb_handler,
'get_connected_subports_for_trunk') as g:
g.return_value = ['stale_port']
f.return_value = constants.DEGRADED_STATUS
f.return_value = constants.TRUNK_DEGRADED_STATUS
self.ovsdb_handler._wire_trunk(
mock.Mock(), self.fake_port, rewire=True)
trunk_rpc = self.ovsdb_handler.trunk_rpc
trunk_rpc.update_trunk_status.assert_called_once_with(
mock.ANY, mock.ANY, constants.DEGRADED_STATUS)
mock.ANY, mock.ANY, constants.TRUNK_DEGRADED_STATUS)
def test__wire_trunk_report_trunk_called_on_wiring(self):
with mock.patch.object(self.trunk_manager, 'create_trunk'),\
@ -293,11 +293,11 @@ class TestOVSDBHandler(base.BaseTestCase):
'subport_ids': '[]'})
def test__get_current_status_active(self):
self.assertEqual(constants.ACTIVE_STATUS,
self.assertEqual(constants.TRUNK_ACTIVE_STATUS,
self.ovsdb_handler._get_current_status([], []))
def test__get_current_status_degraded(self):
self.assertEqual(constants.DEGRADED_STATUS,
self.assertEqual(constants.TRUNK_DEGRADED_STATUS,
self.ovsdb_handler._get_current_status(
[mock.ANY], []))

View File

@ -15,13 +15,13 @@ import mock
from neutron_lib.api.definitions import portbindings
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from neutron_lib.services.trunk import constants
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects import trunk as trunk_obj
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.services.trunk import constants
from neutron.services.trunk import drivers
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import plugin as trunk_plugin
@ -96,7 +96,7 @@ class TrunkSkeletonTest(test_plugin.Ml2PluginV2TestCase):
subports=subports)
trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id'])
self.assertEqual(trunk.status, constants.BUILD_STATUS)
self.assertEqual(trunk.status, constants.TRUNK_BUILD_STATUS)
self.assertIn(trunk.id, updated_subports)
for port in updated_subports[trunk['id']]:
self.assertEqual('trunk_host_id', port[portbindings.HOST_ID])
@ -144,7 +144,7 @@ class TrunkSkeletonTest(test_plugin.Ml2PluginV2TestCase):
subports=subports)
trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id'])
self.assertEqual(trunk.status, constants.ERROR_STATUS)
self.assertEqual(trunk.status, constants.TRUNK_ERROR_STATUS)
self.assertEqual([], updated_subports[trunk.id])
def test_update_subport_bindings_exception(self):
@ -176,7 +176,7 @@ class TrunkSkeletonTest(test_plugin.Ml2PluginV2TestCase):
subports=subports)
trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id'])
self.assertEqual([], updated_subports.get(trunk.id))
self.assertEqual(constants.DEGRADED_STATUS, trunk.status)
self.assertEqual(constants.TRUNK_DEGRADED_STATUS, trunk.status)
class TrunkStubTest(base.BaseTestCase):

View File

@ -13,7 +13,8 @@
import mock
from neutron.services.trunk import constants
from neutron_lib.services.trunk import constants
from neutron.services.trunk.seg_types import validators
from neutron.tests import base
@ -23,14 +24,15 @@ class ValidatorsTestCase(base.BaseTestCase):
def test_add_validator_raises_keyerror_on_redefinition(self):
self.assertRaises(KeyError,
validators.add_validator,
constants.VLAN, mock.ANY)
constants.SEGMENTATION_TYPE_VLAN, mock.ANY)
def test_add_validator_add_new_type(self):
validators.add_validator('foo', lambda: None)
self.assertIn('foo', validators._supported)
def test_get_validator(self):
self.assertIsNotNone(validators.get_validator(constants.VLAN))
self.assertIsNotNone(validators.get_validator(
constants.SEGMENTATION_TYPE_VLAN))
def test_get_validator_raises_keyerror_on_missing_validator(self):
self.assertRaises(KeyError,

View File

@ -19,11 +19,11 @@ from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.plugins import directory
from neutron_lib.services.trunk import constants
import testtools
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import callbacks
from neutron.services.trunk import constants
from neutron.services.trunk import drivers
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import plugin as trunk_plugin
@ -257,13 +257,13 @@ class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase):
with self.port() as port:
trunk = self._create_test_trunk(port)
self.assertEqual(
constants.DOWN_STATUS, trunk['status'])
constants.TRUNK_DOWN_STATUS, trunk['status'])
def test_add_subports_trunk_in_error_state_raises(self):
with self.port() as port, self.port() as subport:
trunk = self._create_test_trunk(port)
trunk_obj = self._get_trunk_obj(trunk['id'])
trunk_obj.status = constants.ERROR_STATUS
trunk_obj.status = constants.TRUNK_ERROR_STATUS
trunk_obj.update()
s = create_subport_dict(subport['port']['id'])
self.assertRaises(trunk_exc.TrunkInErrorState,
@ -274,24 +274,24 @@ class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase):
with self.port() as port, self.port() as subport:
trunk = self._create_test_trunk(port)
trunk_obj = self._get_trunk_obj(trunk['id'])
trunk_obj.status = constants.ACTIVE_STATUS
trunk_obj.status = constants.TRUNK_ACTIVE_STATUS
trunk_obj.update()
s = create_subport_dict(subport['port']['id'])
trunk = self.trunk_plugin.add_subports(
self.context, trunk['id'], {'sub_ports': [s]})
self.assertEqual(constants.DOWN_STATUS, trunk['status'])
self.assertEqual(constants.TRUNK_DOWN_STATUS, trunk['status'])
def test_remove_subports_trunk_goes_to_down(self):
with self.port() as port, self.port() as subport:
s = create_subport_dict(subport['port']['id'])
trunk = self._create_test_trunk(port, [s])
trunk_obj = self._get_trunk_obj(trunk['id'])
trunk_obj.status = constants.ACTIVE_STATUS
trunk_obj.status = constants.TRUNK_ACTIVE_STATUS
trunk_obj.update()
trunk = self.trunk_plugin.remove_subports(
self.context, trunk['id'],
{'sub_ports': [{'port_id': subport['port']['id']}]})
self.assertEqual(constants.DOWN_STATUS, trunk['status'])
self.assertEqual(constants.TRUNK_DOWN_STATUS, trunk['status'])
def test__trigger_trunk_status_change_vif_type_changed_unbound(self):
callback = register_mock_callback(resources.TRUNK, events.AFTER_UPDATE)
@ -301,7 +301,8 @@ class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase):
original_trunk, current_trunk = (
self._test__trigger_trunk_status_change(
parent, original_port,
constants.ACTIVE_STATUS, constants.DOWN_STATUS))
constants.TRUNK_ACTIVE_STATUS,
constants.TRUNK_DOWN_STATUS))
payload = callbacks.TrunkPayload(self.context, original_trunk['id'],
original_trunk=original_trunk,
current_trunk=current_trunk)
@ -313,19 +314,17 @@ class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase):
with self.port() as parent:
parent[portbindings.VIF_TYPE] = 'fakeviftype'
original_port = {portbindings.VIF_TYPE: 'fakeviftype'}
self._test__trigger_trunk_status_change(parent,
original_port,
constants.ACTIVE_STATUS,
constants.ACTIVE_STATUS)
self._test__trigger_trunk_status_change(
parent, original_port, constants.TRUNK_ACTIVE_STATUS,
constants.TRUNK_ACTIVE_STATUS)
def test__trigger_trunk_status_change_vif_type_changed(self):
with self.port() as parent:
parent[portbindings.VIF_TYPE] = 'realviftype'
original_port = {portbindings.VIF_TYPE: 'fakeviftype'}
self._test__trigger_trunk_status_change(parent,
original_port,
constants.ACTIVE_STATUS,
constants.ACTIVE_STATUS)
self._test__trigger_trunk_status_change(
parent, original_port, constants.TRUNK_ACTIVE_STATUS,
constants.TRUNK_ACTIVE_STATUS)
def _test__trigger_trunk_status_change(self, new_parent,
original_parent,

View File

@ -22,9 +22,9 @@ from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.plugins import utils as plugin_utils
from neutron_lib.services.trunk import constants
from oslo_utils import uuidutils
from neutron.services.trunk import constants
from neutron.services.trunk import drivers
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import plugin as trunk_plugin
@ -40,7 +40,7 @@ class SubPortsValidatorTestCase(base.BaseTestCase):
def setUp(self):
super(SubPortsValidatorTestCase, self).setUp()
self.segmentation_types = {
constants.VLAN: plugin_utils.is_valid_vlan_tag}
constants.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag}
self.context = mock.ANY
mock.patch.object(rules.SubPortsValidator, '_get_port_mtu',
@ -132,7 +132,7 @@ class SubPortsValidatorPrepareTestCase(base.BaseTestCase):
def setUp(self):
super(SubPortsValidatorPrepareTestCase, self).setUp()
self.segmentation_types = {
constants.VLAN: plugin_utils.is_valid_vlan_tag}
constants.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag}
self.context = mock.ANY
mock.patch.object(rules.SubPortsValidator, '_get_port_mtu',
@ -153,7 +153,7 @@ class SubPortsValidatorMtuSanityTestCase(test_plugin.Ml2PluginV2TestCase):
def setUp(self):
super(SubPortsValidatorMtuSanityTestCase, self).setUp()
self.segmentation_types = {
constants.VLAN: plugin_utils.is_valid_vlan_tag}
constants.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag}
def test_validate_subport_mtu_same_as_trunk(self):
self._test_validate_subport_trunk_mtu(1500, 1500)
@ -232,8 +232,9 @@ class TrunkPortValidatorTestCase(test_plugin.Ml2PluginV2TestCase):
self.compat_patch = mock.patch.object(
trunk_plugin.TrunkPlugin, 'check_compatibility').start()
self.trunk_plugin = trunk_plugin.TrunkPlugin()
self.trunk_plugin.add_segmentation_type(constants.VLAN,
plugin_utils.is_valid_vlan_tag)
self.trunk_plugin.add_segmentation_type(
constants.SEGMENTATION_TYPE_VLAN,
plugin_utils.is_valid_vlan_tag)
def test_validate_port_parent_in_use_by_trunk(self):
with self.port() as trunk_parent: