Add support for xena
Change-Id: Id533f439bc0e832b03a20352aca903cbc7ba212e
parent 6806711423
commit ada3eee179
Changed paths:
  .zuul.yaml
  test-requirements.txt
  tox.ini
  devstack
  gbpservice
    neutron
      plugins/ml2plus
      services
      tests/unit/plugins/ml2plus
    tests/contrib

.zuul.yaml (18 changed lines)
@@ -1,7 +1,7 @@
 - project:
 name: x/group-based-policy
 templates:
-- openstack-python3-wallaby-jobs
+- openstack-python3-xena-jobs
 - publish-to-pypi
 # REVISIT: In the jobs below, the required-projects clause is needed on
 # the master branch to select the correct version of the requirements
@@ -15,22 +15,22 @@
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
 - openstack-tox-py36:
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
 - openstack-tox-py37:
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
 - openstack-tox-py38:
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
 - legacy-group-based-policy-dsvm-functional:
 voting: false
 - legacy-group-based-policy-dsvm-aim:
@@ -43,19 +43,19 @@
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
 - openstack-tox-py36:
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
 - openstack-tox-py37:
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
 - openstack-tox-py38:
 nodeset: ubuntu-bionic
 required-projects:
 - name: openstack/requirements
-override-checkout: stable/wallaby
+override-checkout: stable/xena
@@ -25,7 +25,7 @@ NEUTRON_CONF_DIR=/etc/neutron
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 NFP_CONF_DIR=/etc/nfp
 DISKIMAGE_CREATE_DIR=$NFPSERVICE_DIR/gbpservice/contrib/nfp/tools/image_builder
-NEUTRON_SRC_BRANCH_FOR_NFP_CONTROLLER=stable/wallaby
+NEUTRON_SRC_BRANCH_FOR_NFP_CONTROLLER=stable/xena

 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -43,11 +43,11 @@ if [[ $ENABLE_NFP = True ]]; then
 # Make sure that your public interface is not attached to any bridge.
 PUBLIC_INTERFACE=

-enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/wallaby
-enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/wallaby
-enable_plugin neutron https://opendev.org/openstack/neutron.git stable/wallaby
-enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/wallaby
-enable_plugin octavia https://opendev.org/openstack/octavia.git stable/wallaby
+enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/xena
+enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/xena
+enable_plugin neutron https://opendev.org/openstack/neutron.git stable/xena
+enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/xena
+enable_plugin octavia https://opendev.org/openstack/octavia.git stable/xena

 fi
 fi
@@ -424,7 +424,7 @@ class ExtensionDbMixin(object):
 external_network_dn=sa.bindparam('dn'))
 if lock_update:
 # REVISIT: Eliminate locking.
-query += lambda q: q.with_lockmode('update')
+query += lambda q: q.with_for_update()
 ids = query(session).params(dn=dn)

 return [i[0] for i in ids]
@@ -437,7 +437,7 @@ class ExtensionDbMixin(object):
 sa.bindparam('dn') + "/%"))
 if lock_update:
 # REVISIT: Eliminate locking.
-query += lambda q: q.with_lockmode('update')
+query += lambda q: q.with_for_update()
 ids = query(session).params(dn=dn)

 return [i[0] for i in ids]
@@ -451,7 +451,7 @@
 NetworkExtensionDb.svi == true())
 if lock_update:
 # REVISIT: Eliminate locking.
-query += lambda q: q.with_lockmode('update')
+query += lambda q: q.with_for_update()
 ids = query(session).params(dn=dn)

 return [i[0] for i in ids]
@@ -470,7 +470,7 @@
 query += lambda q: q.distinct()
 if lock_update:
 # REVISIT: Eliminate locking.
-query += lambda q: q.with_lockmode('update')
+query += lambda q: q.with_for_update()
 cidrs = query(session).params(dn=dn)

 return [c[0] for c in cidrs]
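Newer SQLAlchemy releases drop Query.with_lockmode(); with_for_update() is the supported spelling for a SELECT ... FOR UPDATE row lock, which is what every locked lookup above now uses. For reference, a minimal self-contained sketch of the baked-query pattern these helpers follow (the NetExt model and the helper name are illustrative stand-ins, not the project's real classes):

    import sqlalchemy as sa
    from sqlalchemy.ext import baked
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class NetExt(Base):
        # Hypothetical stand-in for the NetworkExtensionDb model.
        __tablename__ = 'net_ext'
        network_id = sa.Column(sa.String(36), primary_key=True)
        external_network_dn = sa.Column(sa.String(1024))

    bakery = baked.bakery()

    def network_ids_for_dn(session, dn, lock_update=False):
        # Build the cached (baked) query step by step.
        query = bakery(lambda s: s.query(NetExt.network_id))
        query += lambda q: q.filter(
            NetExt.external_network_dn == sa.bindparam('dn'))
        if lock_update:
            # REVISIT: Eliminate locking.
            query += lambda q: q.with_for_update()
        return [row[0] for row in query(session).params(dn=dn)]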
@@ -1904,19 +1904,22 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
 self.aim.update(aim_ctx, vrf, display_name=dname)

 @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE])
-def _create_router_precommit(self, resource, event, trigger, context,
-router, router_id, router_db):
+def _create_router_precommit(self, resource, event, trigger, payload):
+context = payload.context
+router = payload.states
+router_id = payload.resource_id

 LOG.debug("APIC AIM MD creating router: %s", router)

 session = context.session
 aim_ctx = aim_context.AimContext(session)

 # Persist extension attributes.
-self.l3_plugin.set_router_extn_db(session, router_id, router)
+self.l3_plugin.set_router_extn_db(session, router_id, router[0])

-contract, subject = self._map_router(session, router)
+contract, subject = self._map_router(session, router[0])

-dname = aim_utils.sanitize_display_name(router['name'])
+dname = aim_utils.sanitize_display_name(router[0]['name'])

 contract.display_name = dname
 self.aim.create(aim_ctx, contract)
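The router, port and trunk callbacks in this driver now receive a single neutron-lib DBEventPayload object instead of loose keyword arguments: the request context, the resource id, the resource state(s) and any extra metadata all hang off that payload. A minimal standalone receiver in the same style (the function name is illustrative):

    from neutron_lib.callbacks import events, registry, resources

    @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE])
    def log_router_create(resource, event, trigger, payload):
        # DBEventPayload attributes used throughout this change.
        context = payload.context        # request/admin context
        router_id = payload.resource_id  # id of the router being created
        router = payload.states[0]       # the router dict itself
        extra = payload.metadata or {}   # event-specific extras
        return context, router_id, router, extra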
@@ -2034,8 +2037,11 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
 self._add_postcommit_port_notifications(context, affected_port_ids)

 @registry.receives(resources.ROUTER, [events.PRECOMMIT_DELETE])
-def _delete_router_precommit(self, resource, event, trigger, context,
-router_db, router_id):
+def _delete_router_precommit(self, resource, event, trigger, payload):
+context = payload.context
+router_id = payload.resource_id
+router_db = payload.states[0]

 LOG.debug("APIC AIM MD deleting router: %s", router_id)

 session = context.session
@@ -3046,9 +3052,15 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
 # orig_binding and new_binding kwargs, and it is generated with
 # those args from _commit_port_binding.
 @registry.receives(resources.PORT, [events.BEFORE_UPDATE])
-def _before_update_port(self, resource, event, trigger, context,
-port, original_port,
+def _before_update_port(self, resource, event, trigger, payload,
 orig_binding=None, new_binding=None):
+context = payload.context
+original_port = payload.states[0]
+port = payload.states[1]
+if payload.metadata:
+orig_binding = payload.metadata['orig_binding']
+new_binding = payload.metadata['new_binding']

 if self._is_port_bound(original_port) and 'fixed_ips' in port:
 # When a bound port is updated with a subnet, if the port
 # is on a SVI network, we need to ensure that the SVI ports
@@ -5547,7 +5559,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
 [events.AFTER_CREATE, events.AFTER_DELETE])
 def _after_subport_event(self, resource, event, trunk_plugin, payload):
 context = payload.context
-subports = payload.subports
+subports = payload.metadata['subports']
 first_subport_id = subports[0].port_id
 # This is only needed for baremetal VNIC types, as they don't
 # have agents to perform port binding.
@@ -5567,7 +5579,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
 owner = trunk_consts.TRUNK_SUBPORT_OWNER
 subport_ids = [subport.port_id for subport in subports]
 profile = parent_port[portbindings.PROFILE] if parent_port else None
-self._update_trunk_status_and_subports(context, payload.trunk_id,
+self._update_trunk_status_and_subports(context, payload.resource_id,
 host_id, subport_ids, owner,
 binding_profile=profile)
@@ -111,8 +111,12 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
 **kwargs):
 if 'payload' in kwargs:
 context = kwargs['payload'].context
-security_group = kwargs['payload'].desired_state
-original_security_group = kwargs['payload'].states[0]
+if event == events.PRECOMMIT_UPDATE:
+security_group = kwargs['payload'].desired_state
+original_security_group = kwargs['payload'].states[0]
+else:
+security_group = kwargs['payload'].states[0]
+original_security_group = kwargs['payload'].desired_state
 else:
 context = kwargs.get('context')
 security_group = kwargs.get('security_group')
@@ -142,18 +146,30 @@
 [events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE])
 def _handle_security_group_rule_change(self, resource, event, trigger,
 **kwargs):
-context = kwargs.get('context')
+if 'payload' in kwargs:
+context = kwargs['payload'].context
+else:
+context = kwargs.get('context')
 if event == events.PRECOMMIT_CREATE:
-sg_rule = kwargs.get('security_group_rule')
+if 'payload' in kwargs:
+sg_rule = kwargs['payload'].states[0]
+else:
+sg_rule = kwargs.get('security_group_rule')
 mech_context = driver_context.SecurityGroupRuleContext(
 self, context, sg_rule)
 self.mechanism_manager.create_security_group_rule_precommit(
 mech_context)
 return
 if event == events.PRECOMMIT_DELETE:
-sg_rule = {'id': kwargs.get('security_group_rule_id'),
-'security_group_id': kwargs.get('security_group_id'),
-'tenant_id': context.project_id}
+if 'payload' in kwargs:
+sg_rule = {'id': kwargs['payload'].resource_id,
+'security_group_id':
+kwargs['payload'].metadata['security_group_id'],
+'tenant_id': context.project_id}
+else:
+sg_rule = {'id': kwargs.get('security_group_rule_id'),
+'security_group_id': kwargs.get('security_group_id'),
+'tenant_id': context.project_id}
 mech_context = driver_context.SecurityGroupRuleContext(
 self, context, sg_rule)
 self.mechanism_manager.delete_security_group_rule_precommit(
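Depending on the Neutron release in use, these security-group notifications arrive either as legacy keyword arguments or wrapped in a DBEventPayload, which is why the handlers above branch on 'payload' in kwargs. A condensed sketch of the state selection for the payload case (the helper name is illustrative):

    from neutron_lib.callbacks import events

    def sg_states_from_payload(event, payload):
        # PRECOMMIT_UPDATE carries the proposed changes in desired_state and
        # the pre-update copy in states[0]; for the other precommit events
        # states[0] is the security group itself.
        if event == events.PRECOMMIT_UPDATE:
            return payload.desired_state, payload.states[0]
        return payload.states[0], payload.desired_state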
@@ -2546,6 +2546,12 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
 subnet = self._get_subnet(context._plugin_context,
 es['subnet_id'])
 interface_info = {'network_id': subnet['network_id']}
+router = self._get_router(context._plugin_context,
+router_id)
+routes = [route for route in router['routes']
+if route not in es['external_routes']]
+self._update_router(context._plugin_context, router_id,
+{'routes': routes})
 self._remove_router_gw_interface(context._plugin_context,
 router_id, interface_info)
@@ -135,8 +135,9 @@ class FlowclassifierAIMDriver(FlowclassifierAIMDriverBase):
 return classifier_ids

 @registry.receives(resources.NETWORK, [events.PRECOMMIT_DELETE])
-def _handle_network_delete(self, rtype, event, trigger, context,
-network_id, **kwargs):
+def _handle_network_delete(self, rtype, event, trigger, payload):
+context = payload.context
+network_id = payload.resource_id
 flc_ids = self._get_classifiers_by_network_id(context, network_id)
 if flc_ids:
 # TODO(ivar): instead of raising, we could try deleting the flow
@@ -57,7 +57,6 @@ from oslo_config import cfg
 from oslo_db import exception as exc
 from oslo_utils import uuidutils
-import six
 from sqlalchemy.orm import exc as sql_exc
 import testtools
 import webob.exc

@@ -2532,7 +2531,7 @@ class TestAimMapping(ApicAimTestCase):
 # This should throw an exception as there will be only one
 # entry in this DB table at any given time.
 current_time = datetime.datetime.now()
-self.assertRaises(sql_exc.FlushError,
+self.assertRaises(exc.DBDuplicateEntry,
 self.driver._set_vm_name_update,
 self.db_session, None, 'host_id1', current_time)
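Note: DBDuplicateEntry is oslo.db's wrapper for the backend's duplicate-key IntegrityError, and with the library versions targeted here the second insert into this single-row table surfaces as that wrapped exception rather than as a SQLAlchemy session FlushError, which is what the assertion now expects.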
@@ -15,7 +15,10 @@

 from unittest import mock

+from neutron import quota
+from neutron.quota import resource_registry
 from neutron_lib import context
+from oslo_config import cfg

 from gbpservice.neutron.services.apic_aim import l3_plugin
 from gbpservice.neutron.tests.unit.services.grouppolicy import (
@@ -127,6 +130,14 @@ class TestCiscoApicAimL3Plugin(test_aim_mapping_driver.AIMBaseTestCase):
 'subnet_id': subnet['id'],
 'floating_network_id': subnet['network_id']}}
 self.handler_mock.reset_mock()
+quota.QUOTAS._driver = None
+cfg.CONF.set_override('quota_driver',
+'neutron.db.quota.driver.DbQuotaDriver',
+group='QUOTAS')
+self.registry = resource_registry.ResourceRegistry.get_instance()
+self.registry.register_resource_by_name('floatingip')
+self.registry.register_resource_by_name('port')
+
 floatingip = self.plugin.create_floatingip(self.context, kwargs)
 internal_ports = self._show('ports',
 floatingip['port_id'])['ports']
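Note: the extra setup pins the test to a known quota configuration before create_floatingip runs: it clears the cached quota driver (quota.QUOTAS._driver), overrides quota_driver to the classic DbQuotaDriver, and registers the 'floatingip' and 'port' resources with Neutron's resource registry so the quota engine can track them.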
@@ -13,11 +13,11 @@ SKIP_EXERCISES=volumes,trove,swift,sahara,euca,bundle,boot_from_volume,aggregate

 enable_plugin group-based-policy https://opendev.org/x/group-based-policy.git master

-enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc.git stable/wallaby
+enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc.git stable/xena

 ENABLE_APIC_AIM_GATE=True

 AIM_BRANCH=master
-OPFLEX_BRANCH=stable/wallaby
+OPFLEX_BRANCH=stable/xena
 APICAPI_BRANCH=master
 ACITOOLKIT_BRANCH=noiro-lite
@@ -20,7 +20,7 @@ GBPSERVICE_BRANCH=master
 #GBPSERVICE_BRANCH=refs/changes/85/298385/154

 GBPCLIENT_REPO=${GIT_BASE}/x/python-group-based-policy-client.git
-GBPCLIENT_BRANCH=stable/wallaby
+GBPCLIENT_BRANCH=stable/xena

 GBPUI_REPO=${GIT_BASE}/x/group-based-policy-ui.git
 GBPUI_BRANCH=master
@@ -43,13 +43,13 @@ enable_service q-svc
 enable_service q-agt
 enable_service q-dhcp
 enable_service q-l3
-enable_plugin neutron-fwaas https://opendev.org/openstack/neutron-fwaas.git stable/wallaby
-enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/wallaby
-enable_plugin neutron https://opendev.org/openstack/neutron.git stable/wallaby
+enable_plugin neutron-fwaas https://opendev.org/openstack/neutron-fwaas.git stable/xena
+enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/xena
+enable_plugin neutron https://opendev.org/openstack/neutron.git stable/xena


 #ENBALE OCTAVIA
-enable_plugin octavia https://opendev.org/openstack/octavia stable/wallaby
+enable_plugin octavia https://opendev.org/openstack/octavia stable/xena
 #ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api

 enable_service q-fwaas-v1
@@ -20,7 +20,7 @@ GBPSERVICE_BRANCH=master
 #GBPSERVICE_BRANCH=refs/changes/54/240954/47

 GBPCLIENT_REPO=${GIT_BASE}/x/python-group-based-policy-client.git
-GBPCLIENT_BRANCH=stable/wallaby
+GBPCLIENT_BRANCH=stable/xena

 GBPUI_REPO=${GIT_BASE}/x/group-based-policy-ui.git
 GBPUI_BRANCH=master
@@ -16,7 +16,7 @@ XTRACE=$(set +o | grep xtrace)

 function prepare_gbp_devstack_pre {
 cd $TOP_DIR
-sudo git checkout stable/wallaby
+sudo git checkout stable/xena
 sudo sed -i 's/DEST=\/opt\/stack/DEST=\/opt\/stack\/new/g' $TOP_DIR/stackrc
 sudo sed -i 's/source $TOP_DIR\/lib\/neutron/source $TOP_DIR\/lib\/neutron\nsource $TOP_DIR\/lib\/neutron-legacy/g' $TOP_DIR/stack.sh
 }
@@ -25,15 +25,15 @@ function prepare_gbp_devstack_post {
 # The following should updated when master moves to a new release
 # We need to do the following since the infra job clones these repos and
 # checks out the master branch (as this is the master branch) and later
-# does not switch to the stable/wallaby branch when installing devstack
+# does not switch to the stable/xena branch when installing devstack
 # since the repo is already present.
 # This can be worked around by changing the job description in
-# project-config to set BRANCH_OVERRIDE to use the stable/wallaby branch
-sudo git --git-dir=/opt/stack/new/neutron/.git --work-tree=/opt/stack/new/neutron checkout stable/wallaby
-sudo git --git-dir=/opt/stack/new/nova/.git --work-tree=/opt/stack/new/nova checkout stable/wallaby
-sudo git --git-dir=/opt/stack/new/keystone/.git --work-tree=/opt/stack/new/keystone checkout stable/wallaby
-sudo git --git-dir=/opt/stack/new/cinder/.git --work-tree=/opt/stack/new/cinder checkout stable/wallaby
-sudo git --git-dir=/opt/stack/new/requirements/.git --work-tree=/opt/stack/new/requirements checkout stable/wallaby
+# project-config to set BRANCH_OVERRIDE to use the stable/xena branch
+sudo git --git-dir=/opt/stack/new/neutron/.git --work-tree=/opt/stack/new/neutron checkout stable/xena
+sudo git --git-dir=/opt/stack/new/nova/.git --work-tree=/opt/stack/new/nova checkout stable/xena
+sudo git --git-dir=/opt/stack/new/keystone/.git --work-tree=/opt/stack/new/keystone checkout stable/xena
+sudo git --git-dir=/opt/stack/new/cinder/.git --work-tree=/opt/stack/new/cinder checkout stable/xena
+sudo git --git-dir=/opt/stack/new/requirements/.git --work-tree=/opt/stack/new/requirements checkout stable/xena

 source $TOP_DIR/functions
 source $TOP_DIR/functions-common
test-requirements.txt

@@ -4,19 +4,19 @@
 hacking>=1.1.0,<1.2.0 # Apache-2.0

 # Since version numbers for these are specified in
-# https://releases.openstack.org/constraints/upper/wallaby, they cannot be
+# https://releases.openstack.org/constraints/upper/xena, they cannot be
 # referenced as GIT URLs.
 neutron
 python-heatclient
 python-keystoneclient

--e git+https://opendev.org/openstack/networking-sfc.git@stable/wallaby#egg=networking-sfc
+-e git+https://opendev.org/openstack/networking-sfc.git@stable/xena#egg=networking-sfc

 -e git+https://github.com/noironetworks/apicapi.git@master#egg=apicapi

--e git+https://github.com/noironetworks/python-opflex-agent.git@stable/wallaby#egg=neutron-opflex-agent
+-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/xena#egg=neutron-opflex-agent

--e git+https://opendev.org/x/python-group-based-policy-client.git@stable/wallaby#egg=python-group-based-policy-client
+-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/xena#egg=python-group-based-policy-client

 coverage!=4.4,>=4.0 # Apache-2.0
 flake8-import-order==0.12 # LGPLv3
@@ -28,6 +28,7 @@ testscenarios>=0.4 # Apache-2.0/BSD
 WebTest>=2.0.27 # MIT
 oslotest>=3.2.0 # Apache-2.0
 stestr>=1.0.0 # Apache-2.0
+unittest2>=1.1.0

 # REVISIT: Until co-gating and/or stable branches are implemented for
 # the aci-integration-module repo, it may be necessary to pin to a
tox.ini (2 changed lines)
@@ -16,7 +16,7 @@ usedevelop = True
 install_command =
 pip install {opts} {packages}
 deps =
--c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/wallaby}
+-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/xena}
 -r{toxinidir}/requirements.txt
 -r{toxinidir}/test-requirements.txt
 whitelist_externals = sh