Remove old code
Change-Id: I846ae0f6455f2a6dfcd3016697b9dc2d32366bfc
This commit is contained in:
@@ -1,247 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from oslo.utils import importutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
from nova.compute import compute_keystoneclient as hkc
|
||||
from novaclient import client as novaclient
|
||||
from novaclient import shell as novashell
|
||||
try:
|
||||
from swiftclient import client as swiftclient
|
||||
except ImportError:
|
||||
swiftclient = None
|
||||
logger.info('swiftclient not available')
|
||||
try:
|
||||
from neutronclient.v2_0 import client as neutronclient
|
||||
except ImportError:
|
||||
neutronclient = None
|
||||
logger.info('neutronclient not available')
|
||||
try:
|
||||
from cinderclient import client as cinderclient
|
||||
except ImportError:
|
||||
cinderclient = None
|
||||
logger.info('cinderclient not available')
|
||||
|
||||
try:
|
||||
from ceilometerclient.v2 import client as ceilometerclient
|
||||
except ImportError:
|
||||
ceilometerclient = None
|
||||
logger.info('ceilometerclient not available')
|
||||
|
||||
|
||||
# Configuration controlling which cloud backend supplies the Clients
# class (resolved at the bottom of this module).
cloud_opts = [
    cfg.StrOpt('cloud_backend',
               default=None,
               help="Cloud module to use as a backend. Defaults to OpenStack.")
]
cfg.CONF.register_opts(cloud_opts)
|
||||
|
||||
|
||||
class OpenStackClients(object):
    """Convenience class to create and cache client instances.

    One instance is expected per request context; each service client
    is constructed lazily on first use and memoized afterwards.
    """

    def __init__(self, context):
        self.context = context
        self._nova = {}          # cache keyed by service_type
        self._keystone = None
        self._swift = None
        self._neutron = None
        self._cinder = None
        self._ceilometer = None

    @property
    def auth_token(self):
        # if there is no auth token in the context
        # attempt to get one using the context username and password
        return self.context.auth_token or self.keystone().auth_token

    def keystone(self):
        """Return (and cache) the keystone client wrapper."""
        if not self._keystone:
            self._keystone = hkc.KeystoneClient(self.context)
        return self._keystone

    def url_for(self, **kwargs):
        """Resolve a service endpoint URL via the keystone catalog."""
        return self.keystone().url_for(**kwargs)

    def nova(self, service_type='compute'):
        """Return (and cache per service_type) a nova client."""
        if service_type in self._nova:
            return self._nova[service_type]

        ctx = self.context
        if self.auth_token is None:
            logger.error("Nova connection failed, no auth_token!")
            return None

        computeshell = novashell.OpenStackComputeShell()
        extensions = computeshell._discover_extensions("1.1")

        args = {
            'project_id': ctx.tenant,
            'auth_url': ctx.auth_url,
            'service_type': service_type,
            'username': ctx.username,
            'api_key': ctx.password,
            'region_name': ctx.region_name,
            'extensions': extensions,
        }
        if ctx.password is not None:
            # Authenticate normally; pick the endpoint for the
            # configured region if one is set.
            if self.context.region_name is None:
                management_url = self.url_for(service_type=service_type)
            else:
                management_url = self.url_for(
                    service_type=service_type,
                    attr='region',
                    filter_value=self.context.region_name)
        else:
            # No password available: fall back to the pre-configured
            # nova endpoint carried on the context.
            management_url = ctx.nova_url + '/' + ctx.tenant_id
        client = novaclient.Client(2, **args)
        client.client.auth_token = self.auth_token
        client.client.management_url = management_url

        self._nova[service_type] = client
        return client

    def swift(self):
        """Return (and cache) a swift client, or None if unavailable."""
        if swiftclient is None:
            return None
        if self._swift:
            return self._swift

        ctx = self.context
        if self.auth_token is None:
            logger.error("Swift connection failed, no auth_token!")
            return None

        args = {
            'auth_version': '2.0',
            'tenant_name': ctx.tenant_id,
            'user': ctx.username,
            'key': None,
            'authurl': None,
            'preauthtoken': self.auth_token,
            'preauthurl': self.url_for(service_type='object-store'),
        }
        self._swift = swiftclient.Connection(**args)
        return self._swift

    def neutron(self):
        """Return (and cache) a neutron client, or None if unavailable."""
        if neutronclient is None:
            return None
        if self._neutron:
            return self._neutron

        ctx = self.context
        if self.auth_token is None:
            logger.error("Neutron connection failed, no auth_token!")
            return None
        if ctx.password is not None:
            if self.context.region_name is None:
                management_url = self.url_for(service_type='network')
            else:
                management_url = self.url_for(
                    service_type='network',
                    attr='region',
                    filter_value=self.context.region_name)
        else:
            # No password: use the pre-configured neutron endpoint.
            management_url = ctx.neutron_url

        args = {
            'auth_url': ctx.auth_url,
            'service_type': 'network',
            'token': self.auth_token,
            'endpoint_url': management_url,
        }

        self._neutron = neutronclient.Client(**args)
        return self._neutron

    def cinder(self):
        """Return (and cache) a cinder client.

        Falls back to the nova 'volume' endpoint when cinderclient is
        not importable.
        """
        if cinderclient is None:
            return self.nova('volume')
        if self._cinder:
            return self._cinder

        ctx = self.context
        if self.auth_token is None:
            logger.error("Cinder connection failed, no auth_token!")
            return None

        args = {
            'service_type': 'volume',
            'auth_url': ctx.auth_url,
            'project_id': ctx.tenant_id,
            'username': None,
            'api_key': None,
        }

        self._cinder = cinderclient.Client('1', **args)
        if ctx.password is not None:
            if self.context.region_name is None:
                management_url = self.url_for(service_type='volume')
            else:
                management_url = self.url_for(
                    service_type='volume',
                    attr='region',
                    filter_value=self.context.region_name)
        else:
            # No password: use the pre-configured cinder endpoint.
            management_url = ctx.cinder_url + '/' + ctx.tenant_id
        self._cinder.client.auth_token = self.auth_token
        self._cinder.client.management_url = management_url

        return self._cinder

    def ceilometer(self):
        """Return (and cache) a ceilometer client, or None if unavailable."""
        if ceilometerclient is None:
            return None
        if self._ceilometer:
            return self._ceilometer

        if self.auth_token is None:
            logger.error("Ceilometer connection failed, no auth_token!")
            return None
        ctx = self.context
        args = {
            'auth_url': ctx.auth_url,
            'service_type': 'metering',
            'project_id': ctx.tenant_id,
            # token is deliberately a callable so it is re-read lazily
            'token': lambda: self.auth_token,
            'endpoint': self.url_for(service_type='metering'),
        }

        self._ceilometer = ceilometerclient.Client(**args)
        return self._ceilometer
|
||||
|
||||
|
||||
# Resolve the Clients class: an operator-supplied backend module wins,
# otherwise fall back to the OpenStack implementation above.
if cfg.CONF.cloud_backend:
    _backend = importutils.import_module(cfg.CONF.cloud_backend)
    Clients = _backend.Clients
else:
    Clients = OpenStackClients

logger.debug('Using backend %s' % Clients)
|
@@ -1,199 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
from nova.openstack.common import local
|
||||
from nova import exception
|
||||
from nova import wsgi
|
||||
from oslo_context import context
|
||||
from oslo.utils import importutils
|
||||
from oslo.utils import uuidutils
|
||||
|
||||
|
||||
def generate_request_id():
    """Return a new request identifier of the form ``req-<uuid>``."""
    return 'req-' + uuidutils.generate_uuid()
|
||||
|
||||
|
||||
class RequestContext(context.RequestContext):
    """Security context for a single request.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, username=None, password=None,
                 aws_creds=None, tenant=None,
                 tenant_id=None, auth_url=None, roles=None,
                 is_admin=False, region_name=None,
                 nova_url=None, cinder_url=None, neutron_url=None,
                 read_only=False, show_deleted=False,
                 owner_is_tenant=True, overwrite=True,
                 trust_id=None, trustor_user_id=None,
                 **kwargs):
        """
        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        super(RequestContext, self).__init__(auth_token=auth_token,
                                             user=username, tenant=tenant,
                                             is_admin=is_admin,
                                             read_only=read_only,
                                             show_deleted=show_deleted,
                                             request_id='unused')
        self.username = username
        self.password = password
        self.aws_creds = aws_creds
        self.tenant_id = tenant_id
        self.auth_url = auth_url
        self.roles = roles or []
        self.owner_is_tenant = owner_is_tenant
        # Publish to the greenthread-local store unless the caller asked
        # to preserve an already-stored context.
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
        self._session = None
        self.trust_id = trust_id
        self.trustor_user_id = trustor_user_id
        self.nova_url = nova_url
        self.cinder_url = cinder_url
        self.neutron_url = neutron_url
        self.region_name = region_name

    def update_store(self):
        """Save this context in greenthread-local storage."""
        local.store.context = self

    def to_dict(self):
        """Serialize the context (e.g. for transport over RPC)."""
        return {'auth_token': self.auth_token,
                'username': self.username,
                'password': self.password,
                'aws_creds': self.aws_creds,
                'tenant': self.tenant,
                'tenant_id': self.tenant_id,
                'trust_id': self.trust_id,
                'trustor_user_id': self.trustor_user_id,
                'auth_url': self.auth_url,
                'roles': self.roles,
                'is_admin': self.is_admin}

    @classmethod
    def from_dict(cls, values):
        """Rebuild a context from the output of :meth:`to_dict`."""
        return cls(**values)

    @property
    def owner(self):
        """Return the owner to correlate with an image."""
        return self.tenant if self.owner_is_tenant else self.user
|
||||
|
||||
|
||||
def get_admin_context(read_deleted="no"):
    """Return an admin-privileged RequestContext.

    ``read_deleted`` is accepted for interface compatibility only; it is
    not consulted here.
    """
    return RequestContext(is_admin=True)
|
||||
|
||||
|
||||
class ContextMiddleware(wsgi.Middleware):
    """WSGI middleware that builds a RequestContext for each request."""

    opts = [cfg.BoolOpt('owner_is_tenant', default=True),
            cfg.StrOpt('admin_role', default='admin')]

    def __init__(self, app, conf, **local_conf):
        cfg.CONF.register_opts(self.opts)

        # Determine the context class to use; a dotted path supplied via
        # paste config overrides the default.
        self.ctxcls = RequestContext
        if 'context_class' in local_conf:
            self.ctxcls = importutils.import_class(local_conf['context_class'])

        super(ContextMiddleware, self).__init__(app)

    def make_context(self, *args, **kwargs):
        """Create a context with the given arguments."""
        kwargs.setdefault('owner_is_tenant', cfg.CONF.owner_is_tenant)
        return self.ctxcls(*args, **kwargs)

    def process_request(self, req):
        """Extract any authentication information in the request and
        construct an appropriate context from it.

        A few scenarios exist:

        1. If X-Auth-Token is passed in, then consult TENANT and ROLE headers
           to determine permissions.

        2. An X-Auth-Token was passed in, but the Identity-Status is not
           confirmed. For now, just raising a NotAuthenticated exception.

        3. X-Auth-Token is omitted. If we were using Keystone, then the
           tokenauth middleware would have rejected the request, so we must be
           using NoAuth. In that case, assume that is_admin=True.
        """
        headers = req.headers

        try:
            # This sets the username/password to the admin user because
            # you need this information in order to perform token
            # authentication.  The real 'username' is the 'tenant'.
            #
            # We should also check here to see if X-Auth-Token is not set
            # and in that case we should assign the user/pass directly as
            # the real username/password and token as None.  'tenant'
            # should still be the username.
            username = None
            password = None
            aws_creds = None

            if headers.get('X-Auth-User') is not None:
                username = headers.get('X-Auth-User')
                password = headers.get('X-Auth-Key')
            elif headers.get('X-Auth-EC2-Creds') is not None:
                aws_creds = headers.get('X-Auth-EC2-Creds')

            token = headers.get('X-Auth-Token')
            tenant = headers.get('X-Tenant-Name')
            tenant_id = headers.get('X-Tenant-Id')
            auth_url = headers.get('X-Auth-Url')
            roles = headers.get('X-Roles')
            if roles is not None:
                roles = roles.split(',')

        except Exception:
            raise exception.NotAuthenticated()

        req.context = self.make_context(auth_token=token,
                                        tenant=tenant, tenant_id=tenant_id,
                                        aws_creds=aws_creds,
                                        username=username,
                                        password=password,
                                        auth_url=auth_url, roles=roles,
                                        is_admin=True)
|
||||
|
||||
|
||||
def ContextMiddleware_filter_factory(global_conf, **local_conf):
    """Factory method for paste.deploy.

    Merges the paste global and local config and returns a filter
    callable that wraps an app in :class:`ContextMiddleware`.
    """
    merged = global_conf.copy()
    merged.update(local_conf)

    def filter(app):
        return ContextMiddleware(app, merged)

    return filter
|
@@ -1,315 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_context import context
|
||||
from nova import exception
|
||||
|
||||
import eventlet
|
||||
|
||||
from keystoneclient.v2_0 import client as kc
|
||||
from keystoneclient.v3 import client as kc_v3
|
||||
from oslo.config import cfg
|
||||
from oslo.utils import importutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
logger = logging.getLogger('nova.compute.keystoneclient')
|
||||
|
||||
|
||||
class KeystoneClient(object):
    """Wrap keystone client so we can encapsulate logic used in resources.

    Note this is intended to be initialized from a resource on a
    per-session basis, so the session context is passed in on
    initialization.  Also note that a copy of this is created every
    resource as self.keystone() via the code in engine/client.py, so
    there should not be any need to directly instantiate instances of
    this class inside resources themselves.
    """

    def __init__(self, context):
        # We have to maintain two clients authenticated with keystone:
        # - ec2 interface is v2.0 only
        # - trusts is v3 only
        # If a trust_id is specified in the context, we immediately
        # authenticate so we can populate the context with a trust token;
        # otherwise, we delay client authentication until needed to avoid
        # unnecessary calls to keystone.
        #
        # Note that when you obtain a token using a trust, it cannot be
        # used to reauthenticate and get another token, so we have to
        # get a new trust-token even if context.auth_token is set.
        #
        # - context.auth_url is expected to contain the v2.0 keystone
        #   endpoint
        self.context = context
        self._client_v2 = None
        self._client_v3 = None

        if self.context.trust_id:
            # Create a connection to the v2 API with the trust_id; this
            # populates self.context.auth_token with a trust-scoped token.
            self._client_v2 = self._v2_client_init()

    @property
    def client_v3(self):
        # Lazily create the connection to the v3 API.
        if not self._client_v3:
            self._client_v3 = self._v3_client_init()
        return self._client_v3

    @property
    def client_v2(self):
        # Lazily create the connection to the v2.0 API.
        if not self._client_v2:
            self._client_v2 = self._v2_client_init()
        return self._client_v2

    def _v2_client_init(self):
        """Build and authenticate a keystone v2.0 client.

        :raises exception.AuthorizationFailure: when no credentials are
            available on the context, or trust re-scoping fails.
        """
        kwargs = {
            'auth_url': self.context.auth_url
        }
        auth_kwargs = {}
        # Note try trust_id first, as we can't reuse auth_token in that case
        if self.context.trust_id is not None:
            # We got a trust_id, so we use the admin credentials
            # to authenticate, then re-scope the token to the
            # trust impersonating the trustor user.
            # Note that this currently requires the trustor tenant_id
            # to be passed to the authenticate(), unlike the v3 call
            kwargs.update(self._service_admin_creds(api_version=2))
            auth_kwargs['trust_id'] = self.context.trust_id
            auth_kwargs['tenant_id'] = self.context.tenant_id
        elif self.context.auth_token is not None:
            kwargs['tenant_name'] = self.context.tenant
            kwargs['token'] = self.context.auth_token
        elif self.context.password is not None:
            kwargs['username'] = self.context.username
            kwargs['password'] = self.context.password
            kwargs['tenant_name'] = self.context.tenant
            kwargs['tenant_id'] = self.context.tenant_id
        else:
            logger.error("Keystone v2 API connection failed, no password or "
                         "auth_token!")
            raise exception.AuthorizationFailure()

        client_v2 = kc.Client(**kwargs)
        client_v2.authenticate(**auth_kwargs)

        # If we are authenticating with a trust auth_kwargs are set, so set
        # the context auth_token with the re-scoped trust token
        if auth_kwargs:
            # Sanity check
            if not client_v2.auth_ref.trust_scoped:
                logger.error("v2 trust token re-scoping failed!")
                raise exception.AuthorizationFailure()
            # All OK so update the context with the token
            self.context.auth_token = client_v2.auth_ref.auth_token
            self.context.auth_url = kwargs.get('auth_url')

        return client_v2

    @staticmethod
    def _service_admin_creds(api_version=2):
        """Return credential kwargs for the service admin user."""
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystoneclient.middleware.auth_token')

        creds = {
            'username': cfg.CONF.keystone_authtoken.admin_user,
            'password': cfg.CONF.keystone_authtoken.admin_password,
        }
        if api_version >= 3:
            creds['auth_url'] = (
                cfg.CONF.keystone_authtoken.auth_uri.replace('v2.0', 'v3'))
            creds['project_name'] = (
                cfg.CONF.keystone_authtoken.admin_tenant_name)
        else:
            creds['auth_url'] = cfg.CONF.keystone_authtoken.auth_uri
            creds['tenant_name'] = (
                cfg.CONF.keystone_authtoken.admin_tenant_name)

        return creds

    def _v3_client_init(self):
        """Build and authenticate a keystone v3 client.

        :raises exception.AuthorizationFailure: when no credentials are
            available on the context.
        """
        kwargs = {}
        if self.context.auth_token is not None:
            kwargs['project_name'] = self.context.tenant
            kwargs['token'] = self.context.auth_token
            kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3')
            kwargs['endpoint'] = kwargs['auth_url']
        elif self.context.trust_id is not None:
            # We got a trust_id, so we use the admin credentials and get a
            # token back impersonating the trustor user
            kwargs.update(self._service_admin_creds(api_version=3))
            kwargs['trust_id'] = self.context.trust_id
        elif self.context.password is not None:
            kwargs['username'] = self.context.username
            kwargs['password'] = self.context.password
            kwargs['project_name'] = self.context.tenant
            kwargs['project_id'] = self.context.tenant_id
            kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3')
            kwargs['endpoint'] = kwargs['auth_url']
        else:
            logger.error("Keystone v3 API connection failed, no password or "
                         "auth_token!")
            raise exception.AuthorizationFailure()

        client = kc_v3.Client(**kwargs)
        # Have to explicitly authenticate() or client.auth_ref is None
        client.authenticate()

        return client

    def create_trust_context(self):
        """Create a trust and return a context carrying its trust_id.

        If cfg.CONF.deferred_auth_method is trusts, we create a trust
        using the trustor identity in the current context, with the
        trustee as the heat service user, and return a context
        containing the new trust_id.

        If deferred_auth_method != trusts, or the current context
        already contains a trust_id, we do nothing and return the
        current context.
        """
        if self.context.trust_id:
            return self.context

        # We need the service admin user ID (not name), as the trustor user
        # can't lookup the ID in keystoneclient unless they're admin.
        # Workaround this by creating a temporary admin client connection
        # then getting the user ID from the auth_ref.
        admin_creds = self._service_admin_creds()
        admin_client = kc.Client(**admin_creds)
        trustee_user_id = admin_client.auth_ref.user_id
        trustor_user_id = self.client_v3.auth_ref.user_id
        trustor_project_id = self.client_v3.auth_ref.project_id
        roles = cfg.CONF.trusts_delegated_roles
        trust = self.client_v3.trusts.create(trustor_user=trustor_user_id,
                                             trustee_user=trustee_user_id,
                                             project=trustor_project_id,
                                             impersonation=True,
                                             role_names=roles)

        trust_context = context.RequestContext.from_dict(
            self.context.to_dict())
        trust_context.trust_id = trust.id
        trust_context.trustor_user_id = trustor_user_id
        return trust_context

    def delete_trust(self, trust_id):
        """Delete the specified trust."""
        self.client_v3.trusts.delete(trust_id)

    def create_stack_user(self, username, password=''):
        """Create a user defined as part of a stack.

        The user may come from a template or be created internally by a
        resource.  It is added to the heat_stack_user_role as defined in
        the config.

        Returns the keystone ID of the resulting user.
        """
        if len(username) > 64:
            logger.warning("Truncating the username %s to the last 64 "
                           "characters." % username)
            # get the last 64 characters of the username
            username = username[-64:]
        user = self.client_v2.users.create(username,
                                           password,
                                           '%s@heat-api.org' %
                                           username,
                                           tenant_id=self.context.tenant_id,
                                           enabled=True)

        # We add the new user to a special keystone role.
        # This role is designed to allow easier differentiation of the
        # heat-generated "stack users" which will generally have credentials
        # deployed on an instance (hence are implicitly untrusted).
        roles = self.client_v2.roles.list()
        stack_user_role = [r.id for r in roles
                           if r.name == cfg.CONF.heat_stack_user_role]
        if len(stack_user_role) == 1:
            role_id = stack_user_role[0]
            logger.debug("Adding user %s to role %s" % (user.id, role_id))
            self.client_v2.roles.add_user_role(user.id, role_id,
                                               self.context.tenant_id)
        else:
            logger.error("Failed to add user %s to role %s, check role exists!"
                         % (username, cfg.CONF.heat_stack_user_role))

        return user.id

    def delete_stack_user(self, user_id):
        """Delete a stack user, retrying for up to 10 seconds.

        :raises exception.Error: when the user still exists after the
            retry window.
        """
        user = self.client_v2.users.get(user_id)

        # FIXME (shardy) : need to test, do we still need this retry logic?
        # Copied from user.py, but seems like something we really shouldn't
        # need to do, no bug reference in the original comment (below)...
        # tempory hack to work around an openstack bug.
        # seems you can't delete a user first time - you have to try
        # a couple of times - go figure!
        tmo = eventlet.Timeout(10)
        status = 'WAITING'
        reason = 'Timed out trying to delete user'
        try:
            while status == 'WAITING':
                try:
                    user.delete()
                    status = 'DELETED'
                except Exception as ce:
                    reason = str(ce)
                    logger.warning("Problem deleting user %s: %s" %
                                   (user_id, reason))
                    eventlet.sleep(1)
        except eventlet.Timeout as t:
            if t is not tmo:
                # not my timeout
                raise
            else:
                status = 'TIMEDOUT'
        finally:
            tmo.cancel()

        if status != 'DELETED':
            raise exception.Error(reason)

    def delete_ec2_keypair(self, user_id, accesskey):
        """Delete an ec2 keypair belonging to the given user."""
        self.client_v2.ec2.delete(user_id, accesskey)

    def get_ec2_keypair(self, user_id):
        """Return (creating if absent) the user's single ec2 keypair."""
        # We make the assumption that each user will only have one
        # ec2 keypair; it's not clear if AWS allow multiple AccessKey
        # resources to be associated with a single User resource, but for
        # simplicity we assume that here for now.
        cred = self.client_v2.ec2.list(user_id)
        if len(cred) == 0:
            return self.client_v2.ec2.create(user_id, self.context.tenant_id)
        if len(cred) == 1:
            return cred[0]
        else:
            logger.error("Unexpected number of ec2 credentials %s for %s" %
                         (len(cred), user_id))

    def disable_stack_user(self, user_id):
        # FIXME : This won't work with the v3 keystone API
        self.client_v2.users.update_enabled(user_id, False)

    def enable_stack_user(self, user_id):
        # FIXME : This won't work with the v3 keystone API
        self.client_v2.users.update_enabled(user_id, True)

    def url_for(self, **kwargs):
        """Resolve a service endpoint URL from the v2 service catalog."""
        return self.client_v2.service_catalog.url_for(**kwargs)

    @property
    def auth_token(self):
        return self.client_v2.auth_token
|
File diff suppressed because it is too large
Load Diff
@@ -1,237 +0,0 @@
|
||||
# Copyright 2014, Huawei, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# @author: Haojie Jia, Huawei
|
||||
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
#from heat.openstack.common import importutils
|
||||
#from heat.openstack.common import log as logging
|
||||
from oslo.utils import importutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
from neutron.plugins.l2_proxy.agent import neutron_keystoneclient as hkc
|
||||
from novaclient import client as novaclient
|
||||
from novaclient import shell as novashell
|
||||
try:
|
||||
from swiftclient import client as swiftclient
|
||||
except ImportError:
|
||||
swiftclient = None
|
||||
logger.info('swiftclient not available')
|
||||
try:
|
||||
from neutronclient.v2_0 import client as neutronclient
|
||||
except ImportError:
|
||||
neutronclient = None
|
||||
logger.info('neutronclient not available')
|
||||
try:
|
||||
from cinderclient import client as cinderclient
|
||||
except ImportError:
|
||||
cinderclient = None
|
||||
logger.info('cinderclient not available')
|
||||
|
||||
try:
|
||||
from ceilometerclient.v2 import client as ceilometerclient
|
||||
except ImportError:
|
||||
ceilometerclient = None
|
||||
logger.info('ceilometerclient not available')
|
||||
|
||||
|
||||
# Configuration controlling which cloud backend implementation is used.
cloud_opts = [
    cfg.StrOpt('cloud_backend',
               default=None,
               help="Cloud module to use as a backend. Defaults to OpenStack.")
]
cfg.CONF.register_opts(cloud_opts)
|
||||
|
||||
|
||||
class OpenStackClients(object):
|
||||
|
||||
'''
|
||||
Convenience class to create and cache client instances.
|
||||
'''
|
||||
|
||||
def __init__(self, context):
|
||||
self.context = context
|
||||
self._nova = {}
|
||||
self._keystone = None
|
||||
self._swift = None
|
||||
self._neutron = None
|
||||
self._cinder = None
|
||||
self._ceilometer = None
|
||||
|
||||
@property
|
||||
def auth_token(self):
|
||||
# if there is no auth token in the context
|
||||
# attempt to get one using the context username and password
|
||||
return self.context.auth_token or self.keystone().auth_token
|
||||
|
||||
def keystone(self):
|
||||
if self._keystone:
|
||||
return self._keystone
|
||||
|
||||
self._keystone = hkc.KeystoneClient(self.context)
|
||||
return self._keystone
|
||||
|
||||
def url_for(self, **kwargs):
|
||||
return self.keystone().url_for(**kwargs)
|
||||
|
||||
def nova(self, service_type='compute'):
|
||||
if service_type in self._nova:
|
||||
return self._nova[service_type]
|
||||
|
||||
con = self.context
|
||||
if self.auth_token is None:
|
||||
logger.error("Nova connection failed, no auth_token!")
|
||||
return None
|
||||
|
||||
computeshell = novashell.OpenStackComputeShell()
|
||||
extensions = computeshell._discover_extensions("1.1")
|
||||
|
||||
args = {
|
||||
'project_id': con.tenant_id,
|
||||
'auth_url': con.auth_url,
|
||||
'service_type': service_type,
|
||||
'username': None,
|
||||
'api_key': None,
|
||||
'extensions': extensions
|
||||
}
|
||||
|
||||
client = novaclient.Client(1.1, **args)
|
||||
|
||||
management_url = self.url_for(
|
||||
service_type=service_type,
|
||||
attr='region',
|
||||
filter_value='RegionTwo')
|
||||
client.client.auth_token = self.auth_token
|
||||
client.client.management_url = management_url
|
||||
# management_url = self.url_for(service_type=service_type,attr='region',filter_value='RegionTwo')
|
||||
# client.client.auth_token = self.auth_token
|
||||
# client.client.management_url = 'http://172.31.127.32:8774/v2/49a3d7c4bbb34a6f843ccc87bab844aa'
|
||||
|
||||
self._nova[service_type] = client
|
||||
return client
|
||||
|
||||
def swift(self):
|
||||
if swiftclient is None:
|
||||
return None
|
||||
if self._swift:
|
||||
return self._swift
|
||||
|
||||
con = self.context
|
||||
if self.auth_token is None:
|
||||
logger.error("Swift connection failed, no auth_token!")
|
||||
return None
|
||||
|
||||
args = {
|
||||
'auth_version': '2.0',
|
||||
'tenant_name': con.tenant_id,
|
||||
'user': con.username,
|
||||
'key': None,
|
||||
'authurl': None,
|
||||
'preauthtoken': self.auth_token,
|
||||
'preauthurl': self.url_for(service_type='object-store')
|
||||
}
|
||||
self._swift = swiftclient.Connection(**args)
|
||||
return self._swift
|
||||
|
||||
def neutron(self):
|
||||
if neutronclient is None:
|
||||
return None
|
||||
if self._neutron:
|
||||
return self._neutron
|
||||
|
||||
con = self.context
|
||||
if self.auth_token is None:
|
||||
logger.error("Neutron connection failed, no auth_token!")
|
||||
return None
|
||||
|
||||
if self.context.region_name is None:
|
||||
management_url = self.url_for(service_type='network')
|
||||
else:
|
||||
management_url = self.url_for(
|
||||
service_type='network',
|
||||
attr='region',
|
||||
filter_value=self.context.region_name)
|
||||
args = {
|
||||
'auth_url': con.auth_url,
|
||||
'service_type': 'network',
|
||||
'token': self.auth_token,
|
||||
'endpoint_url': management_url
|
||||
}
|
||||
|
||||
self._neutron = neutronclient.Client(**args)
|
||||
|
||||
return self._neutron
|
||||
|
||||
def cinder(self):
|
||||
if cinderclient is None:
|
||||
return self.nova('volume')
|
||||
if self._cinder:
|
||||
return self._cinder
|
||||
|
||||
con = self.context
|
||||
if self.auth_token is None:
|
||||
logger.error("Cinder connection failed, no auth_token!")
|
||||
return None
|
||||
|
||||
args = {
|
||||
'service_type': 'volume',
|
||||
'auth_url': con.auth_url,
|
||||
'project_id': con.tenant_id,
|
||||
'username': None,
|
||||
'api_key': None
|
||||
}
|
||||
|
||||
self._cinder = cinderclient.Client('1', **args)
|
||||
management_url = self.url_for(service_type='volume')
|
||||
self._cinder.client.auth_token = self.auth_token
|
||||
self._cinder.client.management_url = management_url
|
||||
|
||||
return self._cinder
|
||||
|
||||
def ceilometer(self):
|
||||
if ceilometerclient is None:
|
||||
return None
|
||||
if self._ceilometer:
|
||||
return self._ceilometer
|
||||
|
||||
if self.auth_token is None:
|
||||
logger.error("Ceilometer connection failed, no auth_token!")
|
||||
return None
|
||||
con = self.context
|
||||
args = {
|
||||
'auth_url': con.auth_url,
|
||||
'service_type': 'metering',
|
||||
'project_id': con.tenant_id,
|
||||
'token': lambda: self.auth_token,
|
||||
'endpoint': self.url_for(service_type='metering'),
|
||||
}
|
||||
|
||||
client = ceilometerclient.Client(**args)
|
||||
|
||||
self._ceilometer = client
|
||||
return self._ceilometer
|
||||
|
||||
|
||||
if cfg.CONF.cloud_backend:
|
||||
cloud_backend_module = importutils.import_module(cfg.CONF.cloud_backend)
|
||||
Clients = cloud_backend_module.Clients
|
||||
else:
|
||||
Clients = OpenStackClients
|
||||
|
||||
logger.debug('Using backend %s' % Clients)
|
File diff suppressed because it is too large
Load Diff
@@ -1,317 +0,0 @@
|
||||
# Copyright 2014, Huawei, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# @author: Haojie Jia, Huawei
|
||||
|
||||
from oslo_context import context
|
||||
from neutron.common import exceptions
|
||||
|
||||
import eventlet
|
||||
|
||||
from keystoneclient.v2_0 import client as kc
|
||||
from keystoneclient.v3 import client as kc_v3
|
||||
from oslo.config import cfg
|
||||
|
||||
from oslo.utils import importutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
logger = logging.getLogger(
|
||||
'neutron.plugins.cascading_proxy_agent.keystoneclient')
|
||||
|
||||
|
||||
class KeystoneClient(object):
|
||||
|
||||
"""
|
||||
Wrap keystone client so we can encapsulate logic used in resources
|
||||
Note this is intended to be initialized from a resource on a per-session
|
||||
basis, so the session context is passed in on initialization
|
||||
Also note that a copy of this is created every resource as self.keystone()
|
||||
via the code in engine/client.py, so there should not be any need to
|
||||
directly instantiate instances of this class inside resources themselves
|
||||
"""
|
||||
|
||||
def __init__(self, context):
|
||||
# We have to maintain two clients authenticated with keystone:
|
||||
# - ec2 interface is v2.0 only
|
||||
# - trusts is v3 only
|
||||
# If a trust_id is specified in the context, we immediately
|
||||
# authenticate so we can populate the context with a trust token
|
||||
# otherwise, we delay client authentication until needed to avoid
|
||||
# unnecessary calls to keystone.
|
||||
#
|
||||
# Note that when you obtain a token using a trust, it cannot be
|
||||
# used to reauthenticate and get another token, so we have to
|
||||
# get a new trust-token even if context.auth_token is set.
|
||||
#
|
||||
# - context.auth_url is expected to contain the v2.0 keystone endpoint
|
||||
self.context = context
|
||||
self._client_v2 = None
|
||||
self._client_v3 = None
|
||||
|
||||
if self.context.trust_id:
|
||||
# Create a connection to the v2 API, with the trust_id, this
|
||||
# populates self.context.auth_token with a trust-scoped token
|
||||
self._client_v2 = self._v2_client_init()
|
||||
|
||||
@property
|
||||
def client_v3(self):
|
||||
if not self._client_v3:
|
||||
# Create connection to v3 API
|
||||
self._client_v3 = self._v3_client_init()
|
||||
return self._client_v3
|
||||
|
||||
@property
|
||||
def client_v2(self):
|
||||
if not self._client_v2:
|
||||
self._client_v2 = self._v2_client_init()
|
||||
return self._client_v2
|
||||
|
||||
def _v2_client_init(self):
|
||||
kwargs = {
|
||||
'auth_url': self.context.auth_url
|
||||
}
|
||||
auth_kwargs = {}
|
||||
# Note try trust_id first, as we can't reuse auth_token in that case
|
||||
if self.context.trust_id is not None:
|
||||
# We got a trust_id, so we use the admin credentials
|
||||
# to authenticate, then re-scope the token to the
|
||||
# trust impersonating the trustor user.
|
||||
# Note that this currently requires the trustor tenant_id
|
||||
# to be passed to the authenticate(), unlike the v3 call
|
||||
kwargs.update(self._service_admin_creds(api_version=2))
|
||||
auth_kwargs['trust_id'] = self.context.trust_id
|
||||
auth_kwargs['tenant_id'] = self.context.tenant_id
|
||||
elif self.context.auth_token is not None:
|
||||
kwargs['tenant_name'] = self.context.tenant
|
||||
kwargs['token'] = self.context.auth_token
|
||||
elif self.context.password is not None:
|
||||
kwargs['username'] = self.context.username
|
||||
kwargs['password'] = self.context.password
|
||||
kwargs['tenant_name'] = self.context.tenant
|
||||
kwargs['tenant_id'] = self.context.tenant_id
|
||||
else:
|
||||
logger.error("Keystone v2 API connection failed, no password or "
|
||||
"auth_token!")
|
||||
raise exception.AuthorizationFailure()
|
||||
client_v2 = kc.Client(**kwargs)
|
||||
|
||||
client_v2.authenticate(**auth_kwargs)
|
||||
# If we are authenticating with a trust auth_kwargs are set, so set
|
||||
# the context auth_token with the re-scoped trust token
|
||||
if auth_kwargs:
|
||||
# Sanity check
|
||||
if not client_v2.auth_ref.trust_scoped:
|
||||
logger.error("v2 trust token re-scoping failed!")
|
||||
raise exception.AuthorizationFailure()
|
||||
# All OK so update the context with the token
|
||||
self.context.auth_token = client_v2.auth_ref.auth_token
|
||||
self.context.auth_url = kwargs.get('auth_url')
|
||||
|
||||
return client_v2
|
||||
|
||||
@staticmethod
|
||||
def _service_admin_creds(api_version=2):
|
||||
# Import auth_token to have keystone_authtoken settings setup.
|
||||
importutils.import_module('keystoneclient.middleware.auth_token')
|
||||
|
||||
creds = {
|
||||
'username': cfg.CONF.keystone_authtoken.admin_user,
|
||||
'password': cfg.CONF.keystone_authtoken.admin_password,
|
||||
}
|
||||
if api_version >= 3:
|
||||
creds['auth_url'] =\
|
||||
cfg.CONF.keystone_authtoken.auth_uri.replace('v2.0', 'v3')
|
||||
creds['project_name'] =\
|
||||
cfg.CONF.keystone_authtoken.admin_tenant_name
|
||||
else:
|
||||
creds['auth_url'] = cfg.CONF.keystone_authtoken.auth_uri
|
||||
creds['tenant_name'] =\
|
||||
cfg.CONF.keystone_authtoken.admin_tenant_name
|
||||
|
||||
return creds
|
||||
|
||||
def _v3_client_init(self):
|
||||
kwargs = {}
|
||||
if self.context.auth_token is not None:
|
||||
kwargs['project_name'] = self.context.tenant
|
||||
kwargs['token'] = self.context.auth_token
|
||||
kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3')
|
||||
kwargs['endpoint'] = kwargs['auth_url']
|
||||
elif self.context.trust_id is not None:
|
||||
# We got a trust_id, so we use the admin credentials and get a
|
||||
# Token back impersonating the trustor user
|
||||
kwargs.update(self._service_admin_creds(api_version=3))
|
||||
kwargs['trust_id'] = self.context.trust_id
|
||||
elif self.context.password is not None:
|
||||
kwargs['username'] = self.context.username
|
||||
kwargs['password'] = self.context.password
|
||||
kwargs['project_name'] = self.context.tenant
|
||||
kwargs['project_id'] = self.context.tenant_id
|
||||
kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3')
|
||||
kwargs['endpoint'] = kwargs['auth_url']
|
||||
else:
|
||||
logger.error("Keystone v3 API connection failed, no password or "
|
||||
"auth_token!")
|
||||
raise exception.AuthorizationFailure()
|
||||
|
||||
client = kc_v3.Client(**kwargs)
|
||||
# Have to explicitly authenticate() or client.auth_ref is None
|
||||
client.authenticate()
|
||||
|
||||
return client
|
||||
|
||||
def create_trust_context(self):
|
||||
"""
|
||||
If cfg.CONF.deferred_auth_method is trusts, we create a
|
||||
trust using the trustor identity in the current context, with the
|
||||
trustee as the heat service user and return a context containing
|
||||
the new trust_id
|
||||
|
||||
If deferred_auth_method != trusts, or the current context already
|
||||
contains a trust_id, we do nothing and return the current context
|
||||
"""
|
||||
if self.context.trust_id:
|
||||
return self.context
|
||||
|
||||
# We need the service admin user ID (not name), as the trustor user
|
||||
# can't lookup the ID in keystoneclient unless they're admin
|
||||
# workaround this by creating a temporary admin client connection
|
||||
# then getting the user ID from the auth_ref
|
||||
admin_creds = self._service_admin_creds()
|
||||
admin_client = kc.Client(**admin_creds)
|
||||
trustee_user_id = admin_client.auth_ref.user_id
|
||||
trustor_user_id = self.client_v3.auth_ref.user_id
|
||||
trustor_project_id = self.client_v3.auth_ref.project_id
|
||||
roles = cfg.CONF.trusts_delegated_roles
|
||||
trust = self.client_v3.trusts.create(trustor_user=trustor_user_id,
|
||||
trustee_user=trustee_user_id,
|
||||
project=trustor_project_id,
|
||||
impersonation=True,
|
||||
role_names=roles)
|
||||
|
||||
trust_context = context.RequestContext.from_dict(
|
||||
self.context.to_dict())
|
||||
trust_context.trust_id = trust.id
|
||||
trust_context.trustor_user_id = trustor_user_id
|
||||
return trust_context
|
||||
|
||||
def delete_trust(self, trust_id):
|
||||
"""
|
||||
Delete the specified trust.
|
||||
"""
|
||||
self.client_v3.trusts.delete(trust_id)
|
||||
|
||||
def create_stack_user(self, username, password=''):
|
||||
"""
|
||||
Create a user defined as part of a stack, either via template
|
||||
or created internally by a resource. This user will be added to
|
||||
the heat_stack_user_role as defined in the config
|
||||
Returns the keystone ID of the resulting user
|
||||
"""
|
||||
if(len(username) > 64):
|
||||
logger.warning("Truncating the username %s to the last 64 "
|
||||
"characters." % username)
|
||||
# get the last 64 characters of the username
|
||||
username = username[-64:]
|
||||
user = self.client_v2.users.create(username,
|
||||
password,
|
||||
'%s@heat-api.org' %
|
||||
username,
|
||||
tenant_id=self.context.tenant_id,
|
||||
enabled=True)
|
||||
|
||||
# We add the new user to a special keystone role
|
||||
# This role is designed to allow easier differentiation of the
|
||||
# heat-generated "stack users" which will generally have credentials
|
||||
# deployed on an instance (hence are implicitly untrusted)
|
||||
roles = self.client_v2.roles.list()
|
||||
stack_user_role = [r.id for r in roles
|
||||
if r.name == cfg.CONF.heat_stack_user_role]
|
||||
if len(stack_user_role) == 1:
|
||||
role_id = stack_user_role[0]
|
||||
logger.debug("Adding user %s to role %s" % (user.id, role_id))
|
||||
self.client_v2.roles.add_user_role(user.id, role_id,
|
||||
self.context.tenant_id)
|
||||
else:
|
||||
logger.error("Failed to add user %s to role %s, check role exists!"
|
||||
% (username, cfg.CONF.heat_stack_user_role))
|
||||
|
||||
return user.id
|
||||
|
||||
def delete_stack_user(self, user_id):
|
||||
|
||||
user = self.client_v2.users.get(user_id)
|
||||
|
||||
# FIXME (shardy) : need to test, do we still need this retry logic?
|
||||
# Copied from user.py, but seems like something we really shouldn't
|
||||
# need to do, no bug reference in the original comment (below)...
|
||||
# tempory hack to work around an openstack bug.
|
||||
# seems you can't delete a user first time - you have to try
|
||||
# a couple of times - go figure!
|
||||
tmo = eventlet.Timeout(10)
|
||||
status = 'WAITING'
|
||||
reason = 'Timed out trying to delete user'
|
||||
try:
|
||||
while status == 'WAITING':
|
||||
try:
|
||||
user.delete()
|
||||
status = 'DELETED'
|
||||
except Exception as ce:
|
||||
reason = str(ce)
|
||||
logger.warning("Problem deleting user %s: %s" %
|
||||
(user_id, reason))
|
||||
eventlet.sleep(1)
|
||||
except eventlet.Timeout as t:
|
||||
if t is not tmo:
|
||||
# not my timeout
|
||||
raise
|
||||
else:
|
||||
status = 'TIMEDOUT'
|
||||
finally:
|
||||
tmo.cancel()
|
||||
|
||||
if status != 'DELETED':
|
||||
raise exception.Error(reason)
|
||||
|
||||
def delete_ec2_keypair(self, user_id, accesskey):
|
||||
self.client_v2.ec2.delete(user_id, accesskey)
|
||||
|
||||
def get_ec2_keypair(self, user_id):
|
||||
# We make the assumption that each user will only have one
|
||||
# ec2 keypair, it's not clear if AWS allow multiple AccessKey resources
|
||||
# to be associated with a single User resource, but for simplicity
|
||||
# we assume that here for now
|
||||
cred = self.client_v2.ec2.list(user_id)
|
||||
if len(cred) == 0:
|
||||
return self.client_v2.ec2.create(user_id, self.context.tenant_id)
|
||||
if len(cred) == 1:
|
||||
return cred[0]
|
||||
else:
|
||||
logger.error("Unexpected number of ec2 credentials %s for %s" %
|
||||
(len(cred), user_id))
|
||||
|
||||
def disable_stack_user(self, user_id):
|
||||
# FIXME : This won't work with the v3 keystone API
|
||||
self.client_v2.users.update_enabled(user_id, False)
|
||||
|
||||
def enable_stack_user(self, user_id):
|
||||
# FIXME : This won't work with the v3 keystone API
|
||||
self.client_v2.users.update_enabled(user_id, True)
|
||||
|
||||
def url_for(self, **kwargs):
|
||||
return self.client_v2.service_catalog.url_for(**kwargs)
|
||||
|
||||
@property
|
||||
def auth_token(self):
|
||||
return self.client_v2.auth_token
|
@@ -1,206 +0,0 @@
|
||||
# Copyright 2014, Huawei, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# @author: Haojie Jia, Huawei
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
#from heat.openstack.common import local
|
||||
#from neutron.openstack.common import local
|
||||
#from heat.common import exception
|
||||
from neutron.common import exceptions
|
||||
#from heat.common import wsgi
|
||||
from neutron import wsgi
|
||||
#from neutron.openstack.common import context
|
||||
from oslo_context import context
|
||||
#from heat.openstack.common import importutils
|
||||
#from neutron.openstack.common import importutils
|
||||
from oslo.utils import importutils
|
||||
#from heat.openstack.common import uuidutils
|
||||
#from neutron.openstack.common import uuidutils
|
||||
from oslo.utils import uuidutils
|
||||
|
||||
|
||||
def generate_request_id():
|
||||
return 'req-' + uuidutils.generate_uuid()
|
||||
|
||||
|
||||
class RequestContext(context.RequestContext):
|
||||
|
||||
"""
|
||||
Stores information about the security context under which the user
|
||||
accesses the system, as well as additional request information.
|
||||
"""
|
||||
|
||||
def __init__(self, auth_token=None, username=None, password=None,
|
||||
aws_creds=None, tenant=None,
|
||||
tenant_id=None, auth_url=None, roles=None, is_admin=False,
|
||||
region_name=None, read_only=False, show_deleted=False,
|
||||
owner_is_tenant=True, overwrite=True,
|
||||
trust_id=None, trustor_user_id=None,
|
||||
**kwargs):
|
||||
"""
|
||||
:param overwrite: Set to False to ensure that the greenthread local
|
||||
copy of the index is not overwritten.
|
||||
|
||||
:param kwargs: Extra arguments that might be present, but we ignore
|
||||
because they possibly came in from older rpc messages.
|
||||
"""
|
||||
super(RequestContext, self).__init__(auth_token=auth_token,
|
||||
user=username, tenant=tenant,
|
||||
is_admin=is_admin,
|
||||
read_only=read_only,
|
||||
show_deleted=show_deleted,
|
||||
request_id='unused')
|
||||
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.aws_creds = aws_creds
|
||||
self.tenant_id = tenant_id
|
||||
self.auth_url = auth_url
|
||||
self.roles = roles or []
|
||||
self.region_name = region_name
|
||||
self.owner_is_tenant = owner_is_tenant
|
||||
# if overwrite or not hasattr(local.store, 'context'):
|
||||
# self.update_store()
|
||||
self._session = None
|
||||
self.trust_id = trust_id
|
||||
self.trustor_user_id = trustor_user_id
|
||||
|
||||
# def update_store(self):
|
||||
# local.store.context = self
|
||||
|
||||
def to_dict(self):
|
||||
return {'auth_token': self.auth_token,
|
||||
'username': self.username,
|
||||
'password': self.password,
|
||||
'aws_creds': self.aws_creds,
|
||||
'tenant': self.tenant,
|
||||
'tenant_id': self.tenant_id,
|
||||
'trust_id': self.trust_id,
|
||||
'trustor_user_id': self.trustor_user_id,
|
||||
'auth_url': self.auth_url,
|
||||
'roles': self.roles,
|
||||
'is_admin': self.is_admin,
|
||||
'region_name': self.region_name}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, values):
|
||||
return cls(**values)
|
||||
|
||||
@property
|
||||
def owner(self):
|
||||
"""Return the owner to correlate with an image."""
|
||||
return self.tenant if self.owner_is_tenant else self.user
|
||||
|
||||
|
||||
def get_admin_context(read_deleted="no"):
|
||||
return RequestContext(is_admin=True)
|
||||
|
||||
|
||||
class ContextMiddleware(wsgi.Middleware):
|
||||
|
||||
opts = [cfg.BoolOpt('owner_is_tenant', default=True),
|
||||
cfg.StrOpt('admin_role', default='admin')]
|
||||
|
||||
def __init__(self, app, conf, **local_conf):
|
||||
cfg.CONF.register_opts(self.opts)
|
||||
|
||||
# Determine the context class to use
|
||||
self.ctxcls = RequestContext
|
||||
if 'context_class' in local_conf:
|
||||
self.ctxcls = importutils.import_class(local_conf['context_class'])
|
||||
|
||||
super(ContextMiddleware, self).__init__(app)
|
||||
|
||||
def make_context(self, *args, **kwargs):
|
||||
"""
|
||||
Create a context with the given arguments.
|
||||
"""
|
||||
kwargs.setdefault('owner_is_tenant', cfg.CONF.owner_is_tenant)
|
||||
|
||||
return self.ctxcls(*args, **kwargs)
|
||||
|
||||
def process_request(self, req):
|
||||
"""
|
||||
Extract any authentication information in the request and
|
||||
construct an appropriate context from it.
|
||||
|
||||
A few scenarios exist:
|
||||
|
||||
1. If X-Auth-Token is passed in, then consult TENANT and ROLE headers
|
||||
to determine permissions.
|
||||
|
||||
2. An X-Auth-Token was passed in, but the Identity-Status is not
|
||||
confirmed. For now, just raising a NotAuthenticated exception.
|
||||
|
||||
3. X-Auth-Token is omitted. If we were using Keystone, then the
|
||||
tokenauth middleware would have rejected the request, so we must be
|
||||
using NoAuth. In that case, assume that is_admin=True.
|
||||
"""
|
||||
headers = req.headers
|
||||
|
||||
try:
|
||||
"""
|
||||
This sets the username/password to the admin user because you
|
||||
need this information in order to perform token authentication.
|
||||
The real 'username' is the 'tenant'.
|
||||
|
||||
We should also check here to see if X-Auth-Token is not set and
|
||||
in that case we should assign the user/pass directly as the real
|
||||
username/password and token as None. 'tenant' should still be
|
||||
the username.
|
||||
"""
|
||||
|
||||
username = None
|
||||
password = None
|
||||
aws_creds = None
|
||||
|
||||
if headers.get('X-Auth-User') is not None:
|
||||
username = headers.get('X-Auth-User')
|
||||
password = headers.get('X-Auth-Key')
|
||||
elif headers.get('X-Auth-EC2-Creds') is not None:
|
||||
aws_creds = headers.get('X-Auth-EC2-Creds')
|
||||
|
||||
token = headers.get('X-Auth-Token')
|
||||
tenant = headers.get('X-Tenant-Name')
|
||||
tenant_id = headers.get('X-Tenant-Id')
|
||||
auth_url = headers.get('X-Auth-Url')
|
||||
roles = headers.get('X-Roles')
|
||||
if roles is not None:
|
||||
roles = roles.split(',')
|
||||
|
||||
except Exception:
|
||||
raise exception.NotAuthenticated()
|
||||
|
||||
req.context = self.make_context(auth_token=token,
|
||||
tenant=tenant, tenant_id=tenant_id,
|
||||
aws_creds=aws_creds,
|
||||
username=username,
|
||||
password=password,
|
||||
auth_url=auth_url, roles=roles,
|
||||
is_admin=True)
|
||||
|
||||
|
||||
def ContextMiddleware_filter_factory(global_conf, **local_conf):
|
||||
"""
|
||||
Factory method for paste.deploy
|
||||
"""
|
||||
conf = global_conf.copy()
|
||||
conf.update(local_conf)
|
||||
|
||||
def filter(app):
|
||||
return ContextMiddleware(app, conf)
|
||||
|
||||
return filter
|
@@ -1,718 +0,0 @@
|
||||
# Copyright 2014, Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from neutron.api.rpc.handlers import dvr_rpc
|
||||
from neutron.common import constants as n_const
|
||||
from neutron.common import utils as n_utils
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.openvswitch.common import constants
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# A class to represent a DVR-hosted subnet including vif_ports resident on
|
||||
# that subnet
|
||||
class LocalDVRSubnetMapping:
|
||||
def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
|
||||
# set of commpute ports on on this dvr subnet
|
||||
self.compute_ports = {}
|
||||
self.subnet = subnet
|
||||
self.csnat_ofport = csnat_ofport
|
||||
self.dvr_owned = False
|
||||
|
||||
def __str__(self):
|
||||
return ("subnet = %s compute_ports = %s csnat_port = %s"
|
||||
" is_dvr_owned = %s" %
|
||||
(self.subnet, self.get_compute_ofports(),
|
||||
self.get_csnat_ofport(), self.is_dvr_owned()))
|
||||
|
||||
def get_subnet_info(self):
|
||||
return self.subnet
|
||||
|
||||
def set_dvr_owned(self, owned):
|
||||
self.dvr_owned = owned
|
||||
|
||||
def is_dvr_owned(self):
|
||||
return self.dvr_owned
|
||||
|
||||
def add_compute_ofport(self, vif_id, ofport):
|
||||
self.compute_ports[vif_id] = ofport
|
||||
|
||||
def remove_compute_ofport(self, vif_id):
|
||||
self.compute_ports.pop(vif_id, 0)
|
||||
|
||||
def remove_all_compute_ofports(self):
|
||||
self.compute_ports.clear()
|
||||
|
||||
def get_compute_ofports(self):
|
||||
return self.compute_ports
|
||||
|
||||
def set_csnat_ofport(self, ofport):
|
||||
self.csnat_ofport = ofport
|
||||
|
||||
def get_csnat_ofport(self):
|
||||
return self.csnat_ofport
|
||||
|
||||
|
||||
class OVSPort:
|
||||
def __init__(self, id, ofport, mac, device_owner):
|
||||
self.id = id
|
||||
self.mac = mac
|
||||
self.ofport = ofport
|
||||
self.subnets = set()
|
||||
self.device_owner = device_owner
|
||||
|
||||
def __str__(self):
|
||||
return ("OVSPort: id = %s, ofport = %s, mac = %s,"
|
||||
"device_owner = %s, subnets = %s" %
|
||||
(self.id, self.ofport, self.mac,
|
||||
self.device_owner, self.subnets))
|
||||
|
||||
def add_subnet(self, subnet_id):
|
||||
self.subnets.add(subnet_id)
|
||||
|
||||
def remove_subnet(self, subnet_id):
|
||||
self.subnets.remove(subnet_id)
|
||||
|
||||
def remove_all_subnets(self):
|
||||
self.subnets.clear()
|
||||
|
||||
def get_subnets(self):
|
||||
return self.subnets
|
||||
|
||||
def get_device_owner(self):
|
||||
return self.device_owner
|
||||
|
||||
def get_mac(self):
|
||||
return self.mac
|
||||
|
||||
def get_ofport(self):
|
||||
return self.ofport
|
||||
|
||||
|
||||
class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
|
||||
'''
|
||||
Implements OVS-based DVR(Distributed Virtual Router), for overlay networks.
|
||||
'''
|
||||
# history
|
||||
# 1.0 Initial version
|
||||
|
||||
def __init__(self, context, plugin_rpc, integ_br, tun_br,
|
||||
patch_int_ofport=constants.OFPORT_INVALID,
|
||||
patch_tun_ofport=constants.OFPORT_INVALID,
|
||||
host=None, enable_tunneling=False,
|
||||
enable_distributed_routing=False):
|
||||
self.context = context
|
||||
self.plugin_rpc = plugin_rpc
|
||||
self.int_br = integ_br
|
||||
self.tun_br = tun_br
|
||||
self.patch_int_ofport = patch_int_ofport
|
||||
self.patch_tun_ofport = patch_tun_ofport
|
||||
self.host = host
|
||||
self.enable_tunneling = enable_tunneling
|
||||
self.enable_distributed_routing = enable_distributed_routing
|
||||
|
||||
def reset_ovs_parameters(self, integ_br, tun_br,
|
||||
patch_int_ofport, patch_tun_ofport):
|
||||
'''Reset the openvswitch parameters'''
|
||||
if not (self.enable_tunneling and self.enable_distributed_routing):
|
||||
return
|
||||
self.int_br = integ_br
|
||||
self.tun_br = tun_br
|
||||
self.patch_int_ofport = patch_int_ofport
|
||||
self.patch_tun_ofport = patch_tun_ofport
|
||||
|
||||
def setup_dvr_flows_on_integ_tun_br(self):
|
||||
'''Setup up initial dvr flows into br-int and br-tun'''
|
||||
if not (self.enable_tunneling and self.enable_distributed_routing):
|
||||
return
|
||||
LOG.debug("L2 Agent operating in DVR Mode")
|
||||
self.dvr_mac_address = None
|
||||
self.local_dvr_map = {}
|
||||
self.local_csnat_map = {}
|
||||
self.local_ports = {}
|
||||
self.registered_dvr_macs = set()
|
||||
# get the local DVR MAC Address
|
||||
try:
|
||||
details = self.plugin_rpc.get_dvr_mac_address_by_host(
|
||||
self.context, self.host)
|
||||
LOG.debug("L2 Agent DVR: Received response for "
|
||||
"get_dvr_mac_address_by_host() from "
|
||||
"plugin: %r", details)
|
||||
self.dvr_mac_address = details['mac_address']
|
||||
except Exception:
|
||||
LOG.error(_("DVR: Failed to obtain local DVR Mac address"))
|
||||
self.enable_distributed_routing = False
|
||||
# switch all traffic using L2 learning
|
||||
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
|
||||
priority=1, actions="normal")
|
||||
return
|
||||
|
||||
# Remove existing flows in integration bridge
|
||||
self.int_br.remove_all_flows()
|
||||
|
||||
# Add a canary flow to int_br to track OVS restarts
|
||||
self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0,
|
||||
actions="drop")
|
||||
|
||||
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=1,
|
||||
actions="drop")
|
||||
|
||||
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
|
||||
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
|
||||
priority=1,
|
||||
actions="normal")
|
||||
|
||||
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
|
||||
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
|
||||
for mac in dvr_macs:
|
||||
if mac['mac_address'] == self.dvr_mac_address:
|
||||
continue
|
||||
# Table 0 (default) will now sort DVR traffic from other
|
||||
# traffic depending on in_port
|
||||
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
|
||||
priority=2,
|
||||
in_port=self.patch_tun_ofport,
|
||||
dl_src=mac['mac_address'],
|
||||
actions="resubmit(,%s)" %
|
||||
constants.DVR_TO_SRC_MAC)
|
||||
# Table DVR_NOT_LEARN ensures unique dvr macs in the cloud
|
||||
# are not learnt, as they may
|
||||
# result in flow explosions
|
||||
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
|
||||
priority=1,
|
||||
dl_src=mac['mac_address'],
|
||||
actions="output:%s" % self.patch_int_ofport)
|
||||
|
||||
self.registered_dvr_macs.add(mac['mac_address'])
|
||||
|
||||
self.tun_br.add_flow(priority=1,
|
||||
in_port=self.patch_int_ofport,
|
||||
actions="resubmit(,%s)" %
|
||||
constants.DVR_PROCESS)
|
||||
# table-miss should be sent to learning table
|
||||
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
|
||||
priority=0,
|
||||
actions="resubmit(,%s)" %
|
||||
constants.LEARN_FROM_TUN)
|
||||
|
||||
self.tun_br.add_flow(table=constants.DVR_PROCESS,
|
||||
priority=0,
|
||||
actions="resubmit(,%s)" %
|
||||
constants.PATCH_LV_TO_TUN)
|
||||
|
||||
def dvr_mac_address_update(self, dvr_macs):
|
||||
if not (self.enable_tunneling and self.enable_distributed_routing):
|
||||
return
|
||||
|
||||
LOG.debug("DVR Mac address update with host-mac: %s", dvr_macs)
|
||||
|
||||
if not self.dvr_mac_address:
|
||||
LOG.debug("Self mac unknown, ignoring this "
|
||||
"dvr_mac_address_update() ")
|
||||
return
|
||||
|
||||
dvr_host_macs = set()
|
||||
for entry in dvr_macs:
|
||||
if entry['mac_address'] == self.dvr_mac_address:
|
||||
continue
|
||||
dvr_host_macs.add(entry['mac_address'])
|
||||
|
||||
if dvr_host_macs == self.registered_dvr_macs:
|
||||
LOG.debug("DVR Mac address already up to date")
|
||||
return
|
||||
|
||||
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
|
||||
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
|
||||
|
||||
for oldmac in dvr_macs_removed:
|
||||
self.int_br.delete_flows(table=constants.LOCAL_SWITCHING,
|
||||
in_port=self.patch_tun_ofport,
|
||||
dl_src=oldmac)
|
||||
self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN,
|
||||
dl_src=oldmac)
|
||||
LOG.debug("Removed DVR MAC flow for %s", oldmac)
|
||||
self.registered_dvr_macs.remove(oldmac)
|
||||
|
||||
for newmac in dvr_macs_added:
|
||||
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
|
||||
priority=2,
|
||||
in_port=self.patch_tun_ofport,
|
||||
dl_src=newmac,
|
||||
actions="resubmit(,%s)" %
|
||||
constants.DVR_TO_SRC_MAC)
|
||||
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
|
||||
priority=1,
|
||||
dl_src=newmac,
|
||||
actions="output:%s" % self.patch_int_ofport)
|
||||
LOG.debug("Added DVR MAC flow for %s", newmac)
|
||||
self.registered_dvr_macs.add(newmac)
|
||||
|
||||
def is_dvr_router_interface(self, device_owner):
|
||||
return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE
|
||||
|
||||
def process_tunneled_network(self, network_type, lvid, segmentation_id):
|
||||
if not (self.enable_tunneling and self.enable_distributed_routing):
|
||||
return
|
||||
self.tun_br.add_flow(table=constants.TUN_TABLE[network_type],
|
||||
priority=1,
|
||||
tun_id=segmentation_id,
|
||||
actions="mod_vlan_vid:%s,"
|
||||
"resubmit(,%s)" %
|
||||
(lvid, constants.DVR_NOT_LEARN))
|
||||
|
||||
def _bind_distributed_router_interface_port(self, port, fixed_ips,
|
||||
device_owner, local_vlan):
|
||||
# since router port must have only one fixed IP, directly
|
||||
# use fixed_ips[0]
|
||||
subnet_uuid = fixed_ips[0]['subnet_id']
|
||||
csnat_ofport = constants.OFPORT_INVALID
|
||||
ldm = None
|
||||
if subnet_uuid in self.local_dvr_map:
|
||||
ldm = self.local_dvr_map[subnet_uuid]
|
||||
csnat_ofport = ldm.get_csnat_ofport()
|
||||
if csnat_ofport == constants.OFPORT_INVALID:
|
||||
LOG.error(_("DVR: Duplicate DVR router interface detected "
|
||||
"for subnet %s"), subnet_uuid)
|
||||
return
|
||||
else:
|
||||
# set up LocalDVRSubnetMapping available for this subnet
|
||||
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
|
||||
subnet_uuid)
|
||||
if not subnet_info:
|
||||
LOG.error(_("DVR: Unable to retrieve subnet information"
|
||||
" for subnet_id %s"), subnet_uuid)
|
||||
return
|
||||
LOG.debug("get_subnet_for_dvr for subnet %s returned with %s" %
|
||||
(subnet_uuid, subnet_info))
|
||||
ldm = LocalDVRSubnetMapping(subnet_info)
|
||||
self.local_dvr_map[subnet_uuid] = ldm
|
||||
|
||||
# DVR takes over
|
||||
ldm.set_dvr_owned(True)
|
||||
|
||||
subnet_info = ldm.get_subnet_info()
|
||||
ip_subnet = subnet_info['cidr']
|
||||
local_compute_ports = (
|
||||
self.plugin_rpc.get_ports_on_host_by_subnet(
|
||||
self.context, self.host, subnet_uuid))
|
||||
LOG.debug("DVR: List of ports received from "
|
||||
"get_ports_on_host_by_subnet %s",
|
||||
local_compute_ports)
|
||||
for prt in local_compute_ports:
|
||||
vif = self.int_br.get_vif_port_by_id(prt['id'])
|
||||
if not vif:
|
||||
continue
|
||||
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
|
||||
if vif.vif_id in self.local_ports:
|
||||
# ensure if a compute port is already on
|
||||
# a different dvr routed subnet
|
||||
# if yes, queue this subnet to that port
|
||||
ovsport = self.local_ports[vif.vif_id]
|
||||
ovsport.add_subnet(subnet_uuid)
|
||||
else:
|
||||
# the compute port is discovered first here that its on
|
||||
# a dvr routed subnet queue this subnet to that port
|
||||
ovsport = OVSPort(vif.vif_id, vif.ofport,
|
||||
vif.vif_mac, prt['device_owner'])
|
||||
|
||||
ovsport.add_subnet(subnet_uuid)
|
||||
self.local_ports[vif.vif_id] = ovsport
|
||||
|
||||
# create rule for just this vm port
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=4,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=ovsport.get_mac(),
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
"output:%s" %
|
||||
(subnet_info['gateway_mac'],
|
||||
ovsport.get_ofport()))
|
||||
|
||||
# create rule to forward broadcast/multicast frames from dvr
|
||||
# router interface to appropriate local tenant ports
|
||||
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
|
||||
if csnat_ofport != constants.OFPORT_INVALID:
|
||||
ofports = str(csnat_ofport) + ',' + ofports
|
||||
if ofports:
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=2,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet,
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
"output:%s" %
|
||||
(subnet_info['gateway_mac'], ofports))
|
||||
|
||||
self.tun_br.add_flow(table=constants.DVR_PROCESS,
|
||||
priority=3,
|
||||
dl_vlan=local_vlan,
|
||||
proto='arp',
|
||||
nw_dst=subnet_info['gateway_ip'],
|
||||
actions="drop")
|
||||
|
||||
self.tun_br.add_flow(table=constants.DVR_PROCESS,
|
||||
priority=2,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=port.vif_mac,
|
||||
actions="drop")
|
||||
|
||||
self.tun_br.add_flow(table=constants.DVR_PROCESS,
|
||||
priority=1,
|
||||
dl_vlan=local_vlan,
|
||||
dl_src=port.vif_mac,
|
||||
actions="mod_dl_src:%s,resubmit(,%s)" %
|
||||
(self.dvr_mac_address,
|
||||
constants.PATCH_LV_TO_TUN))
|
||||
|
||||
# the dvr router interface is itself a port, so capture it
|
||||
# queue this subnet to that port. A subnet appears only once as
|
||||
# a router interface on any given router
|
||||
ovsport = OVSPort(port.vif_id, port.ofport,
|
||||
port.vif_mac, device_owner)
|
||||
ovsport.add_subnet(subnet_uuid)
|
||||
self.local_ports[port.vif_id] = ovsport
|
||||
|
||||
def _bind_port_on_dvr_subnet(self, port, fixed_ips,
|
||||
device_owner, local_vlan):
|
||||
# Handle new compute port added use-case
|
||||
subnet_uuid = None
|
||||
for ips in fixed_ips:
|
||||
if ips['subnet_id'] not in self.local_dvr_map:
|
||||
continue
|
||||
subnet_uuid = ips['subnet_id']
|
||||
ldm = self.local_dvr_map[subnet_uuid]
|
||||
if not ldm.is_dvr_owned():
|
||||
# well this is CSNAT stuff, let dvr come in
|
||||
# and do plumbing for this vm later
|
||||
continue
|
||||
|
||||
# This confirms that this compute port belongs
|
||||
# to a dvr hosted subnet.
|
||||
# Accommodate this VM Port into the existing rule in
|
||||
# the integration bridge
|
||||
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
|
||||
subnet_info = ldm.get_subnet_info()
|
||||
ip_subnet = subnet_info['cidr']
|
||||
csnat_ofport = ldm.get_csnat_ofport()
|
||||
ldm.add_compute_ofport(port.vif_id, port.ofport)
|
||||
if port.vif_id in self.local_ports:
|
||||
# ensure if a compute port is already on a different
|
||||
# dvr routed subnet
|
||||
# if yes, queue this subnet to that port
|
||||
ovsport = self.local_ports[port.vif_id]
|
||||
ovsport.add_subnet(subnet_uuid)
|
||||
else:
|
||||
# the compute port is discovered first here that its
|
||||
# on a dvr routed subnet, queue this subnet to that port
|
||||
ovsport = OVSPort(port.vif_id, port.ofport,
|
||||
port.vif_mac, device_owner)
|
||||
|
||||
ovsport.add_subnet(subnet_uuid)
|
||||
self.local_ports[port.vif_id] = ovsport
|
||||
# create a rule for this vm port
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=4,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=ovsport.get_mac(),
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
"output:%s" %
|
||||
(subnet_info['gateway_mac'],
|
||||
ovsport.get_ofport()))
|
||||
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
|
||||
|
||||
if csnat_ofport != constants.OFPORT_INVALID:
|
||||
ofports = str(csnat_ofport) + ',' + ofports
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=2,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet,
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
" output:%s" %
|
||||
(subnet_info['gateway_mac'], ofports))
|
||||
|
||||
def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips,
|
||||
device_owner, local_vlan):
|
||||
if port.vif_id in self.local_ports:
|
||||
# throw an error if CSNAT port is already on a different
|
||||
# dvr routed subnet
|
||||
ovsport = self.local_ports[port.vif_id]
|
||||
subs = list(ovsport.get_subnets())
|
||||
LOG.error(_("Centralized-SNAT port %s already seen on "),
|
||||
port.vif_id)
|
||||
LOG.error(_("a different subnet %s"), subs[0])
|
||||
return
|
||||
# since centralized-SNAT (CSNAT) port must have only one fixed
|
||||
# IP, directly use fixed_ips[0]
|
||||
subnet_uuid = fixed_ips[0]['subnet_id']
|
||||
ldm = None
|
||||
subnet_info = None
|
||||
if subnet_uuid not in self.local_dvr_map:
|
||||
# no csnat ports seen on this subnet - create csnat state
|
||||
# for this subnet
|
||||
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
|
||||
subnet_uuid)
|
||||
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
|
||||
self.local_dvr_map[subnet_uuid] = ldm
|
||||
else:
|
||||
ldm = self.local_dvr_map[subnet_uuid]
|
||||
subnet_info = ldm.get_subnet_info()
|
||||
# Store csnat OF Port in the existing DVRSubnetMap
|
||||
ldm.set_csnat_ofport(port.ofport)
|
||||
|
||||
# create ovsPort footprint for csnat port
|
||||
ovsport = OVSPort(port.vif_id, port.ofport,
|
||||
port.vif_mac, device_owner)
|
||||
ovsport.add_subnet(subnet_uuid)
|
||||
self.local_ports[port.vif_id] = ovsport
|
||||
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=4,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=ovsport.get_mac(),
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
" output:%s" %
|
||||
(subnet_info['gateway_mac'],
|
||||
ovsport.get_ofport()))
|
||||
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
|
||||
ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
|
||||
ip_subnet = subnet_info['cidr']
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=2,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet,
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
" output:%s" %
|
||||
(subnet_info['gateway_mac'], ofports))
|
||||
|
||||
def bind_port_to_dvr(self, port, network_type, fixed_ips,
|
||||
device_owner, local_vlan_id):
|
||||
# a port coming up as distributed router interface
|
||||
if not (self.enable_tunneling and self.enable_distributed_routing):
|
||||
return
|
||||
|
||||
if network_type not in constants.TUNNEL_NETWORK_TYPES:
|
||||
return
|
||||
|
||||
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
|
||||
self._bind_distributed_router_interface_port(port, fixed_ips,
|
||||
device_owner,
|
||||
local_vlan_id)
|
||||
|
||||
if device_owner and n_utils.is_dvr_serviced(device_owner):
|
||||
self._bind_port_on_dvr_subnet(port, fixed_ips,
|
||||
device_owner,
|
||||
local_vlan_id)
|
||||
|
||||
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
|
||||
self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips,
|
||||
device_owner,
|
||||
local_vlan_id)
|
||||
|
||||
def _unbind_distributed_router_interface_port(self, port, local_vlan):
|
||||
|
||||
ovsport = self.local_ports[port.vif_id]
|
||||
|
||||
# removal of distributed router interface
|
||||
subnet_ids = ovsport.get_subnets()
|
||||
subnet_set = set(subnet_ids)
|
||||
# ensure we process for all the subnets laid on this removed port
|
||||
for sub_uuid in subnet_set:
|
||||
if sub_uuid not in self.local_dvr_map:
|
||||
continue
|
||||
|
||||
ldm = self.local_dvr_map[sub_uuid]
|
||||
subnet_info = ldm.get_subnet_info()
|
||||
ip_subnet = subnet_info['cidr']
|
||||
|
||||
# DVR is no more owner
|
||||
ldm.set_dvr_owned(False)
|
||||
|
||||
# remove all vm rules for this dvr subnet
|
||||
# clear of compute_ports altogether
|
||||
compute_ports = ldm.get_compute_ofports()
|
||||
for vif_id in compute_ports:
|
||||
ovsport = self.local_ports[vif_id]
|
||||
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=ovsport.get_mac())
|
||||
ldm.remove_all_compute_ofports()
|
||||
|
||||
if ldm.get_csnat_ofport() != -1:
|
||||
# If there is a csnat port on this agent, preserve
|
||||
# the local_dvr_map state
|
||||
ofports = str(ldm.get_csnat_ofport())
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=2,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet,
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
" output:%s" %
|
||||
(subnet_info['gateway_mac'], ofports))
|
||||
else:
|
||||
# removed port is a distributed router interface
|
||||
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
|
||||
proto='ip', dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet)
|
||||
# remove subnet from local_dvr_map as no dvr (or) csnat
|
||||
# ports available on this agent anymore
|
||||
self.local_dvr_map.pop(sub_uuid, None)
|
||||
|
||||
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
|
||||
dl_vlan=local_vlan,
|
||||
proto='arp',
|
||||
nw_dst=subnet_info['gateway_ip'])
|
||||
ovsport.remove_subnet(sub_uuid)
|
||||
|
||||
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=port.vif_mac)
|
||||
|
||||
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
|
||||
dl_vlan=local_vlan,
|
||||
dl_src=port.vif_mac)
|
||||
# release port state
|
||||
self.local_ports.pop(port.vif_id, None)
|
||||
|
||||
def _unbind_port_on_dvr_subnet(self, port, local_vlan):
|
||||
|
||||
ovsport = self.local_ports[port.vif_id]
|
||||
# This confirms that this compute port being removed belonged
|
||||
# to a dvr hosted subnet.
|
||||
# Accommodate this VM Port into the existing rule in
|
||||
# the integration bridge
|
||||
LOG.debug("DVR: Removing plumbing for compute port %s", port)
|
||||
subnet_ids = ovsport.get_subnets()
|
||||
# ensure we process for all the subnets laid on this port
|
||||
for sub_uuid in subnet_ids:
|
||||
if sub_uuid not in self.local_dvr_map:
|
||||
continue
|
||||
|
||||
ldm = self.local_dvr_map[sub_uuid]
|
||||
subnet_info = ldm.get_subnet_info()
|
||||
ldm.remove_compute_ofport(port.vif_id)
|
||||
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
|
||||
ip_subnet = subnet_info['cidr']
|
||||
|
||||
# first remove this vm port rule
|
||||
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=ovsport.get_mac())
|
||||
if ldm.get_csnat_ofport() != -1:
|
||||
# If there is a csnat port on this agent, preserve
|
||||
# the local_dvr_map state
|
||||
ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=2,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet,
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
" output:%s" %
|
||||
(subnet_info['gateway_mac'], ofports))
|
||||
else:
|
||||
if ofports:
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=2,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet,
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
" output:%s" %
|
||||
(subnet_info['gateway_mac'],
|
||||
ofports))
|
||||
else:
|
||||
# remove the flow altogether, as no ports (both csnat/
|
||||
# compute) are available on this subnet in this
|
||||
# agent
|
||||
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet)
|
||||
# release port state
|
||||
self.local_ports.pop(port.vif_id, None)
|
||||
|
||||
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan):
|
||||
|
||||
ovsport = self.local_ports[port.vif_id]
|
||||
# This confirms that this compute port being removed belonged
|
||||
# to a dvr hosted subnet.
|
||||
# Accommodate this VM Port into the existing rule in
|
||||
# the integration bridge
|
||||
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
|
||||
sub_uuid = list(ovsport.get_subnets())[0]
|
||||
# ensure we process for all the subnets laid on this port
|
||||
if sub_uuid not in self.local_dvr_map:
|
||||
return
|
||||
ldm = self.local_dvr_map[sub_uuid]
|
||||
subnet_info = ldm.get_subnet_info()
|
||||
ip_subnet = subnet_info['cidr']
|
||||
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
|
||||
# then remove csnat port rule
|
||||
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
|
||||
dl_vlan=local_vlan,
|
||||
dl_dst=ovsport.get_mac())
|
||||
|
||||
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
|
||||
if ofports:
|
||||
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
|
||||
priority=2,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet,
|
||||
actions="strip_vlan,mod_dl_src:%s,"
|
||||
" output:%s" %
|
||||
(subnet_info['gateway_mac'], ofports))
|
||||
else:
|
||||
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
|
||||
proto='ip',
|
||||
dl_vlan=local_vlan,
|
||||
nw_dst=ip_subnet)
|
||||
if not ldm.is_dvr_owned():
|
||||
# if not owned by DVR (only used for csnat), remove this
|
||||
# subnet state altogether
|
||||
self.local_dvr_map.pop(sub_uuid, None)
|
||||
|
||||
# release port state
|
||||
self.local_ports.pop(port.vif_id, None)
|
||||
|
||||
def unbind_port_from_dvr(self, vif_port, local_vlan_id):
|
||||
if not (self.enable_tunneling and self.enable_distributed_routing):
|
||||
return
|
||||
# Handle port removed use-case
|
||||
if vif_port and vif_port.vif_id not in self.local_ports:
|
||||
LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
|
||||
return
|
||||
|
||||
ovsport = self.local_ports[vif_port.vif_id]
|
||||
device_owner = ovsport.get_device_owner()
|
||||
|
||||
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
|
||||
self._unbind_distributed_router_interface_port(vif_port,
|
||||
local_vlan_id)
|
||||
|
||||
if device_owner and n_utils.is_dvr_serviced(device_owner):
|
||||
self._unbind_port_on_dvr_subnet(vif_port, local_vlan_id)
|
||||
|
||||
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
|
||||
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
|
||||
local_vlan_id)
|
@@ -1,13 +0,0 @@
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
@@ -1,135 +0,0 @@
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from neutron.agent.common import config
|
||||
from neutron.plugins.common import constants as p_const
|
||||
from neutron.plugins.l2_proxy.common import constants
|
||||
|
||||
|
||||
DEFAULT_BRIDGE_MAPPINGS = []
|
||||
DEFAULT_VLAN_RANGES = []
|
||||
DEFAULT_TUNNEL_RANGES = []
|
||||
DEFAULT_TUNNEL_TYPES = []
|
||||
|
||||
ovs_opts = [
|
||||
cfg.StrOpt('integration_bridge', default='br-int',
|
||||
help=_("Integration bridge to use.")),
|
||||
cfg.BoolOpt('enable_tunneling', default=False,
|
||||
help=_("Enable tunneling support.")),
|
||||
cfg.StrOpt('tunnel_bridge', default='br-tun',
|
||||
help=_("Tunnel bridge to use.")),
|
||||
cfg.StrOpt('int_peer_patch_port', default='patch-tun',
|
||||
help=_("Peer patch port in integration bridge for tunnel "
|
||||
"bridge.")),
|
||||
cfg.StrOpt('tun_peer_patch_port', default='patch-int',
|
||||
help=_("Peer patch port in tunnel bridge for integration "
|
||||
"bridge.")),
|
||||
cfg.StrOpt('local_ip', default='',
|
||||
help=_("Local IP address of GRE tunnel endpoints.")),
|
||||
cfg.ListOpt('bridge_mappings',
|
||||
default=DEFAULT_BRIDGE_MAPPINGS,
|
||||
help=_("List of <physical_network>:<bridge>. "
|
||||
"Deprecated for ofagent.")),
|
||||
cfg.StrOpt('tenant_network_type', default='local',
|
||||
help=_("Network type for tenant networks "
|
||||
"(local, vlan, gre, vxlan, or none).")),
|
||||
cfg.ListOpt('network_vlan_ranges',
|
||||
default=DEFAULT_VLAN_RANGES,
|
||||
help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
|
||||
"or <physical_network>.")),
|
||||
cfg.ListOpt('tunnel_id_ranges',
|
||||
default=DEFAULT_TUNNEL_RANGES,
|
||||
help=_("List of <tun_min>:<tun_max>.")),
|
||||
cfg.StrOpt('tunnel_type', default='',
|
||||
help=_("The type of tunnels to use when utilizing tunnels, "
|
||||
"either 'gre' or 'vxlan'.")),
|
||||
cfg.BoolOpt('use_veth_interconnection', default=False,
|
||||
help=_("Use veths instead of patch ports to interconnect the "
|
||||
"integration bridge to physical bridges.")),
|
||||
]
|
||||
|
||||
agent_opts = [
|
||||
cfg.IntOpt('polling_interval', default=2,
|
||||
help=_("The number of seconds the agent will wait between "
|
||||
"polling for local device changes.")),
|
||||
cfg.BoolOpt('minimize_polling',
|
||||
default=True,
|
||||
help=_("Minimize polling by monitoring ovsdb for interface "
|
||||
"changes.")),
|
||||
cfg.IntOpt('ovsdb_monitor_respawn_interval',
|
||||
default=constants.DEFAULT_OVSDBMON_RESPAWN,
|
||||
help=_("The number of seconds to wait before respawning the "
|
||||
"ovsdb monitor after losing communication with it.")),
|
||||
cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
|
||||
help=_("Network types supported by the agent "
|
||||
"(gre and/or vxlan).")),
|
||||
cfg.IntOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT,
|
||||
help=_("The UDP port to use for VXLAN tunnels.")),
|
||||
cfg.IntOpt('veth_mtu',
|
||||
help=_("MTU size of veth interfaces")),
|
||||
cfg.BoolOpt('l2_population', default=False,
|
||||
help=_("Use ML2 l2population mechanism driver to learn "
|
||||
"remote MAC and IPs and improve tunnel scalability.")),
|
||||
cfg.BoolOpt('arp_responder', default=False,
|
||||
help=_("Enable local ARP responder if it is supported. "
|
||||
"Requires OVS 2.1 and ML2 l2population driver. "
|
||||
"Allows the switch (when supporting an overlay) "
|
||||
"to respond to an ARP request locally without "
|
||||
"performing a costly ARP broadcast into the overlay.")),
|
||||
cfg.BoolOpt('dont_fragment', default=True,
|
||||
help=_("Set or un-set the don't fragment (DF) bit on "
|
||||
"outgoing IP packet carrying GRE/VXLAN tunnel.")),
|
||||
cfg.BoolOpt('enable_distributed_routing', default=False,
|
||||
help=_("Make the l2 agent run in DVR mode.")),
|
||||
|
||||
# add by jiahaojie 00209498
|
||||
cfg.StrOpt('os_region_name', default=None,
|
||||
help=_("region name to use")),
|
||||
cfg.StrOpt('keystone_auth_url', default='http://127.0.0.1:35357/v2.0',
|
||||
help=_("keystone auth url to use")),
|
||||
cfg.StrOpt('neutron_user_name',
|
||||
help=_("access neutron user name to use")),
|
||||
cfg.StrOpt('neutron_password',
|
||||
help=_("access neutron password to use")),
|
||||
cfg.StrOpt('neutron_tenant_name',
|
||||
help=_("access neutron tenant to use")),
|
||||
|
||||
# add by jiahaojie 00209498
|
||||
cfg.StrOpt('cascading_os_region_name', default=None,
|
||||
help=_("region name to use")),
|
||||
cfg.StrOpt('cascading_auth_url', default='http://127.0.0.1:35357/v2.0',
|
||||
help=_("keystone auth url to use")),
|
||||
cfg.StrOpt('cascading_user_name',
|
||||
help=_("access neutron user name to use")),
|
||||
cfg.StrOpt('cascading_password',
|
||||
help=_("access neutron password to use")),
|
||||
cfg.StrOpt('cascading_tenant_name',
|
||||
help=_("access neutron tenant to use")),
|
||||
cfg.IntOpt('pagination_limit', default=-1,
|
||||
help=_("list ports pagination limit, default value is -1,"
|
||||
"means no pagination")),
|
||||
cfg.StrOpt('query_ports_mode', default='nova_proxy',
|
||||
help=_("query ports mode, default value is nova_proxy,"
|
||||
"means query ports from nova_proxy")),
|
||||
cfg.StrOpt('proxy_sock_path', default='/var/l2proxysock',
|
||||
help=_("socket path when query ports from nova_proxy")),
|
||||
]
|
||||
|
||||
|
||||
cfg.CONF.register_opts(ovs_opts, "OVS")
|
||||
cfg.CONF.register_opts(agent_opts, "AGENT")
|
||||
config.register_agent_state_opts_helper(cfg.CONF)
|
||||
config.register_root_helper(cfg.CONF)
|
@@ -1,73 +0,0 @@
|
||||
# Copyright (c) 2012 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from neutron.plugins.common import constants as p_const
|
||||
|
||||
|
||||
# Special vlan_id value in ovs_vlan_allocations table indicating flat network
|
||||
FLAT_VLAN_ID = -1
|
||||
|
||||
# Topic for tunnel notifications between the plugin and agent
|
||||
TUNNEL = 'tunnel'
|
||||
|
||||
# Name prefixes for veth device or patch port pair linking the integration
|
||||
# bridge with the physical bridge for a physical network
|
||||
PEER_INTEGRATION_PREFIX = 'int-'
|
||||
PEER_PHYSICAL_PREFIX = 'phy-'
|
||||
|
||||
# Nonexistent peer used to create patch ports without associating them, it
|
||||
# allows to define flows before association
|
||||
NONEXISTENT_PEER = 'nonexistent-peer'
|
||||
|
||||
# The different types of tunnels
|
||||
TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN]
|
||||
|
||||
# Various tables for DVR use of integration bridge flows
|
||||
LOCAL_SWITCHING = 0
|
||||
DVR_TO_SRC_MAC = 1
|
||||
|
||||
# Various tables for tunneling flows
|
||||
DVR_PROCESS = 1
|
||||
PATCH_LV_TO_TUN = 2
|
||||
GRE_TUN_TO_LV = 3
|
||||
VXLAN_TUN_TO_LV = 4
|
||||
DVR_NOT_LEARN = 9
|
||||
LEARN_FROM_TUN = 10
|
||||
UCAST_TO_TUN = 20
|
||||
ARP_RESPONDER = 21
|
||||
FLOOD_TO_TUN = 22
|
||||
|
||||
# Tables for integration bridge
|
||||
# Table 0 is used for forwarding.
|
||||
CANARY_TABLE = 23
|
||||
|
||||
# Map tunnel types to tables number
|
||||
TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV,
|
||||
p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV}
|
||||
|
||||
# The default respawn interval for the ovsdb monitor
|
||||
DEFAULT_OVSDBMON_RESPAWN = 30
|
||||
|
||||
# Represent invalid OF Port
|
||||
OFPORT_INVALID = -1
|
||||
|
||||
ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
|
||||
'mod_dl_src:%(mac)s,'
|
||||
'load:0x2->NXM_OF_ARP_OP[],'
|
||||
'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
|
||||
'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
|
||||
'load:%(mac)#x->NXM_NX_ARP_SHA[],'
|
||||
'load:%(ip)#x->NXM_OF_ARP_SPA[],'
|
||||
'in_port')
|
Reference in New Issue
Block a user