Initial commit: Introducing AWS Drivers for OpenStack
This commit is contained in:
commit
fa89beafab
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
*.pyc
|
||||
*~
|
16
README.md
Normal file
16
README.md
Normal file
@ -0,0 +1,16 @@
|
||||
This repository contains Openstack drivers for Amazon EC2.
|
||||
These drivers provide the capability to spin up Openstack instances, images, volumes and networks on Amazon EC2.
|
||||
Following Openstack projects are supported
|
||||
* Nova
|
||||
* Neutron
|
||||
* Cinder
|
||||
* Glance
|
||||
|
||||
## Setup
|
||||
The setup instructions are project specific. Check the project directories for specifics.
|
||||
|
||||
## Status
|
||||
Development is active. Can be used for individual testing.
|
||||
|
||||
## Contributions
|
||||
TODO
|
12
cinder/README.md
Normal file
12
cinder/README.md
Normal file
@ -0,0 +1,12 @@
|
||||
## Setup
|
||||
|
||||
### Prerequisites
|
||||
1. Working green field OpenStack deployment.
|
||||
2. No prior cinder nodes. This service does not work if cinder backends are already configured.
|
||||
|
||||
|
||||
### Instructions
|
||||
1. Copy source directory (cinder/volume/drivers/aws) to cinder-volume module directory {cinder-volume-root}/cinder/volume/drivers
|
||||
2. Update configuration file (cinder.conf) used by cinder-volume service. Set it to
|
||||
volume_driver=cinder.volume.drivers.aws.ebs.EBSDriver
|
||||
3. Restart cinder-volume service.
|
117
cinder/tests/unit/volume/drivers/test_ebs.py
Normal file
117
cinder/tests/unit/volume/drivers/test_ebs.py
Normal file
@ -0,0 +1,117 @@
|
||||
"""
|
||||
Copyright 2016 Platform9 Systems Inc.(http://www.platform9.com)
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
import mock
|
||||
|
||||
from oslo_service import loopingcall
|
||||
from cinder import context
|
||||
from cinder import test
|
||||
from cinder.exception import APITimeout, NotFound, VolumeNotFound
|
||||
from cinder.volume.drivers.aws import ebs
|
||||
from moto import mock_ec2
|
||||
|
||||
class EBSVolumeTestCase(test.TestCase):
    """Unit tests for EBSDriver, with EC2 mocked out by moto."""

    @mock_ec2
    def setUp(self):
        super(EBSVolumeTestCase, self).setUp()
        # Point the driver at fake credentials; moto intercepts the EC2 calls.
        ebs.CONF.AWS.region_name = 'us-east-1'
        ebs.CONF.AWS.access_key = 'fake-key'
        ebs.CONF.AWS.secret_key = 'fake-secret'
        ebs.CONF.AWS.az = 'us-east-1a'
        self._driver = ebs.EBSDriver()
        ctxt = context.get_admin_context()
        self._driver.do_setup(ctxt)

    def _stub_volume(self, **kwargs):
        """Return a plausible cinder volume dict; fields overridable via kwargs."""
        uuid = u'c20aba21-6ef6-446b-b374-45733b4883ba'
        name = u'volume-00000001'
        size = 1
        created_at = '2016-10-19 23:22:33'
        volume = dict()
        volume['id'] = kwargs.get('id', uuid)
        volume['display_name'] = kwargs.get('display_name', name)
        volume['size'] = kwargs.get('size', size)
        volume['provider_location'] = kwargs.get('provider_location', None)
        volume['volume_type_id'] = kwargs.get('volume_type_id', None)
        volume['project_id'] = kwargs.get('project_id', 'aws_proj_700')
        # Bug fix: the lookup key used to be 'create_at', so an explicit
        # created_at=... override was silently ignored.
        volume['created_at'] = kwargs.get('created_at', created_at)
        return volume

    def _stub_snapshot(self, **kwargs):
        """Return a plausible cinder snapshot dict; fields overridable via kwargs."""
        uuid = u'0196f961-c294-4a2a-923e-01ef5e30c2c9'
        created_at = '2016-10-19 23:22:33'
        ss = dict()

        ss['id'] = kwargs.get('id', uuid)
        ss['project_id'] = kwargs.get('project_id', 'aws_proj_700')
        # Same 'create_at' -> 'created_at' key fix as in _stub_volume.
        ss['created_at'] = kwargs.get('created_at', created_at)
        ss['volume'] = kwargs.get('volume', self._stub_volume())

        return ss

    @mock_ec2
    def test_volume_create_success(self):
        self.assertIsNone(self._driver.create_volume(self._stub_volume()))

    @mock_ec2
    @mock.patch('cinder.volume.drivers.aws.ebs.EBSDriver._wait_for_create')
    def test_volume_create_fails(self, mock_wait):
        # Simulate a volume that never reaches 'available'.
        def wait(*args):
            def _wait():
                raise loopingcall.LoopingCallDone(False)
            timer = loopingcall.FixedIntervalLoopingCall(_wait)
            return timer.start(interval=1).wait()

        mock_wait.side_effect = wait
        self.assertRaises(APITimeout, self._driver.create_volume,
                          self._stub_volume())

    @mock_ec2
    def test_volume_deletion(self):
        vol = self._stub_volume()
        self._driver.create_volume(vol)
        self.assertIsNone(self._driver.delete_volume(vol))

    @mock_ec2
    @mock.patch('cinder.volume.drivers.aws.ebs.EBSDriver._find')
    def test_volume_deletion_not_found(self, mock_find):
        # Deleting a volume whose backend object is gone must not raise.
        vol = self._stub_volume()
        mock_find.side_effect = NotFound
        self.assertIsNone(self._driver.delete_volume(vol))

    @mock_ec2
    def test_snapshot(self):
        vol = self._stub_volume()
        snapshot = self._stub_snapshot()
        self._driver.create_volume(vol)
        self.assertIsNone(self._driver.create_snapshot(snapshot))

    @mock_ec2
    @mock.patch('cinder.volume.drivers.aws.ebs.EBSDriver._find')
    def test_snapshot_volume_not_found(self, mock_find):
        mock_find.side_effect = NotFound
        ss = self._stub_snapshot()
        self.assertRaises(VolumeNotFound, self._driver.create_snapshot, ss)

    @mock_ec2
    @mock.patch('cinder.volume.drivers.aws.ebs.EBSDriver._wait_for_snapshot')
    def test_snapshot_create_fails(self, mock_wait):
        # Simulate a snapshot that never reaches 'completed'.
        def wait(*args):
            def _wait():
                raise loopingcall.LoopingCallDone(False)

            timer = loopingcall.FixedIntervalLoopingCall(_wait)
            return timer.start(interval=1).wait()

        mock_wait.side_effect = wait
        ss = self._stub_snapshot()
        self._driver.create_volume(ss['volume'])
        self.assertRaises(APITimeout, self._driver.create_snapshot, ss)
|
0
cinder/volume/drivers/aws/__init__.py
Normal file
0
cinder/volume/drivers/aws/__init__.py
Normal file
214
cinder/volume/drivers/aws/ebs.py
Normal file
214
cinder/volume/drivers/aws/ebs.py
Normal file
@ -0,0 +1,214 @@
|
||||
"""
|
||||
Copyright 2016 Platform9 Systems Inc.(http://www.platform9.com)
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
import time
|
||||
|
||||
from boto import ec2
|
||||
from boto.regioninfo import RegionInfo
|
||||
from oslo_service import loopingcall
|
||||
from oslo_log import log as logging
|
||||
from oslo_config import cfg
|
||||
|
||||
from cinder.i18n import _LE
|
||||
from cinder.exception import VolumeNotFound, NotFound, APITimeout, InvalidConfigurationValue
|
||||
from cinder.volume.driver import BaseVD
|
||||
|
||||
|
||||
# Options for reaching an AWS account; registered under the [AWS] group.
aws_group = cfg.OptGroup(name='AWS', title='Options to connect to an AWS environment')
aws_opts = [
    cfg.StrOpt('secret_key', help='Secret key of AWS account', secret=True),
    cfg.StrOpt('access_key', help='Access key of AWS account', secret=True),
    cfg.StrOpt('region_name', help='AWS region'),
    cfg.StrOpt('az', help='AWS availability zone'),
    cfg.IntOpt('wait_time_min', help='Maximum wait time for AWS operations', default=5)
]

# EBS pool sizing knobs; registered at the default group level.
ebs_opts = [
    cfg.StrOpt('ebs_pool_name', help='Storage pool name'),
    cfg.IntOpt('ebs_free_capacity_gb', help='Free space available on EBS storage pool',
               default=1024),
    cfg.IntOpt('ebs_total_capacity_gb', help='Total space available on EBS storage pool',
               default=1024)
]

CONF = cfg.CONF
CONF.register_group(aws_group)
CONF.register_opts(aws_opts, group=aws_group)
CONF.register_opts(ebs_opts)
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EBSDriver(BaseVD):
    """Implements the cinder volume interface with EBS as storage backend.

    Cinder volumes and snapshots are mapped onto EBS volumes/snapshots in a
    single configured availability zone; each EBS object is tagged with the
    cinder UUID so it can be located again via ``_find``.
    """

    def __init__(self, *args, **kwargs):
        super(EBSDriver, self).__init__(*args, **kwargs)
        self.VERSION = '1.0.0'
        # Maximum time to wait on any single AWS operation, in seconds.
        self._wait_time_sec = 60 * CONF.AWS.wait_time_min

        self._check_config()
        region_name = CONF.AWS.region_name
        endpoint = '.'.join(['ec2', region_name, 'amazonaws.com'])
        region = RegionInfo(name=region_name, endpoint=endpoint)
        self._conn = ec2.EC2Connection(aws_access_key_id=CONF.AWS.access_key,
                                       aws_secret_access_key=CONF.AWS.secret_key,
                                       region=region)
        # Resort to the single configured AZ for now.
        # TODO: expose this through the API.
        az = CONF.AWS.az
        # List comprehension instead of filter() so this stays indexable on
        # Python 3, where filter() returns a lazy iterator.
        self._zone = [z for z in self._conn.get_all_zones()
                      if z.name == az][0]
        self.set_initialized()

    def _check_config(self):
        """Raise InvalidConfigurationValue for any unset mandatory AWS option.

        The options are enumerated explicitly rather than via eval() on the
        option names -- eval was a needless code-execution hazard.
        """
        required = {'CONF.AWS.access_key': CONF.AWS.access_key,
                    'CONF.AWS.secret_key': CONF.AWS.secret_key,
                    'CONF.AWS.region_name': CONF.AWS.region_name,
                    'CONF.AWS.az': CONF.AWS.az}
        for option, value in required.items():
            if value is None:
                raise InvalidConfigurationValue(value=None, option=option)

    def do_setup(self, context):
        # All connection setup happens in __init__; nothing further needed.
        pass

    def _wait_for_create(self, id, final_state):
        """Poll the EBS volume `id` until it reaches `final_state`.

        :returns: True when final_state was reached, False on timeout.
        """
        def _wait_for_status(start_time):
            current_time = time.time()

            if current_time - start_time > self._wait_time_sec:
                raise loopingcall.LoopingCallDone(False)

            obj = self._conn.get_all_volumes([id])[0]
            if obj.status == final_state:
                raise loopingcall.LoopingCallDone(True)

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_status,
                                                     time.time())
        return timer.start(interval=5).wait()

    def _wait_for_snapshot(self, id, final_state):
        """Poll the EBS snapshot `id` until it reaches `final_state`.

        :returns: True when final_state was reached, False on timeout.
        """
        def _wait_for_status(start_time):
            if time.time() - start_time > self._wait_time_sec:
                raise loopingcall.LoopingCallDone(False)

            obj = self._conn.get_all_snapshots([id])[0]
            if obj.status == final_state:
                raise loopingcall.LoopingCallDone(True)

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_status,
                                                     time.time())
        return timer.start(interval=5).wait()

    def create_volume(self, volume):
        """Create an EBS volume and tag it with the cinder volume metadata.

        :raises APITimeout: if the volume never becomes 'available'.
        """
        size = volume['size']
        ebs_vol = self._conn.create_volume(size, self._zone)
        if self._wait_for_create(ebs_vol.id, 'available') is False:
            raise APITimeout(service='EC2')
        self._conn.create_tags([ebs_vol.id],
                               {'project_id': volume['project_id'],
                                'uuid': volume['id'],
                                'is_clone': False,
                                'created_at': volume['created_at']})

    def _find(self, obj_id, find_func):
        """Return the first EBS object whose 'uuid' tag equals obj_id.

        :param find_func: a boto lookup accepting a ``filters`` kwarg
            (e.g. get_all_volumes / get_all_snapshots).
        :raises NotFound: if no matching object exists.
        """
        ebs_objs = find_func(filters={'tag:uuid': obj_id})
        if len(ebs_objs) == 0:
            raise NotFound()
        return ebs_objs[0]

    def delete_volume(self, volume):
        """Delete the EBS volume backing the given cinder volume.

        Best-effort: a missing backend volume is logged and ignored so
        cinder can still clean up its own record.
        """
        try:
            ebs_vol = self._find(volume['id'], self._conn.get_all_volumes)
        except NotFound:
            LOG.error(_LE('Volume %s was not found'), volume['id'])
            return
        self._conn.delete_volume(ebs_vol.id)

    def check_for_setup_error(self):
        # TODO: throw errors if AWS config is broken.
        pass

    def create_export(self, context, volume, connector):
        pass

    def ensure_export(self, context, volume):
        pass

    def remove_export(self, context, volume):
        pass

    def initialize_connection(self, volume, connector, initiator_data=None):
        """Return connection info carrying the backing EBS volume id.

        :raises VolumeNotFound: if no tagged EBS volume exists.
        """
        try:
            ebs_vol = self._find(volume.id, self._conn.get_all_volumes)
        except NotFound:
            raise VolumeNotFound(volume_id=volume.id)
        conn_info = dict(data=dict(volume_id=ebs_vol.id))
        return conn_info

    def terminate_connection(self, volume, connector, **kwargs):
        pass

    def _update_volume_stats(self):
        """Refresh self._stats with (static) capacity/capability data."""
        data = dict()
        data['volume_backend_name'] = 'ebs'
        data['vendor_name'] = 'Amazon, Inc.'
        data['driver_version'] = '0.1'
        data['storage_protocol'] = 'iscsi'
        pool = dict(pool_name='ebs',
                    free_capacity_gb=CONF.ebs_free_capacity_gb,
                    total_capacity_gb=CONF.ebs_total_capacity_gb,
                    provisioned_capacity_gb=0,
                    reserved_percentage=0,
                    location_info=dict(),
                    QoS_support=False,
                    max_over_subscription_ratio=1.0,
                    thin_provisioning_support=False,
                    thick_provisioning_support=True,
                    total_volumes=0)
        data['pools'] = [pool]
        self._stats = data

    def get_volume_stats(self, refresh=False):
        """Return driver statistics, refreshing them first if requested."""
        if refresh:
            self._update_volume_stats()
        return self._stats

    def create_snapshot(self, snapshot):
        """Snapshot the EBS volume backing snapshot['volume'].

        :raises VolumeNotFound: if the backing EBS volume does not exist.
        :raises APITimeout: if the snapshot never reaches 'completed'.
        """
        os_vol = snapshot['volume']
        try:
            ebs_vol = self._find(os_vol['id'], self._conn.get_all_volumes)
        except NotFound:
            # Pass the id as a keyword so the exception message formats
            # correctly (it was previously a bare positional argument,
            # unlike the matching raise in initialize_connection).
            raise VolumeNotFound(volume_id=os_vol['id'])

        ebs_snap = self._conn.create_snapshot(ebs_vol.id)
        if self._wait_for_snapshot(ebs_snap.id, 'completed') is False:
            raise APITimeout(service='EC2')

        self._conn.create_tags([ebs_snap.id],
                               {'project_id': snapshot['project_id'],
                                'uuid': snapshot['id'],
                                'is_clone': True,
                                'created_at': snapshot['created_at']})

    def delete_snapshot(self, snapshot):
        """Delete the EBS snapshot backing the given cinder snapshot.

        Best-effort, like delete_volume: a missing snapshot is only logged.
        """
        try:
            ebs_ss = self._find(snapshot['id'], self._conn.get_all_snapshots)
        except NotFound:
            LOG.error(_LE('Snapshot %s was not found'), snapshot['id'])
            return
        self._conn.delete_snapshot(ebs_ss.id)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
24
glance/README.md
Normal file
24
glance/README.md
Normal file
@ -0,0 +1,24 @@
|
||||
# Setup
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Working green field OpenStack deployment (code currently based out of stable/liberty)
|
||||
* The virtualenv used by glance should have Amazon boto package installed
|
||||
|
||||
## Components
|
||||
Glance store driver: Handles glance image endpoint for AWS AMIs
|
||||
|
||||
## Instructions
|
||||
1. Copy the glance_store/_drivers directory to <glance_store_root>/glance_store/_drivers
|
||||
2. Update the configuration file -- /etc/glance/glance-api.conf
|
||||
```
|
||||
[glance_store]
|
||||
default_store = aws
|
||||
stores = aws
|
||||
[AWS]
|
||||
secret_key = <your aws secret access key>
|
||||
access_key = <your aws access key>
|
||||
region_name = <aws region to use>
|
||||
```
|
||||
3. Restart the glance-api service
|
||||
4. Populate AMI as glance image using helper script.
|
180
glance/create-glance-images.py
Normal file
180
glance/create-glance-images.py
Normal file
@ -0,0 +1,180 @@
|
||||
# Copyright (c) 2016 Platform9 Systems Inc. (http://www.platform9.com)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
'''
|
||||
Run this script as: python create-glance-images.py <access-key> <secret-key> <region-name>
|
||||
'''
|
||||
|
||||
import boto3
|
||||
import ConfigParser
|
||||
import hashlib
|
||||
import keystoneauth1
|
||||
import os
|
||||
import requests
|
||||
import sys
|
||||
import uuid
|
||||
|
||||
from keystoneauth1 import session
|
||||
from keystoneauth1.identity import v3
|
||||
|
||||
class AwsImages(object):
    """Discovers AMIs owned by the AWS account and registers them in glance."""

    def __init__(self, credentials):
        self.ec2_client = boto3.client('ec2', **credentials)
        self.glance_client = RestClient()
        # AWS image type -> glance container/disk format.
        self.aws_image_types = {'machine': 'ami', 'kernel': 'aki', 'ramdisk': 'ari'}

    def register_aws_images(self):
        """Register every AMI owned by this account as a glance image."""
        response = self.ec2_client.describe_images(Owners=['self'])
        images = response['Images']

        for img in images:
            self.create_image(self._aws_to_ostack_formatter(img))

    def create_image(self, img_data):
        """
        Create an OpenStack image.
        :param img_data: dict -- Describes AWS AMI
        :returns: None
        :raises: requests.HTTPError
        """
        sys.stdout.write('Creating image: ' + str(img_data) + ' \n')
        glance_id = img_data['id']
        ami_id = img_data['aws_image_id']
        img_props = {
            'locations': [{'url': 'aws://%s/%s' % (ami_id, glance_id),
                           'metadata': {'ami_id': ami_id}}]
        }
        try:
            resp = self.glance_client.request('POST', '/v2/images', json=img_data)
            resp.raise_for_status()
            # Need to update the image in the registry with location information
            # so the status changes from 'queued' to 'active'.
            self.update_properties(glance_id, img_props)
        except keystoneauth1.exceptions.http.Conflict:
            # Ignore the error if the image already exists.
            pass
        except requests.HTTPError as e:
            raise e

    def update_properties(self, imageid, props):
        """
        Add or update a set of image properties on an image.
        :param imageid: str -- The OpenStack image UUID
        :param props: dict -- Image properties to update
        """
        if not props:
            return
        patch_body = []
        # items() rather than iteritems() keeps this working on Python 3 too.
        for name, value in props.items():
            patch_body.append({
                'op': 'replace',
                'path': '/%s' % name,
                'value': value
            })
        resp = self.glance_client.request('PATCH', '/v2/images/%s' % imageid,
                                          json=patch_body)
        resp.raise_for_status()

    def _get_image_uuid(self, ami_id):
        """Return a deterministic glance UUID derived from the AMI id."""
        md = hashlib.md5()
        # encode() so hashing also works with text strings on Python 3.
        md.update(ami_id.encode('utf-8'))
        return str(uuid.UUID(bytes=md.digest()))

    def _aws_to_ostack_formatter(self, aws_obj):
        """
        Converts aws img data to OpenStack img data format.
        :param aws_obj: dict -- aws img data
        :return: dict -- ostack img data
        """
        visibility = 'public' if aws_obj['Public'] is True else 'private'
        # Check number and size (if any) of EBS and instance-store volumes.
        ebs_vol_sizes = []
        num_istore_vols = 0
        for bdm in aws_obj.get('BlockDeviceMappings'):
            if 'Ebs' in bdm:
                ebs_vol_sizes.append(bdm['Ebs']['VolumeSize'])
            elif 'VirtualName' in bdm and bdm['VirtualName'].startswith('ephemeral'):
                # For instance-store volumes, size is not available.
                num_istore_vols += 1
        # Bug fix: this previously read aws_obj.get('RootDeviceType' ==
        # 'instance-store'), i.e. aws_obj.get(False), which always returned
        # None, so the instance-store count was never bumped.
        if aws_obj.get('RootDeviceType') == 'instance-store' and num_istore_vols == 0:
            # The list of bdms can be empty for instance-store volumes.
            num_istore_vols = 1
        # Generate a glance image uuid based on the AWS image id.
        image_id = self._get_image_uuid(aws_obj.get('ImageId'))

        return {
            'id': image_id,
            'name': aws_obj.get('Name') or aws_obj.get('ImageId'),
            'container_format': self.aws_image_types[aws_obj.get('ImageType')],
            'disk_format': self.aws_image_types[aws_obj.get('ImageType')],
            'visibility': visibility,
            'pf9_description': aws_obj.get('Description') or 'Discovered image',
            'aws_image_id': aws_obj.get('ImageId'),
            'aws_root_device_type': aws_obj.get('RootDeviceType'),
            'aws_ebs_vol_sizes': str(ebs_vol_sizes),
            'aws_num_istore_vols': str(num_istore_vols),
        }
|
||||
|
||||
|
||||
|
||||
class RestClient(object):
    """Minimal glance REST client authenticated through a keystone v3 session."""

    def __init__(self):
        # Normalize the auth URL from the environment to the keystone v3 form.
        os_auth_url = os.getenv('OS_AUTH_URL')
        os_auth_url = os_auth_url.replace('v2.0', 'v3')
        if not os_auth_url.endswith('v3'):
            os_auth_url += '/v3'

        os_username = os.getenv('OS_USERNAME')
        os_password = os.getenv('OS_PASSWORD')
        os_tenant_name = os.getenv('OS_TENANT_NAME')
        os_region_name = os.getenv('OS_REGION_NAME')  # currently unused

        # NOTE(review): assumes the auth URL contains 'keystone/v3' so the
        # glance endpoint can be derived by substitution -- confirm against
        # the target deployment's URL layout.
        self.glance_endpoint = os_auth_url.replace('keystone/v3', 'glance')
        sys.stdout.write('Using glance endpoint: ' + self.glance_endpoint)

        v3_auth = v3.Password(auth_url = os_auth_url, username = os_username,
                              password = os_password, project_name = os_tenant_name,
                              project_domain_name = 'default', user_domain_name = 'default')
        # TODO: TLS certificate verification is disabled here; re-enable.
        self.sess = session.Session(auth=v3_auth, verify=False) # verify=True

    def request(self, method, path, **kwargs):
        """
        Make a requests request with retry/relogin on auth failure.

        :param method: HTTP verb, e.g. 'GET', 'POST', 'PATCH'
        :param path: path appended to the glance endpoint, e.g. '/v2/images'
        :raises requests.HTTPError: on any non-2xx response
        """
        url = self.glance_endpoint + path
        headers = self.sess.get_auth_headers()
        if method == 'PUT' or method == 'PATCH':
            # Glance v2 requires this media type for image PATCH calls; use
            # plain requests here since the session would override headers.
            headers['Content-Type'] = 'application/openstack-images-v2.1-json-patch'
            resp = requests.request(method, url, headers=headers, **kwargs)
        else:
            resp = self.sess.request(url, method, headers=headers, **kwargs)
        resp.raise_for_status()
        return resp
|
||||
|
||||
|
||||
### MAIN ###

if __name__ == '__main__':
    # Usage: <script> <access-key> <secret-key> <region-name>
    if len(sys.argv) != 4:
        sys.stderr.write('Incorrect usage: this script takes exactly 3 arguments.\n')
        sys.exit(1)

    credentials = {}
    credentials['aws_access_key_id'] = sys.argv[1]
    credentials['aws_secret_access_key'] = sys.argv[2]
    credentials['region_name'] = sys.argv[3]

    aws_images = AwsImages(credentials)
    aws_images.register_aws_images()
|
||||
|
173
glance/glance_store/_drivers/aws.py
Normal file
173
glance/glance_store/_drivers/aws.py
Normal file
@ -0,0 +1,173 @@
|
||||
# Copyright (c) 2016 Platform9 Systems Inc. (http://www.platform9.com)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import socket
|
||||
|
||||
from six.moves import http_client
|
||||
from six.moves import urllib
|
||||
from oslo_config import cfg
|
||||
from ConfigParser import ConfigParser
|
||||
|
||||
from glance_store import capabilities
|
||||
from glance_store import exceptions
|
||||
from glance_store.i18n import _, _LE
|
||||
import glance_store.driver
|
||||
import glance_store.location
|
||||
|
||||
import boto3
|
||||
import botocore.exceptions
|
||||
|
||||
LOG = logging.getLogger(__name__)

MAX_REDIRECTS = 5
# URI scheme served by this store: 'aws://<ami-id>'.
STORE_SCHEME = 'aws'

# AWS credential/region options; registered under the [aws] group by Store.
aws_opts_group = cfg.OptGroup(name='aws', title='AWS specific options')
aws_opts = [
    cfg.StrOpt('access_key', help='AWS access key ID'),
    cfg.StrOpt('secret_key', help='AWS secret access key'),
    cfg.StrOpt('region_name', help='AWS region name'),
]
|
||||
|
||||
class StoreLocation(glance_store.location.StoreLocation):

    """Class describing an AWS URI of the form 'aws://<ami_id>'."""

    def __init__(self, store_specs, conf):
        super(StoreLocation, self).__init__(store_specs, conf)

    def process_specs(self):
        # Populate scheme/ami_id from the specs dict handed over by
        # glance_store; scheme defaults to 'aws'.
        self.scheme = self.specs.get('scheme', STORE_SCHEME)
        self.ami_id = self.specs.get('ami_id')

    def get_uri(self):
        """Return the canonical '<scheme>://<ami_id>' URI."""
        return "{}://{}".format(self.scheme, self.ami_id)

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python.

        :raises exceptions.BadStoreUri: if the scheme is wrong or the
            ami_id component is missing.
        """
        if not uri.startswith('%s://' % STORE_SCHEME):
            reason = (_("URI %(uri)s must start with %(scheme)s://") %
                      {'uri': uri, 'scheme': STORE_SCHEME})
            LOG.info(reason)
            raise exceptions.BadStoreUri(message=reason)
        pieces = urllib.parse.urlparse(uri)
        self.scheme = pieces.scheme
        # The AMI id lands in the netloc component of 'aws://<ami_id>'.
        ami_id = pieces.netloc
        if ami_id == '':
            LOG.info(_("No image ami_id specified in URL"))
            raise exceptions.BadStoreUri(uri=uri)
        self.ami_id = ami_id
|
||||
|
||||
|
||||
class Store(glance_store.driver.Store):

    """Glance store driver that exposes AWS AMIs as glance images.

    The class docstring previously read 'HTTP(S) Backend Adapter', which was
    copied from another driver and did not describe this class.
    """

    _CAPABILITIES = (capabilities.BitMasks.RW_ACCESS |
                     capabilities.BitMasks.DRIVER_REUSABLE)

    def __init__(self, conf):
        super(Store, self).__init__(conf)
        conf.register_group(aws_opts_group)
        conf.register_opts(aws_opts, group=aws_opts_group)
        self.credentials = {}
        self.credentials['aws_access_key_id'] = conf.aws.access_key
        self.credentials['aws_secret_access_key'] = conf.aws.secret_key
        self.credentials['region_name'] = conf.aws.region_name
        # boto3 client/resource are created lazily on first use.
        self.__ec2_client = None
        self.__ec2_resource = None

    def _get_ec2_client(self):
        """Return the boto3 EC2 client, creating it on first use."""
        if self.__ec2_client is None:
            self.__ec2_client = boto3.client('ec2', **self.credentials)
        return self.__ec2_client

    def _get_ec2_resource(self):
        """Return the boto3 EC2 resource, creating it on first use."""
        if self.__ec2_resource is None:
            self.__ec2_resource = boto3.resource('ec2', **self.credentials)
        return self.__ec2_resource

    @capabilities.check
    def get(self, location, offset=0, chunk_size=None, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location: `glance_store.location.Location` object, supplied
                         from glance_store.location.get_location_from_uri()
        """
        # The actual image bits live in AWS; yield a placeholder URI
        # together with the computed size.
        yield ('aws://generic', self.get_size(location, context))

    @capabilities.check
    def delete(self, location, context=None):
        """Takes a `glance_store.location.Location` object that indicates
        where to find the image file to delete

        :param location: `glance_store.location.Location` object, supplied
                         from glance_store.location.get_location_from_uri()
        """
        # Store URI is 'aws://<ami_id>/<glance_id>'; index 2 is the ami id.
        ami_id = location.get_store_uri().split('/')[2]
        aws_client = self._get_ec2_client()
        aws_imgs = aws_client.describe_images(Owners=['self'])['Images']
        for img in aws_imgs:
            if ami_id == img.get('ImageId'):
                # LOG.warning: warn() is a deprecated alias.
                LOG.warning('**** ID of ami being deleted: {}'.format(ami_id))
                aws_client.deregister_image(ImageId=ami_id)

    def get_schemes(self):
        """
        :retval tuple: containing valid scheme names to
                       associate with this store driver
        """
        return ('aws',)

    def get_size(self, location, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file, and returns the size

        :param location: `glance_store.location.Location` object, supplied
                         from glance_store.location.get_location_from_uri()
        :retval int: size of image file in bytes
        :raises exceptions.ImageDataNotFound: if the AMI does not exist
        """
        ami_id = location.get_store_uri().split('/')[2]
        ec2_resource = self._get_ec2_resource()
        image = ec2_resource.Image(ami_id)
        size = 0
        try:
            image.load()
            # No size info for instance-store volumes, so return 0 there.
            if image.root_device_type == 'ebs':
                for bdm in image.block_device_mappings:
                    if 'Ebs' in bdm and 'VolumeSize' in bdm['Ebs']:
                        LOG.debug('ebs info: %s', bdm['Ebs'])
                        size += bdm['Ebs']['VolumeSize']
                # Convert size in GB to bytes.
                size *= 1073741824
        except botocore.exceptions.ClientError as ce:
            if ce.response['Error']['Code'] == 'InvalidAMIID.NotFound':
                raise exceptions.ImageDataNotFound()
            else:
                raise exceptions.GlanceStoreException(ce.response['Error']['Code'])
        return size
|
102
neutron/README.md
Normal file
102
neutron/README.md
Normal file
@ -0,0 +1,102 @@
|
||||
## Setup
|
||||
|
||||
### Prerequisites
|
||||
1. Working green field OpenStack deployment (code currently based out of stable/liberty)
|
||||
2. No prior neutron agents. This service does not work if neutron l3 agent and ml2 drivers are already configured.
|
||||
|
||||
#### Components
|
||||
- ML2 core plugin with AWS Mechanism Driver handles **Networks, Subnets, Ports** and **Security Groups**.
|
||||
- AWS Router Service Plugin handles **Routers, Router Interfaces, Floating IPs**.
|
||||
|
||||
### Instructions
|
||||
1. Copy files from this repo into your neutron source tree:
|
||||
1. directory neutron/plugins/ml2/drivers/aws to neutron module directory {neutron-root}/neutron/plugins/ml2/drivers/
|
||||
2. neutron/services/l3_router/aws_router_plugin.py to {neutron-root}/neutron/services/l3_router/
|
||||
3. requirements.txt and setup.cfg to your {neutron-root}
|
||||
4. the three files under neutron/common to {neutron-root}/neutron/common/
|
||||
5. neutron/plugins/ml2/managers.py to {neutron-root}/neutron/plugins/ml2/
|
||||
|
||||
2. Update configuration files
|
||||
1. /etc/neutron/**neutron.conf** Set the following config options:
|
||||
```
|
||||
|
||||
[DEFAULT]
|
||||
service_plugins = aws_router
|
||||
core_plugin = ml2
|
||||
|
||||
[credentials]
|
||||
aws_access_key_id = <your aws access key>
|
||||
aws_secret_access_key = <your aws secret access key>
|
||||
region_name = <the region you would like to use>
|
||||
```
|
||||
|
||||
2. /etc/neutron/plugins/ml2/**ml2_conf.ini**
|
||||
|
||||
```
|
||||
|
||||
[ml2]
|
||||
type_drivers = local,flat
|
||||
tenant_network_types = local
|
||||
mechanism_drivers = aws
|
||||
|
||||
[ml2_type_flat]
|
||||
flat_networks = *
|
||||
```
|
||||
|
||||
3. Restart neutron server service
|
||||
> service openstack-neutron-server restart
|
||||
|
||||
### Creating network objects in AWS
|
||||
|
||||
**Neutron to AWS object mapping**
|
||||
|
||||
| Neutron | AWS |
|
||||
| --------------- | ---------------- |
|
||||
| Network | None |
|
||||
| Subnet | VPC + subnet |
|
||||
| Router | Internet Gateway (IG) |
|
||||
| Ports | None |
|
||||
|
||||
**Operations**
|
||||
|
||||
| Neutron | AWS |
|
||||
| --------------- | ---------------- |
|
||||
| Create Network | None |
|
||||
| Create Subnet with < /16 | Create VPC with /16 + subnet with given CIDR |
|
||||
| Create router | Create Internet Gateway |
|
||||
| Attach gateway | None |
|
||||
| Attach interface to router | Add VPC to the IG |
|
||||
| Create Floating IP | Create Elastic IP |
|
||||
| Associate FIP to an instance | Associate Elastic IP to instance |
|
||||
| Delete FIP | Delete elastic IP |
|
||||
| Delete subnet | Delete subnet within the VPC. Delete VPC if last subnet |
|
||||
| Delete Network | None |
|
||||
| Delete router interface | Remove VPC from IG |
|
||||
| Delete router | Remove IG |
|
||||
|
||||
#### Notes
|
||||
|
||||
**Networks**
|
||||
|
||||
1. Only supports tenant network creation. Tenant network subnet should be a CIDR smaller than /16. **create network** assumes /16 CIDR based on the first subnet that will be created in that network/vpc. This is needed as VPC in AWS needs a CIDR.
|
||||
- Example - 1 network (Paul) with 0 subnets in Openstack. AWS will have 0 VPCs and 0 subnets. Upon creating a subnet (Blart) under the Paul network with a CIDR of 12.12.1.5/26, the VPC on AWS will be created with CIDR 12.12.1.5/16, then a subnet under that VPC will be created with the actual CIDR (which is a subset of the VPC CIDR). The reason /16 is chosen for the CIDR of the VPC is because this is the largest mask allowed by AWS.
|
||||
2. Subsequent subnets can be created within the same network (VPC)
|
||||
3. When creating a subnet, set the allocation pool to x.x.x.4 - x.x.x.254 This is needed as AWS has reserved IPs from .0 till .3. http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html
|
||||
4. For Floating IPs to work, an External Network with subnet like CIDR 52.0.0.0/8 which covers all the IPs that AWS provides needs to be created. If AWS assigns an elastic IP from within 54.0.0.0/8, the external subnet should reflect that.
|
||||
|
||||
###### Limitations
|
||||
|
||||
1. Provider networks not supported today.
|
||||
2. External network subnet needs to be created manually depending on elastic IPs given by AWS.
|
||||
3. IPv6 not supported by AWS VPC (https://aws.amazon.com/vpc/faqs/).
|
||||
|
||||
**Routers**
|
||||
|
||||
1. Creating a Router will create an Internet Gateway on AWS without associating to any VPC.
|
||||
2. Add an Interface to Router connecting to any subnet will associate the VPC to that IG and also adds a default route of 0.0.0.0/0 to the IG.
|
||||
|
||||
###### Limitations
|
||||
|
||||
1. Adding an Interface with a subnet of another network will not work as an IG can be associated with a single VPC.
|
||||
2. Adding an Interface with a subnet of same network will not work because Internet Gateway gets associated with VPC and not Subnets.
|
||||
|
337
neutron/neutron/common/aws_utils.py
Normal file
337
neutron/neutron/common/aws_utils.py
Normal file
@ -0,0 +1,337 @@
|
||||
# Copyright 2016 Platform9 Systems Inc.(http://www.platform9.com)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools
from ConfigParser import ConfigParser

import boto3
import botocore
from novaclient.v2 import client as novaclient
from oslo_config import cfg
from oslo_log import log as logging

from neutron.common import exceptions
|
||||
# Configuration options for talking to AWS.  Values are read from the
# [AWS] section of the neutron configuration file.
aws_group = cfg.OptGroup(name='AWS', title='Options to connect to an AWS environment')
aws_opts = [
    cfg.StrOpt('secret_key', help='Secret key of AWS account', secret=True),
    cfg.StrOpt('access_key', help='Access key of AWS account', secret=True),
    cfg.StrOpt('region_name', help='AWS region'),
    cfg.StrOpt('az', help='AWS availability zone'),
    cfg.IntOpt('wait_time_min', help='Maximum wait time for AWS operations', default=5)
]

# Register the [AWS] group at import time so the options are available
# as soon as this module is loaded.
cfg.CONF.register_group(aws_group)
cfg.CONF.register_opts(aws_opts, group=aws_group)

LOG = logging.getLogger(__name__)
||||
def _process_exception(e, dry_run):
    """Translate an exception raised while talking to AWS into AwsException.

    When ``dry_run`` is set, a ``DryRunOperation`` error code means the
    request *would* have succeeded, so the exception is swallowed; any
    other dry-run failure is reported as an authorization problem.  This
    function returns normally only in that dry-run success case — every
    other path raises.

    :param e: exception caught while performing an AWS operation
    :param dry_run: whether the failed call was issued with DryRun=True
    :raises exceptions.AwsException: translated error for Neutron callers
    """
    if dry_run:
        # botocore nests the error code under 'Error'.  (The original code
        # read e.response['Code'], which ClientError never populates, so a
        # successful dry run was misreported as an auth failure.)
        error_code = None
        if isinstance(e, botocore.exceptions.ClientError):
            error_code = e.response['Error']['Code']
        if error_code != 'DryRunOperation':
            raise exceptions.AwsException(
                error_code='AuthFailure',
                message='Check your AWS authorization')
    elif isinstance(e, botocore.exceptions.ClientError):
        error_code = e.response['Error']['Code']
        error_message = e.response['Error']['Message']
        raise exceptions.AwsException(error_code=error_code,
                                      message=error_message)
    else:
        # TODO: This might display all Exceptions to the user which
        # might be irrelevant, keeping it until it becomes stable.
        # str(e) works on both Python 2 and 3; e.message is Py2-only and
        # missing from many exception types.
        error_message = str(e)
        raise exceptions.AwsException(error_code="NeutronError",
                                      message=error_message)
||||
def aws_exception(fn):
    """Decorator that converts errors raised by ``fn`` while talking to
    AWS into ``exceptions.AwsException`` via ``_process_exception``.

    The wrapped callable's ``dry_run`` keyword argument (if supplied)
    controls how the failure is interpreted.
    """
    # functools.wraps preserves the wrapped function's name and docstring,
    # which keeps logs and debuggers readable (the original wrapper did not).
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            _process_exception(e, kwargs.get('dry_run'))
    return wrapper
||||
class AwsUtils:
    """Helper wrapping the boto3 EC2 client/resource APIs for the driver.

    OpenStack objects (networks, subnets, routers) are located on AWS by
    the neutron UUID stored in a resource tag, hence the 'tag-value'
    filters used throughout.  Clients are created lazily and cached per
    instance.
    """

    def __init__(self):
        self.__ec2_client = None
        self.__ec2_resource = None
        self._nova_client = None
        # Raw option values from the [AWS] section of the config file.
        self._neutron_credentials = {
            'secret_key': cfg.CONF.AWS.secret_key,
            'access_key': cfg.CONF.AWS.access_key,
            'region_name': cfg.CONF.AWS.region_name,
            'az': cfg.CONF.AWS.az,
            'wait_time_min': cfg.CONF.AWS.wait_time_min
        }

    def _boto3_credentials(self):
        # boto3.client()/boto3.resource() accept only these keyword names;
        # the original code passed the raw option dict (with 'secret_key',
        # 'az', 'wait_time_min', ...), which boto3 rejects with TypeError.
        return {
            'aws_access_key_id': self._neutron_credentials['access_key'],
            'aws_secret_access_key': self._neutron_credentials['secret_key'],
            'region_name': self._neutron_credentials['region_name'],
        }

    def get_nova_client(self):
        """Return a cached nova client built from the deprecated
        nova_admin_* options (see neutron.common.config)."""
        if self._nova_client is None:
            self._nova_client = novaclient.Client(
                username=cfg.CONF.nova_admin_username,
                api_key=cfg.CONF.nova_admin_password,
                auth_url=cfg.CONF.nova_admin_auth_url,
                tenant_id=cfg.CONF.nova_admin_tenant_id,
                region_name=cfg.CONF.nova_region_name,
                insecure=True)
        return self._nova_client

    def _get_ec2_client(self):
        """Return a cached low-level EC2 client."""
        if self.__ec2_client is None:
            self.__ec2_client = boto3.client(
                'ec2', **self._boto3_credentials())
        return self.__ec2_client

    def _get_ec2_resource(self):
        """Return a cached high-level EC2 resource object."""
        if self.__ec2_resource is None:
            self.__ec2_resource = boto3.resource(
                'ec2', **self._boto3_credentials())
        return self.__ec2_resource

    # Internet Gateway Operations
    @aws_exception
    def get_internet_gw_from_router_id(self, router_id, dry_run=False):
        """Return the ID of the internet gateway tagged with the neutron
        router UUID, or None when no such gateway exists."""
        response = self._get_ec2_client().describe_internet_gateways(
            DryRun=dry_run,
            Filters=[
                {
                    'Name': 'tag-value',
                    'Values': [router_id]
                },
            ]
        )
        if 'InternetGateways' in response:
            for internet_gateway in response['InternetGateways']:
                if 'InternetGatewayId' in internet_gateway:
                    return internet_gateway['InternetGatewayId']
        return None

    @aws_exception
    def create_tags_internet_gw_from_router_id(self, router_id, tags_list, dry_run=False):
        """Apply tags_list to the internet gateway backing router_id."""
        ig_id = self.get_internet_gw_from_router_id(router_id, dry_run)
        internet_gw_res = self._get_ec2_resource().InternetGateway(ig_id)
        internet_gw_res.create_tags(Tags=tags_list)

    @aws_exception
    def delete_internet_gateway_by_router_id(self, router_id, dry_run=False):
        """Delete the internet gateway backing router_id."""
        ig_id = self.get_internet_gw_from_router_id(router_id, dry_run)
        self._get_ec2_client().delete_internet_gateway(
            DryRun=dry_run,
            InternetGatewayId=ig_id
        )

    @aws_exception
    def attach_internet_gateway(self, ig_id, vpc_id, dry_run=False):
        """Attach internet gateway ig_id to VPC vpc_id."""
        return self._get_ec2_client().attach_internet_gateway(
            DryRun=dry_run,
            InternetGatewayId=ig_id,
            VpcId=vpc_id
        )

    @aws_exception
    def detach_internet_gateway_by_router_id(self, router_id, dry_run=False):
        """Detach the internet gateway backing router_id from its VPC,
        if it is attached to one (no-op otherwise)."""
        # Forward dry_run to the lookup for consistency with the other
        # *_by_router_id methods (the original always performed it for real).
        ig_id = self.get_internet_gw_from_router_id(router_id, dry_run)
        ig_res = self._get_ec2_resource().InternetGateway(ig_id)
        if len(ig_res.attachments) > 0:
            vpc_id = ig_res.attachments[0]['VpcId']
            self._get_ec2_client().detach_internet_gateway(
                DryRun=dry_run,
                InternetGatewayId=ig_id,
                VpcId=vpc_id
            )

    @aws_exception
    def create_internet_gateway(self, dry_run=False):
        """Create an internet gateway; returns the raw API response."""
        return self._get_ec2_client().create_internet_gateway(DryRun=dry_run)

    @aws_exception
    def create_internet_gateway_resource(self, dry_run=False):
        """Create an internet gateway and return it as a boto3 resource."""
        internet_gw = self._get_ec2_client().create_internet_gateway(
            DryRun=dry_run)
        ig_id = internet_gw['InternetGateway']['InternetGatewayId']
        return self._get_ec2_resource().InternetGateway(ig_id)

    # Elastic IP Operations
    @aws_exception
    def get_elastic_addresses_by_elastic_ip(self, elastic_ip, dry_run=False):
        """Return the address records associated with elastic_ip."""
        eip_addresses = self._get_ec2_client().describe_addresses(
            DryRun=dry_run,
            PublicIps=[elastic_ip])
        return eip_addresses['Addresses']

    @aws_exception
    def associate_elastic_ip_to_ec2_instance(self, elastic_ip, ec2_instance_id, dry_run=False):
        """Associate elastic_ip with the EC2 instance ec2_instance_id.

        :raises exceptions.AwsException: if the address has no allocation ID
        """
        allocation_id = None
        eid_addresses = self.get_elastic_addresses_by_elastic_ip(
            elastic_ip, dry_run)
        if eid_addresses:
            allocation_id = eid_addresses[0].get('AllocationId')
        if allocation_id is None:
            raise exceptions.AwsException(error_code="Allocation ID",
                                          message="Allocation ID not found")
        return self._get_ec2_client().associate_address(
            DryRun=dry_run,
            InstanceId=ec2_instance_id,
            AllocationId=allocation_id
        )

    @aws_exception
    def allocate_elastic_ip(self, dry_run=False):
        """Allocate a new VPC-scoped elastic IP; returns the API response."""
        response = self._get_ec2_client().allocate_address(
            DryRun=dry_run,
            Domain='vpc'
        )
        return response

    @aws_exception
    def disassociate_elastic_ip_from_ec2_instance(self, elastic_ip, dry_run=False):
        """Disassociate elastic_ip from whatever it is attached to.

        :raises exceptions.AwsException: if the address has no association ID
        """
        association_id = None
        eid_addresses = self.get_elastic_addresses_by_elastic_ip(
            elastic_ip, dry_run)
        if eid_addresses:
            association_id = eid_addresses[0].get('AssociationId')
        if association_id is None:
            raise exceptions.AwsException(error_code="Association ID",
                                          message="Association ID not found")
        return self._get_ec2_client().disassociate_address(
            DryRun=dry_run,
            AssociationId=association_id
        )

    @aws_exception
    def delete_elastic_ip(self, elastic_ip, dry_run=False):
        """Release elastic_ip back to AWS.

        :raises exceptions.AwsException: if the address has no allocation ID
        """
        # Initialize before the lookup: the original referenced
        # allocation_id without ever assigning it when no address matched,
        # raising UnboundLocalError instead of the intended AwsException.
        allocation_id = None
        eid_addresses = self.get_elastic_addresses_by_elastic_ip(
            elastic_ip, dry_run)
        if eid_addresses:
            allocation_id = eid_addresses[0].get('AllocationId')
        if allocation_id is None:
            raise exceptions.AwsException(error_code="Allocation ID",
                                          message="Allocation ID not found")
        return self._get_ec2_client().release_address(
            DryRun=dry_run,
            AllocationId=allocation_id)

    # VPC Operations
    @aws_exception
    def get_vpc_from_neutron_network_id(self, neutron_network_id, dry_run=False):
        """Return the VPC ID tagged with the neutron network UUID, or None."""
        response = self._get_ec2_client().describe_vpcs(
            DryRun=dry_run,
            Filters=[
                {
                    'Name': 'tag-value',
                    'Values': [neutron_network_id]
                }
            ]
        )
        if 'Vpcs' in response:
            for vpc in response['Vpcs']:
                if 'VpcId' in vpc:
                    return vpc['VpcId']
        return None

    @aws_exception
    def create_vpc_and_tags(self, cidr, tags_list, dry_run=False):
        """Create a VPC with the given CIDR, tag it, and return its ID."""
        vpc_id = self._get_ec2_client().create_vpc(
            DryRun=dry_run,
            CidrBlock=cidr)['Vpc']['VpcId']
        vpc = self._get_ec2_resource().Vpc(vpc_id)
        vpc.create_tags(Tags=tags_list)
        return vpc_id

    @aws_exception
    def delete_vpc(self, vpc_id, dry_run=False):
        """Delete the VPC vpc_id."""
        self._get_ec2_client().delete_vpc(
            DryRun=dry_run,
            VpcId=vpc_id
        )

    @aws_exception
    def create_tags_for_vpc(self, neutron_network_id, tags_list):
        """Apply tags_list to the VPC backing neutron_network_id."""
        vpc_id = self.get_vpc_from_neutron_network_id(neutron_network_id)
        vpc_res = self._get_ec2_resource().Vpc(vpc_id)
        vpc_res.create_tags(Tags=tags_list)

    # Subnet Operations
    @aws_exception
    def create_subnet_and_tags(self, vpc_id, cidr, tags_list, dry_run=False):
        """Create a subnet under vpc_id with the given CIDR and tag it."""
        vpc = self._get_ec2_resource().Vpc(vpc_id)
        subnet = vpc.create_subnet(
            DryRun=dry_run,
            CidrBlock=cidr)
        subnet.create_tags(Tags=tags_list)

    @aws_exception
    def create_subnet_tags(self, neutron_subnet_id, tags_list, dry_run=False):
        """Apply tags_list to the subnet backing neutron_subnet_id."""
        subnet_id = self.get_subnet_from_neutron_subnet_id(neutron_subnet_id)
        subnet = self._get_ec2_resource().Subnet(subnet_id)
        subnet.create_tags(Tags=tags_list)

    @aws_exception
    def delete_subnet(self, subnet_id, dry_run=False):
        """Delete the subnet subnet_id."""
        self._get_ec2_client().delete_subnet(
            DryRun=dry_run,
            SubnetId=subnet_id
        )

    @aws_exception
    def get_subnet_from_neutron_subnet_id(self, neutron_subnet_id, dry_run=False):
        """Return the subnet ID tagged with the neutron subnet UUID, or None."""
        response = self._get_ec2_client().describe_subnets(
            DryRun=dry_run,
            Filters=[
                {
                    'Name': 'tag-value',
                    'Values': [neutron_subnet_id]
                }
            ]
        )
        if 'Subnets' in response:
            for subnet in response['Subnets']:
                if 'SubnetId' in subnet:
                    return subnet['SubnetId']
        return None

    # RouteTable Operations
    @aws_exception
    def describe_route_tables_by_vpc_id(self, vpc_id, dry_run=False):
        """Return all route tables that belong to vpc_id."""
        response = self._get_ec2_client().describe_route_tables(
            DryRun=dry_run,
            Filters=[
                {
                    'Name': 'vpc-id',
                    'Values': [vpc_id]
                },
            ]
        )
        return response['RouteTables']

    @aws_exception
    def get_route_table_by_router_id(self, neutron_router_id, dry_run=False):
        """Return the route tables tagged with the neutron router UUID."""
        response = self._get_ec2_client().describe_route_tables(
            DryRun=dry_run,
            Filters=[
                {
                    'Name': 'tag-value',
                    'Values': [neutron_router_id]
                },
            ]
        )
        return response['RouteTables']

    # Has ignore_errors special case so can't use decorator
    def create_default_route_to_ig(self, route_table_id, ig_id, dry_run=False, ignore_errors=False):
        """Add a 0.0.0.0/0 route via internet gateway ig_id.

        When ignore_errors is True a failure is only logged; otherwise it
        is translated and re-raised via _process_exception.
        """
        try:
            self._get_ec2_client().create_route(
                DryRun=dry_run,
                RouteTableId=route_table_id,
                DestinationCidrBlock='0.0.0.0/0',
                GatewayId=ig_id,
            )
        except Exception as e:
            LOG.warning("Ignoring failure in creating default route to IG: %s" % e)
            if not ignore_errors:
                _process_exception(e, dry_run)
262
neutron/neutron/common/config.py
Normal file
262
neutron/neutron/common/config.py
Normal file
@ -0,0 +1,262 @@
|
||||
# Copyright 2011 VMware, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Routines for configuring Neutron
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from keystoneclient import auth
|
||||
from keystoneclient import session as ks_session
|
||||
from oslo_config import cfg
|
||||
from oslo_db import options as db_options
|
||||
from oslo_log import log as logging
|
||||
import oslo_messaging
|
||||
from paste import deploy
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import utils
|
||||
from neutron.i18n import _LI
|
||||
from neutron import policy
|
||||
from neutron import version
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Core Neutron server options, registered below in the DEFAULT section.
core_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_("The host IP to bind to")),
    cfg.IntOpt('bind_port', default=9696,
               help=_("The port to bind to")),
    cfg.StrOpt('api_paste_config', default="api-paste.ini",
               help=_("The API paste config file to use")),
    cfg.StrOpt('api_extensions_path', default="",
               help=_("The path for API extensions")),
    cfg.StrOpt('auth_strategy', default='keystone',
               help=_("The type of authentication to use")),
    cfg.StrOpt('core_plugin',
               help=_("The core plugin Neutron will use")),
    cfg.ListOpt('service_plugins', default=[],
                help=_("The service plugins Neutron will use")),
    cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
               help=_("The base MAC address Neutron will use for VIFs")),
    cfg.IntOpt('mac_generation_retries', default=16,
               help=_("How many times Neutron will retry MAC generation")),
    cfg.BoolOpt('allow_bulk', default=True,
                help=_("Allow the usage of the bulk API")),
    cfg.BoolOpt('allow_pagination', default=False,
                help=_("Allow the usage of the pagination")),
    cfg.BoolOpt('allow_sorting', default=False,
                help=_("Allow the usage of the sorting")),
    cfg.StrOpt('pagination_max_limit', default="-1",
               help=_("The maximum number of items returned in a single "
                      "response, value was 'infinite' or negative integer "
                      "means no limit")),
    cfg.IntOpt('max_dns_nameservers', default=5,
               help=_("Maximum number of DNS nameservers")),
    cfg.IntOpt('max_subnet_host_routes', default=20,
               help=_("Maximum number of host routes per subnet")),
    cfg.IntOpt('max_fixed_ips_per_port', default=5,
               help=_("Maximum number of fixed ips per port")),
    cfg.StrOpt('default_ipv4_subnet_pool', default=None,
               help=_("Default IPv4 subnet-pool to be used for automatic "
                      "subnet CIDR allocation")),
    cfg.StrOpt('default_ipv6_subnet_pool', default=None,
               help=_("Default IPv6 subnet-pool to be used for automatic "
                      "subnet CIDR allocation")),
    cfg.IntOpt('dhcp_lease_duration', default=86400,
               deprecated_name='dhcp_lease_time',
               help=_("DHCP lease duration (in seconds). Use -1 to tell "
                      "dnsmasq to use infinite lease times.")),
    cfg.StrOpt('dns_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames')),
    cfg.BoolOpt('dhcp_agent_notification', default=True,
                help=_("Allow sending resource operation"
                       " notification to DHCP agent")),
    cfg.BoolOpt('allow_overlapping_ips', default=False,
                help=_("Allow overlapping IP support in Neutron")),
    # NOTE: default is evaluated once at import time via utils.get_hostname().
    cfg.StrOpt('host', default=utils.get_hostname(),
               help=_("Hostname to be used by the neutron server, agents and "
                      "services running on this machine. All the agents and "
                      "services running on this machine must use the same "
                      "host value.")),
    cfg.BoolOpt('force_gateway_on_subnet', default=True,
                help=_("Ensure that configured gateway is on subnet. "
                       "For IPv6, validate only if gateway is not a link "
                       "local address. Deprecated, to be removed during the "
                       "K release, at which point the check will be "
                       "mandatory.")),
    cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
                help=_("Send notification to nova when port status changes")),
    cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
                help=_("Send notification to nova when port data (fixed_ips/"
                       "floatingip) changes so nova can update its cache.")),
    cfg.StrOpt('nova_url',
               default='http://127.0.0.1:8774/v2',
               help=_('URL for connection to nova. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_region_name',
               help=_('Region Name')),
    cfg.StrOpt('nova_admin_username',
               help=_('Username for connecting to nova in admin context. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_admin_password',
               help=_('Password for connection to nova in admin context. '
                      'Deprecated in favour of an auth plugin in [nova].'),
               secret=True),
    cfg.StrOpt('nova_admin_tenant_id',
               help=_('The uuid of the admin nova tenant. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_admin_tenant_name',
               help=_('The name of the admin nova tenant. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_admin_auth_url',
               default='http://localhost:5000/v2.0',
               help=_('Authorization URL for connecting to nova in admin '
                      'context. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.IntOpt('send_events_interval', default=2,
               help=_('Number of seconds between sending events to nova if '
                      'there are any events to send.')),
    cfg.BoolOpt('advertise_mtu', default=False,
                help=_('If True, effort is made to advertise MTU settings '
                       'to VMs via network methods (DHCP and RA MTU options) '
                       'when the network\'s preferred MTU is known.')),
    cfg.StrOpt('ipam_driver', default=None,
               help=_('IPAM driver to use.')),
    cfg.BoolOpt('vlan_transparent', default=False,
                help=_('If True, then allow plugins that support it to '
                       'create VLAN transparent networks.')),
]

# Options that may also be supplied on the command line.
core_cli_opts = [
    cfg.StrOpt('state_path',
               default='/var/lib/neutron',
               help=_("Where to store Neutron state files. "
                      "This directory must be writable by the agent.")),
]

# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)

# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='neutron')
|
||||
def set_db_defaults():
    """Install Neutron's default oslo.db settings on the global config."""
    # Update the default QueuePool parameters. These can be tweaked by the
    # conf variables - max_pool_size, max_overflow and pool_timeout
    db_options.set_defaults(
        cfg.CONF,
        connection='sqlite://',
        sqlite_db='', max_pool_size=10,
        max_overflow=20, pool_timeout=10)

# Applied at import time so the defaults are in place before the
# configuration files are parsed.
set_db_defaults()
|
||||
# Options for the [nova] section, used when notifying nova of port changes.
NOVA_CONF_SECTION = 'nova'

# Map the old DEFAULT-section option names onto the [nova] session options.
nova_deprecated_opts = {
    'cafile': [cfg.DeprecatedOpt('nova_ca_certificates_file', 'DEFAULT')],
    'insecure': [cfg.DeprecatedOpt('nova_api_insecure', 'DEFAULT')],
}
ks_session.Session.register_conf_options(cfg.CONF, NOVA_CONF_SECTION,
                                         deprecated_opts=nova_deprecated_opts)
auth.register_conf_options(cfg.CONF, NOVA_CONF_SECTION)

nova_opts = [
    cfg.StrOpt('region_name',
               deprecated_name='nova_region_name',
               deprecated_group='DEFAULT',
               help=_('Name of nova region to use. Useful if keystone manages'
                      ' more than one region.')),
    cfg.StrOpt('endpoint_type',
               default='public',
               choices=['public', 'admin', 'internal'],
               help=_('Type of the nova endpoint to use. This endpoint will'
                      ' be looked up in the keystone catalog and should be'
                      ' one of public, internal or admin.')),
]
cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION)

# Make the oslo.log options (log_file, debug, ...) available on cfg.CONF.
logging.register_options(cfg.CONF)
||||
|
||||
def init(args, **kwargs):
    """Parse configuration, initialize RPC, and validate base_mac.

    :param args: command-line arguments forwarded to oslo.config
    :raises Exception: if the configured base_mac is not a valid MAC
    """
    cfg.CONF(args=args, project='neutron',
             version='%%(prog)s %s' % version.version_info.release_string(),
             **kwargs)

    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from neutron.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)

    # Validate that the base_mac is of the correct format
    msg = attributes._validate_regex(cfg.CONF.base_mac,
                                     attributes.MAC_PATTERN)
    if msg:
        msg = _("Base MAC: %s") % msg
        raise Exception(msg)
||||
|
||||
def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    product_name = "neutron"
    logging.setup(cfg.CONF, product_name)
    # Record the version and full command line at startup to aid debugging.
    LOG.info(_LI("Logging enabled!"))
    LOG.info(_LI("%(prog)s version %(version)s"),
             {'prog': sys.argv[0],
              'version': version.version_info.release_string()})
    LOG.debug("command line: %s", " ".join(sys.argv))
||||
|
||||
def reset_service():
    """Re-read logging and policy configuration for a running service."""
    # Reset worker in case SIGHUP is called.
    # Note that this is called only in case a service is running in
    # daemon mode.
    setup_logging()
    policy.refresh()
||||
|
||||
def load_paste_app(app_name):
    """Builds and returns a WSGI app from a paste config file.

    :param app_name: Name of the application to load
    :raises ConfigFilesNotFoundError when config file cannot be located
    :raises RuntimeError when application cannot be loaded from config file
    """
    # Locate the paste configuration; bail out early if it is missing.
    paste_file = cfg.CONF.find_file(cfg.CONF.api_paste_config)
    if not paste_file:
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    paste_file = os.path.abspath(paste_file)
    LOG.info(_LI("Config paste file: %s"), paste_file)

    try:
        return deploy.loadapp("config:%s" % paste_file, name=app_name)
    except (LookupError, ImportError):
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(config_path)s.") %
               {'app_name': app_name,
                'config_path': paste_file})
        LOG.exception(msg)
        raise RuntimeError(msg)
545
neutron/neutron/common/exceptions.py
Normal file
545
neutron/neutron/common/exceptions.py
Normal file
@ -0,0 +1,545 @@
|
||||
# Copyright 2011 VMware, Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Neutron base exception handling.
|
||||
"""
|
||||
|
||||
from oslo_utils import excutils
|
||||
import six
|
||||
|
||||
|
||||
class NeutronException(Exception):
    """Base Neutron Exception.

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")

    def __init__(self, **kwargs):
        try:
            # Interpolate once and reuse the result for both self.msg and
            # the Exception payload (the original formatted twice).
            self.msg = self.message % kwargs
            super(NeutronException, self).__init__(self.msg)
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                if not self.use_fatal_exceptions():
                    ctxt.reraise = False
                    # at least get the core message out if something happened;
                    # also set self.msg so __str__ does not raise
                    # AttributeError (the original left it unset here).
                    self.msg = self.message
                    super(NeutronException, self).__init__(self.message)

    if six.PY2:
        def __unicode__(self):
            return unicode(self.msg)

    def __str__(self):
        return self.msg

    def use_fatal_exceptions(self):
        # Subclasses may override to force formatting errors to propagate.
        return False
||||
|
||||
# Generic HTTP-style base exceptions used throughout Neutron.
class BadRequest(NeutronException):
    message = _('Bad %(resource)s request: %(msg)s')


class NotFound(NeutronException):
    pass


class Conflict(NeutronException):
    pass


class NotAuthorized(NeutronException):
    message = _("Not authorized.")


class ServiceUnavailable(NeutronException):
    message = _("The service is unavailable")


class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges: %(reason)s")


# Lookup failures for individual resource types.
class ObjectNotFound(NotFound):
    message = _("Object %(id)s not found.")


class NetworkNotFound(NotFound):
    message = _("Network %(net_id)s could not be found")


class SubnetNotFound(NotFound):
    message = _("Subnet %(subnet_id)s could not be found")


class SubnetPoolNotFound(NotFound):
    message = _("Subnet pool %(subnetpool_id)s could not be found")


class PortNotFound(NotFound):
    message = _("Port %(port_id)s could not be found")


class QosPolicyNotFound(NotFound):
    message = _("QoS policy %(policy_id)s could not be found")


class QosRuleNotFound(NotFound):
    message = _("QoS rule %(rule_id)s for policy %(policy_id)s "
                "could not be found")


class PortNotFoundOnNetwork(NotFound):
    message = _("Port %(port_id)s could not be found "
                "on network %(net_id)s")


class PortQosBindingNotFound(NotFound):
    message = _("QoS binding for port %(port_id)s and policy %(policy_id)s "
                "could not be found")


class NetworkQosBindingNotFound(NotFound):
    message = _("QoS binding for network %(net_id)s and policy %(policy_id)s "
                "could not be found")


# Policy engine failures.
class PolicyFileNotFound(NotFound):
    message = _("Policy configuration policy.json could not be found")


class PolicyInitError(NeutronException):
    message = _("Failed to init policy %(policy)s because %(reason)s")


class PolicyCheckError(NeutronException):
    message = _("Failed to check policy %(policy)s because %(reason)s")
||||
|
||||
class StateInvalid(BadRequest):
    message = _("Unsupported port state: %(port_state)s")


# Resource-in-use conflicts.
class InUse(NeutronException):
    message = _("The resource is inuse")


class QosPolicyInUse(InUse):
    message = _("QoS Policy %(policy_id)s is used by "
                "%(object_type)s %(object_id)s.")


class NetworkInUse(InUse):
    message = _("Unable to complete operation on network %(net_id)s. "
                "There are one or more ports still in use on the network.")


class SubnetInUse(InUse):
    message = _("Unable to complete operation on subnet %(subnet_id)s. "
                "%(reason)s")

    def __init__(self, **kwargs):
        # Supply a default reason when the caller does not give one.
        if 'reason' not in kwargs:
            kwargs['reason'] = _("One or more ports have an IP allocation "
                                 "from this subnet.")
        super(SubnetInUse, self).__init__(**kwargs)


class SubnetPoolInUse(InUse):
    message = _("Unable to complete operation on subnet pool "
                "%(subnet_pool_id)s. %(reason)s.")

    def __init__(self, **kwargs):
        # Supply a default reason when the caller does not give one.
        if 'reason' not in kwargs:
            kwargs['reason'] = _("Two or more concurrent subnets allocated.")
        super(SubnetPoolInUse, self).__init__(**kwargs)


class PortInUse(InUse):
    message = _("Unable to complete operation on port %(port_id)s "
                "for network %(net_id)s. Port already has an attached "
                "device %(device_id)s.")


class ServicePortInUse(InUse):
    message = _("Port %(port_id)s cannot be deleted directly via the "
                "port API: %(reason)s")


class DhcpPortInUse(InUse):
    message = _("Port %(port_id)s is already acquired by another DHCP agent")


class PortBound(InUse):
    message = _("Unable to complete operation on port %(port_id)s, "
                "port is already bound, port type: %(vif_type)s, "
                "old_mac %(old_mac)s, new_mac %(new_mac)s")


class MacAddressInUse(InUse):
    message = _("Unable to complete operation for network %(net_id)s. "
                "The mac address %(mac)s is in use.")


class HostRoutesExhausted(BadRequest):
    # NOTE(xchenum): probably make sense to use quota exceeded exception?
    message = _("Unable to complete operation for %(subnet_id)s. "
                "The number of host routes exceeds the limit %(quota)s.")


class DNSNameServersExhausted(BadRequest):
    # NOTE(xchenum): probably make sense to use quota exceeded exception?
    message = _("Unable to complete operation for %(subnet_id)s. "
                "The number of DNS nameservers exceeds the limit %(quota)s.")
||||
|
||||
class InvalidIpForNetwork(BadRequest):
|
||||
message = _("IP address %(ip_address)s is not a valid IP "
|
||||
"for any of the subnets on the specified network.")
|
||||
|
||||
|
||||
class InvalidIpForSubnet(BadRequest):
|
||||
message = _("IP address %(ip_address)s is not a valid IP "
|
||||
"for the specified subnet.")
|
||||
|
||||
|
||||
class IpAddressInUse(InUse):
|
||||
message = _("Unable to complete operation for network %(net_id)s. "
|
||||
"The IP address %(ip_address)s is in use.")
|
||||
|
||||
|
||||
class VlanIdInUse(InUse):
|
||||
message = _("Unable to create the network. "
|
||||
"The VLAN %(vlan_id)s on physical network "
|
||||
"%(physical_network)s is in use.")
|
||||
|
||||
|
||||
class FlatNetworkInUse(InUse):
|
||||
message = _("Unable to create the flat network. "
|
||||
"Physical network %(physical_network)s is in use.")
|
||||
|
||||
|
||||
class TunnelIdInUse(InUse):
|
||||
message = _("Unable to create the network. "
|
||||
"The tunnel ID %(tunnel_id)s is in use.")
|
||||
|
||||
|
||||
class TenantNetworksDisabled(ServiceUnavailable):
|
||||
message = _("Tenant network creation is not enabled.")
|
||||
|
||||
|
||||
class ResourceExhausted(ServiceUnavailable):
|
||||
pass
|
||||
|
||||
|
||||
class NoNetworkAvailable(ResourceExhausted):
|
||||
message = _("Unable to create the network. "
|
||||
"No tenant network is available for allocation.")
|
||||
|
||||
|
||||
class NoNetworkFoundInMaximumAllowedAttempts(ServiceUnavailable):
|
||||
message = _("Unable to create the network. "
|
||||
"No available network found in maximum allowed attempts.")
|
||||
|
||||
|
||||
class SubnetMismatchForPort(BadRequest):
|
||||
message = _("Subnet on port %(port_id)s does not match "
|
||||
"the requested subnet %(subnet_id)s")
|
||||
|
||||
|
||||
class MalformedRequestBody(BadRequest):
|
||||
message = _("Malformed request body: %(reason)s")
|
||||
|
||||
|
||||
class Invalid(NeutronException):
    """Generic invalid-state error with a caller-supplied message.

    Unlike most NeutronException subclasses, which declare a class-level
    ``message`` template, this one takes the final message text directly.
    """

    def __init__(self, message=None):
        # Set the instance message before delegating, so the base class
        # picks up the caller's text instead of a class-level template.
        self.message = message
        super(Invalid, self).__init__()
|
||||
|
||||
|
||||
class InvalidInput(BadRequest):
|
||||
message = _("Invalid input for operation: %(error_message)s.")
|
||||
|
||||
|
||||
class InvalidAllocationPool(BadRequest):
|
||||
message = _("The allocation pool %(pool)s is not valid.")
|
||||
|
||||
|
||||
class UnsupportedPortDeviceOwner(Conflict):
|
||||
message = _("Operation %(op)s is not supported for device_owner "
|
||||
"%(device_owner)s on port %(port_id)s.")
|
||||
|
||||
|
||||
class OverlappingAllocationPools(Conflict):
|
||||
message = _("Found overlapping allocation pools: "
|
||||
"%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.")
|
||||
|
||||
|
||||
class OutOfBoundsAllocationPool(BadRequest):
|
||||
message = _("The allocation pool %(pool)s spans "
|
||||
"beyond the subnet cidr %(subnet_cidr)s.")
|
||||
|
||||
|
||||
class MacAddressGenerationFailure(ServiceUnavailable):
|
||||
message = _("Unable to generate unique mac on network %(net_id)s.")
|
||||
|
||||
|
||||
class IpAddressGenerationFailure(Conflict):
|
||||
message = _("No more IP addresses available on network %(net_id)s.")
|
||||
|
||||
|
||||
class BridgeDoesNotExist(NeutronException):
|
||||
message = _("Bridge %(bridge)s does not exist.")
|
||||
|
||||
|
||||
class PreexistingDeviceFailure(NeutronException):
|
||||
message = _("Creation failed. %(dev_name)s already exists.")
|
||||
|
||||
|
||||
class QuotaResourceUnknown(NotFound):
|
||||
message = _("Unknown quota resources %(unknown)s.")
|
||||
|
||||
|
||||
class OverQuota(Conflict):
|
||||
message = _("Quota exceeded for resources: %(overs)s")
|
||||
|
||||
|
||||
class QuotaMissingTenant(BadRequest):
|
||||
message = _("Tenant-id was missing from Quota request")
|
||||
|
||||
|
||||
class InvalidQuotaValue(Conflict):
|
||||
message = _("Change would make usage less than 0 for the following "
|
||||
"resources: %(unders)s")
|
||||
|
||||
|
||||
class InvalidSharedSetting(Conflict):
|
||||
message = _("Unable to reconfigure sharing settings for network "
|
||||
"%(network)s. Multiple tenants are using it")
|
||||
|
||||
|
||||
class InvalidExtensionEnv(BadRequest):
|
||||
message = _("Invalid extension environment: %(reason)s")
|
||||
|
||||
|
||||
class ExtensionsNotFound(NotFound):
|
||||
message = _("Extensions not found: %(extensions)s")
|
||||
|
||||
|
||||
class InvalidContentType(NeutronException):
|
||||
message = _("Invalid content type %(content_type)s")
|
||||
|
||||
|
||||
class ExternalIpAddressExhausted(BadRequest):
|
||||
message = _("Unable to find any IP address on external "
|
||||
"network %(net_id)s.")
|
||||
|
||||
|
||||
class TooManyExternalNetworks(NeutronException):
|
||||
message = _("More than one external network exists")
|
||||
|
||||
|
||||
class InvalidConfigurationOption(NeutronException):
|
||||
message = _("An invalid value was provided for %(opt_name)s: "
|
||||
"%(opt_value)s")
|
||||
|
||||
|
||||
class GatewayConflictWithAllocationPools(InUse):
|
||||
message = _("Gateway ip %(ip_address)s conflicts with "
|
||||
"allocation pool %(pool)s")
|
||||
|
||||
|
||||
class GatewayIpInUse(InUse):
|
||||
message = _("Current gateway ip %(ip_address)s already in use "
|
||||
"by port %(port_id)s. Unable to update.")
|
||||
|
||||
# PF9 Change begin
|
||||
class AwsException(NeutronException):
    """Wraps an AWS/EC2 API failure so it surfaces as a Neutron error.

    Callers supply ``error_code`` (the AWS error identifier) and
    ``message`` (human-readable detail) as keyword arguments.
    """
    message = _("AWS Error: '%(error_code)s' - '%(message)s'")
|
||||
# PF9 end
|
||||
|
||||
class NetworkVlanRangeError(NeutronException):
    """Raised when a configured network VLAN range is invalid."""

    message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'")

    def __init__(self, **kwargs):
        # Render a (start, end) tuple as 'start:end' for the message.
        vlan_range = kwargs['vlan_range']
        if isinstance(vlan_range, tuple):
            kwargs['vlan_range'] = "%d:%d" % vlan_range
        super(NetworkVlanRangeError, self).__init__(**kwargs)
|
||||
|
||||
|
||||
class PhysicalNetworkNameError(NeutronException):
|
||||
message = _("Empty physical network name.")
|
||||
|
||||
|
||||
class NetworkTunnelRangeError(NeutronException):
    """Raised when a configured network tunnel range is invalid."""

    message = _("Invalid network Tunnel range: "
                "'%(tunnel_range)s' - %(error)s")

    def __init__(self, **kwargs):
        # Render a (start, end) tuple as 'start:end' for the message.
        tunnel_range = kwargs['tunnel_range']
        if isinstance(tunnel_range, tuple):
            kwargs['tunnel_range'] = "%d:%d" % tunnel_range
        super(NetworkTunnelRangeError, self).__init__(**kwargs)
|
||||
|
||||
|
||||
class NetworkVxlanPortRangeError(NeutronException):
|
||||
message = _("Invalid network VXLAN port range: '%(vxlan_range)s'")
|
||||
|
||||
|
||||
class VxlanNetworkUnsupported(NeutronException):
|
||||
message = _("VXLAN Network unsupported.")
|
||||
|
||||
|
||||
class DuplicatedExtension(NeutronException):
|
||||
message = _("Found duplicate extension: %(alias)s")
|
||||
|
||||
|
||||
class DeviceIDNotOwnedByTenant(Conflict):
|
||||
message = _("The following device_id %(device_id)s is not owned by your "
|
||||
"tenant or matches another tenants router.")
|
||||
|
||||
|
||||
class InvalidCIDR(BadRequest):
|
||||
message = _("Invalid CIDR %(input)s given as IP prefix")
|
||||
|
||||
|
||||
class RouterNotCompatibleWithAgent(NeutronException):
|
||||
message = _("Router '%(router_id)s' is not compatible with this agent")
|
||||
|
||||
|
||||
class DvrHaRouterNotSupported(NeutronException):
|
||||
message = _("Router '%(router_id)s' cannot be both DVR and HA")
|
||||
|
||||
|
||||
class FailToDropPrivilegesExit(SystemExit):
|
||||
"""Exit exception raised when a drop privileges action fails."""
|
||||
code = 99
|
||||
|
||||
|
||||
class FloatingIpSetupException(NeutronException):
    """Raised when floating-IP setup on an agent fails.

    Takes the final message text directly instead of a class-level
    ``message`` template.
    """

    def __init__(self, message=None):
        # Set the instance message before delegating, so the base class
        # uses the caller's text.
        self.message = message
        super(FloatingIpSetupException, self).__init__()
|
||||
|
||||
|
||||
class IpTablesApplyException(NeutronException):
    """Raised when applying iptables rules fails.

    Takes the final message text directly instead of a class-level
    ``message`` template.
    """

    def __init__(self, message=None):
        # Set the instance message before delegating, so the base class
        # uses the caller's text.
        self.message = message
        super(IpTablesApplyException, self).__init__()
|
||||
|
||||
|
||||
class NetworkIdOrRouterIdRequiredError(NeutronException):
|
||||
message = _('network_id and router_id are None. One must be provided.')
|
||||
|
||||
|
||||
class AbortSyncRouters(NeutronException):
|
||||
message = _("Aborting periodic_sync_routers_task due to an error")
|
||||
|
||||
|
||||
# Shared *aas exceptions, pending them being refactored out of Neutron
|
||||
# proper.
|
||||
|
||||
class FirewallInternalDriverError(NeutronException):
|
||||
"""Fwaas exception for all driver errors.
|
||||
|
||||
On any failure or exception in the driver, driver should log it and
|
||||
raise this exception to the agent
|
||||
"""
|
||||
message = _("%(driver)s: Internal driver error.")
|
||||
|
||||
|
||||
class MissingMinSubnetPoolPrefix(BadRequest):
|
||||
message = _("Unspecified minimum subnet pool prefix")
|
||||
|
||||
|
||||
class EmptySubnetPoolPrefixList(BadRequest):
|
||||
message = _("Empty subnet pool prefix list")
|
||||
|
||||
|
||||
class PrefixVersionMismatch(BadRequest):
|
||||
message = _("Cannot mix IPv4 and IPv6 prefixes in a subnet pool")
|
||||
|
||||
|
||||
class UnsupportedMinSubnetPoolPrefix(BadRequest):
|
||||
message = _("Prefix '%(prefix)s' not supported in IPv%(version)s pool")
|
||||
|
||||
|
||||
class IllegalSubnetPoolPrefixBounds(BadRequest):
|
||||
message = _("Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, "
|
||||
"%(base_prefix_type)s=%(base_prefixlen)s")
|
||||
|
||||
|
||||
class IllegalSubnetPoolPrefixUpdate(BadRequest):
|
||||
message = _("Illegal update to prefixes: %(msg)s")
|
||||
|
||||
|
||||
class SubnetAllocationError(NeutronException):
|
||||
message = _("Failed to allocate subnet: %(reason)s")
|
||||
|
||||
|
||||
class AddressScopePrefixConflict(Conflict):
|
||||
message = _("Failed to associate address scope: subnetpools "
|
||||
"within an address scope must have unique prefixes")
|
||||
|
||||
|
||||
class IllegalSubnetPoolAssociationToAddressScope(BadRequest):
|
||||
message = _("Illegal subnetpool association: subnetpool %(subnetpool_id)s "
|
||||
" cannot be associated with address scope"
|
||||
" %(address_scope_id)s")
|
||||
|
||||
|
||||
class IllegalSubnetPoolUpdate(BadRequest):
|
||||
message = _("Illegal subnetpool update : %(reason)s")
|
||||
|
||||
|
||||
class MinPrefixSubnetAllocationError(BadRequest):
|
||||
message = _("Unable to allocate subnet with prefix length %(prefixlen)s, "
|
||||
"minimum allowed prefix is %(min_prefixlen)s")
|
||||
|
||||
|
||||
class MaxPrefixSubnetAllocationError(BadRequest):
|
||||
message = _("Unable to allocate subnet with prefix length %(prefixlen)s, "
|
||||
"maximum allowed prefix is %(max_prefixlen)s")
|
||||
|
||||
|
||||
class SubnetPoolDeleteError(BadRequest):
|
||||
message = _("Unable to delete subnet pool: %(reason)s")
|
||||
|
||||
|
||||
class SubnetPoolQuotaExceeded(OverQuota):
|
||||
message = _("Per-tenant subnet pool prefix quota exceeded")
|
||||
|
||||
|
||||
class DeviceNotFoundError(NeutronException):
|
||||
message = _("Device '%(device_name)s' does not exist")
|
||||
|
||||
|
||||
class NetworkSubnetPoolAffinityError(BadRequest):
|
||||
message = _("Subnets hosted on the same network must be allocated from "
|
||||
"the same subnet pool")
|
||||
|
||||
|
||||
class ObjectActionError(NeutronException):
|
||||
message = _('Object action %(action)s failed because: %(reason)s')
|
||||
|
||||
|
||||
class CTZoneExhaustedError(NeutronException):
|
||||
message = _("IPtables conntrack zones exhausted, iptables rules cannot "
|
||||
"be applied.")
|
0
neutron/neutron/plugins/ml2/drivers/aws/__init__.py
Normal file
0
neutron/neutron/plugins/ml2/drivers/aws/__init__.py
Normal file
181
neutron/neutron/plugins/ml2/drivers/aws/mechanism_aws.py
Normal file
181
neutron/neutron/plugins/ml2/drivers/aws/mechanism_aws.py
Normal file
@ -0,0 +1,181 @@
|
||||
# Copyright 2016 Platform9 Systems Inc.(http://www.platform9.com)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log
|
||||
from neutron.common.aws_utils import AwsUtils
|
||||
from neutron.common.exceptions import AwsException
|
||||
from neutron.plugins.ml2 import driver_api as api
|
||||
import json
|
||||
import random
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
class AwsMechanismDriver(api.MechanismDriver):
    """ML2 mechanism driver that mirrors Neutron resources onto AWS EC2.

    Mapping: a Neutron network corresponds to an EC2 VPC and a Neutron
    subnet to an EC2 subnet. The VPC is created lazily when the first
    subnet is added to a network, and deleted when the last subnet is
    removed. External/provider networks are never propagated to AWS.
    """

    # EC2 accepts VPC/subnet prefix lengths between /16 and /28 inclusive.
    MIN_SUBNET_MASK = 16
    MAX_SUBNET_MASK = 28

    def __init__(self):
        # Deferred to initialize(); ML2 instantiates drivers before the
        # plugin is fully configured.
        self.aws_utils = None

    def initialize(self):
        self.aws_utils = AwsUtils()

    @staticmethod
    def _is_external_network(network):
        """Return True for external/provider networks (no AWS counterpart)."""
        return network.get('provider:physical_network') == "external"

    # NETWORK
    def create_network_precommit(self, context):
        pass

    def create_network_postcommit(self, context):
        # VPC creation is deferred until the first subnet is created.
        pass

    def update_network_precommit(self, context):
        """Propagate a network rename to the Name tag of the backing VPC."""
        try:
            network_name = context.current['name']
            neutron_network_id = context.current['id']
            tags_list = [{'Key': 'Name', 'Value': network_name}]
            self.aws_utils.create_tags_for_vpc(neutron_network_id, tags_list)
        except Exception as e:
            # Fixed: this previously logged "update subnet precommit".
            LOG.error("Error in update network precommit: %s" % e)
            raise e

    def update_network_postcommit(self, context):
        pass

    def delete_network_precommit(self, context):
        """Delete the backing VPC, if one was ever created."""
        neutron_network_id = context.current['id']
        # If user is deleting an empty neutron network then nothing to be
        # done on AWS side: VPCs only exist once a subnet has been added.
        if len(context.current['subnets']) > 0:
            vpc_id = self.aws_utils.get_vpc_from_neutron_network_id(
                neutron_network_id)
            if vpc_id is not None:
                LOG.info("Deleting network %s (VPC_ID: %s)" %
                         (neutron_network_id, vpc_id))
                self.aws_utils.delete_vpc(vpc_id=vpc_id)

    def delete_network_postcommit(self, context):
        pass

    # SUBNET
    def create_subnet_precommit(self, context):
        """Create an EC2 subnet (and, for the first subnet, its VPC).

        :raises AwsException: for IPv6 subnets or prefix lengths outside
            the EC2-supported /16../28 range.
        """
        LOG.info("Create subnet for network %s" % context.network.current['id'])
        # External Network doesn't exist on AWS, so no operations permitted.
        if self._is_external_network(context.network.current):
            # Do not create subnets for external & provider networks. Only
            # allow tenant network subnet creation at the moment.
            return

        if context.current['ip_version'] == 6:
            raise AwsException(error_code="IPv6Error",
                               message="Cannot create subnets with IPv6")
        cidr = context.current['cidr']
        # Fixed: parse the prefix length from the CIDR instead of slicing
        # the last two characters, which raised ValueError for single-digit
        # masks such as "10.0.0.0/8".
        network_addr, mask_str = cidr.split('/')
        mask = int(mask_str)
        if mask < self.MIN_SUBNET_MASK or mask > self.MAX_SUBNET_MASK:
            # Fixed: the old message claimed ">16 and <28" although the
            # check accepts 16 and 28.
            raise AwsException(
                error_code="InvalidMask",
                message="Subnet mask has to be between /16 and /28")
        try:
            # Check if this is the first subnet to be added to a network.
            neutron_network = context.network.current
            associated_vpc_id = self.aws_utils.get_vpc_from_neutron_network_id(
                neutron_network['id'])
            if associated_vpc_id is None:
                # Need to create EC2 VPC: use a /16 derived from the
                # subnet's network address (fixed: previously built by
                # slicing the last two characters off the CIDR string).
                vpc_cidr = network_addr + '/16'
                tags = [
                    {'Key': 'Name', 'Value': neutron_network['name']},
                    {'Key': 'openstack_network_id',
                     'Value': neutron_network['id']}
                ]
                associated_vpc_id = self.aws_utils.create_vpc_and_tags(
                    cidr=vpc_cidr, tags_list=tags)
            # Create Subnet in AWS.
            tags = [
                {'Key': 'Name', 'Value': context.current['name']},
                {'Key': 'openstack_subnet_id', 'Value': context.current['id']}
            ]
            self.aws_utils.create_subnet_and_tags(vpc_id=associated_vpc_id,
                                                  cidr=cidr,
                                                  tags_list=tags)
        except Exception as e:
            LOG.error("Error in create subnet precommit: %s" % e)
            raise e

    def create_subnet_postcommit(self, context):
        pass

    def update_subnet_precommit(self, context):
        """Propagate a subnet rename to the Name tag of the EC2 subnet."""
        try:
            subnet_name = context.current['name']
            neutron_subnet_id = context.current['id']
            tags_list = [{'Key': 'Name', 'Value': subnet_name}]
            self.aws_utils.create_subnet_tags(neutron_subnet_id, tags_list)
        except Exception as e:
            LOG.error("Error in update subnet precommit: %s" % e)
            raise e

    def update_subnet_postcommit(self, context):
        pass

    def delete_subnet_precommit(self, context):
        """Delete the backing EC2 subnet, if any."""
        if self._is_external_network(context.network.current):
            LOG.error("Deleting provider and external networks not supported")
            return
        try:
            LOG.info("Deleting subnet %s" % context.current['id'])
            subnet_id = self.aws_utils.get_subnet_from_neutron_subnet_id(
                context.current['id'])
            if subnet_id is not None:
                self.aws_utils.delete_subnet(subnet_id=subnet_id)
        except Exception as e:
            LOG.error("Error in delete subnet precommit: %s" % e)
            raise e

    def delete_subnet_postcommit(self, context):
        """Delete the VPC once the last subnet of the network is gone."""
        neutron_network = context.network.current
        if self._is_external_network(neutron_network):
            return
        try:
            subnets = neutron_network['subnets']
            # Parenthesized for clarity (same semantics as the original
            # and/or precedence): either the deleted subnet was the only
            # one, or the list is already empty.
            if ((len(subnets) == 1 and subnets[0] == context.current['id'])
                    or len(subnets) == 0):
                # Last subnet for this network was deleted, so delete VPC
                # because VPC gets created during first subnet creation
                # under an OpenStack network.
                vpc_id = self.aws_utils.get_vpc_from_neutron_network_id(
                    neutron_network['id'])
                LOG.info("Deleting VPC %s since this was the last subnet "
                         "in the vpc" % vpc_id)
                self.aws_utils.delete_vpc(vpc_id=vpc_id)
        except Exception as e:
            LOG.error("Error in delete subnet postcommit: %s" % e)
            raise e

    # PORT - no AWS-side resource is managed per port; all hooks are no-ops.
    def create_port_precommit(self, context):
        pass

    def create_port_postcommit(self, context):
        pass

    def update_port_precommit(self, context):
        pass

    def update_port_postcommit(self, context):
        pass

    def delete_port_precommit(self, context):
        pass

    def delete_port_postcommit(self, context):
        pass

    def bind_port(self, context):
        """Bind the port, passing the mapped EC2 subnet id in the VIF details.

        The first fixed IP's Neutron subnet id is translated to its EC2
        subnet id so the agent can attach the instance interface correctly.
        """
        fixed_ip_dict = dict()
        if 'fixed_ips' in context.current:
            if len(context.current['fixed_ips']) > 0:
                fixed_ip_dict = context.current['fixed_ips'][0]
                fixed_ip_dict['subnet_id'] = \
                    self.aws_utils.get_subnet_from_neutron_subnet_id(
                        fixed_ip_dict['subnet_id'])

        segment_id = random.choice(context.network.network_segments)[api.ID]
        context.set_binding(segment_id,
                            "vip_type_a",
                            json.dumps(fixed_ip_dict),
                            status='ACTIVE')
        return True
|
893
neutron/neutron/plugins/ml2/managers.py
Normal file
893
neutron/neutron/plugins/ml2/managers.py
Normal file
@ -0,0 +1,893 @@
|
||||
# Copyright (c) 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_utils import excutils
|
||||
import six
|
||||
import stevedore
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import exceptions as exc
|
||||
from neutron.extensions import external_net
|
||||
from neutron.extensions import multiprovidernet as mpnet
|
||||
from neutron.extensions import portbindings
|
||||
from neutron.extensions import providernet as provider
|
||||
from neutron.extensions import vlantransparent
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.plugins.ml2.common import exceptions as ml2_exc
|
||||
from neutron.plugins.ml2 import db
|
||||
from neutron.plugins.ml2 import driver_api as api
|
||||
from neutron.plugins.ml2 import models
|
||||
from neutron.services.qos import qos_consts
|
||||
from neutron.common import exceptions
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
MAX_BINDING_LEVELS = 10
|
||||
|
||||
|
||||
class TypeManager(stevedore.named.NamedExtensionManager):
    """Manage network segment types using drivers.

    Loads the type drivers named in ``cfg.CONF.ml2.type_drivers`` via
    stevedore, indexes them by network type, and dispatches segment
    validation/allocation/release calls to the matching driver.
    """

    def __init__(self):
        # Mapping from type name to DriverManager
        self.drivers = {}

        LOG.info(_LI("Configured type driver names: %s"),
                 cfg.CONF.ml2.type_drivers)
        super(TypeManager, self).__init__('neutron.ml2.type_drivers',
                                          cfg.CONF.ml2.type_drivers,
                                          invoke_on_load=True)
        LOG.info(_LI("Loaded type driver names: %s"), self.names())
        self._register_types()
        self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types)
        self._check_external_network_type(cfg.CONF.ml2.external_network_type)

    def _register_types(self):
        """Index loaded extensions by network type; first driver wins."""
        for ext in self:
            network_type = ext.obj.get_type()
            if network_type in self.drivers:
                LOG.error(_LE("Type driver '%(new_driver)s' ignored because"
                              " type driver '%(old_driver)s' is already"
                              " registered for type '%(type)s'"),
                          {'new_driver': ext.name,
                           'old_driver': self.drivers[network_type].name,
                           'type': network_type})
            else:
                self.drivers[network_type] = ext
        LOG.info(_LI("Registered types: %s"), self.drivers.keys())

    def _check_tenant_network_types(self, types):
        """Abort service startup if a tenant network type has no driver."""
        self.tenant_network_types = []
        for network_type in types:
            if network_type in self.drivers:
                self.tenant_network_types.append(network_type)
            else:
                LOG.error(_LE("No type driver for tenant network_type: %s. "
                              "Service terminated!"), network_type)
                raise SystemExit(1)
        LOG.info(_LI("Tenant network_types: %s"), self.tenant_network_types)

    def _check_external_network_type(self, ext_network_type):
        """Abort service startup if the external network type has no driver."""
        if ext_network_type and ext_network_type not in self.drivers:
            LOG.error(_LE("No type driver for external network_type: %s. "
                          "Service terminated!"), ext_network_type)
            raise SystemExit(1)

    def _process_provider_segment(self, segment):
        """Normalize and validate one provider segment dict.

        :raises exc.InvalidInput: if network_type is not set.
        """
        (network_type, physical_network,
         segmentation_id) = (self._get_attribute(segment, attr)
                             for attr in provider.ATTRIBUTES)

        if attributes.is_attr_set(network_type):
            segment = {api.NETWORK_TYPE: network_type,
                       api.PHYSICAL_NETWORK: physical_network,
                       api.SEGMENTATION_ID: segmentation_id}
            self.validate_provider_segment(segment)
            return segment

        msg = _("network_type required")
        raise exc.InvalidInput(error_message=msg)

    def _process_provider_create(self, network):
        """Extract and validate provider segments from a network request.

        Returns a list of validated segments, or None when no provider
        attributes were supplied (ordinary tenant network).
        """
        if any(attributes.is_attr_set(network.get(attr))
               for attr in provider.ATTRIBUTES):
            # Verify that multiprovider and provider attributes are not set
            # at the same time.
            if attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
                raise mpnet.SegmentsSetInConjunctionWithProviders()
            segment = self._get_provider_segment(network)
            return [self._process_provider_segment(segment)]
        elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
            segments = [self._process_provider_segment(s)
                        for s in network[mpnet.SEGMENTS]]
            mpnet.check_duplicate_segments(segments, self.is_partial_segment)
            return segments

    def _match_segment(self, segment, filters):
        """Return True when the segment satisfies every non-empty filter."""
        return all(not filters.get(attr) or segment.get(attr) in filters[attr]
                   for attr in provider.ATTRIBUTES)

    def _get_provider_segment(self, network):
        # TODO(manishg): Placeholder method
        # Code intended for operating on a provider segment should use
        # this method to extract the segment, even though currently the
        # segment attributes are part of the network dictionary. In the
        # future, network and segment information will be decoupled and
        # here we will do the job of extracting the segment information.
        return network

    def network_matches_filters(self, network, filters):
        """Return True when any of the network's segments matches filters."""
        if not filters:
            return True
        if any(attributes.is_attr_set(network.get(attr))
               for attr in provider.ATTRIBUTES):
            segments = [self._get_provider_segment(network)]
        elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
            segments = self._get_attribute(network, mpnet.SEGMENTS)
        else:
            return True
        return any(self._match_segment(s, filters) for s in segments)

    def _get_attribute(self, attrs, key):
        """Like dict.get, but maps ATTR_NOT_SPECIFIED to None."""
        value = attrs.get(key)
        if value is attributes.ATTR_NOT_SPECIFIED:
            value = None
        return value

    def extend_network_dict_provider(self, context, network):
        # this method is left for backward compat even though it would be
        # easy to change the callers in tree to use the bulk function
        return self.extend_networks_dict_provider(context, [network])

    def extend_networks_dict_provider(self, context, networks):
        """Add provider segment attributes to each network dict in bulk."""
        ids = [network['id'] for network in networks]
        net_segments = db.get_networks_segments(context.session, ids)
        for network in networks:
            segments = net_segments[network['id']]
            self._extend_network_dict_provider(network, segments)

    def _extend_network_dict_provider(self, network, segments):
        """Fill provider (single) or multiprovider (>1) keys on a network."""
        if not segments:
            LOG.debug("Network %s has no segments", network['id'])
            for attr in provider.ATTRIBUTES:
                network[attr] = None
        elif len(segments) > 1:
            network[mpnet.SEGMENTS] = [
                {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE],
                 provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK],
                 provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]}
                for segment in segments]
        else:
            segment = segments[0]
            network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE]
            network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK]
            network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID]

    def initialize(self):
        for network_type, driver in six.iteritems(self.drivers):
            LOG.info(_LI("Initializing driver for type '%s'"), network_type)
            driver.obj.initialize()

    def _add_network_segment(self, session, network_id, segment, mtu,
                             segment_index=0):
        """Persist one segment; collect its MTU into the shared list."""
        db.add_network_segment(session, network_id, segment, segment_index)
        if segment.get(api.MTU, 0) > 0:
            mtu.append(segment[api.MTU])

    def create_network_segments(self, context, network, tenant_id):
        """Call type drivers to create network segments."""
        segments = self._process_provider_create(network)
        session = context.session
        mtu = []
        with session.begin(subtransactions=True):
            network_id = network['id']
            if segments:
                for segment_index, segment in enumerate(segments):
                    segment = self.reserve_provider_segment(
                        session, segment)
                    self._add_network_segment(session, network_id, segment,
                                              mtu, segment_index)
            elif (cfg.CONF.ml2.external_network_type and
                  self._get_attribute(network, external_net.EXTERNAL)):
                segment = self._allocate_ext_net_segment(session)
                self._add_network_segment(session, network_id, segment, mtu)
            else:
                segment = self._allocate_tenant_net_segment(session)
                self._add_network_segment(session, network_id, segment, mtu)
        # The network MTU is constrained by its most restrictive segment.
        network[api.MTU] = min(mtu) if mtu else 0

    def is_partial_segment(self, segment):
        network_type = segment[api.NETWORK_TYPE]
        driver = self.drivers.get(network_type)
        if driver:
            return driver.obj.is_partial_segment(segment)
        else:
            msg = _("network_type value '%s' not supported") % network_type
            raise exc.InvalidInput(error_message=msg)

    def validate_provider_segment(self, segment):
        network_type = segment[api.NETWORK_TYPE]
        driver = self.drivers.get(network_type)
        if driver:
            driver.obj.validate_provider_segment(segment)
        else:
            msg = _("network_type value '%s' not supported") % network_type
            raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, session, segment):
        network_type = segment.get(api.NETWORK_TYPE)
        driver = self.drivers.get(network_type)
        return driver.obj.reserve_provider_segment(session, segment)

    def _allocate_segment(self, session, network_type):
        driver = self.drivers.get(network_type)
        return driver.obj.allocate_tenant_segment(session)

    def _allocate_tenant_net_segment(self, session):
        """Try each configured tenant type in order until one allocates."""
        for network_type in self.tenant_network_types:
            segment = self._allocate_segment(session, network_type)
            if segment:
                return segment
        raise exc.NoNetworkAvailable()

    def _allocate_ext_net_segment(self, session):
        network_type = cfg.CONF.ml2.external_network_type
        segment = self._allocate_segment(session, network_type)
        if segment:
            return segment
        raise exc.NoNetworkAvailable()

    def release_network_segments(self, session, network_id):
        """Release every (static and dynamic) segment of a network."""
        segments = db.get_network_segments(session, network_id,
                                           filter_dynamic=None)

        for segment in segments:
            network_type = segment.get(api.NETWORK_TYPE)
            driver = self.drivers.get(network_type)
            if driver:
                driver.obj.release_segment(session, segment)
            else:
                LOG.error(_LE("Failed to release segment '%s' because "
                              "network type is not supported."), segment)

    def allocate_dynamic_segment(self, session, network_id, segment):
        """Allocate a dynamic segment using a partial or full segment dict."""
        dynamic_segment = db.get_dynamic_segment(
            session, network_id, segment.get(api.PHYSICAL_NETWORK),
            segment.get(api.SEGMENTATION_ID))

        if dynamic_segment:
            return dynamic_segment

        driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
        dynamic_segment = driver.obj.reserve_provider_segment(session, segment)
        db.add_network_segment(session, network_id, dynamic_segment,
                               is_dynamic=True)
        return dynamic_segment

    def release_dynamic_segment(self, session, segment_id):
        """Delete a dynamic segment."""
        segment = db.get_segment_by_id(session, segment_id)
        if segment:
            driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
            if driver:
                driver.obj.release_segment(session, segment)
                db.delete_network_segment(session, segment_id)
            else:
                LOG.error(_LE("Failed to release segment '%s' because "
                              "network type is not supported."), segment)
        else:
            # Fixed: %(segment_id)s requires a mapping argument; passing the
            # bare id left the placeholder unsubstituted.
            LOG.debug("No segment found with id %(segment_id)s",
                      {'segment_id': segment_id})
|
||||
|
||||
|
||||
class MechanismManager(stevedore.named.NamedExtensionManager):
|
||||
"""Manage networking mechanisms using drivers."""
|
||||
|
||||
def __init__(self):
|
||||
# Registered mechanism drivers, keyed by name.
|
||||
self.mech_drivers = {}
|
||||
# Ordered list of mechanism drivers, defining
|
||||
# the order in which the drivers are called.
|
||||
self.ordered_mech_drivers = []
|
||||
|
||||
LOG.info(_LI("Configured mechanism driver names: %s"),
|
||||
cfg.CONF.ml2.mechanism_drivers)
|
||||
super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers',
|
||||
cfg.CONF.ml2.mechanism_drivers,
|
||||
invoke_on_load=True,
|
||||
name_order=True)
|
||||
LOG.info(_LI("Loaded mechanism driver names: %s"), self.names())
|
||||
self._register_mechanisms()
|
||||
|
||||
def _register_mechanisms(self):
|
||||
"""Register all mechanism drivers.
|
||||
|
||||
This method should only be called once in the MechanismManager
|
||||
constructor.
|
||||
"""
|
||||
for ext in self:
|
||||
self.mech_drivers[ext.name] = ext
|
||||
self.ordered_mech_drivers.append(ext)
|
||||
LOG.info(_LI("Registered mechanism drivers: %s"),
|
||||
[driver.name for driver in self.ordered_mech_drivers])
|
||||
|
||||
@property
|
||||
def supported_qos_rule_types(self):
|
||||
if not self.ordered_mech_drivers:
|
||||
return []
|
||||
|
||||
rule_types = set(qos_consts.VALID_RULE_TYPES)
|
||||
binding_driver_found = False
|
||||
|
||||
# Recalculate on every call to allow drivers determine supported rule
|
||||
# types dynamically
|
||||
for driver in self.ordered_mech_drivers:
|
||||
driver_obj = driver.obj
|
||||
if driver_obj._supports_port_binding:
|
||||
binding_driver_found = True
|
||||
if hasattr(driver_obj, 'supported_qos_rule_types'):
|
||||
new_rule_types = \
|
||||
rule_types & set(driver_obj.supported_qos_rule_types)
|
||||
dropped_rule_types = new_rule_types - rule_types
|
||||
if dropped_rule_types:
|
||||
LOG.info(
|
||||
_LI("%(rule_types)s rule types disabled for ml2 "
|
||||
"because %(driver)s does not support them"),
|
||||
{'rule_types': ', '.join(dropped_rule_types),
|
||||
'driver': driver.name})
|
||||
rule_types = new_rule_types
|
||||
else:
|
||||
# at least one of drivers does not support QoS, meaning
|
||||
# there are no rule types supported by all of them
|
||||
LOG.warn(
|
||||
_LW("%s does not support QoS; "
|
||||
"no rule types available"),
|
||||
driver.name)
|
||||
return []
|
||||
|
||||
if binding_driver_found:
|
||||
rule_types = list(rule_types)
|
||||
else:
|
||||
rule_types = []
|
||||
LOG.debug("Supported QoS rule types "
|
||||
"(common subset for all mech drivers): %s", rule_types)
|
||||
return rule_types
|
||||
|
||||
def initialize(self):
|
||||
for driver in self.ordered_mech_drivers:
|
||||
LOG.info(_LI("Initializing mechanism driver '%s'"), driver.name)
|
||||
driver.obj.initialize()
|
||||
|
||||
def _check_vlan_transparency(self, context):
|
||||
"""Helper method for checking vlan transparecncy support.
|
||||
|
||||
:param context: context parameter to pass to each method call
|
||||
:raises: neutron.extensions.vlantransparent.
|
||||
VlanTransparencyDriverError if any mechanism driver doesn't
|
||||
support vlan transparency.
|
||||
"""
|
||||
if context.current['vlan_transparent'] is None:
|
||||
return
|
||||
|
||||
if context.current['vlan_transparent']:
|
||||
for driver in self.ordered_mech_drivers:
|
||||
if not driver.obj.check_vlan_transparency(context):
|
||||
raise vlantransparent.VlanTransparencyDriverError()
|
||||
|
||||
def _call_on_drivers(self, method_name, context,
|
||||
continue_on_failure=False):
|
||||
"""Helper method for calling a method across all mechanism drivers.
|
||||
|
||||
:param method_name: name of the method to call
|
||||
:param context: context parameter to pass to each method call
|
||||
:param continue_on_failure: whether or not to continue to call
|
||||
all mechanism drivers once one has raised an exception
|
||||
:raises: neutron.plugins.ml2.common.MechanismDriverError
|
||||
if any mechanism driver call fails.
|
||||
"""
|
||||
error = False
|
||||
for driver in self.ordered_mech_drivers:
|
||||
try:
|
||||
getattr(driver.obj, method_name)(context)
|
||||
except exceptions.AwsException as aws_exception:
|
||||
LOG.exception(
|
||||
_LE("Mechanism driver '%(name)s' failed in %(method)s"),
|
||||
{'name': driver.name, 'method': method_name}
|
||||
)
|
||||
if not continue_on_failure:
|
||||
raise aws_exception
|
||||
except Exception:
|
||||
LOG.exception(
|
||||
_LE("Mechanism driver '%(name)s' failed in %(method)s"),
|
||||
{'name': driver.name, 'method': method_name}
|
||||
)
|
||||
error = True
|
||||
if not continue_on_failure:
|
||||
break
|
||||
if error:
|
||||
raise ml2_exc.MechanismDriverError(
|
||||
method=method_name
|
||||
)
|
||||
|
||||
    def create_network_precommit(self, context):
        """Notify all mechanism drivers during network creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        # All drivers must agree on VLAN transparency before persisting.
        self._check_vlan_transparency(context)
        self._call_on_drivers("create_network_precommit", context)

    def create_network_postcommit(self, context):
        """Notify all mechanism drivers after network creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_network_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where the network will be deleted, triggering
        any required cleanup. There is no guarantee that all mechanism
        drivers are called in this case.
        """
        self._call_on_drivers("create_network_postcommit", context)

    def update_network_precommit(self, context):
        """Notify all mechanism drivers during network update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_network_precommit", context)

    def update_network_postcommit(self, context):
        """Notify all mechanism drivers after network update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_network_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure.
        """
        self._call_on_drivers("update_network_postcommit", context,
                              continue_on_failure=True)

    def delete_network_precommit(self, context):
        """Notify all mechanism drivers during network deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_network_precommit", context)

    def delete_network_postcommit(self, context):
        """Notify all mechanism drivers after network deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_network_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        network resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        network.
        """
        self._call_on_drivers("delete_network_postcommit", context,
                              continue_on_failure=True)
|
||||
|
||||
    def create_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_subnet_precommit", context)

    def create_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_subnet_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where the subnet will be deleted, triggering
        any required cleanup. There is no guarantee that all mechanism
        drivers are called in this case.
        """
        self._call_on_drivers("create_subnet_postcommit", context)

    def update_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_subnet_precommit", context)

    def update_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_subnet_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure.
        """
        self._call_on_drivers("update_subnet_postcommit", context,
                              continue_on_failure=True)

    def delete_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_subnet_precommit", context)

    def delete_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_subnet_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        subnet resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        subnet.
        """
        self._call_on_drivers("delete_subnet_postcommit", context,
                              continue_on_failure=True)
|
||||
|
||||
    def create_port_precommit(self, context):
        """Notify all mechanism drivers during port creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_port_precommit", context)

    def create_port_postcommit(self, context):
        """Notify all mechanism drivers of port creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_port_postcommit call fails.

        Called after the database transaction. Errors raised by
        mechanism drivers are left to propagate to the caller, where
        the port will be deleted, triggering any required
        cleanup. There is no guarantee that all mechanism drivers are
        called in this case.
        """
        self._call_on_drivers("create_port_postcommit", context)

    def update_port_precommit(self, context):
        """Notify all mechanism drivers during port update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_port_precommit", context)

    def update_port_postcommit(self, context):
        """Notify all mechanism drivers after port update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_port_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure.
        """
        self._call_on_drivers("update_port_postcommit", context,
                              continue_on_failure=True)

    def delete_port_precommit(self, context):
        """Notify all mechanism drivers during port deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_port_precommit", context)

    def delete_port_postcommit(self, context):
        """Notify all mechanism drivers after port deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_port_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        port resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        port.
        """
        self._call_on_drivers("delete_port_postcommit", context,
                              continue_on_failure=True)
|
||||
|
||||
    def bind_port(self, context):
        """Attempt to bind a port using registered mechanism drivers.

        :param context: PortContext instance describing the port

        Called outside any transaction to attempt to establish a port
        binding.
        """
        binding = context._binding
        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
                  "for vnic_type %(vnic_type)s with profile %(profile)s",
                  {'port': context.current['id'],
                   'host': context.host,
                   'vnic_type': binding.vnic_type,
                   'profile': binding.profile})
        # Discard any stale levels, then bind from level 0 using the
        # network's static segments.
        context._clear_binding_levels()
        if not self._bind_port_level(context, 0,
                                     context.network.network_segments):
            binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
            LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
                      {'port': context.current['id'],
                       'host': context.host})
|
||||
|
||||
    def _bind_port_level(self, context, level, segments_to_bind):
        """Recursively try each driver to bind the port at one level.

        Returns True once some driver completes the binding; returns a
        falsy value (None/False) when no driver succeeds at this level
        or the level limit is reached.
        """
        binding = context._binding
        port_id = context.current['id']
        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
                  "at level %(level)s using segments %(segments)s",
                  {'port': port_id,
                   'host': context.host,
                   'level': level,
                   'segments': segments_to_bind})

        # Hard cap on hierarchical binding depth to prevent runaway
        # recursion through partial-binding drivers.
        if level == MAX_BINDING_LEVELS:
            LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
                          "port %(port)s on host %(host)s"),
                      {'port': context.current['id'],
                       'host': context.host})
            return False

        for driver in self.ordered_mech_drivers:
            # Skip drivers that already bound one of these segments at a
            # higher level (loop prevention).
            if not self._check_driver_to_bind(driver, segments_to_bind,
                                              context._binding_levels):
                continue
            try:
                context._prepare_to_bind(segments_to_bind)
                driver.obj.bind_port(context)
                segment = context._new_bound_segment
                if segment:
                    context._push_binding_level(
                        models.PortBindingLevel(port_id=port_id,
                                                host=context.host,
                                                level=level,
                                                driver=driver.name,
                                                segment_id=segment))
                    next_segments = context._next_segments_to_bind
                    if next_segments:
                        # Continue binding another level.
                        if self._bind_port_level(context, level + 1,
                                                 next_segments):
                            return True
                        else:
                            # Lower level failed: undo this level's entry
                            # and let the next driver try.
                            context._pop_binding_level()
                    else:
                        # Binding complete.
                        LOG.debug("Bound port: %(port)s, "
                                  "host: %(host)s, "
                                  "vif_type: %(vif_type)s, "
                                  "vif_details: %(vif_details)s, "
                                  "binding_levels: %(binding_levels)s",
                                  {'port': port_id,
                                   'host': context.host,
                                   'vif_type': binding.vif_type,
                                   'vif_details': binding.vif_details,
                                   'binding_levels': context.binding_levels})
                        return True
            except Exception:
                # A misbehaving driver must not abort the attempt; log
                # and move on to the next driver in order.
                LOG.exception(_LE("Mechanism driver %s failed in "
                                  "bind_port"),
                              driver.name)
        LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
                  {'port': context.current['id'],
                   'host': binding.host})
|
||||
|
||||
def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
|
||||
# To prevent a possible binding loop, don't try to bind with
|
||||
# this driver if the same driver has already bound at a higher
|
||||
# level to one of the segments we are currently trying to
|
||||
# bind. Note that is is OK for the same driver to bind at
|
||||
# multiple levels using different segments.
|
||||
for level in binding_levels:
|
||||
if (level.driver == driver and
|
||||
level.segment_id in segments_to_bind):
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_workers(self):
|
||||
workers = []
|
||||
for driver in self.ordered_mech_drivers:
|
||||
workers += driver.obj.get_workers()
|
||||
return workers
|
||||
|
||||
|
||||
class ExtensionManager(stevedore.named.NamedExtensionManager):
    """Manage extension drivers using drivers."""

    def __init__(self):
        # Ordered list of extension drivers, defining
        # the order in which the drivers are called.
        self.ordered_ext_drivers = []

        LOG.info(_LI("Configured extension driver names: %s"),
                 cfg.CONF.ml2.extension_drivers)
        # invoke_on_load=True instantiates each driver; name_order=True
        # preserves the operator-configured ordering.
        super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers',
                                               cfg.CONF.ml2.extension_drivers,
                                               invoke_on_load=True,
                                               name_order=True)
        LOG.info(_LI("Loaded extension driver names: %s"), self.names())
        self._register_drivers()
|
||||
|
||||
def _register_drivers(self):
|
||||
"""Register all extension drivers.
|
||||
|
||||
This method should only be called once in the ExtensionManager
|
||||
constructor.
|
||||
"""
|
||||
for ext in self:
|
||||
self.ordered_ext_drivers.append(ext)
|
||||
LOG.info(_LI("Registered extension drivers: %s"),
|
||||
[driver.name for driver in self.ordered_ext_drivers])
|
||||
|
||||
def initialize(self):
|
||||
# Initialize each driver in the list.
|
||||
for driver in self.ordered_ext_drivers:
|
||||
LOG.info(_LI("Initializing extension driver '%s'"), driver.name)
|
||||
driver.obj.initialize()
|
||||
|
||||
def extension_aliases(self):
|
||||
exts = []
|
||||
for driver in self.ordered_ext_drivers:
|
||||
alias = driver.obj.extension_alias
|
||||
if alias:
|
||||
exts.append(alias)
|
||||
LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"),
|
||||
{'alias': alias, 'drv': driver.name})
|
||||
return exts
|
||||
|
||||
    def _call_on_ext_drivers(self, method_name, plugin_context, data, result):
        """Helper method for calling a method across all extension drivers."""
        for driver in self.ordered_ext_drivers:
            try:
                getattr(driver.obj, method_name)(plugin_context, data, result)
            except Exception:
                # Log the failing driver, then re-raise the original
                # exception unchanged (save_and_reraise_exception keeps
                # the traceback intact).
                with excutils.save_and_reraise_exception():
                    LOG.info(_LI("Extension driver '%(name)s' failed in "
                                 "%(method)s"),
                             {'name': driver.name, 'method': method_name})
|
||||
|
||||
    def process_create_network(self, plugin_context, data, result):
        """Notify all extension drivers during network creation."""
        self._call_on_ext_drivers("process_create_network", plugin_context,
                                  data, result)

    def process_update_network(self, plugin_context, data, result):
        """Notify all extension drivers during network update."""
        self._call_on_ext_drivers("process_update_network", plugin_context,
                                  data, result)

    def process_create_subnet(self, plugin_context, data, result):
        """Notify all extension drivers during subnet creation."""
        self._call_on_ext_drivers("process_create_subnet", plugin_context,
                                  data, result)

    def process_update_subnet(self, plugin_context, data, result):
        """Notify all extension drivers during subnet update."""
        self._call_on_ext_drivers("process_update_subnet", plugin_context,
                                  data, result)

    def process_create_port(self, plugin_context, data, result):
        """Notify all extension drivers during port creation."""
        self._call_on_ext_drivers("process_create_port", plugin_context,
                                  data, result)

    def process_update_port(self, plugin_context, data, result):
        """Notify all extension drivers during port update."""
        self._call_on_ext_drivers("process_update_port", plugin_context,
                                  data, result)
|
||||
|
||||
def _call_on_dict_driver(self, method_name, session, base_model, result):
|
||||
for driver in self.ordered_ext_drivers:
|
||||
try:
|
||||
getattr(driver.obj, method_name)(session, base_model, result)
|
||||
except Exception:
|
||||
LOG.error(_LE("Extension driver '%(name)s' failed in "
|
||||
"%(method)s"),
|
||||
{'name': driver.name, 'method': method_name})
|
||||
raise ml2_exc.ExtensionDriverError(driver=driver.name)
|
||||
|
||||
    def extend_network_dict(self, session, base_model, result):
        """Notify all extension drivers to extend network dictionary."""
        self._call_on_dict_driver("extend_network_dict", session, base_model,
                                  result)

    def extend_subnet_dict(self, session, base_model, result):
        """Notify all extension drivers to extend subnet dictionary."""
        self._call_on_dict_driver("extend_subnet_dict", session, base_model,
                                  result)

    def extend_port_dict(self, session, base_model, result):
        """Notify all extension drivers to extend port dictionary."""
        self._call_on_dict_driver("extend_port_dict", session, base_model,
                                  result)
|
216
neutron/neutron/services/l3_router/aws_router_plugin.py
Normal file
216
neutron/neutron/services/l3_router/aws_router_plugin.py
Normal file
@ -0,0 +1,216 @@
|
||||
# Copyright 2016 Platform9 Systems Inc.(http://www.platform9.com)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.common import constants as n_const
|
||||
from neutron.db import common_db_mixin
|
||||
from neutron.db import extraroute_db
|
||||
from neutron.db import l3_db
|
||||
from neutron.db import l3_dvrscheduler_db
|
||||
from neutron.db import l3_gwmode_db
|
||||
from neutron.db import l3_hamode_db
|
||||
from neutron.db import l3_hascheduler_db
|
||||
from neutron.plugins.common import constants
|
||||
from neutron.quota import resource_registry
|
||||
from neutron.services import service_base
|
||||
from oslo_log import log as logging
|
||||
from neutron.common.aws_utils import AwsUtils
|
||||
from neutron.common import exceptions
|
||||
from neutron.db import securitygroups_db
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
class AwsRouterPlugin(service_base.ServicePluginBase,
                      common_db_mixin.CommonDbMixin,
                      extraroute_db.ExtraRoute_db_mixin,
                      l3_hamode_db.L3_HA_NAT_db_mixin,
                      l3_gwmode_db.L3_NAT_db_mixin,
                      l3_dvrscheduler_db.L3_DVRsch_db_mixin,
                      l3_hascheduler_db.L3_HA_scheduler_db_mixin):
    """Implementation of the Neutron L3 Router Service Plugin.

    This class implements a L3 service plugin that provides
    router and floatingip resources and manages associated
    request/response.
    All DB related work is implemented in classes
    l3_db.L3_NAT_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin,
    l3_dvr_db.L3_NAT_with_dvr_db_mixin, and extraroute_db.ExtraRoute_db_mixin.

    Every operation mirrors the Neutron resource onto AWS (internet
    gateways for routers, elastic IPs for floating IPs) via AwsUtils.
    """
    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
                                   "extraroute", "l3_agent_scheduler",
                                   "l3-ha", "security-group"]

    @resource_registry.tracked_resources(router=l3_db.Router,
                                         floatingip=l3_db.FloatingIP,
                                         security_group=securitygroups_db.SecurityGroup)
    def __init__(self):
        # Wrapper around the EC2 API used by all the AWS-facing calls below.
        self.aws_utils = AwsUtils()
        super(AwsRouterPlugin, self).__init__()
        # Register L3 DB event subscriptions (port delete hooks, etc.).
        l3_db.subscribe()
|
||||
|
||||
    def get_plugin_type(self):
        # Advertise this plugin as the L3 routing/NAT service provider.
        return constants.L3_ROUTER_NAT
|
||||
|
||||
def get_plugin_description(self):
|
||||
"""returns string description of the plugin."""
|
||||
return ("AWS L3 Router Service Plugin for basic L3 forwarding"
|
||||
" between (L2) Neutron networks and access to external"
|
||||
" networks via a NAT gateway.")
|
||||
|
||||
########## FLOATING IP FEATURES ###############
|
||||
|
||||
def create_floatingip(self, context, floatingip):
|
||||
try:
|
||||
response = self.aws_utils.allocate_elastic_ip()
|
||||
public_ip_allocated = response['PublicIp']
|
||||
LOG.info("Created elastic IP %s" % public_ip_allocated)
|
||||
if 'floatingip' in floatingip:
|
||||
floatingip['floatingip']['floating_ip_address'] = public_ip_allocated
|
||||
|
||||
if 'port_id' in floatingip['floatingip'] and floatingip['floatingip']['port_id'] is not None:
|
||||
# Associate to a Port
|
||||
port_id = floatingip['floatingip']['port_id']
|
||||
self._associate_floatingip_to_port(context, public_ip_allocated, port_id)
|
||||
except Exception as e:
|
||||
LOG.error("Error in Allocating EIP: %s " % e)
|
||||
raise e
|
||||
|
||||
return super(AwsRouterPlugin, self).create_floatingip(
|
||||
context, floatingip,
|
||||
initial_status=n_const.FLOATINGIP_STATUS_DOWN)
|
||||
|
||||
    def _associate_floatingip_to_port(self, context, floating_ip_address, port_id):
        """Associate an elastic IP with the EC2 instance backing a port.

        Looks up the Nova server owning the port's first fixed IP and
        reads its 'ec2_id' metadata to find the instance to associate.

        :raises exceptions.AwsException: when no matching server (or no
            ec2_id metadata) is found.
        """
        port = self._core_plugin.get_port(context, port_id)
        ec2_id = None
        fixed_ip_address = None
        # TODO: Assuming that there is only one fixed IP
        if len(port['fixed_ips']) > 0:
            fixed_ip = port['fixed_ips'][0]
            if 'ip_address' in fixed_ip:
                fixed_ip_address = fixed_ip['ip_address']
                # NOTE(review): looks up the instance by fixed IP within the
                # tenant; assumes fixed IPs are unique per tenant -- confirm.
                search_opts = {'ip': fixed_ip_address, 'tenant_id': context.tenant_id}
                server_list = self.aws_utils.get_nova_client().servers.list(search_opts=search_opts)
                if len(server_list) > 0:
                    server = server_list[0]
                    if 'ec2_id' in server.metadata:
                        ec2_id = server.metadata['ec2_id']
        if floating_ip_address is not None and ec2_id is not None:
            self.aws_utils.associate_elastic_ip_to_ec2_instance(floating_ip_address, ec2_id)
            LOG.info("EC2 ID found for IP %s : %s" % (fixed_ip_address, ec2_id))
        else:
            LOG.warning("EC2 ID not found to associate the floating IP")
            raise exceptions.AwsException(error_code="No Server Found",
                                          message="No server found with the Required IP")
|
||||
|
||||
def update_floatingip(self, context, id, floatingip):
|
||||
floating_ip_dict = super(AwsRouterPlugin, self).get_floatingip(context, id)
|
||||
if 'floatingip' in floatingip and 'port_id' in floatingip['floatingip']:
|
||||
port_id = floatingip['floatingip']['port_id']
|
||||
if port_id is not None:
|
||||
# Associate Floating IP
|
||||
LOG.info("Associating elastic IP %s with port %s" %
|
||||
(floating_ip_dict['floating_ip_address'], port_id))
|
||||
self._associate_floatingip_to_port(context,
|
||||
floating_ip_dict['floating_ip_address'], port_id)
|
||||
else:
|
||||
# Port Disassociate
|
||||
self.aws_utils.disassociate_elastic_ip_from_ec2_instance(floating_ip_dict['floating_ip_address'])
|
||||
return super(AwsRouterPlugin, self).update_floatingip(context, id, floatingip)
|
||||
|
||||
def delete_floatingip(self, context, id):
|
||||
floating_ip = super(AwsRouterPlugin, self).get_floatingip(context, id)
|
||||
floating_ip_address = floating_ip['floating_ip_address']
|
||||
LOG.info("Deleting elastic IP %s" % floating_ip_address)
|
||||
self.aws_utils.delete_elastic_ip(floating_ip_address)
|
||||
return super(AwsRouterPlugin, self).delete_floatingip(context, id)
|
||||
|
||||
##### ROUTERS #####
|
||||
|
||||
def create_router(self, context, router):
|
||||
try:
|
||||
router_name = router['router']['name']
|
||||
internet_gw_res = self.aws_utils.create_internet_gateway_resource()
|
||||
ret_obj = super(AwsRouterPlugin, self).create_router(context, router)
|
||||
internet_gw_res.create_tags(Tags=[
|
||||
{'Key': 'Name', 'Value': router_name},
|
||||
{'Key': 'openstack_router_id', 'Value': ret_obj['id']}
|
||||
])
|
||||
LOG.info("Created AWS router %s with openstack id %s" %
|
||||
(router_name, ret_obj['id']))
|
||||
return ret_obj
|
||||
except Exception as e:
|
||||
LOG.error("Error while creating router %s" % e)
|
||||
raise e
|
||||
|
||||
def delete_router(self, context, id):
|
||||
try:
|
||||
LOG.info("Deleting router %s" % id)
|
||||
self.aws_utils.detach_internet_gateway_by_router_id(id)
|
||||
self.aws_utils.delete_internet_gateway_by_router_id(id)
|
||||
except Exception as e:
|
||||
LOG.error("Error in Deleting Router: %s " % e)
|
||||
raise e
|
||||
return super(AwsRouterPlugin, self).delete_router(context, id)
|
||||
|
||||
def update_router(self, context, id, router):
|
||||
## get internet gateway resource by openstack router id and update the tags
|
||||
try:
|
||||
if 'router' in router and 'name' in router['router']:
|
||||
router_name = router['router']['name']
|
||||
tags_list = [
|
||||
{'Key': 'Name', 'Value': router_name},
|
||||
{'Key': 'openstack_router_id', 'Value': id}
|
||||
]
|
||||
LOG.info("Updated router %s" % id)
|
||||
self.aws_utils.create_tags_internet_gw_from_router_id(id, tags_list)
|
||||
except Exception as e:
|
||||
LOG.error("Error in Updating Router: %s " % e)
|
||||
raise e
|
||||
return super(AwsRouterPlugin, self).update_router(context, id, router)
|
||||
|
||||
###### ROUTER INTERFACE ######
|
||||
|
||||
def add_router_interface(self, context, router_id, interface_info):
    """Attach a subnet to a router and wire up the AWS side.

    Attaches the router's internet gateway to the VPC that backs the
    subnet's network, tags a route table with the router id if none is
    tagged yet, and installs a default route to the gateway.

    :param context: neutron request context
    :param router_id: OpenStack router id
    :param interface_info: dict containing at least 'subnet_id'
    :raises Exception: re-raises any AWS failure after logging it
    """
    subnet_id = interface_info['subnet_id']
    subnet_obj = self._core_plugin.get_subnet(context, subnet_id)
    LOG.info("Adding subnet %s to router %s", subnet_id, router_id)
    neutron_network_id = subnet_obj['network_id']
    try:
        # Resolve the AWS internet gateway and VPC for this attachment.
        ig_id = self.aws_utils.get_internet_gw_from_router_id(router_id)
        vpc_id = self.aws_utils.get_vpc_from_neutron_network_id(
            neutron_network_id)
        self.aws_utils.attach_internet_gateway(ig_id, vpc_id)
        # Search for a route table already tagged with this router id.
        route_tables = self.aws_utils.get_route_table_by_router_id(router_id)
        if not route_tables:
            # None tagged yet: pick the first route table of the VPC and
            # tag it so subsequent calls find it by router id.
            route_tables = self.aws_utils.describe_route_tables_by_vpc_id(
                vpc_id)
            if route_tables:
                route_table = route_tables[0]
                route_table_res = self.aws_utils._get_ec2_resource().RouteTable(
                    route_table['RouteTableId'])
                route_table_res.create_tags(Tags=[
                    {'Key': 'openstack_router_id', 'Value': router_id}
                ])
        if route_tables:
            route_table = route_tables[0]
            # ignore_errors: the default route may already exist.
            self.aws_utils.create_default_route_to_ig(
                route_table['RouteTableId'], ig_id, ignore_errors=True)
    except Exception as e:
        LOG.error("Error in Creating Interface: %s ", e)
        # Bare raise preserves the original traceback ('raise e' loses it).
        raise
    return super(AwsRouterPlugin, self).add_router_interface(
        context, router_id, interface_info)
|
||||
|
||||
def remove_router_interface(self, context, router_id, interface_info):
    """Detach a subnet from a router (Neutron DB side only).

    :param context: neutron request context
    :param router_id: OpenStack router id
    :param interface_info: dict containing at least 'subnet_id'
    """
    # Lazy %-style logging args instead of eager string interpolation.
    LOG.info("Deleting subnet %s from router %s",
             interface_info['subnet_id'], router_id)
    # TODO: Need to delete the route entry in the Route Table of AWS
    return super(AwsRouterPlugin, self).remove_router_interface(
        context, router_id, interface_info)
|
104
neutron/requirements.txt
Normal file
104
neutron/requirements.txt
Normal file
@ -0,0 +1,104 @@
|
||||
# The order of packages is significant, because pip processes them in the order
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
pbr==1.8.1
|
||||
|
||||
Paste==2.0.2
|
||||
PasteDeploy==1.5.2
|
||||
Routes==2.2;python_version=='2.7'
|
||||
Routes!=2.0,>=1.12.3;python_version!='2.7'
|
||||
debtcollector==1.3.0 # Apache-2.0
|
||||
eventlet==0.18.4
|
||||
pecan==1.0.4
|
||||
greenlet==0.4.9
|
||||
httplib2==0.9.2
|
||||
requests==2.9.1
|
||||
Jinja2==2.8 # BSD License (3 clause)
|
||||
keystonemiddleware==4.3.0
|
||||
netaddr==0.7.18
|
||||
python-neutronclient==4.0.0
|
||||
retrying==1.3.3 # Apache-2.0
|
||||
ryu==3.30 # Apache-2.0
|
||||
SQLAlchemy==1.0.12
|
||||
WebOb==1.5.1
|
||||
python-keystoneclient==2.2.0
|
||||
alembic==0.8.4
|
||||
six==1.10.0
|
||||
stevedore==1.11.0 # Apache-2.0
|
||||
oslo.vmware==2.1.0
|
||||
oslo.concurrency==2.6.1 # Apache-2.0
|
||||
oslo.config==3.7.0 # Apache-2.0
|
||||
oslo.context==2.0.0 # Apache-2.0
|
||||
oslo.db==4.5.0 # Apache-2.0
|
||||
oslo.i18n==3.3.0 # Apache-2.0
|
||||
oslo.log==3.0.0 # Apache-2.0
|
||||
oslo.messaging==4.3.0
|
||||
oslo.middleware==3.6.0
|
||||
oslo.policy==1.4.0 # Apache-2.0
|
||||
oslo.rootwrap==4.0.0 # Apache-2.0
|
||||
oslo.serialization==2.3.0 # Apache-2.0
|
||||
oslo.service==1.5.0 # Apache-2.0
|
||||
oslo.utils==3.6.0 # Apache-2.0
|
||||
oslo.versionedobjects==1.6.0
|
||||
|
||||
python-novaclient==3.2.0
|
||||
boto3==1.3.1
|
||||
|
||||
# Windows-only requirements
|
||||
pywin32;sys_platform=='win32'
|
||||
wmi;sys_platform=='win32'
|
||||
|
||||
## The following requirements were added by pip freeze:
|
||||
aioeventlet==0.5.1
|
||||
amqp==1.4.9
|
||||
anyjson==0.3.3
|
||||
appdirs==1.4.0
|
||||
Babel==2.2.0
|
||||
beautifulsoup4==4.4.1
|
||||
cachetools==1.1.5
|
||||
cliff==2.0.0
|
||||
cmd2==0.6.8
|
||||
contextlib2==0.5.1
|
||||
decorator==4.0.9
|
||||
enum34==1.1.2
|
||||
fasteners==0.14.1
|
||||
funcsigs==0.4
|
||||
futures==3.0.5
|
||||
futurist==0.13.0
|
||||
iso8601==0.1.11
|
||||
keystoneauth1==2.3.0
|
||||
kombu==3.0.35
|
||||
logutils==0.3.3
|
||||
Mako==1.0.4
|
||||
MarkupSafe==0.23
|
||||
monotonic==1.0
|
||||
msgpack-python==0.4.7
|
||||
MySQL-python==1.2.5
|
||||
netifaces==0.10.4
|
||||
os-client-config==1.16.0
|
||||
pika==0.10.0
|
||||
pika-pool==0.1.3
|
||||
positional==1.0.1
|
||||
prettytable==0.7.2
|
||||
pycadf==2.1.0
|
||||
pyinotify==0.9.6
|
||||
pyparsing==2.1.1
|
||||
python-dateutil==2.5.1
|
||||
python-editor==0.5
|
||||
pytz==2016.2
|
||||
PyYAML==3.11
|
||||
repoze.lru==0.6
|
||||
requestsexceptions==1.1.3
|
||||
simplejson==3.8.2
|
||||
singledispatch==3.4.0.3
|
||||
sqlalchemy-migrate==0.10.0
|
||||
sqlparse==0.1.19
|
||||
Tempita==0.5.2
|
||||
trollius==2.1
|
||||
unicodecsv==0.14.1
|
||||
waitress==0.8.10
|
||||
WebTest==2.0.20
|
||||
wrapt==1.10.6
|
||||
|
||||
# for ACI
|
||||
cryptography==1.1.2
|
208
neutron/setup.cfg
Normal file
208
neutron/setup.cfg
Normal file
@ -0,0 +1,208 @@
|
||||
[metadata]
|
||||
name = neutron
|
||||
summary = OpenStack Networking
|
||||
description-file =
|
||||
README.rst
|
||||
author = OpenStack
|
||||
author-email = openstack-dev@lists.openstack.org
|
||||
home-page = http://www.openstack.org/
|
||||
classifier =
|
||||
Environment :: OpenStack
|
||||
Intended Audience :: Information Technology
|
||||
Intended Audience :: System Administrators
|
||||
License :: OSI Approved :: Apache Software License
|
||||
Operating System :: POSIX :: Linux
|
||||
Programming Language :: Python
|
||||
Programming Language :: Python :: 2
|
||||
Programming Language :: Python :: 2.7
|
||||
|
||||
[files]
|
||||
packages =
|
||||
neutron
|
||||
data_files =
|
||||
etc/neutron =
|
||||
etc/api-paste.ini
|
||||
etc/dhcp_agent.ini
|
||||
etc/l3_agent.ini
|
||||
etc/metadata_agent.ini
|
||||
etc/metering_agent.ini
|
||||
etc/policy.json
|
||||
etc/neutron.conf
|
||||
etc/rootwrap.conf
|
||||
etc/neutron/rootwrap.d =
|
||||
etc/neutron/rootwrap.d/debug.filters
|
||||
etc/neutron/rootwrap.d/dhcp.filters
|
||||
etc/neutron/rootwrap.d/dibbler.filters
|
||||
etc/neutron/rootwrap.d/iptables-firewall.filters
|
||||
etc/neutron/rootwrap.d/ebtables.filters
|
||||
etc/neutron/rootwrap.d/ipset-firewall.filters
|
||||
etc/neutron/rootwrap.d/l3.filters
|
||||
etc/neutron/rootwrap.d/linuxbridge-plugin.filters
|
||||
etc/neutron/rootwrap.d/openvswitch-plugin.filters
|
||||
etc/init.d = etc/init.d/neutron-server
|
||||
etc/neutron/plugins/bigswitch =
|
||||
etc/neutron/plugins/bigswitch/restproxy.ini
|
||||
etc/neutron/plugins/bigswitch/ssl/ca_certs =
|
||||
etc/neutron/plugins/bigswitch/ssl/ca_certs/README
|
||||
etc/neutron/plugins/bigswitch/ssl/host_certs =
|
||||
etc/neutron/plugins/bigswitch/ssl/host_certs/README
|
||||
etc/neutron/plugins/brocade =
|
||||
etc/neutron/plugins/brocade/brocade.ini
|
||||
etc/neutron/plugins/brocade/brocade_mlx.ini
|
||||
etc/neutron/plugins/brocade/vyatta = etc/neutron/plugins/brocade/vyatta/vrouter.ini
|
||||
etc/neutron/plugins/cisco =
|
||||
etc/neutron/plugins/cisco/cisco_vpn_agent.ini
|
||||
etc/neutron/plugins/embrane = etc/neutron/plugins/embrane/heleos_conf.ini
|
||||
etc/neutron/plugins/ml2 =
|
||||
etc/neutron/plugins/bigswitch/restproxy.ini
|
||||
etc/neutron/plugins/ml2/linuxbridge_agent.ini
|
||||
etc/neutron/plugins/ml2/ml2_conf.ini
|
||||
etc/neutron/plugins/ml2/ml2_conf_brocade.ini
|
||||
etc/neutron/plugins/ml2/ml2_conf_brocade_fi_ni.ini
|
||||
etc/neutron/plugins/ml2/ml2_conf_ofa.ini
|
||||
etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
|
||||
etc/neutron/plugins/ml2/ml2_conf_sriov.ini
|
||||
etc/neutron/plugins/ml2/openvswitch_agent.ini
|
||||
etc/neutron/plugins/ml2/sriov_agent.ini
|
||||
etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini
|
||||
etc/neutron/plugins/nuage = etc/neutron/plugins/nuage/nuage_plugin.ini
|
||||
etc/neutron/plugins/oneconvergence = etc/neutron/plugins/oneconvergence/nvsdplugin.ini
|
||||
etc/neutron/plugins/opencontrail = etc/neutron/plugins/opencontrail/contrailplugin.ini
|
||||
etc/neutron/plugins/ovsvapp = etc/neutron/plugins/ovsvapp/ovsvapp_agent.ini
|
||||
scripts =
|
||||
bin/neutron-rootwrap-xen-dom0
|
||||
|
||||
[entry_points]
|
||||
console_scripts =
|
||||
neutron-db-manage = neutron.db.migration.cli:main
|
||||
neutron-debug = neutron.debug.shell:main
|
||||
neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main
|
||||
neutron-hyperv-agent = neutron.cmd.eventlet.plugins.hyperv_neutron_agent:main
|
||||
neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main
|
||||
neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main
|
||||
neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main
|
||||
neutron-linuxbridge-agent = neutron.plugins.ml2.drivers.linuxbridge.agent.linuxbridge_neutron_agent:main
|
||||
neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main
|
||||
neutron-mlnx-agent = neutron.cmd.eventlet.plugins.mlnx_neutron_agent:main
|
||||
neutron-netns-cleanup = neutron.cmd.netns_cleanup:main
|
||||
neutron-ns-metadata-proxy = neutron.cmd.eventlet.agents.metadata_proxy:main
|
||||
neutron-ovsvapp-agent = neutron.cmd.eventlet.plugins.ovsvapp_neutron_agent:main
|
||||
neutron-nvsd-agent = neutron.plugins.oneconvergence.agent.nvsd_neutron_agent:main
|
||||
neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main
|
||||
neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main
|
||||
neutron-pd-notify = neutron.cmd.pd_notify:main
|
||||
neutron-restproxy-agent = neutron.plugins.bigswitch.agent.restproxy_agent:main
|
||||
neutron-server = neutron.cmd.eventlet.server:main_wsgi_eventlet
|
||||
neutron-dev-server = neutron.cmd.eventlet.server:main_wsgi_pecan
|
||||
neutron-rpc-server = neutron.cmd.eventlet.server:main_rpc_eventlet
|
||||
neutron-rootwrap = oslo_rootwrap.cmd:main
|
||||
neutron-rootwrap-daemon = oslo_rootwrap.cmd:daemon
|
||||
neutron-usage-audit = neutron.cmd.eventlet.usage_audit:main
|
||||
neutron-metering-agent = neutron.cmd.eventlet.services.metering_agent:main
|
||||
neutron-sriov-nic-agent = neutron.plugins.ml2.drivers.mech_sriov.agent.sriov_nic_agent:main
|
||||
neutron-sanity-check = neutron.cmd.sanity_check:main
|
||||
neutron.core_plugins =
|
||||
bigswitch = neutron.plugins.bigswitch.plugin:NeutronRestProxyV2
|
||||
brocade = neutron.plugins.brocade.NeutronPlugin:BrocadePluginV2
|
||||
embrane = neutron.plugins.embrane.plugins.embrane_ml2_plugin:EmbraneMl2Plugin
|
||||
ml2 = neutron.plugins.ml2.plugin:Ml2Plugin
|
||||
nuage = neutron.plugins.nuage.plugin:NuagePlugin
|
||||
oneconvergence = neutron.plugins.oneconvergence.plugin:OneConvergencePluginV2
|
||||
neutron.service_plugins =
|
||||
dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin
|
||||
router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin
|
||||
bigswitch_l3 = neutron.plugins.bigswitch.l3_router_plugin:L3RestProxy
|
||||
brocade_vyatta_l3 = neutron.services.l3_router.brocade.vyatta.vrouter_neutron_plugin:VyattaVRouterPlugin
|
||||
brocade_mlx_l3 = neutron.services.l3_router.brocade.mlx.l3_router_plugin:BrocadeRouterPlugin
|
||||
firewall = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
|
||||
fsl_firewall = neutron_fwaas.services.firewall.freescale.fwaas_plugin:FirewallPlugin
|
||||
lbaas = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
|
||||
vpnaas = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
|
||||
metering = neutron.services.metering.metering_plugin:MeteringPlugin
|
||||
neutron.services.firewall.fwaas_plugin.FirewallPlugin = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
|
||||
neutron.services.loadbalancer.plugin.LoadBalancerPlugin = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
|
||||
neutron.services.vpn.plugin.VPNDriverPlugin = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
|
||||
ibm_l3 = neutron.services.l3_router.l3_sdnve:SdnveL3ServicePlugin
|
||||
qos = neutron.services.qos.qos_plugin:QoSPlugin
|
||||
# PF9 start
|
||||
aws_router = neutron.services.l3_router.aws_router_plugin:AwsRouterPlugin
|
||||
# PF9 end
|
||||
neutron.qos.notification_drivers =
|
||||
message_queue = neutron.services.qos.notification_drivers.message_queue:RpcQosServiceNotificationDriver
|
||||
neutron.ml2.type_drivers =
|
||||
flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
|
||||
local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
|
||||
vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
|
||||
geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver
|
||||
gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver
|
||||
vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver
|
||||
opflex = opflexagent.type_opflex:OpflexTypeDriver
|
||||
neutron.ml2.mechanism_drivers =
|
||||
ovsvapp = neutron.plugins.ml2.drivers.ovsvapp.mech_driver:OVSvAppAgentMechanismDriver
|
||||
opendaylight = neutron.plugins.ml2.drivers.opendaylight.driver:OpenDaylightMechanismDriver
|
||||
logger = neutron.tests.unit.plugins.ml2.drivers.mechanism_logger:LoggerMechanismDriver
|
||||
test = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriver
|
||||
linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver
|
||||
openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver
|
||||
hyperv = neutron.plugins.ml2.drivers.hyperv.mech_hyperv:HypervMechanismDriver
|
||||
l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
|
||||
ofagent = neutron.plugins.ml2.drivers.ofagent.driver:OfagentMechanismDriver
|
||||
mlnx = neutron.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver
|
||||
brocade = networking_brocade.vdx.ml2driver.mechanism_brocade:BrocadeMechanism
|
||||
brocade_fi_ni = neutron.plugins.ml2.drivers.brocade.fi_ni.mechanism_brocade_fi_ni:BrocadeFiNiMechanism
|
||||
fslsdn = neutron.plugins.ml2.drivers.freescale.mechanism_fslsdn:FslsdnMechanismDriver
|
||||
sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver
|
||||
fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver
|
||||
sdnve = neutron.plugins.ml2.drivers.ibm.mechanism_sdnve:SdnveMechanismDriver
|
||||
aws = neutron.plugins.ml2.drivers.aws.mechanism_aws:AwsMechanismDriver
|
||||
neutron.ml2.extension_drivers =
|
||||
test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver
|
||||
testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver
|
||||
port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver
|
||||
qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver
|
||||
neutron.openstack.common.cache.backends =
|
||||
memory = neutron.openstack.common.cache._backends.memory:MemoryBackend
|
||||
neutron.ipam_drivers =
|
||||
fake = neutron.tests.unit.ipam.fake_driver:FakeDriver
|
||||
internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool
|
||||
neutron.agent.l2.extensions =
|
||||
qos = neutron.agent.l2.extensions.qos:QosAgentExtension
|
||||
neutron.qos.agent_drivers =
|
||||
ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver
|
||||
sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver
|
||||
neutron.agent.linux.pd_drivers =
|
||||
dibbler = neutron.agent.linux.dibbler:PDDibbler
|
||||
# These are for backwards compat with Icehouse notification_driver configuration values
|
||||
oslo.messaging.notify.drivers =
|
||||
neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver
|
||||
neutron.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver
|
||||
neutron.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify._impl_messaging:MessagingV2Driver
|
||||
neutron.openstack.common.notifier.rpc_notifier = oslo_messaging.notify._impl_messaging:MessagingDriver
|
||||
neutron.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver
|
||||
neutron.db.alembic_migrations =
|
||||
neutron = neutron.db.migration:alembic_migrations
|
||||
|
||||
[build_sphinx]
|
||||
all_files = 1
|
||||
build-dir = doc/build
|
||||
source-dir = doc/source
|
||||
|
||||
[extract_messages]
|
||||
keywords = _ gettext ngettext l_ lazy_gettext
|
||||
mapping_file = babel.cfg
|
||||
output_file = neutron/locale/neutron.pot
|
||||
|
||||
[compile_catalog]
|
||||
directory = neutron/locale
|
||||
domain = neutron
|
||||
|
||||
[update_catalog]
|
||||
domain = neutron
|
||||
output_dir = neutron/locale
|
||||
input_file = neutron/locale/neutron.pot
|
||||
|
||||
[wheel]
|
||||
universal = 1
|
||||
|
||||
[pbr]
|
||||
warnerrors = true
|
23
nova/README.md
Normal file
23
nova/README.md
Normal file
@ -0,0 +1,23 @@
|
||||
## Setup
|
||||
|
||||
### Prerequisites
|
||||
1. Working green field OpenStack deployment (code currently based out of stable/liberty)
|
||||
2. The virtualenv used by nova should have Amazon boto package installed
|
||||
|
||||
#### Components
|
||||
- Nova driver: Handles instance creation, power operations and snapshotting an instance to AMI
|
||||
|
||||
### Instructions
|
||||
1. Copy the nova/ec2 directory to <nova-root>/nova/nova/virt/
|
||||
2. Update the configuration files -
|
||||
1. edit /etc/nova/**nova.conf**
|
||||
```
|
||||
[DEFAULT]
|
||||
compute_driver = ec2.EC2Driver
|
||||
|
||||
[AWS]
|
||||
secret_key = <your aws secret access key>
|
||||
access_key = <your aws access key>
|
||||
region_name = <aws region to use>
|
||||
```
|
||||
3. Restart the nova compute services
|
18
nova/ec2/__init__.py
Normal file
18
nova/ec2/__init__.py
Normal file
@ -0,0 +1,18 @@
|
||||
# Copyright (c) 2014 Thoughtworks.
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova.virt.ec2 import ec2driver
|
||||
|
||||
EC2Driver = ec2driver.EC2Driver
|
47
nova/ec2/cloud_burst_filter.py
Normal file
47
nova/ec2/cloud_burst_filter.py
Normal file
@ -0,0 +1,47 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
from nova import db
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.scheduler import filters
|
||||
|
||||
# Scheduler configuration for cloud bursting: when enabled, only hosts in
# the designated public-cloud availability zone pass the filter.
opts = [
    cfg.BoolOpt('cloud_burst',
                help='Switch to enable cloud bursting'),  # typo fix: was "could bursting"
    cfg.StrOpt('cloud_burst_availability_zone',
               help='The availability zone of only compute hosts with the public cloud driver'),
]
CONF = cfg.CONF
CONF.register_opts(opts)

LOG = logging.getLogger(__name__)
|
||||
|
||||
class CloudBurstFilter(filters.BaseHostFilter):
    """Scheduler host filter for the cloud-burst availability zone.

    With ``cloud_burst`` enabled only hosts inside the configured
    public-cloud availability zone pass; with it disabled only hosts
    outside that zone pass.
    """

    # Host membership in an aggregate AZ cannot change within one request.
    run_filter_once_per_request = True

    def host_passes(self, host_state, filter_properties):
        """Return True if the host is allowed under the cloud-burst policy."""
        context = filter_properties['context'].elevated()
        metadata = db.aggregate_metadata_get_by_host(
            context, host_state.host, key='availability_zone')
        # Removed an unreachable 'return True' that followed this if/else:
        # both branches already return.
        if CONF.cloud_burst:
            return (CONF.cloud_burst_availability_zone
                    in metadata['availability_zone'])
        return (CONF.cloud_burst_availability_zone
                not in metadata['availability_zone'])
|
20
nova/ec2/ec2_group_transformer.py
Normal file
20
nova/ec2/ec2_group_transformer.py
Normal file
@ -0,0 +1,20 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class EC2GroupTransformer:
    """Converts an EC2 security group into the driver's Group model.

    NOTE(review): placeholder — ``to_group`` is not implemented yet and
    currently returns None.
    """

    def to_group(self, ec2_group):
        # TODO: implement the EC2 -> Group conversion.
        pass
|
25
nova/ec2/ec2_rule_service.py
Normal file
25
nova/ec2/ec2_rule_service.py
Normal file
@ -0,0 +1,25 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class EC2RuleService:
    """Fetches a security group's rules from EC2 as a set of Rule objects.

    Collaborators:
      ec2_connection: boto EC2 connection used to look up security groups.
      ec2_rule_transformer: converts each boto rule into a Rule.
    """

    def __init__(self, ec2_connection, ec2_rule_transformer):
        self.ec2_connection = ec2_connection
        self.ec2_rule_transformer = ec2_rule_transformer

    def get_rules_for_group(self, group_name):
        """Return the set of transformed rules of the named EC2 group.

        Raises IndexError if no group with that name exists (empty lookup).
        """
        group = self.ec2_connection.get_all_security_groups(
            groupnames=group_name)[0]
        # Set comprehension instead of set([list-comprehension]) (idiom).
        return {self.ec2_rule_transformer.to_rule(rule)
                for rule in group.rules}
|
38
nova/ec2/ec2_rule_transformer.py
Normal file
38
nova/ec2/ec2_rule_transformer.py
Normal file
@ -0,0 +1,38 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from copy import deepcopy
|
||||
from rule import Rule
|
||||
|
||||
|
||||
class EC2RuleTransformer:
    """Converts a boto EC2 security-group rule into the driver's Rule.

    Uses the EC2 connection to resolve a source group id into its name
    when the rule grants access to another group rather than a CIDR.
    """

    def __init__(self, ec2_connection):
        self.ec2_connection = ec2_connection

    def to_rule(self, ec2_rule):
        """Build a Rule from the protocol, port range and first grant."""
        grant = ec2_rule.grants[0]
        kwargs = {
            'ip_protocol': ec2_rule.ip_protocol,
            'from_port': ec2_rule.from_port,
            'to_port': ec2_rule.to_port,
        }
        if grant.cidr_ip:
            kwargs['ip_range'] = grant.cidr_ip
        else:
            # Grant references another security group: resolve its name.
            source_groups = self.ec2_connection.get_all_security_groups(
                group_ids=grant.group_id)
            kwargs['group_name'] = source_groups[0].name
        return Rule(**kwargs)
|
1241
nova/ec2/ec2driver.py
Normal file
1241
nova/ec2/ec2driver.py
Normal file
File diff suppressed because it is too large
Load Diff
31
nova/ec2/exception_handler.py
Normal file
31
nova/ec2/exception_handler.py
Normal file
@ -0,0 +1,31 @@
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from nova import exception
|
||||
|
||||
class Ec2ExceptionHandler:
    """Maps EC2 error responses onto Nova exceptions.

    Gives control over the error message shown to the user instead of
    surfacing the raw EC2 response.
    """

    @staticmethod
    def get_processed_exception(ec2_response_error_exc):
        """Return the Nova exception matching the EC2 error code."""
        error_code = ec2_response_error_exc.error_code
        if error_code == "AuthFailure":
            return exception.Forbidden("Please check AWS credentials")
        if error_code == "InvalidAMIID.NotFound":
            return exception.ImageNotFoundEC2("Invalid Image")
        return exception.NovaException(ec2_response_error_exc.message)
|
20
nova/ec2/group.py
Normal file
20
nova/ec2/group.py
Normal file
@ -0,0 +1,20 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class Group:
    """Security-group model for the EC2 driver.

    NOTE(review): placeholder — ``rule_diff`` is not implemented yet and
    currently returns None.
    """

    def rule_diff(self, other_group):
        # TODO: compute the rule difference against other_group.
        pass
|
55
nova/ec2/group_rule_refresher.py
Normal file
55
nova/ec2/group_rule_refresher.py
Normal file
@ -0,0 +1,55 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class GroupRuleRefresher:
    """Synchronises an EC2 security group's rules with OpenStack's view.

    Rules present in OpenStack but missing on EC2 are authorized;
    rules present on EC2 but absent in OpenStack are revoked.
    """

    def __init__(self, ec2_connection, openstack_rule_service, ec2_rule_service):
        self.ec2_conn = ec2_connection
        self.openstack_rule_service = openstack_rule_service
        self.ec2_rule_service = ec2_rule_service

    def refresh(self, group_name):
        """Make the EC2 group's rule set match the OpenStack group's."""
        openstack_rules = self.openstack_rule_service.get_rules_for_group(
            group_name)
        ec2_rules = self.ec2_rule_service.get_rules_for_group(group_name)

        self._add_rules_to_ec2(ec2_rules, group_name, openstack_rules)
        self._remove_rules_from_ec2(ec2_rules, group_name, openstack_rules)

    def _add_rules_to_ec2(self, ec2_rules, group_name, openstack_rules):
        # Authorize every rule OpenStack has that EC2 lacks.
        for missing in openstack_rules - ec2_rules:
            self._add_rule_on_ec2(group_name, missing)

    def _remove_rules_from_ec2(self, ec2_rules, group_name, openstack_rules):
        # Revoke every rule EC2 has that OpenStack lacks.
        for stale in ec2_rules - openstack_rules:
            self._remove_rule_from_ec2(group_name, stale)

    def _remove_rule_from_ec2(self, group_name, rule):
        self.ec2_conn.revoke_security_group(
            **self._rule_kwargs(group_name, rule))

    def _add_rule_on_ec2(self, group_name, rule):
        self.ec2_conn.authorize_security_group(
            **self._rule_kwargs(group_name, rule))

    @staticmethod
    def _rule_kwargs(group_name, rule):
        # Shared keyword arguments for both authorize and revoke calls.
        return dict(group_name=group_name,
                    ip_protocol=rule.ip_protocol,
                    from_port=rule.from_port,
                    to_port=rule.to_port,
                    cidr_ip=rule.ip_range)
|
27
nova/ec2/instance_rule_refresher.py
Normal file
27
nova/ec2/instance_rule_refresher.py
Normal file
@ -0,0 +1,27 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class InstanceRuleRefresher:
    """Refreshes the rules of every security group attached to an instance."""

    def __init__(self, group_rule_refresher):
        self.group_rule_refresher = group_rule_refresher

    def refresh(self, instance):
        """Delegate a per-group refresh for each group on the instance."""
        for name in self._get_group_names(instance):
            self.group_rule_refresher.refresh(name)

    def _get_group_names(self, instance):
        # Each entry of instance.security_groups is a dict with a 'name' key.
        names = []
        for group in instance.security_groups:
            names.append(group['name'])
        return names
|
23
nova/ec2/openstack_group_service.py
Normal file
23
nova/ec2/openstack_group_service.py
Normal file
@ -0,0 +1,23 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class OpenstackGroupService():
    """Looks up OpenStack security groups by name via the group manager."""

    def __init__(self, security_group_manager):
        self.security_group_manager = security_group_manager

    def get_group(self, group_name):
        """Return the first group whose name matches group_name.

        Raises IndexError when no group matches (same as the original
        list-index lookup).
        """
        matches = [grp for grp in self.security_group_manager.list()
                   if grp.name == group_name]
        return matches[0]
|
19
nova/ec2/openstack_group_transformer.py
Normal file
19
nova/ec2/openstack_group_transformer.py
Normal file
@ -0,0 +1,19 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class OpenstackGroupTransformer:
    """Placeholder for converting an OpenStack group into the driver's model."""

    def to_group(self, openstack_group):
        # Not implemented yet: returns None for every input, exactly like
        # the original stub body.
        return None
|
25
nova/ec2/openstack_rule_service.py
Normal file
25
nova/ec2/openstack_rule_service.py
Normal file
@ -0,0 +1,25 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class OpenstackRuleService:
    """Fetches a security group's rules and normalises them for comparison."""

    def __init__(self, group_service, openstack_rule_transformer):
        # Resolves a group name to an OpenStack group object.
        self.group_service = group_service
        # Converts each raw OpenStack rule into a hashable Rule object.
        self.openstack_rule_transformer = openstack_rule_transformer

    def get_rules_for_group(self, group_name):
        """Return the named group's rules as a set of transformed rules.

        A set is returned so callers can compare rule collections without
        caring about ordering or duplicates.
        """
        openstack_group = self.group_service.get_group(group_name)
        # Set comprehension instead of set([...]); the dead commented-out
        # fallback line from the original has been removed.
        return {self.openstack_rule_transformer.to_rule(rule)
                for rule in openstack_group.rules}
|
33
nova/ec2/openstack_rule_transformer.py
Normal file
33
nova/ec2/openstack_rule_transformer.py
Normal file
@ -0,0 +1,33 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from copy import deepcopy
|
||||
from rule import Rule
|
||||
|
||||
|
||||
class OpenstackRuleTransformer:
    """Builds a driver Rule from a raw OpenStack security-group rule mapping."""

    def to_rule(self, openstack_rule):
        """Convert one OpenStack rule dict into a Rule.

        Ports are stringified so they compare cleanly against EC2's
        string-typed port values. When the rule's ip_range carries a
        'cidr' the Rule gets that CIDR; otherwise the source group's
        name is used instead.
        """
        protocol = openstack_rule['ip_protocol']
        from_port = str(openstack_rule['from_port'])
        to_port = str(openstack_rule['to_port'])

        ip_range = openstack_rule['ip_range']
        if 'cidr' in ip_range:
            return Rule(ip_protocol=protocol,
                        from_port=from_port,
                        to_port=to_port,
                        ip_range=ip_range['cidr'])
        return Rule(ip_protocol=protocol,
                    from_port=from_port,
                    to_port=to_port,
                    group_name=openstack_rule['group']['name'])
|
32
nova/ec2/rule.py
Normal file
32
nova/ec2/rule.py
Normal file
@ -0,0 +1,32 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class Rule:
    """Value object describing a single security-group rule.

    Instances are hashable and compare by value, so they can be collected
    into sets and diffed across OpenStack and EC2 sources.
    """

    def __init__(self, ip_protocol, from_port, to_port, ip_range=None, group_name=None):
        # Ports are expected as strings (callers stringify them) so equality
        # with EC2's string-typed ports works -- TODO confirm with callers.
        self.ip_protocol = ip_protocol
        self.from_port = from_port
        self.to_port = to_port
        # Normally exactly one of ip_range (a CIDR) or group_name is set.
        self.ip_range = ip_range
        self.group_name = group_name

    def __key(self):
        # Single identity tuple used by __eq__, __hash__ and __repr__ so
        # they can never drift out of sync.
        return (self.ip_protocol, self.from_port, self.to_port,
                self.ip_range, self.group_name)

    def __eq__(self, other):
        # Bug fix: the original unconditionally called other.__key() and
        # raised AttributeError when compared against a non-Rule object.
        if not isinstance(other, Rule):
            return NotImplemented
        return self.__key() == other.__key()

    def __ne__(self, other):
        # Required for consistency on Python 2; harmless on Python 3.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash(self.__key())

    def __repr__(self):
        return ('Rule(ip_protocol=%r, from_port=%r, to_port=%r, '
                'ip_range=%r, group_name=%r)' % self.__key())
|
47
nova/ec2/rule_comparator.py
Normal file
47
nova/ec2/rule_comparator.py
Normal file
@ -0,0 +1,47 @@
|
||||
# Copyright (c) 2014 ThoughtWorks
|
||||
# Copyright (c) 2016 Platform9 Systems Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
class RuleComparator:
    """Compares an OpenStack rule dict with an EC2 rule object field by field."""

    def __init__(self, ec2_connection):
        # Needed to resolve an EC2 grant's group_id into a group name.
        self.ec2_connection = ec2_connection

    def rules_are_equal(self, openstack_rule, ec2_rule):
        """Return True when the two rules describe the same permission.

        Checks run in the original order with early exits, so the EC2
        group-name lookup (a remote call) only happens when every cheaper
        field already matches.
        """
        if self._ip_protocols_are_different(ec2_rule, openstack_rule):
            return False
        if self._from_ports_are_different(ec2_rule, openstack_rule):
            return False
        if self._to_ports_are_different(ec2_rule, openstack_rule):
            return False
        if self._ip_ranges_are_present_and_different(ec2_rule, openstack_rule):
            return False
        if self._group_names_are_present_and_different(openstack_rule, ec2_rule):
            return False
        return True

    def _ip_protocols_are_different(self, ec2_rule, openstack_rule):
        return ec2_rule.ip_protocol != openstack_rule['ip_protocol']

    def _from_ports_are_different(self, ec2_rule, openstack_rule):
        return ec2_rule.from_port != str(openstack_rule['from_port'])

    def _to_ports_are_different(self, ec2_rule, openstack_rule):
        return ec2_rule.to_port != str(openstack_rule['to_port'])

    def _ip_ranges_are_present_and_different(self, ec2_rule, openstack_rule):
        # Only meaningful when the OpenStack rule carries a CIDR.
        ip_range = openstack_rule['ip_range']
        if 'cidr' not in ip_range:
            return False
        return ip_range['cidr'] != ec2_rule.grants[0].cidr_ip

    def _group_names_are_present_and_different(self, openstack_rule, ec2_rule):
        # Only meaningful when the OpenStack rule references a source group.
        if 'name' not in openstack_rule['group']:
            return False
        grant_group_id = ec2_rule.grants[0].group_id
        ec2_groups = self.ec2_connection.get_all_security_groups(
            group_ids=grant_group_id)
        return ec2_groups[0].name != openstack_rule['group']['name']
|
Loading…
x
Reference in New Issue
Block a user