Merged trunk and resolved conflicts
commit 60d78fb396
2 Authors
@@ -17,6 +17,7 @@ Christian Berendt <berendt@b1-systems.de>
Chuck Short <zulcss@ubuntu.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dan.prince@rackspace.com>
Dave Walker <DaveWalker@ubuntu.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
@@ -65,6 +66,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
Naveed Massjouni <naveedm9@gmail.com>
Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
Renuka Apte <renuka.apte@citrix.com>
Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
@@ -108,6 +108,13 @@ def main():
interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
if int(os.environ.get('TESTING', '0')):
from nova.tests import fake_flags

#if FLAGS.fake_rabbit:
# LOG.debug(_("leasing ip"))
# network_manager = utils.import_object(FLAGS.network_manager)
## reload(fake_flags)
# from nova.tests import fake_flags

action = argv[1]
if action in ['add', 'del', 'old']:
mac = argv[2]
@@ -97,7 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -35,6 +35,7 @@ Programming Concepts
.. toctree::
:maxdepth: 3

zone
rabbit

API Reference
@@ -17,7 +17,7 @@
Zones
=====

A Nova deployment is called a Zone. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution.
A Nova deployment is called a Zone. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers.

The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.

@@ -34,7 +34,7 @@ Routing between Zones is based on the Capabilities of that Zone. Capabilities ar

key=value;value;value, key=value;value;value

Zones have Capabilities which are general to the Zone and are set via `--zone-capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
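As a concrete illustration (the zone name and capability values below are made up, not part of this change), a child Zone might be started with::

    --zone_name=child1 --zone_capabilities=hypervisor=kvm;xen,os=linux

and a service whose manager derives from `nova.manager.SchedulerDependentManager` could publish dynamic capabilities along these lines (a minimal sketch, assuming the periodic task hook of that era)::

    from nova import manager


    class ExampleManager(manager.SchedulerDependentManager):
        # Hypothetical manager, shown only to illustrate the call.

        def periodic_tasks(self, context=None):
            super(ExampleManager, self).periodic_tasks(context)
            # Whatever is reported here is forwarded to the Scheduler
            # service every --periodic_interval seconds by the base class.
            self.update_service_capabilities({'hypervisor_type': 'kvm',
                                              'free_ram_mb': 2048})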
Flow within a Zone
------------------
@@ -47,7 +47,7 @@ Inter-service communication within a Zone is done with RabbitMQ. Each class of S

These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing.

The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone-name` flag (and defaults to "nova").
The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova").

Zone administrative functions
-----------------------------
@@ -6,7 +6,7 @@ nova-manage
control and manage cloud computer instances and images
------------------------------------------------------

:Author: nova@lists.launchpad.net
:Author: openstack@lists.launchpad.net
:Date: 2010-11-16
:Copyright: OpenStack LLC
:Version: 0.1
@@ -121,7 +121,7 @@ Nova Role
nova-manage role <action> [<argument>]
``nova-manage role add <username> <rolename> <(optional) projectname>``

Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: cloudadmin, itsec, sysadmin, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
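For example, ``nova-manage role add alice netadmin someproject`` (user and project names here are placeholders) assigns alice the netadmin role only within someproject, while ``nova-manage role add alice netadmin`` assigns it across all projects.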
``nova-manage role has <username> <projectname>``
Checks the user or project and responds with True if the user has a global role with a particular project.
@@ -38,11 +38,11 @@ Role-based access control (RBAC) is an approach to restricting system access to

Nova’s rights management system employs the RBAC model and currently supports the following five roles:

* **Cloud Administrator.** (admin) Users of this class enjoy complete system access.
* **Cloud Administrator.** (cloudadmin) Users of this class enjoy complete system access.
* **IT Security.** (itsec) This role is limited to IT security personnel. It permits role holders to quarantine instances.
* **Project Manager.** (projectmanager)The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances.
* **System Administrator.** (sysadmin) The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances.
* **Network Administrator.** (netadmin) Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall rules.
* **Developer.** This is a general purpose role that is assigned to users by default.
* **Developer.** (developer) This is a general purpose role that is assigned to users by default.

RBAC management is exposed through the dashboard for simplified user management.
@@ -338,6 +338,10 @@ class Executor(wsgi.Application):
else:
return self._error(req, context, type(ex).__name__,
unicode(ex))
except exception.KeyPairExists as ex:
LOG.debug(_('KeyPairExists raised: %s'), unicode(ex),
context=context)
return self._error(req, context, type(ex).__name__, unicode(ex))
except Exception as ex:
extra = {'environment': req.environ}
LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
@@ -180,7 +180,8 @@ class Controller(common.OpenstackController):
key_name=key_name,
key_data=key_data,
metadata=env['server'].get('metadata', {}),
injected_files=injected_files)
injected_files=injected_files,
admin_password=password)
except quota.QuotaError as error:
self._handle_quota_error(error)

@@ -190,8 +191,6 @@ class Controller(common.OpenstackController):
builder = self._get_view_builder(req)
server = builder.build(inst, is_detail=True)
server['server']['adminPass'] = password
self.compute_api.set_admin_password(context, server['server']['id'],
password)
return server

def _deserialize_create(self, request):
@@ -608,8 +607,8 @@ class ControllerV10(Controller):

def _parse_update(self, context, server_id, inst_dict, update_dict):
if 'adminPass' in inst_dict['server']:
update_dict['admin_pass'] = inst_dict['server']['adminPass']
self.compute_api.set_admin_password(context, server_id)
self.compute_api.set_admin_password(context, server_id,
inst_dict['server']['adminPass'])

def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
@@ -135,7 +135,8 @@ class API(base.Base):
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None):
injected_files=None,
admin_password=None):
"""Create the number and type of instances requested.

Verifies that quota and other arguments are valid.
@@ -269,7 +270,8 @@ class API(base.Base):
"instance_id": instance_id,
"instance_type": instance_type,
"availability_zone": availability_zone,
"injected_files": injected_files}})
"injected_files": injected_files,
"admin_password": admin_password}})

for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
@@ -508,15 +510,6 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)

def _set_admin_password(self, context, instance_id, password):
"""Set the root/admin password for the given instance."""
host = self._find_host(context, instance_id)

rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "set_admin_password",
"args": {"instance_id": instance_id, "new_pass": password}})

def snapshot(self, context, instance_id, name):
"""Snapshot the given instance.

@@ -670,8 +663,12 @@ class API(base.Base):

def set_admin_password(self, context, instance_id, password=None):
"""Set the root/admin password for the given instance."""
eventlet.spawn_n(self._set_admin_password(context, instance_id,
password))
host = self._find_host(context, instance_id)

rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "set_admin_password",
"args": {"instance_id": instance_id, "new_pass": password}})
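One thing worth noting about the removed call `eventlet.spawn_n(self._set_admin_password(...))`: it invoked `_set_admin_password` immediately and handed its return value to `spawn_n`, rather than giving `spawn_n` a callable to run in a green thread. A minimal illustration of the difference (a standalone sketch, not code from this patch):

    import eventlet

    def work(arg):
        print 'working on %s' % arg

    # What the removed line effectively did: work() runs right away and
    # spawn_n receives its return value rather than a callable.
    #   eventlet.spawn_n(work('now'))

    # Passing the callable and its arguments separately defers the call:
    eventlet.spawn_n(work, 'later')
    eventlet.sleep(0)  # yield so the spawned green thread gets to run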
def inject_file(self, context, instance_id):
"""Write a file to the given instance."""
@@ -221,6 +221,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_ref.injected_files = kwargs.get('injected_files', [])
instance_ref.admin_pass = kwargs.get('admin_password', None)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
@@ -405,22 +406,28 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this host."""
"""Set the root/admin password for an instance on this host.

This is generally only called by API password resets after an
image has been built.
"""

context = context.elevated()

if new_pass is None:
# Generate a random password
new_pass = utils.generate_password(FLAGS.password_length)

while True:
max_tries = 10

for i in xrange(max_tries):
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref["id"]
instance_state = instance_ref["state"]
expected_state = power_state.RUNNING

if instance_state != expected_state:
time.sleep(5)
continue
raise exception.Error(_('Instance is not running'))
else:
try:
self.driver.set_admin_password(instance_ref, new_pass)
@@ -436,6 +443,12 @@ class ComputeManager(manager.SchedulerDependentManager):
except Exception, e:
# Catch all here because this could be anything.
LOG.exception(e)
if i == max_tries - 1:
# At some point this exception may make it back
# to the API caller, and we don't want to reveal
# too much. The real exception is logged above
raise exception.Error(_('Internal error'))
time.sleep(1)
continue

@exception.wrap_exception
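The reworked loop above bounds the retries instead of spinning forever: it re-reads the instance state up to `max_tries` times, sleeps between attempts, and keeps the error reported to the caller generic while the real exception is logged. The general shape, in isolation (a simplified sketch, not the patch itself):

    import time

    def wait_until(predicate, max_tries=10, delay=5):
        # Poll predicate() until it is true or max_tries polls have failed.
        for attempt in xrange(max_tries):
            if predicate():
                return True
            time.sleep(delay)
        raise RuntimeError('condition not met after %d tries' % max_tries)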
@@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues):
return name in self.__dict__['__dirty']

def ClearDirty(self):
self.__dict__['__is_dirty'] = []
self.__dict__['__dirty'] = []

def WasAlreadyParsed(self):
return self.__dict__['__was_already_parsed']
@@ -119,11 +119,12 @@ class FlagValues(gflags.FlagValues):
if '__stored_argv' not in self.__dict__:
return
new_flags = FlagValues(self)
for k in self.__dict__['__dirty']:
for k in self.FlagDict().iterkeys():
new_flags[k] = gflags.FlagValues.__getitem__(self, k)

new_flags.Reset()
new_flags(self.__dict__['__stored_argv'])
for k in self.__dict__['__dirty']:
for k in new_flags.FlagDict().iterkeys():
setattr(self, k, getattr(new_flags, k))
self.ClearDirty()
@@ -100,7 +100,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
self.absolute_limits = {
'instances': 5,
'cores': 8,
'ram': 2**13,
'ram': 2 ** 13,
'volumes': 21,
'gigabytes': 34,
'metadata_items': 55,
@@ -150,7 +150,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
"absolute": {
"maxTotalInstances": 5,
"maxTotalCores": 8,
"maxTotalRAMSize": 2**13,
"maxTotalRAMSize": 2 ** 13,
"maxServerMeta": 55,
"maxImageMeta": 55,
"maxPersonality": 89,
@@ -138,6 +138,16 @@ def find_host(self, context, instance_id):
return "nova"


class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None

def __call__(self, context, instance_id, password):
self.instance_id = instance_id
self.password = password


class ServersTest(test.TestCase):

def setUp(self):
@@ -764,8 +774,7 @@ class ServersTest(test.TestCase):

def server_update(context, id, params):
filtered_dict = dict(
display_name='server_test',
admin_pass='bacon',
display_name='server_test'
)
self.assertEqual(params, filtered_dict)
return filtered_dict
@@ -773,6 +782,8 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)

req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
@@ -780,6 +791,8 @@ class ServersTest(test.TestCase):
req.body = self.body
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
self.assertEqual(mock_method.instance_id, '1')
self.assertEqual(mock_method.password, 'bacon')

def test_update_server_adminPass_ignored_v1_1(self):
inst_dict = dict(name='server_test', adminPass='bacon')
@@ -996,16 +1009,6 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 501)

def test_server_change_password_v1_1(self):

class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None

def __call__(self, context, instance_id, password):
self.instance_id = instance_id
self.password = password

mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
@@ -21,24 +21,24 @@ from nova import flags
FLAGS = flags.FLAGS

flags.DECLARE('volume_driver', 'nova.volume.manager')
FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver')
FLAGS['connection_type'].SetDefault('fake')
FLAGS['fake_rabbit'].SetDefault(True)
flags.DECLARE('auth_driver', 'nova.auth.manager')
FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 8
FLAGS.num_networks = 2
FLAGS.fake_network = True
FLAGS.image_service = 'nova.image.local.LocalImageService'
FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True)
FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
FLAGS.sqlite_db = "tests.sqlite"
FLAGS.use_ipv6 = True
FLAGS['num_shelves'].SetDefault(2)
FLAGS['blades_per_shelf'].SetDefault(4)
FLAGS['iscsi_num_targets'].SetDefault(8)
FLAGS['verbose'].SetDefault(True)
FLAGS['sqlite_db'].SetDefault("tests.sqlite")
FLAGS['use_ipv6'].SetDefault(True)
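A brief aside on this pattern (a minimal sketch using python-gflags directly; the flag name is chosen for illustration): assigning `FLAGS.foo = value` overrides whatever was parsed from the command line, whereas `FLAGS['foo'].SetDefault(value)` only changes the flag's default, so an explicitly passed value still wins:

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_bool('fake_rabbit', False, 'use a fake RabbitMQ backend')

    FLAGS(['prog', '--nofake_rabbit'])     # the operator explicitly disabled it

    FLAGS['fake_rabbit'].SetDefault(True)  # default changes, explicit value kept
    print FLAGS.fake_rabbit                # False

    FLAGS.fake_rabbit = True               # direct assignment clobbers it
    print FLAGS.fake_rabbit                # True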
@@ -1,26 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags

FLAGS = flags.FLAGS

FLAGS.connection_type = 'libvirt'
FLAGS.fake_rabbit = False
FLAGS.fake_network = False
FLAGS.verbose = False
@@ -224,6 +224,29 @@ class ApiEc2TestCase(test.TestCase):
self.manager.delete_project(project)
self.manager.delete_user(user)

def test_create_duplicate_key_pair(self):
"""Test that, after successfully generating a keypair,
requesting a second keypair with the same name fails sanely"""
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# NOTE(vish): create depends on pool, so call helper directly
self.ec2.create_key_pair('test')

try:
self.ec2.create_key_pair('test')
except EC2ResponseError, e:
if e.code == 'KeyPairExists':
pass
else:
self.fail("Unexpected EC2ResponseError: %s "
"(expected KeyPairExists)" % e.code)
else:
self.fail('Exception not raised.')

def test_get_all_security_groups(self):
"""Test that we can retrieve security groups"""
self.expect_http()
@@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase):
self.assert_('runtime_answer' in self.global_FLAGS)
self.assertEqual(self.global_FLAGS.runtime_answer, 60)

def test_long_vs_short_flags(self):
flags.DEFINE_string('duplicate_answer_long', 'val', 'desc',
flag_values=self.global_FLAGS)
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
args = self.global_FLAGS(argv)

self.assert_('duplicate_answer' not in self.global_FLAGS)
self.assert_(self.global_FLAGS.duplicate_answer_long, 60)

flags.DEFINE_integer('duplicate_answer', 60, 'desc',
flag_values=self.global_FLAGS)
self.assertEqual(self.global_FLAGS.duplicate_answer, 60)
self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val')

def test_flag_leak_left(self):
self.assertEqual(FLAGS.flags_unittest, 'foo')
FLAGS.flags_unittest = 'bar'
@@ -32,7 +32,8 @@ from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
from nova.compute import power_state
from nova.virt import libvirt_conn
from nova.virt.libvirt import connection
from nova.virt.libvirt import firewall

libvirt = None
FLAGS = flags.FLAGS
@@ -83,7 +84,7 @@ class CacheConcurrencyTestCase(test.TestCase):

def test_same_fname_concurrency(self):
"""Ensures that the same fname cache runs at a sequentially"""
conn = libvirt_conn.LibvirtConnection
conn = connection.LibvirtConnection
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
@@ -104,7 +105,7 @@ class CacheConcurrencyTestCase(test.TestCase):

def test_different_fname_concurrency(self):
"""Ensures that two different fname caches are concurrent"""
conn = libvirt_conn.LibvirtConnection
conn = connection.LibvirtConnection
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
@@ -125,7 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase):
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
libvirt_conn._late_load_cheetah()
connection._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()

@@ -171,8 +172,8 @@ class LibvirtConnTestCase(test.TestCase):
return False
global libvirt
libvirt = __import__('libvirt')
libvirt_conn.libvirt = __import__('libvirt')
libvirt_conn.libxml2 = __import__('libxml2')
connection.libvirt = __import__('libvirt')
connection.libxml2 = __import__('libxml2')
return True

def create_fake_libvirt_mock(self, **kwargs):
@@ -182,7 +183,7 @@ class LibvirtConnTestCase(test.TestCase):
class FakeLibvirtConnection(object):
pass

# A fake libvirt_conn.IptablesFirewallDriver
# A fake connection.IptablesFirewallDriver
class FakeIptablesFirewallDriver(object):

def __init__(self, **kwargs):
@@ -198,11 +199,11 @@ class LibvirtConnTestCase(test.TestCase):
for key, val in kwargs.items():
fake.__setattr__(key, val)

# Inevitable mocks for libvirt_conn.LibvirtConnection
self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
libvirt_conn.LibvirtConnection._conn = fake
# Inevitable mocks for connection.LibvirtConnection
self.mox.StubOutWithMock(connection.utils, 'import_class')
connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
connection.LibvirtConnection._conn = fake

def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
@@ -214,7 +215,7 @@ class LibvirtConnTestCase(test.TestCase):
return db.service_create(context.get_admin_context(), service_ref)

def test_preparing_xml_info(self):
conn = libvirt_conn.LibvirtConnection(True)
conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, self.test_instance)

result = conn._prepare_xml_info(instance_ref, False)
@@ -229,7 +230,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(len(result['nics']) == 2)

def test_get_nic_for_xml_v4(self):
conn = libvirt_conn.LibvirtConnection(True)
conn = connection.LibvirtConnection(True)
network, mapping = _create_network_info()[0]
self.flags(use_ipv6=False)
params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -237,7 +238,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(params.find('PROJMASKV6') == -1)

def test_get_nic_for_xml_v6(self):
conn = libvirt_conn.LibvirtConnection(True)
conn = connection.LibvirtConnection(True)
network, mapping = _create_network_info()[0]
self.flags(use_ipv6=True)
params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -282,7 +283,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _create_network_info(2)
conn = libvirt_conn.LibvirtConnection(True)
conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, instance_data)
xml = conn.to_xml(instance_ref, False, network_info)
tree = xml_to_tree(xml)
@@ -313,7 +314,7 @@ class LibvirtConnTestCase(test.TestCase):
'instance_id': instance_ref['id']})

self.flags(libvirt_type='lxc')
conn = libvirt_conn.LibvirtConnection(True)
conn = connection.LibvirtConnection(True)

uri = conn.get_uri()
self.assertEquals(uri, 'lxc:///')
@@ -419,7 +420,7 @@ class LibvirtConnTestCase(test.TestCase):

for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
conn = connection.LibvirtConnection(True)

uri = conn.get_uri()
self.assertEquals(uri, expected_uri)
@@ -446,7 +447,7 @@ class LibvirtConnTestCase(test.TestCase):
FLAGS.libvirt_uri = testuri
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
conn = connection.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
@@ -470,13 +471,13 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock(getVersion=getVersion,
getType=getType,
listDomainsID=listDomainsID)
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
self.mox.StubOutWithMock(connection.LibvirtConnection,
'get_cpu_info')
libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')

# Start test
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
conn = connection.LibvirtConnection(False)
conn.update_available_resource(self.context, 'dummy')
service_ref = db.service_get(self.context, service_ref['id'])
compute_node = service_ref['compute_node'][0]
@@ -510,7 +511,7 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock()

self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
conn = connection.LibvirtConnection(False)
self.assertRaises(exception.ComputeServiceUnavailable,
conn.update_available_resource,
self.context, 'dummy')
@@ -545,7 +546,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
try:
conn = libvirt_conn.LibvirtConnection(False)
conn = connection.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
conn.firewall_driver.setattr('instance_filter_exists', fake_none)
@@ -594,7 +595,7 @@ class LibvirtConnTestCase(test.TestCase):

# Start test
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
conn = connection.LibvirtConnection(False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', '',
@@ -623,7 +624,7 @@ class LibvirtConnTestCase(test.TestCase):

# Start test
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
conn = connection.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)

@@ -647,7 +648,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(count)

def test_get_host_ip_addr(self):
conn = libvirt_conn.LibvirtConnection(False)
conn = connection.LibvirtConnection(False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)

@@ -671,7 +672,7 @@ class IptablesFirewallTestCase(test.TestCase):
class FakeLibvirtConnection(object):
pass
self.fake_libvirt_connection = FakeLibvirtConnection()
self.fw = libvirt_conn.IptablesFirewallDriver(
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)

def tearDown(self):
@@ -895,7 +896,7 @@ class NWFilterTestCase(test.TestCase):

self.fake_libvirt_connection = Mock()

self.fw = libvirt_conn.NWFilterFirewall(
self.fw = firewall.NWFilterFirewall(
lambda: self.fake_libvirt_connection)

def tearDown(self):
@@ -27,9 +27,9 @@ from nova import utils
from nova.virt import driver
from nova.virt import fake
from nova.virt import hyperv
from nova.virt import libvirt_conn
from nova.virt import vmwareapi_conn
from nova.virt import xenapi_conn
from nova.virt.libvirt import connection as libvirt_conn


LOG = logging.getLogger("nova.virt.connection")
nova/virt/libvirt/__init__.py (new empty file)
@ -57,7 +57,6 @@ from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import ipv6
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
from nova import vnc
|
||||
@ -67,20 +66,23 @@ from nova.compute import power_state
|
||||
from nova.virt import disk
|
||||
from nova.virt import driver
|
||||
from nova.virt import images
|
||||
from nova.virt.libvirt import netutils
|
||||
|
||||
|
||||
libvirt = None
|
||||
libxml2 = None
|
||||
Template = None
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.virt.libvirt_conn')
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
|
||||
# TODO(vish): These flags should probably go into a shared location
|
||||
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
|
||||
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
|
||||
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
|
||||
|
||||
flags.DEFINE_string('libvirt_xml_template',
|
||||
utils.abspath('virt/libvirt.xml.template'),
|
||||
'Libvirt XML Template')
|
||||
@ -102,7 +104,7 @@ flags.DEFINE_string('ajaxterm_portrange',
|
||||
'10000-12000',
|
||||
'Range of ports that ajaxterm should randomly try to bind')
|
||||
flags.DEFINE_string('firewall_driver',
|
||||
'nova.virt.libvirt_conn.IptablesFirewallDriver',
|
||||
'nova.virt.libvirt.firewall.IptablesFirewallDriver',
|
||||
'Firewall driver (defaults to iptables)')
|
||||
flags.DEFINE_string('cpuinfo_xml_template',
|
||||
utils.abspath('virt/cpuinfo.xml.template'),
|
||||
@ -144,70 +146,6 @@ def _late_load_cheetah():
|
||||
Template = t.Template
|
||||
|
||||
|
||||
def _get_net_and_mask(cidr):
|
||||
net = IPy.IP(cidr)
|
||||
return str(net.net()), str(net.netmask())
|
||||
|
||||
|
||||
def _get_net_and_prefixlen(cidr):
|
||||
net = IPy.IP(cidr)
|
||||
return str(net.net()), str(net.prefixlen())
|
||||
|
||||
|
||||
def _get_ip_version(cidr):
|
||||
net = IPy.IP(cidr)
|
||||
return int(net.version())
|
||||
|
||||
|
||||
def _get_network_info(instance):
|
||||
# TODO(adiantum) If we will keep this function
|
||||
# we should cache network_info
|
||||
admin_context = context.get_admin_context()
|
||||
|
||||
ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
|
||||
instance['id'])
|
||||
networks = db.network_get_all_by_instance(admin_context,
|
||||
instance['id'])
|
||||
flavor = db.instance_type_get_by_id(admin_context,
|
||||
instance['instance_type_id'])
|
||||
network_info = []
|
||||
|
||||
for network in networks:
|
||||
network_ips = [ip for ip in ip_addresses
|
||||
if ip['network_id'] == network['id']]
|
||||
|
||||
def ip_dict(ip):
|
||||
return {
|
||||
'ip': ip['address'],
|
||||
'netmask': network['netmask'],
|
||||
'enabled': '1'}
|
||||
|
||||
def ip6_dict():
|
||||
prefix = network['cidr_v6']
|
||||
mac = instance['mac_address']
|
||||
project_id = instance['project_id']
|
||||
return {
|
||||
'ip': ipv6.to_global(prefix, mac, project_id),
|
||||
'netmask': network['netmask_v6'],
|
||||
'enabled': '1'}
|
||||
|
||||
mapping = {
|
||||
'label': network['label'],
|
||||
'gateway': network['gateway'],
|
||||
'broadcast': network['broadcast'],
|
||||
'mac': instance['mac_address'],
|
||||
'rxtx_cap': flavor['rxtx_cap'],
|
||||
'dns': [network['dns']],
|
||||
'ips': [ip_dict(ip) for ip in network_ips]}
|
||||
|
||||
if FLAGS.use_ipv6:
|
||||
mapping['ip6s'] = [ip6_dict()]
|
||||
mapping['gateway6'] = network['gateway_v6']
|
||||
|
||||
network_info.append((network, mapping))
|
||||
return network_info
|
||||
|
||||
|
||||
class LibvirtConnection(driver.ComputeDriver):
|
||||
|
||||
def __init__(self, read_only):
|
||||
@ -807,7 +745,7 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
|
||||
network_info=None):
|
||||
if not network_info:
|
||||
network_info = _get_network_info(inst)
|
||||
network_info = netutils.get_network_info(inst)
|
||||
|
||||
if not suffix:
|
||||
suffix = ''
|
||||
@ -966,10 +904,10 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
|
||||
if FLAGS.allow_project_net_traffic:
|
||||
template = "<parameter name=\"%s\"value=\"%s\" />\n"
|
||||
net, mask = _get_net_and_mask(network['cidr'])
|
||||
net, mask = netutils.get_net_and_mask(network['cidr'])
|
||||
values = [("PROJNET", net), ("PROJMASK", mask)]
|
||||
if FLAGS.use_ipv6:
|
||||
net_v6, prefixlen_v6 = _get_net_and_prefixlen(
|
||||
net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
|
||||
network['cidr_v6'])
|
||||
values.extend([("PROJNETV6", net_v6),
|
||||
("PROJMASKV6", prefixlen_v6)])
|
||||
@ -996,7 +934,7 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
# TODO(adiantum) remove network_info creation code
|
||||
# when multinics will be completed
|
||||
if not network_info:
|
||||
network_info = _get_network_info(instance)
|
||||
network_info = netutils.get_network_info(instance)
|
||||
|
||||
nics = []
|
||||
for (network, mapping) in network_info:
|
||||
@ -1591,606 +1529,3 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
def get_host_stats(self, refresh=False):
|
||||
"""See xenapi_conn.py implementation."""
|
||||
pass
|
||||
|
||||
|
||||
class FirewallDriver(object):
|
||||
def prepare_instance_filter(self, instance, network_info=None):
|
||||
"""Prepare filters for the instance.
|
||||
|
||||
At this point, the instance isn't running yet."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def unfilter_instance(self, instance):
|
||||
"""Stop filtering instance"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def apply_instance_filter(self, instance):
|
||||
"""Apply instance filter.
|
||||
|
||||
Once this method returns, the instance should be firewalled
|
||||
appropriately. This method should as far as possible be a
|
||||
no-op. It's vastly preferred to get everything set up in
|
||||
prepare_instance_filter.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def refresh_security_group_rules(self,
|
||||
security_group_id,
|
||||
network_info=None):
|
||||
"""Refresh security group rules from data store
|
||||
|
||||
Gets called when a rule has been added to or removed from
|
||||
the security group."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def refresh_security_group_members(self, security_group_id):
|
||||
"""Refresh security group members from data store
|
||||
|
||||
Gets called when an instance gets added to or removed from
|
||||
the security group."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def setup_basic_filtering(self, instance, network_info=None):
|
||||
"""Create rules to block spoofing and allow dhcp.
|
||||
|
||||
This gets called when spawning an instance, before
|
||||
:method:`prepare_instance_filter`.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def instance_filter_exists(self, instance):
|
||||
"""Check nova-instance-instance-xxx exists"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class NWFilterFirewall(FirewallDriver):
|
||||
"""
|
||||
This class implements a network filtering mechanism versatile
|
||||
enough for EC2 style Security Group filtering by leveraging
|
||||
libvirt's nwfilter.
|
||||
|
||||
First, all instances get a filter ("nova-base-filter") applied.
|
||||
This filter provides some basic security such as protection against
|
||||
MAC spoofing, IP spoofing, and ARP spoofing.
|
||||
|
||||
This filter drops all incoming ipv4 and ipv6 connections.
|
||||
Outgoing connections are never blocked.
|
||||
|
||||
Second, every security group maps to a nwfilter filter(*).
|
||||
NWFilters can be updated at runtime and changes are applied
|
||||
immediately, so changes to security groups can be applied at
|
||||
runtime (as mandated by the spec).
|
||||
|
||||
Security group rules are named "nova-secgroup-<id>" where <id>
|
||||
is the internal id of the security group. They're applied only on
|
||||
hosts that have instances in the security group in question.
|
||||
|
||||
Updates to security groups are done by updating the data model
|
||||
(in response to API calls) followed by a request sent to all
|
||||
the nodes with instances in the security group to refresh the
|
||||
security group.
|
||||
|
||||
Each instance has its own NWFilter, which references the above
|
||||
mentioned security group NWFilters. This was done because
|
||||
interfaces can only reference one filter while filters can
|
||||
reference multiple other filters. This has the added benefit of
|
||||
actually being able to add and remove security groups from an
|
||||
instance at run time. This functionality is not exposed anywhere,
|
||||
though.
|
||||
|
||||
Outstanding questions:
|
||||
|
||||
The name is unique, so would there be any good reason to sync
|
||||
the uuid across the nodes (by assigning it from the datamodel)?
|
||||
|
||||
|
||||
(*) This sentence brought to you by the redundancy department of
|
||||
redundancy.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, get_connection, **kwargs):
|
||||
self._libvirt_get_connection = get_connection
|
||||
self.static_filters_configured = False
|
||||
self.handle_security_groups = False
|
||||
|
||||
def apply_instance_filter(self, instance):
|
||||
"""No-op. Everything is done in prepare_instance_filter"""
|
||||
pass
|
||||
|
||||
def _get_connection(self):
|
||||
return self._libvirt_get_connection()
|
||||
_conn = property(_get_connection)
|
||||
|
||||
def nova_dhcp_filter(self):
|
||||
"""The standard allow-dhcp-server filter is an <ip> one, so it uses
|
||||
ebtables to allow traffic through. Without a corresponding rule in
|
||||
iptables, it'll get blocked anyway."""
|
||||
|
||||
return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
|
||||
<uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
|
||||
<rule action='accept' direction='out'
|
||||
priority='100'>
|
||||
<udp srcipaddr='0.0.0.0'
|
||||
dstipaddr='255.255.255.255'
|
||||
srcportstart='68'
|
||||
dstportstart='67'/>
|
||||
</rule>
|
||||
<rule action='accept' direction='in'
|
||||
priority='100'>
|
||||
<udp srcipaddr='$DHCPSERVER'
|
||||
srcportstart='67'
|
||||
dstportstart='68'/>
|
||||
</rule>
|
||||
</filter>'''
|
||||
|
||||
def nova_ra_filter(self):
|
||||
return '''<filter name='nova-allow-ra-server' chain='root'>
|
||||
<uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
|
||||
<rule action='accept' direction='inout'
|
||||
priority='100'>
|
||||
<icmpv6 srcipaddr='$RASERVER'/>
|
||||
</rule>
|
||||
</filter>'''
|
||||
|
||||
def setup_basic_filtering(self, instance, network_info=None):
|
||||
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
|
||||
logging.info('called setup_basic_filtering in nwfilter')
|
||||
|
||||
if not network_info:
|
||||
network_info = _get_network_info(instance)
|
||||
|
||||
if self.handle_security_groups:
|
||||
# No point in setting up a filter set that we'll be overriding
|
||||
# anyway.
|
||||
return
|
||||
|
||||
logging.info('ensuring static filters')
|
||||
self._ensure_static_filters()
|
||||
|
||||
if instance['image_id'] == str(FLAGS.vpn_image_id):
|
||||
base_filter = 'nova-vpn'
|
||||
else:
|
||||
base_filter = 'nova-base'
|
||||
|
||||
for (network, mapping) in network_info:
|
||||
nic_id = mapping['mac'].replace(':', '')
|
||||
instance_filter_name = self._instance_filter_name(instance, nic_id)
|
||||
self._define_filter(self._filter_container(instance_filter_name,
|
||||
[base_filter]))
|
||||
|
||||
def _ensure_static_filters(self):
|
||||
if self.static_filters_configured:
|
||||
return
|
||||
|
||||
self._define_filter(self._filter_container('nova-base',
|
||||
['no-mac-spoofing',
|
||||
'no-ip-spoofing',
|
||||
'no-arp-spoofing',
|
||||
'allow-dhcp-server']))
|
||||
self._define_filter(self._filter_container('nova-vpn',
|
||||
['allow-dhcp-server']))
|
||||
self._define_filter(self.nova_base_ipv4_filter)
|
||||
self._define_filter(self.nova_base_ipv6_filter)
|
||||
self._define_filter(self.nova_dhcp_filter)
|
||||
self._define_filter(self.nova_ra_filter)
|
||||
if FLAGS.allow_project_net_traffic:
|
||||
self._define_filter(self.nova_project_filter)
|
||||
if FLAGS.use_ipv6:
|
||||
self._define_filter(self.nova_project_filter_v6)
|
||||
|
||||
self.static_filters_configured = True
|
||||
|
||||
def _filter_container(self, name, filters):
|
||||
xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
|
||||
name,
|
||||
''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
|
||||
return xml
|
||||
|
||||
def nova_base_ipv4_filter(self):
|
||||
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
|
||||
for protocol in ['tcp', 'udp', 'icmp']:
|
||||
for direction, action, priority in [('out', 'accept', 399),
|
||||
('in', 'drop', 400)]:
|
||||
retval += """<rule action='%s' direction='%s' priority='%d'>
|
||||
<%s />
|
||||
</rule>""" % (action, direction,
|
||||
priority, protocol)
|
||||
retval += '</filter>'
|
||||
return retval
|
||||
|
||||
def nova_base_ipv6_filter(self):
|
||||
retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
|
||||
for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
|
||||
for direction, action, priority in [('out', 'accept', 399),
|
||||
('in', 'drop', 400)]:
|
||||
retval += """<rule action='%s' direction='%s' priority='%d'>
|
||||
<%s />
|
||||
</rule>""" % (action, direction,
|
||||
priority, protocol)
|
||||
retval += '</filter>'
|
||||
return retval
|
||||
|
||||
def nova_project_filter(self):
|
||||
retval = "<filter name='nova-project' chain='ipv4'>"
|
||||
for protocol in ['tcp', 'udp', 'icmp']:
|
||||
retval += """<rule action='accept' direction='in' priority='200'>
|
||||
<%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
|
||||
</rule>""" % protocol
|
||||
retval += '</filter>'
|
||||
return retval
|
||||
|
||||
def nova_project_filter_v6(self):
|
||||
retval = "<filter name='nova-project-v6' chain='ipv6'>"
|
||||
for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
|
||||
retval += """<rule action='accept' direction='inout'
|
||||
priority='200'>
|
||||
<%s srcipaddr='$PROJNETV6'
|
||||
srcipmask='$PROJMASKV6' />
|
||||
</rule>""" % (protocol)
|
||||
retval += '</filter>'
|
||||
return retval
|
||||
|
||||
def _define_filter(self, xml):
|
||||
if callable(xml):
|
||||
xml = xml()
|
||||
# execute in a native thread and block current greenthread until done
|
||||
tpool.execute(self._conn.nwfilterDefineXML, xml)
|
||||
|
||||
def unfilter_instance(self, instance):
|
||||
# Nothing to do
|
||||
pass
|
||||
|
||||
def prepare_instance_filter(self, instance, network_info=None):
|
||||
"""
|
||||
Creates an NWFilter for the given instance. In the process,
|
||||
it makes sure the filters for the security groups as well as
|
||||
the base filter are all in place.
|
||||
"""
|
||||
if not network_info:
|
||||
network_info = _get_network_info(instance)
|
||||
|
||||
ctxt = context.get_admin_context()
|
||||
|
||||
instance_secgroup_filter_name = \
|
||||
'%s-secgroup' % (self._instance_filter_name(instance))
|
||||
#% (instance_filter_name,)
|
||||
|
||||
instance_secgroup_filter_children = ['nova-base-ipv4',
|
||||
'nova-base-ipv6',
|
||||
'nova-allow-dhcp-server']
|
||||
|
||||
if FLAGS.use_ipv6:
|
||||
networks = [network for (network, _m) in network_info if
|
||||
network['gateway_v6']]
|
||||
|
||||
if networks:
|
||||
instance_secgroup_filter_children.\
|
||||
append('nova-allow-ra-server')
|
||||
|
||||
for security_group in \
|
||||
db.security_group_get_by_instance(ctxt, instance['id']):
|
||||
|
||||
self.refresh_security_group_rules(security_group['id'])
|
||||
|
||||
instance_secgroup_filter_children.append('nova-secgroup-%s' %
|
||||
security_group['id'])
|
||||
|
||||
self._define_filter(
|
||||
self._filter_container(instance_secgroup_filter_name,
|
||||
instance_secgroup_filter_children))
|
||||
|
||||
network_filters = self.\
|
||||
_create_network_filters(instance, network_info,
|
||||
instance_secgroup_filter_name)
|
||||
|
||||
for (name, children) in network_filters:
|
||||
self._define_filters(name, children)
|
||||
|
||||
def _create_network_filters(self, instance, network_info,
|
||||
instance_secgroup_filter_name):
|
||||
if instance['image_id'] == str(FLAGS.vpn_image_id):
|
||||
base_filter = 'nova-vpn'
|
||||
else:
|
||||
base_filter = 'nova-base'
|
||||
|
||||
result = []
|
||||
for (_n, mapping) in network_info:
|
||||
nic_id = mapping['mac'].replace(':', '')
|
||||
instance_filter_name = self._instance_filter_name(instance, nic_id)
|
||||
instance_filter_children = [base_filter,
|
||||
instance_secgroup_filter_name]
|
||||
|
||||
if FLAGS.allow_project_net_traffic:
|
||||
instance_filter_children.append('nova-project')
|
||||
if FLAGS.use_ipv6:
|
||||
instance_filter_children.append('nova-project-v6')
|
||||
|
||||
result.append((instance_filter_name, instance_filter_children))
|
||||
|
||||
return result
|
||||
|
||||
def _define_filters(self, filter_name, filter_children):
|
||||
self._define_filter(self._filter_container(filter_name,
|
||||
filter_children))
|
||||
|
||||
def refresh_security_group_rules(self,
|
||||
security_group_id,
|
||||
network_info=None):
|
||||
return self._define_filter(
|
||||
self.security_group_to_nwfilter_xml(security_group_id))
|
||||
|
||||
def security_group_to_nwfilter_xml(self, security_group_id):
|
||||
security_group = db.security_group_get(context.get_admin_context(),
|
||||
security_group_id)
|
||||
rule_xml = ""
|
||||
v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
|
||||
for rule in security_group.rules:
|
||||
rule_xml += "<rule action='accept' direction='in' priority='300'>"
|
||||
if rule.cidr:
|
||||
version = _get_ip_version(rule.cidr)
|
||||
if(FLAGS.use_ipv6 and version == 6):
|
||||
net, prefixlen = _get_net_and_prefixlen(rule.cidr)
|
||||
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
|
||||
(v6protocol[rule.protocol], net, prefixlen)
|
||||
else:
|
||||
net, mask = _get_net_and_mask(rule.cidr)
|
||||
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
|
||||
(rule.protocol, net, mask)
|
||||
if rule.protocol in ['tcp', 'udp']:
|
||||
rule_xml += "dstportstart='%s' dstportend='%s' " % \
|
||||
(rule.from_port, rule.to_port)
|
||||
elif rule.protocol == 'icmp':
|
||||
LOG.info('rule.protocol: %r, rule.from_port: %r, '
|
||||
'rule.to_port: %r', rule.protocol,
|
||||
rule.from_port, rule.to_port)
|
||||
if rule.from_port != -1:
|
||||
rule_xml += "type='%s' " % rule.from_port
|
||||
if rule.to_port != -1:
|
||||
rule_xml += "code='%s' " % rule.to_port
|
||||
|
||||
rule_xml += '/>\n'
|
||||
rule_xml += "</rule>\n"
|
||||
xml = "<filter name='nova-secgroup-%s' " % security_group_id
|
||||
if(FLAGS.use_ipv6):
|
||||
xml += "chain='root'>%s</filter>" % rule_xml
|
||||
else:
|
||||
xml += "chain='ipv4'>%s</filter>" % rule_xml
|
||||
return xml
|
||||
|
||||
def _instance_filter_name(self, instance, nic_id=None):
|
||||
if not nic_id:
|
||||
return 'nova-instance-%s' % (instance['name'])
|
||||
return 'nova-instance-%s-%s' % (instance['name'], nic_id)
|
||||
|
||||
def instance_filter_exists(self, instance):
|
||||
"""Check nova-instance-instance-xxx exists"""
|
||||
network_info = _get_network_info(instance)
|
||||
for (network, mapping) in network_info:
|
||||
nic_id = mapping['mac'].replace(':', '')
|
||||
instance_filter_name = self._instance_filter_name(instance, nic_id)
|
||||
try:
|
||||
self._conn.nwfilterLookupByName(instance_filter_name)
|
||||
except libvirt.libvirtError:
|
||||
name = instance.name
|
||||
LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
|
||||
'%(name)s is not found.') % locals())
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class IptablesFirewallDriver(FirewallDriver):
|
||||
def __init__(self, execute=None, **kwargs):
|
||||
from nova.network import linux_net
|
||||
self.iptables = linux_net.iptables_manager
|
||||
self.instances = {}
|
||||
        self.nwfilter = NWFilterFirewall(kwargs['get_connection'])

        self.iptables.ipv4['filter'].add_chain('sg-fallback')
        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
        self.iptables.ipv6['filter'].add_chain('sg-fallback')
        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')

    def setup_basic_filtering(self, instance, network_info=None):
        """Use NWFilter from libvirt for this."""
        if not network_info:
            network_info = _get_network_info(instance)
        return self.nwfilter.setup_basic_filtering(instance, network_info)

    def apply_instance_filter(self, instance):
        """No-op. Everything is done in prepare_instance_filter"""
        pass

    def unfilter_instance(self, instance):
        if self.instances.pop(instance['id'], None):
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
        else:
            LOG.info(_('Attempted to unfilter instance %s which is not '
                       'filtered'), instance['id'])

    def prepare_instance_filter(self, instance, network_info=None):
        if not network_info:
            network_info = _get_network_info(instance)
        self.instances[instance['id']] = instance
        self.add_filters_for_instance(instance, network_info)
        self.iptables.apply()

    def _create_filter(self, ips, chain_name):
        return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]

    def _filters_for_instance(self, chain_name, network_info):
        ips_v4 = [ip['ip'] for (_n, mapping) in network_info
                  for ip in mapping['ips']]
        ipv4_rules = self._create_filter(ips_v4, chain_name)

        ipv6_rules = []
        if FLAGS.use_ipv6:
            ips_v6 = [ip['ip'] for (_n, mapping) in network_info
                      for ip in mapping['ip6s']]
            ipv6_rules = self._create_filter(ips_v6, chain_name)

        return ipv4_rules, ipv6_rules

    def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule(chain_name, rule)

        if FLAGS.use_ipv6:
            for rule in ipv6_rules:
                self.iptables.ipv6['filter'].add_rule(chain_name, rule)

    def add_filters_for_instance(self, instance, network_info=None):
        chain_name = self._instance_chain_name(instance)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].add_chain(chain_name)
        self.iptables.ipv4['filter'].add_chain(chain_name)
        ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
                                                            network_info)
        self._add_filters('local', ipv4_rules, ipv6_rules)
        ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
        self._add_filters(chain_name, ipv4_rules, ipv6_rules)

    def remove_filters_for_instance(self, instance):
        chain_name = self._instance_chain_name(instance)

        self.iptables.ipv4['filter'].remove_chain(chain_name)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].remove_chain(chain_name)

    def instance_rules(self, instance, network_info=None):
        if not network_info:
            network_info = _get_network_info(instance)
        ctxt = context.get_admin_context()

        ipv4_rules = []
        ipv6_rules = []

        # Always drop invalid packets
        ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
        ipv6_rules += ['-m state --state ' 'INVALID -j DROP']

        # Allow established connections
        ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']

        dhcp_servers = [network['gateway'] for (network, _m) in network_info]

        for dhcp_server in dhcp_servers:
            ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                              '-j ACCEPT' % (dhcp_server,))

        #Allow project network traffic
        if FLAGS.allow_project_net_traffic:
            cidrs = [network['cidr'] for (network, _m) in network_info]
            for cidr in cidrs:
                ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))

        # We wrap these in FLAGS.use_ipv6 because they might cause
        # a DB lookup. The other ones are just list operations, so
        # they're not worth the clutter.
        if FLAGS.use_ipv6:
            # Allow RA responses
            gateways_v6 = [network['gateway_v6'] for (network, _) in
                           network_info]
            for gateway_v6 in gateways_v6:
                ipv6_rules.append(
                        '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))

            #Allow project network traffic
            if FLAGS.allow_project_net_traffic:
                cidrv6s = [network['cidr_v6'] for (network, _m)
                           in network_info]

                for cidrv6 in cidrv6s:
                    ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))

        security_groups = db.security_group_get_by_instance(ctxt,
                                                            instance['id'])

        # then, security group chains and rules
        for security_group in security_groups:
            rules = db.security_group_rule_get_by_security_group(ctxt,
                                                        security_group['id'])

            for rule in rules:
                logging.info('%r', rule)

                if not rule.cidr:
                    # Eventually, a mechanism to grant access for security
                    # groups will turn up here. It'll use ipsets.
                    continue

                version = _get_ip_version(rule.cidr)
                if version == 4:
                    rules = ipv4_rules
                else:
                    rules = ipv6_rules

                protocol = rule.protocol
                if version == 6 and rule.protocol == 'icmp':
                    protocol = 'icmpv6'

                args = ['-p', protocol, '-s', rule.cidr]

                if rule.protocol in ['udp', 'tcp']:
                    if rule.from_port == rule.to_port:
                        args += ['--dport', '%s' % (rule.from_port,)]
                    else:
                        args += ['-m', 'multiport',
                                 '--dports', '%s:%s' % (rule.from_port,
                                                        rule.to_port)]
                elif rule.protocol == 'icmp':
                    icmp_type = rule.from_port
                    icmp_code = rule.to_port

                    if icmp_type == -1:
                        icmp_type_arg = None
                    else:
                        icmp_type_arg = '%s' % icmp_type
                        if not icmp_code == -1:
                            icmp_type_arg += '/%s' % icmp_code

                    if icmp_type_arg:
                        if version == 4:
                            args += ['-m', 'icmp', '--icmp-type',
                                     icmp_type_arg]
                        elif version == 6:
                            args += ['-m', 'icmp6', '--icmpv6-type',
                                     icmp_type_arg]

                args += ['-j ACCEPT']
                rules += [' '.join(args)]

        ipv4_rules += ['-j $sg-fallback']
        ipv6_rules += ['-j $sg-fallback']

        return ipv4_rules, ipv6_rules

    def instance_filter_exists(self, instance):
        """Check nova-instance-instance-xxx exists"""
        return self.nwfilter.instance_filter_exists(instance)

    def refresh_security_group_members(self, security_group):
        pass

    def refresh_security_group_rules(self, security_group, network_info=None):
        self.do_refresh_security_group_rules(security_group, network_info)
        self.iptables.apply()

    @utils.synchronized('iptables', external=True)
    def do_refresh_security_group_rules(self,
                                        security_group,
                                        network_info=None):
        for instance in self.instances.values():
            self.remove_filters_for_instance(instance)
            if not network_info:
                network_info = _get_network_info(instance)
            self.add_filters_for_instance(instance, network_info)

    def _security_group_chain_name(self, security_group_id):
        return 'nova-sg-%s' % (security_group_id,)

    def _instance_chain_name(self, instance):
        return 'inst-%s' % (instance['id'],)
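
The hunk above strips the iptables-based driver out of the old monolithic libvirt connection module (it reappears in the new nova/virt/libvirt/firewall.py below). For orientation, here is a minimal, self-contained sketch of how instance_rules() turns one security group row into an iptables rule string; the SimpleRule stand-in and its values are invented for illustration and are not part of this commit:

    # Illustrative only: mirrors the TCP/UDP branch of instance_rules().
    class SimpleRule(object):
        protocol = 'tcp'
        cidr = '10.1.2.0/24'
        from_port = 80
        to_port = 80

    def rule_to_iptables(rule):
        args = ['-p', rule.protocol, '-s', rule.cidr]
        if rule.protocol in ['udp', 'tcp']:
            if rule.from_port == rule.to_port:
                args += ['--dport', '%s' % (rule.from_port,)]
            else:
                args += ['-m', 'multiport',
                         '--dports', '%s:%s' % (rule.from_port, rule.to_port)]
        args += ['-j ACCEPT']
        return ' '.join(args)

    print rule_to_iptables(SimpleRule())
    # prints: -p tcp -s 10.1.2.0/24 --dport 80 -j ACCEPT

The resulting strings are added to a per-instance chain; traffic that matches none of them falls through to the sg-fallback chain, which drops it.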
nova/virt/libvirt/firewall.py (new file, 642 lines)
@ -0,0 +1,642 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from eventlet import tpool

from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt.libvirt import netutils


LOG = logging.getLogger("nova.virt.libvirt.firewall")
FLAGS = flags.FLAGS


try:
    import libvirt
except ImportError:
    LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will "
               "not work correctly."))


class FirewallDriver(object):
    def prepare_instance_filter(self, instance, network_info=None):
        """Prepare filters for the instance.

        At this point, the instance isn't running yet."""
        raise NotImplementedError()

    def unfilter_instance(self, instance):
        """Stop filtering instance"""
        raise NotImplementedError()

    def apply_instance_filter(self, instance):
        """Apply instance filter.

        Once this method returns, the instance should be firewalled
        appropriately. This method should as far as possible be a
        no-op. It's vastly preferred to get everything set up in
        prepare_instance_filter.
        """
        raise NotImplementedError()

    def refresh_security_group_rules(self,
                                     security_group_id,
                                     network_info=None):
        """Refresh security group rules from data store

        Gets called when a rule has been added to or removed from
        the security group."""
        raise NotImplementedError()

    def refresh_security_group_members(self, security_group_id):
        """Refresh security group members from data store

        Gets called when an instance gets added to or removed from
        the security group."""
        raise NotImplementedError()

    def setup_basic_filtering(self, instance, network_info=None):
        """Create rules to block spoofing and allow dhcp.

        This gets called when spawning an instance, before
        :method:`prepare_instance_filter`.

        """
        raise NotImplementedError()

    def instance_filter_exists(self, instance):
        """Check nova-instance-instance-xxx exists"""
        raise NotImplementedError()


class NWFilterFirewall(FirewallDriver):
    """
    This class implements a network filtering mechanism versatile
    enough for EC2 style Security Group filtering by leveraging
    libvirt's nwfilter.

    First, all instances get a filter ("nova-base-filter") applied.
    This filter provides some basic security such as protection against
    MAC spoofing, IP spoofing, and ARP spoofing.

    This filter drops all incoming ipv4 and ipv6 connections.
    Outgoing connections are never blocked.

    Second, every security group maps to a nwfilter filter(*).
    NWFilters can be updated at runtime and changes are applied
    immediately, so changes to security groups can be applied at
    runtime (as mandated by the spec).

    Security group rules are named "nova-secgroup-<id>" where <id>
    is the internal id of the security group. They're applied only on
    hosts that have instances in the security group in question.

    Updates to security groups are done by updating the data model
    (in response to API calls) followed by a request sent to all
    the nodes with instances in the security group to refresh the
    security group.

    Each instance has its own NWFilter, which references the above
    mentioned security group NWFilters. This was done because
    interfaces can only reference one filter while filters can
    reference multiple other filters. This has the added benefit of
    actually being able to add and remove security groups from an
    instance at run time. This functionality is not exposed anywhere,
    though.

    Outstanding questions:

    The name is unique, so would there be any good reason to sync
    the uuid across the nodes (by assigning it from the datamodel)?


    (*) This sentence brought to you by the redundancy department of
        redundancy.

    """

    def __init__(self, get_connection, **kwargs):
        self._libvirt_get_connection = get_connection
        self.static_filters_configured = False
        self.handle_security_groups = False

    def apply_instance_filter(self, instance):
        """No-op. Everything is done in prepare_instance_filter"""
        pass

    def _get_connection(self):
        return self._libvirt_get_connection()
    _conn = property(_get_connection)

    def nova_dhcp_filter(self):
        """The standard allow-dhcp-server filter is an <ip> one, so it uses
           ebtables to allow traffic through. Without a corresponding rule in
           iptables, it'll get blocked anyway."""

        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
                    <rule action='accept' direction='out'
                          priority='100'>
                      <udp srcipaddr='0.0.0.0'
                           dstipaddr='255.255.255.255'
                           srcportstart='68'
                           dstportstart='67'/>
                    </rule>
                    <rule action='accept' direction='in'
                          priority='100'>
                      <udp srcipaddr='$DHCPSERVER'
                           srcportstart='67'
                           dstportstart='68'/>
                    </rule>
                  </filter>'''

    def nova_ra_filter(self):
        return '''<filter name='nova-allow-ra-server' chain='root'>
                    <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
                    <rule action='accept' direction='inout'
                          priority='100'>
                      <icmpv6 srcipaddr='$RASERVER'/>
                    </rule>
                  </filter>'''

    def setup_basic_filtering(self, instance, network_info=None):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
        logging.info('called setup_basic_filtering in nwfilter')

        if not network_info:
            network_info = netutils.get_network_info(instance)

        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return

        logging.info('ensuring static filters')
        self._ensure_static_filters()

        if instance['image_id'] == str(FLAGS.vpn_image_id):
            base_filter = 'nova-vpn'
        else:
            base_filter = 'nova-base'

        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            self._define_filter(self._filter_container(instance_filter_name,
                                                       [base_filter]))

    def _ensure_static_filters(self):
        if self.static_filters_configured:
            return

        self._define_filter(self._filter_container('nova-base',
                                                    ['no-mac-spoofing',
                                                     'no-ip-spoofing',
                                                     'no-arp-spoofing',
                                                     'allow-dhcp-server']))
        self._define_filter(self._filter_container('nova-vpn',
                                                    ['allow-dhcp-server']))
        self._define_filter(self.nova_base_ipv4_filter)
        self._define_filter(self.nova_base_ipv6_filter)
        self._define_filter(self.nova_dhcp_filter)
        self._define_filter(self.nova_ra_filter)
        if FLAGS.allow_project_net_traffic:
            self._define_filter(self.nova_project_filter)
            if FLAGS.use_ipv6:
                self._define_filter(self.nova_project_filter_v6)

        self.static_filters_configured = True

    def _filter_container(self, name, filters):
        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
                 name,
                 ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
        return xml

    def nova_base_ipv4_filter(self):
        retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
        for protocol in ['tcp', 'udp', 'icmp']:
            for direction, action, priority in [('out', 'accept', 399),
                                                ('in', 'drop', 400)]:
                retval += """<rule action='%s' direction='%s' priority='%d'>
                                <%s />
                             </rule>""" % (action, direction,
                                           priority, protocol)
        retval += '</filter>'
        return retval

    def nova_base_ipv6_filter(self):
        retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
            for direction, action, priority in [('out', 'accept', 399),
                                                ('in', 'drop', 400)]:
                retval += """<rule action='%s' direction='%s' priority='%d'>
                                <%s />
                             </rule>""" % (action, direction,
                                           priority, protocol)
        retval += '</filter>'
        return retval

    def nova_project_filter(self):
        retval = "<filter name='nova-project' chain='ipv4'>"
        for protocol in ['tcp', 'udp', 'icmp']:
            retval += """<rule action='accept' direction='in' priority='200'>
                           <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
                         </rule>""" % protocol
        retval += '</filter>'
        return retval

    def nova_project_filter_v6(self):
        retval = "<filter name='nova-project-v6' chain='ipv6'>"
        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
            retval += """<rule action='accept' direction='inout'
                               priority='200'>
                           <%s srcipaddr='$PROJNETV6'
                               srcipmask='$PROJMASKV6' />
                         </rule>""" % (protocol)
        retval += '</filter>'
        return retval

    def _define_filter(self, xml):
        if callable(xml):
            xml = xml()
        # execute in a native thread and block current greenthread until done
        tpool.execute(self._conn.nwfilterDefineXML, xml)

    def unfilter_instance(self, instance):
        # Nothing to do
        pass

    def prepare_instance_filter(self, instance, network_info=None):
        """
        Creates an NWFilter for the given instance. In the process,
        it makes sure the filters for the security groups as well as
        the base filter are all in place.
        """
        if not network_info:
            network_info = netutils.get_network_info(instance)

        ctxt = context.get_admin_context()

        instance_secgroup_filter_name = \
            '%s-secgroup' % (self._instance_filter_name(instance))
            #% (instance_filter_name,)

        instance_secgroup_filter_children = ['nova-base-ipv4',
                                             'nova-base-ipv6',
                                             'nova-allow-dhcp-server']

        if FLAGS.use_ipv6:
            networks = [network for (network, _m) in network_info if
                        network['gateway_v6']]

            if networks:
                instance_secgroup_filter_children.\
                    append('nova-allow-ra-server')

        for security_group in \
                db.security_group_get_by_instance(ctxt, instance['id']):

            self.refresh_security_group_rules(security_group['id'])

            instance_secgroup_filter_children.append('nova-secgroup-%s' %
                                                     security_group['id'])

        self._define_filter(
                self._filter_container(instance_secgroup_filter_name,
                                       instance_secgroup_filter_children))

        network_filters = self.\
            _create_network_filters(instance, network_info,
                                    instance_secgroup_filter_name)

        for (name, children) in network_filters:
            self._define_filters(name, children)

    def _create_network_filters(self, instance, network_info,
                                instance_secgroup_filter_name):
        if instance['image_id'] == str(FLAGS.vpn_image_id):
            base_filter = 'nova-vpn'
        else:
            base_filter = 'nova-base'

        result = []
        for (_n, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            instance_filter_children = [base_filter,
                                        instance_secgroup_filter_name]

            if FLAGS.allow_project_net_traffic:
                instance_filter_children.append('nova-project')
                if FLAGS.use_ipv6:
                    instance_filter_children.append('nova-project-v6')

            result.append((instance_filter_name, instance_filter_children))

        return result

    def _define_filters(self, filter_name, filter_children):
        self._define_filter(self._filter_container(filter_name,
                                                   filter_children))

    def refresh_security_group_rules(self,
                                     security_group_id,
                                     network_info=None):
        return self._define_filter(
                   self.security_group_to_nwfilter_xml(security_group_id))

    def security_group_to_nwfilter_xml(self, security_group_id):
        security_group = db.security_group_get(context.get_admin_context(),
                                               security_group_id)
        rule_xml = ""
        v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
        for rule in security_group.rules:
            rule_xml += "<rule action='accept' direction='in' priority='300'>"
            if rule.cidr:
                version = netutils.get_ip_version(rule.cidr)
                if(FLAGS.use_ipv6 and version == 6):
                    net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
                                (v6protocol[rule.protocol], net, prefixlen)
                else:
                    net, mask = netutils.get_net_and_mask(rule.cidr)
                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
                                (rule.protocol, net, mask)
                if rule.protocol in ['tcp', 'udp']:
                    rule_xml += "dstportstart='%s' dstportend='%s' " % \
                                (rule.from_port, rule.to_port)
                elif rule.protocol == 'icmp':
                    LOG.info('rule.protocol: %r, rule.from_port: %r, '
                             'rule.to_port: %r', rule.protocol,
                             rule.from_port, rule.to_port)
                    if rule.from_port != -1:
                        rule_xml += "type='%s' " % rule.from_port
                    if rule.to_port != -1:
                        rule_xml += "code='%s' " % rule.to_port

                rule_xml += '/>\n'
            rule_xml += "</rule>\n"
        xml = "<filter name='nova-secgroup-%s' " % security_group_id
        if(FLAGS.use_ipv6):
            xml += "chain='root'>%s</filter>" % rule_xml
        else:
            xml += "chain='ipv4'>%s</filter>" % rule_xml
        return xml

    def _instance_filter_name(self, instance, nic_id=None):
        if not nic_id:
            return 'nova-instance-%s' % (instance['name'])
        return 'nova-instance-%s-%s' % (instance['name'], nic_id)

    def instance_filter_exists(self, instance):
        """Check nova-instance-instance-xxx exists"""
        network_info = netutils.get_network_info(instance)
        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            try:
                self._conn.nwfilterLookupByName(instance_filter_name)
            except libvirt.libvirtError:
                name = instance.name
                LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
                            '%(name)s is not found.') % locals())
                return False
        return True


class IptablesFirewallDriver(FirewallDriver):
    def __init__(self, execute=None, **kwargs):
        from nova.network import linux_net
        self.iptables = linux_net.iptables_manager
        self.instances = {}
        self.nwfilter = NWFilterFirewall(kwargs['get_connection'])

        self.iptables.ipv4['filter'].add_chain('sg-fallback')
        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
        self.iptables.ipv6['filter'].add_chain('sg-fallback')
        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')

    def setup_basic_filtering(self, instance, network_info=None):
        """Use NWFilter from libvirt for this."""
        if not network_info:
            network_info = netutils.get_network_info(instance)
        return self.nwfilter.setup_basic_filtering(instance, network_info)

    def apply_instance_filter(self, instance):
        """No-op. Everything is done in prepare_instance_filter"""
        pass

    def unfilter_instance(self, instance):
        if self.instances.pop(instance['id'], None):
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
        else:
            LOG.info(_('Attempted to unfilter instance %s which is not '
                       'filtered'), instance['id'])

    def prepare_instance_filter(self, instance, network_info=None):
        if not network_info:
            network_info = netutils.get_network_info(instance)
        self.instances[instance['id']] = instance
        self.add_filters_for_instance(instance, network_info)
        self.iptables.apply()

    def _create_filter(self, ips, chain_name):
        return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]

    def _filters_for_instance(self, chain_name, network_info):
        ips_v4 = [ip['ip'] for (_n, mapping) in network_info
                  for ip in mapping['ips']]
        ipv4_rules = self._create_filter(ips_v4, chain_name)

        ipv6_rules = []
        if FLAGS.use_ipv6:
            ips_v6 = [ip['ip'] for (_n, mapping) in network_info
                      for ip in mapping['ip6s']]
            ipv6_rules = self._create_filter(ips_v6, chain_name)

        return ipv4_rules, ipv6_rules

    def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule(chain_name, rule)

        if FLAGS.use_ipv6:
            for rule in ipv6_rules:
                self.iptables.ipv6['filter'].add_rule(chain_name, rule)

    def add_filters_for_instance(self, instance, network_info=None):
        chain_name = self._instance_chain_name(instance)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].add_chain(chain_name)
        self.iptables.ipv4['filter'].add_chain(chain_name)
        ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
                                                            network_info)
        self._add_filters('local', ipv4_rules, ipv6_rules)
        ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
        self._add_filters(chain_name, ipv4_rules, ipv6_rules)

    def remove_filters_for_instance(self, instance):
        chain_name = self._instance_chain_name(instance)

        self.iptables.ipv4['filter'].remove_chain(chain_name)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].remove_chain(chain_name)

    def instance_rules(self, instance, network_info=None):
        if not network_info:
            network_info = netutils.get_network_info(instance)
        ctxt = context.get_admin_context()

        ipv4_rules = []
        ipv6_rules = []

        # Always drop invalid packets
        ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
        ipv6_rules += ['-m state --state ' 'INVALID -j DROP']

        # Allow established connections
        ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']

        dhcp_servers = [network['gateway'] for (network, _m) in network_info]

        for dhcp_server in dhcp_servers:
            ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                              '-j ACCEPT' % (dhcp_server,))

        #Allow project network traffic
        if FLAGS.allow_project_net_traffic:
            cidrs = [network['cidr'] for (network, _m) in network_info]
            for cidr in cidrs:
                ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))

        # We wrap these in FLAGS.use_ipv6 because they might cause
        # a DB lookup. The other ones are just list operations, so
        # they're not worth the clutter.
        if FLAGS.use_ipv6:
            # Allow RA responses
            gateways_v6 = [network['gateway_v6'] for (network, _) in
                           network_info]
            for gateway_v6 in gateways_v6:
                ipv6_rules.append(
                        '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))

            #Allow project network traffic
            if FLAGS.allow_project_net_traffic:
                cidrv6s = [network['cidr_v6'] for (network, _m)
                           in network_info]

                for cidrv6 in cidrv6s:
                    ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))

        security_groups = db.security_group_get_by_instance(ctxt,
                                                            instance['id'])

        # then, security group chains and rules
        for security_group in security_groups:
            rules = db.security_group_rule_get_by_security_group(ctxt,
                                                        security_group['id'])

            for rule in rules:
                logging.info('%r', rule)

                if not rule.cidr:
                    # Eventually, a mechanism to grant access for security
                    # groups will turn up here. It'll use ipsets.
                    continue

                version = netutils.get_ip_version(rule.cidr)
                if version == 4:
                    rules = ipv4_rules
                else:
                    rules = ipv6_rules

                protocol = rule.protocol
                if version == 6 and rule.protocol == 'icmp':
                    protocol = 'icmpv6'

                args = ['-p', protocol, '-s', rule.cidr]

                if rule.protocol in ['udp', 'tcp']:
                    if rule.from_port == rule.to_port:
                        args += ['--dport', '%s' % (rule.from_port,)]
                    else:
                        args += ['-m', 'multiport',
                                 '--dports', '%s:%s' % (rule.from_port,
                                                        rule.to_port)]
                elif rule.protocol == 'icmp':
                    icmp_type = rule.from_port
                    icmp_code = rule.to_port

                    if icmp_type == -1:
                        icmp_type_arg = None
                    else:
                        icmp_type_arg = '%s' % icmp_type
                        if not icmp_code == -1:
                            icmp_type_arg += '/%s' % icmp_code

                    if icmp_type_arg:
                        if version == 4:
                            args += ['-m', 'icmp', '--icmp-type',
                                     icmp_type_arg]
                        elif version == 6:
                            args += ['-m', 'icmp6', '--icmpv6-type',
                                     icmp_type_arg]

                args += ['-j ACCEPT']
                rules += [' '.join(args)]

        ipv4_rules += ['-j $sg-fallback']
        ipv6_rules += ['-j $sg-fallback']

        return ipv4_rules, ipv6_rules

    def instance_filter_exists(self, instance):
        """Check nova-instance-instance-xxx exists"""
        return self.nwfilter.instance_filter_exists(instance)

    def refresh_security_group_members(self, security_group):
        pass

    def refresh_security_group_rules(self, security_group, network_info=None):
        self.do_refresh_security_group_rules(security_group, network_info)
        self.iptables.apply()

    @utils.synchronized('iptables', external=True)
    def do_refresh_security_group_rules(self,
                                        security_group,
                                        network_info=None):
        for instance in self.instances.values():
            self.remove_filters_for_instance(instance)
            if not network_info:
                network_info = netutils.get_network_info(instance)
            self.add_filters_for_instance(instance, network_info)

    def _security_group_chain_name(self, security_group_id):
        return 'nova-sg-%s' % (security_group_id,)

    def _instance_chain_name(self, instance):
        return 'inst-%s' % (instance['id'],)
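
The new module keeps the abstract FirewallDriver contract and both concrete drivers in one place. Below is a hedged sketch of the call order a hypervisor driver is expected to follow; the helper names launch_with_filtering/terminate_with_filtering and the get_conn placeholder are invented for illustration, and only the firewall API calls come from the code above:

    from nova.virt.libvirt import firewall

    def launch_with_filtering(get_conn, instance, network_info):
        # get_conn is assumed to be a callable returning a libvirt connection
        fw = firewall.IptablesFirewallDriver(get_connection=get_conn)
        # anti-spoofing and DHCP rules, delegated to nwfilter
        fw.setup_basic_filtering(instance, network_info)
        # per-instance chain plus security group rules
        fw.prepare_instance_filter(instance, network_info)
        # ... boot the domain here ...
        fw.apply_instance_filter(instance)   # a no-op for this driver
        return fw

    def terminate_with_filtering(fw, instance):
        # remove the per-instance chain once the domain is gone
        fw.unfilter_instance(instance)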
nova/virt/libvirt/netutils.py (new file, 97 lines)
@ -0,0 +1,97 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Network-related utilities for supporting libvirt connection code."""


import IPy

from nova import context
from nova import db
from nova import flags
from nova import ipv6
from nova import utils


FLAGS = flags.FLAGS


def get_net_and_mask(cidr):
    net = IPy.IP(cidr)
    return str(net.net()), str(net.netmask())


def get_net_and_prefixlen(cidr):
    net = IPy.IP(cidr)
    return str(net.net()), str(net.prefixlen())


def get_ip_version(cidr):
    net = IPy.IP(cidr)
    return int(net.version())


def get_network_info(instance):
    # TODO(adiantum) If we will keep this function
    # we should cache network_info
    admin_context = context.get_admin_context()

    ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
                                                   instance['id'])
    networks = db.network_get_all_by_instance(admin_context,
                                              instance['id'])
    flavor = db.instance_type_get_by_id(admin_context,
                                        instance['instance_type_id'])
    network_info = []

    for network in networks:
        network_ips = [ip for ip in ip_addresses
                       if ip['network_id'] == network['id']]

        def ip_dict(ip):
            return {
                'ip': ip['address'],
                'netmask': network['netmask'],
                'enabled': '1'}

        def ip6_dict():
            prefix = network['cidr_v6']
            mac = instance['mac_address']
            project_id = instance['project_id']
            return {
                'ip': ipv6.to_global(prefix, mac, project_id),
                'netmask': network['netmask_v6'],
                'enabled': '1'}

        mapping = {
            'label': network['label'],
            'gateway': network['gateway'],
            'broadcast': network['broadcast'],
            'mac': instance['mac_address'],
            'rxtx_cap': flavor['rxtx_cap'],
            'dns': [network['dns']],
            'ips': [ip_dict(ip) for ip in network_ips]}

        if FLAGS.use_ipv6:
            mapping['ip6s'] = [ip6_dict()]
            mapping['gateway6'] = network['gateway_v6']

        network_info.append((network, mapping))
    return network_info
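
The three CIDR helpers above are thin wrappers around IPy; a short usage sketch follows (the CIDR values are arbitrary examples, and the expected results are shown as comments):

    from nova.virt.libvirt import netutils

    print netutils.get_net_and_mask('10.0.0.0/24')
    # ('10.0.0.0', '255.255.255.0')
    print netutils.get_net_and_prefixlen('fd00::/64')
    # ('fd00::', '64')
    print netutils.get_ip_version('10.0.0.0/24')
    # 4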
@ -202,6 +202,13 @@ class VMOps(object):
            for path, contents in instance.injected_files:
                LOG.debug(_("Injecting file path: '%s'") % path)
                self.inject_file(instance, path, contents)

        def _set_admin_password():
            admin_password = instance.admin_pass
            if admin_password:
                LOG.debug(_("Setting admin password"))
                self.set_admin_password(instance, admin_password)

        # NOTE(armando): Do we really need to do this in virt?
        # NOTE(tr3buchet): not sure but wherever we do it, we need to call
        # reset_network afterwards
@ -214,6 +221,7 @@ class VMOps(object):
                LOG.debug(_('Instance %s: booted'), instance_name)
                timer.stop()
                _inject_files()
                _set_admin_password()
                return True
            except Exception, exc:
                LOG.warn(exc)
@ -253,7 +261,8 @@ class VMOps(object):
            instance_name = instance_or_vm.name
        vm_ref = VMHelper.lookup(self._session, instance_name)
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_obj.id)
            raise exception.NotFound(_("No opaque_ref could be determined "
                                       "for '%s'.") % instance_or_vm)
        return vm_ref

    def _acquire_bootlock(self, vm):
@ -457,6 +466,9 @@ class VMOps(object):
        # Successful return code from password is '0'
        if resp_dict['returncode'] != '0':
            raise RuntimeError(resp_dict['message'])
        db.instance_update(context.get_admin_context(),
                           instance['id'],
                           dict(admin_pass=new_pass))
        return resp_dict['message']

    def inject_file(self, instance, path, contents):
@ -1171,13 +1183,13 @@ class SimpleDH(object):
        shared = self._shared
        cmd = base_cmd % locals()
        proc = _runproc(cmd)
        proc.stdin.write(text)
        proc.stdin.write(text + '\n')
        proc.stdin.close()
        proc.wait()
        err = proc.stderr.read()
        if err:
            raise RuntimeError(_('OpenSSL error: %s') % err)
        return proc.stdout.read()
        return proc.stdout.read().strip('\n')

    def encrypt(self, text):
        return self._run_ssl(text, 'enc')
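
The SimpleDH hunk above writes one trailing newline into the openssl child process and strips one newline from its output, so encrypt/decrypt round trips return exactly the original text. A rough stand-alone sketch of the same convention, using base64 instead of the AES encryption SimpleDH actually performs (the command and message here are illustrative, not what Nova runs):

    import subprocess

    def _pipe_through_openssl(text, args):
        # same pattern as the diff: one '\n' in, one '\n' stripped back out
        proc = subprocess.Popen(['openssl'] + args,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(text + '\n')
        if err:
            raise RuntimeError('OpenSSL error: %s' % err)
        return out.strip('\n')

    encoded = _pipe_through_openssl('my message', ['enc', '-base64'])
    decoded = _pipe_through_openssl(encoded, ['enc', '-base64', '-d'])
    assert decoded == 'my message'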
@ -204,14 +204,17 @@ def _get_volume_id(path_or_id):
    if isinstance(path_or_id, int):
        return path_or_id
    # n must contain at least the volume_id
    # /vol- is for remote volumes
    # -vol- is for local volumes
    # :volume- is for remote volumes
    # -volume- is for local volumes
    # see compute/manager->setup_compute_volume
    volume_id = path_or_id[path_or_id.find('/vol-') + 1:]
    volume_id = path_or_id[path_or_id.find(':volume-') + 1:]
    if volume_id == path_or_id:
        volume_id = path_or_id[path_or_id.find('-vol-') + 1:]
        volume_id = volume_id.replace('--', '-')
    return volume_id
        volume_id = path_or_id[path_or_id.find('-volume--') + 1:]
        volume_id = volume_id.replace('volume--', '')
    else:
        volume_id = volume_id.replace('volume-', '')
        volume_id = volume_id[0:volume_id.find('-')]
    return int(volume_id)


def _get_target_host(iscsi_string):
@ -244,25 +247,23 @@ def _get_target(volume_id):
    Gets iscsi name and portal from volume name and host.
    For this method to work the following are needed:
    1) volume_ref['host'] must resolve to something rather than loopback
    2) ietd must bind only to the address as resolved above
    If any of the two conditions are not met, fall back on Flags.
    """
    volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(),
                                         volume_id)
    volume_ref = db.volume_get(context.get_admin_context(),
                               volume_id)
    result = (None, None)
    try:
        (r, _e) = utils.execute("sudo iscsiadm -m discovery -t "
                                "sendtargets -p %s" %
                                volume_ref['host'])
        (r, _e) = utils.execute('sudo', 'iscsiadm',
                                '-m', 'discovery',
                                '-t', 'sendtargets',
                                '-p', volume_ref['host'])
    except exception.ProcessExecutionError, exc:
        LOG.exception(exc)
    else:
        targets = r.splitlines()
        if len(_e) == 0 and len(targets) == 1:
            for target in targets:
                if volume_id in target:
                    (location, _sep, iscsi_name) = target.partition(" ")
                    break
            iscsi_portal = location.split(",")[0]
            result = (iscsi_name, iscsi_portal)
        volume_name = "volume-%08x" % volume_id
        for target in r.splitlines():
            if FLAGS.iscsi_ip_prefix in target and volume_name in target:
                (location, _sep, iscsi_name) = target.partition(" ")
                break
        iscsi_portal = location.split(",")[0]
        result = (iscsi_name, iscsi_portal)
    return result
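
The reworked _get_target() above now matches discovery output on both the configured IP prefix and the canonical volume name instead of insisting on a single-line result. Here is a hedged, offline sketch of that parsing step; the sample discovery lines and the IQN strings are invented, while the 'volume-%08x' naming and the partition/split logic follow the code above:

    # parse `iscsiadm -m discovery -t sendtargets -p <host>` style output
    sample_output = (
        "192.168.1.10:3260,1 iqn.2010-10.org.openstack:volume-0000002a\n"
        "10.0.0.9:3260,1 iqn.2010-10.org.openstack:volume-00000007\n")

    def parse_target(discovery_output, volume_id, ip_prefix):
        volume_name = "volume-%08x" % volume_id
        iscsi_name = iscsi_portal = None
        for target in discovery_output.splitlines():
            if ip_prefix in target and volume_name in target:
                (location, _sep, iscsi_name) = target.partition(" ")
                iscsi_portal = location.split(",")[0]
                break
        return (iscsi_name, iscsi_portal)

    print parse_target(sample_output, 42, '192.168.1.')
    # ('iqn.2010-10.org.openstack:volume-0000002a', '192.168.1.10:3260')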