Merged trunk

Ryu Ishimoto
2011-08-17 17:58:14 +09:00
10 changed files with 181 additions and 109 deletions

View File

@@ -48,7 +48,6 @@ flags.DECLARE('auth_driver', 'nova.auth.manager')
 flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('num_networks', 'nova.network.manager')
 flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
-flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface')
 LOG = logging.getLogger('nova.dhcpbridge')
@@ -87,10 +86,10 @@ def del_lease(mac, ip_address, _interface):
                  "args": {"address": ip_address}})

-def init_leases(interface):
-    """Get the list of hosts for an interface."""
+def init_leases(network_id):
+    """Get the list of hosts for a network."""
     ctxt = context.get_admin_context()
-    network_ref = db.network_get_by_bridge(ctxt, interface)
+    network_ref = db.network_get(ctxt, network_id)
     return linux_net.get_dhcp_leases(ctxt, network_ref)
@@ -101,7 +100,8 @@ def main():
     argv = FLAGS(sys.argv)
     logging.setup()
     # check ENV first so we don't break any older deploys
-    interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
+    network_id = int(os.environ.get('NETWORK_ID'))
     if int(os.environ.get('TESTING', '0')):
         from nova.tests import fake_flags
@@ -120,7 +120,7 @@ def main():
         LOG.debug(msg)
         globals()[action + '_lease'](mac, ip, interface)
     else:
-        print init_leases(interface)
+        print init_leases(network_id)

 if __name__ == "__main__":
     main()
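The behavioral change in this file: the bridge script no longer derives its network from the dnsmasq_interface flag or the DNSMASQ_INTERFACE variable; it now requires a NETWORK_ID environment variable. Since int(None) raises a TypeError, an environment that does not export NETWORK_ID will abort here. A minimal fail-fast sketch of the new contract (hypothetical, not part of this commit):

import os

# Sketch only: whatever launches dnsmasq must export NETWORK_ID for this
# script; guard explicitly so the failure message is clearer than TypeError.
raw_id = os.environ.get('NETWORK_ID')
if raw_id is None:
    raise SystemExit("NETWORK_ID not set; the dnsmasq launcher must export it")
network_id = int(raw_id)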

View File

@@ -719,7 +719,7 @@ class NetworkCommands(object):
         # sanitize other input using FLAGS if necessary
         if not num_networks:
             num_networks = FLAGS.num_networks
-        if not network_size:
+        if not network_size and fixed_range_v4:
             fixnet = netaddr.IPNetwork(fixed_range_v4)
             each_subnet_size = fixnet.size / int(num_networks)
             if each_subnet_size > FLAGS.network_size:
@@ -741,6 +741,9 @@ class NetworkCommands(object):
         if not dns1 and FLAGS.flat_network_dns:
             dns1 = FLAGS.flat_network_dns
+        if not network_size:
+            network_size = FLAGS.network_size
         # create the network
         net_manager = utils.import_object(FLAGS.network_manager)
         net_manager.create_networks(context.get_admin_context(),
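To make the sizing logic concrete, a worked example under assumed inputs (a sketch mirroring the nova-manage code above, not taken verbatim from it):

import netaddr

# Assumed inputs.
fixed_range_v4 = '192.168.0.0/16'
num_networks = 4

fixnet = netaddr.IPNetwork(fixed_range_v4)
each_subnet_size = fixnet.size / int(num_networks)   # 65536 / 4 = 16384
# Per the hunk above, the computed-size branch now runs only when
# fixed_range_v4 is given; otherwise network_size falls back to
# FLAGS.network_size explicitly in the second hunk.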

View File

@@ -14,7 +14,7 @@
 # under the License.

 """
-The Zone Aware Scheduler is a base class Scheduler for creating instances
+The AbstractScheduler is a base class Scheduler for creating instances
 across zones. There are two expansion points to this class for:
 1. Assigning Weights to hosts for requested instances
 2. Filtering Hosts based on required instance capabilities
@@ -40,7 +40,7 @@ from nova.scheduler import api
 from nova.scheduler import driver

 FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')
+LOG = logging.getLogger('nova.scheduler.abstract_scheduler')

 class InvalidBlob(exception.NovaException):
@@ -48,8 +48,10 @@ class InvalidBlob(exception.NovaException):
                 "to instance create request.")

-class ZoneAwareScheduler(driver.Scheduler):
-    """Base class for creating Zone Aware Schedulers."""
+class AbstractScheduler(driver.Scheduler):
+    """Base class for creating Schedulers that can work across any nova
+    deployment, from simple designs to multiply-nested zones.
+    """

     def _call_zone_method(self, context, method, specs, zones):
         """Call novaclient zone method. Broken out for testing."""
@@ -266,7 +268,7 @@ class ZoneAwareScheduler(driver.Scheduler):
         """
         if topic != "compute":
-            raise NotImplementedError(_("Zone Aware Scheduler only understands"
+            raise NotImplementedError(_("Scheduler only understands"
                                         " Compute nodes (for now)"))

         num_instances = request_spec.get('num_instances', 1)
@@ -328,13 +330,31 @@ class ZoneAwareScheduler(driver.Scheduler):
         requested_mem = instance_type['memory_mb'] * 1024 * 1024
         return capabilities['host_memory_free'] >= requested_mem

+    def hold_filter_hosts(self, topic, request_spec, hosts=None):
+        """Filter the full host list (from the ZoneManager)"""
+        # NOTE(dabo): The logic used by the current _schedule() method
+        # is incorrect. Since this task is just to refactor the classes,
+        # I'm not fixing the logic now - that will be the next task.
+        # So for now this method is just renamed; afterwards this will
+        # become the filter_hosts() method, and the one below will
+        # be removed.
+        filter_name = request_spec.get('filter', None)
+        # Make sure that the requested filter is legitimate.
+        selected_filter = host_filter.choose_host_filter(filter_name)
+        # TODO(sandy): We're only using InstanceType-based specs
+        # currently. Later we'll need to snoop for more detailed
+        # host filter requests.
+        instance_type = request_spec['instance_type']
+        name, query = selected_filter.instance_type_to_filter(instance_type)
+        return selected_filter.filter_hosts(self.zone_manager, query)
+
     def filter_hosts(self, topic, request_spec, host_list=None):
         """Return a list of hosts which are acceptable for scheduling.
         Return value should be a list of (hostname, capability_dict)s.
         Derived classes may override this, but may find the
         '<topic>_filter' function more appropriate.
         """

         def _default_filter(self, hostname, capabilities, request_spec):
             """Default filter function if there's no <topic>_filter"""
             # NOTE(sirp): The default logic is the equivalent to
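The renamed class keeps the two expansion points named in the module docstring: weighting and filtering. A minimal subclass sketch (illustrative pass-through behavior, assumed rather than taken from this commit; the weigh_hosts return shape follows the [{weight, hostname}] format documented later in this diff):

from nova.scheduler import abstract_scheduler

class TrivialScheduler(abstract_scheduler.AbstractScheduler):
    """Illustrative only: accepts every host and weights them equally."""

    def filter_hosts(self, topic, request_spec, host_list=None):
        # Expansion point 2: return a list of (hostname, capability_dict)s.
        return host_list

    def weigh_hosts(self, topic, request_spec, hosts):
        # Expansion point 1: one {weight, hostname, capabilities} per host.
        return [dict(weight=1, hostname=hostname, capabilities=caps)
                for hostname, caps in hosts]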

View File

@@ -14,7 +14,12 @@
 # under the License.

 """
-Host Filter is a mechanism for requesting instance resources.
+The Host Filter classes are a way to ensure that only hosts that are
+appropriate are considered when creating a new instance. Hosts that are
+either incompatible or insufficient to accept a newly-requested instance
+are removed by Host Filter classes from consideration. Those that pass
+the filter are then passed on for weighting or other process for ordering.
+
 Three filters are included: AllHosts, Flavor & JSON. AllHosts just
 returns the full, unfiltered list of hosts. Flavor is a hard coded
 matching mechanism based on flavor criteria and JSON is an ad-hoc
@@ -28,12 +33,6 @@ noted a need for a more expressive way of specifying instances.
 Since we don't want to get into building full DSL this is a simple
 form as an example of how this could be done. In reality, most
 consumers will use the more rigid filters such as FlavorFilter.
-
-Note: These are "required" capability filters. These capabilities
-used must be present or the host will be excluded. The hosts
-returned are then weighed by the Weighted Scheduler. Weights
-can take the more esoteric factors into consideration (such as
-server affinity and customer separation).
 """

 import json
@@ -41,9 +40,7 @@ import json
 from nova import exception
 from nova import flags
 from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
 from nova import utils
-from nova.scheduler import zone_aware_scheduler

 LOG = logging.getLogger('nova.scheduler.host_filter')
@@ -125,9 +122,8 @@ class InstanceTypeFilter(HostFilter):
             spec_disk = instance_type['local_gb']
             extra_specs = instance_type['extra_specs']

-            if host_ram_mb >= spec_ram and \
-               disk_bytes >= spec_disk and \
-               self._satisfies_extra_specs(capabilities, instance_type):
+            if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
+                    self._satisfies_extra_specs(capabilities, instance_type)):
                 selected_hosts.append((host, capabilities))

         return selected_hosts
@@ -309,7 +305,6 @@ def choose_host_filter(filter_name=None):
     function checks the filter name against a predefined set
     of acceptable filters.
     """
-
     if not filter_name:
         filter_name = FLAGS.default_host_filter
     for filter_class in FILTERS:
@@ -317,33 +312,3 @@ def choose_host_filter(filter_name=None):
         if host_match == filter_name:
             return filter_class()
     raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
-
-
-class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
-    """The HostFilterScheduler uses the HostFilter to filter
-    hosts for weighing. The particular filter used may be passed in
-    as an argument or the default will be used.
-
-    request_spec = {'filter': <Filter name>,
-                    'instance_type': <InstanceType dict>}
-    """
-
-    def filter_hosts(self, topic, request_spec, hosts=None):
-        """Filter the full host list (from the ZoneManager)"""
-        filter_name = request_spec.get('filter', None)
-        host_filter = choose_host_filter(filter_name)
-
-        # TODO(sandy): We're only using InstanceType-based specs
-        # currently. Later we'll need to snoop for more detailed
-        # host filter requests.
-        instance_type = request_spec['instance_type']
-        name, query = host_filter.instance_type_to_filter(instance_type)
-        return host_filter.filter_hosts(self.zone_manager, query)
-
-    def weigh_hosts(self, topic, request_spec, hosts):
-        """Derived classes must override this method and return
-        a lists of hosts in [{weight, hostname}] format.
-        """
-        return [dict(weight=1, hostname=hostname, capabilities=caps)
-                for hostname, caps in hosts]
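The removed HostFilterScheduler's selection flow survives as hold_filter_hosts() on AbstractScheduler (see the scheduler diff above). In outline, as a hedged usage sketch where instance_type and zone_manager are assumed stand-ins for real objects:

from nova.scheduler import host_filter

# request_spec shape per the removed docstring above; a filter name of
# None selects FLAGS.default_host_filter.
request_spec = {'filter': None, 'instance_type': instance_type}
selected = host_filter.choose_host_filter(request_spec.get('filter'))
name, query = selected.instance_type_to_filter(request_spec['instance_type'])
hosts = selected.filter_hosts(zone_manager, query)   # [(hostname, caps), ...]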

View File

@@ -22,11 +22,14 @@ The cost-function and weights are tabulated, and the host with the least cost
 is then selected for provisioning.
 """

+# TODO(dabo): This class will be removed in the next merge prop; it remains now
+# because much of the code will be refactored into different classes.
+
 import collections

 from nova import flags
 from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
 from nova import utils
 from nova import exception
@@ -61,7 +64,7 @@ def compute_fill_first_cost_fn(host):
     return free_mem


-class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
     def __init__(self, *args, **kwargs):
         self.cost_fns_cache = {}
         super(LeastCostScheduler, self).__init__(*args, **kwargs)
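The "tabulated cost-functions and weights" idea in the docstring reduces to a weighted sum per host; a minimal sketch (the combination step is assumed for illustration, only compute_fill_first_cost_fn comes from this file):

def weighted_sum(host, cost_fns, weights):
    # Illustrative only: combine per-host cost functions linearly.
    return sum(w * fn(host) for fn, w in zip(cost_fns, weights))

# compute_fill_first_cost_fn above returns the host's free memory, so a
# positive weight makes emptier hosts costlier and packs new instances onto
# already-busy hosts; the host with the least total cost is selected.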

View File

@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

 """
-Tests For Zone Aware Scheduler.
+Tests For Abstract Scheduler.
 """

 import json
@@ -25,7 +25,7 @@ from nova import rpc
 from nova import test
 from nova.compute import api as compute_api
 from nova.scheduler import driver
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
 from nova.scheduler import zone_manager
@@ -60,7 +60,7 @@ def fake_zone_manager_service_states(num_hosts):
     return states


-class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler):
     # No need to stub anything at the moment
     pass
@@ -161,15 +161,15 @@ def fake_zone_get_all(context):
     ]


-class ZoneAwareSchedulerTestCase(test.TestCase):
-    """Test case for Zone Aware Scheduler."""
+class AbstractSchedulerTestCase(test.TestCase):
+    """Test case for Abstract Scheduler."""

-    def test_zone_aware_scheduler(self):
+    def test_abstract_scheduler(self):
         """
         Create a nested set of FakeZones, try to build multiple instances
         and ensure that a select call returns the appropriate build plan.
         """
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
         self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -194,7 +194,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         properly adjusted based on the scale/offset in the zone
         db entries.
         """
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         child_results = fake_call_zone_method(None, None, None, None)
         zones = fake_zone_get_all(None)
         sched._adjust_child_weights(child_results, zones)
@@ -209,11 +209,11 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
             if zone == 'zone3':  # Scale x1000
                 self.assertEqual(scaled.pop(0), w)

-    def test_empty_zone_aware_scheduler(self):
+    def test_empty_abstract_scheduler(self):
         """
         Ensure empty hosts & child_zones result in NoValidHosts exception.
         """
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
         self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -231,7 +231,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         If the zone_blob hint was passed in, don't re-schedule.
         """
         global was_called
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         was_called = False
         self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
         request_spec = {
@@ -248,7 +248,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
     def test_provision_resource_local(self):
         """Provision a resource locally or remotely."""
         global was_called
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         was_called = False
         self.stubs.Set(sched, '_provision_resource_locally',
                        fake_provision_resource_locally)
@@ -260,7 +260,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
     def test_provision_resource_remote(self):
         """Provision a resource locally or remotely."""
         global was_called
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         was_called = False
         self.stubs.Set(sched, '_provision_resource_from_blob',
                        fake_provision_resource_from_blob)
@@ -272,9 +272,9 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
     def test_provision_resource_from_blob_empty(self):
         """Provision a resource locally or remotely given no hints."""
         global was_called
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         request_spec = {}
-        self.assertRaises(zone_aware_scheduler.InvalidBlob,
+        self.assertRaises(abstract_scheduler.InvalidBlob,
                           sched._provision_resource_from_blob,
                           None, {}, 1, {}, {})
@@ -283,7 +283,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         Provision a resource locally or remotely when blob hint passed in.
         """
         global was_called
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         was_called = False

         def fake_create_db_entry_for_new_instance(self, context,
@@ -317,7 +317,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         passed in.
         """
         global was_called
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         self.stubs.Set(sched, '_decrypt_blob',
                        fake_decrypt_blob_returns_child_info)
         was_called = False
@@ -336,7 +336,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         from an immediate child.
         """
         global was_called
-        sched = FakeZoneAwareScheduler()
+        sched = FakeAbstractScheduler()
         was_called = False
         self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
                        fake_ask_child_zone_to_create_instance)
@@ -350,14 +350,14 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
     def test_decrypt_blob(self):
         """Test that the decrypt method works."""
-        fixture = FakeZoneAwareScheduler()
+        fixture = FakeAbstractScheduler()
         test_data = {"foo": "bar"}

         class StubDecryptor(object):
             def decryptor(self, key):
                 return lambda blob: blob

-        self.stubs.Set(zone_aware_scheduler, 'crypto',
+        self.stubs.Set(abstract_scheduler, 'crypto',
                        StubDecryptor())

         self.assertEqual(fixture._decrypt_blob(test_data),

View File

@@ -18,7 +18,7 @@ Tests For Least Cost Scheduler
 from nova import test
 from nova.scheduler import least_cost
-from nova.tests.scheduler import test_zone_aware_scheduler
+from nova.tests.scheduler import test_abstract_scheduler

 MB = 1024 * 1024
@@ -70,7 +70,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
         zone_manager = FakeZoneManager()
-        states = test_zone_aware_scheduler.fake_zone_manager_service_states(
+        states = test_abstract_scheduler.fake_zone_manager_service_states(
             num_hosts=10)
         zone_manager.service_states = states

View File

@@ -1341,6 +1341,69 @@ class ComputeTestCase(test.TestCase):
         db.instance_destroy(c, instance_id2)
         db.instance_destroy(c, instance_id3)

+    def test_get_all_by_metadata(self):
+        """Test searching instances by metadata"""
+        c = context.get_admin_context()
+        instance_id0 = self._create_instance()
+        instance_id1 = self._create_instance({
+                'metadata': {'key1': 'value1'}})
+        instance_id2 = self._create_instance({
+                'metadata': {'key2': 'value2'}})
+        instance_id3 = self._create_instance({
+                'metadata': {'key3': 'value3'}})
+        instance_id4 = self._create_instance({
+                'metadata': {'key3': 'value3',
+                             'key4': 'value4'}})
+
+        # get all instances
+        instances = self.compute_api.get_all(c,
+                search_opts={'metadata': {}})
+        self.assertEqual(len(instances), 5)
+
+        # wrong key/value combination
+        instances = self.compute_api.get_all(c,
+                search_opts={'metadata': {'key1': 'value3'}})
+        self.assertEqual(len(instances), 0)
+
+        # non-existing keys
+        instances = self.compute_api.get_all(c,
+                search_opts={'metadata': {'key5': 'value1'}})
+        self.assertEqual(len(instances), 0)
+
+        # find existing instance
+        instances = self.compute_api.get_all(c,
+                search_opts={'metadata': {'key2': 'value2'}})
+        self.assertEqual(len(instances), 1)
+        self.assertEqual(instances[0].id, instance_id2)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'metadata': {'key3': 'value3'}})
+        self.assertEqual(len(instances), 2)
+        instance_ids = [instance.id for instance in instances]
+        self.assertTrue(instance_id3 in instance_ids)
+        self.assertTrue(instance_id4 in instance_ids)
+
+        # multiple criteria as a dict
+        instances = self.compute_api.get_all(c,
+                search_opts={'metadata': {'key3': 'value3',
+                                          'key4': 'value4'}})
+        self.assertEqual(len(instances), 1)
+        self.assertEqual(instances[0].id, instance_id4)
+
+        # multiple criteria as a list
+        instances = self.compute_api.get_all(c,
+                search_opts={'metadata': [{'key4': 'value4'},
+                                          {'key3': 'value3'}]})
+        self.assertEqual(len(instances), 1)
+        self.assertEqual(instances[0].id, instance_id4)
+
+        db.instance_destroy(c, instance_id0)
+        db.instance_destroy(c, instance_id1)
+        db.instance_destroy(c, instance_id2)
+        db.instance_destroy(c, instance_id3)
+        db.instance_destroy(c, instance_id4)
+
     @staticmethod
     def _parse_db_block_device_mapping(bdm_ref):
         attr_list = ('delete_on_termination', 'device_name', 'no_device',

View File

@@ -255,7 +255,7 @@ class CommonNetworkTestCase(test.TestCase):
             raise exception.NetworkNotFoundForCidr()

         def network_create_safe(self, context, net):
-            fakenet = {}
+            fakenet = dict(net)
             fakenet['id'] = 999
             return fakenet
@@ -269,6 +269,9 @@ class CommonNetworkTestCase(test.TestCase):
         def deallocate_fixed_ip(self, context, address):
             self.deallocate_called = address

+        def _create_fixed_ips(self, context, network_id):
+            pass
+
     def fake_create_fixed_ips(self, context, network_id):
         return None
@@ -286,16 +289,20 @@ class CommonNetworkTestCase(test.TestCase):
     def test_validate_cidrs(self):
         manager = self.FakeNetworkManager()
-        nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256)
+        nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
+                                       False, 1, 256, None, None, None,
+                                       None)
         self.assertEqual(1, len(nets))
-        cidrs = [str(net) for net in nets]
+        cidrs = [str(net['cidr']) for net in nets]
         self.assertTrue('192.168.0.0/24' in cidrs)

     def test_validate_cidrs_split_exact_in_half(self):
         manager = self.FakeNetworkManager()
-        nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128)
+        nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
+                                       False, 2, 128, None, None, None,
+                                       None)
         self.assertEqual(2, len(nets))
-        cidrs = [str(net) for net in nets]
+        cidrs = [str(net['cidr']) for net in nets]
         self.assertTrue('192.168.0.0/25' in cidrs)
         self.assertTrue('192.168.0.128/25' in cidrs)
@@ -306,9 +313,11 @@ class CommonNetworkTestCase(test.TestCase):
         manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
                                                      'cidr': '192.168.2.0/24'}])
         self.mox.ReplayAll()
-        nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256)
+        nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+                                       False, 4, 256, None, None, None,
+                                       None)
         self.assertEqual(4, len(nets))
-        cidrs = [str(net) for net in nets]
+        cidrs = [str(net['cidr']) for net in nets]
         exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                      '192.168.4.0/24']
         for exp_cidr in exp_cidrs:
@@ -324,8 +333,9 @@ class CommonNetworkTestCase(test.TestCase):
         self.mox.ReplayAll()
         # ValueError: requested cidr (192.168.2.0/24) conflicts with
         #             existing smaller cidr
-        args = [None, '192.168.2.0/24', 1, 256]
-        self.assertRaises(ValueError, manager._validate_cidrs, *args)
+        args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
+                None, None)
+        self.assertRaises(ValueError, manager.create_networks, *args)

     def test_validate_cidrs_split_smaller_cidr_in_use(self):
         manager = self.FakeNetworkManager()
@@ -334,9 +344,10 @@ class CommonNetworkTestCase(test.TestCase):
         manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
                                                      'cidr': '192.168.2.0/25'}])
         self.mox.ReplayAll()
-        nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256)
+        nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+                                       False, 4, 256, None, None, None, None)
         self.assertEqual(4, len(nets))
-        cidrs = [str(net) for net in nets]
+        cidrs = [str(net['cidr']) for net in nets]
         exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                      '192.168.4.0/24']
         for exp_cidr in exp_cidrs:
@@ -350,9 +361,10 @@ class CommonNetworkTestCase(test.TestCase):
         manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
                                                      'cidr': '192.168.2.9/29'}])
         self.mox.ReplayAll()
-        nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32)
+        nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
+                                       False, 3, 32, None, None, None, None)
         self.assertEqual(3, len(nets))
-        cidrs = [str(net) for net in nets]
+        cidrs = [str(net['cidr']) for net in nets]
         exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
         for exp_cidr in exp_cidrs:
             self.assertTrue(exp_cidr in cidrs)
@@ -367,17 +379,19 @@ class CommonNetworkTestCase(test.TestCase):
                   {'id': 3, 'cidr': '192.168.2.128/26'}]
         manager.db.network_get_all(ctxt).AndReturn(in_use)
         self.mox.ReplayAll()
-        args = [None, '192.168.2.0/24', 3, 64]
+        args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
+                None, None)
         # ValueError: Not enough subnets avail to satisfy requested num_
         #             networks - some subnets in requested range already
         #             in use
-        self.assertRaises(ValueError, manager._validate_cidrs, *args)
+        self.assertRaises(ValueError, manager.create_networks, *args)

     def test_validate_cidrs_one_in_use(self):
         manager = self.FakeNetworkManager()
-        args = [None, '192.168.0.0/24', 2, 256]
+        args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
+                None, None)
         # ValueError: network_size * num_networks exceeds cidr size
-        self.assertRaises(ValueError, manager._validate_cidrs, *args)
+        self.assertRaises(ValueError, manager.create_networks, *args)

     def test_validate_cidrs_already_used(self):
         manager = self.FakeNetworkManager()
@@ -387,20 +401,23 @@ class CommonNetworkTestCase(test.TestCase):
                                                      'cidr': '192.168.0.0/24'}])
         self.mox.ReplayAll()
         # ValueError: cidr already in use
-        args = [None, '192.168.0.0/24', 1, 256]
-        self.assertRaises(ValueError, manager._validate_cidrs, *args)
+        args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
+                None, None)
+        self.assertRaises(ValueError, manager.create_networks, *args)

     def test_validate_cidrs_too_many(self):
         manager = self.FakeNetworkManager()
-        args = [None, '192.168.0.0/24', 200, 256]
+        args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
+                None, None)
         # ValueError: Not enough subnets avail to satisfy requested
         #             num_networks
-        self.assertRaises(ValueError, manager._validate_cidrs, *args)
+        self.assertRaises(ValueError, manager.create_networks, *args)

     def test_validate_cidrs_split_partial(self):
         manager = self.FakeNetworkManager()
-        nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256)
-        returned_cidrs = [str(net) for net in nets]
+        nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+                                       False, 2, 256, None, None, None, None)
+        returned_cidrs = [str(net['cidr']) for net in nets]
         self.assertTrue('192.168.0.0/24' in returned_cidrs)
         self.assertTrue('192.168.1.0/24' in returned_cidrs)
@@ -411,10 +428,11 @@ class CommonNetworkTestCase(test.TestCase):
         fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
         manager.db.network_get_all(ctxt).AndReturn(fakecidr)
         self.mox.ReplayAll()
-        args = [None, '192.168.0.0/24', 1, 256]
+        args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
+                None, None)
         # ValueError: requested cidr (192.168.0.0/24) conflicts
         #             with existing supernet
-        self.assertRaises(ValueError, manager._validate_cidrs, *args)
+        self.assertRaises(ValueError, manager.create_networks, *args)

     def test_create_networks(self):
         cidr = '192.168.0.0/24'
@@ -424,7 +442,7 @@ class CommonNetworkTestCase(test.TestCase):
         args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
                 None]
         result = manager.create_networks(*args)
-        self.assertEqual(manager.create_networks(*args), None)
+        self.assertTrue(manager.create_networks(*args))

     def test_create_networks_cidr_already_used(self):
         manager = self.FakeNetworkManager()
@@ -444,4 +462,4 @@ class CommonNetworkTestCase(test.TestCase):
                        self.fake_create_fixed_ips)
         args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
                 None]
-        self.assertEqual(manager.create_networks(*args), None)
+        self.assertTrue(manager.create_networks(*args))
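For reference, the positional call shape these tests now exercise, annotated with assumed parameter names (only the context, label, cidr, num_networks and network_size positions are confirmed by this diff; the rest are guesses):

# Hypothetical annotation; the names on the None arguments are assumptions.
nets = manager.create_networks(
    None,                # context
    'fake',              # label
    '192.168.0.0/24',    # fixed_range_v4 cidr
    False,               # multi-host flag (assumed)
    1,                   # num_networks
    256,                 # network_size
    None, None, None, None)    # v6 range and remaining options (assumed)
cidrs = [str(net['cidr']) for net in nets]   # results come back as network rows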

View File

@@ -28,10 +28,10 @@ from nova import utils

 def stubout_instance_snapshot(stubs):
     @classmethod
-    def fake_fetch_image(cls, context, session, instance_id, image, user,
+    def fake_fetch_image(cls, context, session, instance, image, user,
                          project, type):
         from nova.virt.xenapi.fake import create_vdi
-        name_label = "instance-%s" % instance_id
+        name_label = "instance-%s" % instance.id
         #TODO: create fake SR record
         sr_ref = "fakesr"
         vdi_ref = create_vdi(name_label=name_label, read_only=False,