merged trunk

This commit is contained in:
Chris Behrens
2011-09-08 12:40:45 -07:00
7 changed files with 408 additions and 11 deletions

View File

@@ -15,6 +15,7 @@
<code@term.ie> <termie@preciousroy.local> <code@term.ie> <termie@preciousroy.local>
<corywright@gmail.com> <cory.wright@rackspace.com> <corywright@gmail.com> <cory.wright@rackspace.com>
<dan@nicira.com> <danwent@dan-xs3-cs> <dan@nicira.com> <danwent@dan-xs3-cs>
<dan@nicira.com> danwent@gmail.com
<devin.carlen@gmail.com> <devcamcar@illian.local> <devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver> <ewan.mellor@citrix.com> <emellor@silver>
<itoumsn@nttdata.co.jp> <itoumsn@shayol> <itoumsn@nttdata.co.jp> <itoumsn@shayol>

View File

@@ -11,6 +11,7 @@ Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com> Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Arvind Somya <asomya@cisco.com> Arvind Somya <asomya@cisco.com>
Bilal Akhtar <bilalakhtar@ubuntu.com> Bilal Akhtar <bilalakhtar@ubuntu.com>
Brad Hall <brad@nicira.com>
Brian Lamar <brian.lamar@rackspace.com> Brian Lamar <brian.lamar@rackspace.com>
Brian Schott <bschott@isi.edu> Brian Schott <bschott@isi.edu>
Brian Waldon <brian.waldon@rackspace.com> Brian Waldon <brian.waldon@rackspace.com>

View File

@@ -59,11 +59,11 @@ import glob
import json import json
import math import math
import netaddr import netaddr
from optparse import OptionParser
import os import os
import sys import sys
import time import time
from optparse import OptionParser
# If ../nova/__init__.py exists, add ../ to Python search path, so that # If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
@@ -685,10 +685,17 @@ class NetworkCommands(object):
help='Multi host') help='Multi host')
@args('--dns1', dest="dns1", metavar="<DNS Address>", help='First DNS') @args('--dns1', dest="dns1", metavar="<DNS Address>", help='First DNS')
@args('--dns2', dest="dns2", metavar="<DNS Address>", help='Second DNS') @args('--dns2', dest="dns2", metavar="<DNS Address>", help='Second DNS')
@args('--uuid', dest="net_uuid", metavar="<network uuid>",
help='Network UUID')
@args('--project_id', dest="project_id", metavar="<project id>",
help='Project id')
@args('--priority', dest="priority", metavar="<number>",
help='Network interface priority')
def create(self, label=None, fixed_range_v4=None, num_networks=None, def create(self, label=None, fixed_range_v4=None, num_networks=None,
network_size=None, multi_host=None, vlan_start=None, network_size=None, multi_host=None, vlan_start=None,
vpn_start=None, fixed_range_v6=None, gateway_v6=None, vpn_start=None, fixed_range_v6=None, gateway_v6=None,
bridge=None, bridge_interface=None, dns1=None, dns2=None): bridge=None, bridge_interface=None, dns1=None, dns2=None,
project_id=None, priority=None, uuid=None):
"""Creates fixed ips for host by range""" """Creates fixed ips for host by range"""
# check for certain required inputs # check for certain required inputs
@@ -765,7 +772,10 @@ class NetworkCommands(object):
bridge=bridge, bridge=bridge,
bridge_interface=bridge_interface, bridge_interface=bridge_interface,
dns1=dns1, dns1=dns1,
dns2=dns2) dns2=dns2,
project_id=project_id,
priority=priority,
uuid=uuid)
def list(self): def list(self):
"""List all created networks""" """List all created networks"""
@@ -790,16 +800,29 @@ class NetworkCommands(object):
network.project_id, network.project_id,
network.uuid) network.uuid)
def quantum_list(self):
"""List all created networks with Quantum-relevant fields"""
_fmt = "%-32s\t%-10s\t%-10s\t%s , %s"
print _fmt % (_('uuid'),
_('project'),
_('priority'),
_('cidr_v4'),
_('cidr_v6'))
for network in db.network_get_all(context.get_admin_context()):
print _fmt % (network.uuid,
network.project_id,
network.priority,
network.cidr,
network.cidr_v6)
@args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>', @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
help='Network to delete') help='Network to delete')
def delete(self, fixed_range): def delete(self, fixed_range):
"""Deletes a network""" """Deletes a network"""
network = db.network_get_by_cidr(context.get_admin_context(), \
fixed_range) # delete the network
if network.project_id is not None: net_manager = utils.import_object(FLAGS.network_manager)
raise ValueError(_('Network must be disassociated from project %s' net_manager.delete_network(context.get_admin_context(), fixed_range)
' before delete' % network.project_id))
db.network_delete_safe(context.get_admin_context(), network.id)
@args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>', @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
help='Network to modify') help='Network to modify')

View File

@@ -435,6 +435,10 @@ class NetworkNotFoundForBridge(NetworkNotFound):
message = _("Network could not be found for bridge %(bridge)s") message = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
message = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound): class NetworkNotFoundForCidr(NetworkNotFound):
message = _("Network could not be found with cidr %(cidr)s.") message = _("Network could not be found with cidr %(cidr)s.")

View File

@@ -55,5 +55,17 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler):
scheduling objectives scheduling objectives
""" """
# NOTE(sirp): The default logic is the same as the NoopCostFunction # NOTE(sirp): The default logic is the same as the NoopCostFunction
return [dict(weight=1, hostname=hostname, capabilities=capabilities) hosts = [dict(weight=1, hostname=hostname, capabilities=capabilities)
for hostname, capabilities in hosts] for hostname, capabilities in hosts]
# NOTE(Vek): What we actually need to return is enough hosts
# for all the instances!
num_instances = request_spec.get('num_instances', 1)
instances = []
while num_instances > len(hosts):
instances.extend(hosts)
num_instances -= len(hosts)
if num_instances > 0:
instances.extend(hosts[:num_instances])
return instances

View File

@@ -26,6 +26,7 @@ from nova import test
from nova.compute import api as compute_api from nova.compute import api as compute_api
from nova.scheduler import driver from nova.scheduler import driver
from nova.scheduler import abstract_scheduler from nova.scheduler import abstract_scheduler
from nova.scheduler import base_scheduler
from nova.scheduler import zone_manager from nova.scheduler import zone_manager
@@ -65,6 +66,11 @@ class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler):
pass pass
class FakeBaseScheduler(base_scheduler.BaseScheduler):
# No need to stub anything at the moment
pass
class FakeZoneManager(zone_manager.ZoneManager): class FakeZoneManager(zone_manager.ZoneManager):
def __init__(self): def __init__(self):
self.service_states = { self.service_states = {
@@ -387,3 +393,30 @@ class AbstractSchedulerTestCase(test.TestCase):
# 0 from local zones, 12 from remotes # 0 from local zones, 12 from remotes
self.assertEqual(12, len(build_plan)) self.assertEqual(12, len(build_plan))
class BaseSchedulerTestCase(test.TestCase):
"""Test case for Base Scheduler."""
def test_weigh_hosts(self):
"""
Try to weigh a short list of hosts and make sure enough
entries for a larger number instances are returned.
"""
sched = FakeBaseScheduler()
# Fake out a list of hosts
zm = FakeZoneManager()
hostlist = [(host, services['compute'])
for host, services in zm.service_states.items()
if 'compute' in services]
# Call weigh_hosts()
num_instances = len(hostlist) * 2 + len(hostlist) / 2
instlist = sched.weigh_hosts('compute',
dict(num_instances=num_instances),
hostlist)
# Should be enough entries to cover all instances
self.assertEqual(len(instlist), num_instances)

323
nova/tests/test_quantum.py Normal file
View File

@@ -0,0 +1,323 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova import ipv6
from nova import log as logging
from nova.network.quantum import manager as quantum_manager
from nova import test
from nova import utils
LOG = logging.getLogger('nova.tests.quantum_network')
# this class can be used for unit functional/testing on nova,
# as it does not actually make remote calls to the Quantum service
class FakeQuantumClientConnection(object):
    """In-memory stand-in for a Quantum client connection.

    Implements the subset of the Quantum client API that QuantumManager
    uses, so unit tests can run without a real Quantum service.  State
    lives in ``self.nets``, a dict mapping network uuid to
    ``{'net-name', 'tenant-id', 'ports'}``; ``'ports'`` maps port uuid to
    ``{'port-state', 'attachment-id'}``.
    """

    def __init__(self):
        self.nets = {}

    def get_networks_for_tenant(self, tenant_id):
        """Return the list of network uuids owned by tenant_id."""
        net_ids = []
        for net_id, n in self.nets.items():
            if n['tenant-id'] == tenant_id:
                net_ids.append(net_id)
        return net_ids

    def create_network(self, tenant_id, network_name):
        """Create a network for tenant_id and return its new uuid."""
        uuid = str(utils.gen_uuid())
        self.nets[uuid] = {'net-name': network_name,
                           'tenant-id': tenant_id,
                           'ports': {}}
        return uuid

    def delete_network(self, tenant_id, net_id):
        """Delete net_id if owned by tenant_id.

        Silently a no-op on a tenant mismatch; raises KeyError for an
        unknown net_id (mirrors the original behavior).
        """
        if self.nets[net_id]['tenant-id'] == tenant_id:
            del self.nets[net_id]

    def network_exists(self, tenant_id, net_id):
        """Return True iff net_id exists and is owned by tenant_id."""
        try:
            return self.nets[net_id]['tenant-id'] == tenant_id
        except KeyError:
            return False

    def _confirm_not_attached(self, interface_id):
        """Raise if interface_id is already attached to any port."""
        for n in self.nets.values():
            for p in n['ports'].values():
                if p['attachment-id'] == interface_id:
                    # BUG FIX: interpolate outside _() so the message id
                    # looked up in the gettext catalog is the untranslated
                    # format string, not an already-substituted one.
                    raise Exception(_("interface '%s' is already attached") %
                                    interface_id)

    def create_and_attach_port(self, tenant_id, net_id, interface_id):
        """Create a new ACTIVE port on net_id attached to interface_id."""
        if not self.network_exists(tenant_id, net_id):
            # BUG FIX: the original spec was '%(tenant_id)' with no trailing
            # 's', which raises "ValueError: incomplete format" when this
            # error path fires; also moved the interpolation outside _().
            raise Exception(
                _("network %(net_id)s does not exist "
                  "for tenant %(tenant_id)s") % locals())
        self._confirm_not_attached(interface_id)
        uuid = str(utils.gen_uuid())
        self.nets[net_id]['ports'][uuid] = \
                {"port-state": "ACTIVE",
                 "attachment-id": interface_id}

    def detach_and_delete_port(self, tenant_id, net_id, port_id):
        """Remove port_id from net_id; raise NotFound for a bad network."""
        if not self.network_exists(tenant_id, net_id):
            # moved the interpolation outside _() for correct i18n lookup
            raise exception.NotFound(
                _("network %(net_id)s does not exist "
                  "for tenant %(tenant_id)s") % locals())
        del self.nets[net_id]['ports'][port_id]

    def get_port_by_attachment(self, tenant_id, attachment_id):
        """Return (net_id, port_id) holding attachment_id, else (None, None)."""
        for net_id, n in self.nets.items():
            if n['tenant-id'] == tenant_id:
                for port_id, p in n['ports'].items():
                    if p['attachment-id'] == attachment_id:
                        return (net_id, port_id)
        return (None, None)
# Fixture data: four nova network records used by the Quantum tests below.
# fake_project1 and fake_project2 own private networks; "public" has no
# owning project (project_id None).  'priority' orders NIC attachment
# (lower first) once the manager supports it.
networks = [{'label': 'project1-net1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:1db8::/64',
             'gateway_v6': '2001:1db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': None,
             'vpn_public_address': None,
             'project_id': 'fake_project1',
             'priority': 1},
            # first of two networks owned by fake_project2
            {'label': 'project2-net1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:1db9::/64',
             'gateway_v6': '2001:1db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': None,
             'project_id': 'fake_project2',
             'priority': 1},
            # shared/public network: no project_id, highest priority (0)
            {'label': "public",
             'injected': False,
             'multi_host': False,
             'cidr': '10.0.0.0/24',
             'cidr_v6': '2001:1dba::/64',
             'gateway_v6': '2001:1dba::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '10.0.0.1',
             'broadcast': '10.0.0.255',
             'dns1': '10.0.0.1',
             'dns2': '10.0.0.2',
             'vlan': None,
             'host': None,
             'project_id': None,
             'priority': 0},
            # second network owned by fake_project2, lowest priority (2)
            {'label': "project2-net2",
             'injected': False,
             'multi_host': False,
             'cidr': '9.0.0.0/24',
             'cidr_v6': '2001:1dbb::/64',
             'gateway_v6': '2001:1dbb::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '9.0.0.1',
             'broadcast': '9.0.0.255',
             'dns1': '9.0.0.1',
             'dns2': '9.0.0.2',
             'vlan': None,
             'host': None,
             'project_id': "fake_project2",
             'priority': 2}]
# this is a base class to be used by all other Quantum Test classes
class QuantumTestCaseBase(object):
    """Mixin with the common Quantum network-manager tests.

    Concrete test cases mix this in with test.TestCase and provide
    ``self.net_man`` (a configured QuantumManager) in their setUp.
    """

    def test_create_and_delete_nets(self):
        self._create_nets()
        self._delete_nets()

    def _create_nets(self):
        # Build every fixture network under its owning project.
        for net in networks:
            ctx = context.RequestContext('user1', net['project_id'])
            self.net_man.create_networks(
                ctx,
                label=net['label'], cidr=net['cidr'],
                multi_host=net['multi_host'],
                num_networks=1, network_size=256, cidr_v6=net['cidr_v6'],
                gateway_v6=net['gateway_v6'], bridge=None,
                bridge_interface=None, dns1=net['dns1'],
                dns2=net['dns2'], project_id=net['project_id'],
                priority=net['priority'])

    def _delete_nets(self):
        # Tear each fixture network back down, keyed by its v4 cidr.
        for net in networks:
            ctx = context.RequestContext('user1', net['project_id'])
            self.net_man.delete_network(ctx, net['cidr'])

    def test_allocate_and_deallocate_instance_static(self):
        self._create_nets()
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        instance_ref = db.api.instance_create(ctx,
                                {"project_id": project_id})
        nw_info = self.net_man.allocate_for_instance(ctx,
                        instance_id=instance_ref['id'], host="",
                        instance_type_id=instance_ref['instance_type_id'],
                        project_id=project_id)
        self.assertEquals(len(nw_info), 2)

        # we don't know which order the NICs will be in until we
        # introduce the notion of priority
        (net0, info0), (net1, info1) = nw_info

        # v4 cidr
        self.assertTrue(net0['cidr'].startswith("10."))
        self.assertTrue(net1['cidr'].startswith("192."))

        # v4 address
        self.assertTrue(info0['ips'][0]['ip'].startswith("10."))
        self.assertTrue(info1['ips'][0]['ip'].startswith("192."))

        # v6 cidr
        self.assertTrue(net0['cidr_v6'].startswith("2001:1dba:"))
        self.assertTrue(net1['cidr_v6'].startswith("2001:1db8:"))

        # v6 address
        self.assertTrue(info0['ip6s'][0]['ip'].startswith("2001:1dba:"))
        self.assertTrue(info1['ip6s'][0]['ip'].startswith("2001:1db8:"))

        self.net_man.deallocate_for_instance(ctx,
                        instance_id=instance_ref['id'],
                        project_id=project_id)
        self._delete_nets()

    def test_allocate_and_deallocate_instance_dynamic(self):
        self._create_nets()
        project_id = "fake_project2"
        ctx = context.RequestContext('user1', project_id)
        net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
        requested_networks = [(net_id, None) for net_id in net_ids]

        self.net_man.validate_networks(ctx, requested_networks)

        instance_ref = db.api.instance_create(ctx,
                                {"project_id": project_id})
        nw_info = self.net_man.allocate_for_instance(ctx,
                        instance_id=instance_ref['id'], host="",
                        instance_type_id=instance_ref['instance_type_id'],
                        project_id=project_id,
                        requested_networks=requested_networks)
        self.assertEquals(len(nw_info), 2)

        # we don't know which order the NICs will be in until we
        # introduce the notion of priority, so accept either ordering

        # v4 cidr
        self.assertTrue(any(net['cidr'].startswith("9.")
                            for net, info in nw_info))
        self.assertTrue(any(net['cidr'].startswith("192.")
                            for net, info in nw_info))

        # v4 address
        self.assertTrue(any(info['ips'][0]['ip'].startswith("9.")
                            for net, info in nw_info))
        self.assertTrue(any(info['ips'][0]['ip'].startswith("192.")
                            for net, info in nw_info))

        # v6 cidr
        self.assertTrue(any(net['cidr_v6'].startswith("2001:1dbb:")
                            for net, info in nw_info))
        self.assertTrue(any(net['cidr_v6'].startswith("2001:1db9:")
                            for net, info in nw_info))

        # v6 address
        self.assertTrue(any(info['ip6s'][0]['ip'].startswith("2001:1dbb:")
                            for net, info in nw_info))
        self.assertTrue(any(info['ip6s'][0]['ip'].startswith("2001:1db9:")
                            for net, info in nw_info))

        self.net_man.deallocate_for_instance(ctx,
                        instance_id=instance_ref['id'],
                        project_id=project_id)
        self._delete_nets()

    def test_validate_bad_network(self):
        ctx = context.RequestContext('user1', 'fake_project1')
        self.assertRaises(exception.NetworkNotFound,
                          self.net_man.validate_networks,
                          ctx, [("", None)])
class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase):
    """Runs the QuantumTestCaseBase tests with a QuantumManager backed by
    the nova-native IPAM library and the in-process fake Quantum client."""

    def setUp(self):
        super(QuantumNovaIPAMTestCase, self).setUp()

        # Use the fake connection so no remote Quantum service is required.
        self.net_man = quantum_manager.QuantumManager(
                ipam_lib="nova.network.quantum.nova_ipam_lib",
                q_conn=FakeQuantumClientConnection())

        # Tests seem to create some networks by default, which
        # we don't want.  So we delete them.
        ctx = context.RequestContext('user1', 'fake_project1').elevated()
        for n in db.network_get_all(ctx):
            db.network_delete_safe(ctx, n['id'])

        # Other unit tests (e.g., test_compute.py) have a nasty
        # habit of creating fixed IPs and not cleaning up, which
        # can confuse these tests, so we remove all existing fixed
        # ips before starting.
        session = get_session()
        result = session.query(models.FixedIp).all()
        with session.begin():
            for fip_ref in result:
                session.delete(fip_ref)