moved migration again & trunk merge

Sandy Walsh
2011-06-29 06:24:09 -07:00
72 changed files with 3036 additions and 385 deletions

View File

@@ -47,3 +47,7 @@
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
<reldan@oscloud.ru> <enugaev@griddynamics.com>
<kshileev@gmail.com> <kshileev@griddynamics.com>

View File

@@ -22,14 +22,14 @@ David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eldar Nugaev <reldan@oscloud.ru>
Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Ilya Alekseyev <ilyaalekseyev@acm.org>
Isaku Yamahata <yamahata@valinux.co.jp>
Jason Cannavale <jason.cannavale@rackspace.com>
Jason Koelker <jason@koelker.net>
@@ -53,6 +53,7 @@ Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Kirill Shileev <kshileev@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>

bin/instance-usage-audit (new executable file, 116 lines)
View File

@@ -0,0 +1,116 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cron script to generate usage notifications for instances neither created
nor destroyed in a given time period.
Together with the notifications generated by compute on instance
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate instance usage
for each tenant.
Time periods are specified like so:
<number>[mdy]
1m = previous month. If the script is run April 1, it will generate usages
for March 1 thru March 31.
3m = 3 previous months.
90d = previous 90 days.
1y = previous year. If run on Jan 1, it generates usages for
Jan 1 thru Dec 31 of the previous year.
"""
import datetime
import gettext
import os
import sys
import time
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.notifier import api as notifier_api
FLAGS = flags.FLAGS
flags.DEFINE_string('instance_usage_audit_period', '1m',
'time period to generate instance usages for.')
def time_period(period):
today = datetime.date.today()
unit = period[-1]
if unit not in 'mdy':
raise ValueError('Time period must be m, d, or y')
n = int(period[:-1])
if unit == 'm':
year = today.year - (n // 12)
n = n % 12
if n >= today.month:
year -= 1
month = 12 + (today.month - n)
else:
month = today.month - n
begin = datetime.datetime(day=1, month=month, year=year)
end = datetime.datetime(day=1, month=today.month, year=today.year)
elif unit == 'y':
begin = datetime.datetime(day=1, month=1, year=today.year - n)
end = datetime.datetime(day=1, month=1, year=today.year)
elif unit == 'd':
b = today - datetime.timedelta(days=n)
begin = datetime.datetime(day=b.day, month=b.month, year=b.year)
end = datetime.datetime(day=today.day,
month=today.month,
year=today.year)
return (begin, end)
if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
begin, end = time_period(FLAGS.instance_usage_audit_period)
print "Creating usages for %s until %s" % (str(begin), str(end))
instances = db.instance_get_active_by_window(context.get_admin_context(),
begin,
end)
print "%s instances" % len(instances)
for instance_ref in instances:
usage_info = utils.usage_from_instance(instance_ref,
audit_period_begining=str(begin),
audit_period_ending=str(end))
notifier_api.notify('compute.%s' % FLAGS.host,
'compute.instance.exists',
notifier_api.INFO,
usage_info)
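
As a quick sanity check on the month arithmetic in time_period() above, here is a minimal standalone sketch of the 'm' branch with a pinned "today"; the dates are illustrative, not taken from the commit.

# Mirrors the 'm' branch of time_period() above with a fixed "today" so the
# window boundaries can be verified by hand. Dates are examples only.
import datetime


def month_window(today, n):
    year = today.year - (n // 12)
    n = n % 12
    if n >= today.month:
        year -= 1
        month = 12 + (today.month - n)
    else:
        month = today.month - n
    begin = datetime.datetime(day=1, month=month, year=year)
    end = datetime.datetime(day=1, month=today.month, year=today.year)
    return begin, end


# Run on April 1, '1m' covers March 1 up to (but not including) April 1.
assert month_window(datetime.date(2011, 4, 1), 1) == \
    (datetime.datetime(2011, 3, 1), datetime.datetime(2011, 4, 1))
# '3m' evaluated in February reaches back across the year boundary.
assert month_window(datetime.date(2011, 2, 1), 3) == \
    (datetime.datetime(2010, 11, 1), datetime.datetime(2011, 2, 1))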

View File

@@ -137,8 +137,9 @@ if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
server = wsgi.Server()
acp_port = FLAGS.ajax_console_proxy_port
acp = AjaxConsoleProxy()
acp.register_listeners()
server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
server.start()
server.wait()
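
The same wsgi.Server change repeats in nova-direct-api, nova-objectstore, and nova-vncproxy below: the server is now given a name and its application, host, and port at construction time, and start() takes no arguments. A hedged sketch of the new pattern, using a placeholder WSGI app and port rather than values from the diff:

# Sketch of the wsgi.Server usage introduced by this commit; the service name,
# app, and port are placeholders.
from nova import wsgi


def app(environ, start_response):
    # Stand-in for AjaxConsoleProxy, the S3 router, etc.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['ok\n']

server = wsgi.Server("Example Service", app, host='0.0.0.0', port=8080)
server.start()
server.wait()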

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python
# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -18,44 +17,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova API."""
"""Starter script for Nova API.
Starts both the EC2 and OpenStack APIs in separate processes.
"""
import gettext
import os
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import version
from nova import wsgi
import nova.service
import nova.utils
LOG = logging.getLogger('nova.api')
def main():
"""Launch EC2 and OSAPI services."""
nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
ec2 = nova.service.WSGIService("ec2")
osapi = nova.service.WSGIService("osapi")
launcher = nova.service.Launcher()
launcher.launch_service(ec2)
launcher.launch_service(osapi)
try:
launcher.wait()
except KeyboardInterrupt:
launcher.stop()
FLAGS = flags.FLAGS
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
LOG.audit(_("Starting nova-api node (version %s)"),
version.version_string_with_vcs())
LOG.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
flag_get = FLAGS.get(flag, None)
LOG.debug("%(flag)s : %(flag_get)s" % locals())
service = service.serve_wsgi(service.ApiService)
service.wait()
sys.exit(main())

View File

@@ -93,6 +93,9 @@ if __name__ == '__main__':
with_req = direct.PostParamsMiddleware(with_json)
with_auth = direct.DelegatedAuthMiddleware(with_req)
server = wsgi.Server()
server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
server = wsgi.Server("Direct API",
with_auth,
host=FLAGS.direct_host,
port=FLAGS.direct_port)
server.start()
server.wait()

View File

@@ -56,11 +56,11 @@
import gettext
import glob
import json
import netaddr
import os
import sys
import time
import IPy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -257,6 +257,11 @@ class RoleCommands(object):
"""adds role to user
if project is specified, adds project specific role
arguments: user, role [project]"""
if project:
projobj = self.manager.get_project(project)
if not projobj.has_member(user):
print "%s not a member of %s" % (user, project)
return
self.manager.add_role(user, role, project)
def has(self, user, role, project=None):
@@ -513,7 +518,7 @@ class FloatingIpCommands(object):
def create(self, host, range):
"""Creates floating ips for host by range
arguments: host ip_range"""
for address in IPy.IP(range):
for address in netaddr.IPNetwork(range):
db.floating_ip_create(context.get_admin_context(),
{'address': str(address),
'host': host})
@@ -521,7 +526,7 @@ class FloatingIpCommands(object):
def delete(self, ip_range):
"""Deletes floating ips by range
arguments: range"""
for address in IPy.IP(ip_range):
for address in netaddr.IPNetwork(ip_range):
db.floating_ip_destroy(context.get_admin_context(),
str(address))
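
The IPy-to-netaddr switch in these FloatingIpCommands hunks leans on netaddr.IPNetwork being iterable over every address in the block, matching the old IPy.IP behaviour; a small illustration (the CIDR is an example):

# netaddr.IPNetwork iterates over all addresses in the block, including the
# network and broadcast addresses, just as IPy.IP did.
import netaddr

addresses = [str(ip) for ip in netaddr.IPNetwork('10.0.0.0/30')]
assert addresses == ['10.0.0.0', '10.0.0.1', '10.0.0.2', '10.0.0.3']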

View File

@@ -50,6 +50,9 @@ if __name__ == '__main__':
FLAGS(sys.argv)
logging.setup()
router = s3server.S3Application(FLAGS.buckets_path)
server = wsgi.Server()
server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
server = wsgi.Server("S3 Objectstore",
router,
port=FLAGS.s3_port,
host=FLAGS.s3_host)
server.start()
server.wait()

View File

@@ -96,6 +96,9 @@ if __name__ == "__main__":
service.serve()
server = wsgi.Server()
server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host)
server = wsgi.Server("VNC Proxy",
with_auth,
host=FLAGS.vncproxy_host,
port=FLAGS.vncproxy_port)
server.start()
server.wait()

View File

@@ -30,3 +30,8 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
import gettext
gettext.install("nova", unicode=1)

View File

@@ -21,7 +21,11 @@ Admin API controller, exposed through http via the api worker.
"""
import base64
import datetime
import netaddr
import urllib
from nova import compute
from nova import db
from nova import exception
from nova import flags
@@ -117,6 +121,9 @@ class AdminController(object):
def __str__(self):
return 'AdminController'
def __init__(self):
self.compute_api = compute.API()
def describe_instance_types(self, context, **_kwargs):
"""Returns all active instance types data (vcpus, memory, etc.)"""
return {'instanceTypeSet': [instance_dict(v) for v in
@@ -324,3 +331,61 @@ class AdminController(object):
rv.append(host_dict(host, compute, instances, volume, volumes,
now))
return {'hosts': rv}
def _provider_fw_rule_exists(self, context, rule):
# TODO(todd): we call this repeatedly, can we filter by protocol?
for old_rule in db.provider_fw_rule_get_all(context):
if all([rule[k] == old_rule[k] for k in ('cidr', 'from_port',
'to_port', 'protocol')]):
return True
return False
def block_external_addresses(self, context, cidr):
"""Add provider-level firewall rules to block incoming traffic."""
LOG.audit(_('Blocking traffic to all projects incoming from %s'),
cidr, context=context)
cidr = urllib.unquote(cidr).decode()
# raise if invalid
netaddr.IPNetwork(cidr)
rule = {'cidr': cidr}
tcp_rule = rule.copy()
tcp_rule.update({'protocol': 'tcp', 'from_port': 1, 'to_port': 65535})
udp_rule = rule.copy()
udp_rule.update({'protocol': 'udp', 'from_port': 1, 'to_port': 65535})
icmp_rule = rule.copy()
icmp_rule.update({'protocol': 'icmp', 'from_port': -1,
'to_port': None})
rules_added = 0
if not self._provider_fw_rule_exists(context, tcp_rule):
db.provider_fw_rule_create(context, tcp_rule)
rules_added += 1
if not self._provider_fw_rule_exists(context, udp_rule):
db.provider_fw_rule_create(context, udp_rule)
rules_added += 1
if not self._provider_fw_rule_exists(context, icmp_rule):
db.provider_fw_rule_create(context, icmp_rule)
rules_added += 1
if not rules_added:
raise exception.ApiError(_('Duplicate rule'))
self.compute_api.trigger_provider_fw_rules_refresh(context)
return {'status': 'OK', 'message': 'Added %s rules' % rules_added}
def describe_external_address_blocks(self, context):
blocks = db.provider_fw_rule_get_all(context)
# NOTE(todd): use a set since we have icmp/udp/tcp rules with same cidr
blocks = set([b.cidr for b in blocks])
blocks = [{'cidr': b} for b in blocks]
return {'externalIpBlockInfo':
list(sorted(blocks, key=lambda k: k['cidr']))}
def remove_external_address_block(self, context, cidr):
LOG.audit(_('Removing ip block from %s'), cidr, context=context)
cidr = urllib.unquote(cidr).decode()
# raise if invalid
netaddr.IPNetwork(cidr)
rules = db.provider_fw_rule_get_all_by_cidr(context, cidr)
for rule in rules:
db.provider_fw_rule_destroy(context, rule['id'])
if rules:
self.compute_api.trigger_provider_fw_rules_refresh(context)
return {'status': 'OK', 'message': 'Deleted %s rules' % len(rules)}

View File

@@ -23,7 +23,7 @@ datastore.
"""
import base64
import IPy
import netaddr
import os
import urllib
import tempfile
@@ -452,7 +452,7 @@ class CloudController(object):
elif cidr_ip:
# If this fails, it throws an exception. This is what we want.
cidr_ip = urllib.unquote(cidr_ip).decode()
IPy.IP(cidr_ip)
netaddr.IPNetwork(cidr_ip)
values['cidr'] = cidr_ip
else:
values['cidr'] = '0.0.0.0/0'

View File

@@ -0,0 +1,126 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The instance type extra specs extension"""
from webob import exc
from nova import db
from nova import quota
from nova.api.openstack import extensions
from nova.api.openstack import faults
from nova.api.openstack import wsgi
class FlavorExtraSpecsController(object):
""" The flavor extra specs API controller for the Openstack API """
def _get_extra_specs(self, context, flavor_id):
extra_specs = db.api.instance_type_extra_specs_get(context, flavor_id)
specs_dict = {}
for key, value in extra_specs.iteritems():
specs_dict[key] = value
return dict(extra_specs=specs_dict)
def _check_body(self, body):
if body == None or body == "":
expl = _('No Request Body')
raise exc.HTTPBadRequest(explanation=expl)
def index(self, req, flavor_id):
""" Returns the list of extra specs for a givenflavor """
context = req.environ['nova.context']
return self._get_extra_specs(context, flavor_id)
def create(self, req, flavor_id, body):
self._check_body(body)
context = req.environ['nova.context']
specs = body.get('extra_specs')
try:
db.api.instance_type_extra_specs_update_or_create(context,
flavor_id,
specs)
except quota.QuotaError as error:
self._handle_quota_error(error)
return body
def update(self, req, flavor_id, id, body):
self._check_body(body)
context = req.environ['nova.context']
if not id in body:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(body) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
try:
db.api.instance_type_extra_specs_update_or_create(context,
flavor_id,
body)
except quota.QuotaError as error:
self._handle_quota_error(error)
return body
def show(self, req, flavor_id, id):
""" Return a single extra spec item """
context = req.environ['nova.context']
specs = self._get_extra_specs(context, flavor_id)
if id in specs['extra_specs']:
return {id: specs['extra_specs'][id]}
else:
return faults.Fault(exc.HTTPNotFound())
def delete(self, req, flavor_id, id):
""" Deletes an existing extra spec """
context = req.environ['nova.context']
db.api.instance_type_extra_specs_delete(context, flavor_id, id)
def _handle_quota_error(self, error):
"""Reraise quota errors as api-specific http exceptions."""
if error.code == "MetadataLimitExceeded":
raise exc.HTTPBadRequest(explanation=error.message)
raise error
class Flavorextraspecs(extensions.ExtensionDescriptor):
def get_name(self):
return "FlavorExtraSpecs"
def get_alias(self):
return "os-flavor-extra-specs"
def get_description(self):
return "Instance type (flavor) extra specs"
def get_namespace(self):
return \
"http://docs.openstack.org/ext/flavor_extra_specs/api/v1.1"
def get_updated(self):
return "2011-06-23T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
'os-extra_specs',
FlavorExtraSpecsController(),
parent=dict(member_name='flavor', collection_name='flavors'))
resources.append(res)
return resources
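
For reference, the request and response body shapes implied by the FlavorExtraSpecsController above; keys and values are placeholders, and the URL layout (flavors/{flavor_id}/os-extra_specs) is inferred from the resource extension rather than stated in the diff.

# Body shapes accepted/returned by FlavorExtraSpecsController above.
# All keys and values are placeholders.
create_body = {'extra_specs': {'cpu_arch': 'x86_64',
                               'xpu_model': 'Tesla 2050'}}

# update() for key 'cpu_arch' expects exactly one item whose key matches the
# id in the URL; anything else raises HTTPBadRequest.
update_body = {'cpu_arch': 'x86_64'}

# show() for key 'cpu_arch' returns a single-item dict of the same shape.
show_response = {'cpu_arch': 'x86_64'}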

View File

@@ -0,0 +1,172 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from webob import exc
from nova import exception
from nova import network
from nova import rpc
from nova.api.openstack import faults
from nova.api.openstack import extensions
def _translate_floating_ip_view(floating_ip):
result = {'id': floating_ip['id'],
'ip': floating_ip['address']}
if 'fixed_ip' in floating_ip:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
else:
result['fixed_ip'] = None
if 'instance' in floating_ip:
result['instance_id'] = floating_ip['instance']['id']
else:
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
return {'floating_ips': [_translate_floating_ip_view(floating_ip)
for floating_ip in floating_ips]}
class FloatingIPController(object):
"""The Floating IPs API controller for the OpenStack API."""
_serialization_metadata = {
'application/xml': {
"attributes": {
"floating_ip": [
"id",
"ip",
"instance_id",
"fixed_ip",
]}}}
def __init__(self):
self.network_api = network.API()
super(FloatingIPController, self).__init__()
def show(self, req, id):
"""Return data about the given floating ip."""
context = req.environ['nova.context']
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return _translate_floating_ip_view(floating_ip)
def index(self, req):
context = req.environ['nova.context']
floating_ips = self.network_api.list_floating_ips(context)
return _translate_floating_ips_view(floating_ips)
def create(self, req, body):
context = req.environ['nova.context']
try:
address = self.network_api.allocate_floating_ip(context)
ip = self.network_api.get_floating_ip_by_ip(context, address)
except rpc.RemoteError as ex:
if ex.exc_type == 'NoMoreAddresses':
raise exception.NoMoreFloatingIps()
else:
raise
return {'allocated': {
"id": ip['id'],
"floating_ip": ip['address']}}
def delete(self, req, id):
context = req.environ['nova.context']
ip = self.network_api.get_floating_ip(context, id)
self.network_api.release_floating_ip(context, address=ip)
return {'released': {
"id": ip['id'],
"floating_ip": ip['address']}}
def associate(self, req, id, body):
""" /floating_ips/{id}/associate fixed ip in body """
context = req.environ['nova.context']
floating_ip = self._get_ip_by_id(context, id)
fixed_ip = body['associate_address']['fixed_ip']
try:
self.network_api.associate_floating_ip(context,
floating_ip, fixed_ip)
except rpc.RemoteError:
raise
return {'associated':
{
"floating_ip_id": id,
"floating_ip": floating_ip,
"fixed_ip": fixed_ip}}
def disassociate(self, req, id, body):
""" POST /floating_ips/{id}/disassociate """
context = req.environ['nova.context']
floating_ip = self.network_api.get_floating_ip(context, id)
address = floating_ip['address']
fixed_ip = floating_ip['fixed_ip']['address']
try:
self.network_api.disassociate_floating_ip(context, address)
except rpc.RemoteError:
raise
return {'disassociated': {'floating_ip': address,
'fixed_ip': fixed_ip}}
def _get_ip_by_id(self, context, value):
"""Checks that value is id and then returns its address."""
return self.network_api.get_floating_ip(context, value)['address']
class Floating_ips(extensions.ExtensionDescriptor):
def get_name(self):
return "Floating_ips"
def get_alias(self):
return "os-floating-ips"
def get_description(self):
return "Floating IPs support"
def get_namespace(self):
return "http://docs.openstack.org/ext/floating_ips/api/v1.1"
def get_updated(self):
return "2011-06-16T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ips',
FloatingIPController(),
member_actions={
'associate': 'POST',
'disassociate': 'POST'})
resources.append(res)
return resources

View File

@@ -16,6 +16,7 @@
# under the License.
from webob import exc
from xml.dom import minidom
from nova import flags
from nova import image
@@ -59,7 +60,7 @@ class Controller(object):
context = req.environ['nova.context']
metadata = self._get_metadata(context, image_id)
if id in metadata:
return {id: metadata[id]}
return {'meta': {id: metadata[id]}}
else:
return faults.Fault(exc.HTTPNotFound())
@@ -77,15 +78,22 @@ class Controller(object):
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
if not id in body:
try:
meta = body['meta']
except KeyError:
expl = _('Incorrect request body format')
raise exc.HTTPBadRequest(explanation=expl)
if not id in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(body) > 1:
if len(meta) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
img = self.image_service.show(context, image_id)
metadata = self._get_metadata(context, image_id, img)
metadata[id] = body[id]
metadata[id] = meta[id]
self._check_quota_limit(context, metadata)
img['properties'] = metadata
self.image_service.update(context, image_id, img, None)
@@ -103,9 +111,55 @@ class Controller(object):
self.image_service.update(context, image_id, img, None)
class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
def __init__(self):
xmlns = wsgi.XMLNS_V11
super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
def _meta_item_to_xml(self, doc, key, value):
node = doc.createElement('meta')
node.setAttribute('key', key)
text = doc.createTextNode(value)
node.appendChild(text)
return node
def _meta_list_to_xml(self, xml_doc, meta_items):
container_node = xml_doc.createElement('metadata')
for (key, value) in meta_items:
item_node = self._meta_item_to_xml(xml_doc, key, value)
container_node.appendChild(item_node)
return container_node
def _meta_list_to_xml_string(self, metadata_dict):
xml_doc = minidom.Document()
items = metadata_dict['metadata'].items()
container_node = self._meta_list_to_xml(xml_doc, items)
self._add_xmlns(container_node)
return container_node.toprettyxml(indent=' ')
def index(self, metadata_dict):
return self._meta_list_to_xml_string(metadata_dict)
def create(self, metadata_dict):
return self._meta_list_to_xml_string(metadata_dict)
def _meta_item_to_xml_string(self, meta_item_dict):
xml_doc = minidom.Document()
item_key, item_value = meta_item_dict.items()[0]
item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
self._add_xmlns(item_node)
return item_node.toprettyxml(indent=' ')
def show(self, meta_item_dict):
return self._meta_item_to_xml_string(meta_item_dict['meta'])
def update(self, meta_item_dict):
return self._meta_item_to_xml_string(meta_item_dict['meta'])
def create_resource():
serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
'application/xml': ImageMetadataXMLSerializer(),
}
return wsgi.Resource(Controller(), serializers=serializers)
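
As a rough illustration of the element built by _meta_item_to_xml() above, a standalone minidom snippet producing the same <meta> node; the key and value are placeholders, and the xmlns normally added via wsgi.XMLNS_V11 is omitted here.

# Builds the same <meta key="...">...</meta> element as _meta_item_to_xml()
# above; 'kernel_id' and '123' are placeholder key/value.
from xml.dom import minidom

doc = minidom.Document()
node = doc.createElement('meta')
node.setAttribute('key', 'kernel_id')
node.appendChild(doc.createTextNode('123'))
assert node.toxml() == '<meta key="kernel_id">123</meta>'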

View File

@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import webob.exc
from nova import compute
@@ -99,21 +101,27 @@ class Controller(object):
raise webob.exc.HTTPBadRequest()
try:
server_id = self._server_id_from_req_data(body)
server_id = self._server_id_from_req(req, body)
image_name = body["image"]["name"]
except KeyError:
raise webob.exc.HTTPBadRequest()
image = self._compute_service.snapshot(context, server_id, image_name)
props = self._get_extra_properties(req, body)
image = self._compute_service.snapshot(context, server_id,
image_name, props)
return dict(image=self.get_builder(req).build(image, detail=True))
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
raise NotImplementedError
def _server_id_from_req_data(self, data):
def _server_id_from_req(self, req, data):
raise NotImplementedError()
def _get_extra_properties(self, req, data):
return {}
class ControllerV10(Controller):
"""Version 1.0 specific controller logic."""
@@ -149,8 +157,12 @@ class ControllerV10(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
def _server_id_from_req_data(self, data):
return data['image']['serverId']
def _server_id_from_req(self, req, data):
try:
return data['image']['serverId']
except KeyError:
msg = _("Expected serverId attribute on server entity.")
raise webob.exc.HTTPBadRequest(explanation=msg)
class ControllerV11(Controller):
@@ -189,8 +201,27 @@ class ControllerV11(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
def _server_id_from_req_data(self, data):
return data['image']['serverRef']
def _server_id_from_req(self, req, data):
try:
server_ref = data['image']['serverRef']
except KeyError:
msg = _("Expected serverRef attribute on server entity.")
raise webob.exc.HTTPBadRequest(explanation=msg)
head, tail = os.path.split(server_ref)
if head and head != os.path.join(req.application_url, 'servers'):
msg = _("serverRef must match request url")
raise webob.exc.HTTPBadRequest(explanation=msg)
return tail
def _get_extra_properties(self, req, data):
server_ref = data['image']['serverRef']
if not server_ref.startswith('http'):
server_ref = os.path.join(req.application_url, 'servers',
server_ref)
return {'instance_ref': server_ref}
def create_resource(version='1.0'):

View File

@@ -46,13 +46,9 @@ class ViewBuilder(object):
except KeyError:
image['status'] = image['status'].upper()
def _build_server(self, image, instance_id):
def _build_server(self, image, image_obj):
"""Indicates that you must use a ViewBuilder subclass."""
raise NotImplementedError
def generate_server_ref(self, server_id):
"""Return an href string pointing to this server."""
return os.path.join(self._url, "servers", str(server_id))
raise NotImplementedError()
def generate_href(self, image_id):
"""Return an href string pointing to this object."""
@@ -60,8 +56,6 @@ class ViewBuilder(object):
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""
properties = image_obj.get("properties", {})
self._format_dates(image_obj)
if "status" in image_obj:
@@ -72,11 +66,7 @@ class ViewBuilder(object):
"name": image_obj.get("name"),
}
if "instance_id" in properties:
try:
self._build_server(image, int(properties["instance_id"]))
except ValueError:
pass
self._build_server(image, image_obj)
if detail:
image.update({
@@ -94,15 +84,21 @@ class ViewBuilder(object):
class ViewBuilderV10(ViewBuilder):
"""OpenStack API v1.0 Image Builder"""
def _build_server(self, image, instance_id):
image["serverId"] = instance_id
def _build_server(self, image, image_obj):
try:
image['serverId'] = int(image_obj['properties']['instance_id'])
except (KeyError, ValueError):
pass
class ViewBuilderV11(ViewBuilder):
"""OpenStack API v1.1 Image Builder"""
def _build_server(self, image, instance_id):
image["serverRef"] = self.generate_server_ref(instance_id)
def _build_server(self, image, image_obj):
try:
image['serverRef'] = image_obj['properties']['instance_ref']
except KeyError:
return
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""

View File

@@ -232,12 +232,14 @@ class XMLDictSerializer(DictSerializer):
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
xmlns = node.getAttribute('xmlns')
if not xmlns and self.xmlns:
node.setAttribute('xmlns', self.xmlns)
self._add_xmlns(node)
return node.toprettyxml(indent=' ', encoding='utf-8')
def _add_xmlns(self, node):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
@@ -356,7 +358,7 @@ class Resource(wsgi.Application):
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.debug("%(method)s %(url)s" % {"method": request.method,
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
try:
@@ -384,7 +386,7 @@ class Resource(wsgi.Application):
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
LOG.debug(msg)
LOG.info(msg)
return response

View File

@@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
pass
class SERVER_DOWN(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
def initialize(_uri):
"""Opens a fake connection with an LDAP server."""
return FakeLDAP()
@@ -202,25 +207,38 @@ def _to_json(unencoded):
return json.dumps(list(unencoded))
server_fail = False
class FakeLDAP(object):
"""Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN
pass
def unbind_s(self):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN
pass
def add_s(self, dn, attr):
"""Add an object with the specified attributes at dn."""
if server_fail:
raise SERVER_DOWN
key = "%s%s" % (self.__prefix, dn)
value_dict = dict([(k, _to_json(v)) for k, v in attr])
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
"""Remove the ldap object at specified dn."""
if server_fail:
raise SERVER_DOWN
Store.instance().delete("%s%s" % (self.__prefix, dn))
def modify_s(self, dn, attrs):
@@ -232,6 +250,9 @@ class FakeLDAP(object):
([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
"""
if server_fail:
raise SERVER_DOWN
store = Store.instance()
key = "%s%s" % (self.__prefix, dn)
@@ -255,6 +276,9 @@ class FakeLDAP(object):
fields -- fields to return. Returns all fields if not specified
"""
if server_fail:
raise SERVER_DOWN
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
store = Store.instance()

View File

@@ -101,6 +101,41 @@ def sanitize(fn):
return _wrapped
class LDAPWrapper(object):
def __init__(self, ldap, url, user, password):
self.ldap = ldap
self.url = url
self.user = user
self.password = password
self.conn = None
def __wrap_reconnect(f):
def inner(self, *args, **kwargs):
if self.conn is None:
self.connect()
return f(self.conn)(*args, **kwargs)
else:
try:
return f(self.conn)(*args, **kwargs)
except self.ldap.SERVER_DOWN:
self.connect()
return f(self.conn)(*args, **kwargs)
return inner
def connect(self):
try:
self.conn = self.ldap.initialize(self.url)
self.conn.simple_bind_s(self.user, self.password)
except self.ldap.SERVER_DOWN:
self.conn = None
raise
search_s = __wrap_reconnect(lambda conn: conn.search_s)
add_s = __wrap_reconnect(lambda conn: conn.add_s)
delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
class LdapDriver(object):
"""Ldap Auth driver
@@ -124,8 +159,8 @@ class LdapDriver(object):
LdapDriver.project_objectclass = 'novaProject'
self.__cache = None
if LdapDriver.conn is None:
LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
FLAGS.ldap_user_dn,
FLAGS.ldap_password)
if LdapDriver.mc is None:
LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
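
A compact standalone sketch of the reconnect-and-retry idea behind LDAPWrapper.__wrap_reconnect above; ServerDown and FakeConn are stand-ins for ldap.SERVER_DOWN and a real LDAP connection.

# Stand-alone illustration of the retry-once-after-reconnect pattern used by
# LDAPWrapper.__wrap_reconnect above. ServerDown/FakeConn are stand-ins.
class ServerDown(Exception):
    pass


class FakeConn(object):
    def __init__(self, healthy):
        self.healthy = healthy

    def search_s(self, base):
        if not self.healthy:
            raise ServerDown()
        return ['result for %s' % base]


class Wrapper(object):
    def __init__(self):
        self.conn = FakeConn(healthy=False)   # simulate a stale connection

    def connect(self):
        self.conn = FakeConn(healthy=True)    # fresh connection

    def __wrap_reconnect(f):
        def inner(self, *args, **kwargs):
            try:
                return f(self.conn)(*args, **kwargs)
            except ServerDown:
                self.connect()                # reconnect, then retry once
                return f(self.conn)(*args, **kwargs)
        return inner

    search_s = __wrap_reconnect(lambda conn: conn.search_s)


assert Wrapper().search_s('ou=people') == ['result for ou=people']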

View File

@@ -500,6 +500,16 @@ class API(base.Base):
{"method": "refresh_security_group_members",
"args": {"security_group_id": group_id}})
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added to or removed from a security_group"""
hosts = [x['host'] for (x, idx)
in db.service_get_all_compute_sorted(context)]
for host in hosts:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{'method': 'refresh_provider_fw_rules', 'args': {}})
def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
@@ -705,7 +715,7 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
def snapshot(self, context, instance_id, name):
def snapshot(self, context, instance_id, name, extra_properties=None):
"""Snapshot the given instance.
:returns: A dict containing image metadata
@@ -713,6 +723,7 @@ class API(base.Base):
properties = {'instance_id': str(instance_id),
'user_id': str(context.user_id),
'image_state': 'creating'}
properties.update(extra_properties or {})
sent_meta = {'name': name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)

View File

@@ -49,10 +49,12 @@ from nova import flags
from nova import log as logging
from nova import manager
from nova import network
from nova import notifier
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
from nova.notifier import api as notifier_api
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -215,6 +217,11 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception
def refresh_provider_fw_rules(self, context, **_kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
self.db.instance_set_state(context,
@@ -338,6 +345,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.create',
notifier_api.INFO,
usage_info)
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -416,9 +428,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance_ref = self.db.instance_get(context.elevated(), instance_id)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.delete',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -455,6 +473,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref,
image_ref=image_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -632,6 +656,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.destroy(instance_ref)
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -679,6 +708,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.revert',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -715,6 +749,13 @@ class ComputeManager(manager.SchedulerDependentManager):
'migration_id': migration_ref['id'],
'instance_id': instance_id, },
})
usage_info = utils.usage_from_instance(instance_ref,
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.prep',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock

View File

@@ -223,6 +223,9 @@ def certificate_update(context, certificate_id, values):
###################
def floating_ip_get(context, floating_ip_id):
return IMPL.floating_ip_get(context, floating_ip_id)
def floating_ip_allocate_address(context, host, project_id):
"""Allocate free floating ip and return the address.
@@ -289,6 +292,11 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_ip(context, ip):
"""Get a floating ip by floating address."""
return IMPL.floating_ip_get_by_ip(context, ip)
def floating_ip_update(context, address, values):
"""Update a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_update(context, address, values)
@@ -434,6 +442,11 @@ def instance_get_all(context):
return IMPL.instance_get_all(context)
def instance_get_active_by_window(context, begin, end=None):
"""Get instances active during a certain time window."""
return IMPL.instance_get_active_by_window(context, begin, end)
def instance_get_all_by_user(context, user_id):
"""Get all instances."""
return IMPL.instance_get_all_by_user(context, user_id)
@@ -1034,6 +1047,29 @@ def security_group_rule_destroy(context, security_group_rule_id):
###################
def provider_fw_rule_create(context, rule):
"""Add a firewall rule at the provider level (all hosts & instances)."""
return IMPL.provider_fw_rule_create(context, rule)
def provider_fw_rule_get_all(context):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_get_all_by_cidr(context, cidr):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all_by_cidr(context, cidr)
def provider_fw_rule_destroy(context, rule_id):
"""Delete a provider firewall rule from the database."""
return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
def user_get(context, id):
"""Get user by id."""
return IMPL.user_get(context, id)
@@ -1316,3 +1352,24 @@ def agent_build_destroy(context, agent_update_id):
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def instance_type_extra_specs_get(context, instance_type_id):
"""Get all extra specs for an instance type."""
return IMPL.instance_type_extra_specs_get(context, instance_type_id)
def instance_type_extra_specs_delete(context, instance_type_id, key):
"""Delete the given extra specs item."""
IMPL.instance_type_extra_specs_delete(context, instance_type_id, key)
def instance_type_extra_specs_update_or_create(context, instance_type_id,
extra_specs):
"""Create or update instance type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
extra_specs)

View File

@@ -428,6 +428,29 @@ def certificate_update(context, certificate_id, values):
###################
@require_context
def floating_ip_get(context, id):
session = get_session()
result = None
if is_admin_context(context):
result = session.query(models.FloatingIp).\
options(joinedload('fixed_ip')).\
options(joinedload_all('fixed_ip.instance')).\
filter_by(id=id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.FloatingIp).\
options(joinedload('fixed_ip')).\
options(joinedload_all('fixed_ip.instance')).\
filter_by(project_id=context.project_id).\
filter_by(id=id).\
filter_by(deleted=False).\
first()
if not result:
raise exception.FloatingIpNotFoundForFixedAddress()
return result
@require_context
@@ -582,7 +605,23 @@ def floating_ip_get_by_address(context, address, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.FloatingIpNotFound(fixed_ip=address)
raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address)
return result
@require_context
def floating_ip_get_by_ip(context, ip, session=None):
if not session:
session = get_session()
result = session.query(models.FloatingIp).\
filter_by(address=ip).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.FloatingIpNotFound(floating_ip=ip)
return result
@@ -722,7 +761,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(fixed_ip=address)
raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@@ -916,6 +955,24 @@ def instance_get_all(context):
all()
@require_admin_context
def instance_get_active_by_window(context, begin, end=None):
"""Return instances that were continuously active over the given window"""
session = get_session()
query = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter(models.Instance.launched_at < begin)
if end:
query = query.filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > end))
else:
query = query.filter(models.Instance.terminated_at == None)
return query.all()
@require_admin_context
def instance_get_all_by_user(context, user_id):
session = get_session()
@@ -2180,6 +2237,46 @@ def security_group_rule_destroy(context, security_group_rule_id):
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
session = get_session()
return session.query(models.ProviderFirewallRule).\
filter_by(deleted=can_read_deleted(context)).\
all()
@require_admin_context
def provider_fw_rule_get_all_by_cidr(context, cidr):
session = get_session()
return session.query(models.ProviderFirewallRule).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(cidr=cidr).\
all()
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
###################
@require_admin_context
def user_get(context, id, session=None):
if not session:
@@ -2557,7 +2654,22 @@ def console_get(context, console_id, instance_id=None):
@require_admin_context
def instance_type_create(_context, values):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain an 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
try:
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
instance_type_ref.save()
@@ -2566,6 +2678,25 @@ def instance_type_create(_context, values):
return instance_type_ref
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance type query returned by sqlalchemy
and returns it as a dictionary, converting the extra_specs
entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value']) for x in \
inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
@require_context
def instance_type_get_all(context, inactive=False):
"""
@@ -2574,17 +2705,19 @@ def instance_type_get_all(context, inactive=False):
session = get_session()
if inactive:
inst_types = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
order_by("name").\
all()
else:
inst_types = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(deleted=False).\
order_by("name").\
all()
if inst_types:
inst_dict = {}
for i in inst_types:
inst_dict[i['name']] = dict(i)
inst_dict[i['name']] = _dict_with_extra_specs(i)
return inst_dict
else:
raise exception.NoInstanceTypesFound()
@@ -2595,12 +2728,14 @@ def instance_type_get_by_id(context, id):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not inst_type:
raise exception.InstanceTypeNotFound(instance_type=id)
else:
return dict(inst_type)
return _dict_with_extra_specs(inst_type)
@require_context
@@ -2608,12 +2743,13 @@ def instance_type_get_by_name(context, name):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not inst_type:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return dict(inst_type)
return _dict_with_extra_specs(inst_type)
@require_context
@@ -2621,12 +2757,13 @@ def instance_type_get_by_flavor_id(context, id):
"""Returns a dict describing specific flavor_id"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(flavorid=int(id)).\
first()
if not inst_type:
raise exception.FlavorNotFound(flavor_id=id)
else:
return dict(inst_type)
return _dict_with_extra_specs(inst_type)
@require_admin_context
@@ -2794,6 +2931,9 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
return metadata
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
@@ -2843,3 +2983,70 @@ def agent_build_update(context, agent_build_id, values):
first()
agent_build_ref.update(values)
agent_build_ref.save(session=session)
####################
@require_context
def instance_type_extra_specs_get(context, instance_type_id):
session = get_session()
spec_results = session.query(models.InstanceTypeExtraSpecs).\
filter_by(instance_type_id=instance_type_id).\
filter_by(deleted=False).\
all()
spec_dict = {}
for i in spec_results:
spec_dict[i['key']] = i['value']
return spec_dict
@require_context
def instance_type_extra_specs_delete(context, instance_type_id, key):
session = get_session()
session.query(models.InstanceTypeExtraSpecs).\
filter_by(instance_type_id=instance_type_id).\
filter_by(key=key).\
filter_by(deleted=False).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def instance_type_extra_specs_get_item(context, instance_type_id, key,
session=None):
if not session:
session = get_session()
spec_result = session.query(models.InstanceTypeExtraSpecs).\
filter_by(instance_type_id=instance_type_id).\
filter_by(key=key).\
filter_by(deleted=False).\
first()
if not spec_result:
raise exception.\
InstanceTypeExtraSpecsNotFound(extra_specs_key=key,
instance_type_id=instance_type_id)
return spec_result
@require_context
def instance_type_extra_specs_update_or_create(context, instance_type_id,
specs):
session = get_session()
spec_ref = None
for key, value in specs.iteritems():
try:
spec_ref = instance_type_extra_specs_get_item(context,
instance_type_id,
key,
session)
except:
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id,
"deleted": 0})
spec_ref.save(session=session)
return specs
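
To make the shape change performed by _dict_with_extra_specs() above concrete, a tiny standalone version of the conversion; the sample instance type and spec keys are illustrative only.

# Stand-alone version of the list-of-dicts -> flat dict conversion done by
# _dict_with_extra_specs() above. Sample values are placeholders.
def flatten_extra_specs(inst_type):
    inst_type = dict(inst_type)
    inst_type['extra_specs'] = dict((x['key'], x['value'])
                                    for x in inst_type['extra_specs'])
    return inst_type


row = {'name': 'm1.tiny',
       'extra_specs': [{'key': 'cpu_arch', 'value': 'x86_64'},
                       {'key': 'xpu_model', 'value': 'Tesla 2050'}]}
assert flatten_extra_specs(row)['extra_specs'] == {'cpu_arch': 'x86_64',
                                                   'xpu_model': 'Tesla 2050'}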

View File

@@ -0,0 +1,74 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from nova import log as logging
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
services = Table('services', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('protocol',
String(length=5, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('from_port', Integer()),
Column('to_port', Integer()),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
for table in (provider_fw_rules,):
try:
table.create()
except Exception:
logging.info(repr(table))
logging.exception('Exception while creating table')
raise

View File

@@ -0,0 +1,67 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instance_types = Table('instance_types', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_type_id',
Integer(),
ForeignKey('instance_types.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
for table in (instance_type_extra_specs_table, ):
try:
table.create()
except Exception:
logging.info(repr(table))
logging.exception('Exception while creating table')
raise
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
for table in (instance_type_extra_specs_table, ):
table.drop()

View File

@@ -493,6 +493,17 @@ class SecurityGroupIngressRule(BASE, NovaBase):
group_id = Column(Integer, ForeignKey('security_groups.id'))
class ProviderFirewallRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
id = Column(Integer, primary_key=True)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(String(255))
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
@@ -705,6 +716,21 @@ class InstanceMetadata(BASE, NovaBase):
'InstanceMetadata.deleted == False)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for an instance_type"""
__tablename__ = 'instance_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == False)')
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
@@ -741,7 +767,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
AgentBuild, InstanceMetadata, Migration)
AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)

View File

@@ -361,6 +361,10 @@ class NoFixedIpsFoundForInstance(NotFound):
class FloatingIpNotFound(NotFound):
message = _("Floating ip %(floating_ip)s not found")
class FloatingIpNotFoundForFixedAddress(NotFound):
message = _("Floating ip not found for fixed address %(fixed_ip)s.")
@@ -504,6 +508,11 @@ class InstanceMetadataNotFound(NotFound):
"key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
message = _("Instance Type %(instance_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class LDAPObjectNotFound(NotFound):
message = _("LDAP object could not be found")
@@ -589,3 +598,11 @@ class MigrationError(NovaException):
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
class PasteConfigNotFound(NotFound):
message = _("Could not find paste config at %(path)s")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")

View File

@@ -59,7 +59,7 @@ class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format',
'container_format']
'container_format', 'checksum']
# NOTE(sirp): Overriding to use _translate_to_service provided by
# BaseImageService

View File

@@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger)
def audit(msg, *args, **kwargs):
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
logging.root.log(AUDIT, msg, *args, **kwargs)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
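WritableLogger lets libraries that expect a file-like object (anything that calls .write()) log through a standard logger instead; a small sketch, assuming the class lives in the nova log module that this hunk belongs to:

    import logging

    from nova import log as nova_logging   # module containing WritableLogger above

    logger = logging.getLogger('nova.wsgi.server')
    log_stream = nova_logging.WritableLogger(logger, level=logging.DEBUG)
    log_stream.write('GET /v1.1/servers 200 len: 340 time: 0.0021')
    # Typical consumer: eventlet.wsgi.server(sock, app, log=log_stream)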

View File

@@ -34,6 +34,19 @@ LOG = logging.getLogger('nova.network')
class API(base.Base):
"""API for interacting with the network manager."""
def get_floating_ip(self, context, id):
rv = self.db.floating_ip_get(context, id)
return dict(rv.iteritems())
def get_floating_ip_by_ip(self, context, address):
res = self.db.floating_ip_get_by_ip(context, address)
return dict(res.iteritems())
def list_floating_ips(self, context):
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return ips
def allocate_floating_ip(self, context):
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeded for %s, tried to allocate '

View File

@@ -20,6 +20,7 @@
import calendar
import inspect
import netaddr
import os
from nova import db
@@ -27,7 +28,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from IPy import IP
LOG = logging.getLogger("nova.linux_net")
@@ -191,6 +191,13 @@ class IptablesTable(object):
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
@@ -700,7 +707,7 @@ def _dnsmasq_cmd(net):
'--listen-address=%s' % net['gateway'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % net['dhcp_start'],
'--dhcp-lease-max=%s' % IP(net['cidr']).len(),
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
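The IPy-to-netaddr switch above changes how the network size feeding --dhcp-lease-max is computed; roughly (illustrative CIDR, not from the diff):

    import netaddr

    # IPy expressed the size as IP('10.0.0.0/24').len(); with netaddr the
    # IPNetwork object itself supports len().
    net_size = len(netaddr.IPNetwork('10.0.0.0/24'))    # 256
    lease_max_arg = '--dhcp-lease-max=%s' % net_size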

View File

@@ -45,10 +45,9 @@ topologies. All of the network commands are issued to a subclass of
import datetime
import math
import netaddr
import socket
import IPy
from nova import context
from nova import db
from nova import exception
@@ -295,8 +294,8 @@ class NetworkManager(manager.SchedulerDependentManager):
def create_networks(self, context, cidr, num_networks, network_size,
cidr_v6, gateway_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
fixed_net = netaddr.IPNetwork(cidr)
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
significant_bits_v6 = 64
network_size_v6 = 1 << 64
count = 1
@@ -305,15 +304,15 @@ class NetworkManager(manager.SchedulerDependentManager):
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
cidr = '%s/%s' % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr)
project_net = netaddr.IPNetwork(cidr)
net = {}
net['bridge'] = FLAGS.flat_network_bridge
net['dns'] = FLAGS.flat_network_dns
net['cidr'] = cidr
net['netmask'] = str(project_net.netmask())
net['gateway'] = str(project_net[1])
net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2])
net['netmask'] = str(project_net.netmask)
net['gateway'] = str(list(project_net)[1])
net['broadcast'] = str(project_net.broadcast)
net['dhcp_start'] = str(list(project_net)[2])
if num_networks > 1:
net['label'] = '%s_%d' % (label, count)
else:
@@ -324,15 +323,16 @@ class NetworkManager(manager.SchedulerDependentManager):
cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
project_net_v6 = IPy.IP(cidr_v6)
project_net_v6 = netaddr.IPNetwork(cidr_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
net['gateway_v6'] = str(gateway_v6)
net['gateway_v6'] = str(list(gateway_v6)[1])
else:
net['gateway_v6'] = str(project_net_v6[1])
net['gateway_v6'] = str(list(project_net_v6)[1])
net['netmask_v6'] = str(project_net_v6.prefixlen())
net['netmask_v6'] = str(project_net_v6._prefixlen)
network_ref = self.db.network_create_safe(context, net)
@@ -356,7 +356,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
project_net = IPy.IP(network_ref['cidr'])
project_net = netaddr.IPNetwork(network_ref['cidr'])
num_ips = len(project_net)
for index in range(num_ips):
address = str(project_net[index])
@@ -546,13 +546,13 @@ class VlanManager(NetworkManager):
' the vlan start cannot be greater'
' than 4094'))
fixed_net = IPy.IP(cidr)
if fixed_net.len() < num_networks * network_size:
fixed_net = netaddr.IPNetwork(cidr)
if len(fixed_net) < num_networks * network_size:
raise ValueError(_('The network range is not big enough to fit '
'%(num_networks)s. Network size is %(network_size)s' %
locals()))
fixed_net_v6 = IPy.IP(cidr_v6)
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
network_size_v6 = 1 << 64
significant_bits_v6 = 64
for index in range(num_networks):
@@ -561,14 +561,14 @@ class VlanManager(NetworkManager):
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
cidr = "%s/%s" % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr)
project_net = netaddr.IPNetwork(cidr)
net = {}
net['cidr'] = cidr
net['netmask'] = str(project_net.netmask())
net['gateway'] = str(project_net[1])
net['broadcast'] = str(project_net.broadcast())
net['vpn_private_address'] = str(project_net[2])
net['dhcp_start'] = str(project_net[3])
net['netmask'] = str(project_net.netmask)
net['gateway'] = str(list(project_net)[1])
net['broadcast'] = str(project_net.broadcast)
net['vpn_private_address'] = str(list(project_net)[2])
net['dhcp_start'] = str(list(project_net)[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
if(FLAGS.use_ipv6):
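A quick side-by-side of the netaddr idioms the converted code now relies on (illustrative addresses, not taken from the diff):

    import netaddr

    project_net = netaddr.IPNetwork('10.0.0.0/24')

    # netmask and broadcast are properties in netaddr, not methods as in IPy
    netmask = str(project_net.netmask)        # '255.255.255.0'
    broadcast = str(project_net.broadcast)    # '10.0.0.255'

    # individual hosts are reached by iterating (or indexing) the network
    gateway = str(list(project_net)[1])       # '10.0.0.1'
    dhcp_start = str(list(project_net)[2])    # '10.0.0.2'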

View File

@@ -56,8 +56,10 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
'other_config': {}}
network_ref = session.call_xenapi('network.create', network_rec)
# 2 - find PIF for VLAN
expr = "field 'device' = '%s' and \
field 'VLAN' = '-1'" % FLAGS.vlan_interface
# NOTE(salvatore-orlando): using double quotes inside single quotes
# as the xapi filter only supports tokens in double quotes
expr = 'field "device" = "%s" and \
field "VLAN" = "-1"' % FLAGS.vlan_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIFs are OK: we are dealing with a pool
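For reference, with a hypothetical FLAGS.vlan_interface of 'eth0' the corrected expression produces a string whose tokens are double-quoted, which is what xapi's expression matching expects:

    vlan_interface = 'eth0'  # stand-in for FLAGS.vlan_interface
    expr = ('field "device" = "%s" and field "VLAN" = "-1"' % vlan_interface)
    # expr == 'field "device" = "eth0" and field "VLAN" = "-1"'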

View File

@@ -0,0 +1,28 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from nova import flags
from nova import log as logging
FLAGS = flags.FLAGS
NOTIFICATIONS = []
def notify(message):
"""Test notifier, stores notifications in memory for unittests."""
NOTIFICATIONS.append(message)
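A test that selects this driver (via the notification_driver flag, as the compute tests further down do) can then assert directly against the in-memory list; a minimal sketch:

    from nova.notifier import test_notifier

    test_notifier.NOTIFICATIONS = []
    # The driver simply records whatever message dict the notifier API hands it.
    test_notifier.notify({'event_type': 'compute.instance.create',
                          'priority': 'INFO',
                          'payload': {'instance_id': 42}})
    assert len(test_notifier.NOTIFICATIONS) == 1
    assert test_notifier.NOTIFICATIONS[0]['priority'] == 'INFO'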

View File

@@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter):
"""Use instance_type to filter hosts."""
return (self._full_name(), instance_type)
def _satisfies_extra_specs(self, capabilities, instance_type):
"""Check that the capabilities provided by the compute service
satisfy the extra specs associated with the instance type"""
if 'extra_specs' not in instance_type:
return True
# Note(lorinh): For now, we are just checking exact matching on the
# values. Later on, we want to handle numerical
# values so we can represent things like number of GPU cards
try:
for key, value in instance_type['extra_specs'].iteritems():
if capabilities[key] != value:
return False
except KeyError:
return False
return True
def filter_hosts(self, zone_manager, query):
"""Return a list of hosts that can create instance_type."""
instance_type = query
@@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter):
disk_bytes = capabilities['disk_available']
spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb']
if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
extra_specs = instance_type['extra_specs']
if host_ram_mb >= spec_ram and \
disk_bytes >= spec_disk and \
self._satisfies_extra_specs(capabilities, instance_type):
selected_hosts.append((host, capabilities))
return selected_hosts
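To make the exact-match semantics of _satisfies_extra_specs concrete, here is a standalone rerun of the same logic with made-up capability data:

    def satisfies_extra_specs(capabilities, instance_type):
        """Mirror of InstanceTypeFilter._satisfies_extra_specs above."""
        if 'extra_specs' not in instance_type:
            return True
        try:
            for key, value in instance_type['extra_specs'].iteritems():
                if capabilities[key] != value:
                    return False
        except KeyError:
            return False
        return True

    caps = {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'}
    assert satisfies_extra_specs(caps, {'extra_specs': {'xpu_arch': 'fermi'}})
    assert not satisfies_extra_specs(caps, {'extra_specs': {'xpu_arch': 'radeon'}})
    assert not satisfies_extra_specs(caps, {'extra_specs': {'missing_key': 'x'}})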

View File

@@ -19,10 +19,12 @@
"""Generic Node baseclass for all workers that run on hosts."""
import greenlet
import inspect
import multiprocessing
import os
import greenlet
from eventlet import greenthread
from nova import context
@@ -36,6 +38,8 @@ from nova import version
from nova import wsgi
LOG = logging.getLogger('nova.service')
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
@@ -53,6 +57,63 @@ flags.DEFINE_string('api_paste_config', "api-paste.ini",
'File name for the paste.deploy config for nova-api')
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self._services = []
@staticmethod
def run_service(service):
"""Start and wait for a service to finish.
:param service: Service to run and wait for.
:returns: None
"""
service.start()
try:
service.wait()
except KeyboardInterrupt:
service.stop()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
process = multiprocessing.Process(target=self.run_service,
args=(service,))
process.start()
self._services.append(process)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
for service in self._services:
if service.is_alive():
service.terminate()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
for service in self._services:
service.join()
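Callers drive the Launcher roughly as follows; FakeService is a made-up stand-in with the start/wait/stop interface the class expects:

    import time

    from nova import service


    class FakeService(object):
        """Stand-in exposing the start/wait/stop interface Launcher expects."""
        def start(self):
            pass

        def wait(self):
            time.sleep(1)      # pretend to serve for a second

        def stop(self):
            pass


    launcher = service.Launcher()
    launcher.launch_service(FakeService())   # run_service() runs in a child process
    try:
        launcher.wait()                      # join all child processes
    except KeyboardInterrupt:
        launcher.stop()                      # terminate anything still alive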
class Service(object):
"""Base class for workers that run on hosts."""
@@ -232,45 +293,54 @@ class Service(object):
logging.exception(_('model server went away'))
class WsgiService(object):
"""Base class for WSGI based services.
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
For each api you define, you must also define these flags:
:<api>_listen: The address on which to listen
:<api>_listen_port: The port on which to listen
def __init__(self, name, loader=None):
"""Initialize, but do not start the WSGI service.
"""
:param name: The name of the WSGI service given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
def __init__(self, conf, apis):
self.conf = conf
self.apis = apis
self.wsgi_app = None
"""
self.name = name
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port)
def start(self):
self.wsgi_app = _run_wsgi(self.conf, self.apis)
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
self.server.start()
self.port = self.server.port
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
self.wsgi_app.wait()
"""Wait for the service to stop serving this API.
def get_socket_info(self, api_name):
"""Returns the (host, port) that an API was started on."""
return self.wsgi_app.socket_info[api_name]
:returns: None
class ApiService(WsgiService):
"""Class for our nova-api service."""
@classmethod
def create(cls, conf=None):
if not conf:
conf = wsgi.paste_config_file(FLAGS.api_paste_config)
if not conf:
message = (_('No paste configuration found for: %s'),
FLAGS.api_paste_config)
raise exception.Error(message)
api_endpoints = ['ec2', 'osapi']
service = cls(conf, api_endpoints)
return service
"""
self.server.wait()
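The new class is exercised later in the integrated tests; in short, usage looks like this (the osapi_listen / osapi_listen_port flags drive the bind address, and port 0 means "pick a free port"):

    from nova import service

    osapi = service.WSGIService("osapi")
    osapi.start()                 # osapi.port now holds the real port, even if 0 was configured
    auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port)
    osapi.wait()                  # block until the server is stopped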
def serve(*services):
@@ -302,48 +372,3 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
def serve_wsgi(cls, conf=None):
try:
service = cls.create(conf)
except Exception:
logging.exception('in WsgiService.create()')
raise
finally:
# After we've loaded up all our dynamic bits, check
# whether we should print help
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
FLAGS.ParseNewFlags()
service.start()
return service
def _run_wsgi(paste_config_file, apis):
logging.debug(_('Using paste.deploy config at: %s'), paste_config_file)
apps = []
for api in apis:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
logging.debug(_('No paste configuration for app: %s'), api)
continue
logging.debug(_('App Config: %(api)s\n%(config)r') % locals())
logging.info(_('Running %s API'), api)
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app,
getattr(FLAGS, '%s_listen_port' % api),
getattr(FLAGS, '%s_listen' % api),
api))
if len(apps) == 0:
logging.error(_('No known API applications configured in %s.'),
paste_config_file)
return
server = wsgi.Server()
for app in apps:
server.start(*app)
return server

View File

@@ -38,7 +38,6 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import service
from nova import wsgi
from nova.virt import fake
@@ -81,7 +80,6 @@ class TestCase(unittest.TestCase):
self.injected = []
self._services = []
self._monkey_patch_attach()
self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
@@ -107,7 +105,6 @@ class TestCase(unittest.TestCase):
# Reset our monkey-patches
rpc.Consumer.attach_to_eventlet = self.original_attach
wsgi.Server.start = self.original_start
# Stop any timers
for x in self.injected:
@@ -163,26 +160,6 @@ class TestCase(unittest.TestCase):
_wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
def _monkey_patch_wsgi(self):
"""Allow us to kill servers spawned by wsgi.Server."""
self.original_start = wsgi.Server.start
@functools.wraps(self.original_start)
def _wrapped_start(inner_self, *args, **kwargs):
original_spawn_n = inner_self.pool.spawn_n
@functools.wraps(original_spawn_n)
def _wrapped_spawn_n(*args, **kwargs):
rv = greenthread.spawn(*args, **kwargs)
self._services.append(rv)
inner_self.pool.spawn_n = _wrapped_spawn_n
self.original_start(inner_self, *args, **kwargs)
inner_self.pool.spawn_n = original_spawn_n
_wrapped_start.func_name = self.original_start.func_name
wsgi.Server.start = _wrapped_start
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.

View File

@@ -50,7 +50,7 @@ def setup():
testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
if os.path.exists(testdb):
os.unlink(testdb)
return
migration.db_sync()
ctxt = context.get_admin_context()
network_manager.VlanManager().create_networks(ctxt,

View File

@@ -0,0 +1,15 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -0,0 +1,186 @@
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import context
from nova import db
from nova import test
from nova import network
from nova.tests.api.openstack import fakes
from nova.api.openstack.contrib.floating_ips import FloatingIPController
from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view
def network_api_get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10',
'fixed_ip': {'address': '11.0.0.1'}}
def network_api_list_floating_ips(self, context):
return [{'id': 1,
'address': '10.10.10.10',
'instance': {'id': 11},
'fixed_ip': {'address': '10.0.0.1'}},
{'id': 2,
'address': '10.10.10.11'}]
def network_api_allocate(self, context):
return '10.10.10.10'
def network_api_release(self, context, address):
pass
def network_api_associate(self, context, floating_ip, fixed_ip):
pass
def network_api_disassociate(self, context, floating_address):
pass
class FloatingIpTest(test.TestCase):
address = "10.10.10.10"
def _create_floating_ip(self):
"""Create a floating ip object."""
host = "fake_host"
return db.floating_ip_create(self.context,
{'address': self.address,
'host': host})
def _delete_floating_ip(self):
db.floating_ip_destroy(self.context, self.address)
def setUp(self):
super(FloatingIpTest, self).setUp()
self.controller = FloatingIPController()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "list_floating_ips",
network_api_list_floating_ips)
self.stubs.Set(network.api.API, "allocate_floating_ip",
network_api_allocate)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
self.stubs.Set(network.api.API, "associate_floating_ip",
network_api_associate)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
self.context = context.get_admin_context()
self._create_floating_ip()
def tearDown(self):
self.stubs.UnsetAll()
self._delete_floating_ip()
super(FloatingIpTest, self).tearDown()
def test_translate_floating_ip_view(self):
floating_ip_address = self._create_floating_ip()
floating_ip = db.floating_ip_get_by_address(self.context,
floating_ip_address)
view = _translate_floating_ip_view(floating_ip)
self.assertTrue('floating_ip' in view)
self.assertTrue(view['floating_ip']['id'])
self.assertEqual(view['floating_ip']['ip'], self.address)
self.assertEqual(view['floating_ip']['fixed_ip'], None)
self.assertEqual(view['floating_ip']['instance_id'], None)
def test_floating_ips_list(self):
req = webob.Request.blank('/v1.1/os-floating-ips')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
response = {'floating_ips': [{'floating_ip': {'instance_id': 11,
'ip': '10.10.10.10',
'fixed_ip': '10.0.0.1',
'id': 1}},
{'floating_ip': {'instance_id': None,
'ip': '10.10.10.11',
'fixed_ip': None,
'id': 2}}]}
self.assertEqual(res_dict, response)
def test_floating_ip_show(self):
req = webob.Request.blank('/v1.1/os-floating-ips/1')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['fixed_ip'], '11.0.0.1')
self.assertEqual(res_dict['floating_ip']['instance_id'], None)
def test_floating_ip_allocate(self):
req = webob.Request.blank('/v1.1/os-floating-ips')
req.method = 'POST'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
ip = json.loads(res.body)['allocated']
expected = {
"id": 1,
"floating_ip": '10.10.10.10'}
self.assertEqual(ip, expected)
def test_floating_ip_release(self):
req = webob.Request.blank('/v1.1/os-floating-ips/1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
actual = json.loads(res.body)['released']
expected = {
"id": 1,
"floating_ip": '10.10.10.10'}
self.assertEqual(actual, expected)
def test_floating_ip_associate(self):
body = dict(associate_address=dict(fixed_ip='1.2.3.4'))
req = webob.Request.blank('/v1.1/os-floating-ips/1/associate')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
actual = json.loads(res.body)['associated']
expected = {
"floating_ip_id": '1',
"floating_ip": "10.10.10.10",
"fixed_ip": "1.2.3.4"}
self.assertEqual(actual, expected)
def test_floating_ip_disassociate(self):
req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate')
req.method = 'POST'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
ip = json.loads(res.body)['disassociated']
expected = {
"floating_ip": '10.10.10.10',
"fixed_ip": '11.0.0.1'}
self.assertEqual(ip, expected)

View File

@@ -0,0 +1,198 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import unittest
import webob
import os.path
from nova import flags
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.tests.api.openstack import fakes
import nova.wsgi
FLAGS = flags.FLAGS
def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
def return_empty_flavor_extra_specs(context, flavor_id):
return {}
def delete_flavor_extra_specs(context, flavor_id, key):
pass
def stub_flavor_extra_specs():
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
class FlavorsExtraSpecsTest(unittest.TestCase):
def setUp(self):
super(FlavorsExtraSpecsTest, self).setUp()
FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
"extensions")
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
self.mware = auth.AuthMiddleware(
extensions.ExtensionMiddleware(
openstack.APIRouterV11()))
def tearDown(self):
self.stubs.UnsetAll()
super(FlavorsExtraSpecsTest, self).tearDown()
def test_index(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
request = webob.Request.blank('/flavors/1/os-extra_specs')
res = request.get_response(self.mware)
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs')
res = req.get_response(self.mware)
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
res = req.get_response(self.mware)
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key6')
res = req.get_response(self.mware)
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete',
delete_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
req.method = 'DELETE'
res = req.get_response(self.mware)
self.assertEqual(200, res.status_int)
def test_create(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs')
req.method = 'POST'
req.body = '{"extra_specs": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_create_empty_body(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs')
req.method = 'POST'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)
def test_update_item(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_empty_body(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.body = '{"key1": "value1", "key2": "value2"}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/bad')
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)

View File

@@ -16,7 +16,6 @@
# under the License.
import copy
import json
import random
import string
@@ -29,11 +28,11 @@ from glance.common import exception as glance_exc
from nova import context
from nova import exception as exc
from nova import flags
from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.api.openstack import versions
from nova.api.openstack import limits
from nova.auth.manager import User, Project
@@ -82,7 +81,8 @@ def wsgi_app(inner_app10=None, inner_app11=None):
api10 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app10)))
api11 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app11)))
limits.RateLimitingMiddleware(
extensions.ExtensionMiddleware(inner_app11))))
mapper['/v1.0'] = api10
mapper['/v1.1'] = api11
mapper['/'] = openstack.FaultWrapper(versions.Versions())
@@ -140,9 +140,10 @@ def stub_out_networking(stubs):
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance_id, name):
return dict(id='123', status='ACTIVE',
properties=dict(instance_id='123'))
def snapshot(self, context, instance_id, name, extra_properties=None):
props = dict(instance_id=instance_id, instance_ref=instance_id)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
stubs.Set(nova.compute.API, 'snapshot', snapshot)

View File

@@ -19,6 +19,7 @@ import json
import stubout
import unittest
import webob
import xml.dom.minidom as minidom
from nova import flags
@@ -37,6 +38,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image1',
'deleted': False,
'container_format': None,
'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -52,6 +54,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image2',
'deleted': False,
'container_format': None,
'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -67,6 +70,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image3',
'deleted': False,
'container_format': None,
'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -103,7 +107,34 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('value1', res_dict['metadata']['key1'])
expected = self.IMAGE_FIXTURES[0]['properties']
self.assertEqual(len(expected), len(res_dict['metadata']))
for (key, value) in res_dict['metadata'].items():
self.assertEqual(value, res_dict['metadata'][key])
def test_index_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.index(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="three">
four
</meta>
<meta key="one">
two
</meta>
</metadata>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_show(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
@@ -111,13 +142,32 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('value1', res_dict['key1'])
self.assertTrue('meta' in res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
def test_show_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.show(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
two
</meta>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_show_not_found(self):
req = webob.Request.blank('/v1.1/images/1/meta/key9')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_create(self):
@@ -135,22 +185,79 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual('value2', res_dict['metadata']['key2'])
self.assertEqual(1, len(res_dict))
def test_create_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'metadata': {
'key9': 'value9',
'key2': 'value2',
'key1': 'value1',
},
}
output = serializer.create(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="key2">
value2
</meta>
<meta key="key9">
value9
</meta>
<meta key="key1">
value1
</meta>
</metadata>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_update_item(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"meta": {"key1": "zz"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertTrue('meta' in res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('zz', res_dict['meta']['key1'])
def test_update_item_bad_body(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"key1": "zz"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('zz', res_dict['key1'])
self.assertEqual(400, res.status_int)
def test_update_item_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.update(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
two
</meta>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_update_item_too_many_keys(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"key1": "value1", "key2": "value2"}'
req.body = '{"meta": {"key1": "value1", "key2": "value2"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
@@ -159,7 +266,7 @@ class ImageMetaDataTest(unittest.TestCase):
req = webob.Request.blank('/v1.1/images/1/meta/bad')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.body = '{"meta": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
@@ -195,7 +302,7 @@ class ImageMetaDataTest(unittest.TestCase):
req = webob.Request.blank('/v1.1/images/3/meta/blah')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"blah": "blah"}'
req.body = '{"meta": {"blah": "blah"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)

View File

@@ -618,7 +618,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 124,
'name': 'queued backup',
'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -626,7 +625,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 125,
'name': 'saving backup',
'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -635,7 +633,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 126,
'name': 'active backup',
'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE'
@@ -643,7 +640,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 127,
'name': 'killed backup',
'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -689,7 +685,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 124,
'name': 'queued backup',
'serverRef': "http://localhost/v1.1/servers/42",
'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -711,7 +707,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 125,
'name': 'saving backup',
'serverRef': "http://localhost/v1.1/servers/42",
'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -734,7 +730,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 126,
'name': 'active backup',
'serverRef': "http://localhost/v1.1/servers/42",
'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -756,7 +752,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 127,
'name': 'killed backup',
'serverRef': "http://localhost/v1.1/servers/42",
'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -1002,6 +998,30 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
def test_create_image_v1_1_actual_server_ref(self):
serverRef = 'http://localhost/v1.1/servers/1'
body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
result = json.loads(response.body)
self.assertEqual(result['image']['serverRef'], serverRef)
def test_create_image_v1_1_server_ref_bad_hostname(self):
serverRef = 'http://asdf/v1.1/servers/1'
body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_image_v1_1_xml_serialization(self):
body = dict(image=dict(serverRef='123', name='Backup 1'))
@@ -1018,7 +1038,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
<image
created="None"
id="123"
name="None"
name="Backup 1"
serverRef="http://localhost/v1.1/servers/123"
status="ACTIVE"
updated="None"
@@ -1065,7 +1085,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_id += 1
# Backup for User 1
backup_properties = {'instance_id': '42', 'user_id': '1'}
server_ref = 'http://localhost:8774/v1.1/servers/42'
backup_properties = {'instance_ref': server_ref, 'user_id': '1'}
for status in ('queued', 'saving', 'active', 'killed'):
add_fixture(id=image_id, name='%s backup' % status,
is_public=False, status=status,

View File

@@ -171,16 +171,10 @@ class _IntegratedTestBase(test.TestCase):
self.api = self.user.openstack_api
def _start_api_service(self):
api_service = service.ApiService.create()
api_service.start()
if not api_service:
raise Exception("API Service was None")
self.api_service = api_service
host, port = api_service.get_socket_info('osapi')
self.auth_url = 'http://%s:%s/v1.1' % (host, port)
osapi = service.WSGIService("osapi")
osapi.start()
self.auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port)
LOG.warn(self.auth_url)
def tearDown(self):
self.context.cleanup()

View File

@@ -18,7 +18,7 @@
"""
Base class of Unit Tests for all network models
"""
import IPy
import netaddr
import os
from nova import context

View File

@@ -67,7 +67,18 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
rxtx_cap=200)
rxtx_cap=200,
extra_specs={})
self.gpu_instance_type = dict(name='tiny.gpu',
memory_mb=50,
vcpus=10,
local_gb=500,
flavorid=2,
swap=500,
rxtx_quota=30000,
rxtx_cap=200,
extra_specs={'xpu_arch': 'fermi',
'xpu_info': 'Tesla 2050'})
self.zone_manager = FakeZoneManager()
states = {}
@@ -75,6 +86,18 @@ class HostFilterTestCase(test.TestCase):
states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
self.zone_manager.service_states = states
# Add some extra capabilities to some hosts
host07 = self.zone_manager.service_states['host07']['compute']
host07['xpu_arch'] = 'fermi'
host07['xpu_info'] = 'Tesla 2050'
host08 = self.zone_manager.service_states['host08']['compute']
host08['xpu_arch'] = 'radeon'
host09 = self.zone_manager.service_states['host09']['compute']
host09['xpu_arch'] = 'fermi'
host09['xpu_info'] = 'Tesla 2150'
def tearDown(self):
FLAGS.default_host_filter = self.old_flag
@@ -116,6 +139,17 @@ class HostFilterTestCase(test.TestCase):
self.assertEquals('host05', just_hosts[0])
self.assertEquals('host10', just_hosts[5])
def test_instance_type_filter_extra_specs(self):
hf = host_filter.InstanceTypeFilter()
# filter the hosts that can satisfy the gpu instance type's extra specs
name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
name)
hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(1, len(hosts))
just_hosts = [host for host, caps in hosts]
self.assertEquals('host07', just_hosts[0])
def test_json_filter(self):
hf = host_filter.JsonFilter()
# filter all hosts that can support 50 ram and 500 disk

nova/tests/test_adminapi.py (new file, 111 lines)
View File

@@ -0,0 +1,111 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import admin
from nova.image import fake
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.adminapi')
class AdminApiTestCase(test.TestCase):
def setUp(self):
super(AdminApiTestCase, self).setUp()
self.flags(connection_type='fake')
self.conn = rpc.Connection.instance()
# set up our cloud
self.api = admin.AdminController()
# set up services
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
host = self.network.get_network_host(self.context.elevated())
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
rpc_cast = rpc.cast
def finish_cast(*args, **kwargs):
rpc_cast(*args, **kwargs)
greenthread.sleep(0.2)
self.stubs.Set(rpc, 'cast', finish_cast)
def tearDown(self):
network_ref = db.project_get_network(self.context,
self.project.id)
db.network_disassociate(self.context, network_ref['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(AdminApiTestCase, self).tearDown()
def test_block_external_ips(self):
"""Make sure provider firewall rules are created."""
result = self.api.block_external_addresses(self.context, '1.1.1.1/32')
self.api.remove_external_address_block(self.context, '1.1.1.1/32')
self.assertEqual('OK', result['status'])
self.assertEqual('Added 3 rules', result['message'])
def test_list_blocked_ips(self):
"""Make sure we can see the external blocks that exist."""
self.api.block_external_addresses(self.context, '1.1.1.2/32')
result = self.api.describe_external_address_blocks(self.context)
num = len(db.provider_fw_rule_get_all(self.context))
self.api.remove_external_address_block(self.context, '1.1.1.2/32')
# we only list IP, not tcp/udp/icmp rules
self.assertEqual(num / 3, len(result['externalIpBlockInfo']))
def test_remove_ip_block(self):
"""Remove ip blocks."""
result = self.api.block_external_addresses(self.context, '1.1.1.3/32')
self.assertEqual('OK', result['status'])
num0 = len(db.provider_fw_rule_get_all(self.context))
result = self.api.remove_external_address_block(self.context,
'1.1.1.3/32')
self.assertEqual('OK', result['status'])
self.assertEqual('Deleted 3 rules', result['message'])
num1 = len(db.provider_fw_rule_get_all(self.context))
self.assert_(num1 < num0)

View File

@@ -25,6 +25,7 @@ from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.auth import fakeldap
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.auth_unittest')
@@ -369,6 +370,15 @@ class _AuthManagerBaseTestCase(test.TestCase):
class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
def test_reconnect_on_server_failure(self):
self.manager.get_users()
fakeldap.server_fail = True
try:
self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users)
finally:
fakeldap.server_fail = False
self.manager.get_users()
class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'

View File

@@ -37,6 +37,7 @@ from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.notifier import test_notifier
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -62,6 +63,7 @@ class ComputeTestCase(test.TestCase):
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
@@ -69,6 +71,7 @@ class ComputeTestCase(test.TestCase):
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
@@ -327,6 +330,50 @@ class ComputeTestCase(test.TestCase):
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
def test_run_instance_usage_notification(self):
"""Ensure run instance generates apropriate usage notification"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.create')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project.id)
self.assertEquals(payload['user_id'], self.user.id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEquals(payload['image_ref'], '1')
self.compute.terminate_instance(self.context, instance_id)
def test_terminate_usage_notification(self):
"""Ensure terminate_instance generates apropriate usage notification"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
test_notifier.NOTIFICATIONS = []
self.compute.terminate_instance(self.context, instance_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.delete')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project.id)
self.assertEquals(payload['user_id'], self.user.id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEquals(payload['image_ref'], '1')
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance_id = self._create_instance()
@@ -378,6 +425,36 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
def test_resize_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
instance_id = self._create_instance()
context = self.context.elevated()
self.compute.run_instance(self.context, instance_id)
test_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_id, {'host': 'foo'})
self.compute.prep_resize(context, instance_id, 1)
migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating')
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project.id)
self.assertEquals(payload['user_id'], self.user.id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEquals(payload['image_ref'], '1')
self.compute.terminate_instance(context, instance_id)
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()

View File

@@ -18,7 +18,7 @@
"""
Unit Tests for flat network code
"""
import IPy
import netaddr
import os
import unittest
@@ -45,8 +45,8 @@ class FlatNetworkTestCase(base.NetworkTestCase):
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
pubnet = IPy.IP(flags.FLAGS.floating_range)
address = str(pubnet[0])
pubnet = netaddr.IPRange(flags.FLAGS.floating_range)
address = str(list(pubnet)[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:

View File

@@ -67,7 +67,8 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
rxtx_cap=200)
rxtx_cap=200,
extra_specs={})
self.zone_manager = FakeZoneManager()
states = {}

View File

@@ -0,0 +1,165 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for instance types extra specs code
"""
from nova import context
from nova import db
from nova import test
from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy import models
class InstanceTypeExtraSpecsTestCase(test.TestCase):
def setUp(self):
super(InstanceTypeExtraSpecsTestCase, self).setUp()
self.context = context.get_admin_context()
values = dict(name="cg1.4xlarge",
memory_mb=22000,
vcpus=8,
local_gb=1690,
flavorid=105)
specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus=2,
xpu_model="Tesla 2050")
values['extra_specs'] = specs
ref = db.api.instance_type_create(self.context,
values)
self.instance_type_id = ref.id
def tearDown(self):
# Remove the instance type from the database
db.api.instance_type_purge(context.get_admin_context(), "cg1.4xlarge")
super(InstanceTypeExtraSpecsTestCase, self).tearDown()
def test_instance_type_specs_get(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050")
actual_specs = db.api.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_extra_specs_delete(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2")
db.api.instance_type_extra_specs_delete(context.get_admin_context(),
self.instance_type_id,
"xpu_model")
actual_specs = db.api.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_extra_specs_update(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Sandy Bridge",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050")
db.api.instance_type_extra_specs_update_or_create(
context.get_admin_context(),
self.instance_type_id,
dict(cpu_model="Sandy Bridge"))
actual_specs = db.api.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_extra_specs_create(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050",
net_arch="ethernet",
net_mbps="10000")
db.api.instance_type_extra_specs_update_or_create(
context.get_admin_context(),
self.instance_type_id,
dict(net_arch="ethernet",
net_mbps=10000))
actual_specs = db.api.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_get_by_id_with_extra_specs(self):
instance_type = db.api.instance_type_get_by_id(
context.get_admin_context(),
self.instance_type_id)
self.assertEquals(instance_type['extra_specs'],
dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050"))
instance_type = db.api.instance_type_get_by_id(
context.get_admin_context(),
5)
self.assertEquals(instance_type['extra_specs'], {})
def test_instance_type_get_by_name_with_extra_specs(self):
instance_type = db.api.instance_type_get_by_name(
context.get_admin_context(),
"cg1.4xlarge")
self.assertEquals(instance_type['extra_specs'],
dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050"))
instance_type = db.api.instance_type_get_by_name(
context.get_admin_context(),
"m1.small")
self.assertEquals(instance_type['extra_specs'], {})
def test_instance_type_get_by_flavor_id_with_extra_specs(self):
instance_type = db.api.instance_type_get_by_flavor_id(
context.get_admin_context(),
105)
self.assertEquals(instance_type['extra_specs'],
dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050"))
instance_type = db.api.instance_type_get_by_flavor_id(
context.get_admin_context(),
2)
self.assertEquals(instance_type['extra_specs'], {})
def test_instance_type_get_all(self):
specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus='2',
xpu_model="Tesla 2050")
types = db.api.instance_type_get_all(context.get_admin_context())
self.assertEquals(types['cg1.4xlarge']['extra_specs'], specs)
self.assertEquals(types['m1.small']['extra_specs'], {})

View File

@@ -799,7 +799,9 @@ class IptablesFirewallTestCase(test.TestCase):
self.network = utils.import_object(FLAGS.network_manager)
class FakeLibvirtConnection(object):
pass
def nwfilterDefineXML(*args, **kwargs):
"""setup_basic_rules in nwfilter calls this."""
pass
self.fake_libvirt_connection = FakeLibvirtConnection()
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
@@ -1035,7 +1037,6 @@ class IptablesFirewallTestCase(test.TestCase):
fakefilter.filterDefineXMLMock
self.fw.nwfilter._conn.nwfilterLookupByName =\
fakefilter.nwfilterLookupByName
instance_ref = self._create_instance_ref()
inst_id = instance_ref['id']
instance = db.instance_get(self.context, inst_id)
@@ -1057,6 +1058,70 @@ class IptablesFirewallTestCase(test.TestCase):
db.instance_destroy(admin_ctxt, instance_ref['id'])
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
nw_info = _create_network_info(1)
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context, 'fake')
admin_ctxt = context.get_admin_context()
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
# FRAGILE: peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
# create a firewall via setup_basic_filtering like libvirt_conn.spawn
# should have a chain with 0 rules
self.fw.setup_basic_filtering(instance_ref, network_info=nw_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info=nw_info)
self.fw.apply_instance_filter(instance_ref)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class NWFilterTestCase(test.TestCase):
def setUp(self):

View File

@@ -18,7 +18,7 @@
"""
Unit Tests for network code
"""
import IPy
import netaddr
import os
from nova import test
@@ -164,3 +164,33 @@ class IptablesManagerTestCase(test.TestCase):
self.assertTrue('-A %s -j run_tests.py-%s' \
% (chain, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
def test_will_empty_chain(self):
self.manager.ipv4['filter'].add_chain('test-chain')
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain')
self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
def test_will_empty_unwrapped_chain(self):
self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
wrap=False)
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
def test_will_not_empty_wrapped_when_unwrapped(self):
self.manager.ipv4['filter'].add_chain('test-chain')
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
def test_will_not_empty_unwrapped_when_wrapped(self):
self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
wrap=False)
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain')
self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))

View File

@@ -70,11 +70,15 @@ class S3APITestCase(test.TestCase):
os.mkdir(FLAGS.buckets_path)
router = s3server.S3Application(FLAGS.buckets_path)
server = wsgi.Server()
server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
self.server = wsgi.Server("S3 Objectstore",
router,
host=FLAGS.s3_host,
port=FLAGS.s3_port)
self.server.start()
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
aws_secret_access_key=self.admin_user.secret,
@@ -145,4 +149,5 @@ class S3APITestCase(test.TestCase):
"""Tear down auth and test server."""
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
self.server.stop()
super(S3APITestCase, self).tearDown()

View File

@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
from nova import wsgi
from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
@@ -349,3 +350,32 @@ class ServiceTestCase(test.TestCase):
serv.stop()
db.service_destroy(ctxt, service_ref['id'])
class TestWSGIService(test.TestCase):
def setUp(self):
super(TestWSGIService, self).setUp()
self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
def test_service_random_port(self):
test_service = service.WSGIService("test_service")
self.assertEquals(0, test_service.port)
test_service.start()
self.assertNotEqual(0, test_service.port)
test_service.stop()
class TestLauncher(test.TestCase):
def setUp(self):
super(TestLauncher, self).setUp()
self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
self.service = service.WSGIService("test_service")
def test_launch_app(self):
self.assertEquals(0, self.service.port)
launcher = service.Launcher()
launcher.launch_service(self.service)
self.assertEquals(0, self.service.port)
launcher.stop()

View File

@@ -18,7 +18,7 @@
"""
Unit Tests for vlan network code
"""
import IPy
import netaddr
import os
from nova import context
@@ -44,8 +44,8 @@ class VlanNetworkTestCase(base.NetworkTestCase):
# TODO(vish): better way of adding floating ips
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
pubnet = IPy.IP(flags.FLAGS.floating_range)
address = str(pubnet[0])
pubnet = netaddr.IPNetwork(flags.FLAGS.floating_range)
address = str(list(pubnet)[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:

95 nova/tests/test_wsgi.py Normal file
View File

@@ -0,0 +1,95 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `nova.wsgi`."""
import os.path
import tempfile
import unittest
import nova.exception
import nova.test
import nova.wsgi
class TestLoaderNothingExists(unittest.TestCase):
"""Loader tests where os.path.exists always returns False."""
def setUp(self):
self._os_path_exists = os.path.exists
os.path.exists = lambda _: False
def test_config_not_found(self):
self.assertRaises(
nova.exception.PasteConfigNotFound,
nova.wsgi.Loader,
)
def tearDown(self):
os.path.exists = self._os_path_exists
class TestLoaderNormalFilesystem(unittest.TestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""
def setUp(self):
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
self.config.flush()
self.loader = nova.wsgi.Loader(self.config.name)
def test_config_found(self):
self.assertEquals(self.config.name, self.loader.config_path)
def test_app_not_found(self):
self.assertRaises(
nova.exception.PasteAppNotFound,
self.loader.load_app,
"non-existant app",
)
def test_app_found(self):
url_parser = self.loader.load_app("test_app")
self.assertEquals("/tmp", url_parser.directory)
def tearDown(self):
self.config.close()
class TestWSGIServer(unittest.TestCase):
"""WSGI server tests."""
def test_no_app(self):
server = nova.wsgi.Server("test_app", None)
self.assertEquals("test_app", server.name)
def test_start_random_port(self):
server = nova.wsgi.Server("test_random_port", None, host="127.0.0.1")
self.assertEqual(0, server.port)
server.start()
self.assertNotEqual(0, server.port)
server.stop()
server.wait()

View File

@@ -46,6 +46,7 @@ from eventlet.green import subprocess
from nova import exception
from nova import flags
from nova import log as logging
from nova import version
LOG = logging.getLogger("nova.utils")
@@ -226,8 +227,10 @@ def novadir():
return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0]
def default_flagfile(filename='nova.conf'):
for arg in sys.argv:
def default_flagfile(filename='nova.conf', args=None):
if args is None:
args = sys.argv
for arg in args:
if arg.find('flagfile') != -1:
break
else:
@@ -239,8 +242,8 @@ def default_flagfile(filename='nova.conf'):
filename = "./nova.conf"
if not os.path.exists(filename):
filename = '/etc/nova/nova.conf'
flagfile = ['--flagfile=%s' % filename]
sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
flagfile = '--flagfile=%s' % filename
args.insert(1, flagfile)
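A hedged sketch of the new call pattern; passing an explicit argument list keeps sys.argv untouched (the list contents here are illustrative only):

    from nova import utils
    import sys

    # Illustrative: let default_flagfile() inject --flagfile into a private
    # copy of the arguments rather than mutating sys.argv directly.
    args = list(sys.argv)
    utils.default_flagfile(args=args)
    # If no existing argument mentioned 'flagfile', args now carries a
    # '--flagfile=...' entry (inserted after args[0]) pointing at whichever
    # nova.conf the lookup found.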
def debug(arg):
@@ -279,6 +282,22 @@ EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def usage_from_instance(instance_ref, **kw):
usage_info = dict(
tenant_id=instance_ref['project_id'],
user_id=instance_ref['user_id'],
instance_id=instance_ref['id'],
instance_type=instance_ref['instance_type']['name'],
instance_type_id=instance_ref['instance_type_id'],
display_name=instance_ref['display_name'],
created_at=str(instance_ref['created_at']),
launched_at=str(instance_ref['launched_at']) \
if instance_ref['launched_at'] else '',
image_ref=instance_ref['image_ref'])
usage_info.update(kw)
return usage_info
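A small illustration of the payload this builds; the instance record and the extra keyword are fabricated for the example and only show the field mapping and the **kw merge:

    # Fabricated instance record, just to show the resulting payload shape.
    instance_ref = {'project_id': 'tenant-1', 'user_id': 'user-1', 'id': 42,
                    'instance_type': {'name': 'm1.small'},
                    'instance_type_id': 2, 'display_name': 'test-vm',
                    'created_at': '2011-06-01 00:00:00',
                    'launched_at': None, 'image_ref': '3'}
    payload = usage_from_instance(instance_ref, event='exists')
    # payload['launched_at'] == ''   (launched_at was None)
    # payload['event'] == 'exists'   (merged in from **kw)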
def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbols.
@@ -526,6 +545,16 @@ def loads(s):
return json.loads(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append(("nova.utils", "dumps", TypeError,
"loads", ValueError))
anyjson.force_implementation("nova.utils")
_semaphores = {}
@@ -741,3 +770,39 @@ def is_uuid_like(val):
if not isinstance(val, basestring):
return False
return (len(val) == 36) and (val.count('-') == 4)
class Bootstrapper(object):
"""Provides environment bootstrapping capabilities for entry points."""
@staticmethod
def bootstrap_binary(argv):
"""Initialize the Nova environment using command line arguments."""
Bootstrapper.setup_flags(argv)
Bootstrapper.setup_logging()
Bootstrapper.log_flags()
@staticmethod
def setup_logging():
"""Initialize logging and log a message indicating the Nova version."""
logging.setup()
logging.audit(_("Nova Version (%s)") %
version.version_string_with_vcs())
@staticmethod
def setup_flags(input_flags):
"""Initialize flags, load flag file, and print help if needed."""
default_flagfile(args=input_flags)
FLAGS(input_flags or [])
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
FLAGS.ParseNewFlags()
@staticmethod
def log_flags():
"""Log the list of all active flags being used."""
logging.audit(_("Currently active flags:"))
for key in FLAGS:
value = FLAGS.get(key, None)
logging.audit(_("%(key)s : %(value)s" % locals()))
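A minimal sketch of how a bin/ entry point could use this helper; the script body is hypothetical and not part of this commit:

    #!/usr/bin/env python
    # Hypothetical entry point illustrating Bootstrapper usage.
    import sys

    from nova import utils

    if __name__ == '__main__':
        # Parses flags (including a default --flagfile), sets up logging,
        # and audits the active flag values.
        utils.Bootstrapper.bootstrap_binary(sys.argv)
        # ... service-specific startup would follow here.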

View File

@@ -191,6 +191,10 @@ class ComputeDriver(object):
def refresh_security_group_members(self, security_group_id):
raise NotImplementedError()
def refresh_provider_fw_rules(self, security_group_id):
"""See: nova/virt/fake.py for docs."""
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance"""
raise NotImplementedError()

View File

@@ -466,6 +466,22 @@ class FakeConnection(driver.ComputeDriver):
"""
return True
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:method:`nova.db.api.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
pass
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return

View File

@@ -38,6 +38,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
import hashlib
import multiprocessing
import netaddr
import os
import random
import re
@@ -53,8 +54,6 @@ from xml.etree import ElementTree
from eventlet import greenthread
from eventlet import tpool
import IPy
from nova import context
from nova import db
from nova import exception
@@ -186,6 +185,7 @@ class LibvirtConnection(driver.ComputeDriver):
if state != power_state.RUNNING:
continue
self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self.firewall_driver.apply_instance_filter(instance)
@@ -1383,6 +1383,9 @@ class LibvirtConnection(driver.ComputeDriver):
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.

View File

@@ -76,6 +76,15 @@ class FirewallDriver(object):
the security group."""
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""Refresh common rules for all hosts/instances from data store.
Gets called when a rule has been added to or removed from
the list of rules (via admin api).
"""
raise NotImplementedError()
def setup_basic_filtering(self, instance, network_info=None):
"""Create rules to block spoofing and allow dhcp.
@@ -207,6 +216,13 @@ class NWFilterFirewall(FirewallDriver):
[base_filter]))
def _ensure_static_filters(self):
"""Static filters are filters that have no need to be IP aware.
There is no configuration or tuneability of these filters, so they
can be set up once and forgotten about.
"""
if self.static_filters_configured:
return
@@ -310,19 +326,21 @@ class NWFilterFirewall(FirewallDriver):
'for %(instance_name)s is not found.') % locals())
def prepare_instance_filter(self, instance, network_info=None):
"""
Creates an NWFilter for the given instance. In the process,
it makes sure the filters for the security groups as well as
the base filter are all in place.
"""Creates an NWFilter for the given instance.
In the process, it makes sure the filters for the provider blocks,
security groups, and base filter are all in place.
"""
if not network_info:
network_info = netutils.get_network_info(instance)
self.refresh_provider_fw_rules()
ctxt = context.get_admin_context()
instance_secgroup_filter_name = \
'%s-secgroup' % (self._instance_filter_name(instance))
#% (instance_filter_name,)
instance_secgroup_filter_children = ['nova-base-ipv4',
'nova-base-ipv6',
@@ -366,7 +384,7 @@ class NWFilterFirewall(FirewallDriver):
for (_n, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
instance_filter_children = [base_filter,
instance_filter_children = [base_filter, 'nova-provider-rules',
instance_secgroup_filter_name]
if FLAGS.allow_project_net_traffic:
@@ -388,6 +406,19 @@ class NWFilterFirewall(FirewallDriver):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
def refresh_provider_fw_rules(self):
"""Update rules for all instances.
This is part of the FirewallDriver API and is called when the
provider firewall rules change in the database. In the
`prepare_instance_filter` we add a reference to the
'nova-provider-rules' filter for each instance's firewall, and
by changing that filter we update them all.
"""
xml = self.provider_fw_to_nwfilter_xml()
return self._define_filter(xml)
def security_group_to_nwfilter_xml(self, security_group_id):
security_group = db.security_group_get(context.get_admin_context(),
security_group_id)
@@ -426,6 +457,43 @@ class NWFilterFirewall(FirewallDriver):
xml += "chain='ipv4'>%s</filter>" % rule_xml
return xml
def provider_fw_to_nwfilter_xml(self):
"""Compose a filter of drop rules from specified cidrs."""
rule_xml = ""
v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
rules = db.provider_fw_rule_get_all(context.get_admin_context())
for rule in rules:
rule_xml += "<rule action='block' direction='in' priority='150'>"
version = netutils.get_ip_version(rule.cidr)
if(FLAGS.use_ipv6 and version == 6):
net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
(v6protocol[rule.protocol], net, prefixlen)
else:
net, mask = netutils.get_net_and_mask(rule.cidr)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
(rule.protocol, net, mask)
if rule.protocol in ['tcp', 'udp']:
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
elif rule.protocol == 'icmp':
LOG.info('rule.protocol: %r, rule.from_port: %r, '
'rule.to_port: %r', rule.protocol,
rule.from_port, rule.to_port)
if rule.from_port != -1:
rule_xml += "type='%s' " % rule.from_port
if rule.to_port != -1:
rule_xml += "code='%s' " % rule.to_port
rule_xml += '/>\n'
rule_xml += "</rule>\n"
xml = "<filter name='nova-provider-rules' "
if(FLAGS.use_ipv6):
xml += "chain='root'>%s</filter>" % rule_xml
else:
xml += "chain='ipv4'>%s</filter>" % rule_xml
return xml
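For illustration, a single IPv4 TCP block rule (cidr 10.99.99.99/32, ports 1-65535, use_ipv6 off) renders roughly as follows; the string below is traced from the concatenation above, not taken from a test fixture:

    # Illustrative output of provider_fw_to_nwfilter_xml() for that one rule.
    expected = (
        "<filter name='nova-provider-rules' chain='ipv4'>"
        "<rule action='block' direction='in' priority='150'>"
        "<tcp srcipaddr='10.99.99.99' srcipmask='255.255.255.255' "
        "dstportstart='1' dstportend='65535' />\n"
        "</rule>\n"
        "</filter>"
    )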
def _instance_filter_name(self, instance, nic_id=None):
if not nic_id:
return 'nova-instance-%s' % (instance['name'])
@@ -453,6 +521,7 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables = linux_net.iptables_manager
self.instances = {}
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
self.basicly_filtered = False
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
@@ -460,10 +529,14 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def setup_basic_filtering(self, instance, network_info=None):
"""Use NWFilter from libvirt for this."""
"""Set up provider rules and basic NWFilter."""
if not network_info:
network_info = netutils.get_network_info(instance)
return self.nwfilter.setup_basic_filtering(instance, network_info)
self.nwfilter.setup_basic_filtering(instance, network_info)
if not self.basicly_filtered:
LOG.debug(_('iptables firewall: Setup Basic Filtering'))
self.refresh_provider_fw_rules()
self.basicly_filtered = True
def apply_instance_filter(self, instance):
"""No-op. Everything is done in prepare_instance_filter"""
@@ -543,6 +616,10 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
# Pass through provider-wide drops
ipv4_rules += ['-j $provider']
ipv6_rules += ['-j $provider']
dhcp_servers = [network['gateway'] for (network, _m) in network_info]
for dhcp_server in dhcp_servers:
@@ -560,7 +637,7 @@ class IptablesFirewallDriver(FirewallDriver):
# they're not worth the clutter.
if FLAGS.use_ipv6:
# Allow RA responses
gateways_v6 = [network['gateway_v6'] for (network, _) in
gateways_v6 = [network['gateway_v6'] for (network, _m) in
network_info]
for gateway_v6 in gateways_v6:
ipv6_rules.append(
@@ -583,7 +660,7 @@ class IptablesFirewallDriver(FirewallDriver):
security_group['id'])
for rule in rules:
logging.info('%r', rule)
LOG.debug(_('Adding security group rule: %r'), rule)
if not rule.cidr:
# Eventually, a mechanism to grant access for security
@@ -592,9 +669,9 @@ class IptablesFirewallDriver(FirewallDriver):
version = netutils.get_ip_version(rule.cidr)
if version == 4:
rules = ipv4_rules
fw_rules = ipv4_rules
else:
rules = ipv6_rules
fw_rules = ipv6_rules
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
@@ -629,7 +706,7 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg]
args += ['-j ACCEPT']
rules += [' '.join(args)]
fw_rules += [' '.join(args)]
ipv4_rules += ['-j $sg-fallback']
ipv6_rules += ['-j $sg-fallback']
@@ -657,6 +734,85 @@ class IptablesFirewallDriver(FirewallDriver):
network_info = netutils.get_network_info(instance)
self.add_filters_for_instance(instance, network_info)
def refresh_provider_fw_rules(self):
"""See class:FirewallDriver: docs."""
self._do_refresh_provider_fw_rules()
self.iptables.apply()
@utils.synchronized('iptables', external=True)
def _do_refresh_provider_fw_rules(self):
"""Internal, synchronized version of refresh_provider_fw_rules."""
self._purge_provider_fw_rules()
self._build_provider_fw_rules()
def _purge_provider_fw_rules(self):
"""Remove all rules from the provider chains."""
self.iptables.ipv4['filter'].empty_chain('provider')
if FLAGS.use_ipv6:
self.iptables.ipv6['filter'].empty_chain('provider')
def _build_provider_fw_rules(self):
"""Create all rules for the provider IP DROPs."""
self.iptables.ipv4['filter'].add_chain('provider')
if FLAGS.use_ipv6:
self.iptables.ipv6['filter'].add_chain('provider')
ipv4_rules, ipv6_rules = self._provider_rules()
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule('provider', rule)
if FLAGS.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule('provider', rule)
def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6."""
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
rules = db.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules
else:
fw_rules = ipv6_rules
protocol = rule['protocol']
if version == 6 and protocol == 'icmp':
protocol = 'icmpv6'
args = ['-p', protocol, '-s', rule['cidr']]
if protocol in ['udp', 'tcp']:
if rule['from_port'] == rule['to_port']:
args += ['--dport', '%s' % (rule['from_port'],)]
else:
args += ['-m', 'multiport',
'--dports', '%s:%s' % (rule['from_port'],
rule['to_port'])]
elif protocol == 'icmp':
icmp_type = rule['from_port']
icmp_code = rule['to_port']
if icmp_type == -1:
icmp_type_arg = None
else:
icmp_type_arg = '%s' % icmp_type
if not icmp_code == -1:
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
if version == 4:
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
elif version == 6:
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j DROP']
fw_rules += [' '.join(args)]
return ipv4_rules, ipv6_rules
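As a worked example, a provider rule of {'protocol': 'tcp', 'cidr': '10.99.99.99/32', 'from_port': 1, 'to_port': 65535} ends up as a single iptables fragment in the IPv4 list:

    # Illustrative result of _provider_rules() for the rule above.
    ipv4_rules = ['-p tcp -s 10.99.99.99/32 -m multiport --dports 1:65535 -j DROP']
    ipv6_rules = []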
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)

View File

@@ -21,7 +21,7 @@
"""Network-releated utilities for supporting libvirt connection code."""
import IPy
import netaddr
from nova import context
from nova import db
@@ -34,18 +34,18 @@ FLAGS = flags.FLAGS
def get_net_and_mask(cidr):
net = IPy.IP(cidr)
return str(net.net()), str(net.netmask())
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
net = IPy.IP(cidr)
return str(net.net()), str(net.prefixlen())
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
net = IPy.IP(cidr)
return int(net.version())
net = netaddr.IPNetwork(cidr)
return int(net.version)
def get_network_info(instance):
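A quick, self-contained sketch of the netaddr attributes these helpers now rely on (CIDRs chosen only for illustration):

    import netaddr

    net = netaddr.IPNetwork('192.168.1.0/24')
    print(str(net.ip), str(net.netmask))             # 192.168.1.0 255.255.255.0
    print(int(netaddr.IPNetwork('fd00::/64').version))  # 6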

View File

@@ -21,16 +21,16 @@
import os
import sys
from xml.dom import minidom
import eventlet
import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
import routes
import greenlet
import routes.middleware
import webob
import webob.dec
import webob.exc
from paste import deploy
from nova import exception
@@ -39,49 +39,86 @@ from nova import log as logging
from nova import utils
eventlet.patcher.monkey_patch(socket=True, time=True)
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.wsgi')
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.DEBUG):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
"""Server class to manage a WSGI server, serving a WSGI application."""
def __init__(self, threads=1000):
self.pool = eventlet.GreenPool(threads)
self.socket_info = {}
default_pool_size = 1000
def start(self, application, port, host='0.0.0.0', key=None, backlog=128):
"""Run a WSGI server with the given application."""
arg0 = sys.argv[0]
logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
if key:
self.socket_info[key] = socket.getsockname()
def __init__(self, name, app, host=None, port=None, pool_size=None):
"""Initialize, but do not start, a WSGI server.
:param name: Pretty name for logging.
:param app: The WSGI application to serve.
:param host: IP address to serve the application.
:param port: Port number to serve the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:returns: None
"""
self.name = name
self.app = app
self.host = host or "0.0.0.0"
self.port = port or 0
self._server = None
self._socket = None
self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
self._logger = logging.getLogger("eventlet.wsgi.server")
self._wsgi_logger = logging.WritableLogger(self._logger)
def _start(self):
"""Run the blocking eventlet WSGI server.
:returns: None
"""
eventlet.wsgi.server(self._socket,
self.app,
custom_pool=self._pool,
log=self._wsgi_logger)
def start(self, backlog=128):
"""Start serving a WSGI application.
:param backlog: Maximum number of queued connections.
:returns: None
"""
self._socket = eventlet.listen((self.host, self.port), backlog=backlog)
self._server = eventlet.spawn(self._start)
(self.host, self.port) = self._socket.getsockname()
LOG.info(_("Started %(name)s on %(host)s:%(port)s") % self.__dict__)
def stop(self):
"""Stop this server.
This is not a graceful stop; the server is currently stopped by
killing its eventlet.
:returns: None
"""
LOG.info(_("Stopping WSGI server."))
self._server.kill()
def wait(self):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
except KeyboardInterrupt:
pass
"""Block, until the server has stopped.
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
logger = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
log=WritableLogger(logger))
Waits on the server's eventlet to finish, then returns.
:returns: None
"""
try:
self._server.wait()
except greenlet.GreenletExit:
LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
@@ -309,55 +346,51 @@ class Router(object):
return app
def paste_config_file(basename):
"""Find the best location in the system for a paste config file.
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
Search Order
------------
def __init__(self, config_path=None):
"""Initialize the loader, and attempt to find the config.
The search for a paste config file honors `FLAGS.state_path`, which in a
version checked out from bzr will be the `nova` directory in the top level
of the checkout, and in an installation for a package for your distribution
will likely point to someplace like /etc/nova.
:param config_path: Full or relative path to the paste config.
:returns: None
This method tries to load places likely to be used in development or
experimentation before falling back to the system-wide configuration
in `/etc/nova/`.
"""
config_path = config_path or FLAGS.api_paste_config
self.config_path = self._find_config(config_path)
* Current working directory
* the `etc` directory under state_path, because when working on a checkout
from bzr this will point to the default
* top level of FLAGS.state_path, for distributions
* /etc/nova, which may not be different from state_path on your distro
def _find_config(self, config_path):
"""Find the paste configuration file using the given hint.
"""
configfiles = [basename,
os.path.join(FLAGS.state_path, 'etc', 'nova', basename),
os.path.join(FLAGS.state_path, 'etc', basename),
os.path.join(FLAGS.state_path, basename),
'/etc/nova/%s' % basename]
for configfile in configfiles:
if os.path.exists(configfile):
return configfile
:param config_path: Full or relative path to the paste config.
:returns: Full path of the paste config, if it exists.
:raises: `nova.exception.PasteConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(FLAGS.state_path, "etc", "nova", config_path),
os.path.join(FLAGS.state_path, "etc", config_path),
os.path.join(FLAGS.state_path, config_path),
"/etc/nova/%s" % config_path,
]
def load_paste_configuration(filename, appname):
"""Returns a paste configuration dict, or None."""
filename = os.path.abspath(filename)
config = None
try:
config = deploy.appconfig('config:%s' % filename, name=appname)
except LookupError:
pass
return config
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.PasteConfigNotFound(path=os.path.abspath(config_path))
def load_paste_app(filename, appname):
"""Builds a wsgi app from a paste config, None if app not configured."""
filename = os.path.abspath(filename)
app = None
try:
app = deploy.loadapp('config:%s' % filename, name=appname)
except LookupError:
pass
return app
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the application to load.
:returns: Paste URLMap object wrapping the requested application.
:raises: `nova.exception.PasteAppNotFound`
"""
try:
return deploy.loadapp("config:%s" % self.config_path, name=name)
except LookupError as err:
LOG.error(err)
raise exception.PasteAppNotFound(name=name, path=self.config_path)
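And a matching sketch for Loader; the app name "ec2" is only an assumed example of a section defined in the paste config:

    from nova import exception
    from nova import wsgi

    loader = wsgi.Loader()              # falls back to FLAGS.api_paste_config
    try:
        app = loader.load_app("ec2")    # "ec2" is an assumed paste app name
    except exception.PasteAppNotFound:
        app = None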

View File

@@ -8,7 +8,7 @@
fi
;;
@@ -224,9 +225,11 @@
@@ -224,6 +225,7 @@
remove)
if [ "${TYPE}" = "vif" ] ;then
@@ -16,7 +16,3 @@
xenstore-rm "${HOTPLUG}/hotplug"
fi
logger -t scripts-vif "${dev} has been removed"
remove_from_bridge
;;
esac

View File

@@ -69,7 +69,6 @@ from nose import core
from nose import result
from nova import log as logging
from nova.tests import fake_flags
class _AnsiColorizer(object):
@@ -211,11 +210,11 @@ class NovaTestResult(result.TextTestResult):
break
sys.stdout = stdout
# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
# be outputted.
self.start_time = time.time()
def getDescription(self, test):
return str(test)

View File

@@ -6,6 +6,7 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -r, --recreate-db Recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
@@ -23,6 +24,7 @@ function process_option {
-h|--help) usage;;
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-r|--recreate-db) let recreate_db=1;;
-f|--force) let force=1;;
-p|--pep8) let just_pep8=1;;
-*) noseopts="$noseopts $1";;
@@ -39,6 +41,7 @@ noseargs=
noseopts=
wrapper=""
just_pep8=0
recreate_db=0
for arg in "$@"; do
process_option $arg
@@ -108,6 +111,10 @@ if [ $just_pep8 -eq 1 ]; then
exit
fi
if [ $recreate_db -eq 1 ]; then
rm tests.sqlite
fi
run_tests || exit
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,

View File

@@ -1,14 +1,13 @@
SQLAlchemy==0.6.3
pep8==0.5.0
pep8==0.6.1
pylint==0.19
IPy==0.70
Cheetah==2.4.4
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4
boto==1.9b
carrot==0.10.5
eventlet==0.9.12
eventlet
lockfile==0.8
python-novaclient==2.5.5
python-daemon==1.5.5