Merge and conflict resolution

This commit is contained in:
Eldar Nugaev 2011-05-20 13:29:54 +04:00
commit 6eaaf3183c
51 changed files with 1770 additions and 503 deletions

View File

@ -417,11 +417,10 @@ class ProjectCommands(object):
arguments: project_id [key] [value]"""
ctxt = context.get_admin_context()
if key:
quo = {'project_id': project_id, key: value}
try:
db.quota_update(ctxt, project_id, quo)
db.quota_update(ctxt, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(ctxt, quo)
db.quota_create(ctxt, project_id, key, value)
project_quota = quota.get_quota(ctxt, project_id)
for key, value in project_quota.iteritems():
print '%s: %s' % (key, value)
@ -550,8 +549,10 @@ class NetworkCommands(object):
[network_size=FLAG], [vlan_start=FLAG],
[vpn_start=FLAG], [fixed_range_v6=FLAG]"""
if not fixed_range:
raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is '
'required to create networks.'))
msg = _('Fixed range in the form of 10.0.0.0/8 is '
'required to create networks.')
print msg
raise TypeError(msg)
if not num_networks:
num_networks = FLAGS.num_networks
if not network_size:
@ -563,14 +564,18 @@ class NetworkCommands(object):
if not fixed_range_v6:
fixed_range_v6 = FLAGS.fixed_range_v6
net_manager = utils.import_object(FLAGS.network_manager)
net_manager.create_networks(context.get_admin_context(),
cidr=fixed_range,
num_networks=int(num_networks),
network_size=int(network_size),
vlan_start=int(vlan_start),
vpn_start=int(vpn_start),
cidr_v6=fixed_range_v6,
label=label)
try:
net_manager.create_networks(context.get_admin_context(),
cidr=fixed_range,
num_networks=int(num_networks),
network_size=int(network_size),
vlan_start=int(vlan_start),
vpn_start=int(vpn_start),
cidr_v6=fixed_range_v6,
label=label)
except ValueError, e:
print e
raise e
def list(self):
"""List all created networks"""

View File

@ -65,7 +65,7 @@ def format_help(d):
indent = MAX_INDENT - 6
out = []
for k, v in d.iteritems():
for k, v in sorted(d.iteritems()):
if (len(k) + 6) > MAX_INDENT:
out.extend([' %s' % k])
initial_indent = ' ' * (indent + 6)

View File

@ -27,6 +27,8 @@ import datetime
import IPy
import os
import urllib
import tempfile
import shutil
from nova import compute
from nova import context
@ -35,6 +37,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import network
from nova import utils
@ -315,6 +318,27 @@ class CloudController(object):
'keyMaterial': data['private_key']}
# TODO(vish): when context is no longer an object, pass it here
def import_public_key(self, context, key_name, public_key,
fingerprint=None):
LOG.audit(_("Import key %s"), key_name, context=context)
key = {}
key['user_id'] = context.user_id
key['name'] = key_name
key['public_key'] = public_key
if fingerprint is None:
tmpdir = tempfile.mkdtemp()
pubfile = os.path.join(tmpdir, 'temp.pub')
fh = open(pubfile, 'w')
fh.write(public_key)
fh.close()
(out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f',
'%s' % (pubfile))
fingerprint = out.split(' ')[1]
shutil.rmtree(tmpdir)
key['fingerprint'] = fingerprint
db.key_pair_create(context, key)
return True
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
@ -718,9 +742,10 @@ class CloudController(object):
fixed = instance['fixed_ip']
floating_addr = fixed['floating_ips'][0]['address']
if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
i['dnsNameV6'] = utils.to_global_ipv6(
i['dnsNameV6'] = ipv6.to_global(
instance['fixed_ip']['network']['cidr_v6'],
instance['mac_address'])
instance['mac_address'],
instance['project_id'])
i['privateDnsName'] = fixed_addr
i['privateIpAddress'] = fixed_addr

View File

@ -17,7 +17,6 @@
import datetime
import hashlib
import json
import time
import webob.exc
@ -25,11 +24,9 @@ import webob.dec
from nova import auth
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults
@ -102,11 +99,11 @@ class AuthMiddleware(wsgi.Middleware):
token, user = self._authorize_user(username, key, req)
if user and token:
res = webob.Response()
res.headers['X-Auth-Token'] = token.token_hash
res.headers['X-Auth-Token'] = token['token_hash']
res.headers['X-Server-Management-Url'] = \
token.server_management_url
res.headers['X-Storage-Url'] = token.storage_url
res.headers['X-CDN-Management-Url'] = token.cdn_management_url
token['server_management_url']
res.headers['X-Storage-Url'] = token['storage_url']
res.headers['X-CDN-Management-Url'] = token['cdn_management_url']
res.content_type = 'text/plain'
res.status = '204'
LOG.debug(_("Successfully authenticated '%s'") % username)
@ -130,11 +127,11 @@ class AuthMiddleware(wsgi.Middleware):
except exception.NotFound:
return None
if token:
delta = datetime.datetime.now() - token.created_at
delta = datetime.datetime.utcnow() - token['created_at']
if delta.days >= 2:
self.db.auth_token_destroy(ctxt, token.token_hash)
self.db.auth_token_destroy(ctxt, token['token_hash'])
else:
return self.auth.get_user(token.user_id)
return self.auth.get_user(token['user_id'])
return None
def _authorize_user(self, username, key, req):

View File

@ -45,6 +45,9 @@ class Controller(common.OpenstackController):
items = self._get_flavors(req, is_detail=True)
return dict(flavors=items)
def _get_view_builder(self, req):
    """Hook: return the view builder for this request; subclasses provide it."""
    raise NotImplementedError()
def _get_flavors(self, req, is_detail=True):
"""Helper function that returns a list of flavor dicts."""
ctxt = req.environ['nova.context']

View File

@ -75,6 +75,21 @@ class Controller(common.OpenstackController):
""" Returns a list of server details for a given user """
return self._items(req, is_detail=True)
def _image_id_from_req_data(self, data):
    """Hook: extract the image id from the request body; subclasses provide it."""
    raise NotImplementedError()
def _flavor_id_from_req_data(self, data):
    """Hook: extract the flavor id from the request body; subclasses provide it."""
    raise NotImplementedError()
def _get_view_builder(self, req):
    """Hook: return the server view builder; subclasses provide it."""
    raise NotImplementedError()
def _limit_items(self, items, req):
    """Hook: apply pagination/limits to *items*; subclasses provide it."""
    raise NotImplementedError()
def _action_rebuild(self, info, request, instance_id):
    """Hook: handle the 'rebuild' server action; subclasses provide it."""
    raise NotImplementedError()
def _items(self, req, is_detail):
"""Returns a list of servers for a given user.
@ -743,8 +758,9 @@ class ServerCreateRequestXMLDeserializer(object):
"""Marshal the server attribute of a parsed request"""
server = {}
server_node = self._find_first_child_named(node, 'server')
for attr in ["name", "imageId", "flavorId"]:
server[attr] = server_node.getAttribute(attr)
for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
metadata = self._extract_metadata(server_node)
if metadata is not None:
server["metadata"] = metadata

View File

@ -23,6 +23,15 @@ from nova.api.openstack import common
class ViewBuilder(object):
"""Openstack API base limits view builder."""
def _build_rate_limits(self, rate_limits):
    """Hook: render the rate-limit collection; subclasses provide it."""
    raise NotImplementedError()
def _build_rate_limit(self, rate_limit):
    """Hook: render a single rate limit; subclasses provide it."""
    raise NotImplementedError()
def _build_absolute_limits(self, absolute_limit):
    """Hook: render the absolute limits; subclasses provide it."""
    raise NotImplementedError()
def build(self, rate_limits, absolute_limits):
rate_limits = self._build_rate_limits(rate_limits)
absolute_limits = self._build_absolute_limits(absolute_limits)

View File

@ -628,7 +628,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_type = self.db.instance_type_get_by_flavor_id(context,
migration_ref['new_flavor_id'])
self.db.instance_update(context, instance_id,
dict(instance_type=instance_type['name'],
dict(instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
local_gb=instance_type['local_gb']))

View File

@ -756,24 +756,34 @@ def auth_token_create(context, token):
###################
def quota_create(context, values):
"""Create a quota from the values dictionary."""
return IMPL.quota_create(context, values)
def quota_create(context, project_id, resource, limit):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id):
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id)
return IMPL.quota_get(context, project_id, resource)
def quota_update(context, project_id, values):
"""Update a quota from the values dictionary."""
return IMPL.quota_update(context, project_id, values)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_destroy(context, project_id):
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
return IMPL.quota_destroy(context, project_id)
return IMPL.quota_destroy(context, project_id, resource)
def quota_destroy_all_by_project(context, project_id):
    """Destroy all quotas associated with a given project.

    BUG FIX: this previously delegated to IMPL.quota_get_all_by_project,
    which only *read* the project's quotas and left every row untouched.
    """
    return IMPL.quota_destroy_all_by_project(context, project_id)
###################

View File

@ -25,6 +25,7 @@ import warnings
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import utils
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
@ -744,7 +745,7 @@ def fixed_ip_get_all_by_instance(context, instance_id):
@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
mac = utils.to_mac(address)
mac = ipv6.to_mac(address)
result = session.query(models.Instance).\
filter_by(mac_address=mac).\
@ -872,6 +873,7 @@ def instance_get_all(context):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@ -884,6 +886,7 @@ def instance_get_all_by_user(context, user_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(user_id=user_id).\
@ -974,7 +977,8 @@ def instance_get_fixed_address_v6(context, instance_id):
network_ref = network_get_by_instance(context, instance_id)
prefix = network_ref.cidr_v6
mac = instance_ref.mac_address
return utils.to_global_ipv6(prefix, mac)
project_id = instance_ref.project_id
return ipv6.to_global(prefix, mac, project_id)
@require_context
@ -1496,45 +1500,71 @@ def auth_token_create(_context, token):
@require_admin_context
def quota_get(context, project_id, session=None):
def quota_get(context, project_id, resource, session=None):
if not session:
session = get_session()
result = session.query(models.Quota).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(resource=resource).\
filter_by(deleted=False).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_admin_context
def quota_create(context, values):
def quota_get_all_by_project(context, project_id):
    """Return the project's quotas as a dict.

    The result always carries a 'project_id' key; each non-deleted quota
    row contributes a resource-name -> hard_limit entry. A project with
    no rows yields just {'project_id': ...}.
    """
    session = get_session()
    result = {'project_id': project_id}
    rows = session.query(models.Quota).\
        filter_by(project_id=project_id).\
        filter_by(deleted=False).\
        all()
    for row in rows:
        result[row.resource] = row.hard_limit
    return result
@require_admin_context
def quota_create(context, project_id, resource, limit):
quota_ref = models.Quota()
quota_ref.update(values)
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
quota_ref.save()
return quota_ref
@require_admin_context
def quota_update(context, project_id, values):
def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
quota_ref = quota_get(context, project_id, session=session)
quota_ref.update(values)
quota_ref = quota_get(context, project_id, resource, session=session)
quota_ref.hard_limit = limit
quota_ref.save(session=session)
@require_admin_context
def quota_destroy(context, project_id):
def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
quota_ref = quota_get(context, project_id, session=session)
quota_ref = quota_get(context, project_id, resource, session=session)
quota_ref.delete(session=session)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    """Delete every non-deleted quota row for the given project.

    Runs in a single transaction. Uses the model's delete() —
    presumably a soft delete per the NovaBase convention; confirm
    against models.py.
    """
    session = get_session()
    with session.begin():
        quotas = session.query(models.Quota).\
            filter_by(project_id=project_id).\
            filter_by(deleted=False).\
            all()
        for quota_ref in quotas:
            quota_ref.delete(session=session)
###################

View File

@ -0,0 +1,203 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
import datetime
meta = MetaData()
resources = [
'instances',
'cores',
'volumes',
'gigabytes',
'floating_ips',
'metadata_items',
]
def old_style_quotas_table(name):
    """Build the legacy quotas table: one row per project, with one
    integer column per quota'd resource (see module-level `resources`)."""
    columns = [
        Column('id', Integer(), primary_key=True),
        Column('created_at', DateTime(),
               default=datetime.datetime.utcnow),
        Column('updated_at', DateTime(),
               onupdate=datetime.datetime.utcnow),
        Column('deleted_at', DateTime()),
        Column('deleted', Boolean(), default=False),
        Column('project_id',
               String(length=255, convert_unicode=False,
                      assert_unicode=None, unicode_error=None,
                      _warn_on_bytestring=False)),
    ]
    # One Integer column per resource, in the canonical resource order.
    columns += [Column(resource, Integer()) for resource in resources]
    return Table(name, meta, *columns)
def new_style_quotas_table(name):
    """Build the new quotas table: one row per (project, resource) pair."""
    # Both string columns share identical type parameters.
    string_kwargs = dict(length=255, convert_unicode=False,
                         assert_unicode=None, unicode_error=None,
                         _warn_on_bytestring=False)
    return Table(name, meta,
                 Column('id', Integer(), primary_key=True),
                 Column('created_at', DateTime(),
                        default=datetime.datetime.utcnow),
                 Column('updated_at', DateTime(),
                        onupdate=datetime.datetime.utcnow),
                 Column('deleted_at', DateTime()),
                 Column('deleted', Boolean(), default=False),
                 Column('project_id', String(**string_kwargs)),
                 Column('resource', String(**string_kwargs),
                        nullable=False),
                 # NULL hard_limit means the resource is unlimited.
                 Column('hard_limit', Integer(), nullable=True),
                 )
def existing_quotas_table(migrate_engine):
    """Reflect whatever 'quotas' table currently exists in the database."""
    return Table('quotas', meta, autoload=True, autoload_with=migrate_engine)
def _assert_no_duplicate_project_ids(quotas):
    """Raise AssertionError if any project id occurs more than once."""
    message = ('There are multiple active quotas for project "%s" '
               '(among others, possibly). '
               'Please resolve all ambiguous quotas before '
               'reattempting the migration.')
    seen = set()
    for quota in quotas:
        # Fail on the first duplicate, naming the offending project.
        assert quota.project_id not in seen, message % quota.project_id
        seen.add(quota.project_id)
def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
    """Ensure that there are no duplicate non-deleted quota entries.

    The old schema keys quotas by project only, so each project may have
    at most one active row for the forward conversion to be unambiguous.
    """
    select = quotas.select().where(quotas.c.deleted == False)
    results = migrate_engine.execute(select)
    _assert_no_duplicate_project_ids(list(results))
def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
    """Ensure that there are no duplicate non-deleted quota entries.

    The new schema allows one active row per (project, resource), so the
    duplicate-project check runs once per known resource.
    """
    for resource in resources:
        select = quotas.select().\
            where(quotas.c.deleted == False).\
            where(quotas.c.resource == resource)
        results = migrate_engine.execute(select)
        _assert_no_duplicate_project_ids(list(results))
def convert_forward(migrate_engine, old_quotas, new_quotas):
    """Copy legacy one-row-per-project quotas into new_quotas.

    Each non-NULL resource column becomes its own row; NULL columns are
    skipped, since in the new schema 'no row' means 'use the deployment
    default'.
    """
    # Materialize the result set up front — presumably to avoid reading
    # and writing through the same engine simultaneously; confirm.
    quotas = list(migrate_engine.execute(old_quotas.select()))
    for quota in quotas:
        for resource in resources:
            hard_limit = getattr(quota, resource)
            if hard_limit is None:
                continue
            insert = new_quotas.insert().values(
                created_at=quota.created_at,
                updated_at=quota.updated_at,
                deleted_at=quota.deleted_at,
                deleted=quota.deleted,
                project_id=quota.project_id,
                resource=resource,
                hard_limit=hard_limit)
            migrate_engine.execute(insert)
def earliest(date1, date2):
    """Return the older of two datetimes; None is treated as 'missing'.

    Returns None only when both arguments are None.
    """
    if date1 is None:
        return date2
    if date2 is None:
        return date1
    return min(date1, date2)
def latest(date1, date2):
    """Return the newer of two datetimes; None is treated as 'missing'.

    Returns None only when both arguments are None.
    """
    if date1 is None:
        return date2
    if date2 is None:
        return date1
    return max(date1, date2)
def convert_backward(migrate_engine, old_quotas, new_quotas):
    """Collapse per-resource quota rows back into one row per project.

    Rows that are deleted, have a NULL hard_limit, or name an unknown
    resource are dropped — the legacy schema cannot represent them.
    For each project, the oldest created_at and the newest updated_at
    among its surviving rows are kept.

    Idiom fix: `not x in y` rewritten as the standard `x not in y`.
    """
    quotas = {}
    for quota in migrate_engine.execute(new_quotas.select()):
        if (quota.resource not in resources
            or quota.hard_limit is None or quota.deleted):
            continue
        if quota.project_id not in quotas:
            # First row seen for this project: start its legacy record.
            quotas[quota.project_id] = {
                'project_id': quota.project_id,
                'created_at': quota.created_at,
                'updated_at': quota.updated_at,
                quota.resource: quota.hard_limit
            }
        else:
            quotas[quota.project_id]['created_at'] = earliest(
                quota.created_at, quotas[quota.project_id]['created_at'])
            quotas[quota.project_id]['updated_at'] = latest(
                quota.updated_at, quotas[quota.project_id]['updated_at'])
            quotas[quota.project_id][quota.resource] = quota.hard_limit

    for quota in quotas.itervalues():
        insert = old_quotas.insert().values(**quota)
        migrate_engine.execute(insert)
def upgrade(migrate_engine):
    """Replace the one-row-per-project quotas table with the
    one-row-per-(project, resource) layout, migrating existing data."""
    # Don't create your own engine; bind migrate_engine to your metadata.
    meta.bind = migrate_engine
    old_quotas = existing_quotas_table(migrate_engine)
    # Abort before touching anything if the data would be ambiguous.
    assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)

    # Build under a temporary name, copy data, then rename into place.
    new_quotas = new_style_quotas_table('quotas_new')
    new_quotas.create()
    convert_forward(migrate_engine, old_quotas, new_quotas)
    old_quotas.drop()

    new_quotas.rename('quotas')
def downgrade(migrate_engine):
    """Reverse the upgrade: collapse per-resource quota rows back into
    the legacy one-row-per-project layout."""
    meta.bind = migrate_engine
    new_quotas = existing_quotas_table(migrate_engine)
    # Abort before touching anything if the data would be ambiguous.
    assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)

    # Build under a temporary name, copy data, then rename into place.
    old_quotas = old_style_quotas_table('quotas_old')
    old_quotas.create()
    convert_backward(migrate_engine, old_quotas, new_quotas)
    new_quotas.drop()

    old_quotas.rename('quotas')

View File

@ -0,0 +1,68 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
from nova import log as logging
meta = MetaData()
def upgrade(migrate_engine):
    """Convert instances.instance_type_id from string to integer.

    Values that cannot be parsed as integers are logged and replaced
    with NULL.
    """
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    # Snapshot id -> converted value before altering the schema.
    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
            continue
        try:
            types[instance.id] = int(instance.instance_type_id)
        except ValueError:
            logging.warn("Instance %s did not have instance_type_id "
                         "converted to an integer because its value is %s" %
                         (instance.id, instance.instance_type_id))
            types[instance.id] = None

    integer_column = Column('instance_type_id_int', Integer(), nullable=True)
    string_column = instances.c.instance_type_id

    # Add the integer column, backfill it row by row, then swap names
    # and drop the original string column.
    integer_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        update = instances.update().\
            where(instances.c.id == instance_id).\
            values(instance_type_id_int=instance_type_id)
        migrate_engine.execute(update)

    string_column.alter(name='instance_type_id_str')
    integer_column.alter(name='instance_type_id')
    string_column.drop()
def downgrade(migrate_engine):
    """Convert instances.instance_type_id back from integer to string."""
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    integer_column = instances.c.instance_type_id
    string_column = Column('instance_type_id_str',
                           String(length=255, convert_unicode=False,
                                  assert_unicode=None, unicode_error=None,
                                  _warn_on_bytestring=False),
                           nullable=True)

    # Snapshot id -> stringified value before altering the schema.
    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
        else:
            types[instance.id] = str(instance.instance_type_id)

    # Add the string column, backfill it row by row, then swap names
    # and drop the integer column.
    string_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        update = instances.update().\
            where(instances.c.id == instance_id).\
            values(instance_type_id_str=instance_type_id)
        migrate_engine.execute(update)

    integer_column.alter(name='instance_type_id_int')
    string_column.alter(name='instance_type_id')
    integer_column.drop()

View File

@ -0,0 +1,60 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Integer, MetaData, String, Table
#from nova import log as logging
meta = MetaData()
c_manageent = Column('server_manageent_url',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
c_management = Column('server_management_url',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
def upgrade(migrate_engine):
    """Rename the misspelled auth_tokens.server_manageent_url column to
    server_management_url, copying existing values across."""
    # Don't create your own engine; bind migrate_engine to your metadata.
    meta.bind = migrate_engine
    tokens = Table('auth_tokens', meta, autoload=True,
                   autoload_with=migrate_engine)

    # Add the correctly-spelled column, copy the data, drop the old one.
    tokens.create_column(c_management)
    migrate_engine.execute(tokens.update()
        .values(server_management_url=tokens.c.server_manageent_url))
    tokens.c.server_manageent_url.drop()
def downgrade(migrate_engine):
    """Restore the original (misspelled) server_manageent_url column."""
    meta.bind = migrate_engine
    tokens = Table('auth_tokens', meta, autoload=True,
                   autoload_with=migrate_engine)

    # Add the misspelled column back, copy the data, drop the new one.
    tokens.create_column(c_manageent)
    migrate_engine.execute(tokens.update()
        .values(server_manageent_url=tokens.c.server_management_url))
    tokens.c.server_management_url.drop()

View File

@ -209,7 +209,7 @@ class Instance(BASE, NovaBase):
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
instance_type_id = Column(String(255))
instance_type_id = Column(Integer)
user_data = Column(Text)
@ -313,18 +313,20 @@ class Volume(BASE, NovaBase):
class Quota(BASE, NovaBase):
"""Represents quota overrides for a project."""
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then
the default for the deployment is used. If the row is present
but the hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
project_id = Column(String(255), index=True)
instances = Column(Integer)
cores = Column(Integer)
volumes = Column(Integer)
gigabytes = Column(Integer)
floating_ips = Column(Integer)
metadata_items = Column(Integer)
resource = Column(String(255))
hard_limit = Column(Integer, nullable=True)
class ExportDevice(BASE, NovaBase):
@ -493,7 +495,7 @@ class AuthToken(BASE, NovaBase):
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
user_id = Column(String(255))
server_manageent_url = Column(String(255))
server_management_url = Column(String(255))
storage_url = Column(String(255))
cdn_management_url = Column(String(255))

View File

@ -369,6 +369,9 @@ DEFINE_string('host', socket.gethostname(),
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
DEFINE_string('notification_driver',
'nova.notifier.no_op_notifier',
'Default driver for sending notifications')
DEFINE_list('memcached_servers', None,
'Memcached servers or None for in process cache.')

17
nova/ipv6/__init__.py Normal file
View File

@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.ipv6.api import *

View File

@ -0,0 +1,45 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""IPv6 address generation with account identifier embedded"""
import hashlib
import netaddr
def to_global(prefix, mac, project_id):
    """Generate a global IPv6 address embedding a hash of the project id.

    The host part mixes the first 32 bits of SHA-1(project_id) (shifted
    into the upper half), a fixed 0xff marker byte, and the last three
    octets of the MAC, then ORs in the network prefix.

    :raises TypeError: if the MAC cannot be parsed by netaddr.
    """
    project_hash = netaddr.IPAddress(int(hashlib.sha1(project_id).\
            hexdigest()[:8], 16) << 32)
    static_num = netaddr.IPAddress(0xff << 24)

    try:
        mac_suffix = netaddr.EUI(mac).words[3:]
        int_addr = int(''.join(['%02x' % i for i in mac_suffix]), 16)
        mac_addr = netaddr.IPAddress(int_addr)
        maskIP = netaddr.IPNetwork(prefix).ip
        return (project_hash ^ static_num ^ mac_addr | maskIP).format()
    except TypeError:
        raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
def to_mac(ipv6_address):
    """Recover a MAC address from an IPv6 address built by to_global.

    Only the last three octets of the MAC survive the embedding, so the
    OUI is assumed to be the locally-administered 02:16:3e prefix.
    """
    address = netaddr.IPAddress(ipv6_address)
    mask1 = netaddr.IPAddress('::ff:ffff')
    mac = netaddr.EUI(int(address & mask1)).words
    return ':'.join(['02', '16', '3e'] + ['%02x' % i for i in mac[3:6]])

41
nova/ipv6/api.py Normal file
View File

@ -0,0 +1,41 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
    """(Re)build the module-global IMPL from FLAGS.ipv6_backend.

    Call again after changing the flag to switch backends.
    """
    global IMPL
    IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
                               rfc2462='nova.ipv6.rfc2462',
                               account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
    """Delegate IPv6 address generation to the configured backend."""
    return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
    """Delegate MAC recovery to the configured backend."""
    return IMPL.to_mac(ipv6_address)
reset_backend()

42
nova/ipv6/rfc2462.py Normal file
View File

@ -0,0 +1,42 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RFC2462 style IPv6 address generation"""
import netaddr
def to_global(prefix, mac, project_id):
    """Build an RFC 2462 (EUI-64) style global IPv6 address.

    project_id is accepted for interface parity with other backends but
    is not used here.

    :raises TypeError: if the MAC cannot be parsed by netaddr.
    """
    try:
        mac64 = netaddr.EUI(mac).eui64().words
        int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
        mac64_addr = netaddr.IPAddress(int_addr)
        maskIP = netaddr.IPNetwork(prefix).ip
        # XOR flips the universal/local bit (0x0200 in the EUI-64),
        # then the network prefix is ORed in.
        return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
            format()
    except TypeError:
        raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
def to_mac(ipv6_address):
    """Invert to_global: recover the EUI-48 MAC from an RFC 2462 address.

    Masks off the low 64 bits, flips the universal/local bit back, and
    drops the embedded ff:fe filler (EUI-64 words 3 and 4).
    """
    address = netaddr.IPAddress(ipv6_address)
    mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
    mask2 = netaddr.IPAddress('::0200:0:0:0')
    mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
    return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]])

View File

@ -16,9 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to instances (guest vms).
"""
"""Handles all requests relating to instances (guest vms)."""
from nova import db
from nova import exception
@ -28,6 +26,7 @@ from nova import quota
from nova import rpc
from nova.db import base
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.network')
@ -37,19 +36,19 @@ class API(base.Base):
def allocate_floating_ip(self, context):
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_("Quota exceeeded for %s, tried to allocate "
"address"),
context.project_id)
raise quota.QuotaError(_("Address quota exceeded. You cannot "
"allocate any more addresses"))
LOG.warn(_('Quota exceeeded for %s, tried to allocate '
'address'),
context.project_id)
raise quota.QuotaError(_('Address quota exceeded. You cannot '
'allocate any more addresses'))
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
# at some point.
return rpc.call(context,
FLAGS.network_topic,
{"method": "allocate_floating_ip",
"args": {"project_id": context.project_id}})
{'method': 'allocate_floating_ip',
'args': {'project_id': context.project_id}})
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
@ -62,8 +61,8 @@ class API(base.Base):
# at some point.
rpc.cast(context,
FLAGS.network_topic,
{"method": "deallocate_floating_ip",
"args": {"floating_address": floating_ip['address']}})
{'method': 'deallocate_floating_ip',
'args': {'floating_address': floating_ip['address']}})
def associate_floating_ip(self, context, floating_ip, fixed_ip,
affect_auto_assigned=False):
@ -74,17 +73,17 @@ class API(base.Base):
return
# Check if the floating ip address is allocated
if floating_ip['project_id'] is None:
raise exception.ApiError(_("Address (%s) is not allocated") %
raise exception.ApiError(_('Address (%s) is not allocated') %
floating_ip['address'])
# Check if the floating ip address is allocated to the same project
if floating_ip['project_id'] != context.project_id:
LOG.warn(_("Address (%(address)s) is not allocated to your "
"project (%(project)s)"),
LOG.warn(_('Address (%(address)s) is not allocated to your '
'project (%(project)s)'),
{'address': floating_ip['address'],
'project': context.project_id})
raise exception.ApiError(_("Address (%(address)s) is not "
"allocated to your project"
"(%(project)s)") %
raise exception.ApiError(_('Address (%(address)s) is not '
'allocated to your project'
'(%(project)s)') %
{'address': floating_ip['address'],
'project': context.project_id})
# NOTE(vish): Perhaps we should just pass this on to compute and
@ -92,9 +91,9 @@ class API(base.Base):
host = fixed_ip['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
{"method": "associate_floating_ip",
"args": {"floating_address": floating_ip['address'],
"fixed_address": fixed_ip['address']}})
{'method': 'associate_floating_ip',
'args': {'floating_address': floating_ip['address'],
'fixed_address': fixed_ip['address']}})
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
@ -108,5 +107,5 @@ class API(base.Base):
host = floating_ip['fixed_ip']['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
{"method": "disassociate_floating_ip",
"args": {"floating_address": floating_ip['address']}})
{'method': 'disassociate_floating_ip',
'args': {'floating_address': floating_ip['address']}})

View File

@ -15,13 +15,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements vlans, bridges, and iptables rules using linux utilities.
"""
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import os
import calendar
from nova import db
from nova import exception
@ -29,12 +28,13 @@ from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger("nova.linux_net")
def _bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
"""Return the absolute path to scipt in the bin directory."""
return os.path.abspath(os.path.join(__file__, '../../../bin', script))
FLAGS = flags.FLAGS
@ -66,11 +66,13 @@ binary_name = os.path.basename(inspect.stack()[-1][1])
class IptablesRule(object):
"""An iptables rule
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
@ -95,7 +97,7 @@ class IptablesRule(object):
class IptablesTable(object):
"""An iptables table"""
"""An iptables table."""
def __init__(self):
self.rules = []
@ -103,15 +105,16 @@ class IptablesTable(object):
self.unwrapped_chains = set()
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named "OUTPUT", it'll actually
end up named "nova-compute-OUTPUT".
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
if wrap:
self.chains.add(name)
@ -119,12 +122,13 @@ class IptablesTable(object):
self.unwrapped_chains.add(name)
def remove_chain(self, name, wrap=True):
"""Remove named chain
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
if wrap:
chain_set = self.chains
@ -132,7 +136,7 @@ class IptablesTable(object):
chain_set = self.unwrapped_chains
if name not in chain_set:
LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
LOG.debug(_('Attempted to remove chain %s which does not exist'),
name)
return
@ -147,17 +151,18 @@ class IptablesTable(object):
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the "-A <chain name>" bit at the start.
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
if wrap and chain not in self.chains:
raise ValueError(_("Unknown chain: %r") % chain)
raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
@ -170,23 +175,24 @@ class IptablesTable(object):
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
except ValueError:
LOG.debug(_("Tried to remove rule that wasn't there:"
" %(chain)r %(rule)r %(wrap)r %(top)r"),
LOG.debug(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
class IptablesManager(object):
"""Wrapper for iptables
"""Wrapper for iptables.
See IptablesTable for some usage docs
@ -205,7 +211,9 @@ class IptablesManager(object):
For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same was as the builtin filter chains. Additionally, there's
a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, execute=None):
if not execute:
self.execute = _execute
@ -267,11 +275,12 @@ class IptablesManager(object):
@utils.synchronized('iptables', external=True)
def apply(self):
"""Apply the current in-memory set of iptables rules
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if FLAGS.use_ipv6:
@ -348,63 +357,63 @@ class IptablesManager(object):
def metadata_forward():
"""Create forwarding rule for metadata"""
iptables_manager.ipv4['nat'].add_rule("PREROUTING",
"-s 0.0.0.0/0 -d 169.254.169.254/32 "
"-p tcp -m tcp --dport 80 -j DNAT "
"--to-destination %s:%s" % \
"""Create forwarding rule for metadata."""
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' % \
(FLAGS.ec2_dmz_host, FLAGS.ec2_port))
iptables_manager.apply()
def init_host():
"""Basic networking setup goes here"""
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
iptables_manager.ipv4['nat'].add_rule("snat",
"-s %s -j SNAT --to-source %s" % \
iptables_manager.ipv4['nat'].add_rule('snat',
'-s %s -j SNAT --to-source %s' % \
(FLAGS.fixed_range,
FLAGS.routing_source_ip))
iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
"-s %s -d %s -j ACCEPT" % \
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s -j ACCEPT' % \
(FLAGS.fixed_range, FLAGS.dmz_cidr))
iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
"-s %(range)s -d %(range)s "
"-j ACCEPT" % \
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %(range)s -d %(range)s '
'-j ACCEPT' % \
{'range': FLAGS.fixed_range})
iptables_manager.apply()
def bind_floating_ip(floating_ip, check_exit_code=True):
"""Bind ip to public interface"""
"""Bind ip to public interface."""
_execute('sudo', 'ip', 'addr', 'add', floating_ip,
'dev', FLAGS.public_interface,
check_exit_code=check_exit_code)
def unbind_floating_ip(floating_ip):
"""Unbind a public ip from public interface"""
"""Unbind a public ip from public interface."""
_execute('sudo', 'ip', 'addr', 'del', floating_ip,
'dev', FLAGS.public_interface)
def ensure_metadata_ip():
"""Sets up local metadata ip"""
"""Sets up local metadata ip."""
_execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo', check_exit_code=False)
def ensure_vlan_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan"""
iptables_manager.ipv4['filter'].add_rule("FORWARD",
"-d %s -p udp "
"--dport 1194 "
"-j ACCEPT" % private_ip)
iptables_manager.ipv4['nat'].add_rule("PREROUTING",
"-d %s -p udp "
"--dport %s -j DNAT --to %s:1194" %
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
'--dport 1194 '
'-j ACCEPT' % private_ip)
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule("OUTPUT",
"-d %s -p udp "
@ -414,37 +423,37 @@ def ensure_vlan_forward(public_ip, port, private_ip):
def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule"""
"""Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip"""
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip):
return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
("floating-snat",
"-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))]
return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('floating-snat',
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
"""Create a vlan and bridge unless they already exist"""
"""Create a vlan and bridge unless they already exist."""
interface = ensure_vlan(vlan_num)
ensure_bridge(bridge, interface, net_attrs)
def ensure_vlan(vlan_num):
"""Create a vlan unless it already exists"""
interface = "vlan%s" % vlan_num
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
LOG.debug(_("Starting VLAN inteface %s"), interface)
LOG.debug(_('Starting VLAN inteface %s'), interface)
_execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
_execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
_execute('sudo', 'ip', 'link', 'set', interface, 'up')
@ -464,12 +473,13 @@ def ensure_bridge(bridge, interface, net_attrs=None):
The code will attempt to move any ips that already exist on the interface
onto the bridge and reset the default gateway if necessary.
"""
if not _device_exists(bridge):
LOG.debug(_("Starting Bridge interface for %s"), interface)
LOG.debug(_('Starting Bridge interface for %s'), interface)
_execute('sudo', 'brctl', 'addbr', bridge)
_execute('sudo', 'brctl', 'setfd', bridge, 0)
# _execute("sudo brctl setageing %s 10" % bridge)
# _execute('sudo brctl setageing %s 10' % bridge)
_execute('sudo', 'brctl', 'stp', bridge, 'off')
_execute('sudo', 'ip', 'link', 'set', bridge, 'up')
if net_attrs:
@ -477,15 +487,15 @@ def ensure_bridge(bridge, interface, net_attrs=None):
# bridge for it to respond to reqests properly
suffix = net_attrs['cidr'].rpartition('/')[2]
out, err = _execute('sudo', 'ip', 'addr', 'add',
"%s/%s" %
'%s/%s' %
(net_attrs['gateway'], suffix),
'brd',
net_attrs['broadcast'],
'dev',
bridge,
check_exit_code=False)
if err and err != "RTNETLINK answers: File exists\n":
raise exception.Error("Failed to add ip: %s" % err)
if err and err != 'RTNETLINK answers: File exists\n':
raise exception.Error('Failed to add ip: %s' % err)
if(FLAGS.use_ipv6):
_execute('sudo', 'ip', '-f', 'inet6', 'addr',
'change', net_attrs['cidr_v6'],
@ -501,17 +511,17 @@ def ensure_bridge(bridge, interface, net_attrs=None):
# interface, so we move any ips to the bridge
gateway = None
out, err = _execute('sudo', 'route', '-n')
for line in out.split("\n"):
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
if fields and fields[0] == '0.0.0.0' and fields[-1] == interface:
gateway = fields[1]
_execute('sudo', 'route', 'del', 'default', 'gw', gateway,
'dev', interface, check_exit_code=False)
out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
for line in out.split("\n"):
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == "inet":
if fields and fields[0] == 'inet':
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]))
_execute(*_ip_bridge_cmd('add', params, bridge))
@ -522,18 +532,18 @@ def ensure_bridge(bridge, interface, net_attrs=None):
if (err and err != "device %s is already a member of a bridge; can't "
"enslave it to bridge %s.\n" % (interface, bridge)):
raise exception.Error("Failed to add interface: %s" % err)
raise exception.Error('Failed to add interface: %s' % err)
iptables_manager.ipv4['filter'].add_rule("FORWARD",
"--in-interface %s -j ACCEPT" % \
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % \
bridge)
iptables_manager.ipv4['filter'].add_rule("FORWARD",
"--out-interface %s -j ACCEPT" % \
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % \
bridge)
def get_dhcp_leases(context, network_id):
"""Return a network's hosts config in dnsmasq leasefile format"""
"""Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@ -542,7 +552,7 @@ def get_dhcp_leases(context, network_id):
def get_dhcp_hosts(context, network_id):
"""Get a string containing a network's hosts config in dhcp-host format"""
"""Get network's hosts config in dhcp-host format."""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@ -555,10 +565,11 @@ def get_dhcp_hosts(context, network_id):
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def update_dhcp(context, network_id):
"""(Re)starts a dnsmasq server for a given network
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
if a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance
"""
network_ref = db.network_get(context, network_id)
@ -573,16 +584,16 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat', "/proc/%d/cmdline" % pid,
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
if conffile in out:
try:
_execute('sudo', 'kill', '-HUP', pid)
return
except Exception as exc: # pylint: disable=W0703
LOG.debug(_("Hupping dnsmasq threw %s"), exc)
LOG.debug(_('Hupping dnsmasq threw %s'), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@ -625,18 +636,18 @@ interface %s
try:
_execute('sudo', 'kill', pid)
except Exception as exc: # pylint: disable=W0703
LOG.debug(_("killing radvd threw %s"), exc)
LOG.debug(_('killing radvd threw %s'), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
command = _ra_cmd(network_ref)
_execute(*command)
db.network_update(context, network_id,
{"gateway_v6":
{'gateway_v6':
utils.get_my_linklocal(network_ref['bridge'])})
def _host_lease(fixed_ip_ref):
"""Return a host string for an address in leasefile format"""
"""Return a host string for an address in leasefile format."""
instance_ref = fixed_ip_ref['instance']
if instance_ref['updated_at']:
timestamp = instance_ref['updated_at']
@ -645,39 +656,39 @@ def _host_lease(fixed_ip_ref):
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time,
return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
instance_ref['mac_address'],
fixed_ip_ref['address'],
instance_ref['hostname'] or '*')
def _host_dhcp(fixed_ip_ref):
"""Return a host string for an address in dhcp-host format"""
"""Return a host string for an address in dhcp-host format."""
instance_ref = fixed_ip_ref['instance']
return "%s,%s.%s,%s" % (instance_ref['mac_address'],
return '%s,%s.%s,%s' % (instance_ref['mac_address'],
instance_ref['hostname'],
FLAGS.dhcp_domain,
fixed_ip_ref['address'])
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network"""
"""Wrapper around utils._execute for fake_network."""
if FLAGS.fake_network:
LOG.debug("FAKE NET: %s", " ".join(map(str, cmd)))
return "fake", 0
LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def _device_exists(device):
"""Check if ethernet device exists"""
"""Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False)
return not err
def _dnsmasq_cmd(net):
"""Builds dnsmasq command"""
"""Builds dnsmasq command."""
cmd = ['sudo', '-E', 'dnsmasq',
'--strict-order',
'--bind-interfaces',
@ -696,7 +707,7 @@ def _dnsmasq_cmd(net):
def _ra_cmd(net):
"""Builds radvd command"""
"""Builds radvd command."""
cmd = ['sudo', '-E', 'radvd',
# '-u', 'nobody',
'-C', '%s' % _ra_file(net['bridge'], 'conf'),
@ -705,44 +716,43 @@ def _ra_cmd(net):
def _stop_dnsmasq(network):
"""Stops the dnsmasq instance for a given network"""
"""Stops the dnsmasq instance for a given network."""
pid = _dnsmasq_pid_for(network)
if pid:
try:
_execute('sudo', 'kill', '-TERM', pid)
except Exception as exc: # pylint: disable=W0703
LOG.debug(_("Killing dnsmasq threw %s"), exc)
LOG.debug(_('Killing dnsmasq threw %s'), exc)
def _dhcp_file(bridge, kind):
"""Return path to a pid, leases or conf file for a bridge"""
"""Return path to a pid, leases or conf file for a bridge."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path,
return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
bridge,
kind))
def _ra_file(bridge, kind):
"""Return path to a pid or conf file for a bridge"""
"""Return path to a pid or conf file for a bridge."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path,
return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
bridge,
kind))
def _dnsmasq_pid_for(bridge):
"""Returns the pid for prior dnsmasq instance for a bridge
"""Returns the pid for prior dnsmasq instance for a bridge.
Returns None if no pid file exists
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
If machine has rebooted pid might be incorrect (caller should check)
"""
pid_file = _dhcp_file(bridge, 'pid')
if os.path.exists(pid_file):
@ -751,13 +761,13 @@ def _dnsmasq_pid_for(bridge):
def _ra_pid_for(bridge):
"""Returns the pid for prior radvd instance for a bridge
"""Returns the pid for prior radvd instance for a bridge.
Returns None if no pid file exists
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
If machine has rebooted pid might be incorrect (caller should check)
"""
pid_file = _ra_file(bridge, 'pid')
if os.path.exists(pid_file):
@ -766,8 +776,7 @@ def _ra_pid_for(bridge):
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices"""
"""Build commands to add/del ips to bridges/devices."""
cmd = ['sudo', 'ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])

View File

@ -16,8 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Network Hosts are responsible for allocating ips and setting up network.
"""Network Hosts are responsible for allocating ips and setting up network.
There are multiple backend drivers that handle specific types of networking
topologies. All of the network commands are issued to a subclass of
@ -61,6 +60,8 @@ from nova import rpc
LOG = logging.getLogger("nova.network.manager")
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
@ -111,7 +112,9 @@ class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
"""
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
@ -122,9 +125,7 @@ class NetworkManager(manager.SchedulerDependentManager):
*args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
"""Do any initialization for a standalone service."""
self.driver.init_host()
self.driver.ensure_metadata_ip()
# Set up networking for the projects for which we're already
@ -154,11 +155,11 @@ class NetworkManager(manager.SchedulerDependentManager):
self.host,
time)
if num:
LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num)
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
LOG.debug(_("setting network host"), context=context)
LOG.debug(_('setting network host'), context=context)
host = self.db.network_set_host(context,
network_id,
self.host)
@ -224,39 +225,39 @@ class NetworkManager(manager.SchedulerDependentManager):
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug(_("Leasing IP %s"), address, context=context)
LOG.debug(_('Leasing IP %s'), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
raise exception.Error(_("IP %s leased that isn't associated") %
raise exception.Error(_('IP %s leased that is not associated') %
address)
if instance_ref['mac_address'] != mac:
inst_addr = instance_ref['mac_address']
raise exception.Error(_("IP %(address)s leased to bad"
" mac %(inst_addr)s vs %(mac)s") % locals())
raise exception.Error(_('IP %(address)s leased to bad mac'
' %(inst_addr)s vs %(mac)s') % locals())
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
LOG.warn(_("IP %s leased that was already deallocated"), address,
LOG.warn(_('IP %s leased that was already deallocated'), address,
context=context)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
LOG.debug(_("Releasing IP %s"), address, context=context)
LOG.debug(_('Releasing IP %s'), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
raise exception.Error(_("IP %s released that isn't associated") %
raise exception.Error(_('IP %s released that is not associated') %
address)
if instance_ref['mac_address'] != mac:
inst_addr = instance_ref['mac_address']
raise exception.Error(_("IP %(address)s released from"
" bad mac %(inst_addr)s vs %(mac)s") % locals())
raise exception.Error(_('IP %(address)s released from bad mac'
' %(inst_addr)s vs %(mac)s') % locals())
if not fixed_ip_ref['leased']:
LOG.warn(_("IP %s released that was not leased"), address,
LOG.warn(_('IP %s released that was not leased'), address,
context=context)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
@ -286,8 +287,8 @@ class NetworkManager(manager.SchedulerDependentManager):
return self.set_network_host(context, network_ref['id'])
host = rpc.call(context,
FLAGS.network_topic,
{"method": "set_network_host",
"args": {"network_id": network_ref['id']}})
{'method': 'set_network_host',
'args': {'network_id': network_ref['id']}})
return host
def create_networks(self, context, cidr, num_networks, network_size,
@ -302,7 +303,7 @@ class NetworkManager(manager.SchedulerDependentManager):
start = index * network_size
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
cidr = "%s/%s" % (fixed_net[start], significant_bits)
cidr = '%s/%s' % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr)
net = {}
net['bridge'] = FLAGS.flat_network_bridge
@ -313,13 +314,13 @@ class NetworkManager(manager.SchedulerDependentManager):
net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2])
if num_networks > 1:
net['label'] = "%s_%d" % (label, count)
net['label'] = '%s_%d' % (label, count)
else:
net['label'] = label
count += 1
if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
project_net_v6 = IPy.IP(cidr_v6)
@ -386,13 +387,13 @@ class FlatManager(NetworkManager):
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
timeout_fixed_ips = False
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
"""Do any initialization for a standalone service."""
#Fix for bug 723298 - do not call init_host on superclass
#Following code has been copied for NetworkManager.init_host
ctxt = context.get_admin_context()
@ -433,12 +434,11 @@ class FlatDHCPManager(NetworkManager):
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. Otherwise it behaves
like FlatDHCPManager.
"""
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
"""Do any initialization for a standalone service."""
super(FlatDHCPManager, self).init_host()
self.driver.metadata_forward()
@ -490,12 +490,11 @@ class VlanManager(NetworkManager):
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
"""
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
"""Do any initialization for a standalone service."""
super(VlanManager, self).init_host()
self.driver.metadata_forward()
@ -566,7 +565,7 @@ class VlanManager(NetworkManager):
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
@ -600,8 +599,8 @@ class VlanManager(NetworkManager):
return self.set_network_host(context, network_ref['id'])
host = rpc.call(context,
FLAGS.network_topic,
{"method": "set_network_host",
"args": {"network_id": network_ref['id']}})
{'method': 'set_network_host',
'args': {'network_id': network_ref['id']}})
return host

View File

@ -15,9 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements vlans for vmwareapi.
"""
"""Implements vlans for vmwareapi."""
from nova import db
from nova import exception
@ -27,8 +25,10 @@ from nova import utils
from nova.virt.vmwareapi_conn import VMWareAPISession
from nova.virt.vmwareapi import network_utils
LOG = logging.getLogger("nova.network.vmwareapi_net")
FLAGS = flags.FLAGS
flags.DEFINE_string('vlan_interface', 'vmnic0',
'Physical network adapter name in VMware ESX host for '
@ -42,10 +42,10 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
host_username = FLAGS.vmwareapi_host_username
host_password = FLAGS.vmwareapi_host_password
if not host_ip or host_username is None or host_password is None:
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
"connection_type=vmwareapi"))
raise Exception(_('Must specify vmwareapi_host_ip, '
'vmwareapi_host_username '
'and vmwareapi_host_password to use '
'connection_type=vmwareapi'))
session = VMWareAPISession(host_ip, host_username, host_password,
FLAGS.vmwareapi_api_retry_count)
vlan_interface = FLAGS.vlan_interface

View File

@ -15,9 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements vlans, bridges, and iptables rules using linux utilities.
"""
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import os
@ -26,22 +24,24 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt.xenapi_conn import XenAPISession
from nova.virt import xenapi_conn
from nova.virt.xenapi import network_utils
LOG = logging.getLogger("nova.xenapi_net")
FLAGS = flags.FLAGS
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
# Open xenapi session
LOG.debug("ENTERING ensure_vlan_bridge in xenapi net")
LOG.debug('ENTERING ensure_vlan_bridge in xenapi net')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
session = XenAPISession(url, username, password)
session = xenapi_conn.XenAPISession(url, username, password)
# Check whether bridge already exists
# Retrieve network whose name_label is "bridge"
network_ref = network_utils.NetworkHelper.find_network_with_name_label(
@ -50,14 +50,14 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
if network_ref is None:
# If bridge does not exists
# 1 - create network
description = "network for nova bridge %s" % bridge
description = 'network for nova bridge %s' % bridge
network_rec = {'name_label': bridge,
'name_description': description,
'other_config': {}}
network_ref = session.call_xenapi('network.create', network_rec)
# 2 - find PIF for VLAN
expr = 'field "device" = "%s" and \
field "VLAN" = "-1"' % FLAGS.vlan_interface
expr = "field 'device' = '%s' and \
field 'VLAN' = '-1'" % FLAGS.vlan_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIF are ok: we are dealing with a pool

14
nova/notifier/__init__.py Normal file
View File

@ -0,0 +1,14 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

83
nova/notifier/api.py Normal file
View File

@ -0,0 +1,83 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.import datetime
import datetime
import uuid
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('default_notification_level', 'INFO',
'Default notification level for outgoing notifications')
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify(publisher_id, event_type, priority, payload):
"""
Sends a notification using the specified driver
Notify parameters:
publisher_id - the source worker_type.host of the message
event_type - the literal type of event (ex. Instance Creation)
priority - patterned after the enumeration of Python logging levels in
the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
payload - A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id - a UUID representing the id for this notification
timestamp - the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example:
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': datetime.datetime.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities' % priority))
driver = utils.import_object(FLAGS.notification_driver)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(datetime.datetime.utcnow()))
driver.notify(msg)

View File

@ -0,0 +1,34 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from nova import flags
from nova import log as logging
FLAGS = flags.FLAGS
def notify(message):
"""Notifies the recipient of the desired event given the model.
Log notifications using nova's default logging system"""
priority = message.get('priority',
FLAGS.default_notification_level)
priority = priority.lower()
logger = logging.getLogger(
'nova.notification.%s' % message['event_type'])
getattr(logger, priority)(json.dumps(message))

View File

@ -0,0 +1,19 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def notify(message):
"""Notifies the recipient of the desired event given the model"""
pass

View File

@ -0,0 +1,36 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.context
from nova import flags
from nova import rpc
FLAGS = flags.FLAGS
flags.DEFINE_string('notification_topic', 'notifications',
'RabbitMQ topic used for Nova notifications')
def notify(message):
"""Sends a notification to the RabbitMQ"""
context = nova.context.get_admin_context()
priority = message.get('priority',
FLAGS.default_notification_level)
priority = priority.lower()
topic = '%s.%s' % (FLAGS.notification_topic, priority)
rpc.cast(context, topic, message)

View File

@ -52,26 +52,31 @@ def get_quota(context, project_id):
'floating_ips': FLAGS.quota_floating_ips,
'metadata_items': FLAGS.quota_metadata_items}
try:
quota = db.quota_get(context, project_id)
for key in rval.keys():
if quota[key] is not None:
rval[key] = quota[key]
except exception.NotFound:
pass
quota = db.quota_get_all_by_project(context, project_id)
for key in rval.keys():
if key in quota:
rval[key] = quota[key]
return rval
def _get_request_allotment(requested, used, quota):
if quota is None:
return requested
return quota - used
def allowed_instances(context, num_instances, instance_type):
"""Check quota and return min(num_instances, allowed_instances)."""
project_id = context.project_id
context = context.elevated()
num_cores = num_instances * instance_type['vcpus']
used_instances, used_cores = db.instance_data_get_for_project(context,
project_id)
quota = get_quota(context, project_id)
allowed_instances = quota['instances'] - used_instances
allowed_cores = quota['cores'] - used_cores
num_cores = num_instances * instance_type['vcpus']
allowed_instances = _get_request_allotment(num_instances, used_instances,
quota['instances'])
allowed_cores = _get_request_allotment(num_cores, used_cores,
quota['cores'])
allowed_instances = min(allowed_instances,
int(allowed_cores // instance_type['vcpus']))
return min(num_instances, allowed_instances)
@ -81,13 +86,15 @@ def allowed_volumes(context, num_volumes, size):
"""Check quota and return min(num_volumes, allowed_volumes)."""
project_id = context.project_id
context = context.elevated()
size = int(size)
num_gigabytes = num_volumes * size
used_volumes, used_gigabytes = db.volume_data_get_for_project(context,
project_id)
quota = get_quota(context, project_id)
allowed_volumes = quota['volumes'] - used_volumes
allowed_gigabytes = quota['gigabytes'] - used_gigabytes
size = int(size)
num_gigabytes = num_volumes * size
allowed_volumes = _get_request_allotment(num_volumes, used_volumes,
quota['volumes'])
allowed_gigabytes = _get_request_allotment(num_gigabytes, used_gigabytes,
quota['gigabytes'])
allowed_volumes = min(allowed_volumes,
int(allowed_gigabytes // size))
return min(num_volumes, allowed_volumes)
@ -99,7 +106,9 @@ def allowed_floating_ips(context, num_floating_ips):
context = context.elevated()
used_floating_ips = db.floating_ip_count_by_project(context, project_id)
quota = get_quota(context, project_id)
allowed_floating_ips = quota['floating_ips'] - used_floating_ips
allowed_floating_ips = _get_request_allotment(num_floating_ips,
used_floating_ips,
quota['floating_ips'])
return min(num_floating_ips, allowed_floating_ips)
@ -108,8 +117,9 @@ def allowed_metadata_items(context, num_metadata_items):
project_id = context.project_id
context = context.elevated()
quota = get_quota(context, project_id)
num_allowed_metadata_items = quota['metadata_items']
return min(num_metadata_items, num_allowed_metadata_items)
allowed_metadata_items = _get_request_allotment(num_metadata_items, 0,
quota['metadata_items'])
return min(num_metadata_items, allowed_metadata_items)
def allowed_injected_files(context):

View File

@ -228,6 +228,9 @@ class FakeToken(object):
# FIXME(sirp): let's not use id here
id = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id += 1
self.id = FakeToken.id

View File

@ -183,7 +183,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_by_id_v11(self):
def test_get_server_by_id_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@ -246,7 +246,7 @@ class ServersTest(test.TestCase):
self.assertEqual(len(addresses["private"]), 1)
self.assertEqual(addresses["private"][0], private)
def test_get_server_addresses_V10(self):
def test_get_server_addresses_v1_0(self):
private = '192.168.0.3'
public = ['1.2.3.4']
new_return_server = return_server_with_addresses(private, public)
@ -257,7 +257,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict, {
'addresses': {'public': public, 'private': [private]}})
def test_get_server_addresses_xml_V10(self):
def test_get_server_addresses_xml_v1_0(self):
private_expected = "192.168.0.3"
public_expected = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private_expected,
@ -276,7 +276,7 @@ class ServersTest(test.TestCase):
(ip,) = private.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private_expected)
def test_get_server_addresses_public_V10(self):
def test_get_server_addresses_public_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@ -286,7 +286,7 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'public': public})
def test_get_server_addresses_private_V10(self):
def test_get_server_addresses_private_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@ -296,7 +296,7 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'private': [private]})
def test_get_server_addresses_public_xml_V10(self):
def test_get_server_addresses_public_xml_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@ -310,7 +310,7 @@ class ServersTest(test.TestCase):
(ip,) = public_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), public[0])
def test_get_server_addresses_private_xml_V10(self):
def test_get_server_addresses_private_xml_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@ -324,7 +324,7 @@ class ServersTest(test.TestCase):
(ip,) = private_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private)
def test_get_server_by_id_with_addresses_v11(self):
def test_get_server_by_id_with_addresses_v1_1(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@ -354,7 +354,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
def test_get_server_list_v11(self):
def test_get_server_list_v1_1(self):
req = webob.Request.blank('/v1.1/servers')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@ -576,16 +576,16 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_create_instance_v11(self):
def test_create_instance_v1_1(self):
self._setup_for_create_instance()
imageRef = 'http://localhost/v1.1/images/2'
flavorRef = 'http://localhost/v1.1/flavors/3'
image_ref = 'http://localhost/v1.1/images/2'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': imageRef,
'flavorRef': flavorRef,
'imageRef': image_ref,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
@ -605,17 +605,17 @@ class ServersTest(test.TestCase):
self.assertEqual(16, len(server['adminPass']))
self.assertEqual('server_test', server['name'])
self.assertEqual(1, server['id'])
self.assertEqual(flavorRef, server['flavorRef'])
self.assertEqual(imageRef, server['imageRef'])
self.assertEqual(flavor_ref, server['flavorRef'])
self.assertEqual(image_ref, server['imageRef'])
self.assertEqual(res.status_int, 200)
def test_create_instance_v11_bad_href(self):
def test_create_instance_v1_1_bad_href(self):
self._setup_for_create_instance()
imageRef = 'http://localhost/v1.1/images/asdf'
flavorRef = 'http://localhost/v1.1/flavors/3'
image_ref = 'http://localhost/v1.1/images/asdf'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = dict(server=dict(
name='server_test', imageRef=imageRef, flavorRef=flavorRef,
name='server_test', imageRef=image_ref, flavorRef=flavor_ref,
metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.1/servers')
@ -625,17 +625,17 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_create_instance_v11_local_href(self):
def test_create_instance_v1_1_local_href(self):
self._setup_for_create_instance()
imageRef = 'http://localhost/v1.1/images/2'
imageRefLocal = '2'
flavorRef = 'http://localhost/v1.1/flavors/3'
image_ref = 'http://localhost/v1.1/images/2'
image_ref_local = '2'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': imageRefLocal,
'flavorRef': flavorRef,
'imageRef': image_ref_local,
'flavorRef': flavor_ref,
},
}
@ -648,11 +648,11 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(1, server['id'])
self.assertEqual(flavorRef, server['flavorRef'])
self.assertEqual(imageRef, server['imageRef'])
self.assertEqual(flavor_ref, server['flavorRef'])
self.assertEqual(image_ref, server['imageRef'])
self.assertEqual(res.status_int, 200)
def test_create_instance_with_admin_pass_v10(self):
def test_create_instance_with_admin_pass_v1_0(self):
self._setup_for_create_instance()
body = {
@ -673,16 +673,16 @@ class ServersTest(test.TestCase):
self.assertNotEqual(res['server']['adminPass'],
body['server']['adminPass'])
def test_create_instance_with_admin_pass_v11(self):
def test_create_instance_with_admin_pass_v1_1(self):
self._setup_for_create_instance()
imageRef = 'http://localhost/v1.1/images/2'
flavorRef = 'http://localhost/v1.1/flavors/3'
image_ref = 'http://localhost/v1.1/images/2'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': imageRef,
'flavorRef': flavorRef,
'imageRef': image_ref,
'flavorRef': flavor_ref,
'adminPass': 'testpass',
},
}
@ -695,16 +695,16 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(server['adminPass'], body['server']['adminPass'])
def test_create_instance_with_empty_admin_pass_v11(self):
def test_create_instance_with_empty_admin_pass_v1_1(self):
self._setup_for_create_instance()
imageRef = 'http://localhost/v1.1/images/2'
flavorRef = 'http://localhost/v1.1/flavors/3'
image_ref = 'http://localhost/v1.1/images/2'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': imageRef,
'flavorRef': flavorRef,
'imageRef': image_ref,
'flavorRef': flavor_ref,
'adminPass': '',
},
}
@ -758,7 +758,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_update_server_v10(self):
def test_update_server_v1_0(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
@ -781,7 +781,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
def test_update_server_adminPass_ignored_v11(self):
def test_update_server_adminPass_ignored_v1_1(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
@ -822,7 +822,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 501)
def test_server_backup_schedule_deprecated_v11(self):
def test_server_backup_schedule_deprecated_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1/backup_schedule')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
@ -1113,7 +1113,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_accepted_minimum_v11(self):
def test_server_rebuild_accepted_minimum_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@ -1128,7 +1128,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_server_rebuild_rejected_when_building_v11(self):
def test_server_rebuild_rejected_when_building_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@ -1147,7 +1147,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 409)
def test_server_rebuild_accepted_with_metadata_v11(self):
def test_server_rebuild_accepted_with_metadata_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@ -1165,7 +1165,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_server_rebuild_accepted_with_bad_metadata_v11(self):
def test_server_rebuild_accepted_with_bad_metadata_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@ -1181,7 +1181,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_bad_entity_v11(self):
def test_server_rebuild_bad_entity_v1_1(self):
body = {
"rebuild": {
"imageId": 2,
@ -1196,7 +1196,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_bad_personality_v11(self):
def test_server_rebuild_bad_personality_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@ -1215,7 +1215,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_server_rebuild_personality_v11(self):
def test_server_rebuild_personality_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@ -1654,6 +1654,19 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
request = self.deserializer.deserialize(serial_request)
self.assertEqual(request, expected)
def test_request_xmlser_with_flavor_image_ref(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"
imageRef="http://localhost:8774/v1.1/images/1"
flavorRef="http://localhost:8774/v1.1/flavors/1">
</server>"""
request = self.deserializer.deserialize(serial_request)
self.assertEquals(request["server"]["flavorRef"],
"http://localhost:8774/v1.1/flavors/1")
self.assertEquals(request["server"]["imageRef"],
"http://localhost:8774/v1.1/images/1")
class TestServerInstanceCreation(test.TestCase):

View File

@ -124,7 +124,6 @@ def stub_out_db_instance_api(stubs, injected=True):
return FakeModel(vlan_network_fields)
else:
return FakeModel(flat_network_fields)
return FakeModel(network_fields)
def fake_network_get_all_by_instance(context, instance_id):
# Even instance numbers are on vlan networks

View File

@ -25,6 +25,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import test
from nova import utils
@ -117,15 +118,15 @@ class NetworkTestCase(test.TestCase):
context.get_admin_context(),
instance_ref['id'])
self.assertEqual(instance_ref['mac_address'],
utils.to_mac(address_v6))
ipv6.to_mac(address_v6))
instance_ref2 = db.fixed_ip_get_instance_v6(
context.get_admin_context(),
address_v6)
self.assertEqual(instance_ref['id'], instance_ref2['id'])
self.assertEqual(address_v6,
utils.to_global_ipv6(
network_ref['cidr_v6'],
instance_ref['mac_address']))
ipv6.to_global(network_ref['cidr_v6'],
instance_ref['mac_address'],
'test'))
self._deallocate_address(0, address)
db.instance_destroy(context.get_admin_context(),
instance_ref['id'])

View File

@ -0,0 +1 @@
1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df

View File

@ -0,0 +1 @@
ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk

View File

@ -354,45 +354,40 @@ class CloudTestCase(test.TestCase):
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
def test_import_public_key(self):
# test when user provides all values
result1 = self.cloud.import_public_key(self.context,
'testimportkey1',
'mytestpubkey',
'mytestfprint')
self.assertTrue(result1)
keydata = db.key_pair_get(self.context,
self.context.user.id,
'testimportkey1')
self.assertEqual('mytestpubkey', keydata['public_key'])
self.assertEqual('mytestfprint', keydata['fingerprint'])
# test when user omits fingerprint
pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
f = open(pubkey_path + '/dummy.pub', 'r')
dummypub = f.readline().rstrip()
f.close
f = open(pubkey_path + '/dummy.fingerprint', 'r')
dummyfprint = f.readline().rstrip()
f.close
result2 = self.cloud.import_public_key(self.context,
'testimportkey2',
dummypub)
self.assertTrue(result2)
keydata = db.key_pair_get(self.context,
self.context.user.id,
'testimportkey2')
self.assertEqual(dummypub, keydata['public_key'])
self.assertEqual(dummyfprint, keydata['fingerprint'])
def test_delete_key_pair(self):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
if FLAGS.connection_type == 'fake':
LOG.debug(_("Can't test instances without a real virtual env."))
return
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
# TODO: check for proper response
instance_id = rv['reservationSet'][0].keys()[0]
instance = rv['reservationSet'][0][instance_id][0]
LOG.debug(_("Need to watch instance %s until it's running..."),
instance['instance_id'])
while True:
greenthread.sleep(1)
info = self.cloud._get_instance(instance['instance_id'])
LOG.debug(info['state'])
if info['state'] == power_state.RUNNING:
break
self.assert_(rv)
if FLAGS.connection_type != 'fake':
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
# LOG.debug(reservations[res_id])
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
instance_id = instance['instance_id']
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
def test_terminate_instances(self):
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,

View File

@ -334,6 +334,28 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
def test_finish_resize(self):
"""Contrived test to ensure finish_resize doesn't raise anything"""
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_resize', fake)
context = self.context.elevated()
instance_id = self._create_instance()
self.compute.prep_resize(context, instance_id, 1)
migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating')
try:
self.compute.finish_resize(context, instance_id,
int(migration_ref['id']), {})
except KeyError, e:
# Only catch key errors. We want other reasons for the test to
# fail to actually error out so we don't obscure anything
self.fail()
self.compute.terminate_instance(self.context, instance_id)
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()

60
nova/tests/test_ipv6.py Normal file
View File

@ -0,0 +1,60 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for IPv6."""
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import test
LOG = logging.getLogger('nova.tests.test_ipv6')
FLAGS = flags.FLAGS
import sys
class IPv6RFC2462TestCase(test.TestCase):
"""Unit tests for IPv6 rfc2462 backend operations."""
def setUp(self):
super(IPv6RFC2462TestCase, self).setUp()
self.flags(ipv6_backend='rfc2462')
ipv6.reset_backend()
def test_to_global(self):
addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
self.assertEquals(addr, '2001:db8::16:3eff:fe33:4455')
def test_to_mac(self):
mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455')
self.assertEquals(mac, '00:16:3e:33:44:55')
class IPv6AccountIdentiferTestCase(test.TestCase):
"""Unit tests for IPv6 account_identifier backend operations."""
def setUp(self):
super(IPv6AccountIdentiferTestCase, self).setUp()
self.flags(ipv6_backend='account_identifier')
ipv6.reset_backend()
def test_to_global(self):
addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
self.assertEquals(addr, '2001:db8::a94a:8fe5:ff33:4455')
def test_to_mac(self):
mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455')
self.assertEquals(mac, '02:16:3e:33:44:55')

117
nova/tests/test_notifier.py Normal file
View File

@ -0,0 +1,117 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova
from nova import context
from nova import flags
from nova import rpc
import nova.notifier.api
from nova.notifier.api import notify
from nova.notifier import no_op_notifier
from nova.notifier import rabbit_notifier
from nova import test
import stubout
class NotifierTestCase(test.TestCase):
"""Test case for notifications"""
def setUp(self):
super(NotifierTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.stubs.UnsetAll()
super(NotifierTestCase, self).tearDown()
def test_send_notification(self):
self.notify_called = False
def mock_notify(cls, *args):
self.notify_called = True
self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
mock_notify)
class Mock(object):
pass
notify('publisher_id', 'event_type',
nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.notify_called, True)
def test_verify_message_format(self):
"""A test to ensure changing the message format is prohibitively
annoying"""
def message_assert(message):
fields = [('publisher_id', 'publisher_id'),
('event_type', 'event_type'),
('priority', 'WARN'),
('payload', dict(a=3))]
for k, v in fields:
self.assertEqual(message[k], v)
self.assertTrue(len(message['message_id']) > 0)
self.assertTrue(len(message['timestamp']) > 0)
self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
message_assert)
notify('publisher_id', 'event_type',
nova.notifier.api.WARN, dict(a=3))
def test_send_rabbit_notification(self):
self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
'nova.notifier.rabbit_notifier')
self.mock_cast = False
def mock_cast(cls, *args):
self.mock_cast = True
class Mock(object):
pass
self.stubs.Set(nova.rpc, 'cast', mock_cast)
notify('publisher_id', 'event_type',
nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.mock_cast, True)
def test_invalid_priority(self):
def mock_cast(cls, *args):
pass
class Mock(object):
pass
self.stubs.Set(nova.rpc, 'cast', mock_cast)
self.assertRaises(nova.notifier.api.BadPriorityException,
notify, 'publisher_id',
'event_type', 'not a priority', dict(a=3))
def test_rabbit_priority_queue(self):
self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
'nova.notifier.rabbit_notifier')
self.stubs.Set(nova.flags.FLAGS, 'notification_topic',
'testnotify')
self.test_topic = None
def mock_cast(context, topic, msg):
self.test_topic = topic
self.stubs.Set(nova.rpc, 'cast', mock_cast)
notify('publisher_id',
'event_type', 'DEBUG', dict(a=3))
self.assertEqual(self.test_topic, 'testnotify.debug')

View File

@ -96,12 +96,11 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
db.quota_create(self.context, self.project.id, 'instances', 10)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
db.quota_create(self.context, self.project.id, 'cores', 100)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 10)
@ -111,13 +110,85 @@ class QuotaTestCase(test.TestCase):
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
db.quota_update(self.context, self.project.id, {'metadata_items': 5})
db.quota_create(self.context, self.project.id, 'metadata_items', 5)
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, 5)
# Cleanup
db.quota_destroy(self.context, self.project.id)
db.quota_destroy_all_by_project(self.context, self.project.id)
def test_unlimited_instances(self):
FLAGS.quota_instances = 2
FLAGS.quota_cores = 1000
instance_type = self._get_instance_type('m1.small')
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 2)
db.quota_create(self.context, self.project.id, 'instances', None)
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 100)
num_instances = quota.allowed_instances(self.context, 101,
instance_type)
self.assertEqual(num_instances, 101)
def test_unlimited_cores(self):
FLAGS.quota_instances = 1000
FLAGS.quota_cores = 2
instance_type = self._get_instance_type('m1.small')
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 2)
db.quota_create(self.context, self.project.id, 'cores', None)
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 100)
num_instances = quota.allowed_instances(self.context, 101,
instance_type)
self.assertEqual(num_instances, 101)
def test_unlimited_volumes(self):
FLAGS.quota_volumes = 10
FLAGS.quota_gigabytes = 1000
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 10)
db.quota_create(self.context, self.project.id, 'volumes', None)
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 100)
volumes = quota.allowed_volumes(self.context, 101, 1)
self.assertEqual(volumes, 101)
def test_unlimited_gigabytes(self):
FLAGS.quota_volumes = 1000
FLAGS.quota_gigabytes = 10
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 10)
db.quota_create(self.context, self.project.id, 'gigabytes', None)
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 100)
volumes = quota.allowed_volumes(self.context, 101, 1)
self.assertEqual(volumes, 101)
def test_unlimited_floating_ips(self):
    """A NULL 'floating_ips' quota row lifts the flag-based cap."""
    FLAGS.quota_floating_ips = 10

    def allowed(requested):
        return quota.allowed_floating_ips(self.context, requested)

    self.assertEqual(allowed(100), 10)
    # None in the quota table means "unlimited" for this project.
    db.quota_create(self.context, self.project.id, 'floating_ips', None)
    self.assertEqual(allowed(100), 100)
    self.assertEqual(allowed(101), 101)
def test_unlimited_metadata_items(self):
    """A NULL 'metadata_items' quota row lifts the flag-based cap."""
    FLAGS.quota_metadata_items = 10

    def allowed(requested):
        return quota.allowed_metadata_items(self.context, requested)

    self.assertEqual(allowed(100), 10)
    # None in the quota table means "unlimited" for this project.
    db.quota_create(self.context, self.project.id, 'metadata_items', None)
    self.assertEqual(allowed(100), 100)
    self.assertEqual(allowed(101), 101)
def test_too_many_instances(self):
instance_ids = []

View File

@ -642,7 +642,7 @@ class LibvirtConnTestCase(test.TestCase):
try:
conn.spawn(instance, network_info)
except Exception, e:
count = (0 <= e.message.find('Unexpected method call'))
count = (0 <= str(e.message).find('Unexpected method call'))
self.assertTrue(count)
@ -849,7 +849,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def multinic_iptables_test(self):
def test_multinic_iptables(self):
ipv4_rules_per_network = 2
ipv6_rules_per_network = 3
networks_count = 5
@ -869,6 +869,16 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw,
'add_filters_for_instance',
use_mock_anything=True)
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg())
self.fw.instances[instance_ref['id']] = instance_ref
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
class NWFilterTestCase(test.TestCase):
def setUp(self):

View File

@ -306,26 +306,6 @@ def get_my_linklocal(interface):
" :%(ex)s") % locals())
def to_global_ipv6(prefix, mac):
try:
mac64 = netaddr.EUI(mac).eui64().words
int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
mac64_addr = netaddr.IPAddress(int_addr)
maskIP = netaddr.IPNetwork(prefix).ip
return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
format()
except TypeError:
raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
def to_mac(ipv6_address):
address = netaddr.IPAddress(ipv6_address)
mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
mask2 = netaddr.IPAddress('::0200:0:0:0')
mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]])
def utcnow():
"""Overridable version of datetime.datetime.utcnow."""
if utcnow.override_time:

View File

@ -81,34 +81,36 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
else:
mapped_device = device
# We can only loopback mount raw images. If the device isn't there,
# it's normally because it's a .vmdk or a .vdi etc
if not os.path.exists(mapped_device):
raise exception.Error('Mapped device was not found (we can'
' only inject raw disk images): %s' %
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
out, err = utils.execute('sudo', 'tune2fs',
'-c', 0, '-i', 0, mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = utils.execute(
'sudo', 'mount', mapped_device, tmpdir)
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
# We can only loopback mount raw images. If the device isn't there,
# it's normally because it's a .vmdk or a .vdi etc
if not os.path.exists(mapped_device):
raise exception.Error('Mapped device was not found (we can'
' only inject raw disk images): %s' %
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
out, err = utils.execute('sudo', 'tune2fs',
'-c', 0, '-i', 0, mapped_device)
tmpdir = tempfile.mkdtemp()
try:
inject_data_into_fs(tmpdir, key, net, utils.execute)
# mount loopback to dir
out, err = utils.execute(
'sudo', 'mount', mapped_device, tmpdir)
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
try:
inject_data_into_fs(tmpdir, key, net, utils.execute)
finally:
# unmount device
utils.execute('sudo', 'umount', mapped_device)
finally:
# unmount device
utils.execute('sudo', 'umount', mapped_device)
# remove temporary directory
utils.execute('rmdir', tmpdir)
finally:
# remove temporary directory
utils.execute('rmdir', tmpdir)
if not partition is None:
# remove partitions
utils.execute('sudo', 'kpartx', '-d', device)

View File

@ -57,6 +57,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import utils
from nova import vnc
@ -184,8 +185,9 @@ def _get_network_info(instance):
def ip6_dict():
prefix = network['cidr_v6']
mac = instance['mac_address']
project_id = instance['project_id']
return {
'ip': utils.to_global_ipv6(prefix, mac),
'ip': ipv6.to_global(prefix, mac, project_id),
'netmask': network['netmask_v6'],
'enabled': '1'}
@ -1612,7 +1614,9 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
def refresh_security_group_rules(self,
security_group_id,
network_info=None):
"""Refresh security group rules from data store
Gets called when a rule has been added to or removed from
@ -1911,7 +1915,9 @@ class NWFilterFirewall(FirewallDriver):
self._define_filter(self._filter_container(filter_name,
filter_children))
def refresh_security_group_rules(self, security_group_id):
def refresh_security_group_rules(self,
security_group_id,
network_info=None):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
@ -2169,15 +2175,19 @@ class IptablesFirewallDriver(FirewallDriver):
def refresh_security_group_members(self, security_group):
pass
def refresh_security_group_rules(self, security_group):
self.do_refresh_security_group_rules(security_group)
def refresh_security_group_rules(self, security_group, network_info=None):
self.do_refresh_security_group_rules(security_group, network_info)
self.iptables.apply()
@utils.synchronized('iptables', external=True)
def do_refresh_security_group_rules(self, security_group):
def do_refresh_security_group_rules(self,
security_group,
network_info=None):
for instance in self.instances.values():
self.remove_filters_for_instance(instance)
self.add_filters_for_instance(instance)
if not network_info:
network_info = _get_network_info(instance)
self.add_filters_for_instance(instance, network_info)
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)

View File

@ -48,6 +48,8 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')
flags.DEFINE_integer('block_device_creation_timeout', 10,
'time to wait for a block device to be created')
flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024,
'maximum size in bytes of kernel or ramdisk images')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@ -444,6 +446,12 @@ class VMHelper(HelperBase):
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif image_type == ImageType.KERNEL_RAMDISK and \
vdi_size > FLAGS.max_kernel_ramdisk_size:
max_size = FLAGS.max_kernel_ramdisk_size
raise exception.Error(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") % locals())
name_label = get_name_label_for_image(image)
vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)

View File

@ -25,15 +25,15 @@ import M2Crypto
import os
import pickle
import subprocess
import tempfile
import uuid
from nova import db
from nova import context
from nova import log as logging
from nova import db
from nova import exception
from nova import utils
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import power_state
@ -808,8 +808,9 @@ class VMOps(object):
def ip6_dict():
return {
"ip": utils.to_global_ipv6(network['cidr_v6'],
instance['mac_address']),
"ip": ipv6.to_global(network['cidr_v6'],
instance['mac_address'],
instance['project_id']),
"netmask": network['netmask_v6'],
"enabled": "1"}
@ -1161,18 +1162,17 @@ class SimpleDH(object):
return mpi
def _run_ssl(self, text, which):
base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc '
'-a -pass pass:%(shared)s -nosalt %(dec_flag)s')
base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s '
'-nosalt %(dec_flag)s')
if which.lower()[0] == 'd':
dec_flag = ' -d'
else:
dec_flag = ''
fd, tmpfile = tempfile.mkstemp()
os.close(fd)
file(tmpfile, 'w').write(text)
shared = self._shared
cmd = base_cmd % locals()
proc = _runproc(cmd)
proc.stdin.write(text)
proc.stdin.close()
proc.wait()
err = proc.stderr.read()
if err:

View File

@ -169,15 +169,15 @@ class XenAPIConnection(driver.ComputeDriver):
def __init__(self, url, user, pw):
super(XenAPIConnection, self).__init__()
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
self._session = XenAPISession(url, user, pw)
self._vmops = VMOps(self._session)
self._volumeops = VolumeOps(self._session)
self._host_state = None
@property
def HostState(self):
if not self._host_state:
self._host_state = HostState(self.session)
self._host_state = HostState(self._session)
return self._host_state
def init_host(self, host):

View File

@ -0,0 +1,183 @@
#!/usr/bin/env python
# Copyright 2011 OpenStack LLC.
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# XenAPI plugin for reading/writing information to xenstore
#
try:
import json
except ImportError:
import simplejson as json
import os
import random
import re
import subprocess
import tempfile
import time
import XenAPIPlugin
from pluginlib_nova import *
configure_logging("xenhost")
host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
def jsonify(fnc):
    """Decorator that JSON-encodes the wrapped function's return value.

    XenAPI plugin entry points must hand a string back to the dispatcher,
    so each handler is wrapped to serialize its result with json.dumps.
    """
    def wrapper(*args, **kwargs):
        result = fnc(*args, **kwargs)
        return json.dumps(result)
    return wrapper
class TimeoutError(StandardError):
    # Plugin-specific timeout exception. NOTE(review): no raiser is visible
    # in this module -- presumably raised by command-running helpers when a
    # host command overruns; confirm against callers before removing.
    pass
def _run_command(cmd):
"""Abstracts out the basics of issuing system commands. If the command
returns anything in stderr, a PluginError is raised with that information.
Otherwise, the output from stdout is returned.
"""
pipe = subprocess.PIPE
proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True)
proc.wait()
err = proc.stderr.read()
if err:
raise pluginlib.PluginError(err)
return proc.stdout.read()
@jsonify
def host_data(self, arg_dict):
    """Plugin entry point: gather and return this host's status data.

    Looks up the host's uuid via `xe host-list`, dumps every host
    parameter with `xe host-param-list`, then reduces that raw listing
    to the dict of fields nova consumes.
    """
    uuid_listing = _run_command("xe host-list | grep uuid")
    host_uuid = uuid_listing.split(":")[-1].strip()
    raw_params = _run_command("xe host-param-list uuid=%s" % host_uuid)
    # Parse the raw key/value dump, then keep only the needed fields
    # converted to their proper types.
    parsed_data = parse_response(raw_params)
    return cleanup(parsed_data)
def parse_response(resp):
    """Turn `xe host-param-list` output into a {param: value} dict.

    Lines that do not look like "name ( flags ) : value" are silently
    skipped, as are blank lines.
    """
    parsed = {}
    for line in resp.splitlines():
        if not line:
            continue
        match = host_data_pattern.match(line.strip())
        if match is None:
            # Not a parameter line; ignore it.
            continue
        key, value = match.groups()
        parsed[key] = value
    return parsed
def cleanup(dct):
    """Filter the raw host-param key/value pairs down to the fields nova
    cares about, converting string values to their proper types.

    Several additional host parameters (API version, license server,
    software version, capabilities, ...) are intentionally left out.
    """
    def safe_int(val):
        # Numeric parameters arrive as strings; blanks (and anything
        # non-numeric) become None.
        try:
            return int(val)
        except ValueError:
            return None

    def strip_kv(ln):
        # Split a "key: value" pair once and trim both sides.
        return [val.strip() for val in ln.split(":", 1)]

    out = {}

    # Memory figures, converted from strings to ints (or None).
    mem = {}
    mem["total"] = safe_int(dct.get("memory-total", ""))
    mem["overhead"] = safe_int(dct.get("memory-overhead", ""))
    mem["free"] = safe_int(dct.get("memory-free", ""))
    mem["free-computed"] = safe_int(dct.get("memory-free-computed", ""))
    out["host_memory"] = mem

    # Identity fields are passed through as-is.
    out["host_uuid"] = dct.get("uuid", None)
    out["host_name-label"] = dct.get("name-label", "")
    out["host_name-description"] = dct.get("name-description", "")
    out["host_hostname"] = dct.get("hostname", "")
    out["host_ip_address"] = dct.get("address", "")

    # "other-config" is a '; '-separated list of colon-delimited pairs.
    other_config = {}
    oc_raw = dct.get("other-config", "")
    if oc_raw:
        for field in oc_raw.split("; "):
            key, val = strip_kv(field)
            other_config[key] = val
    out["host_other-config"] = other_config

    # "cpu_info" uses the same encoding; a handful of its fields are
    # known to be integral and get converted.
    cpu_info = {}
    cpu_raw = dct.get("cpu_info", "")
    if cpu_raw:
        for field in cpu_raw.split("; "):
            key, val = strip_kv(field)
            if key in ("cpu_count", "family", "model", "stepping"):
                cpu_info[key] = safe_int(val)
            else:
                cpu_info[key] = val
    out["host_cpu_info"] = cpu_info

    return out
if __name__ == "__main__":
    # Register this plugin's entry points with the XenAPI dispatcher;
    # "host_data" is the only exported call.
    XenAPIPlugin.dispatch(
        {"host_data": host_data})

View File

@ -59,7 +59,13 @@ function run_tests {
function run_pep8 {
echo "Running pep8 ..."
# Opt-out files from pep8
ignore_scripts="*.sh:*nova-debug:*clean-vlans"
ignore_files="*eventlet-patch:*pip-requires"
ignore_dirs="*ajaxterm*"
GLOBIGNORE="$ignore_scripts:$ignore_files:$ignore_dirs"
srcfiles=`find bin -type f ! -name "nova.conf*"`
srcfiles+=" `find tools/*`"
srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles}
}

View File

@ -31,119 +31,125 @@ import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
PY_VERSION = "python" + str(sys.version_info[0]) + '.' + str(sys.version_info[1])
TWISTED_NOVA = 'http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
def die(message, *args):
print >>sys.stderr, message % args
sys.exit(1)
print >>sys.stderr, message % args
sys.exit(1)
def check_python_version():
if sys.version_info < (2, 6):
die("Need Python Version >= 2.6")
if sys.version_info < (2, 6):
die("Need Python Version >= 2.6")
def run_command(cmd, redirect_output=True, check_exit_code=True):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip())
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
print 'not found.'
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
if not (run_command(['which', 'easy_install']) and
run_command(['easy_install', 'virtualenv'])):
die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
' please install it using your favorite package management tool')
print 'done.'
print 'done.'
if not HAS_VIRTUALENV:
print 'not found.'
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
if not (run_command(['which', 'easy_install']) and
run_command(['easy_install', 'virtualenv'])):
die('ERROR: virtualenv not found.\n\nNova development'
' requires virtualenv, please install it using your'
' favorite package management tool')
print 'done.'
print 'done.'
def create_virtualenv(venv=VENV):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
die("Failed to install pip.")
print 'done.'
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
die("Failed to install pip.")
print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
# Install greenlet by hand - just listing it in the requires file does not
# get it in stalled in the right order
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, 'greenlet'],
redirect_output=False)
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
redirect_output=False)
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA],
redirect_output=False)
print 'Installing dependencies with pip (this can take a while)...'
# Install greenlet by hand - just listing it in the requires file does not
# get it in stalled in the right order
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv,
'greenlet'], redirect_output=False)
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r',
PIP_REQUIRES], redirect_output=False)
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv,
TWISTED_NOVA], redirect_output=False)
# Tell the virtual env how to "import nova"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "nova.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
# Patch eventlet (see FAQ # 1485)
patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch')
patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "eventlet",
"green", "subprocess.py")
patch_cmd = "patch %s %s" % (patchfile, patchsrc)
os.system(patch_cmd)
# Tell the virtual env how to "import nova"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"nova.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
# Patch eventlet (see FAQ # 1485)
patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch')
patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"eventlet", "green", "subprocess.py")
patch_cmd = "patch %s %s" % (patchfile, patchsrc)
os.system(patch_cmd)
def print_help():
help = """
Nova development environment setup is complete.
help = """
Nova development environment setup is complete.
Nova development uses virtualenv to track and manage Python dependencies
while in development and testing.
Nova development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Nova virtualenv for the extent of your current shell session
you can run:
To activate the Nova virtualenv for the extent of your current shell
session you can run:
$ source .nova-venv/bin/activate
$ source .nova-venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
Also, make test will automatically use the virtualenv.
"""
print help
def main(argv):
check_python_version()
check_dependencies()
create_virtualenv()
install_dependencies()
print_help()
check_python_version()
check_dependencies()
create_virtualenv()
install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
main(sys.argv)