commit 7c68bb8172
merging trunk
@@ -31,6 +31,7 @@ Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Isaku Yamahata <yamahata@valinux.co.jp>
Jason Cannavale <jason.cannavale@rackspace.com>
Jason Koelker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
@@ -96,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('gateway_v6', 'nova.network.manager')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
@@ -544,13 +545,10 @@ class FloatingIpCommands(object):
class NetworkCommands(object):
    """Class for managing networks."""

    def create(self, fixed_range=None, num_networks=None,
               network_size=None, vlan_start=None,
               vpn_start=None, fixed_range_v6=None, label='public'):
        """Creates fixed ips for host by range
        arguments: fixed_range=FLAG, [num_networks=FLAG],
        [network_size=FLAG], [vlan_start=FLAG],
        [vpn_start=FLAG], [fixed_range_v6=FLAG]"""
    def create(self, fixed_range=None, num_networks=None, network_size=None,
               vlan_start=None, vpn_start=None, fixed_range_v6=None,
               gateway_v6=None, label='public'):
        """Creates fixed ips for host by range"""
        if not fixed_range:
            msg = _('Fixed range in the form of 10.0.0.0/8 is '
                    'required to create networks.')

@@ -566,6 +564,8 @@ class NetworkCommands(object):
            vpn_start = FLAGS.vpn_start
        if not fixed_range_v6:
            fixed_range_v6 = FLAGS.fixed_range_v6
        if not gateway_v6:
            gateway_v6 = FLAGS.gateway_v6
        net_manager = utils.import_object(FLAGS.network_manager)
        try:
            net_manager.create_networks(context.get_admin_context(),

@@ -575,6 +575,7 @@ class NetworkCommands(object):
                                        vlan_start=int(vlan_start),
                                        vpn_start=int(vpn_start),
                                        cidr_v6=fixed_range_v6,
                                        gateway_v6=gateway_v6,
                                        label=label)
        except ValueError, e:
            print e
@@ -324,7 +324,7 @@ class Limited(object):

    def __init__(self, proxy):
        self._proxy = proxy
        if not self.__doc__:
        if not self.__doc__:  # pylint: disable=E0203
            self.__doc__ = proxy.__doc__
        if not self._allowed:
            self._allowed = []
@@ -242,6 +242,7 @@ class Authorizer(wsgi.Middleware):
            'CreateKeyPair': ['all'],
            'DeleteKeyPair': ['all'],
            'DescribeSecurityGroups': ['all'],
            'ImportPublicKey': ['all'],
            'AuthorizeSecurityGroupIngress': ['netadmin'],
            'RevokeSecurityGroupIngress': ['netadmin'],
            'CreateSecurityGroup': ['netadmin'],
@@ -324,7 +324,3 @@ class AdminController(object):
            rv.append(host_dict(host, compute, instances, volume, volumes,
                                now))
        return {'hosts': rv}

    def describe_host(self, _context, name, **_kwargs):
        """Returns status info for single node."""
        return host_dict(db.host_get(name))
@@ -39,6 +39,7 @@ from nova import flags
from nova import ipv6
from nova import log as logging
from nova import network
from nova import rpc
from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils

@@ -136,6 +137,13 @@ class CloudController(object):
                return services[0]['availability_zone']
        return 'unknown zone'

    def _get_image_state(self, image):
        # NOTE(vish): fallback status if image_state isn't set
        state = image.get('status')
        if state == 'active':
            state = 'available'
        return image['properties'].get('image_state', state)

    def get_metadata(self, address):
        ctxt = context.get_admin_context()
        instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
@@ -865,8 +873,14 @@ class CloudController(object):

    def allocate_address(self, context, **kwargs):
        LOG.audit(_("Allocate address"), context=context)
        public_ip = self.network_api.allocate_floating_ip(context)
        return {'publicIp': public_ip}
        try:
            public_ip = self.network_api.allocate_floating_ip(context)
            return {'publicIp': public_ip}
        except rpc.RemoteError as ex:
            if ex.exc_type == 'NoMoreAddresses':
                raise exception.NoMoreFloatingIps()
            else:
                raise

    def release_address(self, context, public_ip, **kwargs):
        LOG.audit(_("Release address %s"), public_ip, context=context)
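The try/except added to allocate_address above translates a transport-level failure into a typed API exception. A standalone sketch of that pattern, using stand-in classes rather than nova's rpc and exception modules:

# Sketch only: RemoteError and NoMoreFloatingIps are stand-ins.
class RemoteError(Exception):
    def __init__(self, exc_type, message=''):
        super(RemoteError, self).__init__(message)
        self.exc_type = exc_type

class NoMoreFloatingIps(Exception):
    pass

def allocate_address(allocate_floating_ip):
    try:
        return {'publicIp': allocate_floating_ip()}
    except RemoteError as ex:
        # Only the one well-known remote failure is translated;
        # anything else propagates unchanged.
        if ex.exc_type == 'NoMoreAddresses':
            raise NoMoreFloatingIps()
        else:
            raise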
@@ -896,14 +910,13 @@ class CloudController(object):
            ramdisk = self._get_image(context, kwargs['ramdisk_id'])
            kwargs['ramdisk_id'] = ramdisk['id']
        image = self._get_image(context, kwargs['image_id'])
        if not image:
            raise exception.ImageNotFound(image_id=kwargs['image_id'])
        try:
            available = (image['properties']['image_state'] == 'available')
        except KeyError:
            available = False

        if not available:
        if image:
            image_state = self._get_image_state(image)
        else:
            raise exception.ImageNotFound(image_id=kwargs['image_id'])

        if image_state != 'available':
            raise exception.ApiError(_('Image must be available'))

        instances = self.compute_api.create(context,
@@ -1021,11 +1034,8 @@ class CloudController(object):
                                get('image_location'), name)
        else:
            i['imageLocation'] = image['properties'].get('image_location')
        # NOTE(vish): fallback status if image_state isn't set
        state = image.get('status')
        if state == 'active':
            state = 'available'
        i['imageState'] = image['properties'].get('image_state', state)

        i['imageState'] = self._get_image_state(image)
        i['displayName'] = name
        i['description'] = image.get('description')
        display_mapping = {'aki': 'kernel',
@@ -101,7 +101,7 @@ class APIRouter(base_wsgi.Router):
        mapper.resource("zone", "zones",
                        controller=zones.create_resource(),
                        collection={'detail': 'GET', 'info': 'GET',
                                    'select': 'GET'})
                                    'select': 'POST'})

        mapper.resource("user", "users",
                        controller=users.create_resource(),
@@ -49,19 +49,22 @@ class AuthMiddleware(wsgi.Middleware):
        if not self.has_authentication(req):
            return self.authenticate(req)
        user = self.get_user_by_authentication(req)
        accounts = self.auth.get_projects(user=user)
        if not user:
            token = req.headers["X-Auth-Token"]
            msg = _("%(user)s could not be found with token '%(token)s'")
            LOG.warn(msg % locals())
            return faults.Fault(webob.exc.HTTPUnauthorized())

        if accounts:
            #we are punting on this til auth is settled,
            #and possibly til api v1.1 (mdragon)
            account = accounts[0]
        else:
            return faults.Fault(webob.exc.HTTPUnauthorized())
        try:
            account = req.headers["X-Auth-Project-Id"]
        except KeyError:
            # FIXME(usrleon): It needed only for compatibility
            # while osapi clients don't use this header
            accounts = self.auth.get_projects(user=user)
            if accounts:
                account = accounts[0]
            else:
                return faults.Fault(webob.exc.HTTPUnauthorized())

        if not self.auth.is_admin(user) and \
           not self.auth.is_project_member(user, account):
@@ -137,7 +137,7 @@ class ActionExtensionResource(wsgi.Resource):

    def __init__(self, application):
        controller = ActionExtensionController(application)
        super(ActionExtensionResource, self).__init__(controller)
        wsgi.Resource.__init__(self, controller)

    def add_action(self, action_name, handler):
        self.controller.add_action(action_name, handler)

@@ -164,7 +164,7 @@ class RequestExtensionResource(wsgi.Resource):

    def __init__(self, application):
        controller = RequestExtensionController(application)
        super(RequestExtensionResource, self).__init__(controller)
        wsgi.Resource.__init__(self, controller)

    def add_handler(self, handler):
        self.controller.add_handler(handler)
@@ -153,6 +153,7 @@ class Controller(object):
            msg = _("Server name is not defined")
            return exc.HTTPBadRequest(msg)

        zone_blob = body['server'].get('blob')
        name = body['server']['name']
        self._validate_server_name(name)
        name = name.strip()

@@ -172,7 +173,8 @@ class Controller(object):
                key_data=key_data,
                metadata=body['server'].get('metadata', {}),
                injected_files=injected_files,
                admin_password=password)
                admin_password=password,
                zone_blob=zone_blob)
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        except exception.ImageNotFound as error:
@@ -35,7 +35,7 @@ class Versions(wsgi.Resource):
            'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
        }

        super(Versions, self).__init__(None, serializers=serializers)
        wsgi.Resource.__init__(self, None, serializers=serializers)

    def dispatch(self, request, *args):
        """Respond to a request for all OpenStack API versions."""
@@ -29,9 +29,6 @@ class ViewBuilder(object):
    def _build_rate_limit(self, rate_limit):
        raise NotImplementedError()

    def _build_absolute_limits(self, absolute_limit):
        raise NotImplementedError()

    def build(self, rate_limits, absolute_limits):
        rate_limits = self._build_rate_limits(rate_limits)
        absolute_limits = self._build_absolute_limits(absolute_limits)

@@ -67,12 +64,6 @@ class ViewBuilder(object):
            limits[name] = value
        return limits

    def _build_rate_limits(self, rate_limits):
        raise NotImplementedError()

    def _build_rate_limit(self, rate_limit):
        raise NotImplementedError()


class ViewBuilderV10(ViewBuilder):
    """Openstack API v1.0 limits view builder."""
@@ -225,7 +225,7 @@ class XMLDictSerializer(DictSerializer):
        if not xmlns and self.xmlns:
            node.setAttribute('xmlns', self.xmlns)

        return node.toprettyxml(indent='    ')
        return node.toprettyxml(indent='    ', encoding='utf-8')

    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursive method to convert data members to XML nodes."""
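For context on the one-line serializer change above: minidom's toprettyxml accepts an encoding argument, which makes it return an encoded byte string (and emit an XML declaration when called on a Document) instead of a unicode string. A quick standalone check:

# Illustration only; the document and element names are made up.
from xml.dom import minidom

doc = minidom.parseString('<versions><version id="v1.0"/></versions>')
# With encoding set, the result is an encoded byte string with an
# XML declaration, rather than a plain (unicode) string.
print(doc.toprettyxml(indent='    ', encoding='utf-8'))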
@@ -27,9 +27,6 @@ from nova.scheduler import api


FLAGS = flags.FLAGS
flags.DEFINE_string('build_plan_encryption_key',
        None,
        '128bit (hex) encryption key for scheduler build plans.')


LOG = logging.getLogger('nova.api.openstack.zones')

@@ -53,6 +50,14 @@ def _scrub_zone(zone):
                'deleted', 'deleted_at', 'updated_at'))


def check_encryption_key(func):
    def wrapped(*args, **kwargs):
        if not FLAGS.build_plan_encryption_key:
            raise exception.Error(_("--build_plan_encryption_key not set"))
        return func(*args, **kwargs)
    return wrapped


class Controller(object):

    def index(self, req):
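The check_encryption_key decorator added above simply guards a handler behind a flag. A minimal standalone sketch of the same pattern, with stand-ins for nova's FLAGS and exception types:

# Sketch only: FLAGS and RuntimeError stand in for nova's own objects.
class FLAGS(object):
    build_plan_encryption_key = None

def check_encryption_key(func):
    def wrapped(*args, **kwargs):
        if not FLAGS.build_plan_encryption_key:
            raise RuntimeError("--build_plan_encryption_key not set")
        return func(*args, **kwargs)
    return wrapped

@check_encryption_key
def select(req, body):
    return {"weights": []}

FLAGS.build_plan_encryption_key = 'deadbeef' * 4  # 128-bit hex key
print(select(None, '{}'))  # the guard passes once the flag is set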
@@ -103,19 +108,13 @@ class Controller(object):
        zone = api.zone_update(context, zone_id, body["zone"])
        return dict(zone=_scrub_zone(zone))

    def select(self, req):
    @check_encryption_key
    def select(self, req, body):
        """Returns a weighted list of costs to create instances
        of desired capabilities."""
        ctx = req.environ['nova.context']
        qs = req.environ['QUERY_STRING']
        param_dict = urlparse.parse_qs(qs)
        param_dict.pop("fresh", None)
        # parse_qs returns a dict where the values are lists,
        # since query strings can have multiple values for the
        # same key. We need to convert that to single values.
        for key in param_dict:
            param_dict[key] = param_dict[key][0]
        build_plan = api.select(ctx, specs=param_dict)
        specs = json.loads(body)
        build_plan = api.select(ctx, specs=specs)
        cooked = self._scrub_build_plan(build_plan)
        return {"weights": cooked}
@@ -123,9 +122,6 @@ class Controller(object):
        """Remove all the confidential data and return a sanitized
        version of the build plan. Include an encrypted full version
        of the weighting entry so we can get back to it later."""
        if not FLAGS.build_plan_encryption_key:
            raise exception.FlagNotSet(flag='build_plan_encryption_key')

        encryptor = crypto.encryptor(FLAGS.build_plan_encryption_key)
        cooked = []
        for entry in build_plan:
@@ -139,7 +139,7 @@ class LdapDriver(object):
            self.__cache = None
        return False

    def __local_cache(key_fmt):
    def __local_cache(key_fmt):  # pylint: disable=E0213
        """Wrap function to cache it's result in self.__cache.
        Works only with functions with one fixed argument.
        """
@@ -14,4 +14,5 @@ alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_P
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
export NOVA_API_KEY="%(access)s"
export NOVA_USERNAME="%(user)s"
export NOVA_PROJECT_ID="%(project)s"
export NOVA_URL="%(os)s"
@@ -128,18 +128,16 @@ class API(base.Base):
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

    def create(self, context, instance_type,
    def _check_create_parameters(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata={},
               injected_files=None,
               admin_password=None):
        """Create the number and type of instances requested.
               injected_files=None, admin_password=None, zone_blob=None):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed."""

        Verifies that quota and other arguments are valid.
        """
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()
@@ -225,63 +223,145 @@ class API(base.Base):
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type}

        return (num_instances, base_options, security_groups)

    def create_db_entry_for_new_instance(self, context, base_options,
                                         security_groups, num=1):
        """Create an entry in the DB for this new instance,
        including any related table updates (such as security
        groups, MAC address, etc). This will called by create()
        in the majority of situations, but all-at-once style
        Schedulers may initiate the call."""
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance['id']

        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)
            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name') or
                    instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                        " instance %(instance_id)s") % locals())

            # NOTE(sandy): For now we're just going to pass in the
            # instance_type record to the scheduler. In a later phase
            # we'll be ripping this whole for-loop out and deferring the
            # creation of the Instance record. At that point all this will
            # change.
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id,
                               "request_spec": {
                                   'instance_type': instance_type,
                                   'filter':
                                       'nova.scheduler.host_filter.'
                                       'InstanceTypeFilter',
                               },
                               "availability_zone": availability_zone,
                               "injected_files": injected_files,
                               "admin_password": admin_password,
                               },
                      })
        instance = self.update(context, instance_id, **updates)

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return instance

    def _ask_scheduler_to_create_instance(self, context, base_options,
                                          instance_type, zone_blob,
                                          availability_zone, injected_files,
                                          admin_password,
                                          instance_id=None, num_instances=1):
        """Send the run_instance request to the schedulers for processing."""
        pid = context.project_id
        uid = context.user_id
        if instance_id:
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                        " instance %(instance_id)s (single-shot)") % locals())
        else:
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                        " (all-at-once)") % locals())

        filter_class = 'nova.scheduler.host_filter.InstanceTypeFilter'
        request_spec = {
            'instance_properties': base_options,
            'instance_type': instance_type,
            'filter': filter_class,
            'blob': zone_blob,
            'num_instances': num_instances
        }

        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "request_spec": request_spec,
                           "availability_zone": availability_zone,
                           "admin_password": admin_password,
                           "injected_files": injected_files}})

    def create_all_at_once(self, context, instance_type,
                           image_href, kernel_id=None, ramdisk_id=None,
                           min_count=1, max_count=1,
                           display_name='', display_description='',
                           key_name=None, key_data=None, security_group='default',
                           availability_zone=None, user_data=None, metadata={},
                           injected_files=None, admin_password=None, zone_blob=None):
        """Provision the instances by passing the whole request to
        the Scheduler for execution. Returns a Reservation ID
        related to the creation of all of these instances."""
        num_instances, base_options, security_groups = \
            self._check_create_parameters(
                context, instance_type,
                image_href, kernel_id, ramdisk_id,
                min_count, max_count,
                display_name, display_description,
                key_name, key_data, security_group,
                availability_zone, user_data, metadata,
                injected_files, admin_password, zone_blob)

        self._ask_scheduler_to_create_instance(context, base_options,
                                               instance_type, zone_blob,
                                               availability_zone, injected_files,
                                               admin_password,
                                               num_instances=num_instances)

        return base_options['reservation_id']

    def create(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata={},
               injected_files=None, admin_password=None, zone_blob=None):
        """
        Provision the instances by sending off a series of single
        instance requests to the Schedulers. This is fine for trival
        Scheduler drivers, but may remove the effectiveness of the
        more complicated drivers.

        Returns a list of instance dicts.
        """

        num_instances, base_options, security_groups = \
            self._check_create_parameters(
                context, instance_type,
                image_href, kernel_id, ramdisk_id,
                min_count, max_count,
                display_name, display_description,
                key_name, key_data, security_group,
                availability_zone, user_data, metadata,
                injected_files, admin_password, zone_blob)

        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = self.create_db_entry_for_new_instance(context,
                                    base_options, security_groups, num=num)
            instances.append(instance)
            instance_id = instance['id']

            self._ask_scheduler_to_create_instance(context, base_options,
                                                   instance_type, zone_blob,
                                                   availability_zone, injected_files,
                                                   admin_password,
                                                   instance_id=instance_id)

        return [dict(x.iteritems()) for x in instances]

    def has_finished_migration(self, context, instance_id):
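For reference, the request_spec dictionary assembled by _ask_scheduler_to_create_instance above has this shape; the values below are illustrative placeholders, not real flag or database values:

# Illustrative only; every value here is a made-up stand-in.
request_spec = {
    'instance_properties': {'display_name': 'Server 1',
                            'image_id': 1,
                            'metadata': {}},         # the base_options dict
    'instance_type': {'flavorid': 1},
    'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
    'blob': None,            # encrypted build-plan hint from a parent zone, if any
    'num_instances': 1,
}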
@@ -114,7 +114,7 @@ def get_instance_type(id):
        ctxt = context.get_admin_context()
        return db.instance_type_get_by_id(ctxt, id)
    except exception.DBError:
        raise exception.ApiError(_("Unknown instance type: %s") % name)
        raise exception.ApiError(_("Unknown instance type: %s") % id)


def get_instance_type_by_name(name):
@@ -36,6 +36,7 @@ from twisted.application import service

from nova import flags
from nova import log as logging
from nova import utils
from nova.virt import connection as virt_connection
@@ -119,7 +119,7 @@ class VMRCSessionConsole(VMRCConsole):
        """
        vms = vim_session._call_method(vim_util, 'get_objects',
                                       'VirtualMachine', ['name'])
        vm_ref = NoneV
        vm_ref = None
        for vm in vms:
            if vm.propSet[0].val == instance_name:
                vm_ref = vm.obj
@@ -743,7 +743,7 @@ def fixed_ip_get_all_by_instance(context, instance_id):
                 filter_by(instance_id=instance_id).\
                 filter_by(deleted=False)
    if not rv:
        raise exception.NoFloatingIpsFoundForInstance(instance_id=instance_id)
        raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
    return rv
@@ -0,0 +1,65 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData, Table

meta = MetaData()


def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    if migrate_engine.name == "mysql":
        migrate_engine.execute("ALTER TABLE auth_tokens Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE certificates Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE compute_nodes Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE console_pools Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE consoles Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE export_devices Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE fixed_ips Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE floating_ips Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE instance_actions Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE instance_metadata Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE instance_types Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE instances Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE iscsi_targets Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE key_pairs Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE migrate_version Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE migrations Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE networks Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE projects Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE quotas Engine=InnoDB")
        migrate_engine.execute(
            "ALTER TABLE security_group_instance_association Engine=InnoDB")
        migrate_engine.execute(
            "ALTER TABLE security_group_rules Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE security_groups Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE services Engine=InnoDB")
        migrate_engine.execute(
            "ALTER TABLE user_project_association Engine=InnoDB")
        migrate_engine.execute(
            "ALTER TABLE user_project_role_association Engine=InnoDB")
        migrate_engine.execute(
            "ALTER TABLE user_role_association Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE users Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE volumes Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE zones Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE snapshots Engine=InnoDB")


def downgrade(migrate_engine):
    meta.bind = migrate_engine
@@ -46,6 +46,7 @@ class NovaBase(object):
    updated_at = Column(DateTime, onupdate=utils.utcnow)
    deleted_at = Column(DateTime)
    deleted = Column(Boolean, default=False)
    metadata = None

    def save(self, session=None):
        """Save this object."""
@@ -376,6 +376,10 @@ class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
    message = _("Zero floating ips defined for instance %(instance_id)s.")


class NoMoreFloatingIps(NotFound):
    message = _("Zero floating ips available.")


class KeypairNotFound(NotFound):
    message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
@@ -270,8 +270,10 @@ DEFINE_list('region_list',
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
DEFINE_string('glance_host', '$my_ip', 'glance host')
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
DEFINE_list('glance_api_servers',
            ['127.0.0.1:9292'],
            'list of glance api servers available to nova (host:port)')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')

@@ -381,3 +383,5 @@ DEFINE_string('zone_name', 'nova', 'name of this zone')
DEFINE_list('zone_capabilities',
            ['hypervisor=xenserver;kvm', 'os=linux;windows'],
            'Key/Multi-value list representng capabilities of this zone')
DEFINE_string('build_plan_encryption_key', None,
              '128bit (hex) encryption key for scheduler build plans.')
@@ -22,6 +22,7 @@ import nova
from nova import exception
from nova import utils
from nova import flags
from nova.image import glance as glance_image_service

FLAGS = flags.FLAGS

@@ -48,6 +49,8 @@ def get_default_image_service():
    return ImageService()


# FIXME(sirp): perhaps this should be moved to nova/images/glance so that we
# keep Glance specific code together for the most part
def get_glance_client(image_href):
    """Get the correct glance client and id for the given image_href.

@@ -62,7 +65,9 @@ def get_glance_client(image_href):
    """
    image_href = image_href or 0
    if str(image_href).isdigit():
        glance_client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
        glance_host, glance_port = \
            glance_image_service.pick_glance_api_server()
        glance_client = GlanceClient(glance_host, glance_port)
        return (glance_client, int(image_href))

    try:
@@ -20,6 +20,7 @@
from __future__ import absolute_import

import datetime
import random

from glance.common import exception as glance_exception

@@ -39,6 +40,21 @@ FLAGS = flags.FLAGS
GlanceClient = utils.import_class('glance.client.Client')


def pick_glance_api_server():
    """Return which Glance API server to use for the request

    This method provides a very primitive form of load-balancing suitable for
    testing and sandbox environments. In production, it would be better to use
    one IP and route that to a real load-balancer.

    Returns (host, port)
    """
    host_port = random.choice(FLAGS.glance_api_servers)
    host, port_str = host_port.split(':')
    port = int(port_str)
    return host, port


class GlanceImageService(service.BaseImageService):
    """Provides storage and retrieval of disk image objects within Glance."""

@@ -51,12 +67,21 @@ class GlanceImageService(service.BaseImageService):
                    GLANCE_ONLY_ATTRS

    def __init__(self, client=None):
        # FIXME(sirp): can we avoid dependency-injection here by using
        # stubbing out a fake?
        if client is None:
            self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
        else:
            self.client = client
        self._client = client

    def _get_client(self):
        # NOTE(sirp): we want to load balance each request across glance
        # servers. Since GlanceImageService is a long-lived object, `client`
        # is made to choose a new server each time via this property.
        if self._client is not None:
            return self._client
        glance_host, glance_port = pick_glance_api_server()
        return GlanceClient(glance_host, glance_port)

    def _set_client(self, client):
        self._client = client

    client = property(_get_client, _set_client)

    def index(self, context, filters=None, marker=None, limit=None):
        """Calls out to Glance for a list of images available."""
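Taken together, pick_glance_api_server and the client property above give each request a freshly chosen server unless a client was injected. A self-contained sketch of the idea, with a stand-in for glance.client.Client:

# Sketch only: FakeClient and the server list are made-up stand-ins.
import random

glance_api_servers = ['127.0.0.1:9292', '192.168.0.2:9292']

def pick_glance_api_server(servers=glance_api_servers):
    host, port_str = random.choice(servers).split(':')
    return host, int(port_str)

class FakeClient(object):
    def __init__(self, host, port):
        self.host, self.port = host, port

class ImageService(object):
    def __init__(self, client=None):
        self._client = client  # an injected client (as in the tests) wins

    def _get_client(self):
        if self._client is not None:
            return self._client
        # otherwise pick a fresh server on every attribute access
        return FakeClient(*pick_glance_api_server())

    client = property(_get_client)

service = ImageService()
print(service.client.host)  # may differ from one access to the next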
@@ -86,6 +86,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
                    'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
flags.DEFINE_string('gateway_v6', None, 'Default IPv6 gateway')
flags.DEFINE_integer('cnt_vpn_clients', 0,
                     'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
@@ -292,7 +293,7 @@ class NetworkManager(manager.SchedulerDependentManager):
        return host

    def create_networks(self, context, cidr, num_networks, network_size,
                        cidr_v6, label, *args, **kwargs):
                        cidr_v6, gateway_v6, label, *args, **kwargs):
        """Create networks based on parameters."""
        fixed_net = IPy.IP(cidr)
        fixed_net_v6 = IPy.IP(cidr_v6)

@@ -324,7 +325,13 @@ class NetworkManager(manager.SchedulerDependentManager):
                                                 significant_bits_v6)
                net['cidr_v6'] = cidr_v6
                project_net_v6 = IPy.IP(cidr_v6)
                net['gateway_v6'] = str(project_net_v6[1])

                if gateway_v6:
                    # use a pre-defined gateway if one is provided
                    net['gateway_v6'] = str(gateway_v6)
                else:
                    net['gateway_v6'] = str(project_net_v6[1])

                net['netmask_v6'] = str(project_net_v6.prefixlen())

            network_ref = self.db.network_create_safe(context, net)
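The gateway_v6 handling above defaults to the second address of the IPv6 network when no gateway is supplied. A minimal sketch using the same IPy library the manager imports (assumes the IPy package is installed; IPy's compact IPv6 string form is assumed):

# Sketch only; pick_gateway_v6 is a hypothetical helper name.
import IPy

def pick_gateway_v6(cidr_v6, gateway_v6=None):
    if gateway_v6:
        return str(gateway_v6)       # a pre-defined gateway wins
    return str(IPy.IP(cidr_v6)[1])   # else the network's second address

print(pick_gateway_v6('fd00::/48'))              # e.g. fd00::1
print(pick_gateway_v6('fd00::/48', 'fd00::ff'))  # fd00::ff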
@@ -84,7 +84,7 @@ def get_zone_capabilities(context):
def select(context, specs=None):
    """Returns a list of hosts."""
    return _call_scheduler('select', context=context,
                           params={"specs": specs})
                           params={"request_spec": specs})


def update_service_capabilities(context, service_name, host, capabilities):
@@ -70,6 +70,14 @@ class SchedulerManager(manager.Manager):
        self.zone_manager.update_service_capabilities(service_name,
                host, capabilities)

    def select(self, context=None, *args, **kwargs):
        """Select a list of hosts best matching the provided specs."""
        return self.driver.select(context, *args, **kwargs)

    def get_scheduler_rules(self, context=None, *args, **kwargs):
        """Ask the driver how requests should be made of it."""
        return self.driver.get_scheduler_rules(context, *args, **kwargs)

    def _schedule(self, method, context, topic, *args, **kwargs):
        """Tries to call schedule_* method on the driver to retrieve host.

@@ -80,7 +88,9 @@ class SchedulerManager(manager.Manager):
        try:
            host = getattr(self.driver, driver_method)(elevated, *args,
                                                       **kwargs)
        except AttributeError:
        except AttributeError, e:
            LOG.exception(_("Driver Method %(driver_method)s missing: %(e)s")
                          % locals())
            host = self.driver.schedule(elevated, topic, *args, **kwargs)

        if not host:
@@ -21,16 +21,30 @@ across zones. There are two expansion points to this class for:
"""

import operator
import json

import M2Crypto
import novaclient

from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc

from nova.scheduler import api
from nova.scheduler import driver

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')


class InvalidBlob(exception.NovaException):
    message = _("Ill-formed or incorrectly routed 'blob' data sent "
                "to instance create request.")


class ZoneAwareScheduler(driver.Scheduler):
    """Base class for creating Zone Aware Schedulers."""
@@ -38,6 +52,112 @@ class ZoneAwareScheduler(driver.Scheduler):
        """Call novaclient zone method. Broken out for testing."""
        return api.call_zone_method(context, method, specs=specs)

    def _provision_resource_locally(self, context, item, instance_id, kwargs):
        """Create the requested resource in this Zone."""
        host = item['hostname']
        kwargs['instance_id'] = instance_id
        rpc.cast(context,
                 db.queue_get_for(context, "compute", host),
                 {"method": "run_instance",
                  "args": kwargs})
        LOG.debug(_("Provisioning locally via compute node %(host)s")
                  % locals())

    def _decrypt_blob(self, blob):
        """Returns the decrypted blob or None if invalid. Broken out
        for testing."""
        decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
        try:
            json_entry = decryptor(blob)
            return json.dumps(entry)
        except M2Crypto.EVP.EVPError:
            pass
        return None

    def _ask_child_zone_to_create_instance(self, context, zone_info,
                                           request_spec, kwargs):
        """Once we have determined that the request should go to one
        of our children, we need to fabricate a new POST /servers/
        call with the same parameters that were passed into us.

        Note that we have to reverse engineer from our args to get back the
        image, flavor, ipgroup, etc. since the original call could have
        come in from EC2 (which doesn't use these things)."""

        instance_type = request_spec['instance_type']
        instance_properties = request_spec['instance_properties']

        name = instance_properties['display_name']
        image_id = instance_properties['image_id']
        meta = instance_properties['metadata']
        flavor_id = instance_type['flavorid']

        files = kwargs['injected_files']
        ipgroup = None  # Not supported in OS API ... yet

        child_zone = zone_info['child_zone']
        child_blob = zone_info['child_blob']
        zone = db.zone_get(context, child_zone)
        url = zone.api_url
        LOG.debug(_("Forwarding instance create call to child zone %(url)s")
                  % locals())
        nova = None
        try:
            nova = novaclient.OpenStack(zone.username, zone.password, url)
            nova.authenticate()
        except novaclient.exceptions.BadRequest, e:
            raise exception.NotAuthorized(_("Bad credentials attempting "
                                            "to talk to zone at %(url)s.") % locals())

        nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files,
                            child_blob)

    def _provision_resource_from_blob(self, context, item, instance_id,
                                      request_spec, kwargs):
        """Create the requested resource locally or in a child zone
        based on what is stored in the zone blob info.

        Attempt to decrypt the blob to see if this request is:
        1. valid, and
        2. intended for this zone or a child zone.

        Note: If we have "blob" that means the request was passed
        into us from a parent zone. If we have "child_blob" that
        means we gathered the info from one of our children.
        It's possible that, when we decrypt the 'blob' field, it
        contains "child_blob" data. In which case we forward the
        request."""

        host_info = None
        if "blob" in item:
            # Request was passed in from above. Is it for us?
            host_info = self._decrypt_blob(item['blob'])
        elif "child_blob" in item:
            # Our immediate child zone provided this info ...
            host_info = item

        if not host_info:
            raise InvalidBlob()

        # Valid data ... is it for us?
        if 'child_zone' in host_info and 'child_blob' in host_info:
            self._ask_child_zone_to_create_instance(context, host_info,
                                                    request_spec, kwargs)
        else:
            self._provision_resource_locally(context, host_info,
                                             instance_id, kwargs)

    def _provision_resource(self, context, item, instance_id, request_spec,
                            kwargs):
        """Create the requested resource in this Zone or a child zone."""
        if "hostname" in item:
            self._provision_resource_locally(context, item, instance_id,
                                             kwargs)
            return

        self._provision_resource_from_blob(context, item, instance_id,
                                           request_spec, kwargs)

    def schedule_run_instance(self, context, instance_id, request_spec,
                              *args, **kwargs):
        """This method is called from nova.compute.api to provision
@@ -51,8 +171,10 @@ class ZoneAwareScheduler(driver.Scheduler):

        # TODO(sandy): We'll have to look for richer specs at some point.

        if 'blob' in request_spec:
            self.provision_resource(context, request_spec, instance_id, kwargs)
        blob = request_spec.get('blob')
        if blob:
            self._provision_resource(context, request_spec, instance_id,
                                     request_spec, kwargs)
            return None

        # Create build plan and provision ...

@@ -61,28 +183,13 @@ class ZoneAwareScheduler(driver.Scheduler):
            raise driver.NoValidHost(_('No hosts were available'))

        for item in build_plan:
            self.provision_resource(context, item, instance_id, kwargs)
            self._provision_resource(context, item, instance_id, request_spec,
                                     kwargs)

        # Returning None short-circuits the routing to Compute (since
        # we've already done it here)
        return None

    def provision_resource(self, context, item, instance_id, kwargs):
        """Create the requested resource in this Zone or a child zone."""
        if "hostname" in item:
            host = item['hostname']
            kwargs['instance_id'] = instance_id
            rpc.cast(context,
                     db.queue_get_for(context, "compute", host),
                     {"method": "run_instance",
                      "args": kwargs})
            LOG.debug(_("Casted to compute %(host)s for run_instance")
                      % locals())
        else:
            # TODO(sandy) Provision in child zone ...
            LOG.warning(_("Provision to Child Zone not supported (yet)"))
            pass

    def select(self, context, request_spec, *args, **kwargs):
        """Select returns a list of weights and zone/host information
        corresponding to the best hosts to service the request. Any
@@ -124,17 +231,17 @@ class ZoneAwareScheduler(driver.Scheduler):
        weighted = self.weigh_hosts(num_instances, request_spec, host_list)

        # Next, tack on the best weights from the child zones ...
        json_spec = json.dumps(request_spec)
        child_results = self._call_zone_method(context, "select",
                                               specs=request_spec)
                                               specs=json_spec)
        for child_zone, result in child_results:
            for weighting in result:
                # Remember the child_zone so we can get back to
                # it later if needed. This implicitly builds a zone
                # path structure.
                host_dict = {
                    "weight": weighting["weight"],
                    "child_zone": child_zone,
                    "child_blob": weighting["blob"]}
                host_dict = {"weight": weighting["weight"],
                             "child_zone": child_zone,
                             "child_blob": weighting["blob"]}
                weighted.append(host_dict)

        weighted.sort(key=operator.itemgetter('weight'))
@@ -352,6 +352,11 @@ class FakeAuthManager(object):
        return user.admin

    def is_project_member(self, user, project):
        if not isinstance(project, Project):
            try:
                project = self.get_project(project)
            except exc.NotFound:
                raise webob.exc.HTTPUnauthorized()
        return ((user.id in project.member_ids) or
                (user.id == project.project_manager_id))
@@ -114,6 +114,28 @@ class Test(test.TestCase):
        self.assertEqual(result.status, '401 Unauthorized')
        self.assertEqual(self.destroy_called, True)

    def test_authorize_project(self):
        f = fakes.FakeAuthManager()
        user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
        f.add_user(user)
        f.create_project('user1_project', user)
        f.create_project('user2_project', user)

        req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
        req.headers['X-Auth-User'] = 'user1'
        req.headers['X-Auth-Key'] = 'user1_key'
        result = req.get_response(fakes.wsgi_app())
        self.assertEqual(result.status, '204 No Content')

        token = result.headers['X-Auth-Token']
        self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
        req = webob.Request.blank('/v1.0/fake')
        req.headers['X-Auth-Token'] = token
        req.headers['X-Auth-Project-Id'] = 'user2_project'
        result = req.get_response(fakes.wsgi_app())
        self.assertEqual(result.status, '200 OK')
        self.assertEqual(result.headers['X-Test-Success'], 'True')

    def test_bad_user_bad_key(self):
        req = webob.Request.blank('/v1.0/')
        req.headers['X-Auth-User'] = 'unknown_user'
@@ -143,6 +165,49 @@ class Test(test.TestCase):
        result = req.get_response(fakes.wsgi_app())
        self.assertEqual(result.status, '401 Unauthorized')

    def test_bad_project(self):
        f = fakes.FakeAuthManager()
        user1 = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
        user2 = nova.auth.manager.User('id2', 'user2', 'user2_key', None, None)
        f.add_user(user1)
        f.add_user(user2)
        f.create_project('user1_project', user1)
        f.create_project('user2_project', user2)

        req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
        req.headers['X-Auth-User'] = 'user1'
        req.headers['X-Auth-Key'] = 'user1_key'
        result = req.get_response(fakes.wsgi_app())
        self.assertEqual(result.status, '204 No Content')

        token = result.headers['X-Auth-Token']
        self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
        req = webob.Request.blank('/v1.0/fake')
        req.headers['X-Auth-Token'] = token
        req.headers['X-Auth-Project-Id'] = 'user2_project'
        result = req.get_response(fakes.wsgi_app())
        self.assertEqual(result.status, '401 Unauthorized')

    def test_not_existing_project(self):
        f = fakes.FakeAuthManager()
        user1 = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
        f.add_user(user1)
        f.create_project('user1_project', user1)

        req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
        req.headers['X-Auth-User'] = 'user1'
        req.headers['X-Auth-Key'] = 'user1_key'
        result = req.get_response(fakes.wsgi_app())
        self.assertEqual(result.status, '204 No Content')

        token = result.headers['X-Auth-Token']
        self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
        req = webob.Request.blank('/v1.0/fake')
        req.headers['X-Auth-Token'] = token
        req.headers['X-Auth-Project-Id'] = 'unknown_project'
        result = req.get_response(fakes.wsgi_app())
        self.assertEqual(result.status, '401 Unauthorized')


class TestFunctional(test.TestCase):
    def test_token_expiry(self):
@@ -21,7 +21,6 @@ import json
import nova.db
from nova import context
from nova import crypto
from nova import exception
from nova import flags
from nova import test
from nova.api.openstack import zones
@@ -210,6 +209,11 @@ class ZonesTest(test.TestCase):
        self.stubs.Set(api, 'select', zone_select)

        req = webob.Request.blank('/v1.0/zones/select')
        req.method = 'POST'
        req.headers["Content-Type"] = "application/json"
        # Select queries end up being JSON encoded twice.
        # Once to a string and again as an HTTP POST Body
        req.body = json.dumps(json.dumps({}))

        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
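The comment in this test points at an easy-to-miss detail: the select body is JSON-encoded twice. The round trip, standalone:

# Illustration only; the specs value is a placeholder.
import json

specs = {'instance_type': {}}
body = json.dumps(json.dumps(specs))  # what the test posts
inner = json.loads(body)              # the controller's `body` argument: a JSON string
assert json.loads(inner) == specs     # a second decode recovers the dict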
@@ -60,10 +60,8 @@ class BaseGlanceTest(unittest.TestCase):
    NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22)

    def setUp(self):
        # FIXME(sirp): we can probably use stubs library here rather than
        # dependency injection
        self.client = StubGlanceClient(None)
        self.service = glance.GlanceImageService(self.client)
        self.service = glance.GlanceImageService(client=self.client)
        self.context = context.RequestContext(None, None)

    def assertDateTimesFilled(self, image_meta):
@@ -16,6 +16,7 @@
Tests For Zone Aware Scheduler.
"""

from nova import exception
from nova import test
from nova.scheduler import driver
from nova.scheduler import zone_aware_scheduler

@@ -90,6 +91,41 @@ def fake_empty_call_zone_method(context, method, specs):
    return []


# Hmm, I should probably be using mox for this.
was_called = False


def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
    global was_called
    was_called = True


def fake_ask_child_zone_to_create_instance(context, zone_info,
                                           request_spec, kwargs):
    global was_called
    was_called = True


def fake_provision_resource_locally(context, item, instance_id, kwargs):
    global was_called
    was_called = True


def fake_provision_resource_from_blob(context, item, instance_id,
                                      request_spec, kwargs):
    global was_called
    was_called = True


def fake_decrypt_blob_returns_local_info(blob):
    return {'foo': True}  # values aren't important.


def fake_decrypt_blob_returns_child_info(blob):
    return {'child_zone': True,
            'child_blob': True}  # values aren't important. Keys are.


def fake_call_zone_method(context, method, specs):
    return [
        ('zone1', [
@@ -149,4 +185,112 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
        fake_context = {}
        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
                          fake_context, 1,
                          dict(host_filter=None, instance_type={}))
                          dict(host_filter=None,
                               request_spec={'instance_type': {}}))

    def test_schedule_do_not_schedule_with_hint(self):
        """
        Check the local/child zone routing in the run_instance() call.
        If the zone_blob hint was passed in, don't re-schedule.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
        request_spec = {
            'instance_properties': {},
            'instance_type': {},
            'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
            'blob': "Non-None blob data"
        }

        result = sched.schedule_run_instance(None, 1, request_spec)
        self.assertEquals(None, result)
        self.assertTrue(was_called)

    def test_provision_resource_local(self):
        """Provision a resource locally or remotely."""
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_provision_resource_locally',
                       fake_provision_resource_locally)

        request_spec = {'hostname': "foo"}
        sched._provision_resource(None, request_spec, 1, request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_remote(self):
        """Provision a resource locally or remotely."""
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_provision_resource_from_blob',
                       fake_provision_resource_from_blob)

        request_spec = {}
        sched._provision_resource(None, request_spec, 1, request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_from_blob_empty(self):
        """Provision a resource locally or remotely given no hints."""
        global was_called
        sched = FakeZoneAwareScheduler()
        request_spec = {}
        self.assertRaises(zone_aware_scheduler.InvalidBlob,
                          sched._provision_resource_from_blob,
                          None, {}, 1, {}, {})

    def test_provision_resource_from_blob_with_local_blob(self):
        """
        Provision a resource locally or remotely when blob hint passed in.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_decrypt_blob',
                       fake_decrypt_blob_returns_local_info)
        self.stubs.Set(sched, '_provision_resource_locally',
                       fake_provision_resource_locally)

        request_spec = {'blob': "Non-None blob data"}

        sched._provision_resource_from_blob(None, request_spec, 1,
                                            request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_from_blob_with_child_blob(self):
        """
        Provision a resource locally or remotely when child blob hint
        passed in.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        self.stubs.Set(sched, '_decrypt_blob',
                       fake_decrypt_blob_returns_child_info)
        was_called = False
        self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
                       fake_ask_child_zone_to_create_instance)

        request_spec = {'blob': "Non-None blob data"}

        sched._provision_resource_from_blob(None, request_spec, 1,
                                            request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_from_blob_with_immediate_child_blob(self):
        """
        Provision a resource locally or remotely when blob hint passed in
        from an immediate child.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
                       fake_ask_child_zone_to_create_instance)

        request_spec = {'child_blob': True, 'child_zone': True}

        sched._provision_resource_from_blob(None, request_spec, 1,
                                            request_spec, {})
        self.assertTrue(was_called)
@@ -115,6 +115,18 @@ class CloudTestCase(test.TestCase):
                          public_ip=address)
        db.floating_ip_destroy(self.context, address)

    def test_allocate_address(self):
        address = "10.10.10.10"
        allocate = self.cloud.allocate_address
        db.floating_ip_create(self.context,
                              {'address': address,
                               'host': self.network.host})
        self.assertEqual(allocate(self.context)['publicIp'], address)
        db.floating_ip_destroy(self.context, address)
        self.assertRaises(exception.NoMoreFloatingIps,
                          allocate,
                          self.context)

    def test_associate_disassociate_address(self):
        """Verifies associate runs cleanly without raising an exception"""
        address = "10.10.10.10"
@@ -487,6 +499,21 @@ class CloudTestCase(test.TestCase):
        self.assertRaises(exception.ApiError, run_instances,
                          self.context, **kwargs)

    def test_run_instances_image_status_active(self):
        kwargs = {'image_id': FLAGS.default_image,
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1}
        run_instances = self.cloud.run_instances

        def fake_show_stat_active(self, context, id):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
                    'type': 'machine'}, 'status': 'active'}

        self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)

        result = run_instances(self.context, **kwargs)
        self.assertEqual(len(result['instancesSet']), 1)

    def test_terminate_instances(self):
        inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                                  'image_ref': 1,
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import copy
import eventlet
import mox
import os

@@ -125,6 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase):


class LibvirtConnTestCase(test.TestCase):

    def setUp(self):
        super(LibvirtConnTestCase, self).setUp()
        connection._late_load_cheetah()
@ -207,6 +209,29 @@ class LibvirtConnTestCase(test.TestCase):
        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn = fake

    def fake_lookup(self, instance_name):

        class FakeVirtDomain(object):

            def snapshotCreateXML(self, *args):
                return None

            def XMLDesc(self, *args):
                return """
                    <domain type='kvm'>
                        <devices>
                            <disk type='file'>
                                <source file='filename'/>
                            </disk>
                        </devices>
                    </domain>
                """

        return FakeVirtDomain()

    def fake_execute(self, *args):
        open(args[-1], "a").close()

    def create_service(self, **kwargs):
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'binary': 'nova-compute',
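
The fake domain's XMLDesc payload is not arbitrary: snapshotting has to locate the backing file of the instance's disk in the libvirt domain XML, which is why the fakes return a <source file='filename'/> element. A sketch of that lookup (assumed logic, not the driver's exact code):

    from xml.dom import minidom

    def get_disk_source(xml_desc):
        """Return the backing file of the first file-based disk."""
        domain = minidom.parseString(xml_desc)
        source = domain.getElementsByTagName('source')[0]
        return source.getAttribute('file')   # 'filename' for the fake above
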
@ -283,38 +308,11 @@ class LibvirtConnTestCase(test.TestCase):
        self._check_xml_and_container(instance_data)

    def test_snapshot(self):
        if not self.lazy_load_library_exists():
            return

        FLAGS.image_service = 'nova.image.fake.FakeImageService'

        # Only file-based instance storages are supported at the moment
        test_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                </devices>
            </domain>
            """

        class FakeVirtDomain(object):

            def __init__(self):
                pass

            def snapshotCreateXML(self, *args):
                return None

            def XMLDesc(self, *args):
                return test_xml

        def fake_lookup(instance_name):
            if instance_name == instance_ref.name:
                return FakeVirtDomain()

        def fake_execute(*args):
            # Touch filename to pass 'with open(out_path)'
            open(args[-1], "a").close()

        # Start test
        image_service = utils.import_object(FLAGS.image_service)
@ -330,9 +328,49 @@ class LibvirtConnTestCase(test.TestCase):
        recv_meta = image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn.lookupByName = fake_lookup
        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(connection.utils, 'execute')
        connection.utils.execute = fake_execute
        connection.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = connection.LibvirtConnection(False)
        conn.snapshot(instance_ref, recv_meta['id'])

        snapshot = image_service.show(context, recv_meta['id'])
        self.assertEquals(snapshot['properties']['image_state'], 'available')
        self.assertEquals(snapshot['status'], 'active')
        self.assertEquals(snapshot['name'], snapshot_name)

    def test_snapshot_no_image_architecture(self):
        if not self.lazy_load_library_exists():
            return

        FLAGS.image_service = 'nova.image.fake.FakeImageService'

        # Start test
        image_service = utils.import_object(FLAGS.image_service)

        # Assign image_ref = 2 from nova/images/fakes for testing different
        # base image
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["image_ref"] = "2"

        # Assuming that base image already exists in image_service
        instance_ref = db.instance_create(self.context, test_instance)
        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(connection.utils, 'execute')
        connection.utils.execute = self.fake_execute

        self.mox.ReplayAll()

@ -69,7 +69,7 @@ class VMWareAPIVMTestCase(test.TestCase):
                  'instance_type': 'm1.large',
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                  }
        self.instance = db.instance_create(values)
        self.instance = db.instance_create(None, values)

    def _create_vm(self):
        """Create and spawn the VM."""
@ -52,7 +52,7 @@ def stub_out_db_instance_api(stubs):
        else:
            raise NotImplementedError()

    def fake_instance_create(values):
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""

        type_data = INSTANCE_TYPES[values['instance_type']]
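
This signature change and the VMware hunk above it are two sides of the same convention: nova's db API takes the request context as its first argument, so stubs must mirror it and test callers pass a context (or None) explicitly. An illustrative call, values abbreviated:

    instance = db.instance_create(None, {'instance_type': 'm1.large',
                                         'mac_address': 'aa:bb:cc:dd:ee:ff'})
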
@ -42,20 +42,6 @@ def stubout_instance_snapshot(stubs):

    stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)

    def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
                                   original_parent_uuid):
        from nova.virt.xenapi.fake import create_vdi
        name_label = "instance-%s" % instance_id
        #TODO: create fake SR record
        sr_ref = "fakesr"
        vdi_ref = create_vdi(name_label=name_label, read_only=False,
                             sr_ref=sr_ref, sharable=False)
        vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
        vdi_uuid = vdi_rec['uuid']
        return vdi_uuid

    stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)

    def fake_parse_xmlrpc_value(val):
        return val
@ -251,10 +237,10 @@ class FakeSessionForMigrationTests(fake.SessionBase):
    def __init__(self, uri):
        super(FakeSessionForMigrationTests, self).__init__(uri)

    def VDI_get_by_uuid(*args):
    def VDI_get_by_uuid(self, *args):
        return 'hurr'

    def VDI_resize_online(*args):
    def VDI_resize_online(self, *args):
        pass

    def VM_start(self, _1, ref, _2, _3):
@ -78,7 +78,7 @@ def WrapTwistedOptions(wrapped):
            self._absorbParameters()
            self._absorbHandlers()

            super(TwistedOptionsToFlags, self).__init__()
            wrapped.__init__(self)

        def _absorbFlags(self):
            twistd_flags = []
@ -163,12 +163,12 @@ def WrapTwistedOptions(wrapped):
        def parseArgs(self, *args):
            # TODO(termie): figure out a decent way of dealing with args
            #return
            super(TwistedOptionsToFlags, self).parseArgs(*args)
            wrapped.parseArgs(self, *args)

        def postOptions(self):
            self._doHandlers()

            super(TwistedOptionsToFlags, self).postOptions()
            wrapped.postOptions(self)

        def __getitem__(self, key):
            key = key.replace('-', '_')
@ -142,24 +142,26 @@ def execute(*cmd, **kwargs):
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    _PIPE = subprocess.PIPE  # pylint: disable=E1101
    obj = subprocess.Popen(cmd,
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           stdin=_PIPE,
                           stdout=_PIPE,
                           stderr=_PIPE,
                           env=env)
    result = None
    if process_input is not None:
        result = obj.communicate(process_input)
    else:
        result = obj.communicate()
    obj.stdin.close()
    if obj.returncode:
        LOG.debug(_('Result was %s') % obj.returncode)
    obj.stdin.close()  # pylint: disable=E1101
    _returncode = obj.returncode  # pylint: disable=E1101
    if _returncode:
        LOG.debug(_('Result was %s') % _returncode)
        if type(check_exit_code) == types.IntType \
                and obj.returncode != check_exit_code:
                and _returncode != check_exit_code:
            (stdout, stderr) = result
            raise exception.ProcessExecutionError(
                    exit_code=obj.returncode,
                    exit_code=_returncode,
                    stdout=stdout,
                    stderr=stderr,
                    cmd=' '.join(cmd))
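
The execute() hunk is a pure pylint refactor: subprocess.Popen members trigger E1101 ("member not found") false positives, so each attribute is bound to a local once, with a single disable comment, and the local is used everywhere after that. The pattern in miniature, under the same assumption about pylint's behavior:

    import subprocess

    def run(cmd):
        # Bind the members pylint cannot resolve on Popen to locals once.
        _PIPE = subprocess.PIPE  # pylint: disable=E1101
        obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE)
        out, err = obj.communicate()
        _returncode = obj.returncode  # pylint: disable=E1101
        return _returncode, out, err
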
@ -23,6 +23,7 @@ Handling of VM disk images.

from nova import context
from nova import flags
from nova.image import glance as glance_image_service
import nova.image
from nova import log as logging
from nova import utils
@ -42,13 +43,3 @@ def fetch(image_href, path, _user, _project):
    elevated = context.get_admin_context()
    metadata = image_service.get(elevated, image_id, image_file)
    return metadata


# TODO(vish): xenapi should use the glance client code directly instead
#             of retrieving the image using this method.
def image_url(image):
    if FLAGS.image_service == "nova.image.glance.GlanceImageService":
        return "http://%s:%s/images/%s" % (FLAGS.glance_host,
                                           FLAGS.glance_port, image)
    return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
                                              image)
@ -403,8 +403,7 @@ class LibvirtConnection(driver.ComputeDriver):
                    'is_public': False,
                    'status': 'active',
                    'name': snapshot['name'],
                    'properties': {'architecture':
                                       base['properties']['architecture'],
                    'properties': {
                                   'kernel_id': instance['kernel_id'],
                                   'image_location': 'snapshot',
                                   'image_state': 'available',
@ -412,6 +411,9 @@ class LibvirtConnection(driver.ComputeDriver):
                                   'ramdisk_id': instance['ramdisk_id'],
                                   }
                    }
        if 'architecture' in base['properties']:
            arch = base['properties']['architecture']
            metadata['properties']['architecture'] = arch

        # Make the snapshot
        snapshot_name = uuid.uuid4().hex
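
The snapshot metadata no longer assumes every base image carries an 'architecture' property; it is copied only when present, which is exactly what the new test_snapshot_no_image_architecture exercises. An equivalent, slightly more compact guard using dict.get (illustrative; 'base' and 'metadata' mirror the variables in the hunk):

    base = {'properties': {}}        # some base images lack 'architecture'
    metadata = {'properties': {}}
    arch = base['properties'].get('architecture')
    if arch is not None:
        metadata['properties']['architecture'] = arch
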
@ -340,10 +340,6 @@ class SessionBase(object):
            return
        db_ref['xenstore_data'][key] = None

    def network_get_all_records_where(self, _1, _2):
        # TODO (salvatore-orlando): filter table on _2
        return _db_content['network']

    def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
        db_ref = _db_content['VM'][vm_ref]
        if not 'xenstore_data' in db_ref:
@ -354,7 +350,7 @@ class SessionBase(object):
        #Always return 12GB available
        return 12 * 1024 * 1024 * 1024

    def host_call_plugin(*args):
    def host_call_plugin(self, *args):
        return 'herp'

    def network_get_all_records_where(self, _1, filter):
@ -33,6 +33,7 @@ import glance.client
from nova import exception
from nova import flags
import nova.image
from nova.image import glance as glance_image_service
from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
@ -358,10 +359,12 @@ class VMHelper(HelperBase):

        os_type = instance.os_type or FLAGS.default_os_type

        glance_host, glance_port = \
            glance_image_service.pick_glance_api_server()
        params = {'vdi_uuids': vdi_uuids,
                  'image_id': image_id,
                  'glance_host': FLAGS.glance_host,
                  'glance_port': FLAGS.glance_port,
                  'glance_host': glance_host,
                  'glance_port': glance_port,
                  'sr_path': cls.get_sr_path(session),
                  'os_type': os_type}

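
Upload here, and fetch in the hunk below, now resolve a host/port pair per request through glance_image_service.pick_glance_api_server() instead of reading the static FLAGS pair, so calls can be spread over a pool of Glance API servers. A hypothetical picker with that shape; the pool variable and the random strategy are assumptions, not the committed code:

    import random

    GLANCE_API_SERVERS = ['glance1:9292', 'glance2:9292']

    def pick_glance_api_server():
        # Choose one "host:port" entry per request so XenAPI plugin
        # calls are balanced across the configured Glance servers.
        host, port = random.choice(GLANCE_API_SERVERS).split(':')
        return host, int(port)
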
@ -409,9 +412,11 @@ class VMHelper(HelperBase):
        # here (under Python 2.6+) and pass them as arguments
        uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]

        glance_host, glance_port = \
            glance_image_service.pick_glance_api_server()
        params = {'image_id': image,
                  'glance_host': FLAGS.glance_host,
                  'glance_port': FLAGS.glance_port,
                  'glance_host': glance_host,
                  'glance_port': glance_port,
                  'uuid_stack': uuid_stack,
                  'sr_path': cls.get_sr_path(session)}

@ -576,7 +581,8 @@ class VMHelper(HelperBase):
        Returns: A single filename if image_type is KERNEL_RAMDISK
                 A list of dictionaries that describe VDIs, otherwise
        """
        url = images.image_url(image)
        url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
                                                 image)
        LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
        if image_type == ImageType.KERNEL_RAMDISK:
            fn = 'get_kernel'
@ -101,7 +101,7 @@ class VMOps(object):
        if not vm_ref:
            vm_ref = VMHelper.lookup(self._session, instance.name)
        if vm_ref is None:
            raise exception(_('Attempted to power on non-existent instance'
            raise Exception(_('Attempted to power on non-existent instance'
                              ' bad instance id %s') % instance.id)
        LOG.debug(_("Starting instance %s"), instance.name)
        self._session.call_xenapi('VM.start', vm_ref, False, False)
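
The one-character fix above matters: lowercase 'exception' is the imported nova.exception module, and calling a module raises TypeError, which would mask the real "non-existent instance" error; the builtin Exception raises as intended. Demonstration with an arbitrary module standing in for nova.exception:

    import os

    try:
        os('boom')
    except TypeError as err:
        print(err)   # "'module' object is not callable"
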
14
run_tests.sh
@ -67,14 +67,11 @@ function run_pep8 {
    srcfiles=`find bin -type f ! -name "nova.conf*"`
    srcfiles+=" `find tools/*`"
    srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
    pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles}
    # Just run PEP8 in current environment
    ${wrapper} pep8 --repeat --show-pep8 --show-source \
      --exclude=vcsversion.py ${srcfiles}
}

if [ $just_pep8 -eq 1 ]; then
    run_pep8
    exit
fi

NOSETESTS="python run_tests.py $noseargs"

if [ $never_venv -eq 0 ]
@ -103,6 +100,11 @@ then
    fi
fi

if [ $just_pep8 -eq 1 ]; then
    run_pep8
    exit
fi

run_tests || exit

# Also run pep8 if no options were provided.