Cleaned up the compute API, mostly for consistency with other parts of the system, and renamed redundantly named modules and methods.
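
In short: nova.compute.api.ComputeAPI becomes nova.compute.API, its methods drop
the redundant instance suffixes (create_instances -> create,
get_instance/get_instances -> get, update_instance -> update,
delete_instance -> delete), and create() now takes display_description= to match
the database column rather than description=. A rough before/after sketch of
caller code (illustrative only; ctx stands in for a nova request context):

    # before this commit
    from nova.compute import api as compute_api
    api = compute_api.ComputeAPI()
    inst = api.get_instance(ctx, instance_id)
    api.delete_instance(ctx, instance_id)

    # after this commit
    from nova import compute
    api = compute.API()
    inst = api.get(ctx, instance_id)   # one instance, by id
    insts = api.get(ctx)               # all instances visible to ctx
    api.delete(ctx, instance_id)
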
@@ -31,6 +31,7 @@ import os
 from nova import context
 import IPy

+from nova import compute
 from nova import crypto
 from nova import db
 from nova import exception
@@ -39,7 +40,6 @@ from nova import quota
 from nova import rpc
 from nova import utils
 from nova import volume
-from nova.compute import api as compute_api
 from nova.compute import instance_types


@@ -90,9 +90,9 @@ class CloudController(object):
         self.network_manager = utils.import_object(FLAGS.network_manager)
         self.image_service = utils.import_object(FLAGS.image_service)
         self.volume_api = volume.API()
-        self.compute_api = compute_api.ComputeAPI(self.network_manager,
-                                                  self.image_service,
-                                                  self.volume_api)
+        self.compute_api = compute.API(self.network_manager,
+                                       self.image_service,
+                                       self.volume_api)
         self.setup()

     def __str__(self):
@@ -116,7 +116,7 @@ class CloudController(object):

     def _get_mpi_data(self, context, project_id):
         result = {}
-        for instance in self.compute_api.get_instances(context, project_id):
+        for instance in self.compute_api.get(context, project_id=project_id):
             if instance['fixed_ip']:
                 line = '%s slots=%d' % (instance['fixed_ip']['address'],
                                         instance['vcpus'])
@@ -440,7 +440,7 @@ class CloudController(object):
         # instance_id is passed in as a list of instances
         ec2_id = instance_id[0]
         instance_id = ec2_id_to_id(ec2_id)
-        instance_ref = self.compute_api.get_instance(context, instance_id)
+        instance_ref = self.compute_api.get(context, instance_id)
         output = rpc.call(context,
                           '%s.%s' % (FLAGS.compute_topic,
                                      instance_ref['host']),
@@ -561,7 +561,7 @@ class CloudController(object):
             instances = db.instance_get_all_by_reservation(context,
                                                            reservation_id)
         else:
-            instances = self.compute_api.get_instances(context)
+            instances = self.compute_api.get(context)
         for instance in instances:
             if not context.user.is_admin():
                 if instance['image_id'] == FLAGS.vpn_image_id:
@@ -664,7 +664,7 @@ class CloudController(object):

     def associate_address(self, context, instance_id, public_ip, **kwargs):
         instance_id = ec2_id_to_id(instance_id)
-        instance_ref = self.compute_api.get_instance(context, instance_id)
+        instance_ref = self.compute_api.get(context, instance_id)
         fixed_address = db.instance_get_fixed_address(context,
                                                       instance_ref['id'])
         floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
@@ -695,7 +695,7 @@ class CloudController(object):

     def run_instances(self, context, **kwargs):
         max_count = int(kwargs.get('max_count', 1))
-        instances = self.compute_api.create_instances(context,
+        instances = self.compute_api.create(context,
             instance_types.get_by_type(kwargs.get('instance_type', None)),
             kwargs['image_id'],
             min_count=int(kwargs.get('min_count', max_count)),
@@ -703,7 +703,7 @@ class CloudController(object):
             kernel_id=kwargs.get('kernel_id', None),
             ramdisk_id=kwargs.get('ramdisk_id'),
             display_name=kwargs.get('display_name'),
-            description=kwargs.get('display_description'),
+            display_description=kwargs.get('display_description'),
             key_name=kwargs.get('key_name'),
             user_data=kwargs.get('user_data'),
             security_group=kwargs.get('security_group'),
@@ -717,7 +717,7 @@ class CloudController(object):
         logging.debug("Going to start terminating instances")
         for ec2_id in instance_id:
             instance_id = ec2_id_to_id(ec2_id)
-            self.compute_api.delete_instance(context, instance_id)
+            self.compute_api.delete(context, instance_id)
         return True

     def reboot_instances(self, context, instance_id, **kwargs):
@@ -747,7 +747,7 @@ class CloudController(object):
                 changes[field] = kwargs[field]
         if changes:
             instance_id = ec2_id_to_id(ec2_id)
-            inst = self.compute_api.get_instance(context, instance_id)
+            inst = self.compute_api.get(context, instance_id)
             db.instance_update(context, inst['id'], kwargs)
         return True

@@ -20,12 +20,12 @@ import traceback

 from webob import exc

+from nova import compute
 from nova import exception
 from nova import wsgi
 from nova.api.openstack import common
 from nova.api.openstack import faults
 from nova.auth import manager as auth_manager
-from nova.compute import api as compute_api
 from nova.compute import instance_types
 from nova.compute import power_state
 import nova.api.openstack
@@ -81,7 +81,7 @@ class Controller(wsgi.Controller):
                 "status", "progress"]}}}

     def __init__(self):
-        self.compute_api = compute_api.ComputeAPI()
+        self.compute_api = compute.API()
         super(Controller, self).__init__()

     def index(self, req):
@@ -97,8 +97,7 @@ class Controller(wsgi.Controller):

         entity_maker - either _entity_detail or _entity_inst
         """
-        instance_list = self.compute_api.get_instances(
-            req.environ['nova.context'])
+        instance_list = self.compute_api.get(req.environ['nova.context'])
         limited_list = common.limited(instance_list, req)
         res = [entity_maker(inst)['server'] for inst in limited_list]
         return _entity_list(res)
@@ -106,8 +105,7 @@ class Controller(wsgi.Controller):
     def show(self, req, id):
         """ Returns server details by server id """
         try:
-            instance = self.compute_api.get_instance(
-                req.environ['nova.context'], int(id))
+            instance = self.compute_api.get(req.environ['nova.context'], id)
             return _entity_detail(instance)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
@@ -115,8 +113,7 @@ class Controller(wsgi.Controller):
     def delete(self, req, id):
         """ Destroys a server """
         try:
-            self.compute_api.delete_instance(req.environ['nova.context'],
-                                             int(id))
+            self.compute_api.delete(req.environ['nova.context'], id)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
         return exc.HTTPAccepted()
@@ -129,12 +126,12 @@ class Controller(wsgi.Controller):

         key_pair = auth_manager.AuthManager.get_key_pairs(
             req.environ['nova.context'])[0]
-        instances = self.compute_api.create_instances(
+        instances = self.compute_api.create(
             req.environ['nova.context'],
             instance_types.get_by_flavor_id(env['server']['flavorId']),
             env['server']['imageId'],
             display_name=env['server']['name'],
-            description=env['server']['name'],
+            display_description=env['server']['name'],
             key_name=key_pair['name'],
             key_data=key_pair['public_key'])
         return _entity_inst(instances[0])
@@ -152,9 +149,8 @@ class Controller(wsgi.Controller):
             update_dict['display_name'] = inst_dict['server']['name']

         try:
-            self.compute_api.update_instance(req.environ['nova.context'],
-                                             instance['id'],
-                                             **update_dict)
+            self.compute_api.update(req.environ['nova.context'], id,
+                                    **update_dict)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
         return exc.HTTPNoContent()

@@ -17,16 +17,316 @@
 # under the License.

 """
-:mod:`nova.compute` -- Compute Nodes using LibVirt
-=====================================================
-
-.. automodule:: nova.compute
-   :platform: Unix
-   :synopsis: Thin wrapper around libvirt for VM mgmt.
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
+Handles all requests relating to instances (guest vms).
 """

+import datetime
+import logging
+import re
+import time
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova import utils
+from nova import volume
+from nova.compute import instance_types
+from nova.db import base
+
+FLAGS = flags.FLAGS
+
+
+def generate_default_hostname(instance_id):
+    """Default function to generate a hostname given an instance reference."""
+    return str(instance_id)
+
+
+class API(base.Base):
+    """API for interacting with the compute manager."""
+
+    def __init__(self, network_manager=None, image_service=None,
+                 volume_api=None, **kwargs):
+        if not network_manager:
+            network_manager = utils.import_object(FLAGS.network_manager)
+        self.network_manager = network_manager
+        if not image_service:
+            image_service = utils.import_object(FLAGS.image_service)
+        self.image_service = image_service
+        if not volume_api:
+            volume_api = volume.API()
+        self.volume_api = volume_api
+        super(API, self).__init__(**kwargs)
+
+    def get_network_topic(self, context, instance_id):
+        try:
+            instance = self.get(context, instance_id)
+        except exception.NotFound as e:
+            logging.warning("Instance %d was not found in get_network_topic",
+                            instance_id)
+            raise e
+
+        host = instance['host']
+        if not host:
+            raise exception.Error("Instance %d has no host" % instance_id)
+        topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
+        return rpc.call(context,
+                        topic,
+                        {"method": "get_network_topic", "args": {'fake': 1}})
+
+    def create(self, context, instance_type,
+               image_id, kernel_id=None, ramdisk_id=None,
+               min_count=1, max_count=1,
+               display_name='', display_description='',
+               key_name=None, key_data=None, security_group='default',
+               user_data=None, generate_hostname=generate_default_hostname):
+        """Create the number of instances requested if quota and
+        other arguments check out ok."""
+
+        num_instances = quota.allowed_instances(context, max_count,
+                                                instance_type)
+        if num_instances < min_count:
+            logging.warn("Quota exceeded for %s, tried to run %s instances",
+                         context.project_id, min_count)
+            raise quota.QuotaError("Instance quota exceeded. You can only "
+                                   "run %s more instances of this type." %
+                                   num_instances, "InstanceLimitExceeded")
+
+        is_vpn = image_id == FLAGS.vpn_image_id
+        if not is_vpn:
+            image = self.image_service.show(context, image_id)
+            if kernel_id is None:
+                kernel_id = image.get('kernelId', None)
+            if ramdisk_id is None:
+                ramdisk_id = image.get('ramdiskId', None)
+            # No kernel and ramdisk for raw images
+            if kernel_id == str(FLAGS.null_kernel):
+                kernel_id = None
+                ramdisk_id = None
+                logging.debug("Creating a raw instance")
+            # Make sure we have access to kernel and ramdisk (if not raw)
+            if kernel_id:
+                self.image_service.show(context, kernel_id)
+            if ramdisk_id:
+                self.image_service.show(context, ramdisk_id)
+
+        if security_group is None:
+            security_group = ['default']
+        if not type(security_group) is list:
+            security_group = [security_group]
+
+        security_groups = []
+        self.ensure_default_security_group(context)
+        for security_group_name in security_group:
+            group = db.security_group_get_by_name(context,
+                                                  context.project_id,
+                                                  security_group_name)
+            security_groups.append(group['id'])
+
+        if key_data is None and key_name:
+            key_pair = db.key_pair_get(context, context.user_id, key_name)
+            key_data = key_pair['public_key']
+
+        type_data = instance_types.INSTANCE_TYPES[instance_type]
+        base_options = {
+            'reservation_id': utils.generate_uid('r'),
+            'image_id': image_id,
+            'kernel_id': kernel_id or '',
+            'ramdisk_id': ramdisk_id or '',
+            'state_description': 'scheduling',
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+            'instance_type': instance_type,
+            'memory_mb': type_data['memory_mb'],
+            'vcpus': type_data['vcpus'],
+            'local_gb': type_data['local_gb'],
+            'display_name': display_name,
+            'display_description': display_description,
+            'user_data': user_data or '',
+            'key_name': key_name,
+            'key_data': key_data}
+
+        elevated = context.elevated()
+        instances = []
+        logging.debug(_("Going to run %s instances..."), num_instances)
+        for num in range(num_instances):
+            instance = dict(mac_address=utils.generate_mac(),
+                            launch_index=num,
+                            **base_options)
+            instance = self.db.instance_create(context, instance)
+            instance_id = instance['id']
+
+            elevated = context.elevated()
+            if not security_groups:
+                security_groups = []
+            for security_group_id in security_groups:
+                self.db.instance_add_security_group(elevated,
+                                                    instance_id,
+                                                    security_group_id)
+
+            # Set sane defaults if not specified
+            updates = dict(hostname=generate_hostname(instance_id))
+            if 'display_name' not in instance:
+                updates['display_name'] = "Server %s" % instance_id
+
+            instance = self.update(context, instance_id, **updates)
+            instances.append(instance)
+
+            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+                          context.project_id, context.user_id, instance_id)
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "run_instance",
+                      "args": {"topic": FLAGS.compute_topic,
+                               "instance_id": instance_id}})
+
+        return instances
+
+    def ensure_default_security_group(self, context):
+        """ Create security group for the security context if it
+        does not already exist
+
+        :param context: the security context
+
+        """
+        try:
+            db.security_group_get_by_name(context, context.project_id,
+                                          'default')
+        except exception.NotFound:
+            values = {'name': 'default',
+                      'description': 'default',
+                      'user_id': context.user_id,
+                      'project_id': context.project_id}
+            db.security_group_create(context, values)
+
+    def update(self, context, instance_id, **kwargs):
+        """Updates the instance in the datastore.
+
+        :param context: The security context
+        :param instance_id: ID of the instance to update
+        :param kwargs: All additional keyword args are treated
+                       as data fields of the instance to be
+                       updated
+
+        :retval None
+
+        """
+        return self.db.instance_update(context, instance_id, kwargs)
+
+    def delete(self, context, instance_id):
+        logging.debug("Going to try and terminate %s" % instance_id)
+        try:
+            instance = self.get(context, instance_id)
+        except exception.NotFound as e:
+            logging.warning(_("Instance %s was not found during terminate"),
+                            instance_id)
+            raise e
+
+        if (instance['state_description'] == 'terminating'):
+            logging.warning(_("Instance %s is already being terminated"),
+                            instance_id)
+            return
+
+        self.update(context,
+                    instance['id'],
+                    state_description='terminating',
+                    state=0,
+                    terminated_at=datetime.datetime.utcnow())
+
+        host = instance['host']
+        if host:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "terminate_instance",
+                      "args": {"instance_id": instance['id']}})
+        else:
+            self.db.instance_destroy(context, instance['id'])
+
+    def get(self, context, instance_id=None, project_id=None):
+        """Get one or more instances, possibly filtered by project
+        ID or user ID. If there is no filter and the context is
+        an admin, it will retrieve all instances in the system."""
+        if instance_id is not None:
+            return self.db.instance_get_by_id(context, instance_id)
+        if project_id or not context.is_admin:
+            if not context.project:
+                return self.db.instance_get_all_by_user(context,
+                                                        context.user_id)
+            if project_id is None:
+                project_id = context.project_id
+            return self.db.instance_get_all_by_project(context,
+                                                       project_id)
+        return self.db.instance_get_all(context)
+
+    def reboot(self, context, instance_id):
+        """Reboot the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "reboot_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def pause(self, context, instance_id):
+        """Pause the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "pause_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def unpause(self, context, instance_id):
+        """Unpause the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "unpause_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def rescue(self, context, instance_id):
+        """Rescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "rescue_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def unrescue(self, context, instance_id):
+        """Unrescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "unrescue_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def attach_volume(self, context, instance_id, volume_id, device):
+        if not re.match("^/dev/[a-z]d[a-z]+$", device):
+            raise exception.ApiError(_("Invalid device specified: %s. "
+                                       "Example device: /dev/vdb") % device)
+        self.volume_api.check_attach(context, volume_id)
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"volume_id": volume_id,
+                           "instance_id": instance_id,
+                           "mountpoint": device}})
+
+    def detach_volume(self, context, volume_id):
+        instance = self.db.volume_get_instance(context.elevated(), volume_id)
+        if not instance:
+            raise exception.ApiError(_("Volume isn't attached to anything!"))
+        self.volume_api.check_detach(context, volume_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "detach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume_id}})
+        return instance

@@ -1,332 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Handles all API requests relating to instances (guest vms).
-"""
-
-import datetime
-import logging
-import time
-
-from nova import db
-from nova import exception
-from nova import flags
-from nova import quota
-from nova import rpc
-from nova import utils
-from nova import volume
-from nova.compute import instance_types
-from nova.db import base
-
-FLAGS = flags.FLAGS
-
-
-def generate_default_hostname(instance_id):
-    """Default function to generate a hostname given an instance reference."""
-    return str(instance_id)
-
-
-class ComputeAPI(base.Base):
-    """API for interacting with the compute manager."""
-
-    def __init__(self, network_manager=None, image_service=None,
-                 volume_api=None, **kwargs):
-        if not network_manager:
-            network_manager = utils.import_object(FLAGS.network_manager)
-        self.network_manager = network_manager
-        if not image_service:
-            image_service = utils.import_object(FLAGS.image_service)
-        self.image_service = image_service
-        if not volume_api:
-            volume_api = volume.API()
-        self.volume_api = volume_api
-        super(ComputeAPI, self).__init__(**kwargs)
-
-    def get_network_topic(self, context, instance_id):
-        try:
-            instance = self.db.instance_get_by_id(context, instance_id)
-        except exception.NotFound as e:
-            logging.warning("Instance %d was not found in get_network_topic",
-                            instance_id)
-            raise e
-
-        host = instance['host']
-        if not host:
-            raise exception.Error("Instance %d has no host" % instance_id)
-        topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
-        return rpc.call(context,
-                        topic,
-                        {"method": "get_network_topic", "args": {'fake': 1}})
-
-    def create_instances(self, context, instance_type, image_id, min_count=1,
-                         max_count=1, kernel_id=None, ramdisk_id=None,
-                         display_name='', description='', key_name=None,
-                         key_data=None, security_group='default',
-                         user_data=None,
-                         generate_hostname=generate_default_hostname):
-        """Create the number of instances requested if quote and
-        other arguments check out ok."""
-
-        num_instances = quota.allowed_instances(context, max_count,
-                                                instance_type)
-        if num_instances < min_count:
-            logging.warn("Quota exceeeded for %s, tried to run %s instances",
-                         context.project_id, min_count)
-            raise quota.QuotaError("Instance quota exceeded. You can only "
-                                   "run %s more instances of this type." %
-                                   num_instances, "InstanceLimitExceeded")
-
-        is_vpn = image_id == FLAGS.vpn_image_id
-        if not is_vpn:
-            image = self.image_service.show(context, image_id)
-            if kernel_id is None:
-                kernel_id = image.get('kernelId', None)
-            if ramdisk_id is None:
-                ramdisk_id = image.get('ramdiskId', None)
-            #No kernel and ramdisk for raw images
-            if kernel_id == str(FLAGS.null_kernel):
-                kernel_id = None
-                ramdisk_id = None
-                logging.debug("Creating a raw instance")
-            # Make sure we have access to kernel and ramdisk (if not raw)
-            if kernel_id:
-                self.image_service.show(context, kernel_id)
-            if ramdisk_id:
-                self.image_service.show(context, ramdisk_id)
-
-        if security_group is None:
-            security_group = ['default']
-        if not type(security_group) is list:
-            security_group = [security_group]
-
-        security_groups = []
-        self.ensure_default_security_group(context)
-        for security_group_name in security_group:
-            group = db.security_group_get_by_name(context,
-                                                  context.project_id,
-                                                  security_group_name)
-            security_groups.append(group['id'])
-
-        if key_data is None and key_name:
-            key_pair = db.key_pair_get(context, context.user_id, key_name)
-            key_data = key_pair['public_key']
-
-        type_data = instance_types.INSTANCE_TYPES[instance_type]
-        base_options = {
-            'reservation_id': utils.generate_uid('r'),
-            'image_id': image_id,
-            'kernel_id': kernel_id or '',
-            'ramdisk_id': ramdisk_id or '',
-            'state_description': 'scheduling',
-            'user_id': context.user_id,
-            'project_id': context.project_id,
-            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
-            'instance_type': instance_type,
-            'memory_mb': type_data['memory_mb'],
-            'vcpus': type_data['vcpus'],
-            'local_gb': type_data['local_gb'],
-            'display_name': display_name,
-            'display_description': description,
-            'user_data': user_data or '',
-            'key_name': key_name,
-            'key_data': key_data}
-
-        elevated = context.elevated()
-        instances = []
-        logging.debug(_("Going to run %s instances..."), num_instances)
-        for num in range(num_instances):
-            instance = dict(mac_address=utils.generate_mac(),
-                            launch_index=num,
-                            **base_options)
-            instance = self.db.instance_create(context, instance)
-            instance_id = instance['id']
-
-            elevated = context.elevated()
-            if not security_groups:
-                security_groups = []
-            for security_group_id in security_groups:
-                self.db.instance_add_security_group(elevated,
-                                                    instance_id,
-                                                    security_group_id)
-
-            # Set sane defaults if not specified
-            updates = dict(hostname=generate_hostname(instance_id))
-            if 'display_name' not in instance:
-                updates['display_name'] = "Server %s" % instance_id
-
-            instance = self.update_instance(context, instance_id, **updates)
-            instances.append(instance)
-
-            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
-                          context.project_id, context.user_id, instance_id)
-            rpc.cast(context,
-                     FLAGS.scheduler_topic,
-                     {"method": "run_instance",
-                      "args": {"topic": FLAGS.compute_topic,
-                               "instance_id": instance_id}})
-
-        return instances
-
-    def ensure_default_security_group(self, context):
-        """ Create security group for the security context if it
-        does not already exist
-
-        :param context: the security context
-
-        """
-        try:
-            db.security_group_get_by_name(context, context.project_id,
-                                          'default')
-        except exception.NotFound:
-            values = {'name': 'default',
-                      'description': 'default',
-                      'user_id': context.user_id,
-                      'project_id': context.project_id}
-            db.security_group_create(context, values)
-
-    def update_instance(self, context, instance_id, **kwargs):
-        """Updates the instance in the datastore.
-
-        :param context: The security context
-        :param instance_id: ID of the instance to update
-        :param kwargs: All additional keyword args are treated
-                       as data fields of the instance to be
-                       updated
-
-        :retval None
-
-        """
-        return self.db.instance_update(context, instance_id, kwargs)
-
-    def delete_instance(self, context, instance_id):
-        logging.debug("Going to try and terminate %s" % instance_id)
-        try:
-            instance = self.db.instance_get_by_id(context, instance_id)
-        except exception.NotFound as e:
-            logging.warning(_("Instance %s was not found during terminate"),
-                            instance_id)
-            raise e
-
-        if (instance['state_description'] == 'terminating'):
-            logging.warning(_("Instance %s is already being terminated"),
-                            instance_id)
-            return
-
-        self.update_instance(context,
-                             instance['id'],
-                             state_description='terminating',
-                             state=0,
-                             terminated_at=datetime.datetime.utcnow())
-
-        host = instance['host']
-        if host:
-            rpc.cast(context,
-                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                     {"method": "terminate_instance",
-                      "args": {"instance_id": instance['id']}})
-        else:
-            self.db.instance_destroy(context, instance['id'])
-
-    def get_instances(self, context, project_id=None):
-        """Get all instances, possibly filtered by project ID or
-        user ID. If there is no filter and the context is an admin,
-        it will retreive all instances in the system."""
-        if project_id or not context.is_admin:
-            if not context.project:
-                return self.db.instance_get_all_by_user(context,
-                                                        context.user_id)
-            if project_id is None:
-                project_id = context.project_id
-            return self.db.instance_get_all_by_project(context, project_id)
-        return self.db.instance_get_all(context)
-
-    def get_instance(self, context, instance_id):
-        return self.db.instance_get_by_id(context, instance_id)
-
-    def reboot(self, context, instance_id):
-        """Reboot the given instance."""
-        instance = self.db.instance_get_by_id(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "reboot_instance",
-                  "args": {"instance_id": instance['id']}})
-
-    def pause(self, context, instance_id):
-        """Pause the given instance."""
-        instance = self.db.instance_get_by_id(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "pause_instance",
-                  "args": {"instance_id": instance['id']}})
-
-    def unpause(self, context, instance_id):
-        """Unpause the given instance."""
-        instance = self.db.instance_get_by_id(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "unpause_instance",
-                  "args": {"instance_id": instance['id']}})
-
-    def rescue(self, context, instance_id):
-        """Rescue the given instance."""
-        instance = self.db.instance_get_by_id(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "rescue_instance",
-                  "args": {"instance_id": instance['id']}})
-
-    def unrescue(self, context, instance_id):
-        """Unrescue the given instance."""
-        instance = self.db.instance_get_by_id(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "unrescue_instance",
-                  "args": {"instance_id": instance['id']}})
-
-    def attach_volume(self, context, instance_id, volume_id, device):
-        if not re.match("^/dev/[a-z]d[a-z]+$", device):
-            raise exception.ApiError(_("Invalid device specified: %s. "
-                                       "Example device: /dev/vdb") % device)
-        self.volume_api.check_attach(context, volume_id)
-        instance = self.get_instance(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "attach_volume",
-                  "args": {"volume_id": volume_id,
-                           "instance_id": instance_id,
-                           "mountpoint": device}})
-
-    def detach_volume(self, context, volume_id):
-        instance = self.db.volume_get_instance(context.elevated(), volume_id)
-        if not instance:
-            raise exception.ApiError(_("Volume isn't attached to anything!"))
-        self.volume_api.check_detach(context, volume_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "detach_volume",
-                  "args": {"instance_id": instance['id'],
-                           "volume_id": volume_id}})
-        return instance

@@ -86,10 +86,8 @@ class ServersTest(unittest.TestCase):
                        instance_address)
         self.stubs.Set(nova.db.api, 'instance_get_floating_address',
                        instance_address)
-        self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
-                       fake_compute_api)
-        self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
-                       fake_compute_api)
+        self.stubs.Set(nova.compute.API, 'pause', fake_compute_api)
+        self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api)
         self.allow_admin = FLAGS.allow_admin_api

     def tearDown(self):
@@ -100,7 +98,7 @@ class ServersTest(unittest.TestCase):
         req = webob.Request.blank('/v1.0/servers/1')
         res = req.get_response(nova.api.API('os'))
         res_dict = json.loads(res.body)
-        self.assertEqual(res_dict['server']['id'], 1)
+        self.assertEqual(res_dict['server']['id'], '1')
         self.assertEqual(res_dict['server']['name'], 'server1')

     def test_get_server_list(self):

@@ -22,6 +22,7 @@ Tests For Compute
 import datetime
 import logging

+from nova import compute
 from nova import context
 from nova import db
 from nova import exception
@@ -29,7 +30,6 @@ from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
-from nova.compute import api as compute_api


 FLAGS = flags.FLAGS
@@ -44,7 +44,7 @@ class ComputeTestCase(test.TestCase):
                                    stub_network=True,
                                    network_manager='nova.network.manager.FlatManager')
         self.compute = utils.import_object(FLAGS.compute_manager)
-        self.compute_api = compute_api.ComputeAPI()
+        self.compute_api = compute.API()
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake')
         self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -72,7 +72,7 @@ class ComputeTestCase(test.TestCase):
         """Verify that an instance cannot be created without a display_name."""
         cases = [dict(), dict(display_name=None)]
         for instance in cases:
-            ref = self.compute_api.create_instances(self.context,
+            ref = self.compute_api.create(self.context,
                 FLAGS.default_instance_type, None, **instance)
             try:
                 self.assertNotEqual(ref[0].display_name, None)
@@ -80,13 +80,13 @@ class ComputeTestCase(test.TestCase):
             db.instance_destroy(self.context, ref[0]['id'])

     def test_create_instance_associates_security_groups(self):
-        """Make sure create_instances associates security groups"""
+        """Make sure create associates security groups"""
         values = {'name': 'default',
                   'description': 'default',
                   'user_id': self.user.id,
                   'project_id': self.project.id}
         group = db.security_group_create(self.context, values)
-        ref = self.compute_api.create_instances(self.context,
+        ref = self.compute_api.create(self.context,
             FLAGS.default_instance_type, None, security_group=['default'])
         try:
             self.assertEqual(len(ref[0]['security_groups']), 1)