Moved the __init__.py API code into api.py and changed the allowed_instances quota method argument to accept the full instance type data, not just the vcpu count.
parent 2899896d1c
commit e97cb0f19f
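
At a glance: each service's API class moves out of the package __init__.py into a new api.py module (the old import path keeps working via a re-export), and quota.allowed_instances() now takes the whole instance-type dict instead of a bare vcpu count. A minimal sketch of the call-site change, assuming a request context and max_count are already in hand (both are placeholders here, not part of the commit):

    from nova import quota
    from nova.compute import instance_types

    max_count = 1                                            # placeholder
    type_data = instance_types.INSTANCE_TYPES['m1.small']

    # Before this commit: callers passed only the core count.
    num = quota.allowed_instances(context, max_count, type_data['vcpus'])

    # After this commit: callers pass the whole type dict and quota.py
    # reads 'vcpus' from it internally.
    num = quota.allowed_instances(context, max_count, type_data)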
nova/compute/__init__.py
@@ -16,371 +16,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova.compute.api import API
-"""
-Handles all requests relating to instances (guest vms).
-"""
-
-import datetime
-import logging
-import time
-
-from nova import db
-from nova import exception
-from nova import flags
-from nova import network
-from nova import quota
-from nova import rpc
-from nova import utils
-from nova import volume
-from nova.compute import instance_types
-from nova.db import base
-
-FLAGS = flags.FLAGS
-
-
-def generate_default_hostname(instance_id):
-    """Default function to generate a hostname given an instance reference."""
-    return str(instance_id)
-
-
-class API(base.Base):
-    """API for interacting with the compute manager."""
-
-    def __init__(self, image_service=None, network_api=None, volume_api=None,
-                 **kwargs):
-        if not image_service:
-            image_service = utils.import_object(FLAGS.image_service)
-        self.image_service = image_service
-        if not network_api:
-            network_api = network.API()
-        self.network_api = network_api
-        if not volume_api:
-            volume_api = volume.API()
-        self.volume_api = volume_api
-        super(API, self).__init__(**kwargs)
-
-    def get_network_topic(self, context, instance_id):
-        try:
-            instance = self.get(context, instance_id)
-        except exception.NotFound as e:
-            logging.warning("Instance %d was not found in get_network_topic",
-                            instance_id)
-            raise e
-
-        host = instance['host']
-        if not host:
-            raise exception.Error("Instance %d has no host" % instance_id)
-        topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
-        return rpc.call(context,
-                        topic,
-                        {"method": "get_network_topic", "args": {'fake': 1}})
-
-    def create(self, context, instance_type,
-               image_id, kernel_id=None, ramdisk_id=None,
-               min_count=1, max_count=1,
-               display_name='', display_description='',
-               key_name=None, key_data=None, security_group='default',
-               availability_zone=None, user_data=None,
-               generate_hostname=generate_default_hostname):
-        """Create the number of instances requested if quota and
-        other arguments check out ok."""
-
-        type_data = instance_types.INSTANCE_TYPES[instance_type]
-        num_instances = quota.allowed_instances(context, max_count,
-                                                type_data['vcpus'])
-        if num_instances < min_count:
-            logging.warn("Quota exceeeded for %s, tried to run %s instances",
-                         context.project_id, min_count)
-            raise quota.QuotaError("Instance quota exceeded. You can only "
-                                   "run %s more instances of this type." %
-                                   num_instances, "InstanceLimitExceeded")
-
-        is_vpn = image_id == FLAGS.vpn_image_id
-        if not is_vpn:
-            image = self.image_service.show(context, image_id)
-            if kernel_id is None:
-                kernel_id = image.get('kernelId', None)
-            if ramdisk_id is None:
-                ramdisk_id = image.get('ramdiskId', None)
-            # No kernel and ramdisk for raw images
-            if kernel_id == str(FLAGS.null_kernel):
-                kernel_id = None
-                ramdisk_id = None
-                logging.debug("Creating a raw instance")
-            # Make sure we have access to kernel and ramdisk (if not raw)
-            if kernel_id:
-                self.image_service.show(context, kernel_id)
-            if ramdisk_id:
-                self.image_service.show(context, ramdisk_id)
-
-        if security_group is None:
-            security_group = ['default']
-        if not type(security_group) is list:
-            security_group = [security_group]
-
-        security_groups = []
-        self.ensure_default_security_group(context)
-        for security_group_name in security_group:
-            group = db.security_group_get_by_name(context,
-                                                  context.project_id,
-                                                  security_group_name)
-            security_groups.append(group['id'])
-
-        if key_data is None and key_name:
-            key_pair = db.key_pair_get(context, context.user_id, key_name)
-            key_data = key_pair['public_key']
-
-        base_options = {
-            'reservation_id': utils.generate_uid('r'),
-            'image_id': image_id,
-            'kernel_id': kernel_id or '',
-            'ramdisk_id': ramdisk_id or '',
-            'state_description': 'scheduling',
-            'user_id': context.user_id,
-            'project_id': context.project_id,
-            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
-            'instance_type': instance_type,
-            'memory_mb': type_data['memory_mb'],
-            'vcpus': type_data['vcpus'],
-            'local_gb': type_data['local_gb'],
-            'display_name': display_name,
-            'display_description': display_description,
-            'user_data': user_data or '',
-            'key_name': key_name,
-            'key_data': key_data,
-            'availability_zone': availability_zone}
-
-        elevated = context.elevated()
-        instances = []
-        logging.debug(_("Going to run %s instances..."), num_instances)
-        for num in range(num_instances):
-            instance = dict(mac_address=utils.generate_mac(),
-                            launch_index=num,
-                            **base_options)
-            instance = self.db.instance_create(context, instance)
-            instance_id = instance['id']
-
-            elevated = context.elevated()
-            if not security_groups:
-                security_groups = []
-            for security_group_id in security_groups:
-                self.db.instance_add_security_group(elevated,
-                                                    instance_id,
-                                                    security_group_id)
-
-            # Set sane defaults if not specified
-            updates = dict(hostname=generate_hostname(instance_id))
-            if 'display_name' not in instance:
-                updates['display_name'] = "Server %s" % instance_id
-
-            instance = self.update(context, instance_id, **updates)
-            instances.append(instance)
-
-            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
-                          context.project_id, context.user_id, instance_id)
-            rpc.cast(context,
-                     FLAGS.scheduler_topic,
-                     {"method": "run_instance",
-                      "args": {"topic": FLAGS.compute_topic,
-                               "instance_id": instance_id}})
-
-        return instances
-
-    def ensure_default_security_group(self, context):
-        """ Create security group for the security context if it
-        does not already exist
-
-        :param context: the security context
-
-        """
-        try:
-            db.security_group_get_by_name(context, context.project_id,
-                                          'default')
-        except exception.NotFound:
-            values = {'name': 'default',
-                      'description': 'default',
-                      'user_id': context.user_id,
-                      'project_id': context.project_id}
-            db.security_group_create(context, values)
-
-    def update(self, context, instance_id, **kwargs):
-        """Updates the instance in the datastore.
-
-        :param context: The security context
-        :param instance_id: ID of the instance to update
-        :param kwargs: All additional keyword args are treated
-                       as data fields of the instance to be
-                       updated
-
-        :retval None
-
-        """
-        return self.db.instance_update(context, instance_id, kwargs)
-
-    def delete(self, context, instance_id):
-        logging.debug("Going to try and terminate %s" % instance_id)
-        try:
-            instance = self.get(context, instance_id)
-        except exception.NotFound as e:
-            logging.warning(_("Instance %s was not found during terminate"),
-                            instance_id)
-            raise e
-
-        if (instance['state_description'] == 'terminating'):
-            logging.warning(_("Instance %s is already being terminated"),
-                            instance_id)
-            return
-
-        self.update(context,
-                    instance['id'],
-                    state_description='terminating',
-                    state=0,
-                    terminated_at=datetime.datetime.utcnow())
-
-        host = instance['host']
-        if host:
-            rpc.cast(context,
-                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                     {"method": "terminate_instance",
-                      "args": {"instance_id": instance_id}})
-        else:
-            self.db.instance_destroy(context, instance_id)
-
-    def get(self, context, instance_id=None, project_id=None,
-            reservation_id=None, fixed_ip=None):
-        """Get one or more instances, possibly filtered by one of the
-        given parameters. If there is no filter and the context is
-        an admin, it will retreive all instances in the system."""
-        if instance_id is not None:
-            return self.db.instance_get_by_id(context, instance_id)
-        if reservation_id is not None:
-            return self.db.instance_get_all_by_reservation(context,
-                                                           reservation_id)
-        if fixed_ip is not None:
-            return self.db.fixed_ip_get_instance(context, fixed_ip)
-        if project_id or not context.is_admin:
-            if not context.project:
-                return self.db.instance_get_all_by_user(context,
-                                                        context.user_id)
-            if project_id is None:
-                project_id = context.project_id
-            return self.db.instance_get_all_by_project(context,
-                                                       project_id)
-        return self.db.instance_get_all(context)
-
-    def snapshot(self, context, instance_id, name):
-        """Snapshot the given instance."""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "snapshot_instance",
-                  "args": {"instance_id": instance_id, "name": name}})
-
-    def reboot(self, context, instance_id):
-        """Reboot the given instance."""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "reboot_instance",
-                  "args": {"instance_id": instance_id}})
-
-    def pause(self, context, instance_id):
-        """Pause the given instance."""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "pause_instance",
-                  "args": {"instance_id": instance_id}})
-
-    def unpause(self, context, instance_id):
-        """Unpause the given instance."""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "unpause_instance",
-                  "args": {"instance_id": instance_id}})
-
-    def get_diagnostics(self, context, instance_id):
-        """Retrieve diagnostics for the given instance."""
-        instance = self.get(context, instance_id)
-        host = instance["host"]
-        return rpc.call(context,
-                        self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                        {"method": "get_diagnostics",
-                         "args": {"instance_id": instance_id}})
-
-    def get_actions(self, context, instance_id):
-        """Retrieve actions for the given instance."""
-        return self.db.instance_get_actions(context, instance_id)
-
-    def suspend(self, context, instance_id):
-        """suspend the instance with instance_id"""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "suspend_instance",
-                  "args": {"instance_id": instance_id}})
-
-    def resume(self, context, instance_id):
-        """resume the instance with instance_id"""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "resume_instance",
-                  "args": {"instance_id": instance_id}})
-
-    def rescue(self, context, instance_id):
-        """Rescue the given instance."""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "rescue_instance",
-                  "args": {"instance_id": instance_id}})
-
-    def unrescue(self, context, instance_id):
-        """Unrescue the given instance."""
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "unrescue_instance",
-                  "args": {"instance_id": instance_id}})
-
-    def attach_volume(self, context, instance_id, volume_id, device):
-        if not re.match("^/dev/[a-z]d[a-z]+$", device):
-            raise exception.ApiError(_("Invalid device specified: %s. "
-                                       "Example device: /dev/vdb") % device)
-        self.volume_api.check_attach(context, volume_id)
-        instance = self.get(context, instance_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "attach_volume",
-                  "args": {"volume_id": volume_id,
-                           "instance_id": instance_id,
-                           "mountpoint": device}})
-
-    def detach_volume(self, context, volume_id):
-        instance = self.db.volume_get_instance(context.elevated(), volume_id)
-        if not instance:
-            raise exception.ApiError(_("Volume isn't attached to anything!"))
-        self.volume_api.check_detach(context, volume_id)
-        host = instance['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "detach_volume",
-                  "args": {"instance_id": instance['id'],
-                           "volume_id": volume_id}})
-        return instance
-
-    def associate_floating_ip(self, context, instance_id, address):
-        instance = self.get(context, instance_id)
-        self.network_api.associate_floating_ip(context, address,
-                                               instance['fixed_ip'])
nova/compute/api.py  (new file, 385 lines)

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Handles all requests relating to instances (guest vms).
"""

import datetime
import logging
import time

from nova import db
from nova import exception
from nova import flags
from nova import network
from nova import quota
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.db import base

FLAGS = flags.FLAGS


def generate_default_hostname(instance_id):
    """Default function to generate a hostname given an instance reference."""
    return str(instance_id)


class API(base.Base):
    """API for interacting with the compute manager."""

    def __init__(self, image_service=None, network_api=None, volume_api=None,
                 **kwargs):
        if not image_service:
            image_service = utils.import_object(FLAGS.image_service)
        self.image_service = image_service
        if not network_api:
            network_api = network.API()
        self.network_api = network_api
        if not volume_api:
            volume_api = volume.API()
        self.volume_api = volume_api
        super(API, self).__init__(**kwargs)

    def get_network_topic(self, context, instance_id):
        try:
            instance = self.get(context, instance_id)
        except exception.NotFound as e:
            logging.warning("Instance %d was not found in get_network_topic",
                            instance_id)
            raise e

        host = instance['host']
        if not host:
            raise exception.Error("Instance %d has no host" % instance_id)
        topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
        return rpc.call(context,
                        topic,
                        {"method": "get_network_topic", "args": {'fake': 1}})

    def create(self, context, instance_type,
               image_id, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None,
               generate_hostname=generate_default_hostname):
        """Create the number of instances requested if quota and
        other arguments check out ok."""

        type_data = instance_types.INSTANCE_TYPES[instance_type]
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            logging.warn("Quota exceeeded for %s, tried to run %s instances",
                         context.project_id, min_count)
            raise quota.QuotaError("Instance quota exceeded. You can only "
                                   "run %s more instances of this type." %
                                   num_instances, "InstanceLimitExceeded")

        is_vpn = image_id == FLAGS.vpn_image_id
        if not is_vpn:
            image = self.image_service.show(context, image_id)
            if kernel_id is None:
                kernel_id = image.get('kernelId', None)
            if ramdisk_id is None:
                ramdisk_id = image.get('ramdiskId', None)
            # No kernel and ramdisk for raw images
            if kernel_id == str(FLAGS.null_kernel):
                kernel_id = None
                ramdisk_id = None
                logging.debug("Creating a raw instance")
            # Make sure we have access to kernel and ramdisk (if not raw)
            if kernel_id:
                self.image_service.show(context, kernel_id)
            if ramdisk_id:
                self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type': instance_type,
            'memory_mb': type_data['memory_mb'],
            'vcpus': type_data['vcpus'],
            'local_gb': type_data['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'availability_zone': availability_zone}

        elevated = context.elevated()
        instances = []
        logging.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=generate_hostname(instance_id))
            if 'display_name' not in instance:
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
                          context.project_id, context.user_id, instance_id)
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id}})

        return instances

    def ensure_default_security_group(self, context):
        """ Create security group for the security context if it
        does not already exist

        :param context: the security context

        """
        try:
            db.security_group_get_by_name(context, context.project_id,
                                          'default')
        except exception.NotFound:
            values = {'name': 'default',
                      'description': 'default',
                      'user_id': context.user_id,
                      'project_id': context.project_id}
            db.security_group_create(context, values)

    def update(self, context, instance_id, **kwargs):
        """Updates the instance in the datastore.

        :param context: The security context
        :param instance_id: ID of the instance to update
        :param kwargs: All additional keyword args are treated
                       as data fields of the instance to be
                       updated

        :retval None

        """
        return self.db.instance_update(context, instance_id, kwargs)

    def delete(self, context, instance_id):
        logging.debug("Going to try and terminate %s" % instance_id)
        try:
            instance = self.get(context, instance_id)
        except exception.NotFound as e:
            logging.warning(_("Instance %s was not found during terminate"),
                            instance_id)
            raise e

        if (instance['state_description'] == 'terminating'):
            logging.warning(_("Instance %s is already being terminated"),
                            instance_id)
            return

        self.update(context,
                    instance['id'],
                    state_description='terminating',
                    state=0,
                    terminated_at=datetime.datetime.utcnow())

        host = instance['host']
        if host:
            rpc.cast(context,
                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
                     {"method": "terminate_instance",
                      "args": {"instance_id": instance_id}})
        else:
            self.db.instance_destroy(context, instance_id)

    def get(self, context, instance_id=None, project_id=None,
            reservation_id=None, fixed_ip=None):
        """Get one or more instances, possibly filtered by one of the
        given parameters. If there is no filter and the context is
        an admin, it will retreive all instances in the system."""
        if instance_id is not None:
            return self.db.instance_get_by_id(context, instance_id)
        if reservation_id is not None:
            return self.db.instance_get_all_by_reservation(context,
                                                           reservation_id)
        if fixed_ip is not None:
            return self.db.fixed_ip_get_instance(context, fixed_ip)
        if project_id or not context.is_admin:
            if not context.project:
                return self.db.instance_get_all_by_user(context,
                                                        context.user_id)
            if project_id is None:
                project_id = context.project_id
            return self.db.instance_get_all_by_project(context,
                                                       project_id)
        return self.db.instance_get_all(context)

    def snapshot(self, context, instance_id, name):
        """Snapshot the given instance."""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "snapshot_instance",
                  "args": {"instance_id": instance_id, "name": name}})

    def reboot(self, context, instance_id):
        """Reboot the given instance."""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "reboot_instance",
                  "args": {"instance_id": instance_id}})

    def pause(self, context, instance_id):
        """Pause the given instance."""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "pause_instance",
                  "args": {"instance_id": instance_id}})

    def unpause(self, context, instance_id):
        """Unpause the given instance."""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "unpause_instance",
                  "args": {"instance_id": instance_id}})

    def get_diagnostics(self, context, instance_id):
        """Retrieve diagnostics for the given instance."""
        instance = self.get(context, instance_id)
        host = instance["host"]
        return rpc.call(context,
                        self.db.queue_get_for(context, FLAGS.compute_topic, host),
                        {"method": "get_diagnostics",
                         "args": {"instance_id": instance_id}})

    def get_actions(self, context, instance_id):
        """Retrieve actions for the given instance."""
        return self.db.instance_get_actions(context, instance_id)

    def suspend(self, context, instance_id):
        """suspend the instance with instance_id"""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "suspend_instance",
                  "args": {"instance_id": instance_id}})

    def resume(self, context, instance_id):
        """resume the instance with instance_id"""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "resume_instance",
                  "args": {"instance_id": instance_id}})

    def rescue(self, context, instance_id):
        """Rescue the given instance."""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "rescue_instance",
                  "args": {"instance_id": instance_id}})

    def unrescue(self, context, instance_id):
        """Unrescue the given instance."""
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "unrescue_instance",
                  "args": {"instance_id": instance_id}})

    def attach_volume(self, context, instance_id, volume_id, device):
        if not re.match("^/dev/[a-z]d[a-z]+$", device):
            raise exception.ApiError(_("Invalid device specified: %s. "
                                       "Example device: /dev/vdb") % device)
        self.volume_api.check_attach(context, volume_id)
        instance = self.get(context, instance_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "attach_volume",
                  "args": {"volume_id": volume_id,
                           "instance_id": instance_id,
                           "mountpoint": device}})

    def detach_volume(self, context, volume_id):
        instance = self.db.volume_get_instance(context.elevated(), volume_id)
        if not instance:
            raise exception.ApiError(_("Volume isn't attached to anything!"))
        self.volume_api.check_detach(context, volume_id)
        host = instance['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "detach_volume",
                  "args": {"instance_id": instance['id'],
                           "volume_id": volume_id}})
        return instance

    def associate_floating_ip(self, context, instance_id, address):
        instance = self.get(context, instance_id)
        self.network_api.associate_floating_ip(context, address,
                                               instance['fixed_ip'])
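
A hedged usage sketch of the relocated compute API; the request context, image id, and display name below are placeholders, not taken from the commit. One caveat visible in the listing above: attach_volume() calls re.match() but the module never imports re, so that path would fail with a NameError as committed.

    from nova.compute import api as compute_api

    compute = compute_api.API()
    # ctxt stands in for a nova request context obtained elsewhere.
    instances = compute.create(ctxt, 'm1.small', 'ami-00000001',
                               display_name='demo server')
    compute.reboot(ctxt, instances[0]['id'])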
nova/network/__init__.py
@@ -16,72 +16,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova.network.api import API
-"""
-Handles all requests relating to instances (guest vms).
-"""
-
-import logging
-
-from nova import db
-from nova import flags
-from nova import quota
-from nova import rpc
-from nova.db import base
-
-FLAGS = flags.FLAGS
-
-
-class API(base.Base):
-    """API for interacting with the network manager."""
-
-    def allocate_floating_ip(self, context):
-        if quota.allowed_floating_ips(context, 1) < 1:
-            logging.warn(_("Quota exceeeded for %s, tried to allocate "
-                           "address"),
-                         context.project_id)
-            raise quota.QuotaError(_("Address quota exceeded. You cannot "
-                                     "allocate any more addresses"))
-        # NOTE(vish): We don't know which network host should get the ip
-        #             when we allocate, so just send it to any one. This
-        #             will probably need to move into a network supervisor
-        #             at some point.
-        return rpc.call(context,
-                        FLAGS.network_topic,
-                        {"method": "allocate_floating_ip",
-                         "args": {"project_id": context.project_id}})
-
-    def release_floating_ip(self, context, address):
-        floating_ip = self.db.floating_ip_get_by_address(context, address)
-        # NOTE(vish): We don't know which network host should get the ip
-        #             when we deallocate, so just send it to any one. This
-        #             will probably need to move into a network supervisor
-        #             at some point.
-        rpc.cast(context,
-                 FLAGS.network_topic,
-                 {"method": "deallocate_floating_ip",
-                  "args": {"floating_address": floating_ip['address']}})
-
-    def associate_floating_ip(self, context, floating_ip, fixed_ip):
-        if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
-            fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
-        floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
-        # NOTE(vish): Perhaps we should just pass this on to compute and
-        #             let compute communicate with network.
-        host = fixed_ip['network']['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.network_topic, host),
-                 {"method": "associate_floating_ip",
-                  "args": {"floating_address": floating_ip['address'],
-                           "fixed_address": fixed_ip['address']}})
-
-    def disassociate_floating_ip(self, context, address):
-        floating_ip = self.db.floating_ip_get_by_address(context, address)
-        if not floating_ip.get('fixed_ip'):
-            raise exception.ApiError('Address is not associated.')
-        # NOTE(vish): Get the topic from the host name of the network of
-        #             the associated fixed ip.
-        host = floating_ip['fixed_ip']['network']['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.network_topic, host),
-                 {"method": "disassociate_floating_ip",
-                  "args": {"floating_address": floating_ip['address']}})
nova/network/api.py  (new file, 87 lines)

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Handles all requests relating to instances (guest vms).
"""

import logging

from nova import db
from nova import flags
from nova import quota
from nova import rpc
from nova.db import base

FLAGS = flags.FLAGS


class API(base.Base):
    """API for interacting with the network manager."""

    def allocate_floating_ip(self, context):
        if quota.allowed_floating_ips(context, 1) < 1:
            logging.warn(_("Quota exceeeded for %s, tried to allocate "
                           "address"),
                         context.project_id)
            raise quota.QuotaError(_("Address quota exceeded. You cannot "
                                     "allocate any more addresses"))
        # NOTE(vish): We don't know which network host should get the ip
        #             when we allocate, so just send it to any one. This
        #             will probably need to move into a network supervisor
        #             at some point.
        return rpc.call(context,
                        FLAGS.network_topic,
                        {"method": "allocate_floating_ip",
                         "args": {"project_id": context.project_id}})

    def release_floating_ip(self, context, address):
        floating_ip = self.db.floating_ip_get_by_address(context, address)
        # NOTE(vish): We don't know which network host should get the ip
        #             when we deallocate, so just send it to any one. This
        #             will probably need to move into a network supervisor
        #             at some point.
        rpc.cast(context,
                 FLAGS.network_topic,
                 {"method": "deallocate_floating_ip",
                  "args": {"floating_address": floating_ip['address']}})

    def associate_floating_ip(self, context, floating_ip, fixed_ip):
        if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
            fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
        floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
        # NOTE(vish): Perhaps we should just pass this on to compute and
        #             let compute communicate with network.
        host = fixed_ip['network']['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.network_topic, host),
                 {"method": "associate_floating_ip",
                  "args": {"floating_address": floating_ip['address'],
                           "fixed_address": fixed_ip['address']}})

    def disassociate_floating_ip(self, context, address):
        floating_ip = self.db.floating_ip_get_by_address(context, address)
        if not floating_ip.get('fixed_ip'):
            raise exception.ApiError('Address is not associated.')
        # NOTE(vish): Get the topic from the host name of the network of
        #             the associated fixed ip.
        host = floating_ip['fixed_ip']['network']['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.network_topic, host),
                 {"method": "disassociate_floating_ip",
                  "args": {"floating_address": floating_ip['address']}})
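
A hedged sketch of the floating-ip flow this module exposes; ctxt and the fixed address are placeholders. Note that disassociate_floating_ip() raises exception.ApiError although the module does not import exception, a pre-existing quirk carried over by the move.

    from nova.network import api as network_api

    net = network_api.API()
    address = net.allocate_floating_ip(ctxt)     # rpc.call, presumably returns the address
    net.associate_floating_ip(ctxt, address, '10.0.0.5')  # fixed ip may be a plain string
    net.disassociate_floating_ip(ctxt, address)
    net.release_floating_ip(ctxt, address)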
nova/quota.py
@@ -53,7 +53,7 @@ def get_quota(context, project_id):
     return rval


-def allowed_instances(context, num_instances, cores_per_instance):
+def allowed_instances(context, num_instances, instance_type):
     """Check quota and return min(num_instances, allowed_instances)"""
     project_id = context.project_id
     context = context.elevated()
@@ -62,9 +62,9 @@ def allowed_instances(context, num_instances, cores_per_instance):
     quota = get_quota(context, project_id)
     allowed_instances = quota['instances'] - used_instances
     allowed_cores = quota['cores'] - used_cores
-    num_cores = num_instances * cores_per_instance
+    num_cores = num_instances * instance_type['vcpus']
     allowed_instances = min(allowed_instances,
-                            int(allowed_cores // cores_per_instance))
+                            int(allowed_cores // instance_type['vcpus']))
     return min(num_instances, allowed_instances)

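
A small worked example of the arithmetic in the hunk above; the quota numbers are made up and only illustrate how the instance and core limits are combined:

    allowed_instances = 10          # quota['instances'] - used_instances
    allowed_cores = 20              # quota['cores'] - used_cores
    instance_type = {'vcpus': 4}    # e.g. an INSTANCE_TYPES entry

    allowed_instances = min(allowed_instances,
                            int(allowed_cores // instance_type['vcpus']))
    # allowed_instances is now 5, so requesting 8 yields min(8, 5) == 5.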
@@ -27,6 +27,7 @@ from nova import test
 from nova import utils
 from nova.auth import manager
 from nova.api.ec2 import cloud
+from nova.compute import instance_types


 FLAGS = flags.FLAGS
@@ -78,14 +79,17 @@ class QuotaTestCase(test.TestCase):

     def test_quota_overrides(self):
         """Make sure overriding a projects quotas works"""
-        num_instances = quota.allowed_instances(self.context, 100, 1)
+        num_instances = quota.allowed_instances(self.context, 100,
+            instance_types.INSTANCE_TYPES['m1.small'])
         self.assertEqual(num_instances, 2)
         db.quota_create(self.context, {'project_id': self.project.id,
                                        'instances': 10})
-        num_instances = quota.allowed_instances(self.context, 100, 1)
+        num_instances = quota.allowed_instances(self.context, 100,
+            instance_types.INSTANCE_TYPES['m1.small'])
         self.assertEqual(num_instances, 4)
         db.quota_update(self.context, self.project.id, {'cores': 100})
-        num_instances = quota.allowed_instances(self.context, 100, 1)
+        num_instances = quota.allowed_instances(self.context, 100,
+            instance_types.INSTANCE_TYPES['m1.small'])
         self.assertEqual(num_instances, 10)
         db.quota_destroy(self.context, self.project.id)
nova/volume/__init__.py
@@ -16,85 +16,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova.volume.api import API
-"""
-Handles all requests relating to volumes.
-"""
-
-import datetime
-import logging
-
-from nova import db
-from nova import exception
-from nova import flags
-from nova import quota
-from nova import rpc
-from nova.db import base
-
-FLAGS = flags.FLAGS
-flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
-
-
-class API(base.Base):
-    """API for interacting with the volume manager."""
-
-    def create(self, context, size, name, description):
-        if quota.allowed_volumes(context, 1, size) < 1:
-            logging.warn("Quota exceeeded for %s, tried to create %sG volume",
-                         context.project_id, size)
-            raise quota.QuotaError("Volume quota exceeded. You cannot "
-                                   "create a volume of size %s" % size)
-
-        options = {
-            'size': size,
-            'user_id': context.user.id,
-            'project_id': context.project_id,
-            'availability_zone': FLAGS.storage_availability_zone,
-            'status': "creating",
-            'attach_status': "detached",
-            'display_name': name,
-            'display_description': description}
-
-        volume = self.db.volume_create(context, options)
-        rpc.cast(context,
-                 FLAGS.scheduler_topic,
-                 {"method": "create_volume",
-                  "args": {"topic": FLAGS.volume_topic,
-                           "volume_id": volume['id']}})
-        return volume
-
-    def delete(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
-        if volume['status'] != "available":
-            raise exception.ApiError(_("Volume status must be available"))
-        now = datetime.datetime.utcnow()
-        self.db.volume_update(context, volume_id, {'status': 'deleting',
-                                                   'terminated_at': now})
-        host = volume['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.volume_topic, host),
-                 {"method": "delete_volume",
-                  "args": {"volume_id": volume_id}})
-
-    def update(self, context, volume_id, fields):
-        self.db.volume_update(context, volume_id, fields)
-
-    def get(self, context, volume_id=None):
-        if volume_id is not None:
-            return self.db.volume_get(context, volume_id)
-        if context.user.is_admin():
-            return self.db.volume_get_all(context)
-        return self.db.volume_get_all_by_project(context, context.project_id)
-
-    def check_attach(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
-        # TODO(vish): abstract status checking?
-        if volume['status'] != "available":
-            raise exception.ApiError(_("Volume status must be available"))
-        if volume['attach_status'] == "attached":
-            raise exception.ApiError(_("Volume is already attached"))
-
-    def check_detach(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
-        # TODO(vish): abstract status checking?
-        if volume['status'] == "available":
-            raise exception.ApiError(_("Volume is already detached"))
nova/volume/api.py  (new file, 100 lines)

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Handles all requests relating to volumes.
"""

import datetime
import logging

from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import rpc
from nova.db import base

FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')


class API(base.Base):
    """API for interacting with the volume manager."""

    def create(self, context, size, name, description):
        if quota.allowed_volumes(context, 1, size) < 1:
            logging.warn("Quota exceeeded for %s, tried to create %sG volume",
                         context.project_id, size)
            raise quota.QuotaError("Volume quota exceeded. You cannot "
                                   "create a volume of size %s" % size)

        options = {
            'size': size,
            'user_id': context.user.id,
            'project_id': context.project_id,
            'availability_zone': FLAGS.storage_availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': name,
            'display_description': description}

        volume = self.db.volume_create(context, options)
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "create_volume",
                  "args": {"topic": FLAGS.volume_topic,
                           "volume_id": volume['id']}})
        return volume

    def delete(self, context, volume_id):
        volume = self.db.volume_get(context, volume_id)
        if volume['status'] != "available":
            raise exception.ApiError(_("Volume status must be available"))
        now = datetime.datetime.utcnow()
        self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                   'terminated_at': now})
        host = volume['host']
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.volume_topic, host),
                 {"method": "delete_volume",
                  "args": {"volume_id": volume_id}})

    def update(self, context, volume_id, fields):
        self.db.volume_update(context, volume_id, fields)

    def get(self, context, volume_id=None):
        if volume_id is not None:
            return self.db.volume_get(context, volume_id)
        if context.user.is_admin():
            return self.db.volume_get_all(context)
        return self.db.volume_get_all_by_project(context, context.project_id)

    def check_attach(self, context, volume_id):
        volume = self.db.volume_get(context, volume_id)
        # TODO(vish): abstract status checking?
        if volume['status'] != "available":
            raise exception.ApiError(_("Volume status must be available"))
        if volume['attach_status'] == "attached":
            raise exception.ApiError(_("Volume is already attached"))

    def check_detach(self, context, volume_id):
        volume = self.db.volume_get(context, volume_id)
        # TODO(vish): abstract status checking?
        if volume['status'] == "available":
            raise exception.ApiError(_("Volume is already detached"))
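
A hedged sketch of the volume API surface above; ctxt is a placeholder request context and the size and names are arbitrary:

    from nova.volume import api as volume_api

    volumes = volume_api.API()
    vol = volumes.create(ctxt, size=1, name='demo', description='scratch volume')
    # The volume starts in status 'creating'; once the volume service flips it
    # to 'available', these calls stop raising ApiError:
    volumes.check_attach(ctxt, vol['id'])
    volumes.delete(ctxt, vol['id'])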