renamed target_id to iscsi_target

commit 967cf15723
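For reference, the identifiers renamed by this change, collected from the hunks below into a minimal runnable sketch (the snippet itself is illustrative and not part of the commit):

    # Old name -> new name, as introduced by this change.
    RENAMES = {
        'target_id_count_by_host': 'iscsi_target_count_by_host',
        'target_id_create_safe': 'iscsi_target_create_safe',
        'volume_allocate_target_id': 'volume_allocate_iscsi_target',
        'volume_get_target_id': 'volume_get_iscsi_target_num',
        'models.TargetId': 'models.IscsiTarget',
        "table 'target_ids'": "table 'iscsi_targets'",
        'FLAGS.iscsi_target_ids': 'FLAGS.iscsi_num_targets',
    }

    for old, new in sorted(RENAMES.items()):
        print('%s -> %s' % (old, new))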
@@ -36,3 +36,23 @@ def reboot(instance_id, context=None):
             db.queue_get_for(context, FLAGS.compute_topic, host),
             {"method": "reboot_instance",
              "args": {"instance_id": instance_ref['id']}})


def rescue(instance_id, context):
    """Rescue the given instance."""
    instance_ref = db.instance_get_by_internal_id(context, instance_id)
    host = instance_ref['host']
    rpc.cast(context,
             db.queue_get_for(context, FLAGS.compute_topic, host),
             {"method": "rescue_instance",
              "args": {"instance_id": instance_ref['id']}})


def unrescue(instance_id, context):
    """Unrescue the given instance."""
    instance_ref = db.instance_get_by_internal_id(context, instance_id)
    host = instance_ref['host']
    rpc.cast(context,
             db.queue_get_for(context, FLAGS.compute_topic, host),
             {"method": "unrescue_instance",
              "args": {"instance_id": instance_ref['id']}})
@@ -25,6 +25,7 @@ datastore.
import base64
import datetime
import logging
import re
import os
import time

@@ -529,6 +530,9 @@ class CloudController(object):

    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
        volume_ref = db.volume_get_by_ec2_id(context, volume_id)
        if not re.match("^/dev/[a-z]d[a-z]+$", device):
            raise exception.ApiError("Invalid device specified: %s. "
                                     "Example device: /dev/vdb" % device)
        # TODO(vish): abstract status checking?
        if volume_ref['status'] != "available":
            raise exception.ApiError("Volume status must be available")

@@ -942,8 +946,21 @@ class CloudController(object):

    def reboot_instances(self, context, instance_id, **kwargs):
        """instance_id is a list of instance ids"""
        for id_str in instance_id:
            cloud.reboot(id_str, context=context)
        for ec2_id in instance_id:
            internal_id = ec2_id_to_internal_id(ec2_id)
            cloud.reboot(internal_id, context=context)
        return True

    def rescue_instance(self, context, instance_id, **kwargs):
        """This is an extension to the normal ec2_api"""
        internal_id = ec2_id_to_internal_id(instance_id)
        cloud.rescue(internal_id, context=context)
        return True

    def unrescue_instance(self, context, instance_id, **kwargs):
        """This is an extension to the normal ec2_api"""
        internal_id = ec2_id_to_internal_id(instance_id)
        cloud.unrescue(internal_id, context=context)
        return True

    def update_instance(self, context, ec2_id, **kwargs):
@@ -23,6 +23,7 @@ WSGI middleware for OpenStack API controllers.
import json
import time

import logging
import routes
import webob.dec
import webob.exc

@@ -54,6 +55,15 @@ class API(wsgi.Middleware):
        app = AuthMiddleware(RateLimitingMiddleware(APIRouter()))
        super(API, self).__init__(app)

    @webob.dec.wsgify
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            logging.warn("Caught error: %s" % str(ex))
            exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
            return faults.Fault(exc)


class AuthMiddleware(wsgi.Middleware):
    """Authorize the openstack API request or return an HTTP Forbidden."""
@@ -68,10 +68,10 @@ class Limiter(object):
            self._levels[key] = (now, new_level)
        return None


# If one instance of this WSGIApps is unable to handle your load, put a
# sharding app in front that shards by username to one of many backends.


class WSGIApp(object):

    """Application that tracks rate limits in memory. Send requests to it of
@@ -20,10 +20,8 @@
Handles all code relating to instances (guest vms)
"""

import base64
import datetime
import logging
import os

from twisted.internet import defer

@@ -59,7 +57,11 @@ class ComputeManager(manager.Manager):
        """Update the state of an instance from the driver info"""
        # FIXME(ja): include other fields from state?
        instance_ref = self.db.instance_get(context, instance_id)
        state = self.driver.get_info(instance_ref.name)['state']
        try:
            info = self.driver.get_info(instance_ref['name'])
            state = info['state']
        except exception.NotFound:
            state = power_state.NOSTATE
        self.db.instance_set_state(context, instance_id, state)

    @defer.inlineCallbacks

@@ -129,16 +131,15 @@ class ComputeManager(manager.Manager):
    def reboot_instance(self, context, instance_id):
        """Reboot an instance on this server."""
        context = context.elevated()
        self._update_state(context, instance_id)
        instance_ref = self.db.instance_get(context, instance_id)
        self._update_state(context, instance_id)

        if instance_ref['state'] != power_state.RUNNING:
            raise exception.Error(
                'trying to reboot a non-running'
                'instance: %s (state: %s excepted: %s)' %
                (instance_ref['internal_id'],
                 instance_ref['state'],
                 power_state.RUNNING))
            logging.warn('trying to reboot a non-running '
                         'instance: %s (state: %s excepted: %s)',
                         instance_ref['internal_id'],
                         instance_ref['state'],
                         power_state.RUNNING)

        logging.debug('instance %s: rebooting', instance_ref['name'])
        self.db.instance_set_state(context,

@@ -148,6 +149,38 @@ class ComputeManager(manager.Manager):
        yield self.driver.reboot(instance_ref)
        self._update_state(context, instance_id)

    @defer.inlineCallbacks
    @exception.wrap_exception
    def rescue_instance(self, context, instance_id):
        """Rescue an instance on this server."""
        context = context.elevated()
        instance_ref = self.db.instance_get(context, instance_id)

        logging.debug('instance %s: rescuing',
                      instance_ref['internal_id'])
        self.db.instance_set_state(context,
                                   instance_id,
                                   power_state.NOSTATE,
                                   'rescuing')
        yield self.driver.rescue(instance_ref)
        self._update_state(context, instance_id)

    @defer.inlineCallbacks
    @exception.wrap_exception
    def unrescue_instance(self, context, instance_id):
        """Rescue an instance on this server."""
        context = context.elevated()
        instance_ref = self.db.instance_get(context, instance_id)

        logging.debug('instance %s: unrescuing',
                      instance_ref['internal_id'])
        self.db.instance_set_state(context,
                                   instance_id,
                                   power_state.NOSTATE,
                                   'unrescuing')
        yield self.driver.unrescue(instance_ref)
        self._update_state(context, instance_id)

    @exception.wrap_exception
    def get_console_output(self, context, instance_id):
        """Send the console output for an instance."""
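To connect the two halves above (a sketch, not from the commit): the rescue()/unrescue() helpers in the first hunk cast a message onto the compute host's topic queue, and the ComputeManager method whose name matches 'method' consumes it. The queue name and id below are illustrative only.

    # Shape of the cast sent by the rescue() helper in the first hunk.
    topic = 'compute.host1'            # illustrative; built via db.queue_get_for()
    message = {'method': 'rescue_instance',
               'args': {'instance_id': 1}}

    # On the compute host this is dispatched as:
    #     ComputeManager.rescue_instance(context, instance_id=1)
    print(topic, message)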
@@ -486,18 +486,18 @@ def export_device_create_safe(context, values):
###################


def target_id_count_by_host(context, host):
def iscsi_target_count_by_host(context, host):
    """Return count of export devices."""
    return IMPL.target_id_count_by_host(context, host)
    return IMPL.iscsi_target_count_by_host(context, host)


def target_id_create_safe(context, values):
    """Create an target_id from the values dictionary.
def iscsi_target_create_safe(context, values):
    """Create an iscsi_target from the values dictionary.

    The device is not returned. If the create violates the unique
    constraints because the target_id and host already exist,
    constraints because the iscsi_target and host already exist,
    no exception is raised."""
    return IMPL.target_id_create_safe(context, values)
    return IMPL.iscsi_target_create_safe(context, values)


###############

@@ -549,9 +549,9 @@ def volume_allocate_shelf_and_blade(context, volume_id):
    return IMPL.volume_allocate_shelf_and_blade(context, volume_id)


def volume_allocate_target_id(context, volume_id, host):
    """Atomically allocate a free target_id from the pool."""
    return IMPL.volume_allocate_target_id(context, volume_id, host)
def volume_allocate_iscsi_target(context, volume_id, host):
    """Atomically allocate a free iscsi_target from the pool."""
    return IMPL.volume_allocate_iscsi_target(context, volume_id, host)


def volume_attached(context, volume_id, instance_id, mountpoint):

@@ -614,9 +614,9 @@ def volume_get_shelf_and_blade(context, volume_id):
    return IMPL.volume_get_shelf_and_blade(context, volume_id)


def volume_get_target_id(context, volume_id):
    """Get the target id allocated to the volume."""
    return IMPL.volume_get_target_id(context, volume_id)
def volume_get_iscsi_target_num(context, volume_id):
    """Get the target num (tid) allocated to the volume."""
    return IMPL.volume_get_iscsi_target_num(context, volume_id)


def volume_update(context, volume_id, values):
@@ -19,6 +19,7 @@
Implementation of SQLAlchemy backend
"""

import random
import warnings

from nova import db

@@ -542,7 +543,8 @@ def instance_create(context, values):
    session = get_session()
    with session.begin():
        while instance_ref.internal_id == None:
            internal_id = utils.generate_uid(instance_ref.__prefix__)
            # Instances have integer internal ids.
            internal_id = random.randint(0, 2 ** 32 - 1)
            if not instance_internal_id_exists(context, internal_id,
                                               session=session):
                instance_ref.internal_id = internal_id

@@ -1042,22 +1044,22 @@ def export_device_create_safe(context, values):


@require_admin_context
def target_id_count_by_host(context, host):
def iscsi_target_count_by_host(context, host):
    session = get_session()
    return session.query(models.TargetId).\
    return session.query(models.IscsiTarget).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(host=host).\
                   count()


@require_admin_context
def target_id_create_safe(context, values):
    target_id_ref = models.TargetId()
def iscsi_target_create_safe(context, values):
    iscsi_target_ref = models.IscsiTarget()
    for (key, value) in values.iteritems():
        target_id_ref[key] = value
        iscsi_target_ref[key] = value
    try:
        target_id_ref.save()
        return target_id_ref
        iscsi_target_ref.save()
        return iscsi_target_ref
    except IntegrityError:
        return None

@@ -1155,10 +1157,10 @@ def volume_allocate_shelf_and_blade(context, volume_id):


@require_admin_context
def volume_allocate_target_id(context, volume_id, host):
def volume_allocate_iscsi_target(context, volume_id, host):
    session = get_session()
    with session.begin():
        target_id_ref = session.query(models.TargetId).\
        iscsi_target_ref = session.query(models.IscsiTarget).\
                                  filter_by(volume=None).\
                                  filter_by(host=host).\
                                  filter_by(deleted=False).\

@@ -1166,11 +1168,11 @@ def volume_allocate_target_id(context, volume_id, host):
                                  first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not target_id_ref:
        if not iscsi_target_ref:
            raise db.NoMoreTargets()
        target_id_ref.volume_id = volume_id
        session.add(target_id_ref)
        return target_id_ref.target_id
        iscsi_target_ref.volume_id = volume_id
        session.add(iscsi_target_ref)
        return iscsi_target_ref.target_num


@require_admin_context

@@ -1195,7 +1197,7 @@ def volume_create(context, values):
    session = get_session()
    with session.begin():
        while volume_ref.ec2_id == None:
            ec2_id = utils.generate_uid(volume_ref.__prefix__)
            ec2_id = utils.generate_uid('vol')
            if not volume_ec2_id_exists(context, ec2_id, session=session):
                volume_ref.ec2_id = ec2_id
        volume_ref.save(session=session)

@@ -1224,7 +1226,7 @@ def volume_destroy(context, volume_id):
        session.execute('update export_devices set volume_id=NULL '
                        'where volume_id=:id',
                        {'id': volume_id})
        session.execute('update target_ids set volume_id=NULL '
        session.execute('update iscsi_targets set volume_id=NULL '
                        'where volume_id=:id',
                        {'id': volume_id})

@@ -1358,16 +1360,16 @@ def volume_get_shelf_and_blade(context, volume_id):


@require_admin_context
def volume_get_target_id(context, volume_id):
def volume_get_iscsi_target_num(context, volume_id):
    session = get_session()
    result = session.query(models.TargetId).\
    result = session.query(models.IscsiTarget).\
                     filter_by(volume_id=volume_id).\
                     first()
    if not result:
        raise exception.NotFound('No target id found for volume %s' %
                                 volume_id)

    return result.target_id
    return result.target_num


@require_context
@@ -20,11 +20,9 @@
SQLAlchemy models for nova data
"""

import sys
import datetime

# TODO(vish): clean up these imports
from sqlalchemy.orm import relationship, backref, exc, object_mapper
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy.exc import IntegrityError

@@ -46,17 +44,11 @@ class NovaBase(object):
    """Base class for Nova Models"""
    __table_args__ = {'mysql_engine': 'InnoDB'}
    __table_initialized__ = False
    __prefix__ = 'none'
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow)
    deleted_at = Column(DateTime)
    deleted = Column(Boolean, default=False)

    @property
    def str_id(self):
        """Get string id of object (generally prefix + '-' + id)"""
        return "%s-%s" % (self.__prefix__, self.id)

    def save(self, session=None):
        """Save this object"""
        if not session:

@@ -100,7 +92,6 @@ class NovaBase(object):
#class Image(BASE, NovaBase):
#    """Represents an image in the datastore"""
#    __tablename__ = 'images'
#    __prefix__ = 'ami'
#    id = Column(Integer, primary_key=True)
#    ec2_id = Column(String(12), unique=True)
#    user_id = Column(String(255))

@@ -156,7 +147,6 @@ class Service(BASE, NovaBase):
class Instance(BASE, NovaBase):
    """Represents a guest vm"""
    __tablename__ = 'instances'
    __prefix__ = 'i'
    id = Column(Integer, primary_key=True)
    internal_id = Column(Integer, unique=True)

@@ -233,7 +223,6 @@ class Instance(BASE, NovaBase):
class Volume(BASE, NovaBase):
    """Represents a block storage device that can be attached to a vm"""
    __tablename__ = 'volumes'
    __prefix__ = 'vol'
    id = Column(Integer, primary_key=True)
    ec2_id = Column(String(12), unique=True)

@@ -279,10 +268,6 @@ class Quota(BASE, NovaBase):
    gigabytes = Column(Integer)
    floating_ips = Column(Integer)

    @property
    def str_id(self):
        return self.project_id


class ExportDevice(BASE, NovaBase):
    """Represates a shelf and blade that a volume can be exported on"""

@@ -300,20 +285,20 @@ class ExportDevice(BASE, NovaBase):
                          'ExportDevice.deleted==False)')


class TargetId(BASE, NovaBase):
    """Represates an iscsi target_id for a given host"""
    __tablename__ = 'target_ids'
    __table_args__ = (schema.UniqueConstraint("target_id", "host"),
class IscsiTarget(BASE, NovaBase):
    """Represates an iscsi target for a given host"""
    __tablename__ = 'iscsi_targets'
    __table_args__ = (schema.UniqueConstraint("target_num", "host"),
                      {'mysql_engine': 'InnoDB'})
    id = Column(Integer, primary_key=True)
    target_id = Column(Integer)
    target_num = Column(Integer)
    host = Column(String(255))
    volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
    volume = relationship(Volume,
                          backref=backref('target_id', uselist=False),
                          backref=backref('iscsi_target', uselist=False),
                          foreign_keys=volume_id,
                          primaryjoin='and_(TargetId.volume_id==Volume.id,'
                                      'TargetId.deleted==False)')
                          primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
                                      'IscsiTarget.deleted==False)')


class SecurityGroupInstanceAssociation(BASE, NovaBase):

@@ -387,10 +372,6 @@ class KeyPair(BASE, NovaBase):
    fingerprint = Column(String(255))
    public_key = Column(Text)

    @property
    def str_id(self):
        return '%s.%s' % (self.user_id, self.name)


class Network(BASE, NovaBase):
    """Represents a network"""

@@ -452,10 +433,6 @@ class FixedIp(BASE, NovaBase):
    leased = Column(Boolean, default=False)
    reserved = Column(Boolean, default=False)

    @property
    def str_id(self):
        return self.address


class User(BASE, NovaBase):
    """Represents a user"""

@@ -536,7 +513,7 @@ class FloatingIp(BASE, NovaBase):
def register_models():
    """Register Models and create metadata"""
    from sqlalchemy import create_engine
    models = (Service, Instance, Volume, ExportDevice, TargetId, FixedIp,
    models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp,
              FloatingIp, Network, SecurityGroup,
              SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
              AuthToken, User, Project) # , Image, Host
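A minimal in-memory sketch (not from the commit) of the allocation rule implemented by volume_allocate_iscsi_target() above: pick a free iscsi_targets row for the host, bind it to the volume, and return its target_num, which is the iSCSI tid handed to ietadm. Note that target_num is distinct from the row's primary-key id, which is why the column was renamed.

    iscsi_targets = [
        {'id': 1, 'target_num': 1, 'host': 'host1', 'volume_id': None},
        {'id': 2, 'target_num': 2, 'host': 'host1', 'volume_id': None},
    ]

    def allocate_iscsi_target(volume_id, host):
        # Mirrors the query: filter_by(volume=None), filter_by(host=host), first()
        for target in iscsi_targets:
            if target['host'] == host and target['volume_id'] is None:
                target['volume_id'] = volume_id
                return target['target_num']
        raise Exception('NoMoreTargets')  # the real code raises db.NoMoreTargets()

    print(allocate_iscsi_target(volume_id=42, host='host1'))  # -> 1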
@@ -171,7 +171,7 @@ class NetworkManager(manager.Manager):
        if not fixed_ip_ref['leased']:
            logging.warn("IP %s released that was not leased", address)
        self.db.fixed_ip_update(context,
                                fixed_ip_ref['str_id'],
                                fixed_ip_ref['address'],
                                {'leased': False})
        if not fixed_ip_ref['allocated']:
            self.db.fixed_ip_disassociate(context, address)
@@ -44,7 +44,6 @@ import multiprocessing
import os
import urllib

from tornado import escape
from twisted.application import internet
from twisted.application import service
from twisted.web import error

@@ -55,6 +54,7 @@ from twisted.web import static
from nova import context
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
from nova.objectstore import bucket
from nova.objectstore import image

@@ -70,10 +70,10 @@ def render_xml(request, value):

    name = value.keys()[0]
    request.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    request.write('<' + escape.utf8(name) +
    request.write('<' + utils.utf8(name) +
                  ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
    _render_parts(value.values()[0], request.write)
    request.write('</' + escape.utf8(name) + '>')
    request.write('</' + utils.utf8(name) + '>')
    request.finish()


@@ -87,7 +87,7 @@ def finish(request, content=None):
def _render_parts(value, write_cb):
    """Helper method to render different Python objects to XML"""
    if isinstance(value, basestring):
        write_cb(escape.xhtml_escape(value))
        write_cb(utils.xhtml_escape(value))
    elif isinstance(value, int) or isinstance(value, long):
        write_cb(str(value))
    elif isinstance(value, datetime.datetime):

@@ -97,9 +97,9 @@ def _render_parts(value, write_cb):
        if not isinstance(subvalue, list):
            subvalue = [subvalue]
        for subsubvalue in subvalue:
            write_cb('<' + escape.utf8(name) + '>')
            write_cb('<' + utils.utf8(name) + '>')
            _render_parts(subsubvalue, write_cb)
            write_cb('</' + escape.utf8(name) + '>')
            write_cb('</' + utils.utf8(name) + '>')
    else:
        raise Exception("Unknown S3 value type %r", value)
nova/tests/api/openstack/test_api.py (new file, 68 lines)
@@ -0,0 +1,68 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import unittest
import webob.exc
import webob.dec

import nova.api.openstack
from nova.api.openstack import API
from nova.api.openstack import faults
from webob import Request


class APITest(unittest.TestCase):

    def test_exceptions_are_converted_to_faults(self):
        @webob.dec.wsgify
        def succeed(req):
            return 'Succeeded'

        @webob.dec.wsgify
        def raise_webob_exc(req):
            raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')

        @webob.dec.wsgify
        def fail(req):
            raise Exception("Threw an exception")

        @webob.dec.wsgify
        def raise_api_fault(req):
            exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
            return faults.Fault(exc)

        api = API()

        api.application = succeed
        resp = Request.blank('/').get_response(api)
        self.assertFalse('cloudServersFault' in resp.body, resp.body)
        self.assertEqual(resp.status_int, 200, resp.body)

        api.application = raise_webob_exc
        resp = Request.blank('/').get_response(api)
        self.assertFalse('cloudServersFault' in resp.body, resp.body)
        self.assertEqual(resp.status_int, 404, resp.body)

        api.application = raise_api_fault
        resp = Request.blank('/').get_response(api)
        self.assertTrue('itemNotFound' in resp.body, resp.body)
        self.assertEqual(resp.status_int, 404, resp.body)

        api.application = fail
        resp = Request.blank('/').get_response(api)
        self.assertTrue('{"cloudServersFault' in resp.body, resp.body)
        self.assertEqual(resp.status_int, 500, resp.body)

        api.application = fail
        resp = Request.blank('/.xml').get_response(api)
        self.assertTrue('<cloudServersFault' in resp.body, resp.body)
        self.assertEqual(resp.status_int, 500, resp.body)
@@ -112,7 +112,7 @@ class SerializerTest(unittest.TestCase):
        self.match('/servers/4.json', None, expect='json')
        self.match('/servers/4', 'application/json', expect='json')
        self.match('/servers/4', 'application/xml', expect='xml')
        self.match('/servers/4.xml', None, expect='xml')
        self.match('/servers/4.xml', None, expect='xml')

    def test_defaults_to_json(self):
        self.match('/servers/4', None, expect='json')
@@ -242,7 +242,7 @@ class ApiEc2TestCase(test.BaseTestCase):
        self.assertEquals(int(group.rules[0].from_port), 80)
        self.assertEquals(int(group.rules[0].to_port), 81)
        self.assertEquals(len(group.rules[0].grants), 1)
        self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
        self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')

        self.expect_http()
        self.mox.ReplayAll()
@@ -33,9 +33,9 @@ FLAGS.num_networks = 5
FLAGS.fake_network = True
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_target_ids', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
FLAGS.iscsi_target_ids = 8
FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
FLAGS.sql_connection = 'sqlite:///nova.sqlite'
@@ -81,7 +81,7 @@ class SimpleDriverTestCase(test.TrialTestCase):
                   max_cores=4,
                   max_gigabytes=4,
                   network_manager='nova.network.manager.FlatManager',
                   volume_driver='nova.volume.driver.FakeAOEDriver',
                   volume_driver='nova.volume.driver.FakeISCSIDriver',
                   scheduler_driver='nova.scheduler.simple.SimpleScheduler')
        self.scheduler = manager.SchedulerManager()
        self.manager = auth_manager.AuthManager()
@@ -91,7 +91,7 @@ class LibvirtConnTestCase(test.TrialTestCase):
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri, template = conn.get_uri_and_template()
            uri, _template, _rescue = conn.get_uri_and_templates()
            self.assertEquals(uri, expected_uri)

            xml = conn.to_xml(instance_ref)

@@ -114,7 +114,7 @@ class LibvirtConnTestCase(test.TrialTestCase):
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri, template = conn.get_uri_and_template()
            uri, _template, _rescue = conn.get_uri_and_templates()
            self.assertEquals(uri, testuri)

    def tearDown(self):
@@ -85,7 +85,7 @@ class VolumeTestCase(test.TrialTestCase):
    def test_too_many_volumes(self):
        """Ensure that NoMoreTargets is raised when we run out of volumes"""
        vols = []
        total_slots = FLAGS.iscsi_target_ids
        total_slots = FLAGS.iscsi_num_targets
        for _index in xrange(total_slots):
            volume_id = self._create_volume()
            yield self.volume.create_volume(self.context, volume_id)

@@ -157,12 +157,13 @@ class VolumeTestCase(test.TrialTestCase):
        """Make sure targets aren't duplicated"""
            volume_ids.append(volume_id)
            admin_context = context.get_admin_context()
            target_id = db.volume_get_target_id(admin_context, volume_id)
            self.assert_(target_id not in targets)
            targets.append(target_id)
            logging.debug("Target %s allocated", target_id)
            iscsi_target = db.volume_get_iscsi_target_num(admin_context,
                                                          volume_id)
            self.assert_(iscsi_target not in targets)
            targets.append(iscsi_target)
            logging.debug("Target %s allocated", iscsi_target)
        deferreds = []
        total_slots = FLAGS.iscsi_target_ids
        total_slots = FLAGS.iscsi_num_targets
        for _index in xrange(total_slots):
            volume_id = self._create_volume()
            d = self.volume.create_volume(self.context, volume_id)
@@ -28,6 +28,7 @@ import random
import subprocess
import socket
import sys
from xml.sax import saxutils

from twisted.internet.threads import deferToThread

@@ -131,13 +132,9 @@ def runthis(prompt, cmd, check_exit_code=True):


def generate_uid(topic, size=8):
    if topic == "i":
        # Instances have integer internal ids.
        return random.randint(0, 2 ** 32 - 1)
    else:
        characters = '01234567890abcdefghijklmnopqrstuvwxyz'
        choices = [random.choice(characters) for x in xrange(size)]
        return '%s-%s' % (topic, ''.join(choices))
    characters = '01234567890abcdefghijklmnopqrstuvwxyz'
    choices = [random.choice(characters) for x in xrange(size)]
    return '%s-%s' % (topic, ''.join(choices))


def generate_mac():

@@ -212,3 +209,27 @@ def deferredToThread(f):
    def g(*args, **kwargs):
        return deferToThread(f, *args, **kwargs)
    return g


def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py

    """
    return saxutils.escape(value, {'"': "&quot;"})


def utf8(value):
    """Try to turn a string into utf-8 if possible.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py

    """
    if isinstance(value, unicode):
        return value.encode("utf-8")
    assert isinstance(value, str)
    return value
@@ -22,10 +22,9 @@ A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor.
This module also documents the semantics of real hypervisor connections.
"""

import logging

from twisted.internet import defer

from nova import exception
from nova.compute import power_state


@@ -119,6 +118,18 @@ class FakeConnection(object):
        """
        return defer.succeed(None)

    def rescue(self, instance):
        """
        Rescue the specified instance.
        """
        return defer.succeed(None)

    def unrescue(self, instance):
        """
        Unrescue the specified instance.
        """
        return defer.succeed(None)

    def destroy(self, instance):
        """
        Destroy (shutdown and delete) the specified instance.

@@ -148,7 +159,12 @@ class FakeConnection(object):
        current memory the instance has, in KiB, 'num_cpu': The current number
        of virtual CPUs the instance has, 'cpu_time': The total CPU time used
        by the instance, in nanoseconds.

        This method should raise exception.NotFound if the hypervisor has no
        knowledge of the instance
        """
        if instance_name not in self.instances:
            raise exception.NotFound("Instance %s Not Found" % instance_name)
        i = self.instances[instance_name]
        return {'state': i._state,
                'max_mem': 0,
nova/virt/libvirt.rescue.qemu.xml.template (new file, 37 lines)
@@ -0,0 +1,37 @@
<domain type='%(type)s'>
    <name>%(name)s</name>
    <os>
        <type>hvm</type>
        <kernel>%(basepath)s/rescue-kernel</kernel>
        <initrd>%(basepath)s/rescue-ramdisk</initrd>
        <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
    </os>
    <features>
        <acpi/>
    </features>
    <memory>%(memory_kb)s</memory>
    <vcpu>%(vcpus)s</vcpu>
    <devices>
        <disk type='file'>
            <source file='%(basepath)s/rescue-disk'/>
            <target dev='vda' bus='virtio'/>
        </disk>
        <disk type='file'>
            <source file='%(basepath)s/disk'/>
            <target dev='vdb' bus='virtio'/>
        </disk>
        <interface type='bridge'>
            <source bridge='%(bridge_name)s'/>
            <mac address='%(mac_address)s'/>
            <!-- <model type='virtio'/> CANT RUN virtio network right now -->
            <filterref filter="nova-instance-%(name)s">
                <parameter name="IP" value="%(ip_address)s" />
                <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
            </filterref>
        </interface>
        <serial type="file">
            <source path='%(basepath)s/console.log'/>
            <target port='1'/>
        </serial>
    </devices>
</domain>
nova/virt/libvirt.rescue.uml.xml.template (new file, 26 lines)
@@ -0,0 +1,26 @@
<domain type='%(type)s'>
    <name>%(name)s</name>
    <memory>%(memory_kb)s</memory>
    <os>
        <type>%(type)s</type>
        <kernel>/usr/bin/linux</kernel>
        <root>/dev/ubda1</root>
    </os>
    <devices>
        <disk type='file'>
            <source file='%(basepath)s/rescue-disk'/>
            <target dev='ubd0' bus='uml'/>
        </disk>
        <disk type='file'>
            <source file='%(basepath)s/disk'/>
            <target dev='ubd1' bus='uml'/>
        </disk>
        <interface type='bridge'>
            <source bridge='%(bridge_name)s'/>
            <mac address='%(mac_address)s'/>
        </interface>
        <console type="file">
            <source path='%(basepath)s/console.log'/>
        </console>
    </devices>
</domain>
nova/virt/libvirt.rescue.xen.xml.template (new file, 34 lines)
@@ -0,0 +1,34 @@
<domain type='%(type)s'>
    <name>%(name)s</name>
    <os>
        <type>linux</type>
        <kernel>%(basepath)s/kernel</kernel>
        <initrd>%(basepath)s/ramdisk</initrd>
        <root>/dev/xvda1</root>
        <cmdline>ro</cmdline>
    </os>
    <features>
        <acpi/>
    </features>
    <memory>%(memory_kb)s</memory>
    <vcpu>%(vcpus)s</vcpu>
    <devices>
        <disk type='file'>
            <source file='%(basepath)s/rescue-disk'/>
            <target dev='sda' />
        </disk>
        <disk type='file'>
            <source file='%(basepath)s/disk'/>
            <target dev='sdb' />
        </disk>
        <interface type='bridge'>
            <source bridge='%(bridge_name)s'/>
            <mac address='%(mac_address)s'/>
        </interface>
        <console type="file">
            <source path='%(basepath)s/console.log'/>
            <target port='1'/>
        </console>
    </devices>
</domain>
@@ -48,6 +48,19 @@ libxml2 = None


FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_rescue_xml_template',
                    utils.abspath('virt/libvirt.rescue.qemu.xml.template'),
                    'Libvirt RESCUE XML Template for QEmu/KVM')
flags.DEFINE_string('libvirt_rescue_xen_xml_template',
                    utils.abspath('virt/libvirt.rescue.xen.xml.template'),
                    'Libvirt RESCUE XML Template for xen')
flags.DEFINE_string('libvirt_rescue_uml_xml_template',
                    utils.abspath('virt/libvirt.rescue.uml.xml.template'),
                    'Libvirt RESCUE XML Template for user-mode-linux')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
                    utils.abspath('virt/libvirt.qemu.xml.template'),
                    'Libvirt XML Template for QEmu/KVM')

@@ -87,9 +100,12 @@ def get_connection(read_only):

class LibvirtConnection(object):
    def __init__(self, read_only):
        self.libvirt_uri, template_file = self.get_uri_and_template()
        (self.libvirt_uri,
         template_file,
         rescue_file) = self.get_uri_and_templates()

        self.libvirt_xml = open(template_file).read()
        self.rescue_xml = open(rescue_file).read()
        self._wrapped_conn = None
        self.read_only = read_only

@@ -112,17 +128,20 @@ class LibvirtConnection(object):
                return False
            raise

    def get_uri_and_template(self):
    def get_uri_and_templates(self):
        if FLAGS.libvirt_type == 'uml':
            uri = FLAGS.libvirt_uri or 'uml:///system'
            template_file = FLAGS.libvirt_uml_xml_template
            rescue_file = FLAGS.libvirt_rescue_uml_xml_template
        elif FLAGS.libvirt_type == 'xen':
            uri = FLAGS.libvirt_uri or 'xen:///'
            template_file = FLAGS.libvirt_xen_xml_template
            rescue_file = FLAGS.libvirt_rescue_xen_xml_template
        else:
            uri = FLAGS.libvirt_uri or 'qemu:///system'
            template_file = FLAGS.libvirt_xml_template
        return uri, template_file
            rescue_file = FLAGS.libvirt_rescue_xml_template
        return uri, template_file, rescue_file

    def _connect(self, uri, read_only):
        auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],

@@ -138,7 +157,7 @@ class LibvirtConnection(object):
        return [self._conn.lookupByID(x).name()
                for x in self._conn.listDomainsID()]

    def destroy(self, instance):
    def destroy(self, instance, cleanup=True):
        try:
            virt_dom = self._conn.lookupByName(instance['name'])
            virt_dom.destroy()

@@ -146,10 +165,11 @@ class LibvirtConnection(object):
            pass
            # If the instance is already terminated, we're still happy
        d = defer.Deferred()
        d.addCallback(lambda _: self._cleanup(instance))
        if cleanup:
            d.addCallback(lambda _: self._cleanup(instance))
        # FIXME: What does this comment mean?
        # TODO(termie): short-circuit me for tests
        # WE'LL save this for when we do shutdown,
        # WE'LL save this for when we do shutdown,
        # instead of destroy - but destroy returns immediately
        timer = task.LoopingCall(f=None)

@@ -226,8 +246,8 @@ class LibvirtConnection(object):
    @defer.inlineCallbacks
    @exception.wrap_exception
    def reboot(self, instance):
        yield self.destroy(instance, False)
        xml = self.to_xml(instance)
        yield self._conn.lookupByName(instance['name']).destroy()
        yield self._conn.createXML(xml, 0)

        d = defer.Deferred()

@@ -254,6 +274,48 @@ class LibvirtConnection(object):
        timer.start(interval=0.5, now=True)
        yield d

    @defer.inlineCallbacks
    @exception.wrap_exception
    def rescue(self, instance):
        yield self.destroy(instance, False)

        xml = self.to_xml(instance, rescue=True)
        rescue_images = {'image_id': FLAGS.rescue_image_id,
                         'kernel_id': FLAGS.rescue_kernel_id,
                         'ramdisk_id': FLAGS.rescue_ramdisk_id}
        yield self._create_image(instance, xml, 'rescue-', rescue_images)
        yield self._conn.createXML(xml, 0)

        d = defer.Deferred()
        timer = task.LoopingCall(f=None)

        def _wait_for_rescue():
            try:
                state = self.get_info(instance['name'])['state']
                db.instance_set_state(None, instance['id'], state)
                if state == power_state.RUNNING:
                    logging.debug('instance %s: rescued', instance['name'])
                    timer.stop()
                    d.callback(None)
            except Exception, exn:
                logging.error('_wait_for_rescue failed: %s', exn)
                db.instance_set_state(None,
                                      instance['id'],
                                      power_state.SHUTDOWN)
                timer.stop()
                d.callback(None)

        timer.f = _wait_for_rescue
        timer.start(interval=0.5, now=True)
        yield d

    @defer.inlineCallbacks
    @exception.wrap_exception
    def unrescue(self, instance):
        # NOTE(vish): Because reboot destroys and recreates an instance using
        #             the normal xml file, we can just call reboot here
        yield self.reboot(instance)

    @defer.inlineCallbacks
    @exception.wrap_exception
    def spawn(self, instance):

@@ -266,8 +328,6 @@ class LibvirtConnection(object):
        setup_nwfilters_for_instance(instance)
        yield self._create_image(instance, xml)
        yield self._conn.createXML(xml, 0)
        # TODO(termie): this should actually register
        # a callback to check for successful boot
        logging.debug("instance %s: is running", instance['name'])

        local_d = defer.Deferred()

@@ -338,15 +398,16 @@ class LibvirtConnection(object):
        return d

    @defer.inlineCallbacks
    def _create_image(self, inst, libvirt_xml):
    def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
        # syntactic nicety
        basepath = lambda fname='': os.path.join(FLAGS.instances_path,
        basepath = lambda fname='', prefix=prefix: os.path.join(
                                                 FLAGS.instances_path,
                                                 inst['name'],
                                                 fname)
                                                 prefix + fname)

        # ensure directories exist and are writable
        yield process.simple_execute('mkdir -p %s' % basepath())
        yield process.simple_execute('chmod 0777 %s' % basepath())
        yield process.simple_execute('mkdir -p %s' % basepath(prefix=''))
        yield process.simple_execute('chmod 0777 %s' % basepath(prefix=''))

        # TODO(termie): these are blocking calls, it would be great
        # if they weren't.

@@ -355,12 +416,17 @@ class LibvirtConnection(object):
        f.write(libvirt_xml)
        f.close()

        os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY,
                         0660))
        # NOTE(vish): No need add the prefix to console.log
        os.close(os.open(basepath('console.log', ''),
                         os.O_CREAT | os.O_WRONLY, 0660))

        user = manager.AuthManager().get_user(inst['user_id'])
        project = manager.AuthManager().get_project(inst['project_id'])

        if not disk_images:
            disk_images = {'image_id': inst['image_id'],
                           'kernel_id': inst['kernel_id'],
                           'ramdisk_id': inst['ramdisk_id']}
        if not os.path.exists(basepath('disk')):
            yield images.fetch(inst.image_id, basepath('disk-raw'), user,
                               project)

@@ -406,7 +472,9 @@ class LibvirtConnection(object):
                                           ['local_gb']
                                           * 1024 * 1024 * 1024)

        resize = inst['instance_type'] != 'm1.tiny'
        resize = True
        if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
            resize = False
        yield disk.partition(basepath('disk-raw'), basepath('disk'),
                             local_bytes, resize, execute=execute)

@@ -414,7 +482,7 @@ class LibvirtConnection(object):
            yield process.simple_execute('sudo chown root %s' %
                                         basepath('disk'))

    def to_xml(self, instance):
    def to_xml(self, instance, rescue=False):
        # TODO(termie): cache?
        logging.debug('instance %s: starting toXML method', instance['name'])
        network = db.project_get_network(context.get_admin_context(),

@@ -436,13 +504,19 @@ class LibvirtConnection(object):
                    'mac_address': instance['mac_address'],
                    'ip_address': ip_address,
                    'dhcp_server': dhcp_server}
        libvirt_xml = self.libvirt_xml % xml_info
        if rescue:
            libvirt_xml = self.rescue_xml % xml_info
        else:
            libvirt_xml = self.libvirt_xml % xml_info
        logging.debug('instance %s: finished toXML method', instance['name'])

        return libvirt_xml

    def get_info(self, instance_name):
        virt_dom = self._conn.lookupByName(instance_name)
        try:
            virt_dom = self._conn.lookupByName(instance_name)
        except:
            raise exception.NotFound("Instance %s not found" % instance_name)
        (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
        return {'state': state,
                'max_mem': max_mem,
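The rescue templates above are plain %-format strings; to_xml(instance, rescue=True) fills them with the xml_info dict shown in the last hunk. A standalone sketch with made-up values (only the key names and the substitution mechanism come from the commit):

    template = ("<domain type='%(type)s'>\n"
                "    <name>%(name)s</name>\n"
                "    <memory>%(memory_kb)s</memory>\n"
                "    <vcpu>%(vcpus)s</vcpu>\n"
                "</domain>\n")

    xml_info = {'type': 'qemu',        # FLAGS.libvirt_type
                'name': 'instance-1',  # hypothetical instance name
                'memory_kb': 2097152,  # hypothetical
                'vcpus': 2}            # hypothetical

    print(template % xml_info)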
@@ -44,7 +44,7 @@ flags.DEFINE_integer('num_shelves',
flags.DEFINE_integer('blades_per_shelf',
                     16,
                     'Number of vblade blades per shelf')
flags.DEFINE_integer('iscsi_target_ids',
flags.DEFINE_integer('iscsi_num_targets',
                     100,
                     'Number of iscsi target ids per host')
flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:',

@@ -224,52 +224,54 @@ class ISCSIDriver(VolumeDriver):

    def ensure_export(self, context, volume):
        """Safely and synchronously recreates an export for a logical volume"""
        target_id = self.db.volume_get_target_id(context, volume['id'])
        iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                           volume['id'])
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
        self._sync_exec("sudo ietadm --op new "
                        "--tid=%s --params Name=%s" %
                        (target_id, iscsi_name),
                        (iscsi_target, iscsi_name),
                        check_exit_code=False)
        self._sync_exec("sudo ietadm --op new --tid=%s "
                        "--lun=0 --params Path=%s,Type=fileio" %
                        (target_id, volume_path),
                        (iscsi_target, volume_path),
                        check_exit_code=False)

    def _ensure_target_ids(self, context, host):
    def _ensure_iscsi_targets(self, context, host):
        """Ensure that target ids have been created in datastore"""
        host_target_ids = self.db.target_id_count_by_host(context, host)
        if host_target_ids >= FLAGS.iscsi_target_ids:
        host_iscsi_targets = self.db.iscsi_target_count_by_host(context, host)
        if host_iscsi_targets >= FLAGS.iscsi_num_targets:
            return
        # NOTE(vish): Target ids start at 1, not 0.
        for target_id in xrange(1, FLAGS.iscsi_target_ids + 1):
            target = {'host': host, 'target_id': target_id}
            self.db.target_id_create_safe(context, target)
        for target_num in xrange(1, FLAGS.iscsi_num_targets + 1):
            target = {'host': host, 'target_num': target_num}
            self.db.iscsi_target_create_safe(context, target)

    @defer.inlineCallbacks
    def create_export(self, context, volume):
        """Creates an export for a logical volume"""
        self._ensure_target_ids(context, volume['host'])
        target_id = self.db.volume_allocate_target_id(context,
        self._ensure_iscsi_targets(context, volume['host'])
        iscsi_target = self.db.volume_allocate_iscsi_target(context,
                                                            volume['id'],
                                                            volume['host'])
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
        yield self._execute("sudo ietadm --op new "
                            "--tid=%s --params Name=%s" %
                            (target_id, iscsi_name))
                            (iscsi_target, iscsi_name))
        yield self._execute("sudo ietadm --op new --tid=%s "
                            "--lun=0 --params Path=%s,Type=fileio" %
                            (target_id, volume_path))
                            (iscsi_target, volume_path))

    @defer.inlineCallbacks
    def remove_export(self, context, volume):
        """Removes an export for a logical volume"""
        target_id = self.db.volume_get_target_id(context, volume['id'])
        iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                           volume['id'])
        yield self._execute("sudo ietadm --op delete --tid=%s "
                            "--lun=0" % target_id)
                            "--lun=0" % iscsi_target)
        yield self._execute("sudo ietadm --op delete --tid=%s" %
                            target_id)
                            iscsi_target)

    @defer.inlineCallbacks
    def _get_name_and_portal(self, volume_name, host):
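A sketch of the ietadm command strings the ISCSIDriver builds above (not from the commit). The target prefix is the flag default from this file and the tid comes from volume_allocate_iscsi_target(); the volume and volume-group names are hypothetical.

    iscsi_target = 1                                    # tid allocated from the db
    iscsi_target_prefix = 'iqn.2010-10.org.openstack:'  # FLAGS.iscsi_target_prefix
    volume_name = 'volume-00000001'                     # hypothetical
    volume_group = 'nova-volumes'                       # hypothetical FLAGS.volume_group

    iscsi_name = "%s%s" % (iscsi_target_prefix, volume_name)
    volume_path = "/dev/%s/%s" % (volume_group, volume_name)

    print("sudo ietadm --op new --tid=%s --params Name=%s"
          % (iscsi_target, iscsi_name))
    print("sudo ietadm --op new --tid=%s --lun=0 --params Path=%s,Type=fileio"
          % (iscsi_target, volume_path))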
@@ -13,7 +13,6 @@ python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
routes==1.12.3
tornado==1.0
WebOb==0.9.8
wsgiref==0.1.2
zope.interface==3.6.1