merged trunk and resolved conflict
This commit is contained in:
1
Authors
1
Authors
@@ -32,6 +32,7 @@ Jesse Andrews <anotherjesse@gmail.com>
|
||||
Joe Heck <heckj@mac.com>
|
||||
Joel Moore <joelbm24@gmail.com>
|
||||
John Dewey <john@dewey.ws>
|
||||
John Tran <jtran@attinteractive.com>
|
||||
Jonathan Bryce <jbryce@jbryce.com>
|
||||
Jordan Rinke <jordan@openstack.org>
|
||||
Josh Durgin <joshd@hq.newdream.net>
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
include HACKING LICENSE run_tests.py run_tests.sh
|
||||
include README builddeb.sh exercise_rsapi.py
|
||||
include ChangeLog MANIFEST.in pylintrc Authors
|
||||
graft CA
|
||||
graft nova/CA
|
||||
graft doc
|
||||
graft smoketests
|
||||
graft tools
|
||||
|
||||
@@ -881,7 +881,7 @@ class InstanceTypeCommands(object):
|
||||
elif name == "--all":
|
||||
inst_types = instance_types.get_all_types(True)
|
||||
else:
|
||||
inst_types = instance_types.get_instance_type(name)
|
||||
inst_types = instance_types.get_instance_type_by_name(name)
|
||||
except exception.DBError, e:
|
||||
_db_error(e)
|
||||
if isinstance(inst_types.values()[0], dict):
|
||||
@@ -905,7 +905,7 @@ class ImageCommands(object):
|
||||
'disk_format': disk_format,
|
||||
'container_format': container_format,
|
||||
'properties': {'image_state': 'available',
|
||||
'owner': owner,
|
||||
'owner_id': owner,
|
||||
'type': image_type,
|
||||
'architecture': architecture,
|
||||
'image_location': 'local',
|
||||
@@ -983,7 +983,7 @@ class ImageCommands(object):
|
||||
'is_public': True,
|
||||
'name': old['imageId'],
|
||||
'properties': {'image_state': old['imageState'],
|
||||
'owner': old['imageOwnerId'],
|
||||
'owner_id': old['imageOwnerId'],
|
||||
'architecture': old['architecture'],
|
||||
'type': old['type'],
|
||||
'image_location': old['imageLocation'],
|
||||
|
||||
0
CA/.gitignore → nova/CA/.gitignore
vendored
0
CA/.gitignore → nova/CA/.gitignore
vendored
@@ -23,7 +23,7 @@ mkdir -p projects/$NAME
|
||||
cd projects/$NAME
|
||||
cp ../../openssl.cnf.tmpl openssl.cnf
|
||||
sed -i -e s/%USERNAME%/$NAME/g openssl.cnf
|
||||
mkdir certs crl newcerts private
|
||||
mkdir -p certs crl newcerts private
|
||||
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
|
||||
echo "10" > serial
|
||||
touch index.txt
|
||||
@@ -20,8 +20,9 @@ if [ -f "cacert.pem" ];
|
||||
then
|
||||
echo "Not installing, it's already done."
|
||||
else
|
||||
cp openssl.cnf.tmpl openssl.cnf
|
||||
cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf
|
||||
sed -i -e s/%USERNAME%/ROOT/g openssl.cnf
|
||||
mkdir -p certs crl newcerts private
|
||||
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
|
||||
touch index.txt
|
||||
echo "10" > serial
|
||||
@@ -1,473 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Nova User API client library.
|
||||
"""
|
||||
|
||||
import base64
|
||||
import boto
|
||||
import boto.exception
|
||||
import httplib
|
||||
import re
|
||||
import string
|
||||
|
||||
from boto.ec2.regioninfo import RegionInfo
|
||||
|
||||
|
||||
DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
|
||||
DEFAULT_REGION = 'nova'
|
||||
|
||||
|
||||
class UserInfo(object):
|
||||
"""
|
||||
Information about a Nova user, as parsed through SAX.
|
||||
|
||||
**Fields Include**
|
||||
|
||||
* username
|
||||
* accesskey
|
||||
* secretkey
|
||||
* file (optional) containing zip of X509 cert & rc file
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, connection=None, username=None, endpoint=None):
|
||||
self.connection = connection
|
||||
self.username = username
|
||||
self.endpoint = endpoint
|
||||
|
||||
def __repr__(self):
|
||||
return 'UserInfo:%s' % self.username
|
||||
|
||||
def startElement(self, name, attrs, connection):
|
||||
return None
|
||||
|
||||
def endElement(self, name, value, connection):
|
||||
if name == 'username':
|
||||
self.username = str(value)
|
||||
elif name == 'file':
|
||||
self.file = base64.b64decode(str(value))
|
||||
elif name == 'accesskey':
|
||||
self.accesskey = str(value)
|
||||
elif name == 'secretkey':
|
||||
self.secretkey = str(value)
|
||||
|
||||
|
||||
class UserRole(object):
|
||||
"""
|
||||
Information about a Nova user's role, as parsed through SAX.
|
||||
|
||||
**Fields include**
|
||||
|
||||
* role
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, connection=None):
|
||||
self.connection = connection
|
||||
self.role = None
|
||||
|
||||
def __repr__(self):
|
||||
return 'UserRole:%s' % self.role
|
||||
|
||||
def startElement(self, name, attrs, connection):
|
||||
return None
|
||||
|
||||
def endElement(self, name, value, connection):
|
||||
if name == 'role':
|
||||
self.role = value
|
||||
else:
|
||||
setattr(self, name, str(value))
|
||||
|
||||
|
||||
class ProjectInfo(object):
|
||||
"""
|
||||
Information about a Nova project, as parsed through SAX.
|
||||
|
||||
**Fields include**
|
||||
|
||||
* projectname
|
||||
* description
|
||||
* projectManagerId
|
||||
* memberIds
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, connection=None):
|
||||
self.connection = connection
|
||||
self.projectname = None
|
||||
self.description = None
|
||||
self.projectManagerId = None
|
||||
self.memberIds = []
|
||||
|
||||
def __repr__(self):
|
||||
return 'ProjectInfo:%s' % self.projectname
|
||||
|
||||
def startElement(self, name, attrs, connection):
|
||||
return None
|
||||
|
||||
def endElement(self, name, value, connection):
|
||||
if name == 'projectname':
|
||||
self.projectname = value
|
||||
elif name == 'description':
|
||||
self.description = value
|
||||
elif name == 'projectManagerId':
|
||||
self.projectManagerId = value
|
||||
elif name == 'memberId':
|
||||
self.memberIds.append(value)
|
||||
else:
|
||||
setattr(self, name, str(value))
|
||||
|
||||
|
||||
class ProjectMember(object):
|
||||
"""
|
||||
Information about a Nova project member, as parsed through SAX.
|
||||
|
||||
**Fields include**
|
||||
|
||||
* memberId
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, connection=None):
|
||||
self.connection = connection
|
||||
self.memberId = None
|
||||
|
||||
def __repr__(self):
|
||||
return 'ProjectMember:%s' % self.memberId
|
||||
|
||||
def startElement(self, name, attrs, connection):
|
||||
return None
|
||||
|
||||
def endElement(self, name, value, connection):
|
||||
if name == 'member':
|
||||
self.memberId = value
|
||||
else:
|
||||
setattr(self, name, str(value))
|
||||
|
||||
|
||||
class HostInfo(object):
|
||||
"""
|
||||
Information about a Nova Host, as parsed through SAX.
|
||||
|
||||
**Fields Include**
|
||||
|
||||
* Hostname
|
||||
* Compute service status
|
||||
* Volume service status
|
||||
* Instance count
|
||||
* Volume count
|
||||
"""
|
||||
|
||||
def __init__(self, connection=None):
|
||||
self.connection = connection
|
||||
self.hostname = None
|
||||
self.compute = None
|
||||
self.volume = None
|
||||
self.instance_count = 0
|
||||
self.volume_count = 0
|
||||
|
||||
def __repr__(self):
|
||||
return 'Host:%s' % self.hostname
|
||||
|
||||
# this is needed by the sax parser, so ignore the ugly name
|
||||
def startElement(self, name, attrs, connection):
|
||||
return None
|
||||
|
||||
# this is needed by the sax parser, so ignore the ugly name
|
||||
def endElement(self, name, value, connection):
|
||||
fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
|
||||
setattr(self, fixed_name, value)
|
||||
|
||||
|
||||
class Vpn(object):
|
||||
"""
|
||||
Information about a Vpn, as parsed through SAX
|
||||
|
||||
**Fields Include**
|
||||
|
||||
* instance_id
|
||||
* project_id
|
||||
* public_ip
|
||||
* public_port
|
||||
* created_at
|
||||
* internal_ip
|
||||
* state
|
||||
"""
|
||||
|
||||
def __init__(self, connection=None):
|
||||
self.connection = connection
|
||||
self.instance_id = None
|
||||
self.project_id = None
|
||||
|
||||
def __repr__(self):
|
||||
return 'Vpn:%s:%s' % (self.project_id, self.instance_id)
|
||||
|
||||
def startElement(self, name, attrs, connection):
|
||||
return None
|
||||
|
||||
def endElement(self, name, value, connection):
|
||||
fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
|
||||
setattr(self, fixed_name, value)
|
||||
|
||||
|
||||
class InstanceType(object):
|
||||
"""
|
||||
Information about a Nova instance type, as parsed through SAX.
|
||||
|
||||
**Fields include**
|
||||
|
||||
* name
|
||||
* vcpus
|
||||
* disk_gb
|
||||
* memory_mb
|
||||
* flavor_id
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, connection=None):
|
||||
self.connection = connection
|
||||
self.name = None
|
||||
self.vcpus = None
|
||||
self.disk_gb = None
|
||||
self.memory_mb = None
|
||||
self.flavor_id = None
|
||||
|
||||
def __repr__(self):
|
||||
return 'InstanceType:%s' % self.name
|
||||
|
||||
def startElement(self, name, attrs, connection):
|
||||
return None
|
||||
|
||||
def endElement(self, name, value, connection):
|
||||
if name == "memoryMb":
|
||||
self.memory_mb = str(value)
|
||||
elif name == "flavorId":
|
||||
self.flavor_id = str(value)
|
||||
elif name == "diskGb":
|
||||
self.disk_gb = str(value)
|
||||
else:
|
||||
setattr(self, name, str(value))
|
||||
|
||||
|
||||
class NovaAdminClient(object):
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
clc_url=DEFAULT_CLC_URL,
|
||||
region=DEFAULT_REGION,
|
||||
access_key=None,
|
||||
secret_key=None,
|
||||
**kwargs):
|
||||
parts = self.split_clc_url(clc_url)
|
||||
|
||||
self.clc_url = clc_url
|
||||
self.region = region
|
||||
self.access = access_key
|
||||
self.secret = secret_key
|
||||
self.apiconn = boto.connect_ec2(aws_access_key_id=access_key,
|
||||
aws_secret_access_key=secret_key,
|
||||
is_secure=parts['is_secure'],
|
||||
region=RegionInfo(None,
|
||||
region,
|
||||
parts['ip']),
|
||||
port=parts['port'],
|
||||
path='/services/Admin',
|
||||
**kwargs)
|
||||
self.apiconn.APIVersion = 'nova'
|
||||
|
||||
def connection_for(self, username, project, clc_url=None, region=None,
|
||||
**kwargs):
|
||||
"""Returns a boto ec2 connection for the given username."""
|
||||
if not clc_url:
|
||||
clc_url = self.clc_url
|
||||
if not region:
|
||||
region = self.region
|
||||
parts = self.split_clc_url(clc_url)
|
||||
user = self.get_user(username)
|
||||
access_key = '%s:%s' % (user.accesskey, project)
|
||||
return boto.connect_ec2(aws_access_key_id=access_key,
|
||||
aws_secret_access_key=user.secretkey,
|
||||
is_secure=parts['is_secure'],
|
||||
region=RegionInfo(None,
|
||||
self.region,
|
||||
parts['ip']),
|
||||
port=parts['port'],
|
||||
path='/services/Cloud',
|
||||
**kwargs)
|
||||
|
||||
def split_clc_url(self, clc_url):
|
||||
"""Splits a cloud controller endpoint url."""
|
||||
parts = httplib.urlsplit(clc_url)
|
||||
is_secure = parts.scheme == 'https'
|
||||
ip, port = parts.netloc.split(':')
|
||||
return {'ip': ip, 'port': int(port), 'is_secure': is_secure}
|
||||
|
||||
def get_users(self):
|
||||
"""Grabs the list of all users."""
|
||||
return self.apiconn.get_list('DescribeUsers', {}, [('item', UserInfo)])
|
||||
|
||||
def get_user(self, name):
|
||||
"""Grab a single user by name."""
|
||||
user = self.apiconn.get_object('DescribeUser',
|
||||
{'Name': name},
|
||||
UserInfo)
|
||||
if user.username != None:
|
||||
return user
|
||||
|
||||
def has_user(self, username):
|
||||
"""Determine if user exists."""
|
||||
return self.get_user(username) != None
|
||||
|
||||
def create_user(self, username):
|
||||
"""Creates a new user, returning the userinfo object with
|
||||
access/secret."""
|
||||
return self.apiconn.get_object('RegisterUser', {'Name': username},
|
||||
UserInfo)
|
||||
|
||||
def delete_user(self, username):
|
||||
"""Deletes a user."""
|
||||
return self.apiconn.get_object('DeregisterUser', {'Name': username},
|
||||
UserInfo)
|
||||
|
||||
def get_roles(self, project_roles=True):
|
||||
"""Returns a list of available roles."""
|
||||
return self.apiconn.get_list('DescribeRoles',
|
||||
{'ProjectRoles': project_roles},
|
||||
[('item', UserRole)])
|
||||
|
||||
def get_user_roles(self, user, project=None):
|
||||
"""Returns a list of roles for the given user.
|
||||
|
||||
Omitting project will return any global roles that the user has.
|
||||
Specifying project will return only project specific roles.
|
||||
|
||||
"""
|
||||
params = {'User': user}
|
||||
if project:
|
||||
params['Project'] = project
|
||||
return self.apiconn.get_list('DescribeUserRoles',
|
||||
params,
|
||||
[('item', UserRole)])
|
||||
|
||||
def add_user_role(self, user, role, project=None):
|
||||
"""Add a role to a user either globally or for a specific project."""
|
||||
return self.modify_user_role(user, role, project=project,
|
||||
operation='add')
|
||||
|
||||
def remove_user_role(self, user, role, project=None):
|
||||
"""Remove a role from a user either globally or for a specific
|
||||
project."""
|
||||
return self.modify_user_role(user, role, project=project,
|
||||
operation='remove')
|
||||
|
||||
def modify_user_role(self, user, role, project=None, operation='add',
|
||||
**kwargs):
|
||||
"""Add or remove a role for a user and project."""
|
||||
params = {'User': user,
|
||||
'Role': role,
|
||||
'Project': project,
|
||||
'Operation': operation}
|
||||
return self.apiconn.get_status('ModifyUserRole', params)
|
||||
|
||||
def get_projects(self, user=None):
|
||||
"""Returns a list of all projects."""
|
||||
if user:
|
||||
params = {'User': user}
|
||||
else:
|
||||
params = {}
|
||||
return self.apiconn.get_list('DescribeProjects',
|
||||
params,
|
||||
[('item', ProjectInfo)])
|
||||
|
||||
def get_project(self, name):
|
||||
"""Returns a single project with the specified name."""
|
||||
project = self.apiconn.get_object('DescribeProject',
|
||||
{'Name': name},
|
||||
ProjectInfo)
|
||||
|
||||
if project.projectname != None:
|
||||
return project
|
||||
|
||||
def create_project(self, projectname, manager_user, description=None,
|
||||
member_users=None):
|
||||
"""Creates a new project."""
|
||||
params = {'Name': projectname,
|
||||
'ManagerUser': manager_user,
|
||||
'Description': description,
|
||||
'MemberUsers': member_users}
|
||||
return self.apiconn.get_object('RegisterProject', params, ProjectInfo)
|
||||
|
||||
def modify_project(self, projectname, manager_user=None, description=None):
|
||||
"""Modifies an existing project."""
|
||||
params = {'Name': projectname,
|
||||
'ManagerUser': manager_user,
|
||||
'Description': description}
|
||||
return self.apiconn.get_status('ModifyProject', params)
|
||||
|
||||
def delete_project(self, projectname):
|
||||
"""Permanently deletes the specified project."""
|
||||
return self.apiconn.get_object('DeregisterProject',
|
||||
{'Name': projectname},
|
||||
ProjectInfo)
|
||||
|
||||
def get_project_members(self, name):
|
||||
"""Returns a list of members of a project."""
|
||||
return self.apiconn.get_list('DescribeProjectMembers',
|
||||
{'Name': name},
|
||||
[('item', ProjectMember)])
|
||||
|
||||
def add_project_member(self, user, project):
|
||||
"""Adds a user to a project."""
|
||||
return self.modify_project_member(user, project, operation='add')
|
||||
|
||||
def remove_project_member(self, user, project):
|
||||
"""Removes a user from a project."""
|
||||
return self.modify_project_member(user, project, operation='remove')
|
||||
|
||||
def modify_project_member(self, user, project, operation='add'):
|
||||
"""Adds or removes a user from a project."""
|
||||
params = {'User': user,
|
||||
'Project': project,
|
||||
'Operation': operation}
|
||||
return self.apiconn.get_status('ModifyProjectMember', params)
|
||||
|
||||
def get_zip(self, user, project):
|
||||
"""Returns the content of a zip file containing novarc and access
|
||||
credentials."""
|
||||
params = {'Name': user, 'Project': project}
|
||||
zip = self.apiconn.get_object('GenerateX509ForUser', params, UserInfo)
|
||||
return zip.file
|
||||
|
||||
def start_vpn(self, project):
|
||||
"""
|
||||
Starts the vpn for a user
|
||||
"""
|
||||
return self.apiconn.get_object('StartVpn', {'Project': project}, Vpn)
|
||||
|
||||
def get_vpns(self):
|
||||
"""Return a list of vpn with project name"""
|
||||
return self.apiconn.get_list('DescribeVpns', {}, [('item', Vpn)])
|
||||
|
||||
def get_hosts(self):
|
||||
return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
|
||||
|
||||
def get_instance_types(self):
|
||||
"""Grabs the list of all users."""
|
||||
return self.apiconn.get_list('DescribeInstanceTypes', {},
|
||||
[('item', InstanceType)])
|
||||
@@ -206,10 +206,14 @@ class ServiceWrapper(wsgi.Controller):
|
||||
# NOTE(vish): make sure we have no unicode keys for py2.6.
|
||||
params = dict([(str(k), v) for (k, v) in params.iteritems()])
|
||||
result = method(context, **params)
|
||||
|
||||
if result is None or type(result) is str or type(result) is unicode:
|
||||
return result
|
||||
|
||||
try:
|
||||
return self._serialize(result, req.best_match_content_type())
|
||||
content_type = req.best_match_content_type()
|
||||
default_xmlns = self.get_default_xmlns(req)
|
||||
return self._serialize(result, content_type, default_xmlns)
|
||||
except:
|
||||
raise exception.Error("returned non-serializable type: %s"
|
||||
% result)
|
||||
|
||||
@@ -103,10 +103,18 @@ class CloudController(object):
|
||||
# Gen root CA, if we don't have one
|
||||
root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
|
||||
if not os.path.exists(root_ca_path):
|
||||
genrootca_sh_path = os.path.join(os.path.dirname(__file__),
|
||||
os.path.pardir,
|
||||
os.path.pardir,
|
||||
'CA',
|
||||
'genrootca.sh')
|
||||
|
||||
start = os.getcwd()
|
||||
if not os.path.exists(FLAGS.ca_path):
|
||||
os.makedirs(FLAGS.ca_path)
|
||||
os.chdir(FLAGS.ca_path)
|
||||
# TODO(vish): Do this with M2Crypto instead
|
||||
utils.runthis(_("Generating root CA: %s"), "sh", "genrootca.sh")
|
||||
utils.runthis(_("Generating root CA: %s"), "sh", genrootca_sh_path)
|
||||
os.chdir(start)
|
||||
|
||||
def _get_mpi_data(self, context, project_id):
|
||||
@@ -722,7 +730,10 @@ class CloudController(object):
|
||||
instance['project_id'],
|
||||
instance['host'])
|
||||
i['productCodesSet'] = self._convert_to_set([], 'product_codes')
|
||||
i['instanceType'] = instance['instance_type']
|
||||
if instance['instance_type']:
|
||||
i['instanceType'] = instance['instance_type'].get('name')
|
||||
else:
|
||||
i['instanceType'] = None
|
||||
i['launchTime'] = instance['created_at']
|
||||
i['amiLaunchIndex'] = instance['launch_index']
|
||||
i['displayName'] = instance['display_name']
|
||||
@@ -757,6 +768,8 @@ class CloudController(object):
|
||||
iterator = db.floating_ip_get_all_by_project(context,
|
||||
context.project_id)
|
||||
for floating_ip_ref in iterator:
|
||||
if floating_ip_ref['project_id'] is None:
|
||||
continue
|
||||
address = floating_ip_ref['address']
|
||||
ec2_id = None
|
||||
if (floating_ip_ref['fixed_ip']
|
||||
@@ -775,7 +788,7 @@ class CloudController(object):
|
||||
def allocate_address(self, context, **kwargs):
|
||||
LOG.audit(_("Allocate address"), context=context)
|
||||
public_ip = self.network_api.allocate_floating_ip(context)
|
||||
return {'addressSet': [{'publicIp': public_ip}]}
|
||||
return {'publicIp': public_ip}
|
||||
|
||||
def release_address(self, context, public_ip, **kwargs):
|
||||
LOG.audit(_("Release address %s"), public_ip, context=context)
|
||||
@@ -805,7 +818,7 @@ class CloudController(object):
|
||||
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
|
||||
kwargs['ramdisk_id'] = ramdisk['id']
|
||||
instances = self.compute_api.create(context,
|
||||
instance_type=instance_types.get_by_type(
|
||||
instance_type=instance_types.get_instance_type_by_name(
|
||||
kwargs.get('instance_type', None)),
|
||||
image_id=self._get_image(context, kwargs['image_id'])['id'],
|
||||
min_count=int(kwargs.get('min_count', max_count)),
|
||||
@@ -884,9 +897,6 @@ class CloudController(object):
|
||||
image_type = image['properties'].get('type')
|
||||
ec2_id = self._image_ec2_id(image.get('id'), image_type)
|
||||
name = image.get('name')
|
||||
if name:
|
||||
i['imageId'] = "%s (%s)" % (ec2_id, name)
|
||||
else:
|
||||
i['imageId'] = ec2_id
|
||||
kernel_id = image['properties'].get('kernel_id')
|
||||
if kernel_id:
|
||||
@@ -895,11 +905,15 @@ class CloudController(object):
|
||||
if ramdisk_id:
|
||||
i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk')
|
||||
i['imageOwnerId'] = image['properties'].get('owner_id')
|
||||
if name:
|
||||
i['imageLocation'] = "%s (%s)" % (image['properties'].
|
||||
get('image_location'), name)
|
||||
else:
|
||||
i['imageLocation'] = image['properties'].get('image_location')
|
||||
i['imageState'] = image['properties'].get('image_state')
|
||||
i['displayName'] = image.get('name')
|
||||
i['displayName'] = name
|
||||
i['description'] = image.get('description')
|
||||
i['type'] = image_type
|
||||
i['imageType'] = image_type
|
||||
i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True'
|
||||
i['architecture'] = image['properties'].get('architecture')
|
||||
return i
|
||||
|
||||
@@ -13,15 +13,14 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import common
|
||||
import webob.exc
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
|
||||
from nova.auth import manager
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
@@ -35,7 +34,7 @@ def _translate_keys(account):
|
||||
manager=account.project_manager_id)
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
|
||||
@@ -19,7 +19,7 @@ import time
|
||||
|
||||
from webob import exc
|
||||
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
import nova.image.service
|
||||
|
||||
@@ -29,7 +29,7 @@ def _translate_keys(inst):
|
||||
return dict(backupSchedule=inst)
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
""" The backup schedule API controller for the Openstack API """
|
||||
|
||||
_serialization_metadata = {
|
||||
|
||||
@@ -22,6 +22,7 @@ import webob
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
|
||||
|
||||
LOG = logging.getLogger('common')
|
||||
@@ -30,6 +31,10 @@ LOG = logging.getLogger('common')
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
|
||||
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
|
||||
|
||||
|
||||
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
|
||||
"""
|
||||
Return a slice of items according to requested offset and limit.
|
||||
@@ -128,3 +133,9 @@ def get_id_from_href(href):
|
||||
except:
|
||||
LOG.debug(_("Error extracting id from href: %s") % href)
|
||||
raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
|
||||
|
||||
|
||||
class OpenstackController(wsgi.Controller):
|
||||
def get_default_xmlns(self, req):
|
||||
# Use V10 by default
|
||||
return XML_NS_V10
|
||||
|
||||
@@ -19,7 +19,7 @@ from webob import exc
|
||||
|
||||
from nova import console
|
||||
from nova import exception
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ def _translate_detail_keys(cons):
|
||||
return dict(console=info)
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
"""The Consoles Controller for the Openstack API"""
|
||||
|
||||
_serialization_metadata = {
|
||||
|
||||
@@ -28,6 +28,7 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
|
||||
|
||||
@@ -115,7 +116,7 @@ class ExtensionDescriptor(object):
|
||||
return response_exts
|
||||
|
||||
|
||||
class ActionExtensionController(wsgi.Controller):
|
||||
class ActionExtensionController(common.OpenstackController):
|
||||
|
||||
def __init__(self, application):
|
||||
|
||||
@@ -136,7 +137,7 @@ class ActionExtensionController(wsgi.Controller):
|
||||
return res
|
||||
|
||||
|
||||
class ResponseExtensionController(wsgi.Controller):
|
||||
class ResponseExtensionController(common.OpenstackController):
|
||||
|
||||
def __init__(self, application):
|
||||
self.application = application
|
||||
@@ -155,7 +156,8 @@ class ResponseExtensionController(wsgi.Controller):
|
||||
body = res.body
|
||||
headers = res.headers
|
||||
except AttributeError:
|
||||
body = self._serialize(res, content_type)
|
||||
default_xmlns = None
|
||||
body = self._serialize(res, content_type, default_xmlns)
|
||||
headers = {"Content-Type": content_type}
|
||||
res = webob.Response()
|
||||
res.body = body
|
||||
@@ -163,7 +165,7 @@ class ResponseExtensionController(wsgi.Controller):
|
||||
return res
|
||||
|
||||
|
||||
class ExtensionController(wsgi.Controller):
|
||||
class ExtensionController(common.OpenstackController):
|
||||
|
||||
def __init__(self, extension_manager):
|
||||
self.extension_manager = extension_manager
|
||||
|
||||
@@ -20,10 +20,10 @@ import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
|
||||
|
||||
class Fault(webob.exc.HTTPException):
|
||||
|
||||
"""An RS API fault response."""
|
||||
|
||||
_fault_names = {
|
||||
@@ -47,7 +47,7 @@ class Fault(webob.exc.HTTPException):
|
||||
"""Generate a WSGI response based on the exception passed to ctor."""
|
||||
# Replace the body with fault details.
|
||||
code = self.wrapped_exc.status_int
|
||||
fault_name = self._fault_names.get(code, "computeFault")
|
||||
fault_name = self._fault_names.get(code, "cloudServersFault")
|
||||
fault_data = {
|
||||
fault_name: {
|
||||
'code': code,
|
||||
@@ -57,9 +57,11 @@ class Fault(webob.exc.HTTPException):
|
||||
fault_data[fault_name]['retryAfter'] = retry
|
||||
# 'code' is an attribute on the fault tag itself
|
||||
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
|
||||
serializer = wsgi.Serializer(metadata)
|
||||
default_xmlns = common.XML_NS_V10
|
||||
serializer = wsgi.Serializer(metadata, default_xmlns)
|
||||
content_type = req.best_match_content_type()
|
||||
self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
|
||||
self.wrapped_exc.content_type = content_type
|
||||
return self.wrapped_exc
|
||||
|
||||
|
||||
|
||||
@@ -19,11 +19,11 @@ import webob
|
||||
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import views
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
"""Flavor controller for the OpenStack API."""
|
||||
|
||||
_serialization_metadata = {
|
||||
@@ -76,3 +76,6 @@ class ControllerV11(Controller):
|
||||
def _get_view_builder(self, req):
|
||||
base_url = req.application_url
|
||||
return views.flavors.ViewBuilderV11(base_url)
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
return common.XML_NS_V11
|
||||
|
||||
@@ -20,13 +20,14 @@ from webob import exc
|
||||
from nova import flags
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
"""The image metadata API controller for the Openstack API"""
|
||||
|
||||
def __init__(self):
|
||||
|
||||
@@ -13,8 +13,6 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
|
||||
import webob.exc
|
||||
|
||||
from nova import compute
|
||||
@@ -22,7 +20,6 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import log
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack.views import images as images_view
|
||||
@@ -32,7 +29,7 @@ LOG = log.getLogger('nova.api.openstack.images')
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
"""Base `wsgi.Controller` for retrieving/displaying images."""
|
||||
|
||||
_serialization_metadata = {
|
||||
@@ -153,3 +150,6 @@ class ControllerV11(Controller):
|
||||
"""Property to get the ViewBuilder class we need to use."""
|
||||
base_url = request.application_url
|
||||
return images_view.ViewBuilderV11(base_url)
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
return common.XML_NS_V11
|
||||
|
||||
@@ -31,8 +31,8 @@ from collections import defaultdict
|
||||
from webob.dec import wsgify
|
||||
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.wsgi import Controller
|
||||
from nova.wsgi import Middleware
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ PER_HOUR = 60 * 60
|
||||
PER_DAY = 60 * 60 * 24
|
||||
|
||||
|
||||
class LimitsController(Controller):
|
||||
class LimitsController(common.OpenstackController):
|
||||
"""
|
||||
Controller for accessing limits in the OpenStack API.
|
||||
"""
|
||||
|
||||
@@ -19,10 +19,11 @@ from webob import exc
|
||||
|
||||
from nova import compute
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
""" The server metadata API controller for the Openstack API """
|
||||
|
||||
def __init__(self):
|
||||
|
||||
@@ -44,7 +44,7 @@ LOG = logging.getLogger('server')
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
""" The Server API controller for the OpenStack API """
|
||||
|
||||
_serialization_metadata = {
|
||||
@@ -150,15 +150,26 @@ class Controller(wsgi.Controller):
|
||||
injected_files = self._get_injected_files(personality)
|
||||
|
||||
flavor_id = self._flavor_id_from_req_data(env)
|
||||
|
||||
if not 'name' in env['server']:
|
||||
msg = _("Server name is not defined")
|
||||
return exc.HTTPBadRequest(msg)
|
||||
|
||||
name = env['server']['name']
|
||||
self._validate_server_name(name)
|
||||
name = name.strip()
|
||||
|
||||
try:
|
||||
inst_type = \
|
||||
instance_types.get_instance_type_by_flavor_id(flavor_id)
|
||||
(inst,) = self.compute_api.create(
|
||||
context,
|
||||
instance_types.get_by_flavor_id(flavor_id),
|
||||
inst_type,
|
||||
image_id,
|
||||
kernel_id=kernel_id,
|
||||
ramdisk_id=ramdisk_id,
|
||||
display_name=env['server']['name'],
|
||||
display_description=env['server']['name'],
|
||||
display_name=name,
|
||||
display_description=name,
|
||||
key_name=key_name,
|
||||
key_data=key_data,
|
||||
metadata=metadata,
|
||||
@@ -166,13 +177,12 @@ class Controller(wsgi.Controller):
|
||||
except quota.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
|
||||
inst['instance_type'] = flavor_id
|
||||
inst['instance_type'] = inst_type
|
||||
inst['image_id'] = requested_image_id
|
||||
|
||||
builder = self._get_view_builder(req)
|
||||
server = builder.build(inst, is_detail=True)
|
||||
password = "%s%s" % (server['server']['name'][:4],
|
||||
utils.generate_password(12))
|
||||
password = utils.generate_password(16)
|
||||
server['server']['adminPass'] = password
|
||||
self.compute_api.set_admin_password(context, server['server']['id'],
|
||||
password)
|
||||
@@ -246,26 +256,40 @@ class Controller(wsgi.Controller):
|
||||
|
||||
ctxt = req.environ['nova.context']
|
||||
update_dict = {}
|
||||
if 'adminPass' in inst_dict['server']:
|
||||
update_dict['admin_pass'] = inst_dict['server']['adminPass']
|
||||
try:
|
||||
self.compute_api.set_admin_password(ctxt, id)
|
||||
except exception.TimeoutException:
|
||||
return exc.HTTPRequestTimeout()
|
||||
|
||||
if 'name' in inst_dict['server']:
|
||||
update_dict['display_name'] = inst_dict['server']['name']
|
||||
name = inst_dict['server']['name']
|
||||
self._validate_server_name(name)
|
||||
update_dict['display_name'] = name.strip()
|
||||
|
||||
self._parse_update(ctxt, id, inst_dict, update_dict)
|
||||
|
||||
try:
|
||||
self.compute_api.update(ctxt, id, **update_dict)
|
||||
except exception.NotFound:
|
||||
return faults.Fault(exc.HTTPNotFound())
|
||||
|
||||
return exc.HTTPNoContent()
|
||||
|
||||
def _validate_server_name(self, value):
|
||||
if not isinstance(value, basestring):
|
||||
msg = _("Server name is not a string or unicode")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
|
||||
if value.strip() == '':
|
||||
msg = _("Server name is an empty string")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
|
||||
def _parse_update(self, context, id, inst_dict, update_dict):
|
||||
pass
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def action(self, req, id):
|
||||
"""Multi-purpose method used to reboot, rebuild, or
|
||||
resize a server"""
|
||||
|
||||
actions = {
|
||||
'changePassword': self._action_change_password,
|
||||
'reboot': self._action_reboot,
|
||||
'resize': self._action_resize,
|
||||
'confirmResize': self._action_confirm_resize,
|
||||
@@ -279,6 +303,9 @@ class Controller(wsgi.Controller):
|
||||
return actions[key](input_dict, req, id)
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
def _action_change_password(self, input_dict, req, id):
|
||||
return exc.HTTPNotImplemented()
|
||||
|
||||
def _action_confirm_resize(self, input_dict, req, id):
|
||||
try:
|
||||
self.compute_api.confirm_resize(req.environ['nova.context'], id)
|
||||
@@ -576,6 +603,14 @@ class ControllerV10(Controller):
|
||||
def _limit_items(self, items, req):
|
||||
return common.limited(items, req)
|
||||
|
||||
def _parse_update(self, context, server_id, inst_dict, update_dict):
|
||||
if 'adminPass' in inst_dict['server']:
|
||||
update_dict['admin_pass'] = inst_dict['server']['adminPass']
|
||||
try:
|
||||
self.compute_api.set_admin_password(context, server_id)
|
||||
except exception.TimeoutException:
|
||||
return exc.HTTPRequestTimeout()
|
||||
|
||||
|
||||
class ControllerV11(Controller):
|
||||
def _image_id_from_req_data(self, data):
|
||||
@@ -599,9 +634,25 @@ class ControllerV11(Controller):
|
||||
def _get_addresses_view_builder(self, req):
|
||||
return nova.api.openstack.views.addresses.ViewBuilderV11(req)
|
||||
|
||||
def _action_change_password(self, input_dict, req, id):
|
||||
context = req.environ['nova.context']
|
||||
if (not 'changePassword' in input_dict
|
||||
or not 'adminPass' in input_dict['changePassword']):
|
||||
msg = _("No adminPass was specified")
|
||||
return exc.HTTPBadRequest(msg)
|
||||
password = input_dict['changePassword']['adminPass']
|
||||
if not isinstance(password, basestring) or password == '':
|
||||
msg = _("Invalid adminPass")
|
||||
return exc.HTTPBadRequest(msg)
|
||||
self.compute_api.set_admin_password(context, id, password)
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
def _limit_items(self, items, req):
|
||||
return common.limited_by_marker(items, req)
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
return common.XML_NS_V11
|
||||
|
||||
|
||||
class ServerCreateRequestXMLDeserializer(object):
|
||||
"""
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
|
||||
from webob import exc
|
||||
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ def _translate_detail_keys(inst):
|
||||
return dict(sharedIpGroups=inst)
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
""" The Shared IP Groups Controller for the Openstack API """
|
||||
|
||||
_serialization_metadata = {
|
||||
|
||||
@@ -18,7 +18,6 @@ from webob import exc
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.auth import manager
|
||||
@@ -35,7 +34,7 @@ def _translate_keys(user):
|
||||
admin=user.admin)
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
|
||||
@@ -15,8 +15,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from nova import wsgi
|
||||
import nova.api.openstack.views.versions
|
||||
@@ -51,4 +51,10 @@ class Versions(wsgi.Application):
|
||||
}
|
||||
|
||||
content_type = req.best_match_content_type()
|
||||
return wsgi.Serializer(metadata).serialize(response, content_type)
|
||||
body = wsgi.Serializer(metadata).serialize(response, content_type)
|
||||
|
||||
response = webob.Response()
|
||||
response.content_type = content_type
|
||||
response.body = body
|
||||
|
||||
return response
|
||||
|
||||
@@ -60,8 +60,8 @@ class ViewBuilder(object):
|
||||
self._format_status(image_obj)
|
||||
|
||||
image = {
|
||||
"id": image_obj["id"],
|
||||
"name": image_obj["name"],
|
||||
"id": image_obj.get("id"),
|
||||
"name": image_obj.get("name"),
|
||||
}
|
||||
|
||||
if "instance_id" in properties:
|
||||
@@ -72,9 +72,9 @@ class ViewBuilder(object):
|
||||
|
||||
if detail:
|
||||
image.update({
|
||||
"created": image_obj["created_at"],
|
||||
"updated": image_obj["updated_at"],
|
||||
"status": image_obj["status"],
|
||||
"created": image_obj.get("created_at"),
|
||||
"updated": image_obj.get("updated_at"),
|
||||
"status": image_obj.get("status"),
|
||||
})
|
||||
|
||||
if image["status"] == "SAVING":
|
||||
|
||||
@@ -57,16 +57,16 @@ class ViewBuilder(object):
|
||||
def _build_detail(self, inst):
|
||||
"""Returns a detailed model of a server."""
|
||||
power_mapping = {
|
||||
None: 'build',
|
||||
power_state.NOSTATE: 'build',
|
||||
power_state.RUNNING: 'active',
|
||||
power_state.BLOCKED: 'active',
|
||||
power_state.SUSPENDED: 'suspended',
|
||||
power_state.PAUSED: 'paused',
|
||||
power_state.SHUTDOWN: 'active',
|
||||
power_state.SHUTOFF: 'active',
|
||||
power_state.CRASHED: 'error',
|
||||
power_state.FAILED: 'error'}
|
||||
None: 'BUILD',
|
||||
power_state.NOSTATE: 'BUILD',
|
||||
power_state.RUNNING: 'ACTIVE',
|
||||
power_state.BLOCKED: 'ACTIVE',
|
||||
power_state.SUSPENDED: 'SUSPENDED',
|
||||
power_state.PAUSED: 'PAUSED',
|
||||
power_state.SHUTDOWN: 'ACTIVE',
|
||||
power_state.SHUTOFF: 'ACTIVE',
|
||||
power_state.CRASHED: 'ERROR',
|
||||
power_state.FAILED: 'ERROR'}
|
||||
|
||||
inst_dict = {
|
||||
'id': int(inst['id']),
|
||||
@@ -77,7 +77,7 @@ class ViewBuilder(object):
|
||||
ctxt = nova.context.get_admin_context()
|
||||
compute_api = nova.compute.API()
|
||||
if compute_api.has_finished_migration(ctxt, inst['id']):
|
||||
inst_dict['status'] = 'resize-confirm'
|
||||
inst_dict['status'] = 'RESIZE-CONFIRM'
|
||||
|
||||
# Return the metadata as a dictionary
|
||||
metadata = {}
|
||||
@@ -115,7 +115,7 @@ class ViewBuilderV10(ViewBuilder):
|
||||
|
||||
def _build_flavor(self, response, inst):
|
||||
if 'instance_type' in dict(inst):
|
||||
response['flavorId'] = inst['instance_type']
|
||||
response['flavorId'] = inst['instance_type']['flavorid']
|
||||
|
||||
|
||||
class ViewBuilderV11(ViewBuilder):
|
||||
@@ -134,7 +134,7 @@ class ViewBuilderV11(ViewBuilder):
|
||||
|
||||
def _build_flavor(self, response, inst):
|
||||
if "instance_type" in dict(inst):
|
||||
flavor_id = inst["instance_type"]
|
||||
flavor_id = inst["instance_type"]['flavorid']
|
||||
flavor_ref = self.flavor_builder.generate_href(flavor_id)
|
||||
response["flavorRef"] = flavor_ref
|
||||
|
||||
|
||||
@@ -13,12 +13,10 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import common
|
||||
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.scheduler import api
|
||||
|
||||
|
||||
@@ -43,7 +41,7 @@ def _scrub_zone(zone):
|
||||
'deleted', 'deleted_at', 'updated_at'))
|
||||
|
||||
|
||||
class Controller(wsgi.Controller):
|
||||
class Controller(common.OpenstackController):
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
|
||||
@@ -37,10 +37,14 @@ from nova.compute import instance_types
|
||||
from nova.scheduler import api as scheduler_api
|
||||
from nova.db import base
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
LOG = logging.getLogger('nova.compute.api')
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DECLARE('vncproxy_topic', 'nova.vnc')
|
||||
|
||||
|
||||
def generate_default_hostname(instance_id):
|
||||
"""Default function to generate a hostname given an instance reference."""
|
||||
return str(instance_id)
|
||||
@@ -110,8 +114,11 @@ class API(base.Base):
|
||||
"""Create the number of instances requested if quota and
|
||||
other arguments check out ok."""
|
||||
|
||||
type_data = instance_types.get_instance_type(instance_type)
|
||||
num_instances = quota.allowed_instances(context, max_count, type_data)
|
||||
if not instance_type:
|
||||
instance_type = instance_types.get_default_instance_type()
|
||||
|
||||
num_instances = quota.allowed_instances(context, max_count,
|
||||
instance_type)
|
||||
if num_instances < min_count:
|
||||
pid = context.project_id
|
||||
LOG.warn(_("Quota exceeeded for %(pid)s,"
|
||||
@@ -197,10 +204,10 @@ class API(base.Base):
|
||||
'user_id': context.user_id,
|
||||
'project_id': context.project_id,
|
||||
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
|
||||
'instance_type': instance_type,
|
||||
'memory_mb': type_data['memory_mb'],
|
||||
'vcpus': type_data['vcpus'],
|
||||
'local_gb': type_data['local_gb'],
|
||||
'instance_type_id': instance_type['id'],
|
||||
'memory_mb': instance_type['memory_mb'],
|
||||
'vcpus': instance_type['vcpus'],
|
||||
'local_gb': instance_type['local_gb'],
|
||||
'display_name': display_name,
|
||||
'display_description': display_description,
|
||||
'user_data': user_data or '',
|
||||
@@ -517,8 +524,7 @@ class API(base.Base):
|
||||
def resize(self, context, instance_id, flavor_id):
|
||||
"""Resize a running instance."""
|
||||
instance = self.db.instance_get(context, instance_id)
|
||||
current_instance_type = self.db.instance_type_get_by_name(
|
||||
context, instance['instance_type'])
|
||||
current_instance_type = instance['instance_type']
|
||||
|
||||
new_instance_type = self.db.instance_type_get_by_flavor_id(
|
||||
context, flavor_id)
|
||||
|
||||
@@ -74,8 +74,8 @@ def destroy(name):
|
||||
try:
|
||||
db.instance_type_destroy(context.get_admin_context(), name)
|
||||
except exception.NotFound:
|
||||
LOG.exception(_('Instance type %s not found for deletion' % name))
|
||||
raise exception.ApiError(_("Unknown instance type: %s" % name))
|
||||
LOG.exception(_('Instance type %s not found for deletion') % name)
|
||||
raise exception.ApiError(_("Unknown instance type: %s") % name)
|
||||
|
||||
|
||||
def purge(name):
|
||||
@@ -87,8 +87,8 @@ def purge(name):
|
||||
try:
|
||||
db.instance_type_purge(context.get_admin_context(), name)
|
||||
except exception.NotFound:
|
||||
LOG.exception(_('Instance type %s not found for purge' % name))
|
||||
raise exception.ApiError(_("Unknown instance type: %s" % name))
|
||||
LOG.exception(_('Instance type %s not found for purge') % name)
|
||||
raise exception.ApiError(_("Unknown instance type: %s") % name)
|
||||
|
||||
|
||||
def get_all_types(inactive=0):
|
||||
@@ -103,41 +103,43 @@ def get_all_flavors():
|
||||
return get_all_types(context.get_admin_context())
|
||||
|
||||
|
||||
def get_instance_type(name):
|
||||
def get_default_instance_type():
|
||||
name = FLAGS.default_instance_type
|
||||
try:
|
||||
return get_instance_type_by_name(name)
|
||||
except exception.DBError:
|
||||
raise exception.ApiError(_("Unknown instance type: %s") % name)
|
||||
|
||||
|
||||
def get_instance_type(id):
|
||||
"""Retrieves single instance type by id"""
|
||||
if id is None:
|
||||
return get_default_instance_type()
|
||||
try:
|
||||
ctxt = context.get_admin_context()
|
||||
return db.instance_type_get_by_id(ctxt, id)
|
||||
except exception.DBError:
|
||||
raise exception.ApiError(_("Unknown instance type: %s") % name)
|
||||
|
||||
|
||||
def get_instance_type_by_name(name):
|
||||
"""Retrieves single instance type by name"""
|
||||
if name is None:
|
||||
return FLAGS.default_instance_type
|
||||
return get_default_instance_type()
|
||||
try:
|
||||
ctxt = context.get_admin_context()
|
||||
inst_type = db.instance_type_get_by_name(ctxt, name)
|
||||
return inst_type
|
||||
return db.instance_type_get_by_name(ctxt, name)
|
||||
except exception.DBError:
|
||||
raise exception.ApiError(_("Unknown instance type: %s" % name))
|
||||
raise exception.ApiError(_("Unknown instance type: %s") % name)
|
||||
|
||||
|
||||
def get_by_type(instance_type):
|
||||
"""retrieve instance type name"""
|
||||
if instance_type is None:
|
||||
return FLAGS.default_instance_type
|
||||
|
||||
try:
|
||||
ctxt = context.get_admin_context()
|
||||
inst_type = db.instance_type_get_by_name(ctxt, instance_type)
|
||||
return inst_type['name']
|
||||
except exception.DBError, e:
|
||||
LOG.exception(_('DB error: %s' % e))
|
||||
raise exception.ApiError(_("Unknown instance type: %s" %\
|
||||
instance_type))
|
||||
|
||||
|
||||
def get_by_flavor_id(flavor_id):
|
||||
"""retrieve instance type's name by flavor_id"""
|
||||
def get_instance_type_by_flavor_id(flavor_id):
|
||||
"""retrieve instance type by flavor_id"""
|
||||
if flavor_id is None:
|
||||
return FLAGS.default_instance_type
|
||||
return get_default_instance_type()
|
||||
try:
|
||||
ctxt = context.get_admin_context()
|
||||
flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id)
|
||||
return flavor['name']
|
||||
return db.instance_type_get_by_flavor_id(ctxt, flavor_id)
|
||||
except exception.DBError, e:
|
||||
LOG.exception(_('DB error: %s' % e))
|
||||
raise exception.ApiError(_("Unknown flavor: %s" % flavor_id))
|
||||
LOG.exception(_('DB error: %s') % e)
|
||||
raise exception.ApiError(_("Unknown flavor: %s") % flavor_id)
|
||||
|
||||
@@ -215,9 +215,12 @@ def generate_x509_cert(user_id, project_id, bits=1024):
|
||||
|
||||
def _ensure_project_folder(project_id):
|
||||
if not os.path.exists(ca_path(project_id)):
|
||||
geninter_sh_path = os.path.join(os.path.dirname(__file__),
|
||||
'CA',
|
||||
'geninter.sh')
|
||||
start = os.getcwd()
|
||||
os.chdir(ca_folder())
|
||||
utils.execute('sh', 'geninter.sh', project_id,
|
||||
utils.execute('sh', geninter_sh_path, project_id,
|
||||
_project_cert_subject(project_id))
|
||||
os.chdir(start)
|
||||
|
||||
@@ -227,13 +230,16 @@ def generate_vpn_files(project_id):
|
||||
csr_fn = os.path.join(project_folder, "server.csr")
|
||||
crt_fn = os.path.join(project_folder, "server.crt")
|
||||
|
||||
genvpn_sh_path = os.path.join(os.path.dirname(__file__),
|
||||
'CA',
|
||||
'geninter.sh')
|
||||
if os.path.exists(crt_fn):
|
||||
return
|
||||
_ensure_project_folder(project_id)
|
||||
start = os.getcwd()
|
||||
os.chdir(ca_folder())
|
||||
# TODO(vish): the shell scripts could all be done in python
|
||||
utils.execute('sh', 'genvpn.sh',
|
||||
utils.execute('sh', genvpn_sh_path,
|
||||
project_id, _vpn_cert_subject(project_id))
|
||||
with open(csr_fn, "r") as csrfile:
|
||||
csr_text = csrfile.read()
|
||||
@@ -263,6 +269,8 @@ def _sign_csr(csr_text, ca_folder):
|
||||
LOG.debug(_("Flags path: %s"), ca_folder)
|
||||
start = os.getcwd()
|
||||
# Change working dir to CA
|
||||
if not os.path.exists(ca_folder):
|
||||
os.makedirs(ca_folder)
|
||||
os.chdir(ca_folder)
|
||||
utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
|
||||
'./openssl.cnf', '-infiles', inbound)
|
||||
|
||||
@@ -1124,6 +1124,11 @@ def instance_type_get_all(context, inactive=False):
|
||||
return IMPL.instance_type_get_all(context, inactive)
|
||||
|
||||
|
||||
def instance_type_get_by_id(context, id):
|
||||
"""Get instance type by id"""
|
||||
return IMPL.instance_type_get_by_id(context, id)
|
||||
|
||||
|
||||
def instance_type_get_by_name(context, name):
|
||||
"""Get instance type by name"""
|
||||
return IMPL.instance_type_get_by_name(context, name)
|
||||
|
||||
@@ -660,7 +660,9 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
|
||||
filter(models.FixedIp.instance_id != None).\
|
||||
filter_by(allocated=0).\
|
||||
update({'instance_id': None,
|
||||
'leased': 0})
|
||||
'leased': 0,
|
||||
'updated_at': datetime.datetime.utcnow()},
|
||||
synchronize_session='fetch')
|
||||
return result
|
||||
|
||||
|
||||
@@ -829,6 +831,7 @@ def instance_get(context, instance_id, session=None):
|
||||
options(joinedload('volumes')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(id=instance_id).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
first()
|
||||
@@ -838,6 +841,7 @@ def instance_get(context, instance_id, session=None):
|
||||
options(joinedload_all('security_groups.rules')).\
|
||||
options(joinedload('volumes')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(project_id=context.project_id).\
|
||||
filter_by(id=instance_id).\
|
||||
filter_by(deleted=False).\
|
||||
@@ -857,6 +861,7 @@ def instance_get_all(context):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
all()
|
||||
|
||||
@@ -868,6 +873,7 @@ def instance_get_all_by_user(context, user_id):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
filter_by(user_id=user_id).\
|
||||
all()
|
||||
@@ -880,6 +886,7 @@ def instance_get_all_by_host(context, host):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(host=host).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
all()
|
||||
@@ -894,6 +901,7 @@ def instance_get_all_by_project(context, project_id):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(project_id=project_id).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
all()
|
||||
@@ -908,6 +916,7 @@ def instance_get_all_by_reservation(context, reservation_id):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(reservation_id=reservation_id).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
all()
|
||||
@@ -916,6 +925,7 @@ def instance_get_all_by_reservation(context, reservation_id):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(project_id=context.project_id).\
|
||||
filter_by(reservation_id=reservation_id).\
|
||||
filter_by(deleted=False).\
|
||||
@@ -928,6 +938,7 @@ def instance_get_project_vpn(context, project_id):
|
||||
return session.query(models.Instance).\
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(project_id=project_id).\
|
||||
filter_by(image_id=FLAGS.vpn_image_id).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
@@ -2368,6 +2379,19 @@ def instance_type_get_all(context, inactive=False):
|
||||
raise exception.NotFound
|
||||
|
||||
|
||||
@require_context
|
||||
def instance_type_get_by_id(context, id):
|
||||
"""Returns a dict describing specific instance_type"""
|
||||
session = get_session()
|
||||
inst_type = session.query(models.InstanceTypes).\
|
||||
filter_by(id=id).\
|
||||
first()
|
||||
if not inst_type:
|
||||
raise exception.NotFound(_("No instance type with id %s") % id)
|
||||
else:
|
||||
return dict(inst_type)
|
||||
|
||||
|
||||
@require_context
|
||||
def instance_type_get_by_name(context, name):
|
||||
"""Returns a dict describing specific instance_type"""
|
||||
|
||||
@@ -0,0 +1,84 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import *
from sqlalchemy.sql import text
from migrate import *

#from nova import log as logging


meta = MetaData()


c_instance_type = Column('instance_type',
                         String(length=255, convert_unicode=False,
                                assert_unicode=None, unicode_error=None,
                                _warn_on_bytestring=False),
                         nullable=True)

c_instance_type_id = Column('instance_type_id',
                            String(length=255, convert_unicode=False,
                                   assert_unicode=None, unicode_error=None,
                                   _warn_on_bytestring=False),
                            nullable=True)

instance_types = Table('instance_types', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('name',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               unique=True))


def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    instances.create_column(c_instance_type_id)

    recs = migrate_engine.execute(instance_types.select())
    for row in recs:
        type_id = row[0]
        type_name = row[1]
        migrate_engine.execute(instances.update()\
            .where(instances.c.instance_type == type_name)\
            .values(instance_type_id=type_id))

    instances.c.instance_type.drop()


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    instances.create_column(c_instance_type)

    recs = migrate_engine.execute(instance_types.select())
    for row in recs:
        type_id = row[0]
        type_name = row[1]
        migrate_engine.execute(instances.update()\
            .where(instances.c.instance_type_id == type_id)\
            .values(instance_type=type_name))

    instances.c.instance_type_id.drop()
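The data step in upgrade() matches each instance's old instance_type string against instance_types.name and copies the id into the new column, which this revision still declares as a string. A rough way to exercise that step outside nova, assuming a throwaway in-memory SQLite database and the SQLAlchemy idioms of this era:

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')        # disposable in-memory DB
    meta = sa.MetaData()
    instance_types = sa.Table('instance_types', meta,
                              sa.Column('id', sa.Integer, primary_key=True),
                              sa.Column('name', sa.String(255)))
    instances = sa.Table('instances', meta,
                         sa.Column('id', sa.Integer, primary_key=True),
                         sa.Column('instance_type', sa.String(255)),
                         sa.Column('instance_type_id', sa.String(255)))
    meta.create_all(engine)

    conn = engine.connect()
    conn.execute(instance_types.insert(), [{'id': 2, 'name': 'm1.tiny'}])
    conn.execute(instances.insert(), [{'id': 1, 'instance_type': 'm1.tiny'}])

    # The same data copy the migration performs: match on the type name,
    # write the type's id into the new column.
    for type_id, type_name in conn.execute(instance_types.select()):
        conn.execute(instances.update()
                     .where(instances.c.instance_type == type_name)
                     .values(instance_type_id=type_id))

    new_id = conn.execute(sa.select([instances.c.instance_type_id])).scalar()
    assert str(new_id) == '2'     # stored as text, since the column is String(255)

Because the new column is a string here, the copied ids land as text; the models hunk below mirrors that by declaring Instance.instance_type_id as String(255) as well.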
@@ -209,7 +209,7 @@ class Instance(BASE, NovaBase):
    hostname = Column(String(255))
    host = Column(String(255))  # , ForeignKey('hosts.id'))

    instance_type = Column(String(255))
    instance_type_id = Column(String(255))

    user_data = Column(Text)

@@ -268,6 +268,12 @@ class InstanceTypes(BASE, NovaBase):
    rxtx_quota = Column(Integer, nullable=False, default=0)
    rxtx_cap = Column(Integer, nullable=False, default=0)

    instances = relationship(Instance,
                             backref=backref('instance_type', uselist=False),
                             foreign_keys=id,
                             primaryjoin='and_(Instance.instance_type_id == '
                                         'InstanceTypes.id)')


class Volume(BASE, NovaBase):
    """Represents a block storage device that can be attached to a vm."""
@@ -66,6 +66,21 @@ class API(base.Base):
|
||||
if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
|
||||
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
|
||||
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
|
||||
# Check if the floating ip address is allocated
|
||||
if floating_ip['project_id'] is None:
|
||||
raise exception.ApiError(_("Address (%s) is not allocated") %
|
||||
floating_ip['address'])
|
||||
# Check if the floating ip address is allocated to the same project
|
||||
if floating_ip['project_id'] != context.project_id:
|
||||
LOG.warn(_("Address (%(address)s) is not allocated to your "
|
||||
"project (%(project)s)"),
|
||||
{'address': floating_ip['address'],
|
||||
'project': context.project_id})
|
||||
raise exception.ApiError(_("Address (%(address)s) is not "
|
||||
"allocated to your project"
|
||||
"(%(project)s)") %
|
||||
{'address': floating_ip['address'],
|
||||
'project': context.project_id})
|
||||
# NOTE(vish): Perhaps we should just pass this on to compute and
|
||||
# let compute communicate with network.
|
||||
host = fixed_ip['network']['host']
|
||||
|
||||
@@ -74,7 +74,12 @@ class Connection(carrot_connection.BrokerConnection):
|
||||
"""Recreates the connection instance
|
||||
|
||||
This is necessary to recover from some network errors/disconnects"""
|
||||
try:
|
||||
del cls._instance
|
||||
except AttributeError, e:
|
||||
# The _instance stuff is for testing purposes. Usually we don't use
|
||||
# it. So don't freak out if it doesn't exist.
|
||||
pass
|
||||
return cls.instance()
|
||||
|
||||
|
||||
@@ -125,9 +130,9 @@ class Consumer(messaging.Consumer):
|
||||
# NOTE(vish): This is catching all errors because we really don't
|
||||
# want exceptions to be logged 10 times a second if some
|
||||
# persistent failure occurs.
|
||||
except Exception: # pylint: disable=W0703
|
||||
except Exception, e: # pylint: disable=W0703
|
||||
if not self.failed_connection:
|
||||
LOG.exception(_("Failed to fetch message from queue"))
|
||||
LOG.exception(_("Failed to fetch message from queue: %s" % e))
|
||||
self.failed_connection = True
|
||||
|
||||
def attach_to_eventlet(self):
|
||||
|
||||
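The recreate() change only tolerates the cached singleton being absent, and the consumer hunk below it adds the caught exception to the log line. Reduced to a standalone sketch of the reset pattern (the class and method names mirror the hunk, everything else is illustrative):

    class Connection(object):

        @classmethod
        def instance(cls):
            if not hasattr(cls, '_instance'):
                cls._instance = cls()      # the real code builds a carrot broker connection here
            return cls._instance

        @classmethod
        def recreate(cls):
            """Drop the cached connection and hand back a fresh one."""
            try:
                del cls._instance          # may not exist outside the tests
            except AttributeError:
                pass
            return cls.instance()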
@@ -34,5 +34,7 @@ class ChanceScheduler(driver.Scheduler):
|
||||
|
||||
hosts = self.hosts_up(context, topic)
|
||||
if not hosts:
|
||||
raise driver.NoValidHost(_("No hosts found"))
|
||||
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
|
||||
" for this request. Is the appropriate"
|
||||
" service running?"))
|
||||
return hosts[int(random.random() * len(hosts))]
|
||||
|
||||
@@ -72,7 +72,9 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
{'host': service['host'],
|
||||
'scheduled_at': now})
|
||||
return service['host']
|
||||
raise driver.NoValidHost(_("No hosts found"))
|
||||
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
|
||||
" for this request. Is the appropriate"
|
||||
" service running?"))
|
||||
|
||||
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
|
||||
"""Picks a host that is up and has the fewest volumes."""
|
||||
@@ -107,7 +109,9 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
{'host': service['host'],
|
||||
'scheduled_at': now})
|
||||
return service['host']
|
||||
raise driver.NoValidHost(_("No hosts found"))
|
||||
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
|
||||
" for this request. Is the appropriate"
|
||||
" service running?"))
|
||||
|
||||
def schedule_set_network_host(self, context, *_args, **_kwargs):
|
||||
"""Picks a host that is up and has the fewest networks."""
|
||||
@@ -119,4 +123,6 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
raise driver.NoValidHost(_("All hosts have too many networks"))
|
||||
if self.service_is_up(service):
|
||||
return service['host']
|
||||
raise driver.NoValidHost(_("No hosts found"))
|
||||
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
|
||||
" for this request. Is the appropriate"
|
||||
" service running?"))
|
||||
|
||||
@@ -52,5 +52,8 @@ class ZoneScheduler(driver.Scheduler):
|
||||
zone = _kwargs.get('availability_zone')
|
||||
hosts = self.hosts_up_with_zone(context, topic, zone)
|
||||
if not hosts:
|
||||
raise driver.NoValidHost(_("No hosts found"))
|
||||
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
|
||||
" for this request. Is the appropriate"
|
||||
" service running?"))
|
||||
|
||||
return hosts[int(random.random() * len(hosts))]
|
||||
|
||||
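All four scheduler hunks swap the terse "No hosts found" for a message that says what to check. The shared shape, condensed into a self-contained sketch (NoValidHost here is a stand-in for nova.scheduler.driver.NoValidHost and pick_host is an illustrative name, not scheduler code):

    import random

    class NoValidHost(Exception):
        """Stand-in for nova.scheduler.driver.NoValidHost."""

    def pick_host(hosts):
        # Explain *why* nothing was scheduled instead of a bare "No hosts found".
        if not hosts:
            raise NoValidHost("Scheduler was unable to locate a host for this"
                              " request. Is the appropriate service running?")
        # random.choice(hosts) would behave the same for the non-empty case.
        return hosts[int(random.random() * len(hosts))]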
@@ -53,13 +53,13 @@ class APITest(test.TestCase):
|
||||
#api.application = succeed
|
||||
api = self._wsgi_app(succeed)
|
||||
resp = Request.blank('/').get_response(api)
|
||||
self.assertFalse('computeFault' in resp.body, resp.body)
|
||||
self.assertFalse('cloudServersFault' in resp.body, resp.body)
|
||||
self.assertEqual(resp.status_int, 200, resp.body)
|
||||
|
||||
#api.application = raise_webob_exc
|
||||
api = self._wsgi_app(raise_webob_exc)
|
||||
resp = Request.blank('/').get_response(api)
|
||||
self.assertFalse('computeFault' in resp.body, resp.body)
|
||||
self.assertFalse('cloudServersFault' in resp.body, resp.body)
|
||||
self.assertEqual(resp.status_int, 404, resp.body)
|
||||
|
||||
#api.application = raise_api_fault
|
||||
@@ -71,11 +71,11 @@ class APITest(test.TestCase):
|
||||
#api.application = fail
|
||||
api = self._wsgi_app(fail)
|
||||
resp = Request.blank('/').get_response(api)
|
||||
self.assertTrue('{"computeFault' in resp.body, resp.body)
|
||||
self.assertTrue('{"cloudServersFault' in resp.body, resp.body)
|
||||
self.assertEqual(resp.status_int, 500, resp.body)
|
||||
|
||||
#api.application = fail
|
||||
api = self._wsgi_app(fail)
|
||||
resp = Request.blank('/.xml').get_response(api)
|
||||
self.assertTrue('<computeFault' in resp.body, resp.body)
|
||||
self.assertTrue('<cloudServersFault' in resp.body, resp.body)
|
||||
self.assertEqual(resp.status_int, 500, resp.body)
|
||||
|
||||
@@ -15,44 +15,127 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from nova import test
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
|
||||
|
||||
class TestFaults(test.TestCase):
|
||||
"""Tests covering `nova.api.openstack.faults:Fault` class."""
|
||||
|
||||
def test_fault_parts(self):
|
||||
req = webob.Request.blank('/.xml')
|
||||
f = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
|
||||
resp = req.get_response(f)
|
||||
def _prepare_xml(self, xml_string):
|
||||
"""Remove characters from string which hinder XML equality testing."""
|
||||
xml_string = xml_string.replace(" ", "")
|
||||
xml_string = xml_string.replace("\n", "")
|
||||
xml_string = xml_string.replace("\t", "")
|
||||
return xml_string
|
||||
|
||||
first_two_words = resp.body.strip().split()[:2]
|
||||
self.assertEqual(first_two_words, ['<badRequest', 'code="400">'])
|
||||
body_without_spaces = ''.join(resp.body.split())
|
||||
self.assertTrue('<message>scram</message>' in body_without_spaces)
|
||||
def test_400_fault_xml(self):
|
||||
"""Test fault serialized to XML via file-extension and/or header."""
|
||||
requests = [
|
||||
webob.Request.blank('/.xml'),
|
||||
webob.Request.blank('/', headers={"Accept": "application/xml"}),
|
||||
]
|
||||
|
||||
def test_retry_header(self):
|
||||
req = webob.Request.blank('/.xml')
|
||||
exc = webob.exc.HTTPRequestEntityTooLarge(explanation='sorry',
|
||||
headers={'Retry-After': 4})
|
||||
f = faults.Fault(exc)
|
||||
resp = req.get_response(f)
|
||||
first_two_words = resp.body.strip().split()[:2]
|
||||
self.assertEqual(first_two_words, ['<overLimit', 'code="413">'])
|
||||
body_sans_spaces = ''.join(resp.body.split())
|
||||
self.assertTrue('<message>sorry</message>' in body_sans_spaces)
|
||||
self.assertTrue('<retryAfter>4</retryAfter>' in body_sans_spaces)
|
||||
self.assertEqual(resp.headers['Retry-After'], 4)
|
||||
for request in requests:
|
||||
fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
|
||||
response = request.get_response(fault)
|
||||
|
||||
expected = self._prepare_xml("""
|
||||
<badRequest code="400" xmlns="%s">
|
||||
<message>scram</message>
|
||||
</badRequest>
|
||||
""" % common.XML_NS_V10)
|
||||
actual = self._prepare_xml(response.body)
|
||||
|
||||
self.assertEqual(response.content_type, "application/xml")
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_400_fault_json(self):
|
||||
"""Test fault serialized to JSON via file-extension and/or header."""
|
||||
requests = [
|
||||
webob.Request.blank('/.json'),
|
||||
webob.Request.blank('/', headers={"Accept": "application/json"}),
|
||||
]
|
||||
|
||||
for request in requests:
|
||||
fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
|
||||
response = request.get_response(fault)
|
||||
|
||||
expected = {
|
||||
"badRequest": {
|
||||
"message": "scram",
|
||||
"code": 400,
|
||||
},
|
||||
}
|
||||
actual = json.loads(response.body)
|
||||
|
||||
self.assertEqual(response.content_type, "application/json")
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_413_fault_xml(self):
|
||||
requests = [
|
||||
webob.Request.blank('/.xml'),
|
||||
webob.Request.blank('/', headers={"Accept": "application/xml"}),
|
||||
]
|
||||
|
||||
for request in requests:
|
||||
exc = webob.exc.HTTPRequestEntityTooLarge
|
||||
fault = faults.Fault(exc(explanation='sorry',
|
||||
headers={'Retry-After': 4}))
|
||||
response = request.get_response(fault)
|
||||
|
||||
expected = self._prepare_xml("""
|
||||
<overLimit code="413" xmlns="%s">
|
||||
<message>sorry</message>
|
||||
<retryAfter>4</retryAfter>
|
||||
</overLimit>
|
||||
""" % common.XML_NS_V10)
|
||||
actual = self._prepare_xml(response.body)
|
||||
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertEqual(response.content_type, "application/xml")
|
||||
self.assertEqual(response.headers['Retry-After'], 4)
|
||||
|
||||
def test_413_fault_json(self):
|
||||
"""Test fault serialized to JSON via file-extension and/or header."""
|
||||
requests = [
|
||||
webob.Request.blank('/.json'),
|
||||
webob.Request.blank('/', headers={"Accept": "application/json"}),
|
||||
]
|
||||
|
||||
for request in requests:
|
||||
exc = webob.exc.HTTPRequestEntityTooLarge
|
||||
fault = faults.Fault(exc(explanation='sorry',
|
||||
headers={'Retry-After': 4}))
|
||||
response = request.get_response(fault)
|
||||
|
||||
expected = {
|
||||
"overLimit": {
|
||||
"message": "sorry",
|
||||
"code": 413,
|
||||
"retryAfter": 4,
|
||||
},
|
||||
}
|
||||
actual = json.loads(response.body)
|
||||
|
||||
self.assertEqual(response.content_type, "application/json")
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_raise(self):
|
||||
"""Ensure the ability to raise `Fault`s in WSGI-ified methods."""
|
||||
@webob.dec.wsgify
|
||||
def raiser(req):
|
||||
raise faults.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
|
||||
|
||||
req = webob.Request.blank('/.xml')
|
||||
resp = req.get_response(raiser)
|
||||
self.assertEqual(resp.content_type, "application/xml")
|
||||
self.assertEqual(resp.status_int, 404)
|
||||
self.assertTrue('whut?' in resp.body)
|
||||
|
||||
@@ -263,7 +263,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
{'id': 124, 'name': 'queued backup'},
|
||||
{'id': 125, 'name': 'saving backup'},
|
||||
{'id': 126, 'name': 'active backup'},
|
||||
{'id': 127, 'name': 'killed backup'}]
|
||||
{'id': 127, 'name': 'killed backup'},
|
||||
{'id': 129, 'name': None}]
|
||||
|
||||
self.assertDictListMatch(response_list, expected)
|
||||
|
||||
@@ -334,7 +335,27 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
name="public image"
|
||||
updated="%(expected_now)s"
|
||||
created="%(expected_now)s"
|
||||
status="ACTIVE" />
|
||||
status="ACTIVE"
|
||||
xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" />
|
||||
""" % (locals()))
|
||||
|
||||
self.assertEqual(expected_image.toxml(), actual_image.toxml())
|
||||
|
||||
def test_get_image_xml_no_name(self):
|
||||
request = webob.Request.blank('/v1.0/images/129')
|
||||
request.accept = "application/xml"
|
||||
response = request.get_response(fakes.wsgi_app())
|
||||
|
||||
actual_image = minidom.parseString(response.body.replace(" ", ""))
|
||||
|
||||
expected_now = self.NOW_API_FORMAT
|
||||
expected_image = minidom.parseString("""
|
||||
<image id="129"
|
||||
name="None"
|
||||
updated="%(expected_now)s"
|
||||
created="%(expected_now)s"
|
||||
status="ACTIVE"
|
||||
xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" />
|
||||
""" % (locals()))
|
||||
|
||||
self.assertEqual(expected_image.toxml(), actual_image.toxml())
|
||||
@@ -353,7 +374,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
name="public image"
|
||||
updated="%(expected_now)s"
|
||||
created="%(expected_now)s"
|
||||
status="ACTIVE">
|
||||
status="ACTIVE"
|
||||
xmlns="http://docs.openstack.org/compute/api/v1.1">
|
||||
<links>
|
||||
<link href="%(expected_href)s" rel="self"/>
|
||||
<link href="%(expected_href)s" rel="bookmark"
|
||||
@@ -389,7 +411,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
self.assertEqual(404, response.status_int)
|
||||
|
||||
expected = minidom.parseString("""
|
||||
<itemNotFound code="404">
|
||||
<itemNotFound code="404"
|
||||
xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
|
||||
<message>
|
||||
Image not found.
|
||||
</message>
|
||||
@@ -422,8 +445,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
response = request.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(404, response.status_int)
|
||||
|
||||
# NOTE(justinsb): I believe this should still use the v1.0 XSD,
|
||||
# because the element hasn't changed definition
|
||||
expected = minidom.parseString("""
|
||||
<itemNotFound code="404">
|
||||
<itemNotFound code="404"
|
||||
xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
|
||||
<message>
|
||||
Image not found.
|
||||
</message>
|
||||
@@ -516,6 +542,13 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
'updated': self.NOW_API_FORMAT,
|
||||
'created': self.NOW_API_FORMAT,
|
||||
'status': 'FAILED',
|
||||
},
|
||||
{
|
||||
'id': 129,
|
||||
'name': None,
|
||||
'updated': self.NOW_API_FORMAT,
|
||||
'created': self.NOW_API_FORMAT,
|
||||
'status': 'ACTIVE',
|
||||
}]
|
||||
|
||||
self.assertDictListMatch(expected, response_list)
|
||||
@@ -635,7 +668,29 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
"type": "application/xml",
|
||||
"href": "http://localhost/v1.1/images/127",
|
||||
}],
|
||||
}]
|
||||
},
|
||||
{
|
||||
'id': 129,
|
||||
'name': None,
|
||||
'updated': self.NOW_API_FORMAT,
|
||||
'created': self.NOW_API_FORMAT,
|
||||
'status': 'ACTIVE',
|
||||
"links": [{
|
||||
"rel": "self",
|
||||
"href": "http://localhost/v1.1/images/129",
|
||||
},
|
||||
{
|
||||
"rel": "bookmark",
|
||||
"type": "application/json",
|
||||
"href": "http://localhost/v1.1/images/129",
|
||||
},
|
||||
{
|
||||
"rel": "bookmark",
|
||||
"type": "application/xml",
|
||||
"href": "http://localhost/v1.1/images/129",
|
||||
}],
|
||||
},
|
||||
]
|
||||
|
||||
self.assertDictListMatch(expected, response_list)
|
||||
|
||||
@@ -694,4 +749,9 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
|
||||
status='active', properties=other_backup_properties)
|
||||
image_id += 1
|
||||
|
||||
# Image without a name
|
||||
add_fixture(id=image_id, is_public=True, status='active',
|
||||
properties={})
|
||||
image_id += 1
|
||||
|
||||
return fixtures
|
||||
|
||||
@@ -136,10 +136,17 @@ class LimitsControllerTest(BaseLimitTestSuite):
|
||||
request = self._get_index_request("application/xml")
|
||||
response = request.get_response(self.controller)
|
||||
|
||||
expected = "<limits><rate/><absolute/></limits>"
|
||||
body = response.body.replace("\n", "").replace(" ", "")
|
||||
expected = parseString("""
|
||||
<limits
|
||||
xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
|
||||
<rate/>
|
||||
<absolute/>
|
||||
</limits>
|
||||
""".replace(" ", ""))
|
||||
|
||||
self.assertEqual(expected, body)
|
||||
body = parseString(response.body.replace(" ", ""))
|
||||
|
||||
self.assertEqual(expected.toxml(), body.toxml())
|
||||
|
||||
def test_index_xml(self):
|
||||
"""Test getting limit details in XML."""
|
||||
@@ -148,7 +155,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
|
||||
response = request.get_response(self.controller)
|
||||
|
||||
expected = parseString("""
|
||||
<limits>
|
||||
<limits
|
||||
xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
|
||||
<rate>
|
||||
<limit URI="*" regex=".*" remaining="10" resetTime="0"
|
||||
unit="MINUTE" value="10" verb="GET"/>
|
||||
|
||||
@@ -32,6 +32,7 @@ from nova import test
|
||||
import nova.api.openstack
|
||||
from nova.api.openstack import servers
|
||||
import nova.compute.api
|
||||
from nova.compute import instance_types
|
||||
import nova.db.api
|
||||
from nova.db.sqlalchemy.models import Instance
|
||||
from nova.db.sqlalchemy.models import InstanceMetadata
|
||||
@@ -71,13 +72,19 @@ def instance_address(context, instance_id):
|
||||
return None
|
||||
|
||||
|
||||
def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
|
||||
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
|
||||
host=None):
|
||||
metadata = []
|
||||
metadata.append(InstanceMetadata(key='seq', value=id))
|
||||
|
||||
inst_type = instance_types.get_instance_type_by_flavor_id(1)
|
||||
|
||||
if public_addresses == None:
|
||||
public_addresses = list()
|
||||
|
||||
if host != None:
|
||||
host = str(host)
|
||||
|
||||
instance = {
|
||||
"id": id,
|
||||
"admin_pass": "",
|
||||
@@ -95,8 +102,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
|
||||
"vcpus": 0,
|
||||
"local_gb": 0,
|
||||
"hostname": "",
|
||||
"host": None,
|
||||
"instance_type": "1",
|
||||
"host": host,
|
||||
"instance_type": dict(inst_type),
|
||||
"user_data": "",
|
||||
"reservation_id": "",
|
||||
"mac_address": "",
|
||||
@@ -377,7 +384,6 @@ class ServersTest(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
server = json.loads(res.body)['server']
|
||||
self.assertEqual('serv', server['adminPass'][:4])
|
||||
self.assertEqual(16, len(server['adminPass']))
|
||||
self.assertEqual('server_test', server['name'])
|
||||
self.assertEqual(1, server['id'])
|
||||
@@ -392,6 +398,74 @@ class ServersTest(test.TestCase):
|
||||
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
|
||||
self._test_create_instance_helper()
|
||||
|
||||
def test_create_instance_no_name(self):
|
||||
self._setup_for_create_instance()
|
||||
|
||||
body = {
|
||||
'server': {
|
||||
'imageId': 3,
|
||||
'flavorId': 1,
|
||||
'metadata': {
|
||||
'hello': 'world',
|
||||
'open': 'stack',
|
||||
},
|
||||
'personality': {},
|
||||
},
|
||||
}
|
||||
|
||||
req = webob.Request.blank('/v1.0/servers')
|
||||
req.method = 'POST'
|
||||
req.body = json.dumps(body)
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_create_instance_nonstring_name(self):
|
||||
self._setup_for_create_instance()
|
||||
|
||||
body = {
|
||||
'server': {
|
||||
'name': 12,
|
||||
'imageId': 3,
|
||||
'flavorId': 1,
|
||||
'metadata': {
|
||||
'hello': 'world',
|
||||
'open': 'stack',
|
||||
},
|
||||
'personality': {},
|
||||
},
|
||||
}
|
||||
|
||||
req = webob.Request.blank('/v1.0/servers')
|
||||
req.method = 'POST'
|
||||
req.body = json.dumps(body)
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_create_instance_whitespace_name(self):
|
||||
self._setup_for_create_instance()
|
||||
|
||||
body = {
|
||||
'server': {
|
||||
'name': ' ',
|
||||
'imageId': 3,
|
||||
'flavorId': 1,
|
||||
'metadata': {
|
||||
'hello': 'world',
|
||||
'open': 'stack',
|
||||
},
|
||||
'personality': {},
|
||||
},
|
||||
}
|
||||
|
||||
req = webob.Request.blank('/v1.0/servers')
|
||||
req.method = 'POST'
|
||||
req.body = json.dumps(body)
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_create_instance_v11(self):
|
||||
self._setup_for_create_instance()
|
||||
|
||||
@@ -418,7 +492,6 @@ class ServersTest(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
server = json.loads(res.body)['server']
|
||||
self.assertEqual('serv', server['adminPass'][:4])
|
||||
self.assertEqual(16, len(server['adminPass']))
|
||||
self.assertEqual('server_test', server['name'])
|
||||
self.assertEqual(1, server['id'])
|
||||
@@ -448,39 +521,82 @@ class ServersTest(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 422)
|
||||
|
||||
def test_update_bad_params(self):
|
||||
def test_update_nonstring_name(self):
|
||||
""" Confirm that update is filtering params """
|
||||
inst_dict = dict(cat='leopard', name='server_test', adminPass='bacon')
|
||||
inst_dict = dict(name=12, adminPass='bacon')
|
||||
self.body = json.dumps(dict(server=inst_dict))
|
||||
|
||||
def server_update(context, id, params):
|
||||
self.update_called = True
|
||||
filtered_dict = dict(name='server_test', admin_pass='bacon')
|
||||
self.assertEqual(params, filtered_dict)
|
||||
|
||||
self.stubs.Set(nova.db.api, 'instance_update',
|
||||
server_update)
|
||||
|
||||
req = webob.Request.blank('/v1.0/servers/1')
|
||||
req.method = 'PUT'
|
||||
req.content_type = "application/json"
|
||||
req.body = self.body
|
||||
req.get_response(fakes.wsgi_app())
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_update_server(self):
|
||||
def test_update_whitespace_name(self):
|
||||
""" Confirm that update is filtering params """
|
||||
inst_dict = dict(name=' ', adminPass='bacon')
|
||||
self.body = json.dumps(dict(server=inst_dict))
|
||||
|
||||
req = webob.Request.blank('/v1.0/servers/1')
|
||||
req.method = 'PUT'
|
||||
req.content_type = "application/json"
|
||||
req.body = self.body
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_update_null_name(self):
|
||||
""" Confirm that update is filtering params """
|
||||
inst_dict = dict(name='', adminPass='bacon')
|
||||
self.body = json.dumps(dict(server=inst_dict))
|
||||
|
||||
req = webob.Request.blank('/v1.0/servers/1')
|
||||
req.method = 'PUT'
|
||||
req.content_type = "application/json"
|
||||
req.body = self.body
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_update_server_v10(self):
|
||||
inst_dict = dict(name='server_test', adminPass='bacon')
|
||||
self.body = json.dumps(dict(server=inst_dict))
|
||||
|
||||
def server_update(context, id, params):
|
||||
filtered_dict = dict(name='server_test', admin_pass='bacon')
|
||||
filtered_dict = dict(
|
||||
display_name='server_test',
|
||||
admin_pass='bacon',
|
||||
)
|
||||
self.assertEqual(params, filtered_dict)
|
||||
return filtered_dict
|
||||
|
||||
self.stubs.Set(nova.db.api, 'instance_update',
|
||||
server_update)
|
||||
|
||||
req = webob.Request.blank('/v1.0/servers/1')
|
||||
req.method = 'PUT'
|
||||
req.content_type = "application/json"
|
||||
req.body = self.body
|
||||
req.get_response(fakes.wsgi_app())
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 204)
|
||||
|
||||
def test_update_server_adminPass_ignored_v11(self):
|
||||
inst_dict = dict(name='server_test', adminPass='bacon')
|
||||
self.body = json.dumps(dict(server=inst_dict))
|
||||
|
||||
def server_update(context, id, params):
|
||||
filtered_dict = dict(display_name='server_test')
|
||||
self.assertEqual(params, filtered_dict)
|
||||
return filtered_dict
|
||||
|
||||
self.stubs.Set(nova.db.api, 'instance_update',
|
||||
server_update)
|
||||
|
||||
req = webob.Request.blank('/v1.1/servers/1')
|
||||
req.method = 'PUT'
|
||||
req.content_type = "application/json"
|
||||
req.body = self.body
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 204)
|
||||
|
||||
def test_create_backup_schedules(self):
|
||||
req = webob.Request.blank('/v1.0/servers/1/backup_schedule')
|
||||
@@ -519,7 +635,8 @@ class ServersTest(test.TestCase):
|
||||
self.assertEqual(s['hostId'], '')
|
||||
self.assertEqual(s['name'], 'server%d' % i)
|
||||
self.assertEqual(s['imageId'], '10')
|
||||
self.assertEqual(s['flavorId'], '1')
|
||||
self.assertEqual(s['flavorId'], 1)
|
||||
self.assertEqual(s['status'], 'BUILD')
|
||||
self.assertEqual(s['metadata']['seq'], i)
|
||||
|
||||
def test_get_all_server_details_v1_1(self):
|
||||
@@ -533,6 +650,7 @@ class ServersTest(test.TestCase):
|
||||
self.assertEqual(s['name'], 'server%d' % i)
|
||||
self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10')
|
||||
self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1')
|
||||
self.assertEqual(s['status'], 'BUILD')
|
||||
self.assertEqual(s['metadata']['seq'], i)
|
||||
|
||||
def test_get_all_server_details_with_host(self):
|
||||
@@ -543,12 +661,8 @@ class ServersTest(test.TestCase):
|
||||
instances - 2 on one host and 3 on another.
|
||||
'''
|
||||
|
||||
def stub_instance(id, user_id=1):
|
||||
return Instance(id=id, state=0, image_id=10, user_id=user_id,
|
||||
display_name='server%s' % id, host='host%s' % (id % 2))
|
||||
|
||||
def return_servers_with_host(context, user_id=1):
|
||||
return [stub_instance(i) for i in xrange(5)]
|
||||
return [stub_instance(i, 1, None, None, i % 2) for i in xrange(5)]
|
||||
|
||||
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
|
||||
return_servers_with_host)
|
||||
@@ -566,7 +680,8 @@ class ServersTest(test.TestCase):
|
||||
self.assertEqual(s['id'], i)
|
||||
self.assertEqual(s['hostId'], host_ids[i % 2])
|
||||
self.assertEqual(s['name'], 'server%d' % i)
|
||||
self.assertEqual(s['imageId'], 10)
|
||||
self.assertEqual(s['imageId'], '10')
|
||||
self.assertEqual(s['flavorId'], 1)
|
||||
|
||||
def test_server_pause(self):
|
||||
FLAGS.allow_admin_api = True
|
||||
@@ -653,6 +768,74 @@ class ServersTest(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 404)
|
||||
|
||||
def test_server_change_password(self):
|
||||
body = {'changePassword': {'adminPass': '1234pass'}}
|
||||
req = webob.Request.blank('/v1.0/servers/1/action')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
req.body = json.dumps(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 501)
|
||||
|
||||
def test_server_change_password_v1_1(self):
|
||||
|
||||
class MockSetAdminPassword(object):
|
||||
def __init__(self):
|
||||
self.instance_id = None
|
||||
self.password = None
|
||||
|
||||
def __call__(self, context, instance_id, password):
|
||||
self.instance_id = instance_id
|
||||
self.password = password
|
||||
|
||||
mock_method = MockSetAdminPassword()
|
||||
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
|
||||
body = {'changePassword': {'adminPass': '1234pass'}}
|
||||
req = webob.Request.blank('/v1.1/servers/1/action')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
req.body = json.dumps(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 202)
|
||||
self.assertEqual(mock_method.instance_id, '1')
|
||||
self.assertEqual(mock_method.password, '1234pass')
|
||||
|
||||
def test_server_change_password_bad_request_v1_1(self):
|
||||
body = {'changePassword': {'pass': '12345'}}
|
||||
req = webob.Request.blank('/v1.1/servers/1/action')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
req.body = json.dumps(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_server_change_password_empty_string_v1_1(self):
|
||||
body = {'changePassword': {'adminPass': ''}}
|
||||
req = webob.Request.blank('/v1.1/servers/1/action')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
req.body = json.dumps(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_server_change_password_none_v1_1(self):
|
||||
body = {'changePassword': {'adminPass': None}}
|
||||
req = webob.Request.blank('/v1.1/servers/1/action')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
req.body = json.dumps(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_server_change_password_not_a_string_v1_1(self):
|
||||
body = {'changePassword': {'adminPass': 1234}}
|
||||
req = webob.Request.blank('/v1.1/servers/1/action')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
req.body = json.dumps(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_server_reboot(self):
|
||||
body = dict(server=dict(
|
||||
name='server_test', imageId=2, flavorId=2, metadata={},
|
||||
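The new tests pin the changePassword contract down to: the action body must carry a non-empty string adminPass, anything else is a 400. A sketch of the check those tests imply; the helper name and the ValueError are illustrative, not the controller's actual code:

    def _validate_admin_password(body):
        """Return the password from a changePassword action body or raise."""
        try:
            password = body['changePassword']['adminPass']
        except (KeyError, TypeError):
            raise ValueError('changePassword requires an adminPass field')
        if not isinstance(password, basestring) or password == '':
            raise ValueError('adminPass must be a non-empty string')
        return password

In the controller, a failure of this kind is what gets translated into the 400 responses the tests above assert.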
@@ -738,7 +921,7 @@ class ServersTest(test.TestCase):
|
||||
fake_migration_get)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
body = json.loads(res.body)
|
||||
self.assertEqual(body['server']['status'], 'resize-confirm')
|
||||
self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM')
|
||||
|
||||
def test_confirm_resize_server(self):
|
||||
req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
|
||||
@@ -1315,7 +1498,7 @@ class TestServerInstanceCreation(test.TestCase):
|
||||
self.assertEquals(response.status_int, 200)
|
||||
response = json.loads(response.body)
|
||||
self.assertTrue('adminPass' in response['server'])
|
||||
self.assertTrue(response['server']['adminPass'].startswith('fake'))
|
||||
self.assertEqual(16, len(response['server']['adminPass']))
|
||||
|
||||
def test_create_instance_admin_pass_xml(self):
|
||||
request, response, dummy = \
|
||||
@@ -1324,7 +1507,7 @@ class TestServerInstanceCreation(test.TestCase):
|
||||
dom = minidom.parseString(response.body)
|
||||
server = dom.childNodes[0]
|
||||
self.assertEquals(server.nodeName, 'server')
|
||||
self.assertTrue(server.getAttribute('adminPass').startswith('fake'))
|
||||
self.assertEqual(16, len(server.getAttribute('adminPass')))
|
||||
|
||||
|
||||
class TestGetKernelRamdiskFromImage(test.TestCase):
|
||||
|
||||
@@ -34,8 +34,10 @@ class VersionsTest(test.TestCase):
|
||||
|
||||
def test_get_version_list(self):
|
||||
req = webob.Request.blank('/')
|
||||
req.accept = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 200)
|
||||
self.assertEqual(res.content_type, "application/json")
|
||||
versions = json.loads(res.body)["versions"]
|
||||
expected = [
|
||||
{
|
||||
@@ -61,6 +63,30 @@ class VersionsTest(test.TestCase):
|
||||
]
|
||||
self.assertEqual(versions, expected)
|
||||
|
||||
def test_get_version_list_xml(self):
|
||||
req = webob.Request.blank('/')
|
||||
req.accept = "application/xml"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 200)
|
||||
self.assertEqual(res.content_type, "application/xml")
|
||||
|
||||
expected = """<versions>
|
||||
<version id="v1.1" status="CURRENT">
|
||||
<links>
|
||||
<link href="http://localhost/v1.1" rel="self"/>
|
||||
</links>
|
||||
</version>
|
||||
<version id="v1.0" status="DEPRECATED">
|
||||
<links>
|
||||
<link href="http://localhost/v1.0" rel="self"/>
|
||||
</links>
|
||||
</version>
|
||||
</versions>""".replace(" ", "").replace("\n", "")
|
||||
|
||||
actual = res.body.replace(" ", "").replace("\n", "")
|
||||
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_view_builder(self):
|
||||
base_url = "http://example.org/"
|
||||
|
||||
|
||||
@@ -28,29 +28,34 @@ def stub_out_db_instance_api(stubs, injected=True):
|
||||
"""Stubs out the db API for creating Instances."""
|
||||
|
||||
INSTANCE_TYPES = {
|
||||
'm1.tiny': dict(memory_mb=512,
|
||||
'm1.tiny': dict(id=2,
|
||||
memory_mb=512,
|
||||
vcpus=1,
|
||||
local_gb=0,
|
||||
flavorid=1,
|
||||
rxtx_cap=1),
|
||||
'm1.small': dict(memory_mb=2048,
|
||||
'm1.small': dict(id=5,
|
||||
memory_mb=2048,
|
||||
vcpus=1,
|
||||
local_gb=20,
|
||||
flavorid=2,
|
||||
rxtx_cap=2),
|
||||
'm1.medium':
|
||||
dict(memory_mb=4096,
|
||||
dict(id=1,
|
||||
memory_mb=4096,
|
||||
vcpus=2,
|
||||
local_gb=40,
|
||||
flavorid=3,
|
||||
rxtx_cap=3),
|
||||
'm1.large': dict(memory_mb=8192,
|
||||
'm1.large': dict(id=3,
|
||||
memory_mb=8192,
|
||||
vcpus=4,
|
||||
local_gb=80,
|
||||
flavorid=4,
|
||||
rxtx_cap=4),
|
||||
'm1.xlarge':
|
||||
dict(memory_mb=16384,
|
||||
dict(id=4,
|
||||
memory_mb=16384,
|
||||
vcpus=8,
|
||||
local_gb=160,
|
||||
flavorid=5,
|
||||
@@ -107,6 +112,12 @@ def stub_out_db_instance_api(stubs, injected=True):
|
||||
def fake_instance_type_get_by_name(context, name):
|
||||
return INSTANCE_TYPES[name]
|
||||
|
||||
def fake_instance_type_get_by_id(context, id):
|
||||
for name, inst_type in INSTANCE_TYPES.iteritems():
|
||||
if str(inst_type['id']) == str(id):
|
||||
return inst_type
|
||||
return None
|
||||
|
||||
def fake_network_get_by_instance(context, instance_id):
|
||||
# Even instance numbers are on vlan networks
|
||||
if instance_id % 2 == 0:
|
||||
@@ -136,6 +147,7 @@ def stub_out_db_instance_api(stubs, injected=True):
|
||||
fake_network_get_all_by_instance)
|
||||
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
|
||||
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
|
||||
stubs.Set(db, 'instance_type_get_by_id', fake_instance_type_get_by_id)
|
||||
stubs.Set(db, 'instance_get_fixed_address',
|
||||
fake_instance_get_fixed_address)
|
||||
stubs.Set(db, 'instance_get_fixed_address_v6',
|
||||
|
||||
56
nova/tests/integrated/test_xml.py
Normal file
@@ -0,0 +1,56 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags
from nova.log import logging
from nova.tests.integrated import integrated_helpers
from nova.api.openstack import common


LOG = logging.getLogger('nova.tests.integrated')


FLAGS = flags.FLAGS
FLAGS.verbose = True


class XmlTests(integrated_helpers._IntegratedTestBase):
    """"Some basic XML sanity checks."""

    def test_namespace_limits(self):
        """/limits should have v1.0 namespace (hasn't changed in 1.1)."""
        headers = {}
        headers['Accept'] = 'application/xml'

        response = self.api.api_request('/limits', headers=headers)
        data = response.read()
        LOG.debug("data: %s" % data)

        prefix = '<limits xmlns="%s"' % common.XML_NS_V10
        self.assertTrue(data.startswith(prefix))

    def test_namespace_servers(self):
        """/servers should have v1.1 namespace (has changed in 1.1)."""
        headers = {}
        headers['Accept'] = 'application/xml'

        response = self.api.api_request('/servers', headers=headers)
        data = response.read()
        LOG.debug("data: %s" % data)

        prefix = '<servers xmlns="%s"' % common.XML_NS_V11
        self.assertTrue(data.startswith(prefix))
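These namespace tests match on a string prefix; the same property can be checked on the parsed document, which is insensitive to attribute order. A small sketch with a made-up response body (the namespace value is the one the v1.0 tests use):

    import xml.etree.ElementTree as ET

    XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'  # assumed to match common.XML_NS_V10

    data = '<limits xmlns="%s"><rate/><absolute/></limits>' % XML_NS_V10
    root = ET.fromstring(data)
    assert root.tag == '{%s}limits' % XML_NS_V10   # ElementTree folds the namespace into the tag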
@@ -41,6 +41,7 @@ from nova.compute import power_state
|
||||
from nova.api.ec2 import cloud
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova.image import local
|
||||
from nova.exception import NotFound
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
@@ -71,7 +72,8 @@ class CloudTestCase(test.TestCase):
|
||||
host = self.network.get_network_host(self.context.elevated())
|
||||
|
||||
def fake_show(meh, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine'}}
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show)
|
||||
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
|
||||
@@ -216,6 +218,35 @@ class CloudTestCase(test.TestCase):
|
||||
db.service_destroy(self.context, comp1['id'])
|
||||
db.service_destroy(self.context, comp2['id'])
|
||||
|
||||
def test_describe_images(self):
|
||||
describe_images = self.cloud.describe_images
|
||||
|
||||
def fake_detail(meh, context):
|
||||
return [{'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine'}}]
|
||||
|
||||
def fake_show_none(meh, context, id):
|
||||
raise NotFound
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
|
||||
# list all
|
||||
result1 = describe_images(self.context)
|
||||
result1 = result1['imagesSet'][0]
|
||||
self.assertEqual(result1['imageId'], 'ami-00000001')
|
||||
# provided a valid image_id
|
||||
result2 = describe_images(self.context, ['ami-00000001'])
|
||||
self.assertEqual(1, len(result2['imagesSet']))
|
||||
# provide more than 1 valid image_id
|
||||
result3 = describe_images(self.context, ['ami-00000001',
|
||||
'ami-00000002'])
|
||||
self.assertEqual(2, len(result3['imagesSet']))
|
||||
# provide an non-existing image_id
|
||||
self.stubs.UnsetAll()
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
|
||||
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
|
||||
self.assertRaises(NotFound, describe_images,
|
||||
self.context, ['ami-fake'])
|
||||
|
||||
def test_console_output(self):
|
||||
instance_type = FLAGS.default_instance_type
|
||||
max_count = 1
|
||||
|
||||
@@ -84,7 +84,8 @@ class ComputeTestCase(test.TestCase):
|
||||
inst['launch_time'] = '10'
|
||||
inst['user_id'] = self.user.id
|
||||
inst['project_id'] = self.project.id
|
||||
inst['instance_type'] = 'm1.tiny'
|
||||
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
|
||||
inst['instance_type_id'] = type_id
|
||||
inst['mac_address'] = utils.generate_mac()
|
||||
inst['ami_launch_index'] = 0
|
||||
inst.update(params)
|
||||
@@ -132,7 +133,7 @@ class ComputeTestCase(test.TestCase):
|
||||
cases = [dict(), dict(display_name=None)]
|
||||
for instance in cases:
|
||||
ref = self.compute_api.create(self.context,
|
||||
FLAGS.default_instance_type, None, **instance)
|
||||
instance_types.get_default_instance_type(), None, **instance)
|
||||
try:
|
||||
self.assertNotEqual(ref[0]['display_name'], None)
|
||||
finally:
|
||||
@@ -143,7 +144,7 @@ class ComputeTestCase(test.TestCase):
|
||||
group = self._create_group()
|
||||
ref = self.compute_api.create(
|
||||
self.context,
|
||||
instance_type=FLAGS.default_instance_type,
|
||||
instance_type=instance_types.get_default_instance_type(),
|
||||
image_id=None,
|
||||
security_group=['testgroup'])
|
||||
try:
|
||||
@@ -161,7 +162,7 @@ class ComputeTestCase(test.TestCase):
|
||||
|
||||
ref = self.compute_api.create(
|
||||
self.context,
|
||||
instance_type=FLAGS.default_instance_type,
|
||||
instance_type=instance_types.get_default_instance_type(),
|
||||
image_id=None,
|
||||
security_group=['testgroup'])
|
||||
try:
|
||||
@@ -177,7 +178,7 @@ class ComputeTestCase(test.TestCase):
|
||||
|
||||
ref = self.compute_api.create(
|
||||
self.context,
|
||||
instance_type=FLAGS.default_instance_type,
|
||||
instance_type=instance_types.get_default_instance_type(),
|
||||
image_id=None,
|
||||
security_group=['testgroup'])
|
||||
|
||||
@@ -359,8 +360,9 @@ class ComputeTestCase(test.TestCase):
|
||||
instance_id = self._create_instance()
|
||||
|
||||
self.compute.run_instance(self.context, instance_id)
|
||||
inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
|
||||
db.instance_update(self.context, instance_id,
|
||||
{'instance_type': 'm1.xlarge'})
|
||||
{'instance_type_id': inst_type['id']})
|
||||
|
||||
self.assertRaises(exception.ApiError, self.compute_api.resize,
|
||||
context, instance_id, 1)
|
||||
@@ -380,8 +382,8 @@ class ComputeTestCase(test.TestCase):
|
||||
self.compute.terminate_instance(context, instance_id)
|
||||
|
||||
def test_get_by_flavor_id(self):
|
||||
type = instance_types.get_by_flavor_id(1)
|
||||
self.assertEqual(type, 'm1.tiny')
|
||||
type = instance_types.get_instance_type_by_flavor_id(1)
|
||||
self.assertEqual(type['name'], 'm1.tiny')
|
||||
|
||||
def test_resize_same_source_fails(self):
|
||||
"""Ensure instance fails to migrate when source and destination are
|
||||
|
||||
@@ -62,7 +62,7 @@ class ConsoleTestCase(test.TestCase):
|
||||
inst['launch_time'] = '10'
|
||||
inst['user_id'] = self.user.id
|
||||
inst['project_id'] = self.project.id
|
||||
inst['instance_type'] = 'm1.tiny'
|
||||
inst['instance_type_id'] = 1
|
||||
inst['mac_address'] = utils.generate_mac()
|
||||
inst['ami_launch_index'] = 0
|
||||
return db.instance_create(self.context, inst)['id']
|
||||
|
||||
@@ -40,7 +40,11 @@ class InstanceTypeTestCase(test.TestCase):
|
||||
max_flavorid = session.query(models.InstanceTypes).\
|
||||
order_by("flavorid desc").\
|
||||
first()
|
||||
max_id = session.query(models.InstanceTypes).\
|
||||
order_by("id desc").\
|
||||
first()
|
||||
self.flavorid = max_flavorid["flavorid"] + 1
|
||||
self.id = max_id["id"] + 1
|
||||
self.name = str(int(time.time()))
|
||||
|
||||
def test_instance_type_create_then_delete(self):
|
||||
@@ -53,7 +57,7 @@ class InstanceTypeTestCase(test.TestCase):
|
||||
'instance type was not created')
|
||||
instance_types.destroy(self.name)
|
||||
self.assertEqual(1,
|
||||
instance_types.get_instance_type(self.name)["deleted"])
|
||||
instance_types.get_instance_type(self.id)["deleted"])
|
||||
self.assertEqual(starting_inst_list, instance_types.get_all_types())
|
||||
instance_types.purge(self.name)
|
||||
self.assertEqual(len(starting_inst_list),
|
||||
|
||||
@@ -67,7 +67,7 @@ class QuotaTestCase(test.TestCase):
|
||||
inst['reservation_id'] = 'r-fakeres'
|
||||
inst['user_id'] = self.user.id
|
||||
inst['project_id'] = self.project.id
|
||||
inst['instance_type'] = 'm1.large'
|
||||
inst['instance_type_id'] = '3' # m1.large
|
||||
inst['vcpus'] = cores
|
||||
inst['mac_address'] = utils.generate_mac()
|
||||
return db.instance_create(self.context, inst)['id']
|
||||
@@ -124,11 +124,12 @@ class QuotaTestCase(test.TestCase):
|
||||
for i in range(FLAGS.quota_instances):
|
||||
instance_id = self._create_instance()
|
||||
instance_ids.append(instance_id)
|
||||
inst_type = instance_types.get_instance_type_by_name('m1.small')
|
||||
self.assertRaises(quota.QuotaError, compute.API().create,
|
||||
self.context,
|
||||
min_count=1,
|
||||
max_count=1,
|
||||
instance_type='m1.small',
|
||||
instance_type=inst_type,
|
||||
image_id=1)
|
||||
for instance_id in instance_ids:
|
||||
db.instance_destroy(self.context, instance_id)
|
||||
@@ -137,11 +138,12 @@ class QuotaTestCase(test.TestCase):
|
||||
instance_ids = []
|
||||
instance_id = self._create_instance(cores=4)
|
||||
instance_ids.append(instance_id)
|
||||
inst_type = instance_types.get_instance_type_by_name('m1.small')
|
||||
self.assertRaises(quota.QuotaError, compute.API().create,
|
||||
self.context,
|
||||
min_count=1,
|
||||
max_count=1,
|
||||
instance_type='m1.small',
|
||||
instance_type=inst_type,
|
||||
image_id=1)
|
||||
for instance_id in instance_ids:
|
||||
db.instance_destroy(self.context, instance_id)
|
||||
@@ -192,11 +194,12 @@ class QuotaTestCase(test.TestCase):
|
||||
metadata = {}
|
||||
for i in range(FLAGS.quota_metadata_items + 1):
|
||||
metadata['key%s' % i] = 'value%s' % i
|
||||
inst_type = instance_types.get_instance_type_by_name('m1.small')
|
||||
self.assertRaises(quota.QuotaError, compute.API().create,
|
||||
self.context,
|
||||
min_count=1,
|
||||
max_count=1,
|
||||
instance_type='m1.small',
|
||||
instance_type=inst_type,
|
||||
image_id='fake',
|
||||
metadata=metadata)
|
||||
|
||||
@@ -207,13 +210,15 @@ class QuotaTestCase(test.TestCase):
|
||||
|
||||
def _create_with_injected_files(self, files):
|
||||
api = compute.API(image_service=self.StubImageService())
|
||||
inst_type = instance_types.get_instance_type_by_name('m1.small')
|
||||
api.create(self.context, min_count=1, max_count=1,
|
||||
instance_type='m1.small', image_id='fake',
|
||||
instance_type=inst_type, image_id='fake',
|
||||
injected_files=files)
|
||||
|
||||
def test_no_injected_files(self):
|
||||
api = compute.API(image_service=self.StubImageService())
|
||||
api.create(self.context, instance_type='m1.small', image_id='fake')
|
||||
inst_type = instance_types.get_instance_type_by_name('m1.small')
|
||||
api.create(self.context, instance_type=inst_type, image_id='fake')
|
||||
|
||||
def test_max_injected_files(self):
|
||||
files = []
|
||||
|
||||
@@ -263,7 +263,7 @@ class SimpleDriverTestCase(test.TestCase):
|
||||
inst['reservation_id'] = 'r-fakeres'
|
||||
inst['user_id'] = self.user.id
|
||||
inst['project_id'] = self.project.id
|
||||
inst['instance_type'] = 'm1.tiny'
|
||||
inst['instance_type_id'] = '1'
|
||||
inst['mac_address'] = utils.generate_mac()
|
||||
inst['vcpus'] = kwargs.get('vcpus', 1)
|
||||
inst['ami_launch_index'] = 0
|
||||
|
||||
@@ -140,7 +140,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
'vcpus': 2,
|
||||
'project_id': 'fake',
|
||||
'bridge': 'br101',
|
||||
'instance_type': 'm1.small'}
|
||||
'instance_type_id': '5'} # m1.small
|
||||
|
||||
def lazy_load_library_exists(self):
|
||||
"""check if libvirt is available."""
|
||||
@@ -479,7 +479,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
fake_timer = FakeTime()
|
||||
|
||||
self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
|
||||
self.create_fake_libvirt_mock()
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
# Start test
|
||||
@@ -488,6 +488,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
|
||||
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
|
||||
conn.firewall_driver.setattr('instance_filter_exists', fake_none)
|
||||
conn.ensure_filtering_rules_for_instance(instance_ref,
|
||||
time=fake_timer)
|
||||
except exception.Error, e:
|
||||
|
||||
@@ -106,7 +106,7 @@ class VolumeTestCase(test.TestCase):
|
||||
inst['launch_time'] = '10'
|
||||
inst['user_id'] = 'fake'
|
||||
inst['project_id'] = 'fake'
|
||||
inst['instance_type'] = 'm1.tiny'
|
||||
inst['instance_type_id'] = '2' # m1.tiny
|
||||
inst['mac_address'] = utils.generate_mac()
|
||||
inst['ami_launch_index'] = 0
|
||||
instance_id = db.instance_create(self.context, inst)['id']
|
||||
|
||||
@@ -80,7 +80,7 @@ class XenAPIVolumeTestCase(test.TestCase):
|
||||
'image_id': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'instance_type': 'm1.large',
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
|
||||
@@ -289,11 +289,11 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
'enabled':'1'}],
|
||||
'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
|
||||
'netmask': '120',
|
||||
'enabled': '1',
|
||||
'gateway': 'fe80::a00:1'}],
|
||||
'enabled': '1'}],
|
||||
'mac': 'aa:bb:cc:dd:ee:ff',
|
||||
'dns': ['10.0.0.2'],
|
||||
'gateway': '10.0.0.1'})
|
||||
'gateway': '10.0.0.1',
|
||||
'gateway6': 'fe80::a00:1'})
|
||||
|
||||
def check_vm_params_for_windows(self):
|
||||
self.assertEquals(self.vm['platform']['nx'], 'true')
|
||||
@@ -328,7 +328,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
self.assertEquals(self.vm['HVM_boot_policy'], '')
|
||||
|
||||
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
|
||||
instance_type="m1.large", os_type="linux",
|
||||
instance_type_id="3", os_type="linux",
|
||||
instance_id=1, check_injection=False):
|
||||
stubs.stubout_loopingcall_start(self.stubs)
|
||||
values = {'id': instance_id,
|
||||
@@ -337,7 +337,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
'image_id': image_id,
|
||||
'kernel_id': kernel_id,
|
||||
'ramdisk_id': ramdisk_id,
|
||||
'instance_type': instance_type,
|
||||
'instance_type_id': instance_type_id,
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': os_type}
|
||||
instance = db.instance_create(self.context, values)
|
||||
@@ -349,7 +349,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
FLAGS.xenapi_image_service = 'glance'
|
||||
self.assertRaises(Exception,
|
||||
self._test_spawn,
|
||||
1, 2, 3, "m1.xlarge")
|
||||
1, 2, 3, "4") # m1.xlarge
|
||||
|
||||
def test_spawn_raw_objectstore(self):
|
||||
FLAGS.xenapi_image_service = 'objectstore'
|
||||
@@ -523,7 +523,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
'image_id': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'instance_type': 'm1.large',
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
instance = db.instance_create(self.context, values)
|
||||
@@ -580,7 +580,7 @@ class XenAPIMigrateInstance(test.TestCase):
|
||||
'kernel_id': None,
|
||||
'ramdisk_id': None,
|
||||
'local_gb': 5,
|
||||
'instance_type': 'm1.large',
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
|
||||
|
||||
@@ -485,3 +485,7 @@ class HyperVConnection(driver.ComputeDriver):
|
||||
|
||||
def poll_rescued_instances(self, timeout):
|
||||
pass
|
||||
|
||||
def update_available_resource(self, ctxt, host):
|
||||
"""This method is supported only by libvirt."""
|
||||
return
|
||||
|
||||
@@ -169,34 +169,34 @@ def _get_network_info(instance):
|
||||
instance['id'])
|
||||
network_info = []
|
||||
|
||||
def ip_dict(ip):
|
||||
return {
|
||||
"ip": ip.address,
|
||||
"netmask": network["netmask"],
|
||||
"enabled": "1"}
|
||||
|
||||
def ip6_dict(ip6):
|
||||
prefix = ip6.network.cidr_v6
|
||||
mac = instance.mac_address
|
||||
return {
|
||||
"ip": utils.to_global_ipv6(prefix, mac),
|
||||
"netmask": ip6.network.netmask_v6,
|
||||
"gateway": ip6.network.gateway_v6,
|
||||
"enabled": "1"}
|
||||
|
||||
for network in networks:
|
||||
network_ips = [ip for ip in ip_addresses
|
||||
if ip.network_id == network.id]
|
||||
if ip['network_id'] == network['id']]
|
||||
|
||||
def ip_dict(ip):
|
||||
return {
|
||||
'ip': ip['address'],
|
||||
'netmask': network['netmask'],
|
||||
'enabled': '1'}
|
||||
|
||||
def ip6_dict():
|
||||
prefix = network['cidr_v6']
|
||||
mac = instance['mac_address']
|
||||
return {
|
||||
'ip': utils.to_global_ipv6(prefix, mac),
|
||||
'netmask': network['netmask_v6'],
|
||||
'enabled': '1'}
|
||||
|
||||
mapping = {
|
||||
'label': network['label'],
|
||||
'gateway': network['gateway'],
|
||||
'mac': instance.mac_address,
|
||||
'mac': instance['mac_address'],
|
||||
'dns': [network['dns']],
|
||||
'ips': [ip_dict(ip) for ip in network_ips]}
|
||||
|
||||
if FLAGS.use_ipv6:
|
||||
mapping['ip6s'] = [ip6_dict(ip) for ip in network_ips]
|
||||
mapping['ip6s'] = [ip6_dict()]
|
||||
mapping['gateway6'] = network['gateway_v6']
|
||||
|
||||
network_info.append((network, mapping))
|
||||
return network_info
|
||||
@@ -797,7 +797,10 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
|
||||
root_fname = '%08x' % int(disk_images['image_id'])
|
||||
size = FLAGS.minimum_root_size
|
||||
if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
|
||||
|
||||
inst_type_id = inst['instance_type_id']
|
||||
inst_type = instance_types.get_instance_type(inst_type_id)
|
||||
if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
|
||||
size = None
|
||||
root_fname += "_sm"
|
||||
|
||||
@@ -809,14 +812,13 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
user=user,
|
||||
project=project,
|
||||
size=size)
|
||||
type_data = instance_types.get_instance_type(inst['instance_type'])
|
||||
|
||||
if type_data['local_gb']:
|
||||
if inst_type['local_gb']:
|
||||
self._cache_image(fn=self._create_local,
|
||||
target=basepath('disk.local'),
|
||||
fname="local_%s" % type_data['local_gb'],
|
||||
fname="local_%s" % inst_type['local_gb'],
|
||||
cow=FLAGS.use_cow_images,
|
||||
local_gb=type_data['local_gb'])
|
||||
local_gb=inst_type['local_gb'])
|
||||
|
||||
# For now, we assume that if we're not using a kernel, we're using a
|
||||
# partitioned disk image where the target partition is the first
|
||||
@@ -828,7 +830,10 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
if FLAGS.libvirt_type == 'lxc':
|
||||
target_partition = None
|
||||
|
||||
if inst['key_data']:
|
||||
key = str(inst['key_data'])
|
||||
else:
|
||||
key = None
|
||||
net = None
|
||||
|
||||
nets = []
|
||||
@@ -839,7 +844,7 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
for (network_ref, mapping) in network_info:
|
||||
ifc_num += 1
|
||||
|
||||
if not 'injected' in network_ref:
|
||||
if not network_ref['injected']:
|
||||
continue
|
||||
|
||||
have_injected_networks = True
|
||||
@@ -947,8 +952,8 @@ class LibvirtConnection(driver.ComputeDriver):
|
||||
nics.append(self._get_nic_for_xml(network,
|
||||
mapping))
|
||||
# FIXME(vish): stick this in db
|
||||
instance_type_name = instance['instance_type']
|
||||
instance_type = instance_types.get_instance_type(instance_type_name)
|
||||
inst_type_id = instance['instance_type_id']
|
||||
inst_type = instance_types.get_instance_type(inst_type_id)
|
||||
|
||||
if FLAGS.use_cow_images:
|
||||
driver_type = 'qcow2'
|
||||
@@ -959,14 +964,15 @@ class LibvirtConnection(driver.ComputeDriver):
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
instance['name']),
'memory_kb': instance_type['memory_mb'] * 1024,
'vcpus': instance_type['vcpus'],
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
'rescue': rescue,
'local': instance_type['local_gb'],
'local': inst_type['local_gb'],
'driver_type': driver_type,
'nics': nics}

if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc':
xml_info['vncserver_host'] = FLAGS.vncserver_host
if not rescue:
if instance['kernel_id']:
@@ -1398,17 +1404,12 @@ class LibvirtConnection(driver.ComputeDriver):
# wait for completion
timeout_count = range(FLAGS.live_migration_retry_count)
while timeout_count:
try:
filter_name = 'nova-instance-%s' % instance_ref.name
self._conn.nwfilterLookupByName(filter_name)
if self.firewall_driver.instance_filter_exists(instance_ref):
break
except libvirt.libvirtError:
timeout_count.pop()
if len(timeout_count) == 0:
ec2_id = instance_ref['hostname']
iname = instance_ref.name
msg = _('Timeout migrating for %(ec2_id)s(%(iname)s)')
raise exception.Error(msg % locals())
msg = _('Timeout migrating for %s. nwfilter not found.')
raise exception.Error(msg % instance_ref.name)
time.sleep(1)

def live_migration(self, ctxt, instance_ref, dest,
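The rewritten wait loop polls the firewall driver instead of talking to libvirt's nwfilter API directly. A standalone sketch of the same countdown-and-poll pattern; the names are placeholders, not nova code:

import time

def wait_for(predicate, retries, interval=1.0):
    # One list element per allowed attempt, mirroring
    # range(FLAGS.live_migration_retry_count) above.
    attempts = list(range(retries))
    while attempts:
        if predicate():
            return
        attempts.pop()
        if not attempts:
            raise RuntimeError('Timed out waiting for condition')
        time.sleep(interval)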
@@ -1538,6 +1539,10 @@ class FirewallDriver(object):
"""
raise NotImplementedError()

def instance_filter_exists(self, instance):
"""Check nova-instance-instance-xxx exists"""
raise NotImplementedError()


class NWFilterFirewall(FirewallDriver):
"""
@@ -1845,6 +1850,21 @@ class NWFilterFirewall(FirewallDriver):
return 'nova-instance-%s' % (instance['name'])
return 'nova-instance-%s-%s' % (instance['name'], nic_id)

def instance_filter_exists(self, instance):
"""Check nova-instance-instance-xxx exists"""
network_info = _get_network_info(instance)
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
try:
self._conn.nwfilterLookupByName(instance_filter_name)
except libvirt.libvirtError:
name = instance.name
LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
'%(name)s is not found.') % locals())
return False
return True


class IptablesFirewallDriver(FirewallDriver):
def __init__(self, execute=None, **kwargs):
@@ -2034,6 +2054,10 @@ class IptablesFirewallDriver(FirewallDriver):

return ipv4_rules, ipv6_rules

def instance_filter_exists(self, instance):
"""Check nova-instance-instance-xxx exists"""
return self.nwfilter.instance_filter_exists(instance)

def refresh_security_group_members(self, security_group):
pass

@@ -21,10 +21,10 @@ Classes for making VMware VI SOAP calls.

import httplib

from suds import WebFault
from suds.client import Client
from suds.plugin import MessagePlugin
from suds.sudsobject import Property
try:
import suds
except ImportError:
suds = None

from nova import flags
from nova.virt.vmwareapi import error_util
@@ -42,7 +42,8 @@ flags.DEFINE_string('vmwareapi_wsdl_loc',
'Refer readme-vmware to setup')


class VIMMessagePlugin(MessagePlugin):
if suds:
class VIMMessagePlugin(suds.plugin.MessagePlugin):

def addAttributeForValue(self, node):
# suds does not handle AnyType properly.
@@ -56,8 +57,8 @@ class VIMMessagePlugin(MessagePlugin):
nodes and fixup nodes before sending it to the server.
"""
# suds builds the entire request object based on the wsdl schema.
# VI SDK throws server errors if optional SOAP nodes are sent without
# values, e.g. <test/> as opposed to <test>test</test>
# VI SDK throws server errors if optional SOAP nodes are sent
# without values, e.g. <test/> as opposed to <test>test</test>
context.envelope.prune()
context.envelope.walk(self.addAttributeForValue)

@@ -75,6 +76,9 @@ class Vim:
protocol: http or https
host : ESX IPAddress[:port] or ESX Hostname[:port]
"""
if not suds:
raise Exception(_("Unable to import suds."))

self._protocol = protocol
self._host_name = host
wsdl_url = FLAGS.vmwareapi_wsdl_loc
@@ -84,7 +88,7 @@ class Vim:
#wsdl_url = '%s://%s/sdk/vimService.wsdl' % (self._protocol,
# self._host_name)
url = '%s://%s/sdk' % (self._protocol, self._host_name)
self.client = Client(wsdl_url, location=url,
self.client = suds.client.Client(wsdl_url, location=url,
plugins=[VIMMessagePlugin()])
self._service_content = \
self.RetrieveServiceContent("ServiceInstance")
@@ -127,7 +131,7 @@ class Vim:
# check of the SOAP response
except error_util.VimFaultException, excep:
raise
except WebFault, excep:
except suds.WebFault, excep:
doc = excep.document
detail = doc.childAtPath("/Envelope/Body/Fault/detail")
fault_list = []
@@ -163,7 +167,7 @@ class Vim:
"""Builds the request managed object."""
# Request Managed Object Builder
if type(managed_object) == type(""):
mo = Property(managed_object)
mo = suds.sudsobject.Property(managed_object)
mo._type = managed_object
else:
mo = managed_object
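The vim module now treats suds as an optional dependency: the import is guarded, module-level references go through the suds namespace, and the Vim constructor fails with a clear error when the library is absent. A generic sketch of the pattern, with somelib standing in for suds (placeholder name, not a real nova dependency):

# Optional-import guard; 'somelib' is a placeholder library name.
try:
    import somelib
except ImportError:
    somelib = None


class NeedsSomelib(object):
    def __init__(self):
        if not somelib:
            raise Exception("Unable to import somelib.")
        # Attribute access happens at runtime, so the module can still be
        # imported (and its flags registered) without the dependency installed.
        self.client = somelib.Client()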
@@ -47,6 +47,7 @@ from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi.vmops import VMWareVMOps


LOG = logging.getLogger("nova.virt.vmwareapi_conn")

FLAGS = flags.FLAGS

@@ -101,8 +101,8 @@ class VMHelper(HelperBase):
3. Using hardware virtualization
"""

instance_type = instance_types.\
get_instance_type(instance.instance_type)
inst_type_id = instance.instance_type_id
instance_type = instance_types.get_instance_type(inst_type_id)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
@@ -169,8 +169,8 @@ class VMHelper(HelperBase):

@classmethod
def ensure_free_mem(cls, session, instance):
instance_type = instance_types.get_instance_type(
instance.instance_type)
inst_type_id = instance.instance_type_id
instance_type = instance_types.get_instance_type(inst_type_id)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
@@ -1130,7 +1130,7 @@ def _prepare_injectables(inst, networks_info):
'dns': dns,
'address_v6': ip_v6 and ip_v6['ip'] or '',
'netmask_v6': ip_v6 and ip_v6['netmask'] or '',
'gateway_v6': ip_v6 and ip_v6['gateway'] or '',
'gateway_v6': ip_v6 and info['gateway6'] or '',
'use_ipv6': FLAGS.use_ipv6}
interfaces_info.append(interface_info)

@@ -176,7 +176,7 @@ class VMOps(object):
vdi_ref, network_info)

self.create_vifs(vm_ref, network_info)
self.inject_network_info(instance, vm_ref, network_info)
self.inject_network_info(instance, network_info, vm_ref)
return vm_ref

def _spawn(self, instance, vm_ref):
@@ -802,8 +802,10 @@ class VMOps(object):
instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
flavor = db.instance_type_get_by_name(admin_context,
instance['instance_type'])

inst_type = db.instance_type_get_by_id(admin_context,
instance['instance_type_id'])

network_info = []
for network in networks:
network_IPs = [ip for ip in IPs if ip.network_id == network.id]
@@ -814,12 +816,11 @@ class VMOps(object):
"netmask": network["netmask"],
"enabled": "1"}

def ip6_dict(ip6):
def ip6_dict():
return {
"ip": utils.to_global_ipv6(network['cidr_v6'],
instance['mac_address']),
"netmask": network['netmask_v6'],
"gateway": network['gateway_v6'],
"enabled": "1"}

info = {
@@ -827,23 +828,41 @@ class VMOps(object):
'gateway': network['gateway'],
'broadcast': network['broadcast'],
'mac': instance.mac_address,
'rxtx_cap': flavor['rxtx_cap'],
'rxtx_cap': inst_type['rxtx_cap'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_IPs]}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
info['ip6s'] = [ip6_dict()]
if network['gateway_v6']:
info['gateway6'] = network['gateway_v6']
network_info.append((network, info))
return network_info

def inject_network_info(self, instance, vm_ref, network_info):
#TODO{tr3buchet) remove this shim with nova-multi-nic
def inject_network_info(self, instance, network_info=None, vm_ref=None):
"""
shim in place which makes inject_network_info work without being
passed network_info.
shim goes away after nova-multi-nic
"""
if not network_info:
network_info = self._get_network_info(instance)
self._inject_network_info(instance, network_info, vm_ref)

def _inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what VMHelper.lookup(session, instance.name) will find (ex: rescue)
"""
logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)

if vm_ref:
# this function raises if vm_ref is not a vm_opaque_ref
self._session.get_xenapi().VM.get_record(vm_ref)
else:
vm_ref = VMHelper.lookup(self._session, instance.name)

for (network, info) in network_info:
location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
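The xenstore keys written by _inject_network_info are derived from each interface's MAC with the colons stripped. A hedged sketch of one resulting record; the addresses are invented and the fields only mirror the info dict built above:

# Hypothetical xenstore record written for one NIC; all values invented.
mac = '02:16:3e:12:34:56'
location = 'vm-data/networking/%s' % mac.replace(':', '')
# location == 'vm-data/networking/02163e123456'
record = {'label': 'private',
          'mac': mac,
          'gateway': '10.0.0.1',
          'ips': [{'ip': '10.0.0.5', 'netmask': '255.255.255.0', 'enabled': '1'}],
          'dns': ['10.0.0.2']}
# The record is stored (json-encoded) under `location` for the in-guest agent.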
@@ -875,8 +894,10 @@ class VMOps(object):
VMHelper.create_vif(self._session, vm_ref, network_ref,
mac_address, device, rxtx_cap)

def reset_network(self, instance, vm_ref):
def reset_network(self, instance, vm_ref=None):
"""Creates uuid arg to pass to make_agent_call and calls it."""
if not vm_ref:
vm_ref = VMHelper.lookup(self._session, instance.name)
args = {'id': str(uuid.uuid4())}
# TODO(tr3buchet): fix function call after refactor
#resp = self._make_agent_call('resetnetwork', instance, '', args)

@@ -63,6 +63,7 @@ import xmlrpclib

from eventlet import event
from eventlet import tpool
from eventlet import timeout

from nova import context
from nova import db
@@ -140,6 +141,9 @@ flags.DEFINE_bool('xenapi_remap_vbd_dev', False,
flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd',
'Specify prefix to remap VBD dev to '
'(ex. /dev/xvdb -> /dev/sdb)')
flags.DEFINE_integer('xenapi_login_timeout',
10,
'Timeout in seconds for XenAPI login.')


def get_connection(_):
@@ -318,6 +322,9 @@ class XenAPISession(object):
def __init__(self, url, user, pw):
self.XenAPI = self.get_imported_xenapi()
self._session = self._create_session(url)
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
self._session.login_with_password(user, pw)
self.loop = None

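The new xenapi_login_timeout flag bounds the blocking login call with eventlet's Timeout, raising a prepared exception if Dom0 never answers. A standalone sketch of the same pattern; LoginTimeout and slow_login are placeholders, not nova code:

from eventlet import timeout


class LoginTimeout(Exception):
    pass


def slow_login():
    # Placeholder for the blocking XenAPI login call.
    return 'session'


def login_with_deadline(seconds=10):
    # If slow_login() has not returned after `seconds`, LoginTimeout is raised
    # in this greenthread; the timer is cancelled on normal exit.
    with timeout.Timeout(seconds, LoginTimeout("login took too long")):
        return slow_login()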
29 nova/wsgi.py
@@ -355,24 +355,25 @@ class Controller(object):

if type(result) is dict:
content_type = req.best_match_content_type()
body = self._serialize(result, content_type)
default_xmlns = self.get_default_xmlns(req)
body = self._serialize(result, content_type, default_xmlns)

response = webob.Response()
response.headers["Content-Type"] = content_type
response.body = body
return response

else:
return result

def _serialize(self, data, content_type):
def _serialize(self, data, content_type, default_xmlns):
"""
Serialize the given dict to the provided content_type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), "_serialization_metadata", {})
serializer = Serializer(_metadata)

serializer = Serializer(_metadata, default_xmlns)
try:
return serializer.serialize(data, content_type)
except exception.InvalidContentType:
@@ -388,19 +389,24 @@ class Controller(object):
serializer = Serializer(_metadata)
return serializer.deserialize(data, content_type)

def get_default_xmlns(self, req):
"""Provide the XML namespace to use if none is otherwise specified."""
return None


class Serializer(object):
"""
Serializes and deserializes dictionaries to certain MIME types.
"""

def __init__(self, metadata=None):
def __init__(self, metadata=None, default_xmlns=None):
"""
Create a serializer based on the given WSGI environment.
'metadata' is an optional dict mapping MIME types to information
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
self.default_xmlns = default_xmlns

def _get_serialize_handler(self, content_type):
handlers = {
@@ -478,11 +484,23 @@ class Serializer(object):
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, metadata, root_key, data[root_key])

xmlns = node.getAttribute('xmlns')
if not xmlns and self.default_xmlns:
node.setAttribute('xmlns', self.default_xmlns)

return node.toprettyxml(indent='    ')

def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)

# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)

if type(data) is list:
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
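With these changes a controller can hand the serializer a default XML namespace, which is stamped onto the root element only when the serialization metadata did not already set one. A hedged minidom sketch of that behaviour; the namespace URL is just an example value:

from xml.dom import minidom

doc = minidom.Document()
root = doc.createElement('server')
root.appendChild(doc.createTextNode('...'))

default_xmlns = 'http://docs.openstack.org/compute/api/v1.1'  # example value
if not root.getAttribute('xmlns') and default_xmlns:
    root.setAttribute('xmlns', default_xmlns)

xml_text = root.toprettyxml(indent='    ')
# roughly: <server xmlns="http://docs.openstack.org/compute/api/v1.1">...</server>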
@@ -532,6 +550,7 @@ def paste_config_file(basename):
"""

configfiles = [basename,
os.path.join(FLAGS.state_path, 'etc', 'nova', basename),
os.path.join(FLAGS.state_path, 'etc', basename),
os.path.join(FLAGS.state_path, basename),
'/etc/nova/%s' % basename]

@@ -22,6 +22,8 @@
# XenAPI plugin for reading/writing information to xenstore
#

import base64
import commands
try:
import json
except ImportError:
@@ -66,7 +68,7 @@ def key_init(self, arg_dict):
try:
resp = _wait_for_agent(self, request_id, arg_dict)
except TimeoutError, e:
raise PluginError("%s" % e)
raise PluginError(e)
return resp


@@ -87,7 +89,7 @@ def password(self, arg_dict):
try:
resp = _wait_for_agent(self, request_id, arg_dict)
except TimeoutError, e:
raise PluginError("%s" % e)
raise PluginError(e)
return resp


@@ -102,6 +104,75 @@ def resetnetwork(self, arg_dict):
xenstore.write_record(self, arg_dict)


@jsonify
def inject_file(self, arg_dict):
"""Expects a file path and the contents of the file to be written. Both
should be base64-encoded in order to eliminate errors as they are passed
through the stack. Writes that information to xenstore for the agent,
which will decode the file and intended path, and create it on the
instance. The original agent munged both of these into a single entry;
the new agent keeps them separate. We will need to test for the new agent,
and write the xenstore records to match the agent version. We will also
need to test to determine if the file injection method on the agent has
been disabled, and raise a NotImplemented error if that is the case.
"""
b64_path = arg_dict["b64_path"]
b64_file = arg_dict["b64_file"]
request_id = arg_dict["id"]
if self._agent_has_method("file_inject"):
# New version of the agent. Agent should receive a 'value'
# key whose value is a dictionary containing 'b64_path' and
# 'b64_file'. See old version below.
arg_dict["value"] = json.dumps({"name": "file_inject",
"value": {"b64_path": b64_path, "b64_file": b64_file}})
elif self._agent_has_method("injectfile"):
# Old agent requires file path and file contents to be
# combined into one base64 value.
raw_path = base64.b64decode(b64_path)
raw_file = base64.b64decode(b64_file)
new_b64 = base64.b64encode("%s,%s" % (raw_path, raw_file))
arg_dict["value"] = json.dumps({"name": "injectfile",
"value": new_b64})
else:
# Either the methods don't exist in the agent, or they
# have been disabled.
raise NotImplementedError(_("NOT IMPLEMENTED: Agent does not"
" support file injection."))
arg_dict["path"] = "data/host/%s" % request_id
xenstore.write_record(self, arg_dict)
try:
resp = _wait_for_agent(self, request_id, arg_dict)
except TimeoutError, e:
raise PluginError(e)
return resp


def _agent_has_method(self, method):
"""Check that the agent has a particular method by checking its
features. Cache the features so we don't have to query the agent
every time we need to check.
"""
try:
self._agent_methods
except AttributeError:
self._agent_methods = []
if not self._agent_methods:
# Haven't been defined
tmp_id = commands.getoutput("uuidgen")
dct = {}
dct["value"] = json.dumps({"name": "features", "value": ""})
dct["path"] = "data/host/%s" % tmp_id
xenstore.write_record(self, dct)
try:
resp = _wait_for_agent(self, tmp_id, dct)
except TimeoutError, e:
raise PluginError(e)
response = json.loads(resp)
# The agent returns a comma-separated list of methods.
self._agent_methods = response.split(",")
return method in self._agent_methods


def _wait_for_agent(self, request_id, arg_dict):
"""Periodically checks xenstore for a response from the agent.
The request is always written to 'data/host/{id}', and
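The inject_file handshake writes a JSON request under data/host/<request id> and then polls the same path for the agent's reply. A hedged, Python 2 style sketch of the request record for a new-style agent; the path, contents and id below are invented:

import base64
import json

b64_path = base64.b64encode('/etc/hosts')            # Python 2 str in, str out
b64_file = base64.b64encode('127.0.0.1 localhost\n')
request_id = '1234'                                  # normally uuidgen output

request = {
    'path': 'data/host/%s' % request_id,
    'value': json.dumps({'name': 'file_inject',
                         'value': {'b64_path': b64_path,
                                   'b64_file': b64_file}}),
}
# xenstore.write_record() stores request['value'] at request['path'];
# _wait_for_agent() then polls that path until the agent responds.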
@@ -119,9 +190,8 @@ def _wait_for_agent(self, request_id, arg_dict):
# First, delete the request record
arg_dict["path"] = "data/host/%s" % request_id
xenstore.delete_record(self, arg_dict)
raise TimeoutError(
"TIMEOUT: No response from agent within %s seconds." %
AGENT_TIMEOUT)
raise TimeoutError(_("TIMEOUT: No response from agent within"
" %s seconds.") % AGENT_TIMEOUT)
ret = xenstore.read_record(self, arg_dict)
# Note: the response for None will be a string that includes
# double quotes.
@@ -136,4 +206,5 @@ if __name__ == "__main__":
XenAPIPlugin.dispatch(
{"key_init": key_init,
"password": password,
"resetnetwork": resetnetwork})
"resetnetwork": resetnetwork,
"inject_file": inject_file})

@@ -56,16 +56,17 @@ def read_record(self, arg_dict):
and boolean True, attempting to read a non-existent path will return
the string 'None' instead of raising an exception.
"""
cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
return _run_command(cmd).rstrip("\n")
ret, result = _run_command(cmd)
return result.rstrip("\n")
except pluginlib.PluginError, e:
if arg_dict.get("ignore_missing_path", False):
cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?"
cmd = cmd % arg_dict
ret = _run_command(cmd).strip()
cmd = ["xenstore-exists",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict]
ret, result = _run_command(cmd)
# If the path exists, the cmd should return "0"
if ret != "0":
if ret != 0:
# No such path, so ignore the error and return the
# string 'None', since None can't be marshalled
# over RPC.
@@ -83,8 +84,9 @@ def write_record(self, arg_dict):
you must specify a 'value' key, whose value must be a string. Typically,
you can json-ify more complex values and store the json output.
"""
cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'"
cmd = cmd % arg_dict
cmd = ["xenstore-write",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict,
arg_dict["value"]]
_run_command(cmd)
return arg_dict["value"]

@@ -96,10 +98,10 @@ def list_records(self, arg_dict):
path as the key and the stored value as the value. If the path
doesn't exist, an empty dict is returned.
"""
cmd = "xenstore-ls /local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = cmd.rstrip("/")
dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = ["xenstore-ls", dirpath.rstrip("/")]
try:
recs = _run_command(cmd)
ret, recs = _run_command(cmd)
except pluginlib.PluginError, e:
if "No such file or directory" in "%s" % e:
# Path doesn't exist.
@@ -128,8 +130,9 @@ def delete_record(self, arg_dict):
"""Just like it sounds: it removes the record for the specified
VM and the specified path from xenstore.
"""
cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict
return _run_command(cmd)
cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
ret, result = _run_command(cmd)
return result


def _paths_from_ls(recs):
@@ -171,9 +174,9 @@ def _run_command(cmd):
Otherwise, the output from stdout is returned.
"""
pipe = subprocess.PIPE
proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True)
proc.wait()
proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe,
close_fds=True)
ret = proc.wait()
err = proc.stderr.read()
if err:
raise pluginlib.PluginError(err)

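The xenstore helpers now build argv lists and let _run_command return the exit code alongside stdout, instead of interpolating values into a shell string; that removes the quoting and shell-injection hazards of shell=True. A standalone sketch of the pattern (run_cmd is a placeholder, not the plugin's _run_command):

import subprocess


def run_cmd(cmd):
    # cmd is an argv list, e.g. ['xenstore-read', '/local/domain/0/some/path'];
    # no shell is involved, so spaces or quotes in arguments are passed through
    # literally instead of being re-parsed.
    pipe = subprocess.PIPE
    proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe,
                            close_fds=True)
    out, err = proc.communicate()
    return proc.returncode, out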
15 setup.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import glob
import os
import subprocess
import sys
@@ -86,6 +87,19 @@ try:
except:
pass


def find_data_files(destdir, srcdir):
package_data = []
files = []
for d in glob.glob('%s/*' % (srcdir, )):
if os.path.isdir(d):
package_data += find_data_files(
os.path.join(destdir, os.path.basename(d)), d)
else:
files += [d]
package_data += [(destdir, files)]
return package_data

DistUtilsExtra.auto.setup(name='nova',
version=version.canonical_version_string(),
description='cloud computing fabric controller',
@@ -96,6 +110,7 @@ DistUtilsExtra.auto.setup(name='nova',
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
test_suite='nose.collector',
data_files=find_data_files('share/nova', 'tools'),
scripts=['bin/nova-ajax-console-proxy',
'bin/nova-api',
'bin/nova-compute',

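find_data_files walks a source tree and emits (install_dir, [source files]) pairs, the structure distutils expects for data_files; nested directories recurse into pairs of their own. A hedged example of its output for a small tools/ tree (paths invented):

# Hypothetical result of find_data_files('share/nova', 'tools'):
data_files = [
    ('share/nova/esx', ['tools/esx/guest_tool.py']),
    ('share/nova', ['tools/clean-vlans', 'tools/install_venv.py']),
]
# Each tuple maps an install directory (relative to the prefix) to the list of
# files copied there, which is what DistUtilsExtra.auto.setup(data_files=...)
# consumes above.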
@@ -30,7 +30,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)

from nova import adminclient
from smoketests import flags
from smoketests import base

@@ -47,6 +46,7 @@ TEST_PROJECTNAME = '%sproject' % TEST_PREFIX

class AdminSmokeTestCase(base.SmokeTestCase):
def setUp(self):
import nova_adminclient as adminclient
self.admin = adminclient.NovaAdminClient(
access_key=os.getenv('EC2_ACCESS_KEY'),
secret_key=os.getenv('EC2_SECRET_KEY'),

@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
import boto
import nova
from boto.ec2.connection import EC2Connection
import euca2ools
from euca2ools import Euca2ool, InstanceValidationError, Util

usage_string = """
@@ -93,8 +94,13 @@ def override_connect_ec2(aws_access_key_id=None,
aws_secret_access_key, **kwargs)

# override boto's connect_ec2 method, so that we can use NovaEC2Connection
# (This is for Euca2ools 1.2)
boto.connect_ec2 = override_connect_ec2

# Override Euca2ools' EC2Connection class (which it gets from boto)
# (This is for Euca2ools 1.3)
euca2ools.EC2Connection = NovaEC2Connection


def usage(status=1):
print usage_string

@@ -30,4 +30,5 @@ sqlalchemy-migrate
netaddr
sphinx
glance
nova-adminclient
suds==0.4