Remove Deprecated EC2 and ObjectStore impl/tests
In Id7936be290b6febd18deb4c2db8ea4d678d4d9b1, we removed entries from api-paste.ini for EC2 API service. In this review we remove all the unnecessary code, docs and tests associated with objectstore and ec2 service. Note that this does not cleanup the Instance object or change any of the versioned objects. We just drop any code associated with testing the REST endpoint(s) that are no longer needed. Also added shims such that the api-paste.ini from liberty will still work (grenade job) and added logs and response messages for prompting administrators to cleanup their old api-paste.ini and switching to the stand alone EC2 API project for their needs. Change-Id: I8bf7cbaa7015bb61656ab90ccc8f944aaeebb095
This commit is contained in:
parent
b3879bd199
commit
4140eb4004
@ -111,8 +111,6 @@ modindex_common_prefix = ['nova.']
|
||||
man_pages = [
|
||||
('man/nova-all', 'nova-all', u'Cloud controller fabric',
|
||||
[u'OpenStack'], 1),
|
||||
('man/nova-api-ec2', 'nova-api-ec2', u'Cloud controller fabric',
|
||||
[u'OpenStack'], 1),
|
||||
('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
|
||||
[u'OpenStack'], 1),
|
||||
('man/nova-api-os-compute', 'nova-api-os-compute',
|
||||
@ -143,8 +141,6 @@ man_pages = [
|
||||
[u'OpenStack'], 1),
|
||||
('man/nova-serialproxy', 'nova-serialproxy', u'Cloud controller fabric',
|
||||
[u'OpenStack'], 1),
|
||||
('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric',
|
||||
[u'OpenStack'], 1),
|
||||
('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric',
|
||||
[u'OpenStack'], 1),
|
||||
('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
|
||||
|
@ -26,7 +26,6 @@ Reference
|
||||
:maxdepth: 1
|
||||
|
||||
nova-all
|
||||
nova-api-ec2
|
||||
nova-api-metadata
|
||||
nova-api-os-compute
|
||||
nova-api
|
||||
@ -41,7 +40,6 @@ Reference
|
||||
nova-manage
|
||||
nova-network
|
||||
nova-novncproxy
|
||||
nova-objectstore
|
||||
nova-rootwrap
|
||||
nova-scheduler
|
||||
nova-spicehtml5proxy
|
||||
|
@ -1,48 +0,0 @@
|
||||
============
|
||||
nova-api-ec2
|
||||
============
|
||||
|
||||
----------------------------
|
||||
Server for the Nova EC2 API
|
||||
----------------------------
|
||||
|
||||
:Author: openstack@lists.openstack.org
|
||||
:Date: 2012-09-27
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version: 2012.1
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
SYNOPSIS
|
||||
========
|
||||
|
||||
nova-api-ec2 [options]
|
||||
|
||||
DESCRIPTION
|
||||
===========
|
||||
|
||||
nova-api-ec2 is a server daemon that serves the Nova EC2 API
|
||||
|
||||
OPTIONS
|
||||
=======
|
||||
|
||||
**General options**
|
||||
|
||||
FILES
|
||||
========
|
||||
|
||||
* /etc/nova/nova.conf
|
||||
* /etc/nova/api-paste.ini
|
||||
* /etc/nova/policy.json
|
||||
* /etc/nova/rootwrap.conf
|
||||
* /etc/nova/rootwrap.d/
|
||||
|
||||
SEE ALSO
|
||||
========
|
||||
|
||||
* `OpenStack Nova <http://nova.openstack.org>`__
|
||||
|
||||
BUGS
|
||||
====
|
||||
|
||||
* Nova bugs are managed at Launchpad `Bugs : Nova <https://bugs.launchpad.net/nova>`__
|
@ -1,55 +0,0 @@
|
||||
================
|
||||
nova-objectstore
|
||||
================
|
||||
|
||||
-----------------------------
|
||||
Nova Objectstore Server
|
||||
-----------------------------
|
||||
|
||||
:Author: openstack@lists.openstack.org
|
||||
:Date: 2012-09-27
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version: 2012.1
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
SYNOPSIS
|
||||
========
|
||||
|
||||
nova-objectstore [options]
|
||||
|
||||
DESCRIPTION
|
||||
===========
|
||||
|
||||
Implementation of an S3-like storage server based on local files.
|
||||
|
||||
Useful to test features that will eventually run on S3, or if you want to
|
||||
run something locally that was once running on S3.
|
||||
|
||||
We don't support all the features of S3, but it does work with the
|
||||
standard S3 client for the most basic semantics.
|
||||
|
||||
Used for testing when do not have OpenStack Swift installed.
|
||||
|
||||
OPTIONS
|
||||
=======
|
||||
|
||||
**General options**
|
||||
|
||||
FILES
|
||||
========
|
||||
|
||||
* /etc/nova/nova.conf
|
||||
* /etc/nova/policy.json
|
||||
* /etc/nova/rootwrap.conf
|
||||
* /etc/nova/rootwrap.d/
|
||||
|
||||
SEE ALSO
|
||||
========
|
||||
|
||||
* `OpenStack Nova <http://nova.openstack.org>`__
|
||||
|
||||
BUGS
|
||||
====
|
||||
|
||||
* Nova bugs are managed at Launchpad `Bugs : Nova <https://bugs.launchpad.net/nova>`__
|
@ -13,640 +13,37 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Starting point for routing EC2 requests.
|
||||
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_context import context as common_context
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_service import sslutils
|
||||
from oslo_utils import importutils
|
||||
from oslo_utils import netutils
|
||||
from oslo_utils import timeutils
|
||||
import requests
|
||||
import six
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from nova.api.ec2 import apirequest
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova.api.ec2 import faults
|
||||
from nova.api import validator
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.i18n import _LI
|
||||
from nova.i18n import _LW
|
||||
from nova.openstack.common import memorycache
|
||||
from nova import wsgi
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ec2_opts = [
|
||||
cfg.IntOpt('lockout_attempts',
|
||||
default=5,
|
||||
help='Number of failed auths before lockout.'),
|
||||
cfg.IntOpt('lockout_minutes',
|
||||
default=15,
|
||||
help='Number of minutes to lockout if triggered.'),
|
||||
cfg.IntOpt('lockout_window',
|
||||
default=15,
|
||||
help='Number of minutes for lockout window.'),
|
||||
cfg.StrOpt('keystone_ec2_url',
|
||||
default='http://localhost:5000/v2.0/ec2tokens',
|
||||
help='URL to get token from ec2 request.'),
|
||||
cfg.BoolOpt('ec2_private_dns_show_ip',
|
||||
default=False,
|
||||
help='Return the IP address as private dns hostname in '
|
||||
'describe instances'),
|
||||
cfg.BoolOpt('ec2_strict_validation',
|
||||
default=True,
|
||||
help='Validate security group names'
|
||||
' according to EC2 specification'),
|
||||
cfg.IntOpt('ec2_timestamp_expiry',
|
||||
default=300,
|
||||
help='Time in seconds before ec2 timestamp expires'),
|
||||
cfg.BoolOpt('keystone_ec2_insecure', default=False, help='Disable SSL '
|
||||
'certificate verification.'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(ec2_opts)
|
||||
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
|
||||
sslutils.is_enabled(CONF)
|
||||
_DEPRECATION_MESSAGE = ('The in tree EC2 API has been removed in Mitaka. '
|
||||
'Please remove entries from api-paste.ini')
|
||||
|
||||
|
||||
# Fault Wrapper around all EC2 requests
|
||||
class FaultWrapper(wsgi.Middleware):
|
||||
"""Calls the middleware stack, captures any exceptions into faults."""
|
||||
class DeprecatedMiddleware(wsgi.Middleware):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(DeprecatedMiddleware, self).__init__(args[0])
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
try:
|
||||
return req.get_response(self.application)
|
||||
except Exception:
|
||||
LOG.exception(_LE("FaultWrapper error"))
|
||||
return faults.Fault(webob.exc.HTTPInternalServerError())
|
||||
return webob.exc.HTTPException(message=_DEPRECATION_MESSAGE)
|
||||
|
||||
|
||||
class RequestLogging(wsgi.Middleware):
|
||||
"""Access-Log akin logging for all EC2 API requests."""
|
||||
|
||||
class DeprecatedApplication(wsgi.Application):
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
start = timeutils.utcnow()
|
||||
rv = req.get_response(self.application)
|
||||
self.log_request_completion(rv, req, start)
|
||||
return rv
|
||||
|
||||
def log_request_completion(self, response, request, start):
|
||||
apireq = request.environ.get('ec2.request', None)
|
||||
if apireq:
|
||||
controller = apireq.controller
|
||||
action = apireq.action
|
||||
else:
|
||||
controller = None
|
||||
action = None
|
||||
ctxt = request.environ.get('nova.context', None)
|
||||
delta = timeutils.utcnow() - start
|
||||
seconds = delta.seconds
|
||||
microseconds = delta.microseconds
|
||||
LOG.info(
|
||||
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
|
||||
seconds,
|
||||
microseconds,
|
||||
request.remote_addr,
|
||||
request.method,
|
||||
"%s%s" % (request.script_name, request.path_info),
|
||||
controller,
|
||||
action,
|
||||
response.status_int,
|
||||
request.user_agent,
|
||||
request.content_type,
|
||||
response.content_type,
|
||||
context=ctxt) # noqa
|
||||
|
||||
|
||||
class Lockout(wsgi.Middleware):
|
||||
"""Lockout for x minutes on y failed auths in a z minute period.
|
||||
|
||||
x = lockout_timeout flag
|
||||
y = lockout_window flag
|
||||
z = lockout_attempts flag
|
||||
|
||||
Uses memcached if lockout_memcached_servers flag is set, otherwise it
|
||||
uses a very simple in-process cache. Due to the simplicity of
|
||||
the implementation, the timeout window is started with the first
|
||||
failed request, so it will block if there are x failed logins within
|
||||
that period.
|
||||
|
||||
There is a possible race condition where simultaneous requests could
|
||||
sneak in before the lockout hits, but this is extremely rare and would
|
||||
only result in a couple of extra failed attempts.
|
||||
"""
|
||||
|
||||
def __init__(self, application):
|
||||
"""middleware can use fake for testing."""
|
||||
self.mc = memorycache.get_client()
|
||||
super(Lockout, self).__init__(application)
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
access_key = str(req.params['AWSAccessKeyId'])
|
||||
failures_key = "authfailures-%s" % access_key
|
||||
failures = int(self.mc.get(failures_key) or 0)
|
||||
if failures >= CONF.lockout_attempts:
|
||||
detail = _("Too many failed authentications.")
|
||||
raise webob.exc.HTTPForbidden(explanation=detail)
|
||||
res = req.get_response(self.application)
|
||||
if res.status_int == 403:
|
||||
failures = self.mc.incr(failures_key)
|
||||
if failures is None:
|
||||
# NOTE(vish): To use incr, failures has to be a string.
|
||||
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
|
||||
elif failures >= CONF.lockout_attempts:
|
||||
LOG.warning(_LW('Access key %(access_key)s has had '
|
||||
'%(failures)d failed authentications and '
|
||||
'will be locked out for %(lock_mins)d '
|
||||
'minutes.'),
|
||||
{'access_key': access_key,
|
||||
'failures': failures,
|
||||
'lock_mins': CONF.lockout_minutes})
|
||||
self.mc.set(failures_key, str(failures),
|
||||
time=CONF.lockout_minutes * 60)
|
||||
return res
|
||||
|
||||
|
||||
class EC2KeystoneAuth(wsgi.Middleware):
|
||||
"""Authenticate an EC2 request with keystone and convert to context."""
|
||||
|
||||
def _get_signature(self, req):
|
||||
"""Extract the signature from the request.
|
||||
|
||||
This can be a get/post variable or for version 4 also in a header
|
||||
called 'Authorization'.
|
||||
- params['Signature'] == version 0,1,2,3
|
||||
- params['X-Amz-Signature'] == version 4
|
||||
- header 'Authorization' == version 4
|
||||
"""
|
||||
sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
|
||||
if sig is None and 'Authorization' in req.headers:
|
||||
auth_str = req.headers['Authorization']
|
||||
sig = auth_str.partition("Signature=")[2].split(',')[0]
|
||||
|
||||
return sig
|
||||
|
||||
def _get_access(self, req):
|
||||
"""Extract the access key identifier.
|
||||
|
||||
For version 0/1/2/3 this is passed as the AccessKeyId parameter, for
|
||||
version 4 it is either an X-Amz-Credential parameter or a Credential=
|
||||
field in the 'Authorization' header string.
|
||||
"""
|
||||
access = req.params.get('AWSAccessKeyId')
|
||||
if access is None:
|
||||
cred_param = req.params.get('X-Amz-Credential')
|
||||
if cred_param:
|
||||
access = cred_param.split("/")[0]
|
||||
|
||||
if access is None and 'Authorization' in req.headers:
|
||||
auth_str = req.headers['Authorization']
|
||||
cred_str = auth_str.partition("Credential=")[2].split(',')[0]
|
||||
access = cred_str.split("/")[0]
|
||||
|
||||
return access
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
# NOTE(alevine) We need to calculate the hash here because
|
||||
# subsequent access to request modifies the req.body so the hash
|
||||
# calculation will yield invalid results.
|
||||
body_hash = hashlib.sha256(req.body).hexdigest()
|
||||
|
||||
request_id = common_context.generate_request_id()
|
||||
signature = self._get_signature(req)
|
||||
if not signature:
|
||||
msg = _("Signature not provided")
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=400)
|
||||
access = self._get_access(req)
|
||||
if not access:
|
||||
msg = _("Access key not provided")
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=400)
|
||||
|
||||
if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers:
|
||||
auth_params = {}
|
||||
else:
|
||||
# Make a copy of args for authentication and signature verification
|
||||
auth_params = dict(req.params)
|
||||
# Not part of authentication args
|
||||
auth_params.pop('Signature', None)
|
||||
|
||||
cred_dict = {
|
||||
'access': access,
|
||||
'signature': signature,
|
||||
'host': req.host,
|
||||
'verb': req.method,
|
||||
'path': req.path,
|
||||
'params': auth_params,
|
||||
'headers': req.headers,
|
||||
'body_hash': body_hash
|
||||
}
|
||||
if "ec2" in CONF.keystone_ec2_url:
|
||||
creds = {'ec2Credentials': cred_dict}
|
||||
else:
|
||||
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
|
||||
creds_json = jsonutils.dumps(creds)
|
||||
headers = {'Content-Type': 'application/json'}
|
||||
|
||||
verify = not CONF.keystone_ec2_insecure
|
||||
if verify and CONF.ssl.ca_file:
|
||||
verify = CONF.ssl.ca_file
|
||||
|
||||
cert = None
|
||||
if CONF.ssl.cert_file and CONF.ssl.key_file:
|
||||
cert = (CONF.ssl.cert_file, CONF.ssl.key_file)
|
||||
elif CONF.ssl.cert_file:
|
||||
cert = CONF.ssl.cert_file
|
||||
|
||||
response = requests.request('POST', CONF.keystone_ec2_url,
|
||||
data=creds_json, headers=headers,
|
||||
verify=verify, cert=cert)
|
||||
status_code = response.status_code
|
||||
if status_code != 200:
|
||||
msg = response.reason
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=status_code)
|
||||
result = response.json()
|
||||
|
||||
try:
|
||||
token_id = result['access']['token']['id']
|
||||
user_id = result['access']['user']['id']
|
||||
project_id = result['access']['token']['tenant']['id']
|
||||
user_name = result['access']['user'].get('name')
|
||||
project_name = result['access']['token']['tenant'].get('name')
|
||||
roles = [role['name'] for role
|
||||
in result['access']['user']['roles']]
|
||||
except (AttributeError, KeyError) as e:
|
||||
LOG.error(_LE("Keystone failure: %s"), e)
|
||||
msg = _("Failure parsing response from keystone: %s") % e
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=400)
|
||||
|
||||
remote_address = req.remote_addr
|
||||
if CONF.use_forwarded_for:
|
||||
remote_address = req.headers.get('X-Forwarded-For',
|
||||
remote_address)
|
||||
|
||||
catalog = result['access']['serviceCatalog']
|
||||
ctxt = context.RequestContext(user_id,
|
||||
project_id,
|
||||
user_name=user_name,
|
||||
project_name=project_name,
|
||||
roles=roles,
|
||||
auth_token=token_id,
|
||||
remote_address=remote_address,
|
||||
service_catalog=catalog)
|
||||
|
||||
req.environ['nova.context'] = ctxt
|
||||
|
||||
return self.application
|
||||
|
||||
|
||||
class NoAuth(wsgi.Middleware):
|
||||
"""Add user:project as 'nova.context' to WSGI environ."""
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
if 'AWSAccessKeyId' not in req.params:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
|
||||
project_id = project_id or user_id
|
||||
remote_address = req.remote_addr
|
||||
if CONF.use_forwarded_for:
|
||||
remote_address = req.headers.get('X-Forwarded-For', remote_address)
|
||||
ctx = context.RequestContext(user_id,
|
||||
project_id,
|
||||
is_admin=True,
|
||||
remote_address=remote_address)
|
||||
|
||||
req.environ['nova.context'] = ctx
|
||||
return self.application
|
||||
|
||||
|
||||
class Requestify(wsgi.Middleware):
|
||||
|
||||
def __init__(self, app, controller):
|
||||
super(Requestify, self).__init__(app)
|
||||
self.controller = importutils.import_object(controller)
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
# Not all arguments are mandatory with v4 signatures, as some data is
|
||||
# passed in the header, not query arguments.
|
||||
required_args = ['Action', 'Version']
|
||||
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
|
||||
'SignatureVersion', 'Version', 'Timestamp']
|
||||
args = dict(req.params)
|
||||
try:
|
||||
expired = ec2utils.is_ec2_timestamp_expired(req.params,
|
||||
expires=CONF.ec2_timestamp_expiry)
|
||||
if expired:
|
||||
msg = _("Timestamp failed validation.")
|
||||
LOG.debug("Timestamp failed validation")
|
||||
raise webob.exc.HTTPForbidden(explanation=msg)
|
||||
|
||||
# Raise KeyError if omitted
|
||||
action = req.params['Action']
|
||||
# Fix bug lp:720157 for older (version 1) clients
|
||||
# If not present assume v4
|
||||
version = req.params.get('SignatureVersion', 4)
|
||||
if int(version) == 1:
|
||||
non_args.remove('SignatureMethod')
|
||||
if 'SignatureMethod' in args:
|
||||
args.pop('SignatureMethod')
|
||||
for non_arg in non_args:
|
||||
if non_arg in required_args:
|
||||
# Remove, but raise KeyError if omitted
|
||||
args.pop(non_arg)
|
||||
else:
|
||||
args.pop(non_arg, None)
|
||||
except KeyError:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
except exception.InvalidRequest as err:
|
||||
raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
|
||||
|
||||
LOG.debug('action: %s', action)
|
||||
for key, value in args.items():
|
||||
LOG.debug('arg: %(key)s\t\tval: %(value)s',
|
||||
{'key': key, 'value': value})
|
||||
|
||||
# Success!
|
||||
api_request = apirequest.APIRequest(self.controller, action,
|
||||
req.params['Version'], args)
|
||||
req.environ['ec2.request'] = api_request
|
||||
return self.application
|
||||
|
||||
|
||||
class Authorizer(wsgi.Middleware):
|
||||
|
||||
"""Authorize an EC2 API request.
|
||||
|
||||
Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
|
||||
executed in nova.context.
|
||||
"""
|
||||
|
||||
def __init__(self, application):
|
||||
super(Authorizer, self).__init__(application)
|
||||
self.action_roles = {
|
||||
'CloudController': {
|
||||
'DescribeAvailabilityZones': ['all'],
|
||||
'DescribeRegions': ['all'],
|
||||
'DescribeSnapshots': ['all'],
|
||||
'DescribeKeyPairs': ['all'],
|
||||
'CreateKeyPair': ['all'],
|
||||
'DeleteKeyPair': ['all'],
|
||||
'DescribeSecurityGroups': ['all'],
|
||||
'ImportKeyPair': ['all'],
|
||||
'AuthorizeSecurityGroupIngress': ['netadmin'],
|
||||
'RevokeSecurityGroupIngress': ['netadmin'],
|
||||
'CreateSecurityGroup': ['netadmin'],
|
||||
'DeleteSecurityGroup': ['netadmin'],
|
||||
'GetConsoleOutput': ['projectmanager', 'sysadmin'],
|
||||
'DescribeVolumes': ['projectmanager', 'sysadmin'],
|
||||
'CreateVolume': ['projectmanager', 'sysadmin'],
|
||||
'AttachVolume': ['projectmanager', 'sysadmin'],
|
||||
'DetachVolume': ['projectmanager', 'sysadmin'],
|
||||
'DescribeInstances': ['all'],
|
||||
'DescribeAddresses': ['all'],
|
||||
'AllocateAddress': ['netadmin'],
|
||||
'ReleaseAddress': ['netadmin'],
|
||||
'AssociateAddress': ['netadmin'],
|
||||
'DisassociateAddress': ['netadmin'],
|
||||
'RunInstances': ['projectmanager', 'sysadmin'],
|
||||
'TerminateInstances': ['projectmanager', 'sysadmin'],
|
||||
'RebootInstances': ['projectmanager', 'sysadmin'],
|
||||
'UpdateInstance': ['projectmanager', 'sysadmin'],
|
||||
'StartInstances': ['projectmanager', 'sysadmin'],
|
||||
'StopInstances': ['projectmanager', 'sysadmin'],
|
||||
'DeleteVolume': ['projectmanager', 'sysadmin'],
|
||||
'DescribeImages': ['all'],
|
||||
'DeregisterImage': ['projectmanager', 'sysadmin'],
|
||||
'RegisterImage': ['projectmanager', 'sysadmin'],
|
||||
'DescribeImageAttribute': ['all'],
|
||||
'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
|
||||
'UpdateImage': ['projectmanager', 'sysadmin'],
|
||||
'CreateImage': ['projectmanager', 'sysadmin'],
|
||||
},
|
||||
'AdminController': {
|
||||
# All actions have the same permission: ['none'] (the default)
|
||||
# superusers will be allowed to run them
|
||||
# all others will get HTTPUnauthorized.
|
||||
},
|
||||
}
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
context = req.environ['nova.context']
|
||||
controller = req.environ['ec2.request'].controller.__class__.__name__
|
||||
action = req.environ['ec2.request'].action
|
||||
allowed_roles = self.action_roles[controller].get(action, ['none'])
|
||||
if self._matches_any_role(context, allowed_roles):
|
||||
return self.application
|
||||
else:
|
||||
LOG.info(_LI('Unauthorized request for controller=%(controller)s '
|
||||
'and action=%(action)s'),
|
||||
{'controller': controller, 'action': action},
|
||||
context=context)
|
||||
raise webob.exc.HTTPUnauthorized()
|
||||
|
||||
def _matches_any_role(self, context, roles):
|
||||
"""Return True if any role in roles is allowed in context."""
|
||||
if context.is_admin:
|
||||
return True
|
||||
if 'all' in roles:
|
||||
return True
|
||||
if 'none' in roles:
|
||||
return False
|
||||
return any(role in context.roles for role in roles)
|
||||
|
||||
|
||||
class Validator(wsgi.Middleware):
|
||||
|
||||
def validate_ec2_id(val):
|
||||
if not validator.validate_str()(val):
|
||||
return False
|
||||
try:
|
||||
ec2utils.ec2_id_to_id(val)
|
||||
except exception.InvalidEc2Id:
|
||||
return False
|
||||
return True
|
||||
|
||||
validator.validate_ec2_id = validate_ec2_id
|
||||
|
||||
validator.DEFAULT_VALIDATOR = {
|
||||
'instance_id': validator.validate_ec2_id,
|
||||
'volume_id': validator.validate_ec2_id,
|
||||
'image_id': validator.validate_ec2_id,
|
||||
'attribute': validator.validate_str(),
|
||||
'image_location': validator.validate_image_path,
|
||||
'public_ip': netutils.is_valid_ipv4,
|
||||
'region_name': validator.validate_str(),
|
||||
'group_name': validator.validate_str(max_length=255),
|
||||
'group_description': validator.validate_str(max_length=255),
|
||||
'size': validator.validate_int(),
|
||||
'user_data': validator.validate_user_data
|
||||
}
|
||||
|
||||
def __init__(self, application):
|
||||
super(Validator, self).__init__(application)
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
if validator.validate(req.environ['ec2.request'].args,
|
||||
validator.DEFAULT_VALIDATOR):
|
||||
return self.application
|
||||
else:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
|
||||
|
||||
def exception_to_ec2code(ex):
|
||||
"""Helper to extract EC2 error code from exception.
|
||||
|
||||
For other than EC2 exceptions (those without ec2_code attribute),
|
||||
use exception name.
|
||||
"""
|
||||
if hasattr(ex, 'ec2_code'):
|
||||
code = ex.ec2_code
|
||||
else:
|
||||
code = type(ex).__name__
|
||||
return code
|
||||
|
||||
|
||||
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
|
||||
"""Return an EC2 error response based on passed exception and log
|
||||
the exception on an appropriate log level:
|
||||
|
||||
* DEBUG: expected errors
|
||||
* ERROR: unexpected errors
|
||||
|
||||
All expected errors are treated as client errors and 4xx HTTP
|
||||
status codes are always returned for them.
|
||||
|
||||
Unexpected 5xx errors may contain sensitive information,
|
||||
suppress their messages for security.
|
||||
"""
|
||||
if not code:
|
||||
code = exception_to_ec2code(ex)
|
||||
status = getattr(ex, 'code', None)
|
||||
if not status:
|
||||
status = 500
|
||||
|
||||
if unexpected:
|
||||
log_fun = LOG.error
|
||||
log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s")
|
||||
else:
|
||||
log_fun = LOG.debug
|
||||
log_msg = "%(ex_name)s raised: %(ex_str)s"
|
||||
# NOTE(jruzicka): For compatibility with EC2 API, treat expected
|
||||
# exceptions as client (4xx) errors. The exception error code is 500
|
||||
# by default and most exceptions inherit this from NovaException even
|
||||
# though they are actually client errors in most cases.
|
||||
if status >= 500:
|
||||
status = 400
|
||||
|
||||
context = req.environ['nova.context']
|
||||
request_id = context.request_id
|
||||
log_msg_args = {
|
||||
'ex_name': type(ex).__name__,
|
||||
'ex_str': ex
|
||||
}
|
||||
log_fun(log_msg, log_msg_args, context=context)
|
||||
|
||||
if ex.args and not message and (not unexpected or status < 500):
|
||||
message = six.text_type(ex.args[0])
|
||||
if unexpected:
|
||||
# Log filtered environment for unexpected errors.
|
||||
env = req.environ.copy()
|
||||
for k in list(env.keys()):
|
||||
if not isinstance(env[k], six.string_types):
|
||||
env.pop(k)
|
||||
log_fun(_LE('Environment: %s'), jsonutils.dumps(env))
|
||||
if not message:
|
||||
message = _('Unknown error occurred.')
|
||||
return faults.ec2_error_response(request_id, code, message, status=status)
|
||||
|
||||
|
||||
class Executor(wsgi.Application):
|
||||
|
||||
"""Execute an EC2 API request.
|
||||
|
||||
Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
|
||||
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
|
||||
response, or a 400 upon failure.
|
||||
"""
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
context = req.environ['nova.context']
|
||||
api_request = req.environ['ec2.request']
|
||||
try:
|
||||
result = api_request.invoke(context)
|
||||
except exception.InstanceNotFound as ex:
|
||||
ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
|
||||
message = ex.msg_fmt % {'instance_id': ec2_id}
|
||||
return ec2_error_ex(ex, req, message=message)
|
||||
except exception.VolumeNotFound as ex:
|
||||
ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
|
||||
message = ex.msg_fmt % {'volume_id': ec2_id}
|
||||
return ec2_error_ex(ex, req, message=message)
|
||||
except exception.SnapshotNotFound as ex:
|
||||
ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
|
||||
message = ex.msg_fmt % {'snapshot_id': ec2_id}
|
||||
return ec2_error_ex(ex, req, message=message)
|
||||
except (exception.CannotDisassociateAutoAssignedFloatingIP,
|
||||
exception.FloatingIpAssociated,
|
||||
exception.FloatingIpNotFound,
|
||||
exception.FloatingIpBadRequest,
|
||||
exception.ImageNotActive,
|
||||
exception.InvalidInstanceIDMalformed,
|
||||
exception.InvalidVolumeIDMalformed,
|
||||
exception.InvalidKeypair,
|
||||
exception.InvalidParameterValue,
|
||||
exception.InvalidPortRange,
|
||||
exception.InvalidVolume,
|
||||
exception.KeyPairExists,
|
||||
exception.KeypairNotFound,
|
||||
exception.MissingParameter,
|
||||
exception.NoFloatingIpInterface,
|
||||
exception.NoMoreFixedIps,
|
||||
exception.Forbidden,
|
||||
exception.QuotaError,
|
||||
exception.SecurityGroupExists,
|
||||
exception.SecurityGroupLimitExceeded,
|
||||
exception.SecurityGroupRuleExists,
|
||||
exception.VolumeUnattached,
|
||||
# Following aren't translated to valid EC2 errors.
|
||||
exception.ImageNotFound,
|
||||
exception.ImageNotFoundEC2,
|
||||
exception.InvalidAttribute,
|
||||
exception.InvalidRequest,
|
||||
exception.NotFound) as ex:
|
||||
return ec2_error_ex(ex, req)
|
||||
except Exception as ex:
|
||||
return ec2_error_ex(ex, req, unexpected=True)
|
||||
else:
|
||||
resp = webob.Response()
|
||||
resp.status = 200
|
||||
resp.headers['Content-Type'] = 'text/xml'
|
||||
resp.body = str(result)
|
||||
return resp
|
||||
return webob.exc.HTTPException(message=_DEPRECATION_MESSAGE)
|
||||
|
||||
|
||||
FaultWrapper = DeprecatedMiddleware
|
||||
RequestLogging = DeprecatedMiddleware
|
||||
Lockout = DeprecatedMiddleware
|
||||
EC2KeystoneAuth = DeprecatedMiddleware
|
||||
NoAuth = DeprecatedMiddleware
|
||||
Requestify = DeprecatedMiddleware
|
||||
Authorizer = DeprecatedMiddleware
|
||||
Validator = DeprecatedMiddleware
|
||||
Executor = DeprecatedApplication
|
||||
|
@ -1,142 +0,0 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
APIRequest class
|
||||
"""
|
||||
|
||||
import datetime
|
||||
# TODO(termie): replace minidom with etree
|
||||
from xml.dom import minidom
|
||||
|
||||
from lxml import etree
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import encodeutils
|
||||
import six
|
||||
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova import exception
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _underscore_to_camelcase(str):
|
||||
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
|
||||
|
||||
|
||||
def _underscore_to_xmlcase(str):
|
||||
res = _underscore_to_camelcase(str)
|
||||
return res[:1].lower() + res[1:]
|
||||
|
||||
|
||||
def _database_to_isoformat(datetimeobj):
|
||||
"""Return a xs:dateTime parsable string from datatime."""
|
||||
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
|
||||
|
||||
|
||||
class APIRequest(object):
    """A single parsed EC2 API request bound to a controller action.

    Dispatches the CamelCase EC2 action to the matching snake_case method
    on the controller and renders the result as an EC2-style XML document.
    """

    def __init__(self, controller, action, version, args):
        # controller: object exposing snake_case handler methods.
        # action: CamelCase EC2 action name from the request.
        # version: EC2 API version string, used in the response xmlns.
        # args: flat dict of raw request parameters (dotted keys).
        self.controller = controller
        self.action = action
        self.version = version
        self.args = args

    def invoke(self, context):
        """Dispatch the action and return the rendered XML response.

        :param context: nova request context for the call
        :raises exception.InvalidRequest: if the controller has no handler
            method for the requested action
        """
        try:
            method = getattr(self.controller,
                             ec2utils.camelcase_to_underscore(self.action))
        except AttributeError:
            LOG.debug('Unsupported API request: controller = '
                      '%(controller)s, action = %(action)s',
                      {'controller': self.controller,
                       'action': self.action})
            # TODO(gundlach): Raise custom exception, trap in apiserver,
            # and reraise as 400 error.
            raise exception.InvalidRequest()

        args = ec2utils.dict_from_dotted_str(self.args.items())

        for key in args.keys():
            # NOTE(vish): Turn numeric dict keys into lists
            if isinstance(args[key], dict):
                if args[key] != {} and list(args[key].keys())[0].isdigit():
                    # NOTE: dict.items() returns a view on Python 3 and has
                    # no .sort() method, so sort the pairs explicitly.  The
                    # ordering matches the old list.sort(): lexical by the
                    # (digit-string) key.
                    items = sorted(args[key].items())
                    args[key] = [v for k, v in items]

        result = method(context, **args)
        return self._render_response(result, context.request_id)

    def _render_response(self, response_data, request_id):
        # Build the <ActionResponse> document with minidom, then re-parse
        # with lxml purely to pretty-print the serialized output.
        xml = minidom.Document()

        response_el = xml.createElement(self.action + 'Response')
        response_el.setAttribute(
            'xmlns', 'http://ec2.amazonaws.com/doc/%s/' % self.version)
        request_id_el = xml.createElement('requestId')
        request_id_el.appendChild(xml.createTextNode(request_id))
        response_el.appendChild(request_id_el)
        if response_data is True:
            # A plain boolean success renders as <return>true</return>.
            self._render_dict(xml, response_el, {'return': 'true'})
        else:
            self._render_dict(xml, response_el, response_data)

        xml.appendChild(response_el)

        response = xml.toxml()
        root = etree.fromstring(response)
        response = etree.tostring(root, pretty_print=True)

        xml.unlink()

        # Don't write private key to log
        if self.action != "CreateKeyPair":
            LOG.debug(response)
        else:
            LOG.debug("CreateKeyPair: Return Private Key")

        return response

    def _render_dict(self, xml, el, data):
        # Append one child element per key of *data* to *el*.
        try:
            for key in data.keys():
                val = data[key]
                el.appendChild(self._render_data(xml, key, val))
        except Exception:
            LOG.debug(data)
            raise

    def _render_data(self, xml, el_name, data):
        # Recursively convert lists, dicts, objects (via __dict__), bools,
        # datetimes and scalars into a DOM element named after the
        # lowerCamelCased key; None produces an empty element.
        el_name = _underscore_to_xmlcase(el_name)
        data_el = xml.createElement(el_name)

        if isinstance(data, list):
            for item in data:
                data_el.appendChild(self._render_data(xml, 'item', item))
        elif isinstance(data, dict):
            self._render_dict(xml, data_el, data)
        elif hasattr(data, '__dict__'):
            self._render_dict(xml, data_el, data.__dict__)
        elif isinstance(data, bool):
            data_el.appendChild(xml.createTextNode(str(data).lower()))
        elif isinstance(data, datetime.datetime):
            data_el.appendChild(
                xml.createTextNode(_database_to_isoformat(data)))
        elif data is not None:
            data_el.appendChild(xml.createTextNode(
                encodeutils.safe_encode(six.text_type(data))))

        return data_el
|
File diff suppressed because it is too large
Load Diff
@ -1,73 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
import nova.api.ec2
|
||||
from nova import context
|
||||
from nova import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def ec2_error_response(request_id, code, message, status=500):
    """Helper to construct an EC2 compatible error response.

    :param request_id: id of the request the error relates to; echoed back
        in the <RequestID> element so clients can correlate it
    :param code: EC2 error code string
    :param message: human readable description of the error
    :param status: HTTP status code for the response (default 500)
    :returns: a webob.Response with an XML body in the EC2 error format
    """
    LOG.debug('EC2 error response: %(code)s: %(message)s',
              {'code': code, 'message': message})
    resp = webob.Response()
    resp.status = status
    resp.headers['Content-Type'] = 'text/xml'
    # All interpolated values are XML-escaped to avoid emitting malformed
    # (or injectable) markup.
    resp.body = str('<?xml version="1.0"?>\n'
                    '<Response><Errors><Error><Code>%s</Code>'
                    '<Message>%s</Message></Error></Errors>'
                    '<RequestID>%s</RequestID></Response>' %
                    (utils.xhtml_escape(utils.utf8(code)),
                     utils.xhtml_escape(utils.utf8(message)),
                     utils.xhtml_escape(utils.utf8(request_id))))
    return resp
|
||||
|
||||
|
||||
class Fault(webob.exc.HTTPException):
    """Captures exception and return REST Response."""

    def __init__(self, exception):
        """Create a response for the given webob.exc.exception."""
        self.wrapped_exc = exception

    @webob.dec.wsgify
    def __call__(self, req):
        """Generate a WSGI response based on the exception passed to ctor."""
        # Translate the wrapped webob exception into an EC2 error code and
        # reuse the exception's HTTP status and explanation text.
        code = nova.api.ec2.exception_to_ec2code(self.wrapped_exc)
        status = self.wrapped_exc.status_int
        message = self.wrapped_exc.explanation

        if status == 501:
            message = "The requested function is not supported"

        # AWSAccessKeyId carries 'user[:project]'; without it we cannot
        # build a request context, so the request itself is malformed.
        if 'AWSAccessKeyId' not in req.params:
            raise webob.exc.HTTPBadRequest()
        user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
        # When no project part is given, the project defaults to the user.
        project_id = project_id or user_id
        remote_address = getattr(req, 'remote_address', '127.0.0.1')
        if CONF.use_forwarded_for:
            # Trust the proxy-supplied client address when so configured.
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      remote_address=remote_address)
        resp = ec2_error_response(ctxt.request_id, code,
                                  message=message, status=status)
        return resp
|
@ -1,56 +0,0 @@
|
||||
# Copyright 2011 Isaku Yamahata <yamahata at valinux co jp>
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Numeric instance status codes as defined by the EC2 API.
PENDING_CODE = 0
RUNNING_CODE = 16
SHUTTING_DOWN_CODE = 32
TERMINATED_CODE = 48
STOPPING_CODE = 64
STOPPED_CODE = 80

# Canonical EC2 instance state names.
PENDING = 'pending'
RUNNING = 'running'
SHUTTING_DOWN = 'shutting-down'
TERMINATED = 'terminated'
STOPPING = 'stopping'
STOPPED = 'stopped'

# non-ec2 value
MIGRATE = 'migrate'
RESIZE = 'resize'
PAUSE = 'pause'
SUSPEND = 'suspend'
RESCUE = 'rescue'

# EC2 API instance status code
_NAME_TO_CODE = {
    PENDING: PENDING_CODE,
    RUNNING: RUNNING_CODE,
    SHUTTING_DOWN: SHUTTING_DOWN_CODE,
    TERMINATED: TERMINATED_CODE,
    STOPPING: STOPPING_CODE,
    STOPPED: STOPPED_CODE,

    # approximation
    MIGRATE: RUNNING_CODE,
    RESIZE: RUNNING_CODE,
    PAUSE: STOPPED_CODE,
    SUSPEND: STOPPED_CODE,
    RESCUE: RUNNING_CODE,
}


def name_to_code(name):
    """Map an instance state name to its EC2 numeric status code."""
    try:
        return _NAME_TO_CODE[name]
    except KeyError:
        # Unknown / nova-only states are reported as 'pending'.
        return PENDING_CODE
|
@ -13,8 +13,6 @@
|
||||
import itertools
|
||||
|
||||
import nova.api.auth
|
||||
import nova.api.ec2
|
||||
import nova.api.ec2.cloud
|
||||
import nova.api.metadata.base
|
||||
import nova.api.metadata.handler
|
||||
import nova.api.metadata.vendordata_json
|
||||
@ -68,7 +66,6 @@ import nova.db.sqlalchemy.api
|
||||
import nova.exception
|
||||
import nova.image.download.file
|
||||
import nova.image.glance
|
||||
import nova.image.s3
|
||||
import nova.ipv6.api
|
||||
import nova.keymgr
|
||||
import nova.keymgr.barbican
|
||||
@ -85,7 +82,6 @@ import nova.network.rpcapi
|
||||
import nova.network.security_group.openstack_driver
|
||||
import nova.notifications
|
||||
import nova.objects.network
|
||||
import nova.objectstore.s3server
|
||||
import nova.paths
|
||||
import nova.pci.request
|
||||
import nova.pci.whitelist
|
||||
@ -129,8 +125,6 @@ def list_opts():
|
||||
[nova.api.metadata.vendordata_json.file_opt],
|
||||
[nova.api.openstack.compute.allow_instance_snapshots_opt],
|
||||
nova.api.auth.auth_opts,
|
||||
nova.api.ec2.cloud.ec2_opts,
|
||||
nova.api.ec2.ec2_opts,
|
||||
nova.api.metadata.base.metadata_opts,
|
||||
nova.api.metadata.handler.metadata_opts,
|
||||
nova.api.openstack.common.osapi_opts,
|
||||
|
@ -74,13 +74,9 @@ def _load_boot_script():
|
||||
with open(CONF.boot_script_template, "r") as shellfile:
|
||||
s = string.Template(shellfile.read())
|
||||
|
||||
CONF.import_opt('ec2_dmz_host', 'nova.api.ec2.cloud')
|
||||
CONF.import_opt('ec2_port', 'nova.api.ec2.cloud')
|
||||
CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
|
||||
|
||||
return s.substitute(cc_dmz=CONF.ec2_dmz_host,
|
||||
cc_port=CONF.ec2_port,
|
||||
dmz_net=CONF.dmz_net,
|
||||
return s.substitute(dmz_net=CONF.dmz_net,
|
||||
dmz_mask=CONF.dmz_mask,
|
||||
num_vpn=CONF.cnt_vpn_clients)
|
||||
|
||||
|
@ -32,7 +32,6 @@ from oslo_log import log as logging
|
||||
from nova import config
|
||||
from nova.i18n import _LE
|
||||
from nova import objects
|
||||
from nova.objectstore import s3server
|
||||
from nova import service
|
||||
from nova import utils
|
||||
from nova.vnc import xvp_proxy
|
||||
@ -62,7 +61,7 @@ def main():
|
||||
except (Exception, SystemExit):
|
||||
LOG.exception(_LE('Failed to load %s-api'), api)
|
||||
|
||||
for mod in [s3server, xvp_proxy]:
|
||||
for mod in [xvp_proxy]:
|
||||
try:
|
||||
launcher.launch_service(mod.get_wsgi_server())
|
||||
except (Exception, SystemExit):
|
||||
|
@ -48,10 +48,6 @@ def main():
|
||||
launcher = service.process_launcher()
|
||||
for api in CONF.enabled_apis:
|
||||
should_use_ssl = api in CONF.enabled_ssl_apis
|
||||
if api == 'ec2':
|
||||
server = service.WSGIService(api, use_ssl=should_use_ssl,
|
||||
max_url_len=16384)
|
||||
else:
|
||||
server = service.WSGIService(api, use_ssl=should_use_ssl)
|
||||
server = service.WSGIService(api, use_ssl=should_use_ssl)
|
||||
launcher.launch_service(server, workers=server.workers or 1)
|
||||
launcher.wait()
|
||||
|
@ -1,41 +0,0 @@
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Daemon for nova objectstore. Supports S3 API."""
|
||||
|
||||
import sys
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_reports import guru_meditation_report as gmr
|
||||
|
||||
from nova import config
|
||||
from nova.objectstore import s3server
|
||||
from nova import service
|
||||
from nova import utils
|
||||
from nova import version
|
||||
|
||||
|
||||
def main():
    """Entry point for the standalone nova-objectstore (S3 API) daemon."""
    config.parse_args(sys.argv)
    logging.setup(config.CONF, "nova")
    utils.monkey_patch()

    # Enable the oslo guru meditation report for post-mortem diagnostics.
    gmr.TextGuruMeditation.setup_autorun(version)

    # Serve the S3 WSGI application until the process is stopped.
    server = s3server.get_wsgi_server()
    service.serve(server)
    service.wait()
|
@ -1,24 +0,0 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
:mod:`nova.objectstore` -- S3-type object store
|
||||
=====================================================
|
||||
|
||||
.. automodule:: nova.objectstore
|
||||
:platform: Unix
|
||||
:synopsis: Currently a trivial file-based system, getting extended w/ swift.
|
||||
"""
|
@ -1,383 +0,0 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2010 OpenStack Foundation
|
||||
# Copyright 2009 Facebook
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Implementation of an S3-like storage server based on local files.
|
||||
|
||||
Useful to test features that will eventually run on S3, or if you want to
|
||||
run something locally that was once running on S3.
|
||||
|
||||
We don't support all the features of S3, but it does work with the
|
||||
standard S3 client for the most basic semantics. To use the standard
|
||||
S3 client with this module::
|
||||
|
||||
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
|
||||
is_secure=False)
|
||||
c.create_bucket("mybucket")
|
||||
c.put("mybucket", "mykey", "a value")
|
||||
print c.get("mybucket", "mykey").body
|
||||
|
||||
"""
|
||||
|
||||
import bisect
|
||||
import datetime
|
||||
import os
|
||||
import os.path
|
||||
import urllib
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_log import versionutils
|
||||
from oslo_utils import fileutils
|
||||
import routes
|
||||
import six
|
||||
import webob
|
||||
|
||||
from nova.i18n import _LW
|
||||
from nova import paths
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
s3_opts = [
|
||||
cfg.StrOpt('buckets_path',
|
||||
default=paths.state_path_def('buckets'),
|
||||
help='Path to S3 buckets'),
|
||||
cfg.StrOpt('s3_listen',
|
||||
default="0.0.0.0",
|
||||
help='IP address for S3 API to listen'),
|
||||
cfg.IntOpt('s3_listen_port',
|
||||
default=3333,
|
||||
min=1,
|
||||
max=65535,
|
||||
help='Port for S3 API to listen'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(s3_opts)
|
||||
|
||||
|
||||
def get_wsgi_server():
    """Build the WSGI server wrapping the S3 objectstore application.

    The listen address and port come from the ``s3_listen`` and
    ``s3_listen_port`` configuration options; the bucket storage root
    comes from ``buckets_path``.
    """
    return wsgi.Server("S3 Objectstore",
                       S3Application(CONF.buckets_path),
                       port=CONF.s3_listen_port,
                       host=CONF.s3_listen)
|
||||
|
||||
|
||||
class S3Application(wsgi.Router):
    """Implementation of an S3-like storage server based on local files.

    If bucket depth is given, we break files up into multiple directories
    to prevent hitting file system limits for number of files in each
    directories. 1 means one level of directories, 2 means 2, etc.

    """

    def __init__(self, root_directory, bucket_depth=0, mapper=None):
        # NOTE(review): the deprecation text refers to the EC2 API even
        # though this is the objectstore's S3 server — presumably because
        # both were deprecated together in Kilo; confirm before changing.
        versionutils.report_deprecated_feature(
            LOG,
            _LW('The in tree EC2 API is deprecated as of Kilo release and may '
                'be removed in a future release. The openstack ec2-api '
                'project http://git.openstack.org/cgit/openstack/ec2-api/ '
                'is the target replacement for this functionality.')
            )
        if mapper is None:
            mapper = routes.Mapper()

        # Each route builds a fresh handler per request, since handlers
        # keep per-request state (request/response) on themselves.
        mapper.connect('/',
                controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
        mapper.connect('/{bucket}/{object_name}',
                controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw))
        mapper.connect('/{bucket_name}/',
                controller=lambda *a, **kw: BucketHandler(self)(*a, **kw))
        # Normalize and create the storage root if needed.
        self.directory = os.path.abspath(root_directory)
        fileutils.ensure_tree(self.directory)
        self.bucket_depth = bucket_depth
        super(S3Application, self).__init__(mapper)
|
||||
|
||||
|
||||
class BaseRequestHandler(object):
    """Base class emulating Tornado's web framework pattern in WSGI.

    This is a direct port of Tornado's implementation, so some key decisions
    about how the code interacts have already been chosen.

    The two most common ways of designing web frameworks can be
    classified as async object-oriented and sync functional.

    Tornado's is on the OO side because a response is built up in and using
    the shared state of an object and one of the object's methods will
    eventually trigger the "finishing" of the response asynchronously.

    Most WSGI stuff is in the functional side, we pass a request object to
    every call down a chain and the eventual return value will be a response.

    Part of the function of the routing code in S3Application as well as the
    code in BaseRequestHandler's __call__ method is to merge those two styles
    together enough that the Tornado code can work without extensive
    modifications.

    To do that it needs to give the Tornado-style code clean objects that it
    can modify the state of for each request that is processed, so we use a
    very simple factory lambda to create new state for each request, that's
    the stuff in the router, and when we let the Tornado code modify that
    object to handle the request, then we return the response it generated.
    This wouldn't work the same if Tornado was being more async'y and doing
    other callbacks throughout the process, but since Tornado is being
    relatively simple here we can be satisfied that the response will be
    complete by the end of the get/post method.

    """

    def __init__(self, application):
        # Back-reference to the S3Application for shared configuration
        # (storage directory, bucket_depth).
        self.application = application

    @webob.dec.wsgify
    def __call__(self, request):
        # Dispatch on the lowercased HTTP method name; unknown methods fall
        # through to invalid(), leaving the default (empty 200) response.
        method = request.method.lower()
        f = getattr(self, method, self.invalid)
        self.request = request
        self.response = webob.Response()
        # Route match variables (bucket/object names) become keyword args
        # for the handler method; 'controller' is routing machinery only.
        params = request.environ['wsgiorg.routing_args'][1]
        del params['controller']
        f(**params)
        return self.response

    def get_argument(self, arg, default):
        """Return a request parameter, or *default* when absent."""
        return self.request.params.get(arg, default)

    def set_header(self, header, value):
        """Set a response header."""
        self.response.headers[header] = value

    def set_status(self, status_code):
        """Set the HTTP status of the response."""
        self.response.status = status_code

    def set_404(self):
        """Render the S3 'NoSuchKey' error body and set a 404 status."""
        self.render_xml({"Error": {
            "Code": "NoSuchKey",
            "Message": "The resource you requested does not exist"
        }})
        self.set_status(404)

    def finish(self, body=''):
        """Set the (UTF-8 encoded) response body."""
        self.response.body = utils.utf8(body)

    def invalid(self, **kwargs):
        # Fallback for unsupported HTTP methods: leave response untouched.
        pass

    def render_xml(self, value):
        """Serialize a single-key dict into an S3-style XML response body."""
        assert isinstance(value, dict) and len(value) == 1
        self.set_header("Content-Type", "application/xml; charset=UTF-8")
        # The single top-level key names the root element.
        name = list(value.keys())[0]
        parts = []
        parts.append('<' + utils.utf8(name) +
                     ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
        self._render_parts(list(value.values())[0], parts)
        parts.append('</' + utils.utf8(name) + '>')
        self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
                    ''.join(parts))

    def _render_parts(self, value, parts=None):
        # Recursively append XML fragments for strings, integers, bools,
        # datetimes and dicts (list values fan out to repeated elements).
        if not parts:
            parts = []

        if isinstance(value, six.string_types):
            parts.append(utils.xhtml_escape(value))
        elif type(value) in six.integer_types:
            parts.append(str(value))
        elif isinstance(value, bool):
            parts.append(str(value))
        elif isinstance(value, datetime.datetime):
            parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
        elif isinstance(value, dict):
            for name, subvalue in six.iteritems(value):
                if not isinstance(subvalue, list):
                    subvalue = [subvalue]
                for subsubvalue in subvalue:
                    parts.append('<' + utils.utf8(name) + '>')
                    self._render_parts(subsubvalue, parts)
                    parts.append('</' + utils.utf8(name) + '>')
        else:
            raise Exception("Unknown S3 value type %r", value)

    def _object_path(self, bucket, object_name):
        # Without bucket depth, objects live directly under the bucket dir.
        if self.application.bucket_depth < 1:
            return os.path.abspath(os.path.join(
                self.application.directory, bucket, object_name))
        # Otherwise shard into subdirectories keyed by growing prefixes of
        # the object name's hash (2 chars per depth level).
        hash = utils.get_hash_str(object_name)
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket))
        for i in range(self.application.bucket_depth):
            path = os.path.join(path, hash[:2 * (i + 1)])
        return os.path.join(path, object_name)
|
||||
|
||||
|
||||
class RootHandler(BaseRequestHandler):
    """Handles GET / -- list all buckets in the store."""

    def get(self):
        # Every entry directly under the storage root is treated as a
        # bucket; creation time comes from the directory's ctime.
        names = os.listdir(self.application.directory)
        buckets = []
        for name in names:
            path = os.path.join(self.application.directory, name)
            info = os.stat(path)
            buckets.append({
                "Name": name,
                "CreationDate": datetime.datetime.utcfromtimestamp(
                    info.st_ctime),
            })
        self.render_xml({"ListAllMyBucketsResult": {
            "Buckets": {"Bucket": buckets},
        }})
|
||||
|
||||
|
||||
class BucketHandler(BaseRequestHandler):
    """Handles bucket-level requests: list, create, delete, head."""

    def get(self, bucket_name):
        """List a bucket's objects (S3 ListBucket semantics)."""
        prefix = self.get_argument("prefix", u"")
        marker = self.get_argument("marker", u"")
        max_keys = int(self.get_argument("max-keys", 50000))
        path = os.path.abspath(os.path.join(self.application.directory,
                                            bucket_name))
        terse = int(self.get_argument("terse", 0))
        # Reject path escapes (e.g. '..' in the name) and missing buckets.
        if (not path.startswith(self.application.directory) or
                not os.path.isdir(path)):
            self.set_404()
            return
        object_names = []
        for root, dirs, files in os.walk(path):
            for file_name in files:
                object_names.append(os.path.join(root, file_name))
        # Strip the bucket path prefix (plus any hash-shard directory
        # components) so names become relative object keys.
        skip = len(path) + 1
        for i in range(self.application.bucket_depth):
            skip += 2 * (i + 1) + 1
        object_names = [n[skip:] for n in object_names]
        object_names.sort()
        contents = []

        start_pos = 0
        if marker:
            # Resume listing strictly after the marker key.
            start_pos = bisect.bisect_right(object_names, marker, start_pos)
        if prefix:
            start_pos = bisect.bisect_left(object_names, prefix, start_pos)

        truncated = False
        for object_name in object_names[start_pos:]:
            if not object_name.startswith(prefix):
                break
            if len(contents) >= max_keys:
                truncated = True
                break
            object_path = self._object_path(bucket_name, object_name)
            c = {"Key": object_name}
            if not terse:
                # Include per-object size/mtime unless terse output asked.
                info = os.stat(object_path)
                c.update({
                    "LastModified": datetime.datetime.utcfromtimestamp(
                        info.st_mtime),
                    "Size": info.st_size,
                })
            contents.append(c)
            marker = object_name
        self.render_xml({"ListBucketResult": {
            "Name": bucket_name,
            "Prefix": prefix,
            "Marker": marker,
            "MaxKeys": max_keys,
            "IsTruncated": truncated,
            "Contents": contents,
        }})

    def put(self, bucket_name):
        """Create a bucket; 403 if it exists or would escape the root."""
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket_name))
        if (not path.startswith(self.application.directory) or
                os.path.exists(path)):
            self.set_status(403)
            return
        fileutils.ensure_tree(path)
        self.finish()

    def delete(self, bucket_name):
        """Delete an empty bucket; 403 if non-empty, 404 if missing."""
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket_name))
        if (not path.startswith(self.application.directory) or
                not os.path.isdir(path)):
            self.set_404()
            return
        if len(os.listdir(path)) > 0:
            # S3 semantics: buckets must be empty before deletion.
            self.set_status(403)
            return
        os.rmdir(path)
        self.set_status(204)
        self.finish()

    def head(self, bucket_name):
        """Existence check for a bucket: 200 if present, 404 otherwise."""
        path = os.path.abspath(os.path.join(self.application.directory,
                                            bucket_name))
        if (not path.startswith(self.application.directory) or
                not os.path.isdir(path)):
            self.set_404()
            return
        self.set_status(200)
        self.finish()
|
||||
|
||||
|
||||
class ObjectHandler(BaseRequestHandler):
    """Handles object-level requests: fetch, store, delete."""

    def get(self, bucket, object_name):
        """Return an object's contents; 404 if it does not exist."""
        object_name = urllib.unquote(object_name)
        path = self._object_path(bucket, object_name)
        # Reject path escapes and missing objects alike.
        if (not path.startswith(self.application.directory) or
                not os.path.isfile(path)):
            self.set_404()
            return
        info = os.stat(path)
        self.set_header("Content-Type", "application/unknown")
        self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
            info.st_mtime))
        with open(path, "r") as object_file:
            self.finish(object_file.read())

    def put(self, bucket, object_name):
        """Store the request body as an object inside an existing bucket."""
        object_name = urllib.unquote(object_name)
        bucket_dir = os.path.abspath(os.path.join(
            self.application.directory, bucket))
        if (not bucket_dir.startswith(self.application.directory) or
                not os.path.isdir(bucket_dir)):
            self.set_404()
            return
        path = self._object_path(bucket, object_name)
        # Refuse writes that escape the bucket or collide with a directory.
        if not path.startswith(bucket_dir) or os.path.isdir(path):
            self.set_status(403)
            return
        # Create any hash-shard subdirectories the object path needs.
        directory = os.path.dirname(path)
        fileutils.ensure_tree(directory)
        with open(path, "w") as object_file:
            object_file.write(self.request.body)
        # ETag is the hash of the body, quoted as S3 clients expect.
        self.set_header('ETag',
                        '"%s"' % utils.get_hash_str(self.request.body))
        self.finish()

    def delete(self, bucket, object_name):
        """Remove an object; 404 if it does not exist."""
        object_name = urllib.unquote(object_name)
        path = self._object_path(bucket, object_name)
        if (not path.startswith(self.application.directory) or
                not os.path.isfile(path)):
            self.set_404()
            return
        os.unlink(path)
        self.set_status(204)
        self.finish()
|
@ -38,7 +38,6 @@ import nova.db.sqlalchemy.api
|
||||
import nova.exception
|
||||
import nova.image.download.file
|
||||
import nova.image.glance
|
||||
import nova.image.s3
|
||||
import nova.ipv6.api
|
||||
import nova.keymgr
|
||||
import nova.keymgr.barbican
|
||||
@ -46,7 +45,6 @@ import nova.keymgr.conf_key_mgr
|
||||
import nova.netconf
|
||||
import nova.notifications
|
||||
import nova.objects.network
|
||||
import nova.objectstore.s3server
|
||||
import nova.paths
|
||||
import nova.pci.request
|
||||
import nova.pci.whitelist
|
||||
@ -87,11 +85,9 @@ def list_opts():
|
||||
nova.db.api.db_opts,
|
||||
nova.db.sqlalchemy.api.db_opts,
|
||||
nova.exception.exc_log_opts,
|
||||
nova.image.s3.s3_opts,
|
||||
nova.netconf.netconf_opts,
|
||||
nova.notifications.notify_opts,
|
||||
nova.objects.network.network_opts,
|
||||
nova.objectstore.s3server.s3_opts,
|
||||
nova.paths.path_opts,
|
||||
nova.pci.request.pci_alias_opts,
|
||||
nova.pci.whitelist.pci_opts,
|
||||
|
@ -63,17 +63,6 @@ service_opts = [
|
||||
cfg.ListOpt('enabled_ssl_apis',
|
||||
default=[],
|
||||
help='A list of APIs with enabled SSL'),
|
||||
cfg.StrOpt('ec2_listen',
|
||||
default="0.0.0.0",
|
||||
help='The IP address on which the EC2 API will listen.'),
|
||||
cfg.IntOpt('ec2_listen_port',
|
||||
default=8773,
|
||||
min=1,
|
||||
max=65535,
|
||||
help='The port on which the EC2 API will listen.'),
|
||||
cfg.IntOpt('ec2_workers',
|
||||
help='Number of workers for EC2 API service. The default will '
|
||||
'be equal to the number of CPUs available.'),
|
||||
cfg.StrOpt('osapi_compute_listen',
|
||||
default="0.0.0.0",
|
||||
help='The IP address on which the OpenStack API will listen.'),
|
||||
|
@ -342,10 +342,8 @@ class OSAPIFixture(fixtures.Fixture):
|
||||
# in order to run these in tests we need to bind only to local
|
||||
# host, and dynamically allocate ports
|
||||
conf_overrides = {
|
||||
'ec2_listen': '127.0.0.1',
|
||||
'osapi_compute_listen': '127.0.0.1',
|
||||
'metadata_listen': '127.0.0.1',
|
||||
'ec2_listen_port': 0,
|
||||
'osapi_compute_listen_port': 0,
|
||||
'metadata_listen_port': 0,
|
||||
'verbose': True,
|
||||
|
@ -1 +0,0 @@
|
||||
1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df
|
@ -1 +0,0 @@
|
||||
ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk
|
@ -1,635 +0,0 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Unit tests for the API endpoint."""
|
||||
|
||||
import random
|
||||
import re
|
||||
from six.moves import StringIO
|
||||
|
||||
import boto
|
||||
import boto.connection
|
||||
from boto.ec2 import regioninfo
|
||||
from boto import exception as boto_exc
|
||||
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
|
||||
if hasattr(boto.connection, 'HTTPResponse'):
|
||||
httplib = boto.connection
|
||||
else:
|
||||
from six.moves import http_client as httplib
|
||||
import fixtures
|
||||
from oslo_utils import encodeutils
|
||||
from oslo_utils import versionutils
|
||||
import webob
|
||||
|
||||
from nova.api import auth
|
||||
from nova.api import ec2
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova import block_device
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.tests.unit import matchers
|
||||
|
||||
|
||||
class FakeHttplibSocket(object):
    """Trivial fake socket backing an httplib.HTTPResponse."""

    def __init__(self, response_string):
        # Keep the raw response text available to callers (see
        # getresponsebody in the fake connection) and wrap it in an
        # in-memory file object for httplib to parse from.
        self.response_string = response_string
        self._stream = StringIO(response_string)

    def makefile(self, _mode, _other):
        """Return the in-memory buffer; the mode arguments are ignored."""
        return self._stream
|
||||
|
||||
|
||||
class FakeHttplibConnection(object):
    """A fake httplib.HTTPConnection for boto to use

    requests made via this connection actually get translated and routed into
    our WSGI app, we then wait for the response and turn it back into
    the HTTPResponse that boto expects.
    """
    def __init__(self, app, host, is_secure=False):
        # is_secure is accepted for interface compatibility but unused.
        self.app = app
        self.host = host

    def request(self, method, path, data, headers):
        """Run the request through the WSGI app, capture the response."""
        req = webob.Request.blank(path)
        req.method = method
        req.body = encodeutils.safe_encode(data)
        req.headers = headers
        req.headers['Accept'] = 'text/html'
        req.host = self.host
        # Call the WSGI app, get the HTTP response
        resp = str(req.get_response(self.app))
        # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
        # guess that's a function the web server usually provides.
        resp = "HTTP/1.0 %s" % resp
        self.sock = FakeHttplibSocket(resp)
        self.http_response = httplib.HTTPResponse(self.sock)
        # NOTE(vish): boto is accessing private variables for some reason
        self._HTTPConnection__response = self.http_response
        self.http_response.begin()

    def getresponse(self):
        """Return the parsed HTTPResponse for the most recent request."""
        return self.http_response

    def getresponsebody(self):
        """Return the raw response string fed into the fake socket."""
        return self.sock.response_string

    def close(self):
        """Required for compatibility with boto/tornado."""
        pass
|
||||
|
||||
|
||||
class XmlConversionTestCase(test.NoDBTestCase):
    """Unit test api xml conversion."""

    def test_number_conversion(self):
        conv = ec2utils._try_convert
        # 'None' maps to the None singleton rather than a string.
        self.assertIsNone(conv('None'))
        # (input string, expected converted value) pairs covering
        # booleans, integers, floats, hex literals, and strings that
        # must be passed through unchanged.
        cases = [
            ('True', True), ('TRUE', True), ('true', True),
            ('False', False), ('FALSE', False), ('false', False),
            ('0', 0), ('42', 42),
            ('3.14', 3.14), ('-57.12', -57.12),
            ('0x57', 0x57), ('-0x57', -0x57),
            ('-', '-'), ('-0', 0),
            ('0.0', 0.0),
            # NOTE(review): these expect 0.0 for tiny exponents —
            # relies on _try_convert's specific float handling.
            ('1e-8', 0.0), ('-1e-8', 0.0),
            ('0xDD8G', '0xDD8G'), ('0XDD8G', '0XDD8G'),
            ('-stringy', '-stringy'), ('stringy', 'stringy'),
            ('add', 'add'), ('remove', 'remove'), ('', ''),
        ]
        for raw, expected in cases:
            self.assertEqual(conv(raw), expected)
|
||||
|
||||
|
||||
class Ec2utilsTestCase(test.NoDBTestCase):
    """Tests for the ec2utils ID-mapping and request-parsing helpers."""

    def test_ec2_id_to_id(self):
        # EC2-style ids ('i-', 'ami-', 'snap-', 'vol-' + hex) decode to ints.
        self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
        self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
        self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
        self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)

    def test_bad_ec2_id(self):
        # A string without the '<prefix>-<hex>' shape must raise.
        self.assertRaises(exception.InvalidEc2Id,
                          ec2utils.ec2_id_to_id,
                          'badone')

    def test_id_to_ec2_id(self):
        # The inverse mapping: integer ids encode back to EC2-style ids.
        self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
        self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
        self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
        self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')

    def test_dict_from_dotted_str(self):
        # Dotted EC2 query parameters expand into a nested dict with
        # snake_cased keys and converted scalar values.
        in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
                  ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
                  ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
                  ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
                  ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
                  ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
        expected_dict = {
            'block_device_mapping': {
                '1': {'device_name': '/dev/sda1',
                      'ebs': {'snapshot_id': 'snap-0000001c',
                              'volume_size': 80,
                              'delete_on_termination': False}},
                '2': {'device_name': '/dev/sdc',
                      'virtual_name': 'ephemeral0'}}}
        out_dict = ec2utils.dict_from_dotted_str(in_str)

        self.assertThat(out_dict, matchers.DictMatches(expected_dict))

    def test_properties_root_device_name(self):
        # Explicit 'root_device_name' wins over the mapping-derived one.
        mappings = [{"device": "/dev/sda1", "virtual": "root"}]
        properties0 = {'mappings': mappings}
        properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}

        root_device_name = block_device.properties_root_device_name(
            properties0)
        self.assertEqual(root_device_name, '/dev/sda1')

        root_device_name = block_device.properties_root_device_name(
            properties1)
        self.assertEqual(root_device_name, '/dev/sdb')

    def test_regex_from_ec2_regex(self):
        def _test_re(ec2_regex, expected, literal, match=True):
            # Check both the translated pattern text and whether it
            # matches (or refuses to match) the given literal.
            regex = ec2utils.regex_from_ec2_regex(ec2_regex)
            self.assertEqual(regex, expected)
            if match:
                self.assertIsNotNone(re.match(regex, literal))
            else:
                self.assertIsNone(re.match(regex, literal))

        # wildcards
        _test_re('foo', '\Afoo\Z(?s)', 'foo')
        _test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
        _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
        # NOTE(review): this literal displays identically to the matching
        # case above; the original likely used a two-space literal
        # ('foo  bar') that was collapsed — confirm against history.
        _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar', match=False)
        _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')

        # backslashes and escaped wildcards
        _test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
        _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
        _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
        _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
        _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
        _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)

        # analog to the example given in the EC2 API docs
        ec2_regex = '\*nova\?\\end'
        expected = r'\A[*]nova[?]\\end\Z(?s)'
        literal = r'*nova?\end'
        _test_re(ec2_regex, expected, literal)

    def test_mapping_prepend_dev(self):
        # Bare device names (except 'ami'/'root' entries) get '/dev/'
        # prepended; names already starting with '/dev/' are untouched.
        mappings = [
            {'virtual': 'ami',
             'device': 'sda1'},
            {'virtual': 'root',
             'device': '/dev/sda1'},

            {'virtual': 'swap',
             'device': 'sdb1'},
            {'virtual': 'swap',
             'device': '/dev/sdb2'},

            {'virtual': 'ephemeral0',
             'device': 'sdc1'},
            {'virtual': 'ephemeral1',
             'device': '/dev/sdc1'}]
        expected_result = [
            {'virtual': 'ami',
             'device': 'sda1'},
            {'virtual': 'root',
             'device': '/dev/sda1'},

            {'virtual': 'swap',
             'device': '/dev/sdb1'},
            {'virtual': 'swap',
             'device': '/dev/sdb2'},

            {'virtual': 'ephemeral0',
             'device': '/dev/sdc1'},
            {'virtual': 'ephemeral1',
             'device': '/dev/sdc1'}]
        self.assertThat(block_device.mappings_prepend_dev(mappings),
                        matchers.DictListMatches(expected_result))
|
||||
|
||||
|
||||
class ApiEc2TestCase(test.TestCase):
    """Unit test for the cloud controller on an EC2 API.

    Drives a real boto client against the EC2 WSGI pipeline via
    FakeHttplibConnection; each HTTP round trip must be announced with
    expect_http() + mox.ReplayAll() before the boto call is made.
    """
    def setUp(self):
        super(ApiEc2TestCase, self).setUp()
        self.host = '127.0.0.1'
        # NOTE(vish): skipping the Authorizer
        roles = ['sysadmin', 'netadmin']
        ctxt = context.RequestContext('fake', 'fake', roles=roles)
        # Assemble the EC2 middleware pipeline around the cloud controller.
        self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
            ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
                ), 'nova.api.ec2.cloud.CloudController'))))
        # Silence boto's logger during the tests.
        self.useFixture(fixtures.FakeLogger('boto'))

    def expect_http(self, host=None, is_secure=False, api_version=None):
        """Returns a new EC2 connection.

        Creates a boto EC2 connection and stubs its new_http_connection
        so the next request is routed into self.app instead of a real
        socket. The stub signature differs across boto releases, hence
        the version checks below.
        """
        self.ec2 = boto.connect_ec2(
            aws_access_key_id='fake',
            aws_secret_access_key='fake',
            is_secure=False,
            region=regioninfo.RegionInfo(None, 'test', self.host),
            port=8773,
            path='/services/Cloud')
        if api_version:
            self.ec2.APIVersion = api_version

        self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
        self.http = FakeHttplibConnection(
            self.app, '%s:8773' % (self.host), False)
        # boto >= 2.14 passes (host, port, is_secure); boto 2.x before
        # that passes ('host:port', is_secure); older boto omits port.
        if versionutils.is_compatible('2.14', boto.Version, same_major=False):
            self.ec2.new_http_connection(host or self.host, 8773,
                                         is_secure).AndReturn(self.http)
        elif versionutils.is_compatible('2', boto.Version, same_major=False):
            self.ec2.new_http_connection(host or '%s:8773' % (self.host),
                                         is_secure).AndReturn(self.http)
        else:
            self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
        return self.http

    def test_xmlns_version_matches_request_version(self):
        """The response xmlns must echo the requested API version."""
        self.expect_http(api_version='2010-10-30')
        self.mox.ReplayAll()

        # Any request should be fine
        self.ec2.get_all_instances()
        self.assertIn(self.ec2.APIVersion, self.http.getresponsebody(),
                      'The version in the xmlns of the response does '
                      'not match the API version given in the request.')

    def test_describe_instances(self):
        """Test that, after creating a user and a project, the describe
        instances call to the API works properly.
        """
        self.expect_http()
        self.mox.ReplayAll()
        self.assertEqual(self.ec2.get_all_instances(), [])

    def test_terminate_invalid_instance(self):
        # Attempt to terminate an invalid instance.
        self.expect_http()
        self.mox.ReplayAll()
        self.assertRaises(boto_exc.EC2ResponseError,
                          self.ec2.terminate_instances, "i-00000005")

    def test_get_all_key_pairs(self):
        """Test that, after creating a user and project and generating
        a key pair, that the API call to list key pairs works properly.
        """
        # Random name so reruns don't collide with leftover state.
        keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                          for x in range(random.randint(4, 8)))
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.create_key_pair(keyname)
        rv = self.ec2.get_all_key_pairs()
        results = [k for k in rv if k.name == keyname]
        self.assertEqual(len(results), 1)

    def test_create_duplicate_key_pair(self):
        """Test that, after successfully generating a keypair,
        requesting a second keypair with the same name fails sanely.
        """
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.create_key_pair('test')

        try:
            self.ec2.create_key_pair('test')
        except boto_exc.EC2ResponseError as e:
            if e.code == 'InvalidKeyPair.Duplicate':
                pass
            else:
                # Fails with a readable message showing the actual code.
                self.assertEqual('InvalidKeyPair.Duplicate', e.code)
        else:
            self.fail('Exception not raised.')

    def test_get_all_security_groups(self):
        # Test that we can retrieve security groups.
        self.expect_http()
        self.mox.ReplayAll()

        rv = self.ec2.get_all_security_groups()

        # Only the implicit 'default' group exists initially.
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')

    def test_create_delete_security_group(self):
        # Test that we can create a security group.
        self.expect_http()
        self.mox.ReplayAll()

        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))

        self.ec2.create_security_group(security_group_name, 'test group')

        self.expect_http()
        self.mox.ReplayAll()

        rv = self.ec2.get_all_security_groups()
        # 'default' plus the one we just created.
        self.assertEqual(len(rv), 2)
        self.assertIn(security_group_name, [group.name for group in rv])

        self.expect_http()
        self.mox.ReplayAll()

        self.ec2.delete_security_group(security_group_name)

    def test_group_name_valid_chars_security_group(self):
        """Test that we sanely handle invalid security group names.

        EC2 API Spec states we should only accept alphanumeric characters,
        spaces, dashes, and underscores. Amazon implementation
        accepts more characters - so, [:print:] is ok.
        """
        bad_strict_ec2 = "aa \t\x01\x02\x7f"
        bad_amazon_ec2 = "aa #^% -=99"
        # (ec2_strict_validation flag, group name, description) triples
        # that must be rejected.
        test_raise = [
            (True, bad_amazon_ec2, "test desc"),
            (True, "test name", bad_amazon_ec2),
            (False, bad_strict_ec2, "test desc"),
        ]
        for t in test_raise:
            self.expect_http()
            self.mox.ReplayAll()
            self.flags(ec2_strict_validation=t[0])
            self.assertRaises(boto_exc.EC2ResponseError,
                              self.ec2.create_security_group,
                              t[1],
                              t[2])
        # The same inputs are accepted when strict validation is off.
        test_accept = [
            (False, bad_amazon_ec2, "test desc"),
            (False, "test name", bad_amazon_ec2),
        ]
        for t in test_accept:
            self.expect_http()
            self.mox.ReplayAll()
            self.flags(ec2_strict_validation=t[0])
            self.ec2.create_security_group(t[1], t[2])
            self.expect_http()
            self.mox.ReplayAll()
            self.ec2.delete_security_group(t[1])

    def test_group_name_valid_length_security_group(self):
        """Test that we sanely handle invalid security group names.

        API Spec states that the length should not exceed 255 char.
        """
        self.expect_http()
        self.mox.ReplayAll()

        # Test block group_name > 255 chars
        security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
                                      for x in range(random.randint(256, 266)))

        self.assertRaises(boto_exc.EC2ResponseError,
                          self.ec2.create_security_group,
                          security_group_name,
                          'test group')

    def test_authorize_revoke_security_group_cidr(self):
        """Test that we can add and remove CIDR based rules
        to a security group
        """
        self.expect_http()
        self.mox.ReplayAll()

        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))

        group = self.ec2.create_security_group(security_group_name,
                                               'test group')

        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2

        # Eight valid rules spanning tcp/udp/icmp and full port ranges.
        group.authorize('tcp', 80, 81, '0.0.0.0/0')
        group.authorize('icmp', -1, -1, '0.0.0.0/0')
        group.authorize('udp', 80, 81, '0.0.0.0/0')
        group.authorize('tcp', 1, 65535, '0.0.0.0/0')
        group.authorize('udp', 1, 65535, '0.0.0.0/0')
        group.authorize('icmp', 1, 0, '0.0.0.0/0')
        group.authorize('icmp', 0, 1, '0.0.0.0/0')
        group.authorize('icmp', 0, 0, '0.0.0.0/0')

        def _assert(message, *args):
            # Helper: authorize(*args) must fail with HTTP 400 and an
            # error message containing `message`.
            try:
                group.authorize(*args)
            except boto_exc.EC2ResponseError as e:
                self.assertEqual(e.status, 400, 'Expected status to be 400')
                self.assertIn(message, e.error_message)
            else:
                raise self.failureException('EC2ResponseError not raised')

        # Invalid CIDR address
        _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
        # Missing ports
        _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
        # from port cannot be greater than to port
        _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
        # For tcp, negative values are not allowed
        _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
        # For tcp, valid port range 1-65535
        _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
        # Invalid Cidr for ICMP type
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
        # Invalid protocol
        _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
        # Invalid port
        _assert('Invalid input received: To and From ports must be integers',
                'tcp', " ", "81", '0.0.0.0/0')
        # Invalid icmp port
        _assert('Invalid input received: '
                'Type and Code must be integers for ICMP protocol type',
                'icmp', " ", "81", '0.0.0.0/0')
        # Invalid CIDR Address
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
        # Invalid CIDR Address
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
        # Invalid Cidr ports
        _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')

        self.expect_http()
        self.mox.ReplayAll()

        rv = self.ec2.get_all_security_groups()

        group = [grp for grp in rv if grp.name == security_group_name][0]

        # Exactly the eight valid rules above were stored.
        self.assertEqual(len(group.rules), 8)
        self.assertEqual(int(group.rules[0].from_port), 80)
        self.assertEqual(int(group.rules[0].to_port), 81)
        self.assertEqual(len(group.rules[0].grants), 1)
        self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')

        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2

        # Revoke the same eight rules.
        group.revoke('tcp', 80, 81, '0.0.0.0/0')
        group.revoke('icmp', -1, -1, '0.0.0.0/0')
        group.revoke('udp', 80, 81, '0.0.0.0/0')
        group.revoke('tcp', 1, 65535, '0.0.0.0/0')
        group.revoke('udp', 1, 65535, '0.0.0.0/0')
        group.revoke('icmp', 1, 0, '0.0.0.0/0')
        group.revoke('icmp', 0, 1, '0.0.0.0/0')
        group.revoke('icmp', 0, 0, '0.0.0.0/0')

        self.expect_http()
        self.mox.ReplayAll()

        self.ec2.delete_security_group(security_group_name)

        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2

        rv = self.ec2.get_all_security_groups()

        # Only the default group remains.
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')

    def test_authorize_revoke_security_group_cidr_v6(self):
        """Test that we can add and remove CIDR based rules
        to a security group for IPv6
        """
        self.expect_http()
        self.mox.ReplayAll()

        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))

        group = self.ec2.create_security_group(security_group_name,
                                               'test group')

        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2

        group.authorize('tcp', 80, 81, '::/0')

        self.expect_http()
        self.mox.ReplayAll()

        rv = self.ec2.get_all_security_groups()

        group = [grp for grp in rv if grp.name == security_group_name][0]
        self.assertEqual(len(group.rules), 1)
        self.assertEqual(int(group.rules[0].from_port), 80)
        self.assertEqual(int(group.rules[0].to_port), 81)
        self.assertEqual(len(group.rules[0].grants), 1)
        self.assertEqual(str(group.rules[0].grants[0]), '::/0')

        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2

        group.revoke('tcp', 80, 81, '::/0')

        self.expect_http()
        self.mox.ReplayAll()

        self.ec2.delete_security_group(security_group_name)

        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2

        rv = self.ec2.get_all_security_groups()

        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')

    def test_authorize_revoke_security_group_foreign_group(self):
        """Test that we can grant and revoke another security group access
        to a security group
        """
        self.expect_http()
        self.mox.ReplayAll()

        rand_string = 'sdiuisudfsdcnpaqwertasd'
        security_group_name = "".join(random.choice(rand_string)
                                      for x in range(random.randint(4, 8)))
        other_security_group_name = "".join(random.choice(rand_string)
                                            for x in range(random.randint(4, 8)))

        group = self.ec2.create_security_group(security_group_name,
                                               'test group')

        self.expect_http()
        self.mox.ReplayAll()

        other_group = self.ec2.create_security_group(other_security_group_name,
                                                     'some other group')

        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2

        group.authorize(src_group=other_group)

        self.expect_http()
        self.mox.ReplayAll()

        rv = self.ec2.get_all_security_groups()

        # I don't bother checking that we actually find it here,
        # because the create/delete unit test further up should
        # be good enough for that.
        for group in rv:
            if group.name == security_group_name:
                self.assertEqual(len(group.rules), 3)
                self.assertEqual(len(group.rules[0].grants), 1)
                self.assertEqual(str(group.rules[0].grants[0]),
                                 '%s-%s' % (other_security_group_name, 'fake'))

        self.expect_http()
        self.mox.ReplayAll()

        rv = self.ec2.get_all_security_groups()

        for group in rv:
            if group.name == security_group_name:
                self.expect_http()
                self.mox.ReplayAll()
                group.connection = self.ec2
                group.revoke(src_group=other_group)

        self.expect_http()
        self.mox.ReplayAll()

        self.ec2.delete_security_group(security_group_name)
        self.ec2.delete_security_group(other_security_group_name)
|
@ -1,94 +0,0 @@
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Unit tests for the API Request internals."""
|
||||
|
||||
import copy
|
||||
|
||||
import six
|
||||
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from nova.api.ec2 import apirequest
|
||||
from nova import test
|
||||
|
||||
|
||||
class APIRequestTestCase(test.NoDBTestCase):
    """Tests for APIRequest response rendering and datetime formatting."""

    def setUp(self):
        super(APIRequestTestCase, self).setUp()
        # A request object with fake controller/action/version; only its
        # rendering machinery is exercised here.
        self.req = apirequest.APIRequest("FakeController", "FakeAction",
                                         "FakeVersion", {})
        # A response payload containing each scalar type plus a nested
        # dict, to exercise all serialization branches.
        self.resp = {
            'string': 'foo',
            'int': 1,
            'long': int(1),
            'bool': False,
            'dict': {
                'string': 'foo',
                'int': 1,
            }
        }

        # The previous will produce an output that looks like the
        # following (excusing line wrap for 80 cols):
        #
        # <FakeActionResponse xmlns="http://ec2.amazonaws.com/doc/\
        #      FakeVersion/">
        #   <requestId>uuid</requestId>
        #   <int>1</int>
        #   <dict>
        #     <int>1</int>
        #     <string>foo</string>
        #   </dict>
        #   <bool>false</bool>
        #   <string>foo</string>
        # </FakeActionResponse>
        #
        # We don't attempt to ever test for the full document because
        # hash seed order might impact its rendering order. The fact
        # that running the function doesn't explode is a big part of
        # the win.

    def test_render_response_ascii(self):
        """Rendering a plain-ASCII payload yields the expected fragments."""
        data = self.req._render_response(self.resp, 'uuid')
        self.assertIn('<FakeActionResponse xmlns="http://ec2.amazonaws.com/'
                      'doc/FakeVersion/', data)
        self.assertIn('<int>1</int>', data)
        self.assertIn('<string>foo</string>', data)

    def test_render_response_utf8(self):
        """Non-ASCII values survive rendering."""
        resp = copy.deepcopy(self.resp)
        resp['utf8'] = six.unichr(40960) + u'abcd' + six.unichr(1972)
        data = self.req._render_response(resp, 'uuid')
        # NOTE(review): the expected literal here may originally have been
        # the XML character references '&#40960;' / '&#1972;' and been
        # entity-decoded in transit — confirm against history.
        self.assertIn('<utf8>ꀀabcd޴</utf8>', data)

    # Tests for individual data element format functions

    def test_return_valid_isoformat(self):
        """Ensure that the ec2 api returns datetime in xs:dateTime
        (which apparently isn't datetime.isoformat())
        NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
        """
        conv = apirequest._database_to_isoformat
        # sqlite database representation with microseconds
        time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
                                                  "%Y-%m-%d %H:%M:%S.%f")
        self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
        # mysql database representation (no sub-second component)
        time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
                                                  "%Y-%m-%d %H:%M:%S")
        self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,276 +0,0 @@
|
||||
# Copyright 2012 Cloudscaling, Inc.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from nova.api.ec2 import cloud
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova.compute import utils as compute_utils
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.tests.unit import cast_as_call
|
||||
from nova.tests.unit import fake_network
|
||||
from nova.tests.unit import fake_notifier
|
||||
from nova.tests.unit.image import fake
|
||||
from nova import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('compute_driver', 'nova.virt.driver')
|
||||
|
||||
|
||||
class EC2ValidateTestCase(test.TestCase):
    """Validation tests for EC2-style instance/volume id handling.

    Each cloud-controller call is driven with both malformed ids
    (expect Invalid*IDMalformed) and well-formed-but-nonexistent ids
    (expect *NotFound).
    """

    def setUp(self):
        super(EC2ValidateTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver')

        def dumb(*args, **kwargs):
            # No-op replacement for instance-usage notifications.
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # Stub out the notification service so we use the no-op serializer
        # and avoid lazy-load traces with the wrap_exception decorator in
        # the compute service.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        # NOTE(review): attribute name is misspelled ('scheduter') in the
        # original; left as-is since nothing visible references it.
        self.scheduter = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.image_service = fake.FakeImageService()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

        # Malformed ids must raise Invalid*IDMalformed; syntactically
        # valid ids that don't exist must raise *NotFound.
        self.EC2_MALFORMED_IDS = ['foobar', '', 123]
        self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']

        self.ec2_id_exception_map = [(x,
                                      exception.InvalidInstanceIDMalformed)
                                     for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                                          for x in self.EC2_VALID__IDS])
        self.volume_id_exception_map = [(x,
                                         exception.InvalidVolumeIDMalformed)
                                        for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                                             for x in self.EC2_VALID__IDS])

        def fake_show(meh, context, id, **kwargs):
            # Minimal image record satisfying the EC2 image checks.
            return {'id': id,
                    'container_format': 'ami',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(self, context, **kwargs):
            image = fake_show(self, context, None)
            image['name'] = kwargs.get('name')
            return [image]

        fake.stub_out_image_service(self)
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

    def tearDown(self):
        super(EC2ValidateTestCase, self).tearDown()
        fake.FakeImageService_reset()

    # EC2_API tests (InvalidInstanceID.Malformed)
    def test_console_output(self):
        for ec2_id, e in self.ec2_id_exception_map:
            self.assertRaises(e,
                              self.cloud.get_console_output,
                              context=self.context,
                              instance_id=[ec2_id])

    def test_describe_instance_attribute(self):
        for ec2_id, e in self.ec2_id_exception_map:
            self.assertRaises(e,
                              self.cloud.describe_instance_attribute,
                              context=self.context,
                              instance_id=ec2_id,
                              attribute='kernel')

    def test_instance_lifecycle(self):
        # Every lifecycle verb must reject bad/unknown instance ids.
        lifecycle = [self.cloud.terminate_instances,
                     self.cloud.reboot_instances,
                     self.cloud.stop_instances,
                     self.cloud.start_instances,
                     ]
        for cmd in lifecycle:
            for ec2_id, e in self.ec2_id_exception_map:
                self.assertRaises(e,
                                  cmd,
                                  context=self.context,
                                  instance_id=[ec2_id])

    def test_create_image(self):
        for ec2_id, e in self.ec2_id_exception_map:
            self.assertRaises(e,
                              self.cloud.create_image,
                              context=self.context,
                              instance_id=ec2_id)

    def test_create_snapshot(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.create_snapshot,
                              context=self.context,
                              volume_id=ec2_id)

    def test_describe_volumes(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.describe_volumes,
                              context=self.context,
                              volume_id=[ec2_id])

    def test_delete_volume(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.delete_volume,
                              context=self.context,
                              volume_id=ec2_id)

    def test_detach_volume(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.detach_volume,
                              context=self.context,
                              volume_id=ec2_id)
|
||||
|
||||
|
||||
class EC2TimestampValidationTestCase(test.NoDBTestCase):
    """Test case for EC2 request timestamp validation."""

    def test_validate_ec2_timestamp_valid(self):
        # A well-formed 'Z'-suffixed timestamp is accepted.
        params = {'Timestamp': '2011-04-22T11:29:49Z'}
        expired = ec2utils.is_ec2_timestamp_expired(params)
        self.assertFalse(expired)

    def test_validate_ec2_timestamp_old_format(self):
        # Missing the trailing 'Z' -> treated as expired/invalid.
        params = {'Timestamp': '2011-04-22T11:29:49'}
        expired = ec2utils.is_ec2_timestamp_expired(params)
        self.assertTrue(expired)

    def test_validate_ec2_timestamp_not_set(self):
        # Absent Timestamp parameter is not considered expired.
        params = {}
        expired = ec2utils.is_ec2_timestamp_expired(params)
        self.assertFalse(expired)

    def test_validate_ec2_timestamp_ms_time_regex(self):
        # The sub-second regex accepts 3 or 6 fractional digits with a
        # trailing 'Z', and rejects other variants.
        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z')
        self.assertIsNotNone(result)
        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z')
        self.assertIsNotNone(result)
        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z')
        self.assertIsNone(result)
        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123')
        self.assertIsNone(result)
        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z')
        self.assertIsNone(result)

    def test_validate_ec2_timestamp_aws_sdk_format(self):
        # A millisecond-format timestamp from 2011 is valid per se, but
        # expired once a 300s expiry window is applied.
        params = {'Timestamp': '2011-04-22T11:29:49.123Z'}
        expired = ec2utils.is_ec2_timestamp_expired(params)
        self.assertFalse(expired)
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertTrue(expired)

    def test_validate_ec2_timestamp_invalid_format(self):
        # Garbage suffix ('P' instead of 'Z') -> expired/invalid.
        params = {'Timestamp': '2011-04-22T11:29:49.000P'}
        expired = ec2utils.is_ec2_timestamp_expired(params)
        self.assertTrue(expired)

    def test_validate_ec2_timestamp_advanced_time(self):

        # EC2 request with Timestamp in advanced time
        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
        params = {'Timestamp': utils.isotime(timestamp)}
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertFalse(expired)

    def test_validate_ec2_timestamp_advanced_time_expired(self):
        # A timestamp further in the future than the expiry window is
        # rejected.
        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
        params = {'Timestamp': utils.isotime(timestamp)}
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertTrue(expired)

    def test_validate_ec2_req_timestamp_not_expired(self):
        # A current timestamp within a short expiry window is accepted.
        params = {'Timestamp': utils.isotime()}
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=15)
        self.assertFalse(expired)
|
||||
|
||||
def test_validate_ec2_req_timestamp_expired(self):
|
||||
params = {'Timestamp': '2011-04-22T12:00:00Z'}
|
||||
compare = ec2utils.is_ec2_timestamp_expired(params, expires=300)
|
||||
self.assertTrue(compare)
|
||||
|
||||
def test_validate_ec2_req_expired(self):
|
||||
params = {'Expires': utils.isotime()}
|
||||
expired = ec2utils.is_ec2_timestamp_expired(params)
|
||||
self.assertTrue(expired)
|
||||
|
||||
def test_validate_ec2_req_not_expired(self):
|
||||
expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
|
||||
params = {'Expires': utils.isotime(expire)}
|
||||
expired = ec2utils.is_ec2_timestamp_expired(params)
|
||||
self.assertFalse(expired)
|
||||
|
||||
def test_validate_Expires_timestamp_invalid_format(self):
|
||||
|
||||
# EC2 request with invalid Expires
|
||||
params = {'Expires': '2011-04-22T11:29:49'}
|
||||
expired = ec2utils.is_ec2_timestamp_expired(params)
|
||||
self.assertTrue(expired)
|
||||
|
||||
def test_validate_ec2_req_timestamp_Expires(self):
|
||||
|
||||
# EC2 request with both Timestamp and Expires
|
||||
params = {'Timestamp': '2011-04-22T11:29:49Z',
|
||||
'Expires': utils.isotime()}
|
||||
self.assertRaises(exception.InvalidRequest,
|
||||
ec2utils.is_ec2_timestamp_expired,
|
||||
params)
|
@ -1,61 +0,0 @@
|
||||
# Copyright 2014 - Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova import context
|
||||
from nova import objects
|
||||
from nova import test
|
||||
|
||||
|
||||
class EC2UtilsTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
self.ctxt = context.get_admin_context()
|
||||
ec2utils.reset_cache()
|
||||
super(EC2UtilsTestCase, self).setUp()
|
||||
|
||||
def test_get_int_id_from_snapshot_uuid(self):
|
||||
smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
|
||||
smap.create()
|
||||
smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
|
||||
'fake-uuid')
|
||||
self.assertEqual(smap.id, smap_id)
|
||||
|
||||
def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
|
||||
smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
|
||||
'fake-uuid')
|
||||
smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
|
||||
self.assertEqual('fake-uuid', smap.uuid)
|
||||
|
||||
def test_get_snapshot_uuid_from_int_id(self):
|
||||
smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
|
||||
smap.create()
|
||||
smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
|
||||
self.assertEqual(smap.uuid, smap_uuid)
|
||||
|
||||
def test_id_to_glance_id(self):
|
||||
s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
|
||||
s3imap.create()
|
||||
uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
|
||||
self.assertEqual(uuid, s3imap.uuid)
|
||||
|
||||
def test_glance_id_to_id(self):
|
||||
s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
|
||||
s3imap.create()
|
||||
s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
|
||||
self.assertEqual(s3imap_id, s3imap.id)
|
||||
|
||||
def test_glance_id_to_id_creates_mapping(self):
|
||||
s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
|
||||
s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
|
||||
self.assertEqual('fake-uuid', s3imap.uuid)
|
@ -1,132 +0,0 @@
|
||||
#
|
||||
# Copyright 2013 - Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
"""
|
||||
Unit tests for EC2 error responses.
|
||||
"""
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from nova.api import ec2
|
||||
from nova import context
|
||||
from nova import test
|
||||
from nova import wsgi
|
||||
|
||||
|
||||
class TestClientExceptionEC2(Exception):
|
||||
ec2_code = 'ClientException.Test'
|
||||
message = "Test Client Exception."
|
||||
code = 400
|
||||
|
||||
|
||||
class TestServerExceptionEC2(Exception):
|
||||
ec2_code = 'ServerException.Test'
|
||||
message = "Test Server Exception."
|
||||
code = 500
|
||||
|
||||
|
||||
class Ec2ErrorResponseTestCase(test.NoDBTestCase):
|
||||
"""Test EC2 error responses.
|
||||
|
||||
This deals mostly with api/ec2/__init__.py code, especially
|
||||
the ec2_error_ex helper.
|
||||
"""
|
||||
def setUp(self):
|
||||
super(Ec2ErrorResponseTestCase, self).setUp()
|
||||
self.context = context.RequestContext('test_user_id',
|
||||
'test_project_id')
|
||||
self.req = wsgi.Request.blank('/test')
|
||||
self.req.environ['nova.context'] = self.context
|
||||
|
||||
def _validate_ec2_error(self, response, http_status, ec2_code, msg=None,
|
||||
unknown_msg=False):
|
||||
self.assertEqual(response.status_code, http_status,
|
||||
'Expected HTTP status %s' % http_status)
|
||||
root_e = etree.XML(response.body)
|
||||
self.assertEqual(root_e.tag, 'Response',
|
||||
"Top element must be Response.")
|
||||
errors_e = root_e.find('Errors')
|
||||
self.assertEqual(len(errors_e), 1,
|
||||
"Expected exactly one Error element in Errors.")
|
||||
error_e = errors_e[0]
|
||||
self.assertEqual(error_e.tag, 'Error',
|
||||
"Expected Error element.")
|
||||
# Code
|
||||
code_e = error_e.find('Code')
|
||||
self.assertIsNotNone(code_e, "Code element must be present.")
|
||||
self.assertEqual(code_e.text, ec2_code)
|
||||
# Message
|
||||
if msg or unknown_msg:
|
||||
message_e = error_e.find('Message')
|
||||
self.assertIsNotNone(code_e, "Message element must be present.")
|
||||
if msg:
|
||||
self.assertEqual(message_e.text, msg)
|
||||
elif unknown_msg:
|
||||
self.assertEqual(message_e.text, "Unknown error occurred.",
|
||||
"Error message should be anonymous.")
|
||||
# RequestID
|
||||
requestid_e = root_e.find('RequestID')
|
||||
self.assertIsNotNone(requestid_e,
|
||||
'RequestID element should be present.')
|
||||
self.assertEqual(requestid_e.text, self.context.request_id)
|
||||
|
||||
def test_exception_ec2_4xx(self):
|
||||
"""Test response to EC2 exception with code = 400."""
|
||||
msg = "Test client failure."
|
||||
err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req)
|
||||
self._validate_ec2_error(err, TestClientExceptionEC2.code,
|
||||
TestClientExceptionEC2.ec2_code, msg)
|
||||
|
||||
def test_exception_ec2_5xx(self):
|
||||
"""Test response to EC2 exception with code = 500.
|
||||
|
||||
Expected errors are treated as client ones even with 5xx code.
|
||||
"""
|
||||
msg = "Test client failure with 5xx error code."
|
||||
err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req)
|
||||
self._validate_ec2_error(err, 400, TestServerExceptionEC2.ec2_code,
|
||||
msg)
|
||||
|
||||
def test_unexpected_exception_ec2_4xx(self):
|
||||
"""Test response to unexpected EC2 exception with code = 400."""
|
||||
msg = "Test unexpected client failure."
|
||||
err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req,
|
||||
unexpected=True)
|
||||
self._validate_ec2_error(err, TestClientExceptionEC2.code,
|
||||
TestClientExceptionEC2.ec2_code, msg)
|
||||
|
||||
def test_unexpected_exception_ec2_5xx(self):
|
||||
"""Test response to unexpected EC2 exception with code = 500.
|
||||
|
||||
Server exception messages (with code >= 500 or without code) should
|
||||
be filtered as they might contain sensitive information.
|
||||
"""
|
||||
msg = "Test server failure."
|
||||
err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req,
|
||||
unexpected=True)
|
||||
self._validate_ec2_error(err, TestServerExceptionEC2.code,
|
||||
TestServerExceptionEC2.ec2_code,
|
||||
unknown_msg=True)
|
||||
|
||||
def test_unexpected_exception_builtin(self):
|
||||
"""Test response to builtin unexpected exception.
|
||||
|
||||
Server exception messages (with code >= 500 or without code) should
|
||||
be filtered as they might contain sensitive information.
|
||||
"""
|
||||
msg = "Test server failure."
|
||||
err = ec2.ec2_error_ex(RuntimeError(msg), self.req, unexpected=True)
|
||||
self._validate_ec2_error(err, 500, 'RuntimeError', unknown_msg=True)
|
@ -1,46 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from mox3 import mox
|
||||
import webob
|
||||
|
||||
from nova.api.ec2 import faults
|
||||
from nova import test
|
||||
from nova import wsgi
|
||||
|
||||
|
||||
class TestFaults(test.NoDBTestCase):
|
||||
"""Tests covering ec2 Fault class."""
|
||||
|
||||
def test_fault_exception(self):
|
||||
# Ensure the status_int is set correctly on faults.
|
||||
fault = faults.Fault(webob.exc.HTTPBadRequest(
|
||||
explanation='test'))
|
||||
self.assertIsInstance(fault.wrapped_exc, webob.exc.HTTPBadRequest)
|
||||
|
||||
def test_fault_exception_status_int(self):
|
||||
# Ensure the status_int is set correctly on faults.
|
||||
fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
|
||||
self.assertEqual(fault.wrapped_exc.status_int, 404)
|
||||
|
||||
def test_fault_call(self):
|
||||
# Ensure proper EC2 response on faults.
|
||||
message = 'test message'
|
||||
ex = webob.exc.HTTPNotFound(explanation=message)
|
||||
fault = faults.Fault(ex)
|
||||
req = wsgi.Request.blank('/test')
|
||||
req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id"
|
||||
self.mox.StubOutWithMock(faults, 'ec2_error_response')
|
||||
faults.ec2_error_response(mox.IgnoreArg(), 'HTTPNotFound',
|
||||
message=message, status=ex.status_int)
|
||||
self.mox.ReplayAll()
|
||||
fault(req)
|
@ -1,215 +0,0 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from lxml import etree
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import fixture as utils_fixture
|
||||
import requests
|
||||
from six.moves import range
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from nova.api import ec2
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova import wsgi
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
@webob.dec.wsgify
|
||||
def conditional_forbid(req):
|
||||
"""Helper wsgi app returns 403 if param 'die' is 1."""
|
||||
if 'die' in req.params and req.params['die'] == '1':
|
||||
raise webob.exc.HTTPForbidden()
|
||||
return 'OK'
|
||||
|
||||
|
||||
class LockoutTestCase(test.NoDBTestCase):
|
||||
"""Test case for the Lockout middleware."""
|
||||
def setUp(self):
|
||||
super(LockoutTestCase, self).setUp()
|
||||
self.time_fixture = self.useFixture(utils_fixture.TimeFixture())
|
||||
self.lockout = ec2.Lockout(conditional_forbid)
|
||||
|
||||
def _send_bad_attempts(self, access_key, num_attempts=1):
|
||||
"""Fail x."""
|
||||
for i in range(num_attempts):
|
||||
req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
|
||||
self.assertEqual(req.get_response(self.lockout).status_int, 403)
|
||||
|
||||
def _is_locked_out(self, access_key):
|
||||
"""Sends a test request to see if key is locked out."""
|
||||
req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
|
||||
return (req.get_response(self.lockout).status_int == 403)
|
||||
|
||||
def test_lockout(self):
|
||||
self._send_bad_attempts('test', CONF.lockout_attempts)
|
||||
self.assertTrue(self._is_locked_out('test'))
|
||||
|
||||
def test_timeout(self):
|
||||
self._send_bad_attempts('test', CONF.lockout_attempts)
|
||||
self.assertTrue(self._is_locked_out('test'))
|
||||
self.time_fixture.advance_time_seconds(CONF.lockout_minutes * 60)
|
||||
self.assertFalse(self._is_locked_out('test'))
|
||||
|
||||
def test_multiple_keys(self):
|
||||
self._send_bad_attempts('test1', CONF.lockout_attempts)
|
||||
self.assertTrue(self._is_locked_out('test1'))
|
||||
self.assertFalse(self._is_locked_out('test2'))
|
||||
self.time_fixture.advance_time_seconds(CONF.lockout_minutes * 60)
|
||||
self.assertFalse(self._is_locked_out('test1'))
|
||||
self.assertFalse(self._is_locked_out('test2'))
|
||||
|
||||
def test_window_timeout(self):
|
||||
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
|
||||
self.assertFalse(self._is_locked_out('test'))
|
||||
self.time_fixture.advance_time_seconds(CONF.lockout_window * 60)
|
||||
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
|
||||
self.assertFalse(self._is_locked_out('test'))
|
||||
|
||||
|
||||
class ExecutorTestCase(test.NoDBTestCase):
|
||||
def setUp(self):
|
||||
super(ExecutorTestCase, self).setUp()
|
||||
self.executor = ec2.Executor()
|
||||
|
||||
def _execute(self, invoke):
|
||||
class Fake(object):
|
||||
pass
|
||||
fake_ec2_request = Fake()
|
||||
fake_ec2_request.invoke = invoke
|
||||
|
||||
fake_wsgi_request = Fake()
|
||||
|
||||
fake_wsgi_request.environ = {
|
||||
'nova.context': context.get_admin_context(),
|
||||
'ec2.request': fake_ec2_request,
|
||||
}
|
||||
return self.executor(fake_wsgi_request)
|
||||
|
||||
def _extract_message(self, result):
|
||||
tree = etree.fromstring(result.body)
|
||||
return tree.findall('./Errors')[0].find('Error/Message').text
|
||||
|
||||
def _extract_code(self, result):
|
||||
tree = etree.fromstring(result.body)
|
||||
return tree.findall('./Errors')[0].find('Error/Code').text
|
||||
|
||||
def test_instance_not_found(self):
|
||||
def not_found(context):
|
||||
raise exception.InstanceNotFound(instance_id=5)
|
||||
result = self._execute(not_found)
|
||||
self.assertIn('i-00000005', self._extract_message(result))
|
||||
self.assertEqual('InvalidInstanceID.NotFound',
|
||||
self._extract_code(result))
|
||||
|
||||
def test_instance_not_found_none(self):
|
||||
def not_found(context):
|
||||
raise exception.InstanceNotFound(instance_id=None)
|
||||
|
||||
# NOTE(mikal): we want no exception to be raised here, which was what
|
||||
# was happening in bug/1080406
|
||||
result = self._execute(not_found)
|
||||
self.assertIn('None', self._extract_message(result))
|
||||
self.assertEqual('InvalidInstanceID.NotFound',
|
||||
self._extract_code(result))
|
||||
|
||||
def test_snapshot_not_found(self):
|
||||
def not_found(context):
|
||||
raise exception.SnapshotNotFound(snapshot_id=5)
|
||||
result = self._execute(not_found)
|
||||
self.assertIn('snap-00000005', self._extract_message(result))
|
||||
self.assertEqual('InvalidSnapshot.NotFound',
|
||||
self._extract_code(result))
|
||||
|
||||
def test_volume_not_found(self):
|
||||
def not_found(context):
|
||||
raise exception.VolumeNotFound(volume_id=5)
|
||||
result = self._execute(not_found)
|
||||
self.assertIn('vol-00000005', self._extract_message(result))
|
||||
self.assertEqual('InvalidVolume.NotFound', self._extract_code(result))
|
||||
|
||||
def test_floating_ip_bad_create_request(self):
|
||||
def bad_request(context):
|
||||
raise exception.FloatingIpBadRequest()
|
||||
result = self._execute(bad_request)
|
||||
self.assertIn('BadRequest', self._extract_message(result))
|
||||
self.assertEqual('UnsupportedOperation', self._extract_code(result))
|
||||
|
||||
|
||||
class FakeResponse(object):
|
||||
reason = "Test Reason"
|
||||
|
||||
def __init__(self, status_code=400):
|
||||
self.status_code = status_code
|
||||
|
||||
def json(self):
|
||||
return {}
|
||||
|
||||
|
||||
class KeystoneAuthTestCase(test.NoDBTestCase):
|
||||
def setUp(self):
|
||||
super(KeystoneAuthTestCase, self).setUp()
|
||||
self.kauth = ec2.EC2KeystoneAuth(conditional_forbid)
|
||||
|
||||
def _validate_ec2_error(self, response, http_status, ec2_code):
|
||||
self.assertEqual(response.status_code, http_status,
|
||||
'Expected HTTP status %s' % http_status)
|
||||
root_e = etree.XML(response.body)
|
||||
self.assertEqual(root_e.tag, 'Response',
|
||||
"Top element must be Response.")
|
||||
errors_e = root_e.find('Errors')
|
||||
error_e = errors_e[0]
|
||||
code_e = error_e.find('Code')
|
||||
self.assertIsNotNone(code_e, "Code element must be present.")
|
||||
self.assertEqual(code_e.text, ec2_code)
|
||||
|
||||
def test_no_signature(self):
|
||||
req = wsgi.Request.blank('/test')
|
||||
resp = self.kauth(req)
|
||||
self._validate_ec2_error(resp, 400, 'AuthFailure')
|
||||
|
||||
def test_no_key_id(self):
|
||||
req = wsgi.Request.blank('/test')
|
||||
req.GET['Signature'] = 'test-signature'
|
||||
resp = self.kauth(req)
|
||||
self._validate_ec2_error(resp, 400, 'AuthFailure')
|
||||
|
||||
@mock.patch.object(requests, 'request', return_value=FakeResponse())
|
||||
def test_communication_failure(self, mock_request):
|
||||
req = wsgi.Request.blank('/test')
|
||||
req.GET['Signature'] = 'test-signature'
|
||||
req.GET['AWSAccessKeyId'] = 'test-key-id'
|
||||
resp = self.kauth(req)
|
||||
self._validate_ec2_error(resp, 400, 'AuthFailure')
|
||||
mock_request.assert_called_with('POST', CONF.keystone_ec2_url,
|
||||
data=mock.ANY, headers=mock.ANY,
|
||||
verify=mock.ANY, cert=mock.ANY)
|
||||
|
||||
@mock.patch.object(requests, 'request', return_value=FakeResponse(200))
|
||||
def test_no_result_data(self, mock_request):
|
||||
req = wsgi.Request.blank('/test')
|
||||
req.GET['Signature'] = 'test-signature'
|
||||
req.GET['AWSAccessKeyId'] = 'test-key-id'
|
||||
resp = self.kauth(req)
|
||||
self._validate_ec2_error(resp, 400, 'AuthFailure')
|
||||
mock_request.assert_called_with('POST', CONF.keystone_ec2_url,
|
||||
data=mock.ANY, headers=mock.ANY,
|
||||
verify=mock.ANY, cert=mock.ANY)
|
@ -64,12 +64,6 @@ class ValidatorTestCase(test.NoDBTestCase):
|
||||
self.assertFalse(validator.validate_int(4)(5))
|
||||
self.assertFalse(validator.validate_int()(None))
|
||||
|
||||
def test_validate_ec2_id(self):
|
||||
self.assertFalse(validator.validate_ec2_id('foobar'))
|
||||
self.assertFalse(validator.validate_ec2_id(''))
|
||||
self.assertFalse(validator.validate_ec2_id(1234))
|
||||
self.assertTrue(validator.validate_ec2_id('i-284f3a41'))
|
||||
|
||||
def test_validate_url_path(self):
|
||||
self.assertTrue(validator.validate_url_path('/path/to/file'))
|
||||
self.assertFalse(validator.validate_url_path('path/to/file'))
|
||||
@ -89,15 +83,3 @@ class ValidatorTestCase(test.NoDBTestCase):
|
||||
self.assertTrue(validator.validate_user_data(fixture))
|
||||
self.assertFalse(validator.validate_user_data(False))
|
||||
self.assertFalse(validator.validate_user_data('hello, world!'))
|
||||
|
||||
def test_default_validator(self):
|
||||
expect_pass = {
|
||||
'attribute': 'foobar'
|
||||
}
|
||||
self.assertTrue(validator.validate(expect_pass,
|
||||
validator.DEFAULT_VALIDATOR))
|
||||
expect_fail = {
|
||||
'attribute': 0
|
||||
}
|
||||
self.assertFalse(validator.validate(expect_fail,
|
||||
validator.DEFAULT_VALIDATOR))
|
||||
|
@ -20,6 +20,7 @@ from six.moves import StringIO
|
||||
import glanceclient.exc
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_service import sslutils
|
||||
from oslo_utils import netutils
|
||||
import six
|
||||
import testtools
|
||||
@ -385,6 +386,7 @@ class TestGlanceClientWrapper(test.NoDBTestCase):
|
||||
@mock.patch('glanceclient.Client')
|
||||
def test_create_glance_client_with_ssl(self, client_mock,
|
||||
ssl_enable_mock):
|
||||
sslutils.register_opts(CONF)
|
||||
self.flags(ca_file='foo.cert', cert_file='bar.cert',
|
||||
key_file='wut.key', group='ssl')
|
||||
ctxt = mock.sentinel.ctx
|
||||
|
@ -1,267 +0,0 @@
|
||||
# Copyright 2011 Isaku Yamahata
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import binascii
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import eventlet
|
||||
import fixtures
|
||||
from mox3 import mox
|
||||
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.image import s3
|
||||
from nova import test
|
||||
from nova.tests.unit.image import fake
|
||||
|
||||
|
||||
ami_manifest_xml = """<?xml version="1.0" ?>
|
||||
<manifest>
|
||||
<version>2011-06-17</version>
|
||||
<bundler>
|
||||
<name>test-s3</name>
|
||||
<version>0</version>
|
||||
<release>0</release>
|
||||
</bundler>
|
||||
<machine_configuration>
|
||||
<architecture>x86_64</architecture>
|
||||
<block_device_mapping>
|
||||
<mapping>
|
||||
<virtual>ami</virtual>
|
||||
<device>sda1</device>
|
||||
</mapping>
|
||||
<mapping>
|
||||
<virtual>root</virtual>
|
||||
<device>/dev/sda1</device>
|
||||
</mapping>
|
||||
<mapping>
|
||||
<virtual>ephemeral0</virtual>
|
||||
<device>sda2</device>
|
||||
</mapping>
|
||||
<mapping>
|
||||
<virtual>swap</virtual>
|
||||
<device>sda3</device>
|
||||
</mapping>
|
||||
</block_device_mapping>
|
||||
<kernel_id>aki-00000001</kernel_id>
|
||||
<ramdisk_id>ari-00000001</ramdisk_id>
|
||||
</machine_configuration>
|
||||
</manifest>
|
||||
"""
|
||||
|
||||
file_manifest_xml = """<?xml version="1.0" ?>
|
||||
<manifest>
|
||||
<image>
|
||||
<ec2_encrypted_key>foo</ec2_encrypted_key>
|
||||
<user_encrypted_key>foo</user_encrypted_key>
|
||||
<ec2_encrypted_iv>foo</ec2_encrypted_iv>
|
||||
<parts count="1">
|
||||
<part index="0">
|
||||
<filename>foo</filename>
|
||||
</part>
|
||||
</parts>
|
||||
</image>
|
||||
</manifest>
|
||||
"""
|
||||
|
||||
|
||||
class TestS3ImageService(test.TestCase):
|
||||
def setUp(self):
|
||||
super(TestS3ImageService, self).setUp()
|
||||
self.context = context.RequestContext(None, None)
|
||||
self.useFixture(fixtures.FakeLogger('boto'))
|
||||
|
||||
# set up 3 fixtures to test shows, should have id '1', '2', and '3'
|
||||
db.s3_image_create(self.context,
|
||||
'155d900f-4e14-4e4c-a73d-069cbf4541e6')
|
||||
db.s3_image_create(self.context,
|
||||
'a2459075-d96c-40d5-893e-577ff92e721c')
|
||||
db.s3_image_create(self.context,
|
||||
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
|
||||
|
||||
fake.stub_out_image_service(self)
|
||||
self.image_service = s3.S3ImageService()
|
||||
ec2utils.reset_cache()
|
||||
|
||||
def tearDown(self):
|
||||
super(TestS3ImageService, self).tearDown()
|
||||
fake.FakeImageService_reset()
|
||||
|
||||
def _assertEqualList(self, list0, list1, keys):
|
||||
self.assertEqual(len(list0), len(list1))
|
||||
key = keys[0]
|
||||
for x in list0:
|
||||
self.assertEqual(len(x), len(keys))
|
||||
self.assertIn(key, x)
|
||||
for y in list1:
|
||||
self.assertIn(key, y)
|
||||
if x[key] == y[key]:
|
||||
for k in keys:
|
||||
self.assertEqual(x[k], y[k])
|
||||
|
||||
def test_show_cannot_use_uuid(self):
|
||||
self.assertRaises(exception.ImageNotFound,
|
||||
self.image_service.show, self.context,
|
||||
'155d900f-4e14-4e4c-a73d-069cbf4541e6')
|
||||
|
||||
def test_show_translates_correctly(self):
|
||||
self.image_service.show(self.context, '1')
|
||||
|
||||
def test_show_translates_image_state_correctly(self):
|
||||
def my_fake_show(self, context, image_id, **kwargs):
|
||||
fake_state_map = {
|
||||
'155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading',
|
||||
'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt',
|
||||
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6': 'available'}
|
||||
return {'id': image_id,
|
||||
'name': 'fakeimage123456',
|
||||
'deleted_at': None,
|
||||
'deleted': False,
|
||||
'status': 'active',
|
||||
'is_public': False,
|
||||
'container_format': 'raw',
|
||||
'disk_format': 'raw',
|
||||
'size': '25165824',
|
||||
'properties': {'image_state': fake_state_map[image_id]}}
|
||||
|
||||
# Override part of the fake image service as well just for
|
||||
# this test so we can set the image_state to various values
|
||||
# and test that S3ImageService does the correct mapping for
|
||||
# us. We can't put fake bad or pending states in the real fake
|
||||
# image service as it causes other tests to fail
|
||||
self.stubs.Set(fake._FakeImageService, 'show', my_fake_show)
|
||||
ret_image = self.image_service.show(self.context, '1')
|
||||
self.assertEqual(ret_image['properties']['image_state'], 'pending')
|
||||
ret_image = self.image_service.show(self.context, '2')
|
||||
self.assertEqual(ret_image['properties']['image_state'], 'failed')
|
||||
ret_image = self.image_service.show(self.context, '3')
|
||||
self.assertEqual(ret_image['properties']['image_state'], 'available')
|
||||
|
||||
def test_detail(self):
|
||||
self.image_service.detail(self.context)
|
||||
|
||||
def test_s3_create(self):
|
||||
metadata = {'properties': {
|
||||
'root_device_name': '/dev/sda1',
|
||||
'block_device_mapping': [
|
||||
{'device_name': '/dev/sda1',
|
||||
'snapshot_id': 'snap-12345678',
|
||||
'delete_on_termination': True},
|
||||
{'device_name': '/dev/sda2',
|
||||
'virtual_name': 'ephemeral0'},
|
||||
{'device_name': '/dev/sdb0',
|
||||
'no_device': True}]}}
|
||||
_manifest, image, image_uuid = self.image_service._s3_parse_manifest(
|
||||
self.context, metadata, ami_manifest_xml)
|
||||
|
||||
ret_image = self.image_service.show(self.context, image['id'])
|
||||
self.assertIn('properties', ret_image)
|
||||
properties = ret_image['properties']
|
||||
|
||||
self.assertIn('mappings', properties)
|
||||
mappings = properties['mappings']
|
||||
expected_mappings = [
|
||||
{"device": "sda1", "virtual": "ami"},
|
||||
{"device": "/dev/sda1", "virtual": "root"},
|
||||
{"device": "sda2", "virtual": "ephemeral0"},
|
||||
{"device": "sda3", "virtual": "swap"}]
|
||||
self._assertEqualList(mappings, expected_mappings,
|
||||
['device', 'virtual'])
|
||||
|
||||
self.assertIn('block_device_mapping', properties)
|
||||
block_device_mapping = properties['block_device_mapping']
|
||||
expected_bdm = [
|
||||
{'device_name': '/dev/sda1',
|
||||
'snapshot_id': 'snap-12345678',
|
||||
'delete_on_termination': True},
|
||||
{'device_name': '/dev/sda2',
|
||||
'virtual_name': 'ephemeral0'},
|
||||
{'device_name': '/dev/sdb0',
|
||||
'no_device': True}]
|
||||
self.assertEqual(block_device_mapping, expected_bdm)
|
||||
|
||||
def _initialize_mocks(self):
|
||||
handle, tempf = tempfile.mkstemp(dir='/tmp')
|
||||
ignore = mox.IgnoreArg()
|
||||
mockobj = self.mox.CreateMockAnything()
|
||||
self.stubs.Set(self.image_service, '_conn', mockobj)
|
||||
mockobj(ignore).AndReturn(mockobj)
|
||||
self.stubs.Set(mockobj, 'get_bucket', mockobj)
|
||||
mockobj(ignore).AndReturn(mockobj)
|
||||
self.stubs.Set(mockobj, 'get_key', mockobj)
|
||||
mockobj(ignore).AndReturn(mockobj)
|
||||
self.stubs.Set(mockobj, 'get_contents_as_string', mockobj)
|
||||
mockobj().AndReturn(file_manifest_xml)
|
||||
self.stubs.Set(self.image_service, '_download_file', mockobj)
|
||||
mockobj(ignore, ignore, ignore).AndReturn(tempf)
|
||||
self.stubs.Set(binascii, 'a2b_hex', mockobj)
|
||||
mockobj(ignore).AndReturn('foo')
|
||||
mockobj(ignore).AndReturn('foo')
|
||||
self.stubs.Set(self.image_service, '_decrypt_image', mockobj)
|
||||
mockobj(ignore, ignore, ignore, ignore, ignore).AndReturn(mockobj)
|
||||
self.stubs.Set(self.image_service, '_untarzip_image', mockobj)
|
||||
mockobj(ignore, ignore).AndReturn(tempf)
|
||||
self.mox.ReplayAll()
|
||||
|
||||
def test_s3_create_image_locations(self):
|
||||
image_location_1 = 'testbucket_1/test.img.manifest.xml'
|
||||
# Use another location that starts with a '/'
|
||||
image_location_2 = '/testbucket_2/test.img.manifest.xml'
|
||||
|
||||
metadata = [{'properties': {'image_location': image_location_1}},
|
||||
{'properties': {'image_location': image_location_2}}]
|
||||
|
||||
for mdata in metadata:
|
||||
self._initialize_mocks()
|
||||
image = self.image_service._s3_create(self.context, mdata)
|
||||
eventlet.sleep()
|
||||
translated = self.image_service._translate_id_to_uuid(self.context,
|
||||
image)
|
||||
uuid = translated['id']
|
||||
image_service = fake.FakeImageService()
|
||||
updated_image = image_service.update(self.context, uuid,
|
||||
{'properties': {'image_state': 'available'}},
|
||||
purge_props=False)
|
||||
self.assertEqual(updated_image['properties']['image_state'],
|
||||
'available')
|
||||
|
||||
def test_s3_create_is_public(self):
|
||||
self._initialize_mocks()
|
||||
metadata = {'properties': {
|
||||
'image_location': 'mybucket/my.img.manifest.xml'},
|
||||
'name': 'mybucket/my.img'}
|
||||
img = self.image_service._s3_create(self.context, metadata)
|
||||
eventlet.sleep()
|
||||
translated = self.image_service._translate_id_to_uuid(self.context,
|
||||
img)
|
||||
uuid = translated['id']
|
||||
image_service = fake.FakeImageService()
|
||||
updated_image = image_service.update(self.context, uuid,
|
||||
{'is_public': True}, purge_props=False)
|
||||
self.assertTrue(updated_image['is_public'])
|
||||
self.assertEqual(updated_image['status'], 'active')
|
||||
self.assertEqual(updated_image['properties']['image_state'],
|
||||
'available')
|
||||
|
||||
def test_s3_malicious_tarballs(self):
|
||||
self.assertRaises(exception.NovaException,
|
||||
self.image_service._test_for_malicious_tarball,
|
||||
"/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz'))
|
||||
self.assertRaises(exception.NovaException,
|
||||
self.image_service._test_for_malicious_tarball,
|
||||
"/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz'))
|
@ -1,248 +0,0 @@
|
||||
# Copyright 2011 Isaku Yamahata
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Tests for Block Device Mapping Code.
|
||||
"""
|
||||
|
||||
from nova.api.ec2 import cloud
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova import test
|
||||
from nova.tests.unit import matchers
|
||||
|
||||
|
||||
class BlockDeviceMappingEc2CloudTestCase(test.NoDBTestCase):
|
||||
"""Test Case for Block Device Mapping."""
|
||||
|
||||
def fake_ec2_vol_id_to_uuid(obj, ec2_id):
|
||||
if ec2_id == 'vol-87654321':
|
||||
return '22222222-3333-4444-5555-666666666666'
|
||||
elif ec2_id == 'vol-98765432':
|
||||
return '77777777-8888-9999-0000-aaaaaaaaaaaa'
|
||||
else:
|
||||
return 'OhNoooo'
|
||||
|
||||
def fake_ec2_snap_id_to_uuid(obj, ec2_id):
|
||||
if ec2_id == 'snap-12345678':
|
||||
return '00000000-1111-2222-3333-444444444444'
|
||||
elif ec2_id == 'snap-23456789':
|
||||
return '11111111-2222-3333-4444-555555555555'
|
||||
else:
|
||||
return 'OhNoooo'
|
||||
|
||||
def _assertApply(self, action, bdm_list):
|
||||
for bdm, expected_result in bdm_list:
|
||||
self.assertThat(action(bdm), matchers.DictMatches(expected_result))
|
||||
|
||||
def test_parse_block_device_mapping(self):
|
||||
self.stubs.Set(ec2utils,
|
||||
'ec2_vol_id_to_uuid',
|
||||
self.fake_ec2_vol_id_to_uuid)
|
||||
self.stubs.Set(ec2utils,
|
||||
'ec2_snap_id_to_uuid',
|
||||
self.fake_ec2_snap_id_to_uuid)
|
||||
bdm_list = [
|
||||
({'device_name': '/dev/fake0',
|
||||
'ebs': {'snapshot_id': 'snap-12345678',
|
||||
'volume_size': 1}},
|
||||
{'device_name': '/dev/fake0',
|
||||
'snapshot_id': '00000000-1111-2222-3333-444444444444',
|
||||
'volume_size': 1,
|
||||
'delete_on_termination': True}),
|
||||
|
||||
({'device_name': '/dev/fake1',
|
||||
'ebs': {'snapshot_id': 'snap-23456789',
|
||||
'delete_on_termination': False}},
|
||||
{'device_name': '/dev/fake1',
|
||||
'snapshot_id': '11111111-2222-3333-4444-555555555555',
|
||||
'delete_on_termination': False}),
|
||||
|
||||
({'device_name': '/dev/fake2',
|
||||
'ebs': {'snapshot_id': 'vol-87654321',
|
||||
'volume_size': 2}},
|
||||
{'device_name': '/dev/fake2',
|
||||
'volume_id': '22222222-3333-4444-5555-666666666666',
|
||||
'volume_size': 2,
|
||||
'delete_on_termination': True}),
|
||||
|
||||
({'device_name': '/dev/fake3',
|
||||
'ebs': {'snapshot_id': 'vol-98765432',
|
||||
'delete_on_termination': False}},
|
||||
{'device_name': '/dev/fake3',
|
||||
'volume_id': '77777777-8888-9999-0000-aaaaaaaaaaaa',
|
||||
'delete_on_termination': False}),
|
||||
|
||||
({'device_name': '/dev/fake4',
|
||||
'ebs': {'no_device': True}},
|
||||
{'device_name': '/dev/fake4',
|
||||
'no_device': True}),
|
||||
|
||||
({'device_name': '/dev/fake5',
|
||||
'virtual_name': 'ephemeral0'},
|
||||
{'device_name': '/dev/fake5',
|
||||
'virtual_name': 'ephemeral0'}),
|
||||
|
||||
({'device_name': '/dev/fake6',
|
||||
'virtual_name': 'swap'},
|
||||
{'device_name': '/dev/fake6',
|
||||
'virtual_name': 'swap'}),
|
||||
]
|
||||
self._assertApply(cloud._parse_block_device_mapping, bdm_list)
|
||||
|
||||
def test_format_block_device_mapping(self):
|
||||
bdm_list = [
|
||||
({'device_name': '/dev/fake0',
|
||||
'snapshot_id': 0x12345678,
|
||||
'volume_size': 1,
|
||||
'delete_on_termination': True},
|
||||
{'deviceName': '/dev/fake0',
|
||||
'ebs': {'snapshotId': 'snap-12345678',
|
||||
'volumeSize': 1,
|
||||
'deleteOnTermination': True}}),
|
||||
|
||||
({'device_name': '/dev/fake1',
|
||||
'snapshot_id': 0x23456789},
|
||||
{'deviceName': '/dev/fake1',
|
||||
'ebs': {'snapshotId': 'snap-23456789'}}),
|
||||
|
||||
({'device_name': '/dev/fake2',
|
||||
'snapshot_id': 0x23456789,
|
||||
'delete_on_termination': False},
|
||||
{'deviceName': '/dev/fake2',
|
||||
'ebs': {'snapshotId': 'snap-23456789',
|
||||
'deleteOnTermination': False}}),
|
||||
|
||||
({'device_name': '/dev/fake3',
|
||||
'volume_id': 0x12345678,
|
||||
'volume_size': 1,
|
||||
'delete_on_termination': True},
|
||||
{'deviceName': '/dev/fake3',
|
||||
'ebs': {'snapshotId': 'vol-12345678',
|
||||
'volumeSize': 1,
|
||||
'deleteOnTermination': True}}),
|
||||
|
||||
({'device_name': '/dev/fake4',
|
||||
'volume_id': 0x23456789},
|
||||
{'deviceName': '/dev/fake4',
|
||||
'ebs': {'snapshotId': 'vol-23456789'}}),
|
||||
|
||||
({'device_name': '/dev/fake5',
|
||||
'volume_id': 0x23456789,
|
||||
'delete_on_termination': False},
|
||||
{'deviceName': '/dev/fake5',
|
||||
'ebs': {'snapshotId': 'vol-23456789',
|
||||
'deleteOnTermination': False}}),
|
||||
]
|
||||
self._assertApply(cloud._format_block_device_mapping, bdm_list)
|
||||
|
||||
def test_format_mapping(self):
|
||||
properties = {
|
||||
'mappings': [
|
||||
{'virtual': 'ami',
|
||||
'device': 'sda1'},
|
||||
{'virtual': 'root',
|
||||
'device': '/dev/sda1'},
|
||||
|
||||
{'virtual': 'swap',
|
||||
'device': 'sdb1'},
|
||||
{'virtual': 'swap',
|
||||
'device': 'sdb2'},
|
||||
{'virtual': 'swap',
|
||||
'device': 'sdb3'},
|
||||
{'virtual': 'swap',
|
||||
'device': 'sdb4'},
|
||||
|
||||
{'virtual': 'ephemeral0',
|
||||
'device': 'sdc1'},
|
||||
{'virtual': 'ephemeral1',
|
||||
'device': 'sdc2'},
|
||||
{'virtual': 'ephemeral2',
|
||||
'device': 'sdc3'},
|
||||
],
|
||||
|
||||
'block_device_mapping': [
|
||||
# root
|
||||
{'device_name': '/dev/sda1',
|
||||
'snapshot_id': 0x12345678,
|
||||
'delete_on_termination': False},
|
||||
|
||||
|
||||
# overwrite swap
|
||||
{'device_name': '/dev/sdb2',
|
||||
'snapshot_id': 0x23456789,
|
||||
'delete_on_termination': False},
|
||||
{'device_name': '/dev/sdb3',
|
||||
'snapshot_id': 0x3456789A},
|
||||
{'device_name': '/dev/sdb4',
|
||||
'no_device': True},
|
||||
|
||||
# overwrite ephemeral
|
||||
{'device_name': '/dev/sdc2',
|
||||
'snapshot_id': 0x3456789A,
|
||||
'delete_on_termination': False},
|
||||
{'device_name': '/dev/sdc3',
|
||||
'snapshot_id': 0x456789AB},
|
||||
{'device_name': '/dev/sdc4',
|
||||
'no_device': True},
|
||||
|
||||
# volume
|
||||
{'device_name': '/dev/sdd1',
|
||||
'snapshot_id': 0x87654321,
|
||||
'delete_on_termination': False},
|
||||
{'device_name': '/dev/sdd2',
|
||||
'snapshot_id': 0x98765432},
|
||||
{'device_name': '/dev/sdd3',
|
||||
'snapshot_id': 0xA9875463},
|
||||
{'device_name': '/dev/sdd4',
|
||||
'no_device': True}]}
|
||||
|
||||
expected_result = {
|
||||
'blockDeviceMapping': [
|
||||
# root
|
||||
{'deviceName': '/dev/sda1',
|
||||
'ebs': {'snapshotId': 'snap-12345678',
|
||||
'deleteOnTermination': False}},
|
||||
|
||||
# swap
|
||||
{'deviceName': '/dev/sdb1',
|
||||
'virtualName': 'swap'},
|
||||
{'deviceName': '/dev/sdb2',
|
||||
'ebs': {'snapshotId': 'snap-23456789',
|
||||
'deleteOnTermination': False}},
|
||||
{'deviceName': '/dev/sdb3',
|
||||
'ebs': {'snapshotId': 'snap-3456789a'}},
|
||||
|
||||
# ephemeral
|
||||
{'deviceName': '/dev/sdc1',
|
||||
'virtualName': 'ephemeral0'},
|
||||
{'deviceName': '/dev/sdc2',
|
||||
'ebs': {'snapshotId': 'snap-3456789a',
|
||||
'deleteOnTermination': False}},
|
||||
{'deviceName': '/dev/sdc3',
|
||||
'ebs': {'snapshotId': 'snap-456789ab'}},
|
||||
|
||||
# volume
|
||||
{'deviceName': '/dev/sdd1',
|
||||
'ebs': {'snapshotId': 'snap-87654321',
|
||||
'deleteOnTermination': False}},
|
||||
{'deviceName': '/dev/sdd2',
|
||||
'ebs': {'snapshotId': 'snap-98765432'}},
|
||||
{'deviceName': '/dev/sdd3',
|
||||
'ebs': {'snapshotId': 'snap-a9875463'}}]}
|
||||
|
||||
result = {}
|
||||
cloud._format_mappings(properties, result)
|
||||
self.assertEqual(result['blockDeviceMapping'].sort(),
|
||||
expected_result['blockDeviceMapping'].sort())
|
@ -1,155 +0,0 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Unittets for S3 objectstore clone.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
import boto
|
||||
from boto import exception as boto_exception
|
||||
from boto.s3 import connection as s3
|
||||
from oslo_config import cfg
|
||||
|
||||
from nova.objectstore import s3server
|
||||
from nova import test
|
||||
from nova import wsgi
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('s3_host', 'nova.image.s3')
|
||||
|
||||
# Create a unique temporary directory. We don't delete after test to
|
||||
# allow checking the contents after running tests. Users and/or tools
|
||||
# running the tests need to remove the tests directories.
|
||||
OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
|
||||
|
||||
# Create bucket/images path
|
||||
os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
|
||||
os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
|
||||
|
||||
|
||||
class S3APITestCase(test.NoDBTestCase):
|
||||
"""Test objectstore through S3 API."""
|
||||
|
||||
def setUp(self):
|
||||
"""Setup users, projects, and start a test server."""
|
||||
super(S3APITestCase, self).setUp()
|
||||
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
|
||||
s3_host='127.0.0.1')
|
||||
|
||||
shutil.rmtree(CONF.buckets_path)
|
||||
os.mkdir(CONF.buckets_path)
|
||||
|
||||
router = s3server.S3Application(CONF.buckets_path)
|
||||
self.server = wsgi.Server("S3 Objectstore",
|
||||
router,
|
||||
host=CONF.s3_host,
|
||||
port=0)
|
||||
self.server.start()
|
||||
|
||||
if not boto.config.has_section('Boto'):
|
||||
boto.config.add_section('Boto')
|
||||
|
||||
boto.config.set('Boto', 'num_retries', '0')
|
||||
conn = s3.S3Connection(aws_access_key_id='fake',
|
||||
aws_secret_access_key='fake',
|
||||
host=CONF.s3_host,
|
||||
port=self.server.port,
|
||||
is_secure=False,
|
||||
calling_format=s3.OrdinaryCallingFormat())
|
||||
self.conn = conn
|
||||
|
||||
def get_http_connection(*args):
|
||||
"""Get a new S3 connection, don't attempt to reuse connections."""
|
||||
return self.conn.new_http_connection(*args)
|
||||
|
||||
self.conn.get_http_connection = get_http_connection
|
||||
|
||||
def _ensure_no_buckets(self, buckets):
|
||||
self.assertEqual(len(buckets), 0, "Bucket list was not empty")
|
||||
return True
|
||||
|
||||
def _ensure_one_bucket(self, buckets, name):
|
||||
self.assertEqual(len(buckets), 1,
|
||||
"Bucket list didn't have exactly one element in it")
|
||||
self.assertEqual(buckets[0].name, name, "Wrong name")
|
||||
return True
|
||||
|
||||
def test_list_buckets(self):
|
||||
# Make sure we are starting with no buckets.
|
||||
self._ensure_no_buckets(self.conn.get_all_buckets())
|
||||
|
||||
def test_create_and_delete_bucket(self):
|
||||
# Test bucket creation and deletion.
|
||||
bucket_name = 'testbucket'
|
||||
|
||||
self.conn.create_bucket(bucket_name)
|
||||
self._ensure_one_bucket(self.conn.get_all_buckets(), bucket_name)
|
||||
self.conn.delete_bucket(bucket_name)
|
||||
self._ensure_no_buckets(self.conn.get_all_buckets())
|
||||
|
||||
def test_create_bucket_and_key_and_delete_key_again(self):
|
||||
# Test key operations on buckets.
|
||||
bucket_name = 'testbucket'
|
||||
key_name = 'somekey'
|
||||
key_contents = 'somekey'
|
||||
|
||||
b = self.conn.create_bucket(bucket_name)
|
||||
k = b.new_key(key_name)
|
||||
k.set_contents_from_string(key_contents)
|
||||
|
||||
bucket = self.conn.get_bucket(bucket_name)
|
||||
|
||||
# make sure the contents are correct
|
||||
key = bucket.get_key(key_name)
|
||||
self.assertEqual(key.get_contents_as_string(), key_contents,
|
||||
"Bad contents")
|
||||
|
||||
# delete the key
|
||||
key.delete()
|
||||
|
||||
self._ensure_no_buckets(bucket.get_all_keys())
|
||||
|
||||
def test_unknown_bucket(self):
|
||||
# NOTE(unicell): Since Boto v2.25.0, the underlying implementation
|
||||
# of get_bucket method changed from GET to HEAD.
|
||||
#
|
||||
# Prior to v2.25.0, default validate=True fetched a list of keys in the
|
||||
# bucket and raises S3ResponseError. As a side effect of switching to
|
||||
# HEAD request, get_bucket call now generates less error message.
|
||||
#
|
||||
# To keep original semantics, additional get_all_keys call is
|
||||
# suggestted per Boto document. This case tests both validate=False and
|
||||
# validate=True case for completeness.
|
||||
#
|
||||
# http://docs.pythonboto.org/en/latest/releasenotes/v2.25.0.html
|
||||
# http://docs.pythonboto.org/en/latest/s3_tut.html#accessing-a-bucket
|
||||
bucket_name = 'falalala'
|
||||
self.assertRaises(boto_exception.S3ResponseError,
|
||||
self.conn.get_bucket,
|
||||
bucket_name)
|
||||
bucket = self.conn.get_bucket(bucket_name, validate=False)
|
||||
self.assertRaises(boto_exception.S3ResponseError,
|
||||
bucket.get_all_keys,
|
||||
maxkeys=0)
|
||||
|
||||
def tearDown(self):
|
||||
"""Tear down test server."""
|
||||
self.server.stop()
|
||||
super(S3APITestCase, self).tearDown()
|
@ -67,7 +67,6 @@ monkey_patch_opts = [
|
||||
help='Whether to apply monkey patching'),
|
||||
cfg.ListOpt('monkey_patch_modules',
|
||||
default=[
|
||||
'nova.api.ec2.cloud:%s' % (notify_decorator),
|
||||
'nova.compute.api:%s' % (notify_decorator)
|
||||
],
|
||||
help='List of modules/decorators to monkey patch'),
|
||||
|
@ -0,0 +1,7 @@
|
||||
---
|
||||
upgrade:
|
||||
- All code and tests for Nova's EC2 and ObjectStore API support which
|
||||
was deprecated in Kilo
|
||||
(https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#Upgrade_Notes_2) has
|
||||
been completely removed in Mitaka. This has been replaced by the new
|
||||
ec2-api project (http://git.openstack.org/cgit/openstack/ec2-api/).
|
@ -58,7 +58,6 @@ console_scripts =
|
||||
nova-manage = nova.cmd.manage:main
|
||||
nova-network = nova.cmd.network:main
|
||||
nova-novncproxy = nova.cmd.novncproxy:main
|
||||
nova-objectstore = nova.cmd.objectstore:main
|
||||
nova-rootwrap = oslo_rootwrap.cmd:main
|
||||
nova-rootwrap-daemon = oslo_rootwrap.cmd:daemon
|
||||
nova-scheduler = nova.cmd.scheduler:main
|
||||
|
@ -1,12 +1,3 @@
|
||||
nova.tests.unit.api.ec2.test_api.ApiEc2TestCase
|
||||
nova.tests.unit.api.ec2.test_apirequest.APIRequestTestCase
|
||||
nova.tests.unit.api.ec2.test_cinder_cloud.CinderCloudTestCase
|
||||
nova.tests.unit.api.ec2.test_cloud.CloudTestCase
|
||||
nova.tests.unit.api.ec2.test_cloud.CloudTestCaseNeutronProxy
|
||||
nova.tests.unit.api.ec2.test_ec2_validate.EC2ValidateTestCase
|
||||
nova.tests.unit.api.ec2.test_error_response.Ec2ErrorResponseTestCase
|
||||
nova.tests.unit.api.ec2.test_middleware.ExecutorTestCase
|
||||
nova.tests.unit.api.ec2.test_middleware.KeystoneAuthTestCase
|
||||
nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ActionExtensionTest
|
||||
nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ControllerExtensionTest
|
||||
nova.tests.unit.api.openstack.compute.legacy_v2.test_extensions.ExtensionControllerIdFormatTest
|
||||
@ -97,7 +88,6 @@ nova.tests.unit.db.test_migrations.TestNovaMigrationsMySQL
|
||||
nova.tests.unit.db.test_migrations.TestNovaMigrationsPostgreSQL
|
||||
nova.tests.unit.db.test_migrations.TestNovaMigrationsSQLite
|
||||
nova.tests.unit.image.test_fake.FakeImageServiceTestCase
|
||||
nova.tests.unit.image.test_s3.TestS3ImageService
|
||||
nova.tests.unit.keymgr.test_barbican.BarbicanKeyManagerTestCase
|
||||
nova.tests.unit.keymgr.test_conf_key_mgr.ConfKeyManagerTestCase
|
||||
nova.tests.unit.keymgr.test_key.SymmetricKeyTestCase
|
||||
@ -126,7 +116,6 @@ nova.tests.unit.test_metadata.MetadataPasswordTestCase
|
||||
nova.tests.unit.test_metadata.MetadataTestCase
|
||||
nova.tests.unit.test_metadata.OpenStackMetadataTestCase
|
||||
nova.tests.unit.test_nova_manage.CellCommandsTestCase
|
||||
nova.tests.unit.test_objectstore.S3APITestCase
|
||||
nova.tests.unit.test_pipelib.PipelibTest
|
||||
nova.tests.unit.test_policy.AdminRolePolicyTestCase
|
||||
nova.tests.unit.test_quota.QuotaIntegrationTestCase
|
||||
|
Loading…
Reference in New Issue
Block a user