Remove contrib/rackspace

These resources are unmaintained and (due to bug 1747775) untested.

Change-Id: I932368902abc9a32a4964c064abb49f16aadc1f3
This commit is contained in:
Zane Bitter 2018-02-06 17:21:51 -05:00
parent 93ada4560c
commit 13a5b43d65
24 changed files with 0 additions and 8728 deletions

View File

@ -1,58 +0,0 @@
# Heat resources for working with the Rackspace Cloud
The resources and configuration in this module are for using Heat with the Rackspace Cloud. These resources either
allow using Rackspace services that don't have equivalent services in OpenStack or account for differences between
a generic OpenStack deployment and Rackspace Cloud.
This package also includes a Keystone V2 compatible client plugin that can be used in place of the default client
for clouds running older versions of Keystone.
## Installation
### 1. Install the Rackspace plugins in Heat
NOTE: These instructions assume the value of heat.conf plugin_dirs includes the
default directory /usr/lib/heat.
- To install the plugin, from this directory run:
sudo python ./setup.py install
- (Optional) If you want to enable the Keystone V2 client plugin, set the `keystone_backend` option to
`heat.engine.plugins.heat_keystoneclient_v2.client.KeystoneClientV2`
### 2. Restart heat
Only the process "heat-engine" needs to be restarted to load the newly installed
plugin.
## Resources
The following resources are provided for compatibility:
* `Rackspace::Cloud::Server`:
>Provide compatibility with `OS::Nova::Server` and allow for working `user_data` and `Metadata`. This is deprecated and should be replaced with `OS::Nova::Server` once service compatibility is implemented by Rackspace.
* `Rackspace::Cloud::LoadBalancer`:
>Use the Rackspace Cloud Loadbalancer service; not compatible with `OS::Neutron::LoadBalancer`.
### Usage
#### Templates
#### Configuration
## Heat Keystone V2
Note that some forward compatibility decisions had to be made for the Keystone V2 client plugin:
* Stack domain users are created as users on the stack owner's tenant
rather than the stack's domain
* Trusts are not supported
### How it works
By setting the `keystone_backend` option, the KeystoneBackend class in
`heat/engine/clients/os/keystone/heat_keystoneclient.py` will instantiate the plugin
KeystoneClientV2 class and use that instead of the default client in
`heat/engine/clients/os/keystone/heat_keystoneclient.py`.

View File

@ -1,255 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client Library for Keystone Resources."""
import weakref
from keystoneclient.v2_0 import client as kc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from heat.common import exception
LOG = logging.getLogger('heat.common.keystoneclient')
LOG.info("Keystone V2 loaded")
class KeystoneClientV2(object):
    """Wrap keystone client so we can encapsulate logic used in resources.

    Note: This is intended to be initialized from a resource on a per-session
    basis, so the session context is passed in on initialization.

    Also note that a copy of this is created for every resource as
    self.keystone() via the code in engine/client.py, so there should not be
    any need to directly instantiate instances of this class inside resources
    themselves.
    """

    def __init__(self, context):
        # If a trust_id is specified in the context, we immediately
        # authenticate so we can populate the context with a trust token
        # otherwise, we delay client authentication until needed to avoid
        # unnecessary calls to keystone.
        #
        # Note that when you obtain a token using a trust, it cannot be
        # used to reauthenticate and get another token, so we have to
        # get a new trust-token even if context.auth_token is set.
        #
        # - context.auth_url is expected to contain the v2.0 keystone endpoint
        #
        # A weakref avoids a reference cycle between the context and the
        # client object it caches.
        self._context = weakref.ref(context)
        self._client = None
        if self.context.trust_id:
            # Create a connection to the v2 API, with the trust_id, this
            # populates self.context.auth_token with a trust-scoped token
            self._client = self._v2_client_init()

    @property
    def context(self):
        """Return the request context, asserting it is still alive."""
        ctxt = self._context()
        assert ctxt is not None, "Need a reference to the context"
        return ctxt

    @property
    def client(self):
        """Return the keystone client, authenticating lazily on first use."""
        if not self._client:
            self._client = self._v2_client_init()
        return self._client

    def _v2_client_init(self):
        """Create and authenticate a keystoneclient v2.0 Client.

        Credentials are chosen in priority order: trust_id (using the
        service admin credentials), then auth_token, then username and
        password.

        :raises exception.AuthorizationFailure: if no credentials are
            available, or trust re-scoping/impersonation fails.
        """
        kwargs = {
            'auth_url': self.context.auth_url,
            'endpoint': self.context.auth_url,
            'region_name': cfg.CONF.region_name_for_services
        }
        if self.context.region_name is not None:
            kwargs['region_name'] = self.context.region_name
        auth_kwargs = {}
        # Note try trust_id first, as we can't reuse auth_token in that case
        if self.context.trust_id is not None:
            # We got a trust_id, so we use the admin credentials
            # to authenticate, then re-scope the token to the
            # trust impersonating the trustor user.
            # Note that this currently requires the trustor tenant_id
            # to be passed to the authenticate(), unlike the v3 call
            kwargs.update(self._service_admin_creds())
            auth_kwargs['trust_id'] = self.context.trust_id
            auth_kwargs['tenant_id'] = self.context.tenant_id
        elif self.context.auth_token is not None:
            kwargs['tenant_name'] = self.context.project_name
            kwargs['token'] = self.context.auth_token
        elif self.context.password is not None:
            kwargs['username'] = self.context.username
            kwargs['password'] = self.context.password
            kwargs['tenant_name'] = self.context.project_name
            kwargs['tenant_id'] = self.context.tenant_id
        else:
            LOG.error("Keystone v2 API connection failed, no password "
                      "or auth_token!")
            raise exception.AuthorizationFailure()
        # TLS/connection options are resolved from heat configuration.
        kwargs['cacert'] = self._get_client_option('ca_file')
        kwargs['insecure'] = self._get_client_option('insecure')
        kwargs['cert'] = self._get_client_option('cert_file')
        kwargs['key'] = self._get_client_option('key_file')
        client = kc.Client(**kwargs)
        client.authenticate(**auth_kwargs)
        # If we are authenticating with a trust auth_kwargs are set, so set
        # the context auth_token with the re-scoped trust token
        if auth_kwargs:
            # Sanity check
            if not client.auth_ref.trust_scoped:
                LOG.error("v2 trust token re-scoping failed!")
                raise exception.AuthorizationFailure()
            # All OK so update the context with the token
            self.context.auth_token = client.auth_ref.auth_token
            self.context.auth_url = kwargs.get('auth_url')
            # Ensure the v2 API we're using is not impacted by keystone
            # bug #1239303, otherwise we can't trust the user_id
            if self.context.trustor_user_id != client.auth_ref.user_id:
                LOG.error("Trust impersonation failed, bug #1239303 "
                          "suspected, you may need a newer keystone")
                raise exception.AuthorizationFailure()
        return client

    @staticmethod
    def _service_admin_creds():
        """Return the service admin credentials from keystone_authtoken."""
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        creds = {
            'username': cfg.CONF.keystone_authtoken.admin_user,
            'password': cfg.CONF.keystone_authtoken.admin_password,
            'auth_url': cfg.CONF.keystone_authtoken.auth_uri,
            'tenant_name': cfg.CONF.keystone_authtoken.admin_tenant_name,
        }
        return creds

    def _get_client_option(self, option):
        """Look up a client connection option from heat configuration.

        The [clients_keystone] section takes precedence over the generic
        [clients] section; unknown options raise cfg.NoSuchOptError.
        """
        # look for the option in the [clients_keystone] section
        cfg.CONF.import_opt(option, 'heat.common.config',
                            group='clients_keystone')
        v = getattr(cfg.CONF.clients_keystone, option)
        if v is not None:
            return v
        # look for the option in the generic [clients] section
        cfg.CONF.import_opt(option, 'heat.common.config', group='clients')
        return getattr(cfg.CONF.clients, option)

    def create_stack_user(self, username, password=''):
        """Create a user.

        User can be defined as part of a stack, either via template
        or created internally by a resource. This user will be added to
        the heat_stack_user_role as defined in the config.

        Returns the keystone ID of the resulting user.
        """
        if len(username) > 64:
            LOG.warning("Truncating the username %s to the last 64 "
                        "characters.", username)
            # get the last 64 characters of the username
            username = username[-64:]
        user = self.client.users.create(username,
                                        password,
                                        '%s@openstack.org' % username,
                                        tenant_id=self.context.tenant_id,
                                        enabled=True)
        # We add the new user to a special keystone role
        # This role is designed to allow easier differentiation of the
        # heat-generated "stack users" which will generally have credentials
        # deployed on an instance (hence are implicitly untrusted)
        roles = self.client.roles.list()
        stack_user_role = [r.id for r in roles
                           if r.name == cfg.CONF.heat_stack_user_role]
        if len(stack_user_role) == 1:
            role_id = stack_user_role[0]
            # Fixed: pass the mapping as a lazy argument instead of eagerly
            # interpolating with %, consistent with the other log calls in
            # this module (the message is only rendered if debug is enabled).
            LOG.debug("Adding user %(user)s to role %(role)s",
                      {'user': user.id, 'role': role_id})
            self.client.roles.add_user_role(user.id, role_id,
                                            self.context.tenant_id)
        else:
            LOG.error("Failed to add user %(user)s to role %(role)s, "
                      "check role exists!",
                      {'user': username,
                       'role': cfg.CONF.heat_stack_user_role})
        return user.id

    def delete_stack_user(self, user_id):
        """Delete a stack user by keystone ID."""
        self.client.users.delete(user_id)

    def delete_ec2_keypair(self, user_id, accesskey):
        """Delete an ec2 keypair belonging to the given user."""
        self.client.ec2.delete(user_id, accesskey)

    def get_ec2_keypair(self, access, user_id=None):
        """Return the ec2 keypair with the given access key.

        Defaults to the currently authenticated user when user_id is None.
        """
        uid = user_id or self.client.auth_ref.user_id
        return self.client.ec2.get(uid, access)

    def create_ec2_keypair(self, user_id=None):
        """Create an ec2 keypair scoped to the context tenant."""
        uid = user_id or self.client.auth_ref.user_id
        return self.client.ec2.create(uid, self.context.tenant_id)

    def disable_stack_user(self, user_id):
        """Disable a stack user's keystone account."""
        self.client.users.update_enabled(user_id, False)

    def enable_stack_user(self, user_id):
        """Re-enable a stack user's keystone account."""
        self.client.users.update_enabled(user_id, True)

    def url_for(self, **kwargs):
        """Look up an endpoint URL in the service catalog."""
        return self.client.service_catalog.url_for(**kwargs)

    @property
    def auth_token(self):
        """Return the current auth token, authenticating if necessary."""
        return self.client.auth_token

    # ##################### #
    # V3 Compatible Methods #
    # ##################### #

    def create_stack_domain_user(self, username, project_id, password=None):
        # V2 has no domains, so domain users are created as ordinary stack
        # users on the stack owner's tenant (see module-level caveats).
        return self.create_stack_user(username, password)

    def delete_stack_domain_user(self, user_id, project_id):
        return self.delete_stack_user(user_id)

    def create_stack_domain_project(self, project_id):
        """Use the tenant ID as domain project."""
        return self.context.tenant_id

    def delete_stack_domain_project(self, project_id):
        """Pass through method since no project was created."""
        pass

    def create_stack_domain_user_keypair(self, user_id, project_id):
        return self.create_ec2_keypair(user_id)

    def delete_stack_domain_user_keypair(self, user_id, project_id,
                                         credential_id):
        return self.delete_ec2_keypair(user_id, credential_id)

    # ###################### #
    # V3 Unsupported Methods #
    # ###################### #

    def create_trust_context(self):
        raise exception.NotSupported(feature='Keystone Trusts')

    def delete_trust(self, trust_id):
        raise exception.NotSupported(feature='Keystone Trusts')

View File

@ -1,5 +0,0 @@
# Compatibility shim: the tests in this package still ``import mox``, but
# only the maintained mox3 fork is installed.  Registering mox3's ``mox``
# module under the legacy name makes the old import resolve transparently.
import sys
from mox3 import mox
sys.modules['mox'] = mox

View File

@ -1,274 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import mox
from oslo_config import cfg
from oslo_utils import importutils
from heat.common import exception
from heat.tests import common
from heat.tests import utils
from .. import client as heat_keystoneclient # noqa
class KeystoneClientTest(common.HeatTestCase):
    """Test cases for heat.common.heat_keystoneclient."""
    def setUp(self):
        super(KeystoneClientTest, self).setUp()
        self.ctx = utils.dummy_context()
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        dummy_url = 'http://server.test:5000/v2.0'
        cfg.CONF.set_override('auth_uri', dummy_url,
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_user', 'heat',
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_password', 'verybadpass',
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_tenant_name', 'service',
                              group='keystone_authtoken')
        # Mox recorded expectations are verified automatically on teardown.
        self.addCleanup(self.m.VerifyAll)
    def _stubs_v2(self, method='token', auth_ok=True, trust_scoped=True,
                  user_id='trustor_user_id', region=None):
        # Replace keystoneclient.v2_0.client.Client with a mock that expects
        # exactly the kwargs KeystoneClientV2 should pass for the requested
        # auth method: 'token', 'password' or 'trust'.
        self.mock_ks_client = self.m.CreateMock(heat_keystoneclient.kc.Client)
        self.m.StubOutWithMock(heat_keystoneclient.kc, "Client")
        if method == 'token':
            heat_keystoneclient.kc.Client(
                auth_url=mox.IgnoreArg(),
                endpoint=mox.IgnoreArg(),
                tenant_name='test_tenant',
                token='abcd1234',
                cacert=None,
                cert=None,
                insecure=False,
                region_name=region,
                key=None).AndReturn(self.mock_ks_client)
            self.mock_ks_client.authenticate().AndReturn(auth_ok)
        elif method == 'password':
            heat_keystoneclient.kc.Client(
                auth_url=mox.IgnoreArg(),
                endpoint=mox.IgnoreArg(),
                tenant_name='test_tenant',
                tenant_id='test_tenant_id',
                username='test_username',
                password='password',
                cacert=None,
                cert=None,
                insecure=False,
                region_name=region,
                key=None).AndReturn(self.mock_ks_client)
            self.mock_ks_client.authenticate().AndReturn(auth_ok)
        if method == 'trust':
            # Trust auth first authenticates with the service admin
            # credentials configured in setUp(), then re-scopes to the trust.
            heat_keystoneclient.kc.Client(
                auth_url='http://server.test:5000/v2.0',
                endpoint='http://server.test:5000/v2.0',
                password='verybadpass',
                tenant_name='service',
                username='heat',
                cacert=None,
                cert=None,
                insecure=False,
                region_name=region,
                key=None).AndReturn(self.mock_ks_client)
            self.mock_ks_client.authenticate(trust_id='atrust123',
                                             tenant_id='test_tenant_id'
                                             ).AndReturn(auth_ok)
            self.mock_ks_client.auth_ref = self.m.CreateMockAnything()
            self.mock_ks_client.auth_ref.trust_scoped = trust_scoped
            self.mock_ks_client.auth_ref.auth_token = 'atrusttoken'
            self.mock_ks_client.auth_ref.user_id = user_id
    def test_username_length(self):
        """Test that user names >64 characters are properly truncated."""
        self._stubs_v2()
        # a >64 character user name and the expected version
        long_user_name = 'U' * 64 + 'S'
        good_user_name = long_user_name[-64:]
        # mock keystone client user functions
        self.mock_ks_client.users = self.m.CreateMockAnything()
        mock_user = self.m.CreateMockAnything()
        # when keystone is called, the name should have been truncated
        # to the last 64 characters of the long name
        (self.mock_ks_client.users.create(good_user_name, 'password',
                                          mox.IgnoreArg(), enabled=True,
                                          tenant_id=mox.IgnoreArg())
         .AndReturn(mock_user))
        # mock out the call to roles; will send an error log message but does
        # not raise an exception
        self.mock_ks_client.roles = self.m.CreateMockAnything()
        self.mock_ks_client.roles.list().AndReturn([])
        self.m.ReplayAll()
        # call create_stack_user with a long user name.
        # the cleanup VerifyAll should verify that though we passed
        # long_user_name, keystone was actually called with a truncated
        # user name
        self.ctx.trust_id = None
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        heat_ks_client.create_stack_user(long_user_name, password='password')
    def test_init_v2_password(self):
        """Test creating the client, user/password context."""
        self._stubs_v2(method='password')
        self.m.ReplayAll()
        self.ctx.auth_token = None
        self.ctx.trust_id = None
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        # Accessing .client triggers the lazy authentication path.
        self.assertIsNotNone(heat_ks_client.client)
    def test_init_v2_bad_nocreds(self):
        """Test creating the client without trusts, no credentials."""
        self.ctx.auth_token = None
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.trust_id = None
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        self.assertRaises(exception.AuthorizationFailure,
                          heat_ks_client._v2_client_init)
    def test_trust_init(self):
        """Test consuming a trust when initializing."""
        self._stubs_v2(method='trust')
        self.m.ReplayAll()
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.auth_token = None
        self.ctx.trust_id = 'atrust123'
        self.ctx.trustor_user_id = 'trustor_user_id'
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        client = heat_ks_client.client
        self.assertIsNotNone(client)
    def test_trust_init_fail(self):
        """Test consuming a trust when initializing, error scoping."""
        self._stubs_v2(method='trust', trust_scoped=False)
        self.m.ReplayAll()
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.auth_token = None
        self.ctx.trust_id = 'atrust123'
        self.ctx.trustor_user_id = 'trustor_user_id'
        self.assertRaises(exception.AuthorizationFailure,
                          heat_keystoneclient.KeystoneClientV2, self.ctx)
    def test_trust_init_fail_impersonation(self):
        """Test consuming a trust when initializing, impersonation error."""
        self._stubs_v2(method='trust', user_id='wrong_user_id')
        self.m.ReplayAll()
        self.ctx.username = 'heat'
        self.ctx.password = None
        self.ctx.auth_token = None
        self.ctx.trust_id = 'atrust123'
        self.ctx.trustor_user_id = 'trustor_user_id'
        self.assertRaises(exception.AuthorizationFailure,
                          heat_keystoneclient.KeystoneClientV2, self.ctx)
    def test_trust_init_pw(self):
        """Test trust_id takes precedence when username/password specified."""
        self._stubs_v2(method='trust')
        self.m.ReplayAll()
        self.ctx.auth_token = None
        self.ctx.trust_id = 'atrust123'
        self.ctx.trustor_user_id = 'trustor_user_id'
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        # With a trust, authentication happens eagerly in __init__.
        self.assertIsNotNone(heat_ks_client._client)
    def test_trust_init_token(self):
        """Test trust_id takes precedence when token specified."""
        self._stubs_v2(method='trust')
        self.m.ReplayAll()
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.trust_id = 'atrust123'
        self.ctx.trustor_user_id = 'trustor_user_id'
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        self.assertIsNotNone(heat_ks_client._client)
    def test_region_name(self):
        """Test region_name is used when specified."""
        self._stubs_v2(method='trust', region='region123')
        self.m.ReplayAll()
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.auth_token = None
        self.ctx.trust_id = 'atrust123'
        self.ctx.trustor_user_id = 'trustor_user_id'
        self.ctx.region_name = 'region123'
        heat_keystoneclient.KeystoneClientV2(self.ctx)
        self.m.VerifyAll()
    # ##################### #
    # V3 Compatible Methods #
    # ##################### #
    def test_create_stack_domain_user_pass_through_to_create_stack_user(self):
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        mock_create_stack_user = mock.Mock()
        heat_ks_client.create_stack_user = mock_create_stack_user
        heat_ks_client.create_stack_domain_user('username', 'project_id',
                                                'password')
        mock_create_stack_user.assert_called_once_with('username', 'password')
    def test_delete_stack_domain_user_pass_through_to_delete_stack_user(self):
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        mock_delete_stack_user = mock.Mock()
        heat_ks_client.delete_stack_user = mock_delete_stack_user
        heat_ks_client.delete_stack_domain_user('user_id', 'project_id')
        mock_delete_stack_user.assert_called_once_with('user_id')
    def test_create_stack_domain_project(self):
        # V2 has no domains; the stack owner's tenant stands in.
        tenant_id = self.ctx.tenant_id
        ks = heat_keystoneclient.KeystoneClientV2(self.ctx)
        self.assertEqual(tenant_id, ks.create_stack_domain_project('fakeid'))
    def test_delete_stack_domain_project(self):
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        self.assertIsNone(heat_ks_client.delete_stack_domain_project('fakeid'))
    # ###################### #
    # V3 Unsupported Methods #
    # ###################### #
    def test_create_trust_context(self):
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        self.assertRaises(exception.NotSupported,
                          heat_ks_client.create_trust_context)
    def test_delete_trust(self):
        heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
        self.assertRaises(exception.NotSupported,
                          heat_ks_client.delete_trust,
                          'fake_trust_id')

View File

@ -1 +0,0 @@
"""Contributed Rackspace-specific resources."""

View File

@ -1,246 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client Libraries for Rackspace Resources."""
import hashlib
import random
import time
from glanceclient import client as gc
from oslo_config import cfg
from oslo_log import log as logging
from six.moves.urllib import parse
from swiftclient import utils as swiftclient_utils
from troveclient import client as tc
from heat.common import exception
from heat.engine.clients import client_plugin
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine.clients.os import swift
from heat.engine.clients.os import trove
LOG = logging.getLogger(__name__)
try:
import pyrax
except ImportError:
pyrax = None
class RackspaceClientPlugin(client_plugin.ClientPlugin):
    """Base client plugin that authenticates against Rackspace via pyrax."""
    # Lazily-created pyrax context; None until _authenticate() runs.
    pyrax = None
    def _get_client(self, name):
        # Authenticate on first use; later calls reuse the cached context.
        if self.pyrax is None:
            self._authenticate()
        return self.pyrax.get_client(
            name, cfg.CONF.region_name_for_services)
    def _authenticate(self):
        """Create an authenticated client context."""
        self.pyrax = pyrax.create_context("rackspace")
        self.pyrax.auth_endpoint = self.context.auth_url
        LOG.info("Authenticating username: %s",
                 self.context.username)
        tenant = self.context.tenant_id
        tenant_name = self.context.tenant
        # Reuse the request's existing token instead of re-authenticating
        # with a password.
        self.pyrax.auth_with_token(self.context.auth_token,
                                   tenant_id=tenant,
                                   tenant_name=tenant_name)
        if not self.pyrax.authenticated:
            LOG.warning("Pyrax Authentication Failed.")
            raise exception.AuthorizationFailure()
        LOG.info("User %s authenticated successfully.",
                 self.context.username)
class RackspaceAutoScaleClient(RackspaceClientPlugin):

    def _create(self):
        """Create a client for the Rackspace Auto Scale service."""
        service = "autoscale"
        return self._get_client(service)
class RackspaceCloudLBClient(RackspaceClientPlugin):

    def _create(self):
        """Create a client for the Rackspace cloud load balancer service."""
        service = "load_balancer"
        return self._get_client(service)
class RackspaceCloudDNSClient(RackspaceClientPlugin):

    def _create(self):
        """Create a client for the Rackspace cloud DNS service."""
        service = "dns"
        return self._get_client(service)
class RackspaceNovaClient(nova.NovaClientPlugin,
                          RackspaceClientPlugin):

    def _create(self):
        """Rackspace cloudservers client.

        Falls back to the standard Nova client when the Rackspace
        compute client is unavailable.
        """
        return (self._get_client("compute") or
                super(RackspaceNovaClient, self)._create())
class RackspaceCloudNetworksClient(RackspaceClientPlugin):
    def _create(self):
        """Rackspace cloud networks client.

        Though pyrax "fixed" the network client bugs that were introduced
        in 1.8, it still doesn't work for contexts because of caching of the
        nova client.
        """
        if not self.pyrax:
            self._authenticate()
        # need special handling now since the contextual
        # pyrax doesn't handle "networks" not being in
        # the catalog
        # NOTE(review): _get_service_endpoint and _client_classes are
        # private pyrax internals -- presumably stable for the pinned
        # pyrax version; verify on any pyrax upgrade.
        ep = pyrax._get_service_endpoint(
            self.pyrax, "compute", region=cfg.CONF.region_name_for_services)
        cls = pyrax._client_classes['compute:network']
        client = cls(self.pyrax,
                     region_name=cfg.CONF.region_name_for_services,
                     management_url=ep)
        return client
class RackspaceTroveClient(trove.TroveClientPlugin):
    """Rackspace trove client.

    Since the pyrax module uses its own client implementation for Cloud
    Databases, we have to skip pyrax on this one and override the super
    implementation to account for custom service type and regionalized
    management url.
    """
    def _create(self):
        # Rackspace publishes Cloud Databases under a vendor-specific
        # service type rather than the upstream 'database' type.
        service_type = "rax:database"
        con = self.context
        endpoint_type = self._get_client_option('trove', 'endpoint_type')
        args = {
            'service_type': service_type,
            'auth_url': con.auth_url,
            'proxy_token': con.auth_token,
            'username': None,
            'password': None,
            'cacert': self._get_client_option('trove', 'ca_file'),
            'insecure': self._get_client_option('trove', 'insecure'),
            'endpoint_type': endpoint_type
        }
        client = tc.Client('1.0', **args)
        region = cfg.CONF.region_name_for_services
        management_url = self.url_for(service_type=service_type,
                                      endpoint_type=endpoint_type,
                                      region_name=region)
        # Reuse the context's token and point the inner HTTP client at the
        # regionalized management endpoint from the service catalog.
        client.client.auth_token = con.auth_token
        client.client.management_url = management_url
        return client
class RackspaceCinderClient(cinder.CinderClientPlugin):

    def _create(self):
        """Override the region for the cinder client."""
        region = cfg.CONF.region_name_for_services
        client = super(RackspaceCinderClient, self)._create()
        # Re-point the client at the regional volume endpoint.
        client.client.management_url = self.url_for(
            service_type='volume', region_name=region)
        return client
class RackspaceSwiftClient(swift.SwiftClientPlugin):

    def is_valid_temp_url_path(self, path):
        """Return True if path is a valid Swift TempURL path, False otherwise.

        A Swift TempURL path must:
        - Be five parts, ['', 'v1', 'account', 'container', 'object']
        - Be a v1 request
        - Have account, container, and object values
        - Have an object value with more than just '/'s

        :param path: The TempURL path
        :type path: string
        """
        parts = path.split('/', 4)
        return bool(len(parts) == 5 and
                    not parts[0] and
                    parts[1] == 'v1' and
                    parts[2] and
                    parts[3] and
                    parts[4].strip('/'))

    def get_temp_url(self, container_name, obj_name, timeout=None,
                     method='PUT'):
        """Return a Swift TempURL for the given container/object.

        Generates and stores an account temp-url key if one is not
        already set on the account.
        """
        def tenant_uuid():
            # The Rackspace catalog exposes the object-store tenant via a
            # special role on the token rather than the keystone tenant.
            access = self.context.auth_token_info['access']
            for role in access['user']['roles']:
                if role['name'] == 'object-store:default':
                    return role['tenantId']
        key_header = 'x-account-meta-temp-url-key'
        if key_header in self.client().head_account():
            key = self.client().head_account()[key_header]
        else:
            # Bug fix: hashlib requires a bytes-like object on Python 3,
            # so encode the random seed string before hashing.
            # NOTE(security): ``random`` is not a CSPRNG; the ``secrets``
            # module would be a stronger source for this key.
            seed = str(random.getrandbits(256)).encode('utf-8')
            key = hashlib.sha224(seed).hexdigest()[:32]
            self.client().post_account({key_header: key})
        path = '/v1/%s/%s/%s' % (tenant_uuid(), container_name, obj_name)
        if timeout is None:
            # Cap the expiry just short of swift's maximum epoch.
            timeout = swift.MAX_EPOCH - 60 - time.time()
        tempurl = swiftclient_utils.generate_temp_url(path, timeout, key,
                                                      method)
        sw_url = parse.urlparse(self.client().url)
        return '%s://%s%s' % (sw_url.scheme, sw_url.netloc, tempurl)
class RackspaceGlanceClient(glance.GlanceClientPlugin):

    def _create(self, version=None):
        """Create a glance client pointed at the regional image endpoint."""
        context = self.context
        endpoint_type = self._get_client_option('glance', 'endpoint_type')
        raw_endpoint = self.url_for(
            service_type='image',
            endpoint_type=endpoint_type,
            region_name=cfg.CONF.region_name_for_services)
        # Rackspace service catalog includes a tenant scoped glance
        # endpoint so we have to munge the url a bit: keep only the
        # scheme and hostname, dropping the tenant path suffix.
        parsed = parse.urlparse(raw_endpoint)
        endpoint = "%s://%s" % (parsed.scheme, parsed.hostname)
        kwargs = {
            'auth_url': context.auth_url,
            'service_type': 'image',
            'project_id': context.tenant,
            'token': self.auth_token,
            'endpoint_type': endpoint_type,
            'ca_file': self._get_client_option('glance', 'ca_file'),
            'cert_file': self._get_client_option('glance', 'cert_file'),
            'key_file': self._get_client_option('glance', 'key_file'),
            'insecure': self._get_client_option('glance', 'insecure')
        }
        return gc.Client('2', endpoint, **kwargs)

View File

@ -1,789 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources for Rackspace Auto Scale."""
import copy
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import template as templatem
# pyrax is an optional dependency: fall back to dummy exception classes so
# this module can still be imported (e.g. in the test environment) and code
# that catches Forbidden/NotFound keeps working.
try:
    from pyrax.exceptions import Forbidden
    from pyrax.exceptions import NotFound
    PYRAX_INSTALLED = True
except ImportError:
    class Forbidden(Exception):
        """Dummy pyrax exception - only used for testing."""
    class NotFound(Exception):
        """Dummy pyrax exception - only used for testing."""
    # Flag checked elsewhere to decide whether real pyrax calls are possible.
    PYRAX_INSTALLED = False
class Group(resource.Resource):
    """Represents a Rackspace Auto Scale scaling group."""

    # pyrax differs drastically from the actual Auto Scale API. We'll prefer
    # the true API here, but since pyrax doesn't support the full flexibility
    # of the API, we'll have to restrict what users can provide.

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # properties are identical to the API POST /groups.
    PROPERTIES = (
        GROUP_CONFIGURATION, LAUNCH_CONFIGURATION,
    ) = (
        'groupConfiguration', 'launchConfiguration',
    )

    # Keys accepted inside the groupConfiguration map (Auto Scale API names).
    _GROUP_CONFIGURATION_KEYS = (
        GROUP_CONFIGURATION_MAX_ENTITIES, GROUP_CONFIGURATION_COOLDOWN,
        GROUP_CONFIGURATION_NAME, GROUP_CONFIGURATION_MIN_ENTITIES,
        GROUP_CONFIGURATION_METADATA,
    ) = (
        'maxEntities', 'cooldown',
        'name', 'minEntities',
        'metadata',
    )

    # Keys accepted inside the launchConfiguration map.
    _LAUNCH_CONFIG_KEYS = (
        LAUNCH_CONFIG_ARGS, LAUNCH_CONFIG_TYPE,
    ) = (
        'args', 'type',
    )

    # launchConfiguration.args sub-keys; exactly one of 'server' or 'stack'
    # must be provided (enforced in validate()).
    _LAUNCH_CONFIG_ARGS_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
        LAUNCH_CONFIG_ARGS_SERVER,
        LAUNCH_CONFIG_ARGS_STACK,
    ) = (
        'loadBalancers',
        'server',
        'stack',
    )

    _LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
    ) = (
        'loadBalancerId',
        'port',
    )

    _LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
        LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
        LAUNCH_CONFIG_ARGS_SERVER_METADATA,
        LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
        LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
        LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
        LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
        LAUNCH_CONFIG_ARGS_SERVER_USER_DATA,
        LAUNCH_CONFIG_ARGS_SERVER_CDRIVE
    ) = (
        'name', 'flavorRef',
        'imageRef',
        'metadata',
        'personality',
        'networks',
        'diskConfig',  # technically maps to OS-DCF:diskConfig
        'key_name',
        'user_data',
        'config_drive'
    )

    _LAUNCH_CONFIG_ARGS_SERVER_NETWORK_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID,
    ) = (
        'uuid',
    )

    _LAUNCH_CONFIG_ARGS_STACK_KEYS = (
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE,
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL,
        LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK,
        LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT,
        LAUNCH_CONFIG_ARGS_STACK_FILES,
        LAUNCH_CONFIG_ARGS_STACK_PARAMETERS,
        LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS
    ) = (
        'template',
        'template_url',
        'disable_rollback',
        'environment',
        'files',
        'parameters',
        'timeout_mins'
    )

    # Schema for launchConfiguration.args, shared by properties_schema below.
    _launch_configuration_args_schema = {
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS: properties.Schema(
            properties.Schema.LIST,
            _('List of load balancers to hook the '
              'server up to. If not specified, no '
              'load balancing will be configured.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('ID of the load balancer.'),
                        required=True
                    ),
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Server port to connect the load balancer to.')
                    ),
                },
            )
        ),
        LAUNCH_CONFIG_ARGS_SERVER: properties.Schema(
            properties.Schema.MAP,
            _('Server creation arguments, as accepted by the Cloud Servers '
              'server creation API.'),
            required=False,
            schema={
                LAUNCH_CONFIG_ARGS_SERVER_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Server name.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the flavor to boot onto.'),
                    constraints=[
                        constraints.CustomConstraint('nova.flavor')
                    ],
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the image to boot with.'),
                    constraints=[
                        constraints.CustomConstraint('glance.image')
                    ],
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Metadata key and value pairs.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY: properties.Schema(
                    properties.Schema.MAP,
                    _('File path and contents.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_CDRIVE: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Enable config drive on the instance.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_USER_DATA: properties.Schema(
                    properties.Schema.STRING,
                    _('User data for bootstrapping the instance.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_NETWORKS: properties.Schema(
                    properties.Schema.LIST,
                    _('Networks to attach to. If unspecified, the instance '
                      'will be attached to the public Internet and private '
                      'ServiceNet networks.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID:
                                properties.Schema(
                                    properties.Schema.STRING,
                                    _('UUID of network to attach to.'),
                                    required=True)
                        }
                    )
                ),
                LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG: properties.Schema(
                    properties.Schema.STRING,
                    _('Configuration specifying the partition layout. AUTO to '
                      'create a partition utilizing the entire disk, and '
                      'MANUAL to create a partition matching the source '
                      'image.'),
                    constraints=[
                        constraints.AllowedValues(['AUTO', 'MANUAL']),
                    ]
                ),
                LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of a previously created SSH keypair to allow '
                      'key-based authentication to the server.')
                ),
            },
        ),
        LAUNCH_CONFIG_ARGS_STACK: properties.Schema(
            properties.Schema.MAP,
            _('The attributes that Auto Scale uses to create a new stack. The '
              'attributes that you specify for the stack entity apply to all '
              'new stacks in the scaling group. Note the stack arguments are '
              'directly passed to Heat when creating a stack.'),
            schema={
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE: properties.Schema(
                    properties.Schema.STRING,
                    _('The template that describes the stack. Either the '
                      'template or template_url property must be specified.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL: properties.Schema(
                    properties.Schema.STRING,
                    _('A URI to a template. Either the template or '
                      'template_url property must be specified.')
                ),
                LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Keep the resources that have been created if the stack '
                      'fails to create. Defaults to True.'),
                    default=True
                ),
                LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT: properties.Schema(
                    properties.Schema.MAP,
                    _('The environment for the stack.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_FILES: properties.Schema(
                    properties.Schema.MAP,
                    _('The contents of files that the template references.')
                ),
                LAUNCH_CONFIG_ARGS_STACK_PARAMETERS: properties.Schema(
                    properties.Schema.MAP,
                    _('Key/value pairs of the parameters and their values to '
                      'pass to the parameters in the template.')
                ),
                LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The stack creation timeout in minutes.')
                )
            }
        )
    }

    properties_schema = {
        GROUP_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Group configuration.'),
            schema={
                GROUP_CONFIGURATION_MAX_ENTITIES: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Maximum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_COOLDOWN: properties.Schema(
                    properties.Schema.NUMBER,
                    _('Number of seconds after capacity changes during '
                      'which further capacity changes are disabled.'),
                    required=True
                ),
                GROUP_CONFIGURATION_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_MIN_ENTITIES: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Minimum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Arbitrary key/value metadata to associate with '
                      'this group.')
                ),
            },
            required=True,
            update_allowed=True
        ),
        LAUNCH_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Launch configuration.'),
            schema={
                LAUNCH_CONFIG_ARGS: properties.Schema(
                    properties.Schema.MAP,
                    _('Type-specific launch arguments.'),
                    schema=_launch_configuration_args_schema,
                    required=True
                ),
                LAUNCH_CONFIG_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('Launch configuration method. Only launch_server and '
                      'launch_stack are currently supported.'),
                    required=True,
                    constraints=[
                        constraints.AllowedValues(['launch_server',
                                                   'launch_stack']),
                    ]
                ),
            },
            required=True,
            update_allowed=True
        ),
        # We don't allow scaling policies to be specified here, despite the
        # fact that the API supports it. Users should use the ScalingPolicy
        # resource.
    }
def _get_group_config_args(self, groupconf):
"""Get the groupConfiguration-related pyrax arguments."""
return dict(
name=groupconf[self.GROUP_CONFIGURATION_NAME],
cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))
def _get_launch_config_server_args(self, launchconf):
lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
lbs = copy.deepcopy(lb_args)
for lb in lbs:
# if the port is not specified, the lbid must be that of a
# RackConnectV3 lb pool.
if not lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]:
del lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]
continue
lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
personality = server_args.get(
self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY)
if personality:
personality = [{'path': k, 'contents': v} for k, v in
personality.items()]
user_data = server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_USER_DATA)
cdrive = (server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_CDRIVE) or
bool(user_data is not None and len(user_data.strip())))
image_id = self.client_plugin('glance').find_image_by_name_or_id(
server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF])
flavor_id = self.client_plugin('nova').find_flavor_by_name_or_id(
server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF])
return dict(
launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
server_name=server_args[self.GROUP_CONFIGURATION_NAME],
image=image_id,
flavor=flavor_id,
disk_config=server_args.get(
self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
metadata=server_args.get(self.GROUP_CONFIGURATION_METADATA),
config_drive=cdrive,
user_data=user_data,
personality=personality,
networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
load_balancers=lbs,
key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
)
def _get_launch_config_stack_args(self, launchconf):
lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
stack_args = lcargs[self.LAUNCH_CONFIG_ARGS_STACK]
return dict(
launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
template=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE],
template_url=stack_args[
self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL],
disable_rollback=stack_args[
self.LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK],
environment=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT],
files=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_FILES],
parameters=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_PARAMETERS],
timeout_mins=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS]
)
def _get_launch_config_args(self, launchconf):
"""Get the launchConfiguration-related pyrax arguments."""
if launchconf[self.LAUNCH_CONFIG_ARGS].get(
self.LAUNCH_CONFIG_ARGS_SERVER):
return self._get_launch_config_server_args(launchconf)
else:
return self._get_launch_config_stack_args(launchconf)
def _get_create_args(self):
"""Get pyrax-style arguments for creating a scaling group."""
args = self._get_group_config_args(
self.properties[self.GROUP_CONFIGURATION])
args['group_metadata'] = args.pop('metadata')
args.update(self._get_launch_config_args(
self.properties[self.LAUNCH_CONFIGURATION]))
return args
    def handle_create(self):
        """Create the autoscaling group and set resource_id.

        The resource_id is set to the resulting group's ID.
        """
        asclient = self.auto_scale()
        group = asclient.create(**self._get_create_args())
        # pyrax may return a non-string id; store it as a string.
        self.resource_id_set(str(group.id))
    def handle_check(self):
        """Verify the scaling group still exists (raises if it is gone)."""
        self.auto_scale().get(self.resource_id)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the group configuration and the launch configuration."""
        asclient = self.auto_scale()
        # Each replace call overwrites the corresponding configuration
        # wholesale with the new property values.
        if self.GROUP_CONFIGURATION in prop_diff:
            args = self._get_group_config_args(
                prop_diff[self.GROUP_CONFIGURATION])
            asclient.replace(self.resource_id, **args)
        if self.LAUNCH_CONFIGURATION in prop_diff:
            args = self._get_launch_config_args(
                prop_diff[self.LAUNCH_CONFIGURATION])
            asclient.replace_launch_config(self.resource_id, **args)
    def handle_delete(self):
        """Delete the scaling group.

        Since Auto Scale doesn't allow deleting a group until all its servers
        are gone, we must set the minEntities and maxEntities of the group to 0
        and then keep trying the delete until Auto Scale has deleted all the
        servers and the delete will succeed.
        """
        if self.resource_id is None:
            # Never created; nothing to do.
            return
        asclient = self.auto_scale()
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        # Shrink the group to zero so Auto Scale will remove its servers;
        # check_delete_complete() performs the actual delete retries.
        args['min_entities'] = 0
        args['max_entities'] = 0
        try:
            asclient.replace(self.resource_id, **args)
        except NotFound:
            # Group already gone; deletion is effectively complete.
            pass
def check_delete_complete(self, result):
"""Try the delete operation until it succeeds."""
if self.resource_id is None:
return True
try:
self.auto_scale().delete(self.resource_id)
except Forbidden:
return False
except NotFound:
return True
else:
return True
def _check_rackconnect_v3_pool_exists(self, pool_id):
pools = self.client("rackconnect").list_load_balancer_pools()
if pool_id in (p.id for p in pools):
return True
return False
    def validate(self):
        """Validate the launch configuration and its cross-field rules."""
        super(Group, self).validate()
        launchconf = self.properties[self.LAUNCH_CONFIGURATION]
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        server_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_SERVER)
        st_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_STACK)
        # launch_server and launch_stack are required and mutually exclusive.
        if ((not server_args and not st_args) or
                (server_args and st_args)):
            msg = (_('Must provide one of %(server)s or %(stack)s in %(conf)s')
                   % {'server': self.LAUNCH_CONFIG_ARGS_SERVER,
                      'stack': self.LAUNCH_CONFIG_ARGS_STACK,
                      'conf': self.LAUNCH_CONFIGURATION})
            raise exception.StackValidationFailed(msg)
        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        # Copy so the pop-free inspection below cannot disturb properties.
        lbs = copy.deepcopy(lb_args)
        for lb in lbs:
            lb_port = lb.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT)
            lb_id = lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID]
            if not lb_port:
                # check if lb id is a valid RCV3 pool id
                if not self._check_rackconnect_v3_pool_exists(lb_id):
                    msg = _('Could not find RackConnectV3 pool '
                            'with id %s') % (lb_id)
                    raise exception.StackValidationFailed(msg)
        if st_args:
            st_tmpl = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE)
            st_tmpl_url = st_args.get(
                self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL)
            st_env = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT)
            # template and template_url are required and mutually exclusive.
            if ((not st_tmpl and not st_tmpl_url) or
                    (st_tmpl and st_tmpl_url)):
                msg = _('Must provide one of template or template_url.')
                raise exception.StackValidationFailed(msg)
            if st_tmpl:
                st_files = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_FILES)
                # Parse the inline template now so a broken template fails
                # validation rather than group creation.
                try:
                    tmpl = template_format.simple_parse(st_tmpl)
                    templatem.Template(tmpl, files=st_files, env=st_env)
                except Exception as exc:
                    msg = (_('Encountered error while loading template: %s') %
                           six.text_type(exc))
                    raise exception.StackValidationFailed(msg)
    def auto_scale(self):
        """Return the pyrax Auto Scale client."""
        return self.client('auto_scale')
class ScalingPolicy(resource.Resource):
    """Represents a Rackspace Auto Scale scaling policy."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        GROUP, NAME, CHANGE, CHANGE_PERCENT, DESIRED_CAPACITY,
        COOLDOWN, TYPE, ARGS,
    ) = (
        'group', 'name', 'change', 'changePercent', 'desiredCapacity',
        'cooldown', 'type', 'args',
    )

    properties_schema = {
        # group isn't in the post body, but it's in the URL to post to.
        GROUP: properties.Schema(
            properties.Schema.STRING,
            _('Scaling group ID that this policy belongs to.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this scaling policy.'),
            required=True,
            update_allowed=True
        ),
        CHANGE: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount to add to or remove from current number of instances. '
              'Incompatible with changePercent and desiredCapacity.'),
            update_allowed=True
        ),
        CHANGE_PERCENT: properties.Schema(
            properties.Schema.NUMBER,
            _('Percentage-based change to add or remove from current number '
              'of instances. Incompatible with change and desiredCapacity.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Absolute number to set the number of instances to. '
              'Incompatible with change and changePercent.'),
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Number of seconds after a policy execution during which '
              'further executions are disabled.'),
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of this scaling policy. Specifies how the policy is '
              'executed.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['webhook', 'schedule',
                                           'cloud_monitoring']),
            ],
            update_allowed=True
        ),
        ARGS: properties.Schema(
            properties.Schema.MAP,
            _('Type-specific arguments for the policy.'),
            update_allowed=True
        ),
    }

    def _get_args(self, properties):
        """Get pyrax-style create arguments for scaling policies."""
        args = dict(
            scaling_group=properties[self.GROUP],
            name=properties[self.NAME],
            policy_type=properties[self.TYPE],
            cooldown=properties[self.COOLDOWN],
        )
        # change / changePercent / desiredCapacity are mutually exclusive;
        # the first one present (in that order) wins.
        if properties.get(self.CHANGE) is not None:
            args['change'] = properties[self.CHANGE]
        elif properties.get(self.CHANGE_PERCENT) is not None:
            args['change'] = properties[self.CHANGE_PERCENT]
            args['is_percent'] = True
        elif properties.get(self.DESIRED_CAPACITY) is not None:
            args['desired_capacity'] = properties[self.DESIRED_CAPACITY]
        if properties.get(self.ARGS) is not None:
            args['args'] = properties[self.ARGS]
        return args

    def handle_create(self):
        """Create the scaling policy and initialize the resource ID.

        The resource ID is initialized to {group_id}:{policy_id}.
        """
        asclient = self.auto_scale()
        args = self._get_args(self.properties)
        policy = asclient.add_policy(**args)
        resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)
        self.resource_id_set(resource_id)

    def _get_policy_id(self):
        # resource_id is '{group_id}:{policy_id}'; return the policy part.
        return self.resource_id.split(':', 1)[1]

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the policy with one built from the updated properties."""
        asclient = self.auto_scale()
        props = json_snippet.properties(self.properties_schema,
                                        self.context)
        args = self._get_args(props)
        args['policy'] = self._get_policy_id()
        asclient.replace_policy(**args)

    def handle_delete(self):
        """Delete the policy if it exists."""
        asclient = self.auto_scale()
        if self.resource_id is None:
            return
        policy_id = self._get_policy_id()
        try:
            asclient.delete_policy(self.properties[self.GROUP], policy_id)
        except NotFound:
            # Already removed out-of-band; treat as success.
            pass

    def auto_scale(self):
        """Return the pyrax Auto Scale client."""
        return self.client('auto_scale')
class WebHook(resource.Resource):
    """Represents a Rackspace AutoScale webhook.

    Exposes the URLs of the webhook as attributes.
    """

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        POLICY, NAME, METADATA,
    ) = (
        'policy', 'name', 'metadata',
    )

    ATTRIBUTES = (
        EXECUTE_URL, CAPABILITY_URL,
    ) = (
        'executeUrl', 'capabilityUrl',
    )

    properties_schema = {
        POLICY: properties.Schema(
            properties.Schema.STRING,
            _('The policy that this webhook should apply to, in '
              '{group_id}:{policy_id} format. Generally a Ref to a Policy '
              'resource.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of this webhook.'),
            required=True,
            update_allowed=True
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key/value metadata for this webhook.'),
            update_allowed=True
        ),
    }

    attributes_schema = {
        EXECUTE_URL: attributes.Schema(
            _("The url for executing the webhook (requires auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
        CAPABILITY_URL: attributes.Schema(
            _("The url for executing the webhook (doesn't require auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
    }

    def _get_args(self, props):
        """Translate webhook properties into pyrax keyword arguments."""
        group_id, policy_id = props[self.POLICY].split(':', 1)
        return dict(
            name=props[self.NAME],
            scaling_group=group_id,
            policy=policy_id,
            metadata=props.get(self.METADATA))

    def handle_create(self):
        """Create the webhook and persist its execute/capability URLs."""
        asclient = self.auto_scale()
        args = self._get_args(self.properties)
        webhook = asclient.add_webhook(**args)
        self.resource_id_set(webhook.id)
        # Store the webhook URLs in resource data so the attributes can be
        # resolved without another API round-trip.
        for link in webhook.links:
            rel_to_key = {'self': 'executeUrl',
                          'capability': 'capabilityUrl'}
            key = rel_to_key.get(link['rel'])
            if key is not None:
                url = link['href'].encode('utf-8')
                self.data_set(key, url)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the webhook with one built from the updated properties."""
        asclient = self.auto_scale()
        args = self._get_args(json_snippet.properties(self.properties_schema,
                                                      self.context))
        args['webhook'] = self.resource_id
        asclient.replace_webhook(**args)

    def _resolve_attribute(self, key):
        # URLs were stored utf-8 encoded in handle_create; decode on read.
        # NOTE(review): assumes data() returns the bytes stored by data_set -
        # confirm against the resource-data storage layer.
        v = self.data().get(key)
        if v is not None:
            return v.decode('utf-8')
        else:
            return None

    def handle_delete(self):
        """Delete the webhook if it exists."""
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        group_id, policy_id = self.properties[self.POLICY].split(':', 1)
        try:
            asclient.delete_webhook(group_id, policy_id, self.resource_id)
        except NotFound:
            # Already removed out-of-band; treat as success.
            pass

    def auto_scale(self):
        """Return the pyrax Auto Scale client."""
        return self.client('auto_scale')
def resource_mapping():
    """Map Auto Scale resource type names to their implementing classes."""
    mapping = {}
    mapping['Rackspace::AutoScale::Group'] = Group
    mapping['Rackspace::AutoScale::ScalingPolicy'] = ScalingPolicy
    mapping['Rackspace::AutoScale::WebHook'] = WebHook
    return mapping
def available_resource_mapping():
    """Expose the resource mapping only when pyrax is importable."""
    if not PYRAX_INSTALLED:
        return {}
    return resource_mapping()

View File

@ -1,216 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources for Rackspace DNS."""
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
# pyrax is optional: fall back to a stand-in NotFound exception so this
# module can still be imported (e.g. for tests) when pyrax is absent;
# available_resource_mapping() uses PYRAX_INSTALLED to hide the resource.
try:
    from pyrax.exceptions import NotFound
    PYRAX_INSTALLED = True
except ImportError:
    # Setup fake exception for testing without pyrax
    class NotFound(Exception):
        pass
    PYRAX_INSTALLED = False

# Module-level logger.
LOG = logging.getLogger(__name__)
class CloudDns(resource.Resource):
    """Represents a Rackspace Cloud DNS domain with its records."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        NAME, EMAIL_ADDRESS, TTL, COMMENT, RECORDS,
    ) = (
        'name', 'emailAddress', 'ttl', 'comment', 'records',
    )

    # Keys accepted inside each entry of the 'records' list.
    _RECORD_KEYS = (
        RECORD_COMMENT, RECORD_NAME, RECORD_DATA, RECORD_PRIORITY, RECORD_TTL,
        RECORD_TYPE,
    ) = (
        'comment', 'name', 'data', 'priority', 'ttl',
        'type',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Specifies the name for the domain or subdomain. Must be a '
              'valid domain name.'),
            required=True,
            constraints=[
                constraints.Length(min=3),
            ]
        ),
        EMAIL_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('Email address to use for contacting the domain administrator.'),
            required=True,
            update_allowed=True
        ),
        TTL: properties.Schema(
            properties.Schema.INTEGER,
            _('How long other servers should cache recorddata.'),
            default=3600,
            constraints=[
                constraints.Range(min=300),
            ],
            update_allowed=True
        ),
        COMMENT: properties.Schema(
            properties.Schema.STRING,
            _('Optional free form text comment'),
            constraints=[
                constraints.Length(max=160),
            ],
            update_allowed=True
        ),
        RECORDS: properties.Schema(
            properties.Schema.LIST,
            _('Domain records'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    RECORD_COMMENT: properties.Schema(
                        properties.Schema.STRING,
                        _('Optional free form text comment'),
                        constraints=[
                            constraints.Length(max=160),
                        ]
                    ),
                    RECORD_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies the name for the domain or '
                          'subdomain. Must be a valid domain name.'),
                        required=True,
                        constraints=[
                            constraints.Length(min=3),
                        ]
                    ),
                    RECORD_DATA: properties.Schema(
                        properties.Schema.STRING,
                        _('Type specific record data'),
                        required=True
                    ),
                    RECORD_PRIORITY: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Required for MX and SRV records, but '
                          'forbidden for other record types. If '
                          'specified, must be an integer from 0 to '
                          '65535.'),
                        constraints=[
                            constraints.Range(0, 65535),
                        ]
                    ),
                    RECORD_TTL: properties.Schema(
                        properties.Schema.INTEGER,
                        _('How long other servers should cache '
                          'recorddata.'),
                        default=3600,
                        constraints=[
                            constraints.Range(min=300),
                        ]
                    ),
                    RECORD_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies the record type.'),
                        required=True,
                        constraints=[
                            constraints.AllowedValues(['A', 'AAAA', 'NS',
                                                       'MX', 'CNAME',
                                                       'TXT', 'SRV']),
                        ]
                    ),
                },
            ),
            update_allowed=True
        ),
    }

    def cloud_dns(self):
        """Return the pyrax Cloud DNS client."""
        return self.client('cloud_dns')

    def handle_create(self):
        """Create a Rackspace CloudDns Instance."""
        # There is no check_create_complete as the pyrax create for DNS is
        # synchronous.
        LOG.debug("CloudDns handle_create called.")
        args = dict((k, v) for k, v in self.properties.items())
        for rec in args[self.RECORDS] or {}:
            # only pop the priority for the correct types
            rec_type = rec[self.RECORD_TYPE]
            if (rec_type != 'MX') and (rec_type != 'SRV'):
                rec.pop(self.RECORD_PRIORITY, None)
        dom = self.cloud_dns().create(**args)
        self.resource_id_set(dom.id)

    def handle_check(self):
        """Verify the domain still exists (raises if it is gone)."""
        self.cloud_dns().get(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update a Rackspace CloudDns Instance."""
        LOG.debug("CloudDns handle_update called.")
        if not self.resource_id:
            raise exception.Error(_('Update called on a non-existent domain'))
        if prop_diff:
            dom = self.cloud_dns().get(self.resource_id)
            # handle records separately
            records = prop_diff.pop(self.RECORDS, {})
            if prop_diff:
                # Handle top level domain properties
                dom.update(**prop_diff)
        # handle records
        if records:
            recs = dom.list_records()
            # 1. delete all the current records other than rackspace NS records
            # (side-effecting list comprehension kept as-is for behavior
            # parity; the list result is intentionally discarded)
            [rec.delete() for rec in recs if rec.type != 'NS' or
                'stabletransit.com' not in rec.data]
            # 2. update with the new records in prop_diff
            dom.add_records(records)

    def handle_delete(self):
        """Delete a Rackspace CloudDns Instance."""
        LOG.debug("CloudDns handle_delete called.")
        if self.resource_id:
            try:
                dom = self.cloud_dns().get(self.resource_id)
                dom.delete()
            except NotFound:
                # Already removed out-of-band; treat as success.
                pass
def resource_mapping():
    """Map the Rackspace DNS resource type name to its class."""
    mapping = {'Rackspace::Cloud::DNS': CloudDns}
    return mapping
def available_resource_mapping():
    """Expose the resource mapping only when pyrax is importable."""
    if not PYRAX_INSTALLED:
        return {}
    return resource_mapping()

File diff suppressed because it is too large Load Diff

View File

@ -1,309 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.nova import server
from heat.engine import support
# pyrax is an optional dependency; its presence gates whether the Rackspace
# Cloud Server resource is exposed (see available_resource_mapping below /
# elsewhere in this file).
try:
    import pyrax  # noqa
    PYRAX_INSTALLED = True
except ImportError:
    PYRAX_INSTALLED = False

# Module-level logger.
LOG = logging.getLogger(__name__)
class CloudServer(server.Server):
    """Resource for Rackspace Cloud Servers.

    This resource overloads existent integrated OS::Nova::Server resource and
    is used for Rackspace Cloud Servers.
    """

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # Rackspace Cloud automation statuses
    SM_STATUS_IN_PROGRESS = 'In Progress'
    SM_STATUS_COMPLETE = 'Complete'
    SM_STATUS_BUILD_ERROR = 'Build Error'

    # RackConnect automation statuses
    RC_STATUS_DEPLOYING = 'DEPLOYING'
    RC_STATUS_DEPLOYED = 'DEPLOYED'
    RC_STATUS_FAILED = 'FAILED'
    RC_STATUS_UNPROCESSABLE = 'UNPROCESSABLE'

    # Nova Extra specs
    FLAVOR_EXTRA_SPECS = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
    FLAVOR_CLASSES_KEY = 'flavor_classes'
    FLAVOR_ACCEPT_ANY = '*'
    FLAVOR_CLASS = 'class'
    DISK_IO_INDEX = 'disk_io_index'
    FLAVOR_CLASSES = (
        GENERAL1, MEMORY1, PERFORMANCE2, PERFORMANCE1, STANDARD1, IO1,
        ONMETAL, COMPUTE1
    ) = (
        'general1', 'memory1', 'performance2', 'performance1',
        'standard1', 'io1', 'onmetal', 'compute1',
    )
    BASE_IMAGE_REF = 'base_image_ref'

    # flavor classes that can be booted ONLY from volume
    BFV_VOLUME_REQUIRED = {MEMORY1, COMPUTE1}
    # flavor classes that can NOT be booted from volume
    NON_BFV = {STANDARD1, ONMETAL}

    # Start from the upstream Nova server schema and narrow two properties
    # to the values Rackspace Cloud supports.
    properties_schema = copy.deepcopy(server.Server.properties_schema)
    properties_schema.update(
        {
            server.Server.USER_DATA_FORMAT: properties.Schema(
                properties.Schema.STRING,
                _('How the user_data should be formatted for the server. '
                  'For RAW the user_data is passed to Nova unmodified. '
                  'For SOFTWARE_CONFIG user_data is bundled as part of the '
                  'software config data, and metadata is derived from any '
                  'associated SoftwareDeployment resources.'),
                default=server.Server.RAW,
                constraints=[
                    constraints.AllowedValues([
                        server.Server.RAW, server.Server.SOFTWARE_CONFIG
                    ])
                ]
            ),
        }
    )
    properties_schema.update(
        {
            server.Server.SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
                properties.Schema.STRING,
                _('How the server should receive the metadata required for '
                  'software configuration. POLL_TEMP_URL is the only '
                  'supported transport on Rackspace Cloud. This property is '
                  'retained for compatibility.'),
                default=server.Server.POLL_TEMP_URL,
                update_allowed=True,
                constraints=[
                    constraints.AllowedValues([
                        server.Server.POLL_TEMP_URL
                    ])
                ]
            ),
        }
    )
    def __init__(self, name, json_snippet, stack):
        super(CloudServer, self).__init__(name, json_snippet, stack)
        # Track whether each "waiting for automation" progress event has
        # already been emitted, so each is sent at most once per resource.
        self._managed_cloud_started_event_sent = False
        self._rack_connect_started_event_sent = False
def _config_drive(self):
user_data_format = self.properties[self.USER_DATA_FORMAT]
is_sw_config = user_data_format == self.SOFTWARE_CONFIG
user_data = self.properties.get(self.USER_DATA)
config_drive = self.properties.get(self.CONFIG_DRIVE)
if config_drive or is_sw_config or user_data:
return True
else:
return False
    def _check_rax_automation_complete(self, server):
        """Poll the Rackspace service-level automation status of the server.

        Returns True once automation reports Complete, False while still in
        progress, and raises on a build error or unknown status.
        """
        if not self._managed_cloud_started_event_sent:
            # Emit the "waiting" event only once.
            msg = _("Waiting for Rackspace Cloud automation to complete")
            self._add_event(self.action, self.status, msg)
            self._managed_cloud_started_event_sent = True

        if 'rax_service_level_automation' not in server.metadata:
            LOG.debug("Cloud server does not have the "
                      "rax_service_level_automation metadata tag yet")
            return False

        mc_status = server.metadata['rax_service_level_automation']
        LOG.debug("Rackspace Cloud automation status: %s" % mc_status)

        if mc_status == self.SM_STATUS_IN_PROGRESS:
            return False

        elif mc_status == self.SM_STATUS_COMPLETE:
            msg = _("Rackspace Cloud automation has completed")
            self._add_event(self.action, self.status, msg)
            return True

        elif mc_status == self.SM_STATUS_BUILD_ERROR:
            raise exception.Error(_("Rackspace Cloud automation failed"))

        else:
            raise exception.Error(_("Unknown Rackspace Cloud automation "
                                    "status: %s") % mc_status)
def _check_rack_connect_complete(self, server):
    """Poll RackConnect automation status on *server*.

    Returns True once automation has finished (or is deemed not
    applicable), False while still deploying, and raises Error on a
    failed or unknown status.
    """
    if not self._rack_connect_started_event_sent:
        msg = _("Waiting for RackConnect automation to complete")
        self._add_event(self.action, self.status, msg)
        self._rack_connect_started_event_sent = True

    # The metadata tag appears only after automation has started.
    if 'rackconnect_automation_status' not in server.metadata:
        LOG.debug("RackConnect server does not have the "
                  "rackconnect_automation_status metadata tag yet")
        return False

    rc_status = server.metadata['rackconnect_automation_status']
    LOG.debug("RackConnect automation status: %s" % rc_status)

    if rc_status == self.RC_STATUS_DEPLOYING:
        return False

    elif rc_status == self.RC_STATUS_DEPLOYED:
        self._server = None  # The public IP changed, forget old one
        return True

    elif rc_status == self.RC_STATUS_UNPROCESSABLE:
        # UNPROCESSABLE means the RackConnect automation was not
        # attempted (eg. Cloud Server in a different DC than
        # dedicated gear, so RackConnect does not apply). It is
        # okay if we do not raise an exception.
        reason = server.metadata.get('rackconnect_unprocessable_reason',
                                     None)
        if reason is not None:
            LOG.warning("RackConnect unprocessable reason: %s",
                        reason)

        msg = _("RackConnect automation has completed")
        self._add_event(self.action, self.status, msg)
        return True

    elif rc_status == self.RC_STATUS_FAILED:
        raise exception.Error(_("RackConnect automation FAILED"))

    else:
        msg = _("Unknown RackConnect automation status: %s") % rc_status
        raise exception.Error(msg)
def check_create_complete(self, server_id):
    """Check if server creation is complete and handle server configs."""
    if not super(CloudServer, self).check_create_complete(server_id):
        return False

    server = self.client_plugin().fetch_server(server_id)
    if not server:
        return False

    # RackConnect automation only applies to accounts with that role.
    needs_rack_connect = 'rack_connect' in self.context.roles
    if needs_rack_connect and not self._check_rack_connect_complete(server):
        return False

    return self._check_rax_automation_complete(server)
# Since rackspace compute service does not support 'os-interface' endpoint,
# accessing addresses attribute of OS::Nova::Server results in NotFound
# error. Here overrdiing '_add_port_for_address' method and using different
# endpoint named 'os-virtual-interfacesv2' to get the same information.
def _add_port_for_address(self, server):
def get_port(net_name, address):
for iface in ifaces:
for ip_addr in iface.ip_addresses:
if ip_addr['network_label'] == net_name and ip_addr[
'address'] == address:
return iface.id
nets = copy.deepcopy(server.addresses)
nova_ext = self.client().os_virtual_interfacesv2_python_novaclient_ext
ifaces = nova_ext.list(server.id)
for net_name, addresses in nets.items():
for address in addresses:
address['port'] = get_port(net_name, address['addr'])
return self._extend_networks(nets)
def _base_image_obj(self, image):
image_obj = self.client_plugin('glance').get_image(image)
if self.BASE_IMAGE_REF in image_obj:
base_image = image_obj[self.BASE_IMAGE_REF]
return self.client_plugin('glance').get_image(base_image)
return image_obj
def _image_flavor_class_match(self, flavor_type, image):
base_image_obj = self._base_image_obj(image)
flavor_class_string = base_image_obj.get(self.FLAVOR_CLASSES_KEY)
# If the flavor_class_string metadata does not exist or is
# empty, do not validate image/flavor combo
if not flavor_class_string:
return True
flavor_class_excluded = "!{0}".format(flavor_type)
flavor_classes_accepted = flavor_class_string.split(',')
if flavor_type in flavor_classes_accepted:
return True
if (self.FLAVOR_ACCEPT_ANY in flavor_classes_accepted and
flavor_class_excluded not in flavor_classes_accepted):
return True
return False
def validate(self):
    """Validate for Rackspace Cloud specific parameters.

    On top of the base OS::Nova::Server validation, checks that the
    requested flavor class and image are compatible, and that
    boot-from-volume restrictions of the flavor class are respected.

    :raises exception.StackValidationFailed: on an invalid combination.
    """
    super(CloudServer, self).validate()

    # check if image, flavor combination is valid
    flavor = self.properties[self.FLAVOR]
    flavor_obj = self.client_plugin().get_flavor(flavor)
    fl_xtra_specs = flavor_obj.to_dict().get(self.FLAVOR_EXTRA_SPECS, {})
    flavor_type = fl_xtra_specs.get(self.FLAVOR_CLASS, None)

    image = self.properties.get(self.IMAGE)
    if not image:
        # No image: boot-from-volume, which some flavor classes forbid.
        if flavor_type in self.NON_BFV:
            msg = _('Flavor %s cannot be booted from volume.') % flavor
            raise exception.StackValidationFailed(message=msg)
        else:
            # we cannot determine details of the attached volume, so this
            # is all the validation possible
            return

    if not self._image_flavor_class_match(flavor_type, image):
        msg = _('Flavor %(flavor)s cannot be used with image '
                '%(image)s.') % {'image': image, 'flavor': flavor}
        raise exception.StackValidationFailed(message=msg)

    if flavor_type in self.BFV_VOLUME_REQUIRED:
        msg = _('Flavor %(flavor)s must be booted from volume, '
                'but image %(image)s was also specified.') % {
            'flavor': flavor, 'image': image}
        raise exception.StackValidationFailed(message=msg)
def resource_mapping():
    """Plugin hook: map template resource types to their classes."""
    mapping = {
        'OS::Nova::Server': CloudServer,
    }
    return mapping
def available_resource_mapping():
    """Plugin hook: expose the mapping only when pyrax is importable."""
    if not PYRAX_INSTALLED:
        return {}
    return resource_mapping()

View File

@ -1,165 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
# pyrax is only available on Rackspace deployments; fall back to dummy
# exception classes so this module can still be imported (and tested)
# without it.
try:
    from pyrax.exceptions import NetworkInUse  # noqa
    from pyrax.exceptions import NotFound  # noqa
    PYRAX_INSTALLED = True
except ImportError:
    PYRAX_INSTALLED = False

    class NotFound(Exception):
        """Dummy pyrax exception - only used for testing."""

    class NetworkInUse(Exception):
        """Dummy pyrax exception - only used for testing."""
LOG = logging.getLogger(__name__)
class CloudNetwork(resource.Resource):
    """A resource for creating Rackspace Cloud Networks.

    See http://www.rackspace.com/cloud/networks/ for service
    documentation.
    """

    support_status = support.SupportStatus(
        status=support.HIDDEN,
        version='6.0.0',
        previous_status=support.SupportStatus(
            status=support.DEPRECATED,
            message=_('Use OS::Neutron::Net instead.'),
            version='2015.1',
            previous_status=support.SupportStatus(version='2014.1')
        )
    )

    PROPERTIES = (
        LABEL, CIDR
    ) = (
        "label", "cidr"
    )

    ATTRIBUTES = (
        CIDR_ATTR, LABEL_ATTR,
    ) = (
        'cidr', 'label',
    )

    properties_schema = {
        LABEL: properties.Schema(
            properties.Schema.STRING,
            _("The name of the network."),
            required=True,
            constraints=[
                constraints.Length(min=3, max=64)
            ]
        ),
        CIDR: properties.Schema(
            properties.Schema.STRING,
            _("The IP block from which to allocate the network. For example, "
              "172.16.0.0/24 or 2001:DB8::/64."),
            required=True,
            constraints=[
                constraints.CustomConstraint('net_cidr')
            ]
        )
    }

    attributes_schema = {
        CIDR_ATTR: attributes.Schema(
            _("The CIDR for an isolated private network.")
        ),
        LABEL_ATTR: attributes.Schema(
            _("The name of the network.")
        ),
    }

    def __init__(self, name, json_snippet, stack):
        resource.Resource.__init__(self, name, json_snippet, stack)
        # Cached pyrax network handle plus a latch recording that a
        # delete() call was issued; both drive check_delete_complete.
        self._network = None
        self._delete_issued = False

    def network(self):
        # Lazily fetch and cache the backing cloud network; returns None
        # when the network cannot be found despite a stored resource id.
        if self.resource_id and not self._network:
            try:
                self._network = self.cloud_networks().get(self.resource_id)
            except NotFound:
                LOG.warning("Could not find network %s but resource id is"
                            " set.", self.resource_id)
        return self._network

    def cloud_networks(self):
        # Rackspace-specific client registered under 'cloud_networks'.
        return self.client('cloud_networks')

    def handle_create(self):
        cnw = self.cloud_networks().create(label=self.properties[self.LABEL],
                                           cidr=self.properties[self.CIDR])
        self.resource_id_set(cnw.id)

    def handle_check(self):
        # Raises NotFound if the network no longer exists.
        self.cloud_networks().get(self.resource_id)

    def check_delete_complete(self, cookie):
        """Issue delete once, then poll until the network is gone.

        NetworkInUse is tolerated: the delete is retried on a later
        poll once the network is released.
        """
        if not self.resource_id:
            return True

        try:
            network = self.cloud_networks().get(self.resource_id)
        except NotFound:
            return True

        if not network:
            return True

        if not self._delete_issued:
            try:
                network.delete()
            except NetworkInUse:
                LOG.warning("Network '%s' still in use.", network.id)
            else:
                self._delete_issued = True
            return False

        return False

    def validate(self):
        super(CloudNetwork, self).validate()

    def _resolve_attribute(self, name):
        # Attributes mirror the live network object; empty string when
        # the network has disappeared.
        net = self.network()
        if net:
            return six.text_type(getattr(net, name))
        return ""
def resource_mapping():
    """Plugin hook: map Rackspace::Cloud::Network to its class."""
    mapping = {
        'Rackspace::Cloud::Network': CloudNetwork,
    }
    return mapping
def available_resource_mapping():
    """Plugin hook: expose the mapping only when pyrax is importable."""
    return resource_mapping() if PYRAX_INSTALLED else {}

View File

@ -1,230 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils import timeutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
# pyrax is optional; provide a stand-in NotFound so the module imports
# (and can be unit-tested) without the SDK installed.
try:
    from pyrax.exceptions import NotFound  # noqa
    PYRAX_INSTALLED = True
except ImportError:
    # Setup fake exception for testing without pyrax
    class NotFound(Exception):
        pass

    PYRAX_INSTALLED = False
def lb_immutable(exc):
    """Return True if *exc* signals the load balancer is immutable."""
    message = six.text_type(exc)
    return 'immutable' in message
class LoadbalancerDeleted(exception.HeatException):
    # Raised when the referenced load balancer is gone or pending delete.
    msg_fmt = _("The Load Balancer (ID %(lb_id)s) has been deleted.")
class NodeNotFound(exception.HeatException):
    # Raised when this resource's node is missing from the LB's node list.
    msg_fmt = _("Node (ID %(node_id)s) not found on Load Balancer "
                "(ID %(lb_id)s).")
class LBNode(resource.Resource):
    """Represents a single node of a Rackspace Cloud Load Balancer"""

    default_client_name = 'cloud_lb'

    _CONDITIONS = (
        ENABLED, DISABLED, DRAINING,
    ) = (
        'ENABLED', 'DISABLED', 'DRAINING',
    )

    # Keys passed straight through to the pyrax Node constructor.
    _NODE_KEYS = (
        ADDRESS, PORT, CONDITION, TYPE, WEIGHT
    ) = (
        'address', 'port', 'condition', 'type', 'weight'
    )

    _OTHER_KEYS = (
        LOAD_BALANCER, DRAINING_TIMEOUT
    ) = (
        'load_balancer', 'draining_timeout'
    )

    PROPERTIES = _NODE_KEYS + _OTHER_KEYS

    properties_schema = {
        LOAD_BALANCER: properties.Schema(
            properties.Schema.STRING,
            _("The ID of the load balancer to associate the node with."),
            required=True
        ),
        DRAINING_TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _("The time to wait, in seconds, for the node to drain before it "
              "is deleted."),
            default=0,
            constraints=[
                constraints.Range(min=0)
            ],
            update_allowed=True
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _("IP address for the node."),
            required=True
        ),
        PORT: properties.Schema(
            properties.Schema.INTEGER,
            required=True
        ),
        CONDITION: properties.Schema(
            properties.Schema.STRING,
            default=ENABLED,
            constraints=[
                constraints.AllowedValues(_CONDITIONS),
            ],
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            constraints=[
                constraints.AllowedValues(['PRIMARY',
                                           'SECONDARY']),
            ],
            update_allowed=True
        ),
        WEIGHT: properties.Schema(
            properties.Schema.NUMBER,
            constraints=[
                constraints.Range(1, 100),
            ],
            update_allowed=True
        ),
    }

    def lb(self):
        """Return the owning load balancer; raise if it was deleted."""
        lb_id = self.properties.get(self.LOAD_BALANCER)
        lb = self.client().get(lb_id)
        if lb.status in ('DELETED', 'PENDING_DELETE'):
            raise LoadbalancerDeleted(lb_id=lb.id)
        return lb

    def node(self, lb):
        """Locate this resource's node on *lb* or raise NodeNotFound."""
        for node in getattr(lb, 'nodes', []):
            if node.id == self.resource_id:
                return node
        raise NodeNotFound(node_id=self.resource_id, lb_id=lb.id)

    def handle_create(self):
        # All work (and the immutable-LB retry loop) happens in
        # check_create_complete.
        pass

    def check_create_complete(self, *args):
        node_args = {k: self.properties.get(k) for k in self._NODE_KEYS}
        node = self.client().Node(**node_args)

        try:
            resp, body = self.lb().add_nodes([node])
        except Exception as exc:
            # The LB rejects changes while immutable; retry on next poll.
            if lb_immutable(exc):
                return False
            raise

        new_node = body['nodes'][0]
        node_id = new_node['id']

        self.resource_id_set(node_id)
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Apply changed node properties; True once nothing is pending."""
        node = self.node(self.lb())
        is_complete = True

        for key in self._NODE_KEYS:
            if key in prop_diff and getattr(node, key, None) != prop_diff[key]:
                setattr(node, key, prop_diff[key])
                is_complete = False

        if is_complete:
            return True

        try:
            node.update()
        except Exception as exc:
            if lb_immutable(exc):
                return False
            raise

        return False

    def handle_delete(self):
        # Record when deletion started so check_delete_complete can
        # measure the draining timeout against it.
        return timeutils.utcnow()

    def check_delete_complete(self, deleted_at):
        """Drain the node for DRAINING_TIMEOUT seconds, then delete it."""
        if self.resource_id is None:
            return True

        try:
            node = self.node(self.lb())
        except (NotFound, LoadbalancerDeleted, NodeNotFound):
            return True

        if isinstance(deleted_at, six.string_types):
            # deleted_at may round-trip through the DB as an ISO string.
            deleted_at = timeutils.parse_isotime(deleted_at)

        deleted_at = timeutils.normalize_time(deleted_at)
        waited = timeutils.utcnow() - deleted_at
        timeout_secs = self.properties[self.DRAINING_TIMEOUT]
        timeout_secs = datetime.timedelta(seconds=timeout_secs)

        if waited > timeout_secs:
            try:
                node.delete()
            except NotFound:
                return True
            except Exception as exc:
                if lb_immutable(exc):
                    return False
                raise
        elif node.condition != self.DRAINING:
            # Put the node into DRAINING for the rest of the timeout.
            node.condition = self.DRAINING
            try:
                node.update()
            except Exception as exc:
                if lb_immutable(exc):
                    return False
                raise

        return False
def resource_mapping():
    """Plugin hook: map Rackspace::Cloud::LBNode to its class."""
    mapping = {
        'Rackspace::Cloud::LBNode': LBNode,
    }
    return mapping
def available_resource_mapping():
    """Plugin hook: expose the mapping only when pyrax is importable."""
    return resource_mapping() if PYRAX_INSTALLED else {}

View File

@ -1,5 +0,0 @@
import sys
from mox3 import mox
# Make legacy ``import mox`` statements resolve to the maintained mox3
# implementation.
sys.modules['mox'] = mox

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,199 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_utils import reflection
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
from ..resources import cloudnetworks # noqa
try:
from pyrax.exceptions import NotFound # noqa
except ImportError:
from ..resources.cloudnetworks import NotFound # noqa
class FakeNetwork(object):
    """In-memory stand-in for a pyrax cloud network object."""

    def __init__(self, client, label="test_network", cidr="172.16.0.0/24"):
        self.client = client
        self.label = label
        self.cidr = cidr
        self.id = str(uuid.uuid4())

    def _is_deleted(self):
        # Deleted means our id is no longer registered with the client.
        return (self.client and
                self.id not in {nw.id for nw in self.client.networks})

    def get(self):
        if self._is_deleted():
            raise NotFound("I am deleted")

    def delete(self):
        self.client._delete(self)


class FakeClient(object):
    """Minimal fake of the cloud_networks client used by the resource."""

    def __init__(self):
        self.networks = []

    def create(self, label=None, cidr=None):
        network = FakeNetwork(self, label=label, cidr=cidr)
        self.networks.append(network)
        return network

    def get(self, nwid):
        match = next((nw for nw in self.networks if nw.id == nwid), None)
        if match is None:
            raise NotFound("No network %s" % nwid)
        return match

    def _delete(self, nw):
        # Removing an already-removed network is a silent no-op.
        if nw in self.networks:
            self.networks.remove(nw)
class FakeClientRaiseException(FakeClient):
    # Variant whose API always fails, for exercising error paths.
    def create(self, label=None, cidr=None):
        raise Exception

    def get(self, nwid):
        raise Exception
@mock.patch.object(cloudnetworks.CloudNetwork, "cloud_networks")
class CloudNetworkTest(common.HeatTestCase):
    """Exercise Rackspace::Cloud::Network against the fake client.

    The class decorator replaces cloud_networks() on every test, so each
    test method receives the mock as its ``mock_client`` argument.
    """

    _template = template_format.parse("""
heat_template_version: 2013-05-23
description: Test stack for Rackspace Cloud Networks
resources:
  cnw:
    type: Rackspace::Cloud::Network
    properties:
      label: test_network
      cidr: 172.16.0.0/24
""")

    def setUp(self):
        super(CloudNetworkTest, self).setUp()
        # pyrax may be absent in the test environment, in which case the
        # resource class is not auto-registered; register it explicitly.
        resource._register_class("Rackspace::Cloud::Network",
                                 cloudnetworks.CloudNetwork)

    def _parse_stack(self):
        class_name = reflection.get_class_name(self, fully_qualified=False)
        self.stack = utils.parse_stack(self._template,
                                       stack_name=class_name)

    def _setup_stack(self, mock_client, *args):
        # Build and create the stack against a fresh FakeClient, then
        # sanity-check that creation completed.
        self.fake_cnw = FakeClient(*args)
        mock_client.return_value = self.fake_cnw
        self._parse_stack()
        self.stack.create()
        self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
                         self.stack.state)
        res = self.stack['cnw']
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

    def test_attributes(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        template_resource = self._template['resources']['cnw']
        expect_label = template_resource['properties']['label']
        expect_cidr = template_resource['properties']['cidr']
        self.assertEqual(expect_label, res.FnGetAtt('label'))
        self.assertEqual(expect_cidr, res.FnGetAtt('cidr'))

    def test_create_bad_cidr(self, mock_client):
        prop = self._template['resources']['cnw']['properties']
        prop['cidr'] = "bad cidr"
        self._parse_stack()
        exc = self.assertRaises(exception.StackValidationFailed,
                                self.stack.validate)
        self.assertIn("Invalid net cidr", six.text_type(exc))
        # reset property
        prop['cidr'] = "172.16.0.0/24"

    def test_check(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        scheduler.TaskRunner(res.check)()
        self.assertEqual((res.CHECK, res.COMPLETE), res.state)

        # Removing the backing network makes the check fail.
        self.fake_cnw.networks = []
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(res.check))
        self.assertEqual((res.CHECK, res.FAILED), res.state)
        self.assertIn('No network', str(exc))

    def test_delete(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        res_id = res.FnGetRefId()
        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)
        exc = self.assertRaises(NotFound, self.fake_cnw.get, res_id)
        self.assertIn(res_id, six.text_type(exc))

    def test_delete_no_network_created(self, mock_client):
        # Creation fails outright; delete must still converge.
        self.fake_cnw = FakeClientRaiseException()
        mock_client.return_value = self.fake_cnw
        self._parse_stack()
        self.stack.create()
        self.assertEqual((self.stack.CREATE, self.stack.FAILED),
                         self.stack.state)
        res = self.stack['cnw']
        self.assertEqual((res.CREATE, res.FAILED), res.state)
        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)

    def test_delete_in_use(self, mock_client):
        # First delete attempt hits NetworkInUse; the retry succeeds.
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        fake_network = res.network()
        fake_network.delete = mock.Mock()
        fake_network.delete.side_effect = [cloudnetworks.NetworkInUse(), True]
        mock_client.return_value = fake_network
        fake_network.get = mock.Mock()
        fake_network.get.side_effect = [cloudnetworks.NotFound()]

        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)

    def test_delete_not_complete(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        mock_client.get = mock.Mock()
        task = res.handle_delete()
        self.assertFalse(res.check_delete_complete(task))

    def test_delete_not_found(self, mock_client):
        self._setup_stack(mock_client)
        self.fake_cnw.networks = []
        res = self.stack['cnw']
        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)

View File

@ -1,305 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from heat.engine import rsrc_defn
from heat.tests import common
from ..resources import lb_node # noqa
from ..resources.lb_node import ( # noqa
LoadbalancerDeleted,
NotFound,
NodeNotFound)
from .test_cloud_loadbalancer import FakeNode # noqa
class LBNode(lb_node.LBNode):
    # Shadow the real resource so tests run without a Rackspace service
    # catalog: the availability check always passes.
    @classmethod
    def is_service_available(cls, context):
        return (True, None)
class LBNodeTest(common.HeatTestCase):
    """Unit tests for the Rackspace::Cloud::LBNode resource."""

    def setUp(self):
        super(LBNodeTest, self).setUp()
        # A fully mocked stack/client pair; every test drives the
        # resource's check_* methods directly against self.mockclient.
        self.mockstack = mock.Mock()
        self.mockstack.has_cache_data.return_value = False
        self.mockstack.db_resource_get.return_value = None
        self.mockclient = mock.Mock()
        self.mockstack.clients.client.return_value = self.mockclient

        self.def_props = {
            LBNode.LOAD_BALANCER: 'some_lb_id',
            LBNode.DRAINING_TIMEOUT: 60,
            LBNode.ADDRESS: 'some_ip',
            LBNode.PORT: 80,
            LBNode.CONDITION: 'ENABLED',
            LBNode.TYPE: 'PRIMARY',
            LBNode.WEIGHT: None,
        }
        self.resource_def = rsrc_defn.ResourceDefinition(
            "test", LBNode, properties=self.def_props)

        self.resource = LBNode("test", self.resource_def, self.mockstack)
        self.resource.resource_id = 12345

    def test_create(self):
        self.resource.resource_id = None
        fake_lb = mock.Mock()
        fake_lb.add_nodes.return_value = (None, {'nodes': [{'id': 12345}]})
        self.mockclient.get.return_value = fake_lb
        fake_node = mock.Mock()
        self.mockclient.Node.return_value = fake_node

        self.resource.check_create_complete()
        self.mockclient.get.assert_called_once_with('some_lb_id')
        # WEIGHT=None in the properties resolves to the schema default 0.
        self.mockclient.Node.assert_called_once_with(
            address='some_ip', port=80, condition='ENABLED',
            type='PRIMARY', weight=0)
        fake_lb.add_nodes.assert_called_once_with([fake_node])
        self.assertEqual(self.resource.resource_id, 12345)

    def test_create_lb_not_found(self):
        self.mockclient.get.side_effect = NotFound()
        self.assertRaises(NotFound, self.resource.check_create_complete)

    def test_create_lb_deleted(self):
        fake_lb = mock.Mock()
        fake_lb.id = 1111
        fake_lb.status = 'DELETED'
        self.mockclient.get.return_value = fake_lb

        exc = self.assertRaises(LoadbalancerDeleted,
                                self.resource.check_create_complete)
        self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
                         str(exc))

    def test_create_lb_pending_delete(self):
        fake_lb = mock.Mock()
        fake_lb.id = 1111
        fake_lb.status = 'PENDING_DELETE'
        self.mockclient.get.return_value = fake_lb

        exc = self.assertRaises(LoadbalancerDeleted,
                                self.resource.check_create_complete)
        self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
                         str(exc))

    def test_handle_update_method(self):
        # handle_update just hands the property diff to
        # check_update_complete unchanged.
        self.assertEqual(self.resource.handle_update(None, None, 'foo'), 'foo')

    def _test_update(self, diff):
        # Apply *diff* and verify the node was mutated and updated once.
        fake_lb = mock.Mock()
        fake_node = FakeNode(id=12345, address='a', port='b')
        fake_node.update = mock.Mock()
        expected_node = FakeNode(id=12345, address='a', port='b', **diff)
        expected_node.update = fake_node.update

        fake_lb.nodes = [fake_node]
        self.mockclient.get.return_value = fake_lb

        self.assertFalse(self.resource.check_update_complete(prop_diff=diff))
        self.mockclient.get.assert_called_once_with('some_lb_id')
        fake_node.update.assert_called_once_with()
        self.assertEqual(fake_node, expected_node)

    def test_update_condition(self):
        self._test_update({'condition': 'DISABLED'})

    def test_update_weight(self):
        self._test_update({'weight': 100})

    def test_update_type(self):
        self._test_update({'type': 'SECONDARY'})

    def test_update_multiple(self):
        self._test_update({'condition': 'DISABLED',
                           'weight': 100,
                           'type': 'SECONDARY'})

    def test_update_finished(self):
        # Diff already matches the node: no update() call, returns True.
        fake_lb = mock.Mock()
        fake_node = FakeNode(id=12345, address='a', port='b',
                             condition='ENABLED')
        fake_node.update = mock.Mock()
        expected_node = FakeNode(id=12345, address='a', port='b',
                                 condition='ENABLED')
        expected_node.update = fake_node.update

        fake_lb.nodes = [fake_node]
        self.mockclient.get.return_value = fake_lb

        diff = {'condition': 'ENABLED'}
        self.assertTrue(self.resource.check_update_complete(prop_diff=diff))
        self.mockclient.get.assert_called_once_with('some_lb_id')
        self.assertFalse(fake_node.update.called)
        self.assertEqual(fake_node, expected_node)

    def test_update_lb_not_found(self):
        self.mockclient.get.side_effect = NotFound()

        diff = {'condition': 'ENABLED'}
        self.assertRaises(NotFound, self.resource.check_update_complete,
                          prop_diff=diff)

    def test_update_lb_deleted(self):
        fake_lb = mock.Mock()
        fake_lb.id = 1111
        fake_lb.status = 'DELETED'
        self.mockclient.get.return_value = fake_lb

        diff = {'condition': 'ENABLED'}
        exc = self.assertRaises(LoadbalancerDeleted,
                                self.resource.check_update_complete,
                                prop_diff=diff)
        self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
                         str(exc))

    def test_update_lb_pending_delete(self):
        fake_lb = mock.Mock()
        fake_lb.id = 1111
        fake_lb.status = 'PENDING_DELETE'
        self.mockclient.get.return_value = fake_lb

        diff = {'condition': 'ENABLED'}
        exc = self.assertRaises(LoadbalancerDeleted,
                                self.resource.check_update_complete,
                                prop_diff=diff)
        self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
                         str(exc))

    def test_update_node_not_found(self):
        fake_lb = mock.Mock()
        fake_lb.id = 4444
        fake_lb.nodes = []
        self.mockclient.get.return_value = fake_lb

        diff = {'condition': 'ENABLED'}
        exc = self.assertRaises(NodeNotFound,
                                self.resource.check_update_complete,
                                prop_diff=diff)
        self.assertEqual(
            "Node (ID 12345) not found on Load Balancer (ID 4444).", str(exc))

    def test_delete_no_id(self):
        self.resource.resource_id = None
        self.assertTrue(self.resource.check_delete_complete(None))

    def test_delete_lb_already_deleted(self):
        self.mockclient.get.side_effect = NotFound()
        self.assertTrue(self.resource.check_delete_complete(None))
        self.mockclient.get.assert_called_once_with('some_lb_id')

    def test_delete_lb_deleted_status(self):
        fake_lb = mock.Mock()
        fake_lb.status = 'DELETED'
        self.mockclient.get.return_value = fake_lb

        self.assertTrue(self.resource.check_delete_complete(None))
        self.mockclient.get.assert_called_once_with('some_lb_id')

    def test_delete_lb_pending_delete_status(self):
        fake_lb = mock.Mock()
        fake_lb.status = 'PENDING_DELETE'
        self.mockclient.get.return_value = fake_lb

        self.assertTrue(self.resource.check_delete_complete(None))
        self.mockclient.get.assert_called_once_with('some_lb_id')

    def test_delete_node_already_deleted(self):
        fake_lb = mock.Mock()
        fake_lb.nodes = []
        self.mockclient.get.return_value = fake_lb

        self.assertTrue(self.resource.check_delete_complete(None))
        self.mockclient.get.assert_called_once_with('some_lb_id')

    @mock.patch.object(lb_node.timeutils, 'utcnow')
    def test_drain_before_delete(self, mock_utcnow):
        # At t=0 the node is put into DRAINING, not deleted.
        fake_lb = mock.Mock()
        fake_node = FakeNode(id=12345, address='a', port='b')
        expected_node = FakeNode(id=12345, address='a', port='b',
                                 condition='DRAINING')
        fake_node.update = mock.Mock()
        expected_node.update = fake_node.update
        fake_node.delete = mock.Mock()
        expected_node.delete = fake_node.delete

        fake_lb.nodes = [fake_node]
        self.mockclient.get.return_value = fake_lb

        now = datetime.datetime.utcnow()
        mock_utcnow.return_value = now

        self.assertFalse(self.resource.check_delete_complete(now))
        self.mockclient.get.assert_called_once_with('some_lb_id')
        fake_node.update.assert_called_once_with()
        self.assertFalse(fake_node.delete.called)
        self.assertEqual(fake_node, expected_node)

    @mock.patch.object(lb_node.timeutils, 'utcnow')
    def test_delete_waiting(self, mock_utcnow):
        # Within the 60s draining timeout: neither update nor delete.
        fake_lb = mock.Mock()
        fake_node = FakeNode(id=12345, address='a', port='b',
                             condition='DRAINING')
        expected_node = FakeNode(id=12345, address='a', port='b',
                                 condition='DRAINING')
        fake_node.update = mock.Mock()
        expected_node.update = fake_node.update
        fake_node.delete = mock.Mock()
        expected_node.delete = fake_node.delete

        fake_lb.nodes = [fake_node]
        self.mockclient.get.return_value = fake_lb

        now = datetime.datetime.utcnow()
        now_plus_30 = now + datetime.timedelta(seconds=30)
        mock_utcnow.return_value = now_plus_30

        self.assertFalse(self.resource.check_delete_complete(now))
        self.mockclient.get.assert_called_once_with('some_lb_id')
        self.assertFalse(fake_node.update.called)
        self.assertFalse(fake_node.delete.called)
        self.assertEqual(fake_node, expected_node)

    @mock.patch.object(lb_node.timeutils, 'utcnow')
    def test_delete_finishing(self, mock_utcnow):
        # Past the draining timeout: the node is deleted.
        fake_lb = mock.Mock()
        fake_node = FakeNode(id=12345, address='a', port='b',
                             condition='DRAINING')
        expected_node = FakeNode(id=12345, address='a', port='b',
                                 condition='DRAINING')
        fake_node.update = mock.Mock()
        expected_node.update = fake_node.update
        fake_node.delete = mock.Mock()
        expected_node.delete = fake_node.delete

        fake_lb.nodes = [fake_node]
        self.mockclient.get.return_value = fake_lb

        now = datetime.datetime.utcnow()
        now_plus_62 = now + datetime.timedelta(seconds=62)
        mock_utcnow.return_value = now_plus_62

        self.assertFalse(self.resource.check_delete_complete(now))
        self.mockclient.get.assert_called_once_with('some_lb_id')
        self.assertFalse(fake_node.update.called)
        self.assertTrue(fake_node.delete.called)
        self.assertEqual(fake_node, expected_node)

View File

@ -1,662 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests.openstack.nova import fakes
from heat.tests import utils
from ..resources import cloud_server # noqa
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"key_name" : {
"Description" : "key_name",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "OS::Nova::Server",
"Properties": {
"image" : "CentOS 5.2",
"flavor" : "256 MB Server",
"key_name" : "test",
"user_data" : "wordpress"
}
}
}
}
'''
cfg.CONF.import_opt('region_name_for_services', 'heat.common.config')
class CloudServersTest(common.HeatTestCase):
def setUp(self):
super(CloudServersTest, self).setUp()
cfg.CONF.set_override('region_name_for_services', 'RegionOne')
self.ctx = utils.dummy_context()
self.fc = fakes.FakeClient()
mock_nova_create = mock.Mock()
self.ctx.clients.client_plugin(
'nova')._create = mock_nova_create
mock_nova_create.return_value = self.fc
# Test environment may not have pyrax client library installed and if
# pyrax is not installed resource class would not be registered.
# So register resource provider class explicitly for unit testing.
resource._register_class("OS::Nova::Server",
cloud_server.CloudServer)
def _setup_test_stack(self, stack_name):
t = template_format.parse(wp_template)
templ = template.Template(
t, env=environment.Environment({'key_name': 'test'}))
self.stack = parser.Stack(self.ctx, stack_name, templ,
stack_id=uuidutils.generate_uuid())
return (templ, self.stack)
def _setup_test_server(self, return_server, name, image_id=None,
override_name=False, stub_create=True):
stack_name = '%s_s' % name
(tmpl, stack) = self._setup_test_stack(stack_name)
tmpl.t['Resources']['WebServer']['Properties'][
'image'] = image_id or 'CentOS 5.2'
tmpl.t['Resources']['WebServer']['Properties'][
'flavor'] = '256 MB Server'
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value='aaaaaa')
self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
return_value=1)
self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
return_value=1)
server_name = '%s' % name
if override_name:
tmpl.t['Resources']['WebServer']['Properties'][
'name'] = server_name
resource_defns = tmpl.resource_definitions(stack)
server = cloud_server.CloudServer(server_name,
resource_defns['WebServer'],
stack)
self.patchobject(nova.NovaClientPlugin, '_create',
return_value=self.fc)
self.patchobject(server, 'store_external_ports')
if stub_create:
self.patchobject(self.fc.servers, 'create',
return_value=return_server)
# mock check_create_complete innards
self.patchobject(self.fc.servers, 'get',
return_value=return_server)
return server
def _create_test_server(self, return_server, name, override_name=False,
                        stub_create=True):
    """Set up a server resource and run its create task to completion."""
    srv = self._setup_test_server(
        return_server, name, stub_create=stub_create)
    scheduler.TaskRunner(srv.create)()
    return srv
def _mock_metadata_os_distro(self):
    """Stub the image lookup so the image reports a CentOS os_distro."""
    fake_image = mock.Mock(metadata={'os_distro': 'centos'})
    self.fc.images.get = mock.Mock(return_value=fake_image)
def test_rackconnect_deployed(self):
    """Create succeeds when RackConnect automation reports DEPLOYED."""
    fake_server = self.fc.servers.list()[1]
    fake_server.metadata = {
        'rax_service_level_automation': 'Complete',
        'rackconnect_automation_status': 'DEPLOYED',
    }
    server = self._setup_test_server(fake_server,
                                     'test_rackconnect_deployed')
    server.context.roles = ['rack_connect']
    scheduler.TaskRunner(server.create)()
    self.assertEqual('CREATE', server.action)
    self.assertEqual('COMPLETE', server.status)
def test_rackconnect_failed(self):
    """A FAILED RackConnect status surfaces as a ResourceFailure."""
    fake_server = self.fc.servers.list()[1]
    fake_server.metadata = {
        'rackconnect_automation_status': 'FAILED',
        'rax_service_level_automation': 'Complete',
    }
    server = self._setup_test_server(fake_server,
                                     'test_rackconnect_failed')
    server.context.roles = ['rack_connect']
    runner = scheduler.TaskRunner(server.create)
    exc = self.assertRaises(exception.ResourceFailure, runner)
    self.assertEqual('Error: resources.test_rackconnect_failed: '
                     'RackConnect automation FAILED',
                     six.text_type(exc))
def test_rackconnect_unprocessable(self):
    """An UNPROCESSABLE RackConnect status still completes the create."""
    fake_server = self.fc.servers.list()[1]
    fake_server.metadata = {
        'rackconnect_automation_status': 'UNPROCESSABLE',
        'rackconnect_unprocessable_reason': 'Fake reason',
        'rax_service_level_automation': 'Complete',
    }
    server = self._setup_test_server(fake_server,
                                     'test_rackconnect_unprocessable')
    server.context.roles = ['rack_connect']
    scheduler.TaskRunner(server.create)()
    self.assertEqual('CREATE', server.action)
    self.assertEqual('COMPLETE', server.status)
def test_rackconnect_unknown(self):
    """An unrecognized RackConnect status fails the create."""
    fake_server = self.fc.servers.list()[1]
    fake_server.metadata = {
        'rax_service_level_automation': 'Complete',
        'rackconnect_automation_status': 'FOO',
    }
    server = self._setup_test_server(fake_server,
                                     'test_rackconnect_unknown')
    server.context.roles = ['rack_connect']
    runner = scheduler.TaskRunner(server.create)
    exc = self.assertRaises(exception.ResourceFailure, runner)
    self.assertEqual('Error: resources.test_rackconnect_unknown: '
                     'Unknown RackConnect automation status: FOO',
                     six.text_type(exc))
def test_rackconnect_deploying(self):
    """Create completes only after RackConnect reaches DEPLOYED.

    The fake ``get`` below advances the server through
    DEPLOYING -> ACTIVE -> DEPLOYED across successive poll iterations.
    """
    return_server = self.fc.servers.list()[0]
    server = self._setup_test_server(return_server,
                                     'srv_sts_bld')
    server.resource_id = 1234
    server.context.roles = ['rack_connect']
    # Mutable counter shared with the closure below.
    check_iterations = [0]

    # Bind fake get method which check_create_complete will call
    def activate_status(server):
        check_iterations[0] += 1
        if check_iterations[0] == 1:
            return_server.metadata.update({
                'rackconnect_automation_status': 'DEPLOYING',
                'rax_service_level_automation': 'Complete',
            })
        if check_iterations[0] == 2:
            return_server.status = 'ACTIVE'
        if check_iterations[0] > 3:
            # Iteration 3 deliberately stays DEPLOYING so the create
            # has to poll at least once more before finishing.
            return_server.metadata.update({
                'rackconnect_automation_status': 'DEPLOYED',
            })
        return return_server
    self.patchobject(self.fc.servers, 'get',
                     side_effect=activate_status)
    scheduler.TaskRunner(server.create)()
    self.assertEqual((server.CREATE, server.COMPLETE), server.state)
def test_rackconnect_no_status(self):
    """Create keeps polling when the RackConnect metadata is absent.

    The metadata only appears after the second poll; create must not
    finish before then.
    """
    return_server = self.fc.servers.list()[0]
    server = self._setup_test_server(return_server,
                                     'srv_sts_bld')
    server.resource_id = 1234
    server.context.roles = ['rack_connect']
    # Mutable counter shared with the closure below.
    check_iterations = [0]

    # Bind fake get method which check_create_complete will call
    def activate_status(server):
        check_iterations[0] += 1
        if check_iterations[0] == 1:
            return_server.status = 'ACTIVE'
        if check_iterations[0] > 2:
            return_server.metadata.update({
                'rackconnect_automation_status': 'DEPLOYED',
                'rax_service_level_automation': 'Complete'})
        return return_server
    self.patchobject(self.fc.servers, 'get',
                     side_effect=activate_status)
    scheduler.TaskRunner(server.create)()
    self.assertEqual((server.CREATE, server.COMPLETE), server.state)
def test_rax_automation_lifecycle(self):
    """Create waits for the full Rackspace automation lifecycle.

    The fake ``get`` walks through: ACTIVE with no metadata, then
    RackConnect DEPLOYED, then service-level automation 'In Progress',
    and finally 'Complete' — only then may create finish.
    """
    return_server = self.fc.servers.list()[0]
    server = self._setup_test_server(return_server,
                                     'srv_sts_bld')
    server.resource_id = 1234
    server.context.roles = ['rack_connect']
    server.metadata = {}
    # Mutable counter shared with the closure below.
    check_iterations = [0]

    # Bind fake get method which check_create_complete will call
    def activate_status(server):
        check_iterations[0] += 1
        if check_iterations[0] == 1:
            return_server.status = 'ACTIVE'
        if check_iterations[0] == 2:
            return_server.metadata = {
                'rackconnect_automation_status': 'DEPLOYED'}
        if check_iterations[0] == 3:
            return_server.metadata = {
                'rackconnect_automation_status': 'DEPLOYED',
                'rax_service_level_automation': 'In Progress'}
        if check_iterations[0] > 3:
            return_server.metadata = {
                'rackconnect_automation_status': 'DEPLOYED',
                'rax_service_level_automation': 'Complete'}
        return return_server
    self.patchobject(self.fc.servers, 'get',
                     side_effect=activate_status)
    scheduler.TaskRunner(server.create)()
    self.assertEqual((server.CREATE, server.COMPLETE), server.state)
def test_add_port_for_addresses(self):
    """_add_port_for_address groups addresses by network with port ids."""
    return_server = self.fc.servers.list()[1]
    return_server.metadata = {'rax_service_level_automation': 'Complete'}
    stack_name = 'test_stack'
    (tmpl, stack) = self._setup_test_stack(stack_name)
    resource_defns = tmpl.resource_definitions(stack)
    self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
                     return_value=1)
    self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
                     return_value=1)
    server = cloud_server.CloudServer('WebServer',
                                      resource_defns['WebServer'], stack)
    self.patchobject(server, 'store_external_ports')

    # Minimal stand-in for the virtual-interface objects returned by
    # the os_virtual_interfacesv2 novaclient extension.
    class Interface(object):
        def __init__(self, id, addresses):
            self.identifier = id
            self.addresses = addresses

        @property
        def id(self):
            return self.identifier

        @property
        def ip_addresses(self):
            return self.addresses

    interfaces = [
        {
            "id": "port-uuid-1",
            "ip_addresses": [
                {
                    "address": "4.5.6.7",
                    "network_id": "00xx000-0xx0-0xx0-0xx0-00xxx000",
                    "network_label": "public"
                },
                {
                    "address": "2001:4802:7805:104:be76:4eff:fe20:2063",
                    "network_id": "00xx000-0xx0-0xx0-0xx0-00xxx000",
                    "network_label": "public"
                }
            ],
            "mac_address": "fa:16:3e:8c:22:aa"
        },
        {
            "id": "port-uuid-2",
            "ip_addresses": [
                {
                    "address": "5.6.9.8",
                    "network_id": "11xx1-1xx1-xx11-1xx1-11xxxx11",
                    "network_label": "public"
                }
            ],
            "mac_address": "fa:16:3e:8c:44:cc"
        },
        {
            "id": "port-uuid-3",
            "ip_addresses": [
                {
                    "address": "10.13.12.13",
                    "network_id": "1xx1-1xx1-xx11-1xx1-11xxxx11",
                    "network_label": "private"
                }
            ],
            "mac_address": "fa:16:3e:8c:44:dd"
        }
    ]
    # Only id and ip_addresses are carried over; mac_address is dropped.
    ifaces = [Interface(i['id'], i['ip_addresses']) for i in interfaces]
    # NOTE(review): the expected mac_addr values below do not match the
    # mac_address entries in the interfaces fixture above — they appear
    # to come from the fake server's own address data instead; confirm
    # against _add_port_for_address before relying on them.
    expected = {
        'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa':
        [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:22:aa',
          'addr': '4.5.6.7',
          'port': 'port-uuid-1',
          'version': 4},
         {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:33:bb',
          'addr': '5.6.9.8',
          'port': 'port-uuid-2',
          'version': 4}],
        'private': [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:44:cc',
                     'addr': '10.13.12.13',
                     'port': 'port-uuid-3',
                     'version': 4}],
        'public': [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:22:aa',
                    'addr': '4.5.6.7',
                    'port': 'port-uuid-1',
                    'version': 4},
                   {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:33:bb',
                    'addr': '5.6.9.8',
                    'port': 'port-uuid-2',
                    'version': 4}]}

    server.client = mock.Mock()
    mock_client = mock.Mock()
    server.client.return_value = mock_client
    mock_ext = mock_client.os_virtual_interfacesv2_python_novaclient_ext
    mock_ext.list.return_value = ifaces
    resp = server._add_port_for_address(return_server)
    self.assertEqual(expected, resp)
def test_rax_automation_build_error(self):
    """A 'Build Error' automation status fails the create."""
    fake_server = self.fc.servers.list()[1]
    fake_server.metadata = {
        'rax_service_level_automation': 'Build Error'}
    server = self._setup_test_server(fake_server,
                                     'test_managed_cloud_build_error')
    runner = scheduler.TaskRunner(server.create)
    exc = self.assertRaises(exception.ResourceFailure, runner)
    self.assertEqual('Error: resources.test_managed_cloud_build_error: '
                     'Rackspace Cloud automation failed',
                     six.text_type(exc))
def test_rax_automation_unknown(self):
    """An unrecognized automation status fails the create."""
    fake_server = self.fc.servers.list()[1]
    fake_server.metadata = {'rax_service_level_automation': 'FOO'}
    server = self._setup_test_server(fake_server,
                                     'test_managed_cloud_unknown')
    runner = scheduler.TaskRunner(server.create)
    exc = self.assertRaises(exception.ResourceFailure, runner)
    self.assertEqual('Error: resources.test_managed_cloud_unknown: '
                     'Unknown Rackspace Cloud automation status: FOO',
                     six.text_type(exc))
def _test_server_config_drive(self, user_data, config_drive, result,
                              ud_format='RAW'):
    """Create a server and assert the config_drive value sent to Nova.

    :param user_data: value for the server's user_data property
    :param config_drive: value for the server's config_drive property
    :param result: config_drive value expected in the create() call
    :param ud_format: value for the user_data_format property
    """
    return_server = self.fc.servers.list()[1]
    return_server.metadata = {'rax_service_level_automation': 'Complete'}
    stack_name = 'no_user_data'
    self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
                     return_value=1)
    self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
                     return_value=1)
    (tmpl, stack) = self._setup_test_stack(stack_name)
    properties = tmpl.t['Resources']['WebServer']['Properties']
    properties['user_data'] = user_data
    properties['config_drive'] = config_drive
    properties['user_data_format'] = ud_format
    properties['software_config_transport'] = "POLL_TEMP_URL"
    resource_defns = tmpl.resource_definitions(stack)
    server = cloud_server.CloudServer('WebServer',
                                      resource_defns['WebServer'], stack)
    server.metadata = {'rax_service_level_automation': 'Complete'}
    self.patchobject(server, 'store_external_ports')
    self.patchobject(server, "_populate_deployments_metadata")
    mock_servers_create = mock.Mock(return_value=return_server)
    self.fc.servers.create = mock_servers_create
    self.patchobject(self.fc.servers, 'get',
                     return_value=return_server)
    scheduler.TaskRunner(server.create)()
    # Only config_drive is pinned; every other create() kwarg is free.
    mock_servers_create.assert_called_with(
        image=mock.ANY,
        flavor=mock.ANY,
        key_name=mock.ANY,
        name=mock.ANY,
        security_groups=mock.ANY,
        userdata=mock.ANY,
        scheduler_hints=mock.ANY,
        meta=mock.ANY,
        nics=mock.ANY,
        availability_zone=mock.ANY,
        block_device_mapping=mock.ANY,
        block_device_mapping_v2=mock.ANY,
        config_drive=result,
        disk_config=mock.ANY,
        reservation_id=mock.ANY,
        files=mock.ANY,
        admin_pass=mock.ANY)
def test_server_user_data_no_config_drive(self):
    """user_data forces the config drive on even when not requested."""
    self._test_server_config_drive(
        user_data="my script", config_drive=False, result=True)
def test_server_user_data_config_drive(self):
    """user_data plus an explicit config drive keeps the drive on."""
    self._test_server_config_drive(
        user_data="my script", config_drive=True, result=True)
def test_server_no_user_data_config_drive(self):
    """An explicit config drive is honored without user_data."""
    self._test_server_config_drive(
        user_data=None, config_drive=True, result=True)
def test_server_no_user_data_no_config_drive(self):
    """No user_data and no config drive leaves the drive off."""
    self._test_server_config_drive(
        user_data=None, config_drive=False, result=False)
def test_server_no_user_data_software_config(self):
    """SOFTWARE_CONFIG format forces the config drive on."""
    self._test_server_config_drive(
        user_data=None, config_drive=False, result=True,
        ud_format="SOFTWARE_CONFIG")
@mock.patch.object(resource.Resource, "client_plugin")
@mock.patch.object(resource.Resource, "client")
class CloudServersValidationTests(common.HeatTestCase):
    """Validation checks for CloudServer flavor/image compatibility.

    The class-level patches stub out the resource's client accessors,
    so every test method receives ``mock_client`` and ``mock_plugin``
    mock arguments (innermost decorator first).
    """

    def setUp(self):
        super(CloudServersValidationTests, self).setUp()
        # pyrax may be absent from the test environment, in which case
        # the resource class was never registered; register it here.
        resource._register_class("OS::Nova::Server", cloud_server.CloudServer)
        properties_server = {
            "image": "CentOS 5.2",
            "flavor": "256 MB Server",
            "key_name": "test",
            "user_data": "wordpress",
        }
        self.mockstack = mock.Mock()
        self.mockstack.has_cache_data.return_value = False
        self.mockstack.db_resource_get.return_value = None
        # Default resource definition with both image and flavor set.
        self.rsrcdef = rsrc_defn.ResourceDefinition(
            "test", cloud_server.CloudServer, properties=properties_server)

    def test_validate_no_image(self, mock_client, mock_plugin):
        """Omitting the image validates when boot-from-volume is set up."""
        properties_server = {
            "flavor": "256 MB Server",
            "key_name": "test",
            "user_data": "wordpress",
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", cloud_server.CloudServer, properties=properties_server)
        mock_plugin().find_flavor_by_name_or_id.return_value = 1
        server = cloud_server.CloudServer("test", rsrcdef, self.mockstack)
        mock_boot_vol = self.patchobject(
            server, '_validate_block_device_mapping')
        mock_boot_vol.return_value = True
        self.assertIsNone(server.validate())

    def test_validate_no_image_bfv(self, mock_client, mock_plugin):
        """A flavor class that cannot boot from volume fails validation."""
        properties_server = {
            "flavor": "256 MB Server",
            "key_name": "test",
            "user_data": "wordpress",
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", cloud_server.CloudServer, properties=properties_server)
        mock_plugin().find_flavor_by_name_or_id.return_value = 1
        server = cloud_server.CloudServer("test", rsrcdef, self.mockstack)
        mock_boot_vol = self.patchobject(
            server, '_validate_block_device_mapping')
        mock_boot_vol.return_value = True
        mock_flavor = mock.Mock(ram=4)
        mock_flavor.to_dict.return_value = {
            'OS-FLV-WITH-EXT-SPECS:extra_specs': {
                'class': 'standard1',
            },
        }
        mock_plugin().get_flavor.return_value = mock_flavor
        error = self.assertRaises(
            exception.StackValidationFailed, server.validate)
        self.assertEqual(
            'Flavor 256 MB Server cannot be booted from volume.',
            six.text_type(error))

    def test_validate_bfv_volume_only(self, mock_client, mock_plugin):
        """A volume-only flavor rejects templates that specify an image."""
        mock_plugin().find_flavor_by_name_or_id.return_value = 1
        mock_plugin().find_image_by_name_or_id.return_value = 1
        server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
        mock_flavor = mock.Mock(ram=4, disk=4)
        mock_flavor.to_dict.return_value = {
            'OS-FLV-WITH-EXT-SPECS:extra_specs': {
                'class': 'memory1',
            },
        }
        mock_image = mock.Mock(status='ACTIVE', min_ram=2, min_disk=1)
        mock_image.get.return_value = "memory1"
        mock_image.__iter__ = mock.Mock(return_value=iter([]))
        mock_plugin().get_flavor.return_value = mock_flavor
        mock_plugin().get_image.return_value = mock_image
        error = self.assertRaises(
            exception.StackValidationFailed, server.validate)
        self.assertEqual(
            'Flavor 256 MB Server must be booted from volume, '
            'but image CentOS 5.2 was also specified.',
            six.text_type(error))

    def test_validate_image_flavor_excluded_class(self, mock_client,
                                                  mock_plugin):
        """An image excluding the flavor's class fails validation."""
        mock_plugin().find_flavor_by_name_or_id.return_value = 1
        mock_plugin().find_image_by_name_or_id.return_value = 1
        server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
        mock_image = mock.Mock(status='ACTIVE', min_ram=2, min_disk=1)
        # '!standard1, *' means every class except standard1.
        mock_image.get.return_value = "!standard1, *"
        mock_image.__iter__ = mock.Mock(return_value=iter([]))
        mock_flavor = mock.Mock(ram=4, disk=4)
        mock_flavor.to_dict.return_value = {
            'OS-FLV-WITH-EXT-SPECS:extra_specs': {
                'class': 'standard1',
            },
        }
        mock_plugin().get_flavor.return_value = mock_flavor
        mock_plugin().get_image.return_value = mock_image
        error = self.assertRaises(
            exception.StackValidationFailed, server.validate)
        self.assertEqual(
            'Flavor 256 MB Server cannot be used with image CentOS 5.2.',
            six.text_type(error))

    def test_validate_image_flavor_ok(self, mock_client, mock_plugin):
        """A matching image/flavor class combination validates cleanly."""
        mock_plugin().find_flavor_by_name_or_id.return_value = 1
        mock_plugin().find_image_by_name_or_id.return_value = 1
        server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
        mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
        mock_image.get.return_value = "standard1"
        mock_image.__iter__ = mock.Mock(return_value=iter([]))
        mock_flavor = mock.Mock(ram=4, disk=4)
        mock_flavor.to_dict.return_value = {
            'OS-FLV-WITH-EXT-SPECS:extra_specs': {
                'class': 'standard1',
                'disk_io_index': 1,
            },
        }
        mock_plugin().get_flavor.return_value = mock_flavor
        mock_plugin().get_image.return_value = mock_image
        self.assertIsNone(server.validate())

    def test_validate_image_flavor_empty_metadata(self, mock_client,
                                                  mock_plugin):
        """Empty class metadata on image and flavor validates cleanly."""
        server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
        mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
        mock_image.get.return_value = ""
        mock_image.__iter__ = mock.Mock(return_value=iter([]))
        mock_flavor = mock.Mock(ram=4, disk=4)
        mock_flavor.to_dict.return_value = {
            'OS-FLV-WITH-EXT-SPECS:extra_specs': {
                'flavor_classes': '',
            },
        }
        mock_plugin().get_flavor.return_value = mock_flavor
        mock_plugin().get_image.return_value = mock_image
        self.assertIsNone(server.validate())

    def test_validate_image_flavor_no_metadata(self, mock_client, mock_plugin):
        """Missing class metadata entirely also validates cleanly."""
        server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
        mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
        mock_image.get.return_value = None
        mock_image.__iter__ = mock.Mock(return_value=iter([]))
        mock_flavor = mock.Mock(ram=4, disk=4)
        mock_flavor.to_dict.return_value = {}
        mock_plugin().get_flavor.return_value = mock_flavor
        mock_plugin().get_image.return_value = mock_image
        self.assertIsNone(server.validate())

    def test_validate_image_flavor_not_base(self, mock_client, mock_plugin):
        """A snapshot image falls back to its base image for validation."""
        server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
        mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
        mock_image.get.return_value = None
        # The image exposes a base_image_ref, so get_image is called a
        # second time for the base image.
        mock_image.__iter__ = mock.Mock(return_value=iter(
            ['base_image_ref']))
        mock_image.__getitem__ = mock.Mock(return_value='1234')
        mock_base_image = mock.Mock(size=1, status='ACTIVE', min_ram=2,
                                    min_disk=2)
        mock_base_image.get.return_value = None
        mock_base_image.__iter__ = mock.Mock(return_value=iter([]))
        mock_flavor = mock.Mock(ram=4, disk=4)
        mock_flavor.to_dict.return_value = {}
        mock_plugin().get_flavor.return_value = mock_flavor
        mock_plugin().get_image.side_effect = [mock_image, mock_base_image]
        self.assertIsNone(server.validate())

View File

@ -1,316 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from heat.common import exception
from heat.common import template_format
from heat.engine import environment
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
from ..resources import cloud_dns # noqa
domain_only_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Dns instance running on Rackspace cloud",
"Parameters" : {
"UnittestDomain" : {
"Description" : "Domain for unit tests",
"Type" : "String",
"Default" : 'dnsheatunittest.com'
},
"dnsttl" : {
"Description" : "TTL for the domain",
"Type" : "Number",
"MinValue" : '301',
"Default" : '301'
},
"name": {
"Description" : "The cloud dns instance name",
"Type": "String",
"Default": "CloudDNS"
}
},
"Resources" : {
"domain" : {
"Type": "Rackspace::Cloud::DNS",
"Properties" : {
"name" : "dnsheatunittest.com",
"emailAddress" : "admin@dnsheatunittest.com",
"ttl" : 3600,
"comment" : "Testing Cloud DNS integration with Heat"
}
}
}
}
'''
class FakeDnsInstance(object):
    """Minimal stand-in for a pyrax Cloud DNS domain object.

    The tests only inspect the numeric identifiers and need get() and
    delete() to exist as harmless no-ops.
    """

    def __init__(self):
        self.id = self.resource_id = 4

    def get(self):
        """No-op refresh; the fake carries no remote state."""
        return None

    def delete(self):
        """No-op delete; there is nothing to tear down."""
        return None
class RackspaceDnsTest(common.HeatTestCase):
    """Tests for the Rackspace::Cloud::DNS resource.

    Uses mox (``self.m``) record/replay stubs for the pyrax cloud_dns
    client; every expectation recorded before ReplayAll() must occur
    exactly as recorded.
    """

    def setUp(self):
        super(RackspaceDnsTest, self).setUp()
        # Test environment may not have pyrax client library installed and if
        # pyrax is not installed resource class would not be registered.
        # So register resource provider class explicitly for unit testing.
        resource._register_class("Rackspace::Cloud::DNS", cloud_dns.CloudDns)
        # Arguments expected for a bare domain create (no records).
        self.create_domain_only_args = {
            "name": 'dnsheatunittest.com',
            "emailAddress": 'admin@dnsheatunittest.com',
            "ttl": 3600,
            "comment": 'Testing Cloud DNS integration with Heat',
            "records": None
        }
        # Arguments expected for a domain-only update.
        self.update_domain_only_args = {
            "emailAddress": 'updatedEmail@example.com',
            "ttl": 5555,
            "comment": 'updated comment'
        }

    def _setup_test_cloud_dns_instance(self, name, parsed_t):
        """Build a CloudDns resource from a parsed template."""
        stack_name = '%s_stack' % name
        t = parsed_t
        templ = template.Template(
            t, env=environment.Environment({'name': 'test'}))
        self.stack = parser.Stack(utils.dummy_context(),
                                  stack_name,
                                  templ,
                                  stack_id=str(uuid.uuid4()))
        instance = cloud_dns.CloudDns(
            '%s_name' % name,
            templ.resource_definitions(self.stack)['domain'],
            self.stack)
        return instance

    def _stubout_create(self, instance, fake_dnsinstance, **create_args):
        """Record a single expected cloud_dns create() call."""
        mock_client = self.m.CreateMockAnything()
        self.m.StubOutWithMock(instance, 'cloud_dns')
        instance.cloud_dns().AndReturn(mock_client)
        self.m.StubOutWithMock(mock_client, "create")
        mock_client.create(**create_args).AndReturn(fake_dnsinstance)
        self.m.ReplayAll()

    def _stubout_update(
            self,
            instance,
            fake_dnsinstance,
            updateRecords=None,
            **update_args):
        """Record an expected domain update(), optionally with records."""
        mock_client = self.m.CreateMockAnything()
        self.m.StubOutWithMock(instance, 'cloud_dns')
        instance.cloud_dns().AndReturn(mock_client)
        self.m.StubOutWithMock(mock_client, "get")
        mock_domain = self.m.CreateMockAnything()
        mock_client.get(fake_dnsinstance.resource_id).AndReturn(mock_domain)
        self.m.StubOutWithMock(mock_domain, "update")
        mock_domain.update(**update_args).AndReturn(fake_dnsinstance)
        if updateRecords:
            fake_records = list()
            mock_domain.list_records().AndReturn(fake_records)
            mock_domain.add_records([{
                'comment': None,
                'priority': None,
                'type': 'A',
                'name': 'ftp.example.com',
                'data': '192.0.2.8',
                'ttl': 3600}])
        self.m.ReplayAll()

    def _get_create_args_with_comments(self, record):
        """Return create args with a None comment merged into the record."""
        record_with_comment = [dict(record[0])]
        record_with_comment[0]["comment"] = None
        create_record_args = dict()
        create_record_args['records'] = record_with_comment
        create_args = dict(
            list(self.create_domain_only_args.items()) +
            list(create_record_args.items()))
        return create_args

    def test_create_domain_only(self):
        """Test domain create only without any records."""
        fake_dns_instance = FakeDnsInstance()
        t = template_format.parse(domain_only_template)
        instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
        create_args = self.create_domain_only_args
        self._stubout_create(instance, fake_dns_instance, **create_args)
        scheduler.TaskRunner(instance.create)()
        self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()

    def test_create_domain_with_a_record(self):
        """Test domain create with an A record.

        This should not have a priority field.
        """
        fake_dns_instance = FakeDnsInstance()
        t = template_format.parse(domain_only_template)
        a_record = [{
            "type": "A",
            "name": "ftp.example.com",
            "data": "192.0.2.8",
            "ttl": 3600
        }]
        t['Resources']['domain']['Properties']['records'] = a_record
        instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
        create_args = self._get_create_args_with_comments(a_record)
        self._stubout_create(instance, fake_dns_instance, **create_args)
        scheduler.TaskRunner(instance.create)()
        self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()

    def test_create_domain_with_mx_record(self):
        """Test domain create with an MX record.

        This should have a priority field.
        """
        fake_dns_instance = FakeDnsInstance()
        t = template_format.parse(domain_only_template)
        mx_record = [{
            "type": "MX",
            "name": "example.com",
            "data": "mail.example.com",
            "priority": 5,
            "ttl": 3600
        }]
        t['Resources']['domain']['Properties']['records'] = mx_record
        instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
        create_args = self._get_create_args_with_comments(mx_record)
        self._stubout_create(instance, fake_dns_instance, **create_args)
        scheduler.TaskRunner(instance.create)()
        self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()

    def test_check(self):
        """check succeeds while the domain exists and fails once gone."""
        t = template_format.parse(domain_only_template)
        instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
        mock_get = mock.Mock()
        instance.cloud_dns = mock.Mock()
        instance.cloud_dns.return_value.get = mock_get
        scheduler.TaskRunner(instance.check)()
        self.assertEqual('CHECK', instance.action)
        self.assertEqual('COMPLETE', instance.status)

        # A NotFound from the client should fail the check action.
        mock_get.side_effect = cloud_dns.NotFound('boom')
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(instance.check))
        self.assertEqual('CHECK', instance.action)
        self.assertEqual('FAILED', instance.status)
        self.assertIn('boom', str(exc))

    def test_update(self, updateRecords=None):
        """Helper function for testing domain updates.

        Also runs directly as the no-records update test.
        """
        fake_dns_instance = FakeDnsInstance()
        t = template_format.parse(domain_only_template)
        instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)
        instance.resource_id = 4
        update_args = self.update_domain_only_args
        self._stubout_update(
            instance,
            fake_dns_instance,
            updateRecords,
            **update_args)
        uprops = dict(instance.properties)
        uprops.update({
            'emailAddress': 'updatedEmail@example.com',
            'ttl': 5555,
            'comment': 'updated comment',
        })
        if updateRecords:
            uprops['records'] = updateRecords
        ut = rsrc_defn.ResourceDefinition(instance.name,
                                          instance.type(),
                                          uprops)
        instance.state_set(instance.CREATE, instance.COMPLETE)
        scheduler.TaskRunner(instance.update, ut)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()

    def test_update_domain_only(self):
        """Test domain update without any records."""
        self.test_update()

    def test_update_domain_with_a_record(self):
        """Test domain update with an A record."""
        a_record = [{'type': 'A',
                     'name': 'ftp.example.com',
                     'data': '192.0.2.8',
                     'ttl': 3600}]
        self.test_update(updateRecords=a_record)

    def test_update_record_only(self):
        """A records-only change must not call domain.update()."""
        fake_dns_instance = FakeDnsInstance()
        t = template_format.parse(domain_only_template)
        instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)
        instance.resource_id = 4
        update_records = [{'type': 'A',
                           'name': 'ftp.example.com',
                           'data': '192.0.2.8',
                           'ttl': 3600}]
        mock_client = self.m.CreateMockAnything()
        self.m.StubOutWithMock(instance, 'cloud_dns')
        instance.cloud_dns().AndReturn(mock_client)
        self.m.StubOutWithMock(mock_client, "get")
        mock_domain = self.m.CreateMockAnything()
        mock_client.get(fake_dns_instance.resource_id).AndReturn(mock_domain)
        # mock_domain.update shouldn't be called in this scenario, so
        # stub it out but don't record a call to it
        self.m.StubOutWithMock(mock_domain, "update")
        fake_records = list()
        mock_domain.list_records().AndReturn(fake_records)
        mock_domain.add_records([{
            'comment': None,
            'priority': None,
            'type': 'A',
            'name': 'ftp.example.com',
            'data': '192.0.2.8',
            'ttl': 3600}])
        self.m.ReplayAll()
        uprops = dict(instance.properties)
        uprops['records'] = update_records
        ut = rsrc_defn.ResourceDefinition(instance.name,
                                          instance.type(),
                                          uprops)
        instance.state_set(instance.CREATE, instance.COMPLETE)
        scheduler.TaskRunner(instance.update, ut)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()

View File

@ -1 +0,0 @@
-e git+https://github.com/rackerlabs/heat-pyrax.git#egg=pyrax

View File

@ -1,43 +0,0 @@
# Packaging metadata for the contrib Rackspace plugin (built with pbr).
[metadata]
name = heat-contrib-rackspace
summary = Heat resources for working with the Rackspace Cloud
description-file =
    README.md
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://docs.openstack.org/developer/heat/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7

[files]
packages =
    rackspace
# Copy to /usr/lib/heat for non-stevedore plugin loading
data_files =
    lib/heat/rackspace = rackspace/resources/*
    lib/heat/heat_keystoneclient_v2 = heat_keystoneclient_v2/*

# Replace Heat's default service clients with the Rackspace-aware ones.
[entry_points]
heat.clients =
    auto_scale = rackspace.clients:RackspaceAutoScaleClient
    cinder = rackspace.clients:RackspaceCinderClient
    cloud_dns = rackspace.clients:RackspaceCloudDNSClient
    cloud_lb = rackspace.clients:RackspaceCloudLBClient
    cloud_networks = rackspace.clients:RackspaceCloudNetworksClient
    glance = rackspace.clients:RackspaceGlanceClient
    nova = rackspace.clients:RackspaceNovaClient
    trove = rackspace.clients:RackspaceTroveClient
    swift = rackspace.clients:RackspaceSwiftClient

[global]
setup-hooks =
    pbr.hooks.setup_hook

View File

@ -1,30 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All real packaging configuration lives in setup.cfg; pbr reads it.
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)