789 lines
30 KiB
Python
789 lines
30 KiB
Python
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
|
|
"""Resources for Rackspace Auto Scale."""
|
|
|
|
import copy
|
|
import six
|
|
|
|
from heat.common import exception
|
|
from heat.common.i18n import _
|
|
from heat.common import template_format
|
|
from heat.engine import attributes
|
|
from heat.engine import constraints
|
|
from heat.engine import properties
|
|
from heat.engine import resource
|
|
from heat.engine import support
|
|
from heat.engine import template as templatem
|
|
|
|
try:
    # pyrax is an optional dependency; import its exception types when
    # available so the resource handlers below can catch them.
    from pyrax.exceptions import Forbidden, NotFound
    PYRAX_INSTALLED = True
except ImportError:
    PYRAX_INSTALLED = False

    class Forbidden(Exception):
        """Dummy pyrax exception - only used for testing."""

    class NotFound(Exception):
        """Dummy pyrax exception - only used for testing."""
|
|
|
|
|
|
class Group(resource.Resource):
    """Represents a scaling group."""

    # pyrax differs drastically from the actual Auto Scale API. We'll prefer
    # the true API here, but since pyrax doesn't support the full flexibility
    # of the API, we'll have to restrict what users can provide.

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # properties are identical to the API POST /groups.
    PROPERTIES = (
        GROUP_CONFIGURATION, LAUNCH_CONFIGURATION,
    ) = (
        'groupConfiguration', 'launchConfiguration',
    )

    _GROUP_CONFIGURATION_KEYS = (
        GROUP_CONFIGURATION_MAX_ENTITIES, GROUP_CONFIGURATION_COOLDOWN,
        GROUP_CONFIGURATION_NAME, GROUP_CONFIGURATION_MIN_ENTITIES,
        GROUP_CONFIGURATION_METADATA,
    ) = (
        'maxEntities', 'cooldown',
        'name', 'minEntities',
        'metadata',
    )

    _LAUNCH_CONFIG_KEYS = (
        LAUNCH_CONFIG_ARGS, LAUNCH_CONFIG_TYPE,
    ) = (
        'args', 'type',
    )

    _LAUNCH_CONFIG_ARGS_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
        LAUNCH_CONFIG_ARGS_SERVER,
        LAUNCH_CONFIG_ARGS_STACK,
    ) = (
        'loadBalancers',
        'server',
        'stack',
    )

    _LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
    ) = (
        'loadBalancerId',
        'port',
    )

    _LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
        LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
        LAUNCH_CONFIG_ARGS_SERVER_METADATA,
        LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
        LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
        LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
        LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
        LAUNCH_CONFIG_ARGS_SERVER_USER_DATA,
        LAUNCH_CONFIG_ARGS_SERVER_CDRIVE
    ) = (
        'name', 'flavorRef',
        'imageRef',
        'metadata',
        'personality',
        'networks',
        'diskConfig',  # technically maps to OS-DCF:diskConfig
        'key_name',
        'user_data',
        'config_drive'
    )

    _LAUNCH_CONFIG_ARGS_SERVER_NETWORK_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID,
    ) = (
        'uuid',
    )

    _LAUNCH_CONFIG_ARGS_STACK_KEYS = (
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE,
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL,
        LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK,
        LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT,
        LAUNCH_CONFIG_ARGS_STACK_FILES,
        LAUNCH_CONFIG_ARGS_STACK_PARAMETERS,
        LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS
    ) = (
        'template',
        'template_url',
        'disable_rollback',
        'environment',
        'files',
        'parameters',
        'timeout_mins'
    )

    _launch_configuration_args_schema = {
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS: properties.Schema(
            properties.Schema.LIST,
            _('List of load balancers to hook the '
              'server up to. If not specified, no '
              'load balancing will be configured.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('ID of the load balancer.'),
                        required=True
                    ),
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Server port to connect the load balancer to.')
                    ),
                },
            )
        ),
        LAUNCH_CONFIG_ARGS_SERVER: properties.Schema(
            properties.Schema.MAP,
            _('Server creation arguments, as accepted by the Cloud Servers '
              'server creation API.'),
            required=False,
            schema={
                LAUNCH_CONFIG_ARGS_SERVER_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Server name.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the flavor to boot onto.'),
                    constraints=[
                        constraints.CustomConstraint('nova.flavor')
                    ],
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the image to boot with.'),
                    constraints=[
                        constraints.CustomConstraint('glance.image')
                    ],
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Metadata key and value pairs.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY: properties.Schema(
                    properties.Schema.MAP,
                    _('File path and contents.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_CDRIVE: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Enable config drive on the instance.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_USER_DATA: properties.Schema(
                    properties.Schema.STRING,
                    _('User data for bootstrapping the instance.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_NETWORKS: properties.Schema(
                    properties.Schema.LIST,
                    _('Networks to attach to. If unspecified, the instance '
                      'will be attached to the public Internet and private '
                      'ServiceNet networks.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('UUID of network to attach to.'),
                                required=True)
                        }
                    )
                ),
                LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG: properties.Schema(
                    properties.Schema.STRING,
                    _('Configuration specifying the partition layout. AUTO to '
                      'create a partition utilizing the entire disk, and '
                      'MANUAL to create a partition matching the source '
                      'image.'),
                    constraints=[
                        constraints.AllowedValues(['AUTO', 'MANUAL']),
                    ]
                ),
                LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of a previously created SSH keypair to allow '
                      'key-based authentication to the server.')
                ),
            },
        ),
        LAUNCH_CONFIG_ARGS_STACK: properties.Schema(
            properties.Schema.MAP,
            _('The attributes that Auto Scale uses to create a new stack. The '
              'attributes that you specify for the stack entity apply to all '
              'new stacks in the scaling group. Note the stack arguments are '
              'directly passed to Heat when creating a stack.'),
            schema={
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE: properties.Schema(
                    properties.Schema.STRING,
                    _('The template that describes the stack. Either the '
                      'template or template_url property must be specified.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL: properties.Schema(
                    properties.Schema.STRING,
                    _('A URI to a template. Either the template or '
                      'template_url property must be specified.')
                ),
                LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Keep the resources that have been created if the stack '
                      'fails to create. Defaults to True.'),
                    default=True
                ),
                LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT: properties.Schema(
                    properties.Schema.MAP,
                    _('The environment for the stack.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_FILES: properties.Schema(
                    properties.Schema.MAP,
                    _('The contents of files that the template references.')
                ),
                LAUNCH_CONFIG_ARGS_STACK_PARAMETERS: properties.Schema(
                    properties.Schema.MAP,
                    _('Key/value pairs of the parameters and their values to '
                      'pass to the parameters in the template.')
                ),
                LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The stack creation timeout in minutes.')
                )
            }
        )
    }

    properties_schema = {
        GROUP_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Group configuration.'),
            schema={
                GROUP_CONFIGURATION_MAX_ENTITIES: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Maximum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_COOLDOWN: properties.Schema(
                    properties.Schema.NUMBER,
                    _('Number of seconds after capacity changes during '
                      'which further capacity changes are disabled.'),
                    required=True
                ),
                GROUP_CONFIGURATION_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_MIN_ENTITIES: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Minimum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Arbitrary key/value metadata to associate with '
                      'this group.')
                ),
            },
            required=True,
            update_allowed=True
        ),
        LAUNCH_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Launch configuration.'),
            schema={
                LAUNCH_CONFIG_ARGS: properties.Schema(
                    properties.Schema.MAP,
                    _('Type-specific launch arguments.'),
                    schema=_launch_configuration_args_schema,
                    required=True
                ),
                LAUNCH_CONFIG_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('Launch configuration method. Only launch_server and '
                      'launch_stack are currently supported.'),
                    required=True,
                    constraints=[
                        constraints.AllowedValues(['launch_server',
                                                   'launch_stack']),
                    ]
                ),
            },
            required=True,
            update_allowed=True
        ),
        # We don't allow scaling policies to be specified here, despite the
        # fact that the API supports it. Users should use the ScalingPolicy
        # resource.
    }

    def _get_group_config_args(self, groupconf):
        """Get the groupConfiguration-related pyrax arguments."""
        return dict(
            name=groupconf[self.GROUP_CONFIGURATION_NAME],
            cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
            min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
            max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
            metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))

    def _get_launch_config_server_args(self, launchconf):
        """Get the launch_server launchConfiguration pyrax arguments.

        Resolves image and flavor names to IDs and converts the property
        layout into the keyword arguments pyrax expects.
        """
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        # Deep-copy so the normalization below doesn't mutate the resource's
        # resolved properties.
        lbs = copy.deepcopy(lb_args)
        for lb in lbs:
            # if the port is not specified, the lbid must be that of a
            # RackConnectV3 lb pool.
            if not lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]:
                del lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]
                continue
            # Cloud Load Balancer IDs must be sent as integers.
            lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
            lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
        personality = server_args.get(
            self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY)
        if personality:
            # Convert the {path: contents} map into the list-of-dicts form
            # the Cloud Servers API expects.
            personality = [{'path': k, 'contents': v} for k, v in
                           personality.items()]
        user_data = server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_USER_DATA)
        # Config drive is forced on whenever non-blank user data is supplied,
        # since user data is delivered via the config drive.
        cdrive = (server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_CDRIVE) or
                  bool(user_data is not None and len(user_data.strip())))
        image_id = self.client_plugin('glance').find_image_by_name_or_id(
            server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF])
        flavor_id = self.client_plugin('nova').find_flavor_by_name_or_id(
            server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF])

        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            # Use the server-args key constants here (the previous
            # GROUP_CONFIGURATION_* constants only worked because they
            # happened to map to the same strings).
            server_name=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_NAME],
            image=image_id,
            flavor=flavor_id,
            disk_config=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
            metadata=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_METADATA),
            config_drive=cdrive,
            user_data=user_data,
            personality=personality,
            networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
            load_balancers=lbs,
            key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
        )

    def _get_launch_config_stack_args(self, launchconf):
        """Get the launch_stack launchConfiguration pyrax arguments."""
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        stack_args = lcargs[self.LAUNCH_CONFIG_ARGS_STACK]
        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            template=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE],
            template_url=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL],
            disable_rollback=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK],
            environment=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT],
            files=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_FILES],
            parameters=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_PARAMETERS],
            timeout_mins=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS]
        )

    def _get_launch_config_args(self, launchconf):
        """Get the launchConfiguration-related pyrax arguments."""
        # validate() guarantees exactly one of server/stack args is present.
        if launchconf[self.LAUNCH_CONFIG_ARGS].get(
                self.LAUNCH_CONFIG_ARGS_SERVER):
            return self._get_launch_config_server_args(launchconf)
        else:
            return self._get_launch_config_stack_args(launchconf)

    def _get_create_args(self):
        """Get pyrax-style arguments for creating a scaling group."""
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        # pyrax's create() takes the group metadata under a different keyword
        # to avoid clashing with the launch config's server metadata.
        args['group_metadata'] = args.pop('metadata')
        args.update(self._get_launch_config_args(
            self.properties[self.LAUNCH_CONFIGURATION]))
        return args

    def handle_create(self):
        """Create the autoscaling group and set resource_id.

        The resource_id is set to the resulting group's ID.
        """
        asclient = self.auto_scale()
        group = asclient.create(**self._get_create_args())
        self.resource_id_set(str(group.id))

    def handle_check(self):
        """Verify the scaling group still exists; raises if it doesn't."""
        self.auto_scale().get(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the group configuration and the launch configuration."""
        asclient = self.auto_scale()
        if self.GROUP_CONFIGURATION in prop_diff:
            args = self._get_group_config_args(
                prop_diff[self.GROUP_CONFIGURATION])
            asclient.replace(self.resource_id, **args)
        if self.LAUNCH_CONFIGURATION in prop_diff:
            args = self._get_launch_config_args(
                prop_diff[self.LAUNCH_CONFIGURATION])
            asclient.replace_launch_config(self.resource_id, **args)

    def handle_delete(self):
        """Delete the scaling group.

        Since Auto Scale doesn't allow deleting a group until all its servers
        are gone, we must set the minEntities and maxEntities of the group to 0
        and then keep trying the delete until Auto Scale has deleted all the
        servers and the delete will succeed.
        """
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        # Shrink the group to zero so Auto Scale starts removing servers.
        args['min_entities'] = 0
        args['max_entities'] = 0
        try:
            asclient.replace(self.resource_id, **args)
        except NotFound:
            # Group already gone; nothing left to shrink.
            pass

    def check_delete_complete(self, result):
        """Try the delete operation until it succeeds."""
        if self.resource_id is None:
            return True
        try:
            self.auto_scale().delete(self.resource_id)
        except Forbidden:
            # Auto Scale refuses the delete while servers remain; retry.
            return False
        except NotFound:
            return True
        else:
            return True

    def _check_rackconnect_v3_pool_exists(self, pool_id):
        """Return True if pool_id names an existing RackConnectV3 LB pool."""
        pools = self.client("rackconnect").list_load_balancer_pools()
        if pool_id in (p.id for p in pools):
            return True
        return False

    def validate(self):
        """Validate the template-supplied properties.

        Raises:
            exception.StackValidationFailed: if the launch configuration,
                load balancers, or embedded stack template are inconsistent.
        """
        super(Group, self).validate()
        launchconf = self.properties[self.LAUNCH_CONFIGURATION]
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]

        server_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_SERVER)
        st_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_STACK)

        # launch_server and launch_stack are required and mutually exclusive.
        if ((not server_args and not st_args) or
                (server_args and st_args)):
            msg = (_('Must provide one of %(server)s or %(stack)s in %(conf)s')
                   % {'server': self.LAUNCH_CONFIG_ARGS_SERVER,
                      'stack': self.LAUNCH_CONFIG_ARGS_STACK,
                      'conf': self.LAUNCH_CONFIGURATION})
            raise exception.StackValidationFailed(msg)

        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        lbs = copy.deepcopy(lb_args)
        for lb in lbs:
            lb_port = lb.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT)
            lb_id = lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID]
            if not lb_port:
                # check if lb id is a valid RCV3 pool id
                if not self._check_rackconnect_v3_pool_exists(lb_id):
                    msg = _('Could not find RackConnectV3 pool '
                            'with id %s') % (lb_id)
                    raise exception.StackValidationFailed(msg)

        if st_args:
            st_tmpl = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE)
            st_tmpl_url = st_args.get(
                self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL)
            st_env = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT)
            # template and template_url are required and mutually exclusive.
            if ((not st_tmpl and not st_tmpl_url) or
                    (st_tmpl and st_tmpl_url)):
                msg = _('Must provide one of template or template_url.')
                raise exception.StackValidationFailed(msg)

            if st_tmpl:
                st_files = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_FILES)
                try:
                    # Parse the inline template now so a malformed template
                    # fails validation instead of group creation.
                    tmpl = template_format.simple_parse(st_tmpl)
                    templatem.Template(tmpl, files=st_files, env=st_env)
                except Exception as exc:
                    msg = (_('Encountered error while loading template: %s') %
                           six.text_type(exc))
                    raise exception.StackValidationFailed(msg)

    def auto_scale(self):
        """Return the Auto Scale client."""
        return self.client('auto_scale')
|
|
|
|
|
|
class ScalingPolicy(resource.Resource):
    """Represents a Rackspace Auto Scale scaling policy."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        GROUP, NAME, CHANGE, CHANGE_PERCENT, DESIRED_CAPACITY,
        COOLDOWN, TYPE, ARGS,
    ) = (
        'group', 'name', 'change', 'changePercent', 'desiredCapacity',
        'cooldown', 'type', 'args',
    )

    properties_schema = {
        # group isn't in the post body, but it's in the URL to post to.
        GROUP: properties.Schema(
            properties.Schema.STRING,
            _('Scaling group ID that this policy belongs to.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this scaling policy.'),
            required=True,
            update_allowed=True
        ),
        CHANGE: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount to add to or remove from current number of instances. '
              'Incompatible with changePercent and desiredCapacity.'),
            update_allowed=True
        ),
        CHANGE_PERCENT: properties.Schema(
            properties.Schema.NUMBER,
            _('Percentage-based change to add or remove from current number '
              'of instances. Incompatible with change and desiredCapacity.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Absolute number to set the number of instances to. '
              'Incompatible with change and changePercent.'),
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Number of seconds after a policy execution during which '
              'further executions are disabled.'),
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of this scaling policy. Specifies how the policy is '
              'executed.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['webhook', 'schedule',
                                           'cloud_monitoring']),
            ],
            update_allowed=True
        ),
        ARGS: properties.Schema(
            properties.Schema.MAP,
            _('Type-specific arguments for the policy.'),
            update_allowed=True
        ),
    }

    def _get_args(self, properties):
        """Get pyrax-style create arguments for scaling policies."""
        args = dict(
            scaling_group=properties[self.GROUP],
            name=properties[self.NAME],
            policy_type=properties[self.TYPE],
            cooldown=properties[self.COOLDOWN],
        )
        # change, changePercent and desiredCapacity are mutually exclusive;
        # pyrax expresses a percent change as change + is_percent=True.
        if properties.get(self.CHANGE) is not None:
            args['change'] = properties[self.CHANGE]
        elif properties.get(self.CHANGE_PERCENT) is not None:
            args['change'] = properties[self.CHANGE_PERCENT]
            args['is_percent'] = True
        elif properties.get(self.DESIRED_CAPACITY) is not None:
            args['desired_capacity'] = properties[self.DESIRED_CAPACITY]
        if properties.get(self.ARGS) is not None:
            args['args'] = properties[self.ARGS]
        return args

    def handle_create(self):
        """Create the scaling policy and initialize the resource ID.

        The resource ID is initialized to {group_id}:{policy_id}.
        """
        asclient = self.auto_scale()
        args = self._get_args(self.properties)
        policy = asclient.add_policy(**args)
        resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)
        self.resource_id_set(resource_id)

    def _get_policy_id(self):
        """Extract the policy ID from the {group_id}:{policy_id} resource ID."""
        return self.resource_id.split(':', 1)[1]

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the policy definition with the updated properties."""
        asclient = self.auto_scale()
        props = json_snippet.properties(self.properties_schema,
                                        self.context)
        args = self._get_args(props)
        args['policy'] = self._get_policy_id()
        asclient.replace_policy(**args)

    def handle_delete(self):
        """Delete the policy if it exists."""
        # Check before building the client, consistent with the other
        # resources in this module; nothing to do for a never-created policy.
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        policy_id = self._get_policy_id()
        try:
            asclient.delete_policy(self.properties[self.GROUP], policy_id)
        except NotFound:
            # Policy already gone; deletion is idempotent.
            pass

    def auto_scale(self):
        """Return the Auto Scale client."""
        return self.client('auto_scale')
|
|
|
|
|
|
class WebHook(resource.Resource):
    """Represents a Rackspace AutoScale webhook.

    Exposes the URLs of the webhook as attributes.
    """

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        POLICY, NAME, METADATA,
    ) = (
        'policy', 'name', 'metadata',
    )

    ATTRIBUTES = (
        EXECUTE_URL, CAPABILITY_URL,
    ) = (
        'executeUrl', 'capabilityUrl',
    )

    properties_schema = {
        POLICY: properties.Schema(
            properties.Schema.STRING,
            _('The policy that this webhook should apply to, in '
              '{group_id}:{policy_id} format. Generally a Ref to a Policy '
              'resource.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of this webhook.'),
            required=True,
            update_allowed=True
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key/value metadata for this webhook.'),
            update_allowed=True
        ),
    }

    attributes_schema = {
        EXECUTE_URL: attributes.Schema(
            _("The url for executing the webhook (requires auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
        CAPABILITY_URL: attributes.Schema(
            _("The url for executing the webhook (doesn't require auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
    }

    def _get_args(self, props):
        """Build pyrax-style webhook arguments from resource properties."""
        # The policy property carries both IDs, separated by the first colon.
        group_id, policy_id = props[self.POLICY].split(':', 1)
        args = {
            'name': props[self.NAME],
            'scaling_group': group_id,
            'policy': policy_id,
            'metadata': props.get(self.METADATA),
        }
        return args

    def handle_create(self):
        """Create the webhook and stash its URLs in resource data."""
        client = self.auto_scale()
        webhook = client.add_webhook(**self._get_args(self.properties))
        self.resource_id_set(webhook.id)

        # Map the API's link "rel" names onto our attribute names.
        rel_to_key = {'self': 'executeUrl',
                      'capability': 'capabilityUrl'}
        for link in webhook.links:
            key = rel_to_key.get(link['rel'])
            if key is None:
                continue
            self.data_set(key, link['href'].encode('utf-8'))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the webhook definition with the updated properties."""
        args = self._get_args(json_snippet['Properties'])
        args['webhook'] = self.resource_id
        self.auto_scale().replace_webhook(**args)

    def _resolve_attribute(self, key):
        """Return the stored URL for *key*, decoded, or None if unset."""
        stored = self.data().get(key)
        if stored is None:
            return None
        return stored.decode('utf-8')

    def handle_delete(self):
        """Delete the webhook; tolerate it already being gone."""
        if self.resource_id is None:
            return
        group_id, policy_id = self.properties[self.POLICY].split(':', 1)
        try:
            self.auto_scale().delete_webhook(group_id, policy_id,
                                             self.resource_id)
        except NotFound:
            pass

    def auto_scale(self):
        """Return the Auto Scale client."""
        return self.client('auto_scale')
|
|
|
|
|
|
def resource_mapping():
    """Map Rackspace Auto Scale resource type names to their classes."""
    mapping = {}
    mapping['Rackspace::AutoScale::Group'] = Group
    mapping['Rackspace::AutoScale::ScalingPolicy'] = ScalingPolicy
    mapping['Rackspace::AutoScale::WebHook'] = WebHook
    return mapping
|
|
|
|
|
|
def available_resource_mapping():
    """Expose the resource mapping only when pyrax is importable."""
    if not PYRAX_INSTALLED:
        return {}
    return resource_mapping()
|