Merge "rackspace: Convert Scaling resources to new Schema format"

Committed by: Jenkins
Date: 2013-12-18 21:53:01 +00:00
Merged via Gerrit Code Review

View File

@@ -18,6 +18,8 @@ Resources for Rackspace Auto Scale.
import copy
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.db.sqlalchemy import api as db_api
from heat.common import exception
@@ -45,135 +47,209 @@ class Group(resource.Resource):
# the true API here, but since pyrax doesn't support the full flexibility
# of the API, we'll have to restrict what users can provide.
# NOTE(review): these cfn-style schema dicts are the pre-change half of this
# diff — the change replaces them with the properties.Schema definitions
# further below. They are reproduced verbatim; the two halves describe the
# same Auto Scale API fields and must not be edited independently.
network_schema = {
'uuid': {
'Type': 'String',
'Required': True,
'Description': _("UUID of network to attach to.")}}
server_args_schema = {
'name': {
'Type': 'String',
'Required': True,
'Description': _("Server name.")},
'flavorRef': {
'Type': 'String',
'Required': True,
'Description': _("Flavor ID.")},
'imageRef': {
'Type': 'String',
'Required': True,
'Description': _("Image ID.")},
'metadata': {
'Type': 'Map',
'Description': _("Metadata key and value pairs.")},
'personality': {
'Type': 'Map',
'Description': _("File path and contents.")},
'networks': {
'Type': 'Map',
'Schema': network_schema,
'Description': _(
"Networks to attach to. If unspecified, the instance will be "
"attached to the public Internet and private ServiceNet "
"networks.")},
# technically maps to OS-DCF:diskConfig
'diskConfig': {
'Type': 'String',
'AllowedValues': ['AUTO', 'MANUAL'],
'Description': _(
"Configuration specifying the partition layout. "
"AUTO to create a partition utilizing the entire disk, and "
"MANUAL to create a partition matching the source image.")},
'key_name': {
'Type': 'String',
'Description': _(
"Name of a previously created SSH keypair to allow key-based "
"authentication to the server."),
}
}
# Load balancer entries: ID plus the server port to attach.
load_balancers_schema = {
'loadBalancerId': {
'Type': 'String',
'Required': True,
'Description': _("ID of the load balancer.")},
'port': {
'Type': 'Number',
'Required': True,
'Description': _("Server port to connect the load balancer to.")}
}
launch_config_args_schema = {
'server': {
'Type': 'Map',
'Required': True,
'Description': _("Server creation arguments, as accepted by the "
"Cloud Servers server creation API."),
'Schema': server_args_schema},
'loadBalancers': {
'Type': 'List',
'Required': False,
'Description': _(
"List of load balancers to hook the server up to. If not "
"specified, no load balancing will be configured."),
'Schema': {'Type': 'Map', 'Schema': load_balancers_schema}},
}
launch_configuration_schema = {
'type': {
'Type': 'String',
'Required': True,
'AllowedValues': ['launch_server'],
'Description': _(
"Launch configuration method. "
"Only launch_server is currently supported.")},
'args': {
'Type': 'Map',
'Required': True,
'Schema': launch_config_args_schema,
'Description': _("Type-specific server launching arguments.")},
}
group_configuration_schema = {
'name': {
'Type': 'String',
'Required': True,
'Description': _("Name of the scaling group.")},
'cooldown': {
'Type': 'Number',
'Required': True,
'Description': _(
"Number of seconds after capacity changes during which "
"further capacity changes are disabled.")},
'minEntities': {
'Type': 'Number',
'Required': True,
'Description': _(
"Minimum number of entities in this scaling group.")},
'maxEntities': {
'Type': 'Number',
'Required': True,
'Description': _(
"Maximum number of entities in this scaling group.")},
'metadata': {
'Type': 'Map',
'Description': _(
"Arbitrary key/value metadata to associate with this group.")},
}
# properties are identical to the API POST /groups.
# NOTE(review): double-assignment idiom — each symbolic name is bound to its
# wire-format string; the strings must stay in sync with the Auto Scale API
# field names used below and in _get_*_args.
PROPERTIES = (
GROUP_CONFIGURATION, LAUNCH_CONFIGURATION,
) = (
'groupConfiguration', 'launchConfiguration',
)
_GROUP_CONFIGURATION_KEYS = (
GROUP_CONFIGURATION_MAX_ENTITIES, GROUP_CONFIGURATION_COOLDOWN,
GROUP_CONFIGURATION_NAME, GROUP_CONFIGURATION_MIN_ENTITIES,
GROUP_CONFIGURATION_METADATA,
) = (
'maxEntities', 'cooldown',
'name', 'minEntities',
'metadata',
)
_LAUNCH_CONFIG_KEYS = (
LAUNCH_CONFIG_ARGS, LAUNCH_CONFIG_TYPE,
) = (
'args', 'type',
)
_LAUNCH_CONFIG_ARGS_KEYS = (
LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
LAUNCH_CONFIG_ARGS_SERVER,
) = (
'loadBalancers',
'server',
)
_LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
) = (
'loadBalancerId',
'port',
)
_LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
LAUNCH_CONFIG_ARGS_SERVER_METADATA,
LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
) = (
'name', 'flavorRef',
'imageRef',
'metadata',
'personality',
'networks',
'diskConfig', # technically maps to OS-DCF:diskConfig
'key_name',
)
_LAUNCH_CONFIG_ARGS_SERVER_NETWORKS_KEYS = (
LAUNCH_CONFIG_ARGS_SERVER_NETWORKS_UUID,
) = (
'uuid',
)
# Nested schema for the launchConfiguration 'args' map; referenced from
# properties_schema below.
_launch_configuration_args_schema = {
LAUNCH_CONFIG_ARGS_LOAD_BALANCERS: properties.Schema(
properties.Schema.LIST,
_('List of load balancers to hook the '
'server up to. If not specified, no '
'load balancing will be configured.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the load balancer.'),
required=True
),
LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT: properties.Schema(
properties.Schema.NUMBER,
_('Server port to connect the load balancer to.'),
required=True
),
},
)
),
LAUNCH_CONFIG_ARGS_SERVER: properties.Schema(
properties.Schema.MAP,
_('Server creation arguments, as accepted by the Cloud Servers '
'server creation API.'),
schema={
LAUNCH_CONFIG_ARGS_SERVER_NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
required=True
),
LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF: properties.Schema(
properties.Schema.STRING,
_('Flavor ID.'),
required=True
),
LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF: properties.Schema(
properties.Schema.STRING,
_('Image ID.'),
required=True
),
LAUNCH_CONFIG_ARGS_SERVER_METADATA: properties.Schema(
properties.Schema.MAP,
_('Metadata key and value pairs.')
),
LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY: properties.Schema(
properties.Schema.MAP,
_('File path and contents.')
),
LAUNCH_CONFIG_ARGS_SERVER_NETWORKS: properties.Schema(
properties.Schema.MAP,
_('Networks to attach to. If unspecified, the instance '
'will be attached to the public Internet and private '
'ServiceNet networks.'),
schema={
LAUNCH_CONFIG_ARGS_SERVER_NETWORKS_UUID:
properties.Schema(
properties.Schema.STRING,
_('UUID of network to attach to.'),
required=True
),
}
),
LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG: properties.Schema(
properties.Schema.STRING,
_('Configuration specifying the partition layout. AUTO to '
'create a partition utilizing the entire disk, and '
'MANUAL to create a partition matching the source '
'image.'),
constraints=[
constraints.AllowedValues(['AUTO', 'MANUAL']),
]
),
LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of a previously created SSH keypair to allow '
'key-based authentication to the server.')
),
},
required=True
),
}
properties_schema = {
'groupConfiguration': {
'Type': 'Map',
'Required': True,
'Schema': group_configuration_schema,
'Description': _("Group configuration.")},
'launchConfiguration': {
'Type': 'Map',
'Required': True,
'Schema': launch_configuration_schema,
'Description': _("Launch configuration.")},
GROUP_CONFIGURATION: properties.Schema(
properties.Schema.MAP,
_('Group configuration.'),
schema={
GROUP_CONFIGURATION_MAX_ENTITIES: properties.Schema(
properties.Schema.NUMBER,
_('Maximum number of entities in this scaling group.'),
required=True
),
GROUP_CONFIGURATION_COOLDOWN: properties.Schema(
properties.Schema.NUMBER,
_('Number of seconds after capacity changes during '
'which further capacity changes are disabled.'),
required=True
),
GROUP_CONFIGURATION_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the scaling group.'),
required=True
),
GROUP_CONFIGURATION_MIN_ENTITIES: properties.Schema(
properties.Schema.NUMBER,
_('Minimum number of entities in this scaling group.'),
required=True
),
GROUP_CONFIGURATION_METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to associate with '
'this group.')
),
},
required=True
),
LAUNCH_CONFIGURATION: properties.Schema(
properties.Schema.MAP,
_('Launch configuration.'),
schema={
LAUNCH_CONFIG_ARGS: properties.Schema(
properties.Schema.MAP,
_('Type-specific server launching arguments.'),
schema=_launch_configuration_args_schema,
required=True
),
LAUNCH_CONFIG_TYPE: properties.Schema(
properties.Schema.STRING,
_('Launch configuration method. Only launch_server '
'is currently supported.'),
required=True,
constraints=[
constraints.AllowedValues(['launch_server']),
]
),
},
required=True
),
# We don't allow scaling policies to be specified here, despite the
# fact that the API supports it. Users should use the ScalingPolicy
# resource.
@@ -181,45 +257,49 @@ class Group(resource.Resource):
# Only the resource Properties section may be updated in place.
update_allowed_keys = ('Properties',)
# Everything can be changed.
# NOTE(review): dropped the stale pre-change tuple of raw strings that was
# immediately shadowed by this symbolic-constant version.
update_allowed_properties = (GROUP_CONFIGURATION, LAUNCH_CONFIGURATION)
def _get_group_config_args(self, groupconf):
    """Get the groupConfiguration-related pyrax arguments.

    :param groupconf: validated groupConfiguration property map.
    :returns: dict of keyword arguments for the pyrax scaling-group calls.
    """
    # NOTE(review): the diff-merged duplicate pre-change body (raw string
    # keys) has been removed; only the symbolic-constant version remains.
    return dict(
        name=groupconf[self.GROUP_CONFIGURATION_NAME],
        cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
        min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
        max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
        # metadata is optional in the schema, so tolerate its absence.
        metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))
def _get_launch_config_args(self, launchconf):
    """Get the launchConfiguration-related pyrax arguments.

    :param launchconf: validated launchConfiguration property map.
    :returns: dict of keyword arguments for the pyrax launch-config calls.
    """
    lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
    server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
    lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
    # Deep-copy before mutating so the stored properties stay untouched.
    lbs = copy.deepcopy(lb_args)
    if lbs:
        for lb in lbs:
            # pyrax expects a numeric load balancer ID.
            lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
            lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
    return dict(
        launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
        # Fixed: use the server-args key constants here. The previous code
        # used GROUP_CONFIGURATION_NAME / GROUP_CONFIGURATION_METADATA,
        # which only worked because the string values ('name', 'metadata')
        # happen to coincide.
        server_name=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_NAME],
        image=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF],
        flavor=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF],
        disk_config=server_args.get(
            self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
        metadata=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_METADATA),
        personality=server_args.get(
            self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY),
        networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
        load_balancers=lbs,
        key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
    )
def _get_create_args(self):
    """Get pyrax-style arguments for creating a scaling group.

    Merges the group-configuration and launch-configuration argument
    dicts into a single kwargs dict for the pyrax create call.
    """
    args = self._get_group_config_args(
        self.properties[self.GROUP_CONFIGURATION])
    # The create call takes the group metadata as 'group_metadata' so it
    # is not confused with the launch-config server metadata.
    args['group_metadata'] = args.pop('metadata')
    args.update(self._get_launch_config_args(
        self.properties[self.LAUNCH_CONFIGURATION]))
    return args
# NOTE(review): diff fragment — the hunk header below elides handle_create's
# body and handle_update's own `def` line; pre- and post-change lines are
# interleaved (raw string keys vs. symbolic constants). Not straight source.
def handle_create(self):
@@ -236,12 +316,13 @@ class Group(resource.Resource):
Update the group configuration and the launch configuration.
"""
asclient = self.stack.clients.auto_scale()
# pre-change version (string keys):
if 'groupConfiguration' in prop_diff:
args = self._get_group_config_args(prop_diff['groupConfiguration'])
# post-change version (symbolic constants):
if self.GROUP_CONFIGURATION in prop_diff:
args = self._get_group_config_args(
prop_diff[self.GROUP_CONFIGURATION])
asclient.replace(self.resource_id, **args)
if 'launchConfiguration' in prop_diff:
if self.LAUNCH_CONFIGURATION in prop_diff:
args = self._get_launch_config_args(
prop_diff['launchConfiguration'])
prop_diff[self.LAUNCH_CONFIGURATION])
asclient.replace_launch_config(self.resource_id, **args)
def handle_delete(self):
@@ -257,7 +338,7 @@ class Group(resource.Resource):
# NOTE(review): fragment — part of handle_delete's body is elided by the
# hunk header above; the two self.properties lines are the pre- and
# post-change versions of the same statement. The group's min/max
# entities are forced to 0 before deletion.
return
asclient = self.stack.clients.auto_scale()
args = self._get_group_config_args(
self.properties['groupConfiguration'])
self.properties[self.GROUP_CONFIGURATION])
args['min_entities'] = 0
args['max_entities'] = 0
try:
@@ -281,78 +362,86 @@ class Group(resource.Resource):
class ScalingPolicy(resource.Resource):
"""Represents a Rackspace Auto Scale scaling policy."""
# Symbolic property names bound to their wire-format strings; the strings
# must match the Auto Scale policy API field names.
PROPERTIES = (
GROUP, NAME, CHANGE, CHANGE_PERCENT, DESIRED_CAPACITY,
COOLDOWN, TYPE, ARGS,
) = (
'group', 'name', 'change', 'changePercent', 'desiredCapacity',
'cooldown', 'type', 'args',
)
# NOTE(review): dropped the stale cfn-style entries that duplicated the
# Schema entries below under the same keys.
properties_schema = {
    # group isn't in the post body, but it's in the URL to post to.
    GROUP: properties.Schema(
        properties.Schema.STRING,
        _('Scaling group ID that this policy belongs to.'),
        required=True
    ),
    NAME: properties.Schema(
        properties.Schema.STRING,
        _('Name of this scaling policy.'),
        required=True
    ),
    CHANGE: properties.Schema(
        properties.Schema.NUMBER,
        _('Amount to add to or remove from current number of instances. '
          'Incompatible with changePercent and desiredCapacity.')
    ),
    CHANGE_PERCENT: properties.Schema(
        properties.Schema.NUMBER,
        _('Percentage-based change to add or remove from current number '
          'of instances. Incompatible with change and desiredCapacity.')
    ),
    DESIRED_CAPACITY: properties.Schema(
        properties.Schema.NUMBER,
        _('Absolute number to set the number of instances to. '
          'Incompatible with change and changePercent.')
    ),
    COOLDOWN: properties.Schema(
        properties.Schema.NUMBER,
        _('Number of seconds after a policy execution during which '
          'further executions are disabled.')
    ),
    TYPE: properties.Schema(
        properties.Schema.STRING,
        _('Type of this scaling policy. Specifies how the policy is '
          'executed.'),
        required=True,
        constraints=[
            constraints.AllowedValues(['webhook', 'schedule',
                                       'cloud_monitoring']),
        ]
    ),
    ARGS: properties.Schema(
        properties.Schema.MAP,
        _('Type-specific arguments for the policy.')
    ),
}
# Only the resource Properties section may be updated in place.
update_allowed_keys = ('Properties',)
# Everything other than group can be changed.
# NOTE(review): removed the stale pre-change raw-string lines that were
# fused into this tuple by the diff rendering.
update_allowed_properties = (
    NAME, CHANGE, CHANGE_PERCENT, DESIRED_CAPACITY, COOLDOWN, TYPE, ARGS,
)
def _get_args(self, properties):
    """Get pyrax-style create arguments for scaling policies.

    change / changePercent / desiredCapacity are mutually exclusive in
    the schema; the first one present (in that order) is used.
    """
    args = dict(
        scaling_group=properties[self.GROUP],
        name=properties[self.NAME],
        policy_type=properties[self.TYPE],
        cooldown=properties[self.COOLDOWN],
    )
    if properties.get(self.CHANGE) is not None:
        args['change'] = properties[self.CHANGE]
    elif properties.get(self.CHANGE_PERCENT) is not None:
        # A percentage change is passed as change + is_percent=True.
        args['change'] = properties[self.CHANGE_PERCENT]
        args['is_percent'] = True
    elif properties.get(self.DESIRED_CAPACITY) is not None:
        args['desired_capacity'] = properties[self.DESIRED_CAPACITY]
    if properties.get(self.ARGS) is not None:
        args['args'] = properties[self.ARGS]
    return args
def handle_create(self):
@@ -363,7 +452,7 @@ class ScalingPolicy(resource.Resource):
# NOTE(review): fragment — part of handle_create is elided by the hunk
# header above; the two resource_id lines are the pre-/post-change
# versions of the same statement ('{group}:{policy}' composite ID).
asclient = self.stack.clients.auto_scale()
args = self._get_args(self.properties)
policy = asclient.add_policy(**args)
resource_id = '%s:%s' % (self.properties['group'], policy.id)
resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)
self.resource_id_set(resource_id)
def _get_policy_id(self):
@@ -382,7 +471,7 @@ class ScalingPolicy(resource.Resource):
# NOTE(review): fragment — _get_policy_id's body and handle_delete's
# `def` line are elided; the two delete_policy lines are the pre- and
# post-change versions of the same call. NotFound is swallowed so that
# deleting an already-gone policy succeeds.
return
policy_id = self._get_policy_id()
try:
asclient.delete_policy(self.properties['group'], policy_id)
asclient.delete_policy(self.properties[self.GROUP], policy_id)
except NotFound:
pass
@@ -393,28 +482,34 @@ class WebHook(resource.Resource):
Exposes the URLs of the webhook as attributes.
"""
# Symbolic property names bound to their wire-format strings; the strings
# must match the Auto Scale webhook API field names.
PROPERTIES = (
POLICY, NAME, METADATA,
) = (
'policy', 'name', 'metadata',
)
# NOTE(review): dropped the stale cfn-style entries that duplicated the
# Schema entries below under the same keys.
properties_schema = {
    POLICY: properties.Schema(
        properties.Schema.STRING,
        _('The policy that this webhook should apply to, in '
          '{group_id}:{policy_id} format. Generally a Ref to a Policy '
          'resource.'),
        required=True
    ),
    NAME: properties.Schema(
        properties.Schema.STRING,
        _('The name of this webhook.'),
        required=True
    ),
    METADATA: properties.Schema(
        properties.Schema.MAP,
        _('Arbitrary key/value metadata for this webhook.')
    ),
}
# Only the resource Properties section may be updated in place.
update_allowed_keys = ('Properties',)
# Everything other than policy can be changed.
# NOTE(review): removed the stale pre-change raw-string tuple that this
# symbolic-constant version immediately shadowed.
update_allowed_properties = (NAME, METADATA)
# NOTE(review): attributes_schema is truncated by the hunk header below;
# only its first entry ('executeUrl') and closing brace are visible here.
attributes_schema = {
'executeUrl': _(
@@ -424,12 +519,12 @@ class WebHook(resource.Resource):
}
def _get_args(self, props):
    """Get pyrax-style arguments for creating a webhook.

    The policy property is '{group_id}:{policy_id}'; split on the first
    ':' only, so policy IDs containing ':' are preserved intact.
    """
    group_id, policy_id = props[self.POLICY].split(':', 1)
    return dict(
        name=props[self.NAME],
        scaling_group=group_id,
        policy=policy_id,
        # metadata is optional in the schema.
        metadata=props.get(self.METADATA))
def handle_create(self):
asclient = self.stack.clients.auto_scale()
@@ -461,7 +556,7 @@ class WebHook(resource.Resource):
# NOTE(review): the lines below are the tail of handle_delete (its `def`
# is elided by the hunk header above); the string-key line and the
# self.POLICY line are the pre- and post-change versions of the same
# statement. NotFound is swallowed so deleting a gone webhook succeeds.
if self.resource_id is None:
return
asclient = self.stack.clients.auto_scale()
group_id, policy_id = self.properties['policy'].split(':', 1)
group_id, policy_id = self.properties[self.POLICY].split(':', 1)
try:
asclient.delete_webhook(group_id, policy_id, self.resource_id)
except NotFound: