heat cleanups to align with pep8 1.3.4

Lots of (mostly whitespace) cleanups to align all the non-test code
with pep8 v1.3.4

ref bug 1092057
Change-Id: I444b288444dba4ec1da5854bd276d091c06d8489
Signed-off-by: Steven Hardy <shardy@redhat.com>

parent 8936c97172
commit c1bf924c0f

bin/heat-cfn | 35
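Most of the hunks below realign call arguments and dict/list literals to one of the two continuation-line layouts that the pep8 1.3.x E12x checks accept. A minimal sketch of the before/after style, using a hypothetical configure_logging() helper (not part of the Heat tree) in place of the real call sites:

# Illustrative only -- configure_logging() is a made-up helper; the point is
# the continuation-line layout that pep8 1.3.4 expects.
import logging


def configure_logging(fmt, level):
    logging.basicConfig(format=fmt, level=level)


# Flagged by the E12x continuation-line checks (hanging line that is neither
# aligned with the opening delimiter nor a clean one-level hanging indent):
#   configure_logging('%(levelname)s:%(message)s',
#           logging.WARNING)

# Accepted: align the continuation with the opening delimiter...
configure_logging('%(levelname)s:%(message)s',
                  logging.WARNING)

# ...or break after the opening parenthesis and use a uniform hanging indent.
configure_logging(
    '%(levelname)s:%(message)s',
    logging.WARNING)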
|
@ -366,7 +366,7 @@ def stack_resources_list_details(options, arguments):
|
|||
usage = ('''Usage:
|
||||
%s resource-list-details stack_name [logical_resource_id]
|
||||
%s resource-list-details physical_resource_id [logical_resource_id]''' %
|
||||
(scriptname, scriptname))
|
||||
(scriptname, scriptname))
|
||||
|
||||
try:
|
||||
name_or_pid = arguments.pop(0)
|
||||
|
@ -545,14 +545,14 @@ def parse_options(parser, cli_args):
|
|||
|
||||
if options.debug:
|
||||
logging.basicConfig(format='%(levelname)s:%(message)s',
|
||||
level=logging.DEBUG)
|
||||
level=logging.DEBUG)
|
||||
logging.debug("Debug level logging enabled")
|
||||
elif options.verbose:
|
||||
logging.basicConfig(format='%(levelname)s:%(message)s',
|
||||
level=logging.INFO)
|
||||
level=logging.INFO)
|
||||
else:
|
||||
logging.basicConfig(format='%(levelname)s:%(message)s',
|
||||
level=logging.WARNING)
|
||||
level=logging.WARNING)
|
||||
|
||||
return (options, command, args)
|
||||
|
||||
|
@ -574,20 +574,19 @@ def print_help(options, args):
|
|||
def lookup_command(parser, command_name):
|
||||
base_commands = {'help': print_help}
|
||||
|
||||
stack_commands = {
|
||||
'create': stack_create,
|
||||
'update': stack_update,
|
||||
'delete': stack_delete,
|
||||
'list': stack_list,
|
||||
'events_list': stack_events_list, # DEPRECATED
|
||||
'event-list': stack_events_list,
|
||||
'resource': stack_resource_show,
|
||||
'resource-list': stack_resources_list,
|
||||
'resource-list-details': stack_resources_list_details,
|
||||
'validate': template_validate,
|
||||
'gettemplate': get_template,
|
||||
'estimate-template-cost': estimate_template_cost,
|
||||
'describe': stack_describe}
|
||||
stack_commands = {'create': stack_create,
|
||||
'update': stack_update,
|
||||
'delete': stack_delete,
|
||||
'list': stack_list,
|
||||
'events_list': stack_events_list, # DEPRECATED
|
||||
'event-list': stack_events_list,
|
||||
'resource': stack_resource_show,
|
||||
'resource-list': stack_resources_list,
|
||||
'resource-list-details': stack_resources_list_details,
|
||||
'validate': template_validate,
|
||||
'gettemplate': get_template,
|
||||
'estimate-template-cost': estimate_template_cost,
|
||||
'describe': stack_describe}
|
||||
|
||||
commands = {}
|
||||
for command_set in (base_commands, stack_commands):
|
||||
|
|
|
@ -57,7 +57,6 @@ if __name__ == '__main__':
|
|||
from heat.engine import service as engine
|
||||
|
||||
db_api.configure()
|
||||
srv = engine.EngineService(cfg.CONF.host,
|
||||
'engine')
|
||||
srv = engine.EngineService(cfg.CONF.host, 'engine')
|
||||
launcher = service.launch(srv)
|
||||
launcher.wait()
|
||||
|
|
|
@ -76,7 +76,7 @@ class EC2Token(wsgi.Middleware):
|
|||
'verb': req.method,
|
||||
'path': req.path,
|
||||
'params': auth_params,
|
||||
}}
|
||||
}}
|
||||
creds_json = None
|
||||
try:
|
||||
creds_json = json.dumps(creds)
|
||||
|
|
|
@ -57,7 +57,7 @@ class HeatAPIException(webob.exc.HTTPError):
|
|||
else:
|
||||
message = self.explanation
|
||||
return {'ErrorResponse': {'Error': {'Type': self.err_type,
|
||||
'Code': self.title, 'Message': message}}}
|
||||
'Code': self.title, 'Message': message}}}
|
||||
|
||||
|
||||
# Common Error Subclasses:
|
||||
|
|
|
@ -51,7 +51,7 @@ def extract_param_pairs(params, prefix='', keyname='', valuename=''):
|
|||
"""
|
||||
plist = extract_param_list(params, prefix)
|
||||
kvs = [(p[keyname], p[valuename]) for p in plist
|
||||
if keyname in p and valuename in p]
|
||||
if keyname in p and valuename in p]
|
||||
|
||||
return dict(kvs)
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ class API(wsgi.Router):
|
|||
|
||||
for action in self._actions:
|
||||
mapper.connect("/", controller=stacks_resource, action=action,
|
||||
conditions=conditions(action))
|
||||
conditions=conditions(action))
|
||||
|
||||
mapper.connect("/", controller=stacks_resource, action="index")
|
||||
|
||||
|
|
|
@ -74,9 +74,9 @@ class StackController(object):
|
|||
Parameters.member.1.ParameterValue
|
||||
"""
|
||||
return api_utils.extract_param_pairs(params,
|
||||
prefix='Parameters',
|
||||
keyname='ParameterKey',
|
||||
valuename='ParameterValue')
|
||||
prefix='Parameters',
|
||||
keyname='ParameterKey',
|
||||
valuename='ParameterValue')
|
||||
|
||||
def _get_identity(self, con, stack_name):
|
||||
"""
|
||||
|
@ -128,7 +128,7 @@ class StackController(object):
|
|||
return exception.map_remote_error(ex)
|
||||
|
||||
res = {'StackSummaries': [format_stack_summary(s)
|
||||
for s in stack_list['stacks']]}
|
||||
for s in stack_list['stacks']]}
|
||||
|
||||
return api_utils.format_response('ListStacks', res)
|
||||
|
||||
|
@ -145,8 +145,8 @@ class StackController(object):
|
|||
}
|
||||
|
||||
def replacecolon(d):
|
||||
return dict(map(lambda (k, v):
|
||||
(k.replace(':', '.'), v), d.items()))
|
||||
return dict(map(lambda (k, v): (k.replace(':', '.'), v),
|
||||
d.items()))
|
||||
|
||||
def transform(attrs):
|
||||
"""
|
||||
|
@ -193,9 +193,9 @@ class StackController(object):
|
|||
# Reformat Parameters dict-of-dict into AWS API format
|
||||
# This is a list-of-dict with nasty "ParameterKey" : key
|
||||
# "ParameterValue" : value format.
|
||||
result['Parameters'] = [{'ParameterKey':k,
|
||||
'ParameterValue':v}
|
||||
for (k, v) in result['Parameters'].items()]
|
||||
result['Parameters'] = [{'ParameterKey': k,
|
||||
'ParameterValue': v}
|
||||
for (k, v) in result['Parameters'].items()]
|
||||
|
||||
return self._id_format(result)
|
||||
|
||||
|
@ -237,9 +237,10 @@ class StackController(object):
|
|||
return None
|
||||
|
||||
CREATE_OR_UPDATE_ACTION = (
|
||||
CREATE_STACK, UPDATE_STACK
|
||||
) = (
|
||||
"CreateStack", "UpdateStack")
|
||||
CREATE_STACK, UPDATE_STACK,
|
||||
) = (
|
||||
"CreateStack", "UpdateStack",
|
||||
)
|
||||
|
||||
def create(self, req):
|
||||
return self.create_or_update(req, self.CREATE_STACK)
|
||||
|
@ -349,7 +350,10 @@ class StackController(object):
|
|||
Get the estimated monthly cost of a template
|
||||
"""
|
||||
return api_utils.format_response('EstimateTemplateCost',
|
||||
{'Url': 'http://en.wikipedia.org/wiki/Gratis'})
|
||||
{'Url':
|
||||
'http://en.wikipedia.org/wiki/Gratis'
|
||||
}
|
||||
)
|
||||
|
||||
def validate_template(self, req):
|
||||
"""
|
||||
|
@ -421,8 +425,8 @@ class StackController(object):
|
|||
}
|
||||
|
||||
result = api_utils.reformat_dict_keys(keymap, e)
|
||||
result['ResourceProperties'] = json.dumps(
|
||||
result['ResourceProperties'])
|
||||
result['ResourceProperties'] = json.dumps(result[
|
||||
'ResourceProperties'])
|
||||
|
||||
return self._id_format(result)
|
||||
|
||||
|
@ -439,7 +443,7 @@ class StackController(object):
|
|||
result = [format_stack_event(e) for e in events]
|
||||
|
||||
return api_utils.format_response('DescribeStackEvents',
|
||||
{'StackEvents': result})
|
||||
{'StackEvents': result})
|
||||
|
||||
def describe_stack_resource(self, req):
|
||||
"""
|
||||
|
@ -472,9 +476,10 @@ class StackController(object):
|
|||
|
||||
try:
|
||||
identity = self._get_identity(con, req.params['StackName'])
|
||||
resource_details = self.engine_rpcapi.describe_stack_resource(con,
|
||||
stack_identity=identity,
|
||||
resource_name=req.params.get('LogicalResourceId'))
|
||||
resource_details = self.engine_rpcapi.describe_stack_resource(
|
||||
con,
|
||||
stack_identity=identity,
|
||||
resource_name=req.params.get('LogicalResourceId'))
|
||||
|
||||
except rpc_common.RemoteError as ex:
|
||||
return exception.map_remote_error(ex)
|
||||
|
@ -482,7 +487,7 @@ class StackController(object):
|
|||
result = format_resource_detail(resource_details)
|
||||
|
||||
return api_utils.format_response('DescribeStackResource',
|
||||
{'StackResourceDetail': result})
|
||||
{'StackResourceDetail': result})
|
||||
|
||||
def describe_stack_resources(self, req):
|
||||
"""
|
||||
|
@ -530,7 +535,8 @@ class StackController(object):
|
|||
|
||||
try:
|
||||
identity = self._get_identity(con, stack_name)
|
||||
resources = self.engine_rpcapi.describe_stack_resources(con,
|
||||
resources = self.engine_rpcapi.describe_stack_resources(
|
||||
con,
|
||||
stack_identity=identity,
|
||||
physical_resource_id=physical_resource_id,
|
||||
logical_resource_id=req.params.get('LogicalResourceId'))
|
||||
|
@ -541,7 +547,7 @@ class StackController(object):
|
|||
result = [format_stack_resource(r) for r in resources]
|
||||
|
||||
return api_utils.format_response('DescribeStackResources',
|
||||
{'StackResources': result})
|
||||
{'StackResources': result})
|
||||
|
||||
def list_stack_resources(self, req):
|
||||
"""
|
||||
|
@ -567,15 +573,16 @@ class StackController(object):
|
|||
|
||||
try:
|
||||
identity = self._get_identity(con, req.params['StackName'])
|
||||
resources = self.engine_rpcapi.list_stack_resources(con,
|
||||
stack_identity=identity)
|
||||
resources = self.engine_rpcapi.list_stack_resources(
|
||||
con,
|
||||
stack_identity=identity)
|
||||
except rpc_common.RemoteError as ex:
|
||||
return exception.map_remote_error(ex)
|
||||
|
||||
summaries = [format_resource_summary(r) for r in resources]
|
||||
|
||||
return api_utils.format_response('ListStackResources',
|
||||
{'StackResourceSummaries': summaries})
|
||||
{'StackResourceSummaries': summaries})
|
||||
|
||||
|
||||
def create_resource(options):
|
||||
|
|
|
@ -30,10 +30,11 @@ class WaitConditionController:
|
|||
con = req.context
|
||||
identity = identifier.ResourceIdentifier.from_arn(arn)
|
||||
try:
|
||||
md = self.engine.metadata_update(con,
|
||||
stack_id=dict(identity.stack()),
|
||||
resource_name=identity.resource_name,
|
||||
metadata=body)
|
||||
md = self.engine.metadata_update(
|
||||
con,
|
||||
stack_id=dict(identity.stack()),
|
||||
resource_name=identity.resource_name,
|
||||
metadata=body)
|
||||
except rpc_common.RemoteError as ex:
|
||||
return exception.map_remote_error(ex)
|
||||
|
||||
|
|
|
@ -70,7 +70,7 @@ class API(wsgi.Router):
|
|||
|
||||
for action in self._actions:
|
||||
mapper.connect("/", controller=controller_resource, action=action,
|
||||
conditions=conditions(action))
|
||||
conditions=conditions(action))
|
||||
|
||||
mapper.connect("/", controller=controller_resource, action="index")
|
||||
|
||||
|
|
|
@ -73,37 +73,39 @@ class WatchController(object):
|
|||
Reformat engine output into the AWS "MetricAlarm" format
|
||||
"""
|
||||
keymap = {
|
||||
engine_api.WATCH_ACTIONS_ENABLED: 'ActionsEnabled',
|
||||
engine_api.WATCH_ALARM_ACTIONS: 'AlarmActions',
|
||||
engine_api.WATCH_TOPIC: 'AlarmArn',
|
||||
engine_api.WATCH_UPDATED_TIME:
|
||||
engine_api.WATCH_ACTIONS_ENABLED: 'ActionsEnabled',
|
||||
engine_api.WATCH_ALARM_ACTIONS: 'AlarmActions',
|
||||
engine_api.WATCH_TOPIC: 'AlarmArn',
|
||||
engine_api.WATCH_UPDATED_TIME:
|
||||
'AlarmConfigurationUpdatedTimestamp',
|
||||
engine_api.WATCH_DESCRIPTION: 'AlarmDescription',
|
||||
engine_api.WATCH_NAME: 'AlarmName',
|
||||
engine_api.WATCH_COMPARISON: 'ComparisonOperator',
|
||||
engine_api.WATCH_DIMENSIONS: 'Dimensions',
|
||||
engine_api.WATCH_PERIODS: 'EvaluationPeriods',
|
||||
engine_api.WATCH_INSUFFICIENT_ACTIONS: 'InsufficientDataActions',
|
||||
engine_api.WATCH_METRIC_NAME: 'MetricName',
|
||||
engine_api.WATCH_NAMESPACE: 'Namespace',
|
||||
engine_api.WATCH_OK_ACTIONS: 'OKActions',
|
||||
engine_api.WATCH_PERIOD: 'Period',
|
||||
engine_api.WATCH_STATE_REASON: 'StateReason',
|
||||
engine_api.WATCH_STATE_REASON_DATA: 'StateReasonData',
|
||||
engine_api.WATCH_STATE_UPDATED_TIME: 'StateUpdatedTimestamp',
|
||||
engine_api.WATCH_STATE_VALUE: 'StateValue',
|
||||
engine_api.WATCH_STATISTIC: 'Statistic',
|
||||
engine_api.WATCH_THRESHOLD: 'Threshold',
|
||||
engine_api.WATCH_UNIT: 'Unit'}
|
||||
engine_api.WATCH_DESCRIPTION: 'AlarmDescription',
|
||||
engine_api.WATCH_NAME: 'AlarmName',
|
||||
engine_api.WATCH_COMPARISON: 'ComparisonOperator',
|
||||
engine_api.WATCH_DIMENSIONS: 'Dimensions',
|
||||
engine_api.WATCH_PERIODS: 'EvaluationPeriods',
|
||||
engine_api.WATCH_INSUFFICIENT_ACTIONS:
|
||||
'InsufficientDataActions',
|
||||
engine_api.WATCH_METRIC_NAME: 'MetricName',
|
||||
engine_api.WATCH_NAMESPACE: 'Namespace',
|
||||
engine_api.WATCH_OK_ACTIONS: 'OKActions',
|
||||
engine_api.WATCH_PERIOD: 'Period',
|
||||
engine_api.WATCH_STATE_REASON: 'StateReason',
|
||||
engine_api.WATCH_STATE_REASON_DATA: 'StateReasonData',
|
||||
engine_api.WATCH_STATE_UPDATED_TIME: 'StateUpdatedTimestamp',
|
||||
engine_api.WATCH_STATE_VALUE: 'StateValue',
|
||||
engine_api.WATCH_STATISTIC: 'Statistic',
|
||||
engine_api.WATCH_THRESHOLD: 'Threshold',
|
||||
engine_api.WATCH_UNIT: 'Unit'}
|
||||
|
||||
# AWS doesn't return StackId in the main MetricAlarm
|
||||
# structure, so we add StackId as a dimension to all responses
|
||||
a[engine_api.WATCH_DIMENSIONS].append({'StackId':
|
||||
a[engine_api.WATCH_STACK_ID]})
|
||||
a[engine_api.WATCH_STACK_ID]
|
||||
})
|
||||
|
||||
# Reformat dimensions list into AWS API format
|
||||
a[engine_api.WATCH_DIMENSIONS] = self._reformat_dimensions(
|
||||
a[engine_api.WATCH_DIMENSIONS])
|
||||
a[engine_api.WATCH_DIMENSIONS])
|
||||
|
||||
return api_utils.reformat_dict_keys(keymap, a)
|
||||
|
||||
|
@ -120,7 +122,7 @@ class WatchController(object):
|
|||
return exception.map_remote_error(ex)
|
||||
|
||||
res = {'MetricAlarms': [format_metric_alarm(a)
|
||||
for a in watch_list]}
|
||||
for a in watch_list]}
|
||||
|
||||
result = api_utils.format_response("DescribeAlarms", res)
|
||||
return result
|
||||
|
@ -243,9 +245,9 @@ class WatchController(object):
|
|||
dimensions = []
|
||||
for p in metric_data:
|
||||
dimension = api_utils.extract_param_pairs(p,
|
||||
prefix='Dimensions',
|
||||
keyname='Name',
|
||||
valuename='Value')
|
||||
prefix='Dimensions',
|
||||
keyname='Name',
|
||||
valuename='Value')
|
||||
if 'AlarmName' in dimension:
|
||||
watch_name = dimension['AlarmName']
|
||||
else:
|
||||
|
@ -283,8 +285,8 @@ class WatchController(object):
|
|||
"""
|
||||
# Map from AWS state names to those used in the engine
|
||||
state_map = {'OK': engine_api.WATCH_STATE_OK,
|
||||
'ALARM': engine_api.WATCH_STATE_ALARM,
|
||||
'INSUFFICIENT_DATA': engine_api.WATCH_STATE_NODATA}
|
||||
'ALARM': engine_api.WATCH_STATE_ALARM,
|
||||
'INSUFFICIENT_DATA': engine_api.WATCH_STATE_NODATA}
|
||||
|
||||
con = req.context
|
||||
parms = dict(req.params)
|
||||
|
|
|
@ -47,7 +47,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
|||
# API controller
|
||||
msg = _("Processing request: %(method)s %(path)s Accept: "
|
||||
"%(accept)s") % ({'method': req.method,
|
||||
'path': req.path, 'accept': req.accept})
|
||||
'path': req.path, 'accept': req.accept})
|
||||
logger.debug(msg)
|
||||
|
||||
# If the request is for /versions, just return the versions container
|
||||
|
@ -57,7 +57,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
|||
match = self._match_version_string(req.path_info_peek(), req)
|
||||
if match:
|
||||
if (req.environ['api.major_version'] == 1 and
|
||||
req.environ['api.minor_version'] == 0):
|
||||
req.environ['api.minor_version'] == 0):
|
||||
logger.debug(_("Matched versioned URI. Version: %d.%d"),
|
||||
req.environ['api.major_version'],
|
||||
req.environ['api.minor_version'])
|
||||
|
@ -78,7 +78,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
|||
match = self._match_version_string(accept_version, req)
|
||||
if match:
|
||||
if (req.environ['api.major_version'] == 1 and
|
||||
req.environ['api.minor_version'] == 0):
|
||||
req.environ['api.minor_version'] == 0):
|
||||
logger.debug(_("Matched versioned media type. "
|
||||
"Version: %d.%d"),
|
||||
req.environ['api.major_version'],
|
||||
|
|
|
@ -40,25 +40,29 @@ class BotoClient(CloudFormationConnection):
|
|||
|
||||
def create_stack(self, **kwargs):
|
||||
if 'TemplateUrl' in kwargs:
|
||||
return super(BotoClient, self).create_stack(kwargs['StackName'],
|
||||
template_url=kwargs['TemplateUrl'],
|
||||
parameters=kwargs['Parameters'])
|
||||
return super(BotoClient, self).create_stack(
|
||||
kwargs['StackName'],
|
||||
template_url=kwargs['TemplateUrl'],
|
||||
parameters=kwargs['Parameters'])
|
||||
elif 'TemplateBody' in kwargs:
|
||||
return super(BotoClient, self).create_stack(kwargs['StackName'],
|
||||
template_body=kwargs['TemplateBody'],
|
||||
parameters=kwargs['Parameters'])
|
||||
return super(BotoClient, self).create_stack(
|
||||
kwargs['StackName'],
|
||||
template_body=kwargs['TemplateBody'],
|
||||
parameters=kwargs['Parameters'])
|
||||
else:
|
||||
logger.error("Must specify TemplateUrl or TemplateBody!")
|
||||
|
||||
def update_stack(self, **kwargs):
|
||||
if 'TemplateUrl' in kwargs:
|
||||
return super(BotoClient, self).update_stack(kwargs['StackName'],
|
||||
template_url=kwargs['TemplateUrl'],
|
||||
parameters=kwargs['Parameters'])
|
||||
return super(BotoClient, self).update_stack(
|
||||
kwargs['StackName'],
|
||||
template_url=kwargs['TemplateUrl'],
|
||||
parameters=kwargs['Parameters'])
|
||||
elif 'TemplateBody' in kwargs:
|
||||
return super(BotoClient, self).update_stack(kwargs['StackName'],
|
||||
template_body=kwargs['TemplateBody'],
|
||||
parameters=kwargs['Parameters'])
|
||||
return super(BotoClient, self).update_stack(
|
||||
kwargs['StackName'],
|
||||
template_body=kwargs['TemplateBody'],
|
||||
parameters=kwargs['Parameters'])
|
||||
else:
|
||||
logger.error("Must specify TemplateUrl or TemplateBody!")
|
||||
|
||||
|
@ -67,11 +71,11 @@ class BotoClient(CloudFormationConnection):
|
|||
|
||||
def list_stack_events(self, **kwargs):
|
||||
return super(BotoClient, self).describe_stack_events(
|
||||
kwargs['StackName'])
|
||||
kwargs['StackName'])
|
||||
|
||||
def describe_stack_resource(self, **kwargs):
|
||||
return super(BotoClient, self).describe_stack_resource(
|
||||
kwargs['StackName'], kwargs['LogicalResourceId'])
|
||||
kwargs['StackName'], kwargs['LogicalResourceId'])
|
||||
|
||||
def describe_stack_resources(self, **kwargs):
|
||||
# Check if this is a StackName, if not assume it's a physical res ID
|
||||
|
@ -83,29 +87,29 @@ class BotoClient(CloudFormationConnection):
|
|||
stack_names = [s.stack_name for s in list_stacks]
|
||||
if kwargs['NameOrPid'] in stack_names:
|
||||
logger.debug("Looking up resources for StackName:%s" %
|
||||
kwargs['NameOrPid'])
|
||||
kwargs['NameOrPid'])
|
||||
return super(BotoClient, self).describe_stack_resources(
|
||||
stack_name_or_id=kwargs['NameOrPid'],
|
||||
logical_resource_id=kwargs['LogicalResourceId'])
|
||||
stack_name_or_id=kwargs['NameOrPid'],
|
||||
logical_resource_id=kwargs['LogicalResourceId'])
|
||||
else:
|
||||
logger.debug("Looking up resources for PhysicalResourceId:%s" %
|
||||
kwargs['NameOrPid'])
|
||||
kwargs['NameOrPid'])
|
||||
return super(BotoClient, self).describe_stack_resources(
|
||||
stack_name_or_id=None,
|
||||
logical_resource_id=kwargs['LogicalResourceId'],
|
||||
physical_resource_id=kwargs['NameOrPid'])
|
||||
stack_name_or_id=None,
|
||||
logical_resource_id=kwargs['LogicalResourceId'],
|
||||
physical_resource_id=kwargs['NameOrPid'])
|
||||
|
||||
def list_stack_resources(self, **kwargs):
|
||||
return super(BotoClient, self).list_stack_resources(
|
||||
kwargs['StackName'])
|
||||
kwargs['StackName'])
|
||||
|
||||
def validate_template(self, **kwargs):
|
||||
if 'TemplateUrl' in kwargs:
|
||||
return super(BotoClient, self).validate_template(
|
||||
template_url=kwargs['TemplateUrl'])
|
||||
template_url=kwargs['TemplateUrl'])
|
||||
elif 'TemplateBody' in kwargs:
|
||||
return super(BotoClient, self).validate_template(
|
||||
template_body=kwargs['TemplateBody'])
|
||||
template_body=kwargs['TemplateBody'])
|
||||
else:
|
||||
logger.error("Must specify TemplateUrl or TemplateBody!")
|
||||
|
||||
|
@ -115,14 +119,14 @@ class BotoClient(CloudFormationConnection):
|
|||
def estimate_template_cost(self, **kwargs):
|
||||
if 'TemplateUrl' in kwargs:
|
||||
return super(BotoClient, self).estimate_template_cost(
|
||||
kwargs['StackName'],
|
||||
template_url=kwargs['TemplateUrl'],
|
||||
parameters=kwargs['Parameters'])
|
||||
kwargs['StackName'],
|
||||
template_url=kwargs['TemplateUrl'],
|
||||
parameters=kwargs['Parameters'])
|
||||
elif 'TemplateBody' in kwargs:
|
||||
return super(BotoClient, self).estimate_template_cost(
|
||||
kwargs['StackName'],
|
||||
template_body=kwargs['TemplateBody'],
|
||||
parameters=kwargs['Parameters'])
|
||||
kwargs['StackName'],
|
||||
template_body=kwargs['TemplateBody'],
|
||||
parameters=kwargs['Parameters'])
|
||||
else:
|
||||
logger.error("Must specify TemplateUrl or TemplateBody!")
|
||||
|
||||
|
@ -139,7 +143,7 @@ class BotoClient(CloudFormationConnection):
|
|||
ret.append("ResourceProperties : %s" % event.resource_properties)
|
||||
ret.append("ResourceStatus : %s" % event.resource_status)
|
||||
ret.append("ResourceStatusReason : %s" %
|
||||
event.resource_status_reason)
|
||||
event.resource_status_reason)
|
||||
ret.append("ResourceType : %s" % event.resource_type)
|
||||
ret.append("StackId : %s" % event.stack_id)
|
||||
ret.append("StackName : %s" % event.stack_name)
|
||||
|
@ -180,7 +184,7 @@ class BotoClient(CloudFormationConnection):
|
|||
ret.append("PhysicalResourceId : %s" % res.physical_resource_id)
|
||||
ret.append("ResourceStatus : %s" % res.resource_status)
|
||||
ret.append("ResourceStatusReason : %s" %
|
||||
res.resource_status_reason)
|
||||
res.resource_status_reason)
|
||||
ret.append("ResourceType : %s" % res.resource_type)
|
||||
ret.append("StackId : %s" % res.stack_id)
|
||||
ret.append("StackName : %s" % res.stack_name)
|
||||
|
@ -196,12 +200,12 @@ class BotoClient(CloudFormationConnection):
|
|||
ret = []
|
||||
for res in resources:
|
||||
ret.append("LastUpdatedTimestamp : %s" %
|
||||
res.last_updated_timestamp)
|
||||
res.last_updated_timestamp)
|
||||
ret.append("LogicalResourceId : %s" % res.logical_resource_id)
|
||||
ret.append("PhysicalResourceId : %s" % res.physical_resource_id)
|
||||
ret.append("ResourceStatus : %s" % res.resource_status)
|
||||
ret.append("ResourceStatusReason : %s" %
|
||||
res.resource_status_reason)
|
||||
res.resource_status_reason)
|
||||
ret.append("ResourceType : %s" % res.resource_type)
|
||||
ret.append("--")
|
||||
return '\n'.join(ret)
|
||||
|
@ -218,7 +222,7 @@ class BotoClient(CloudFormationConnection):
|
|||
For now, we format the dict response as a workaround
|
||||
'''
|
||||
resource_detail = res['DescribeStackResourceResponse'][
|
||||
'DescribeStackResourceResult']['StackResourceDetail']
|
||||
'DescribeStackResourceResult']['StackResourceDetail']
|
||||
ret = []
|
||||
for key in resource_detail:
|
||||
ret.append("%s : %s" % (key, resource_detail[key]))
|
||||
|
@ -285,8 +289,10 @@ def get_client(host, port=None, username=None,
|
|||
# Also note is_secure is defaulted to False as HTTPS connections
|
||||
# don't seem to work atm, FIXME
|
||||
cloudformation = BotoClient(aws_access_key_id=aws_access_key,
|
||||
aws_secret_access_key=aws_secret_key, is_secure=False,
|
||||
port=port, path="/v1")
|
||||
aws_secret_access_key=aws_secret_key,
|
||||
is_secure=False,
|
||||
port=port,
|
||||
path="/v1")
|
||||
if cloudformation:
|
||||
logger.debug("Got CF connection object OK")
|
||||
else:
|
||||
|
|
|
@ -34,12 +34,12 @@ class BotoCWClient(CloudWatchConnection):
|
|||
# TODO : These should probably go in the CW API and be imported
|
||||
DEFAULT_NAMESPACE = "heat/unknown"
|
||||
METRIC_UNITS = ("Seconds", "Microseconds", "Milliseconds", "Bytes",
|
||||
"Kilobytes", "Megabytes", "Gigabytes", "Terabytes",
|
||||
"Bits", "Kilobits", "Megabits", "Gigabits", "Terabits",
|
||||
"Percent", "Count", "Bytes/Second", "Kilobytes/Second",
|
||||
"Megabytes/Second", "Gigabytes/Second", "Terabytes/Second",
|
||||
"Bits/Second", "Kilobits/Second", "Megabits/Second",
|
||||
"Gigabits/Second", "Terabits/Second", "Count/Second", None)
|
||||
"Kilobytes", "Megabytes", "Gigabytes", "Terabytes",
|
||||
"Bits", "Kilobits", "Megabits", "Gigabits", "Terabits",
|
||||
"Percent", "Count", "Bytes/Second", "Kilobytes/Second",
|
||||
"Megabytes/Second", "Gigabytes/Second", "Terabytes/Second",
|
||||
"Bits/Second", "Kilobits/Second", "Megabits/Second",
|
||||
"Gigabits/Second", "Terabits/Second", "Count/Second", None)
|
||||
METRIC_COMPARISONS = (">=", ">", "<", "<=")
|
||||
ALARM_STATES = ("OK", "ALARM", "INSUFFICIENT_DATA")
|
||||
METRIC_STATISTICS = ("Average", "Sum", "SampleCount", "Maximum", "Minimum")
|
||||
|
@ -56,7 +56,7 @@ class BotoCWClient(CloudWatchConnection):
|
|||
except KeyError:
|
||||
name = None
|
||||
return super(BotoCWClient, self).describe_alarms(
|
||||
alarm_names=[name])
|
||||
alarm_names=[name])
|
||||
|
||||
def list_metrics(self, **kwargs):
|
||||
# list_metrics returns non-null index in next_token if there
|
||||
|
@ -75,10 +75,10 @@ class BotoCWClient(CloudWatchConnection):
|
|||
token = None
|
||||
while True:
|
||||
results.append(super(BotoCWClient, self).list_metrics(
|
||||
next_token=token,
|
||||
dimensions=None,
|
||||
metric_name=name,
|
||||
namespace=None))
|
||||
next_token=token,
|
||||
dimensions=None,
|
||||
metric_name=name,
|
||||
namespace=None))
|
||||
if not token:
|
||||
break
|
||||
|
||||
|
@ -94,8 +94,8 @@ class BotoCWClient(CloudWatchConnection):
|
|||
metric_value = kwargs['MetricValue']
|
||||
metric_namespace = kwargs['Namespace']
|
||||
except KeyError:
|
||||
logger.error("Must pass MetricName, MetricUnit, " +\
|
||||
"Namespace, MetricValue!")
|
||||
logger.error("Must pass MetricName, MetricUnit, " +
|
||||
"Namespace, MetricValue!")
|
||||
return
|
||||
|
||||
try:
|
||||
|
@ -116,20 +116,20 @@ class BotoCWClient(CloudWatchConnection):
|
|||
return
|
||||
|
||||
return super(BotoCWClient, self).put_metric_data(
|
||||
namespace=metric_namespace,
|
||||
name=metric_name,
|
||||
value=metric_value,
|
||||
timestamp=None, # This means use "now" in the engine
|
||||
unit=metric_unit,
|
||||
dimensions=metric_dims,
|
||||
statistics=None)
|
||||
namespace=metric_namespace,
|
||||
name=metric_name,
|
||||
value=metric_value,
|
||||
timestamp=None, # This means use "now" in the engine
|
||||
unit=metric_unit,
|
||||
dimensions=metric_dims,
|
||||
statistics=None)
|
||||
|
||||
def set_alarm_state(self, **kwargs):
|
||||
return super(BotoCWClient, self).set_alarm_state(
|
||||
alarm_name=kwargs['AlarmName'],
|
||||
state_reason=kwargs['StateReason'],
|
||||
state_value=kwargs['StateValue'],
|
||||
state_reason_data=kwargs['StateReasonData'])
|
||||
alarm_name=kwargs['AlarmName'],
|
||||
state_reason=kwargs['StateReason'],
|
||||
state_value=kwargs['StateValue'],
|
||||
state_reason_data=kwargs['StateReasonData'])
|
||||
|
||||
def format_metric_alarm(self, alarms):
|
||||
'''
|
||||
|
@ -144,19 +144,19 @@ class BotoCWClient(CloudWatchConnection):
|
|||
ret.append("AlarmActions : %s" % s.alarm_actions)
|
||||
ret.append("AlarmArn : %s" % s.alarm_arn)
|
||||
ret.append("AlarmConfigurationUpdatedTimestamp : %s" %
|
||||
s.last_updated)
|
||||
s.last_updated)
|
||||
ret.append("ComparisonOperator : %s" % s.comparison)
|
||||
ret.append("Dimensions : %s" % s.dimensions)
|
||||
ret.append("EvaluationPeriods : %s" % s.evaluation_periods)
|
||||
ret.append("InsufficientDataActions : %s" %
|
||||
s.insufficient_data_actions)
|
||||
s.insufficient_data_actions)
|
||||
ret.append("MetricName : %s" % s.metric)
|
||||
ret.append("Namespace : %s" % s.namespace)
|
||||
ret.append("OKActions : %s" % s.ok_actions)
|
||||
ret.append("Period : %s" % s.period)
|
||||
ret.append("StateReason : %s" % s.state_reason)
|
||||
ret.append("StateUpdatedTimestamp : %s" %
|
||||
s.last_updated)
|
||||
s.last_updated)
|
||||
ret.append("StateValue : %s" % s.state_value)
|
||||
ret.append("Statistic : %s" % s.statistic)
|
||||
ret.append("Threshold : %s" % s.threshold)
|
||||
|
@ -199,8 +199,10 @@ def get_client(port=None, aws_access_key=None, aws_secret_key=None):
|
|||
# Also note is_secure is defaulted to False as HTTPS connections
|
||||
# don't seem to work atm, FIXME
|
||||
cloudwatch = BotoCWClient(aws_access_key_id=aws_access_key,
|
||||
aws_secret_access_key=aws_secret_key, is_secure=False,
|
||||
port=port, path="/v1")
|
||||
aws_secret_access_key=aws_secret_key,
|
||||
is_secure=False,
|
||||
port=port,
|
||||
path="/v1")
|
||||
if cloudwatch:
|
||||
logger.debug("Got CW connection object OK")
|
||||
else:
|
||||
|
|
|
@ -31,7 +31,7 @@ SUPPORTED_PARAMS = ('StackName', 'TemplateBody', 'TemplateUrl',
|
|||
'SignatureVersion', 'Timestamp', 'AWSAccessKeyId',
|
||||
'Signature', 'TimeoutInMinutes',
|
||||
'LogicalResourceId', 'PhysicalResourceId', 'NextToken',
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class V1Client(base_client.BaseClient):
|
||||
|
@ -85,7 +85,7 @@ class V1Client(base_client.BaseClient):
|
|||
'LogicalResourceId': kwargs['LogicalResourceId']}
|
||||
try:
|
||||
result = self.stack_request("DescribeStackResources", "GET",
|
||||
**parameters)
|
||||
**parameters)
|
||||
except Exception:
|
||||
logger.debug("Failed to lookup resource details with key %s:%s"
|
||||
% (lookup_key, lookup_value))
|
||||
|
@ -167,8 +167,7 @@ def get_client(host, port=None, username=None,
|
|||
tenant=tenant,
|
||||
auth_url=auth_url,
|
||||
strategy=force_strategy or auth_strategy,
|
||||
region=region,
|
||||
)
|
||||
region=region)
|
||||
|
||||
if creds['strategy'] == 'keystone' and not creds['auth_url']:
|
||||
msg = ("--auth_url option or OS_AUTH_URL environment variable "
|
||||
|
@ -176,14 +175,14 @@ def get_client(host, port=None, username=None,
|
|||
raise exception.ClientConfigurationError(msg)
|
||||
|
||||
use_ssl = (creds['auth_url'] is not None and
|
||||
creds['auth_url'].find('https') != -1)
|
||||
creds['auth_url'].find('https') != -1)
|
||||
|
||||
client = HeatClient
|
||||
|
||||
return client(host=host,
|
||||
port=port,
|
||||
use_ssl=use_ssl,
|
||||
auth_tok=auth_token,
|
||||
creds=creds,
|
||||
insecure=insecure,
|
||||
service_type='cloudformation')
|
||||
port=port,
|
||||
use_ssl=use_ssl,
|
||||
auth_tok=auth_token,
|
||||
creds=creds,
|
||||
insecure=insecure,
|
||||
service_type='cloudformation')
|
||||
|
|
|
@ -33,15 +33,14 @@ def catch_error(action):
|
|||
return SUCCESS if ret is None else ret
|
||||
except exception.NotAuthorized:
|
||||
LOG.error("Not authorized to make this request. Check " +
|
||||
"your credentials (OS_USERNAME, OS_PASSWORD, " +
|
||||
"OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
|
||||
"your credentials (OS_USERNAME, OS_PASSWORD, " +
|
||||
"OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
|
||||
return FAILURE
|
||||
except exception.ClientConfigurationError:
|
||||
raise
|
||||
except exception.KeystoneError, e:
|
||||
LOG.error("Keystone did not finish the authentication and "
|
||||
"returned the following message:\n\n%s"
|
||||
% e.message)
|
||||
"returned the following message:\n\n%s" % e.message)
|
||||
return FAILURE
|
||||
except Exception, e:
|
||||
options = arguments[0]
|
||||
|
|
|
@ -196,7 +196,7 @@ class KeystoneStrategy(BaseStrategy):
|
|||
region_matches = lambda e: region is None or e['region'] == region
|
||||
|
||||
endpoints = [ep for s in service_catalog if service_type_matches(s)
|
||||
for ep in s['endpoints'] if region_matches(ep)]
|
||||
for ep in s['endpoints'] if region_matches(ep)]
|
||||
|
||||
if len(endpoints) > 1:
|
||||
raise exception.RegionAmbiguity(region=region)
|
||||
|
@ -213,17 +213,14 @@ class KeystoneStrategy(BaseStrategy):
|
|||
"tenantName": creds['tenant'],
|
||||
"passwordCredentials": {
|
||||
"username": creds['username'],
|
||||
"password": creds['password']
|
||||
}
|
||||
}
|
||||
}
|
||||
"password": creds['password']}}}
|
||||
|
||||
headers = {}
|
||||
headers['Content-Type'] = 'application/json'
|
||||
req_body = json.dumps(creds)
|
||||
|
||||
resp, resp_body = self._do_request(
|
||||
token_url, 'POST', headers=headers, body=req_body)
|
||||
token_url, 'POST', headers=headers, body=req_body)
|
||||
|
||||
if resp.status == 200:
|
||||
resp_auth = json.loads(resp_body)['access']
|
||||
|
|
|
@ -208,7 +208,7 @@ class AuthProtocol(object):
|
|||
'X-Role',
|
||||
)
|
||||
LOG.debug('Removing headers from request environment: %s' %
|
||||
','.join(auth_headers))
|
||||
','.join(auth_headers))
|
||||
self._remove_headers(env, auth_headers)
|
||||
|
||||
def _get_user_token_from_header(self, env):
|
||||
|
@ -363,7 +363,7 @@ class AuthProtocol(object):
|
|||
self.admin_token = None
|
||||
else:
|
||||
LOG.error('Bad response code while validating token: %s' %
|
||||
response.status)
|
||||
response.status)
|
||||
if retry:
|
||||
LOG.info('Retrying validation')
|
||||
return self._validate_user_token(user_token, False)
|
||||
|
|
|
@ -301,21 +301,21 @@ class BaseClient(object):
|
|||
raise exception.ClientConnectionError(msg)
|
||||
|
||||
if (self.key_file is not None and
|
||||
not os.path.exists(self.key_file)):
|
||||
not os.path.exists(self.key_file)):
|
||||
msg = _("The key file you specified %s does not "
|
||||
"exist") % self.key_file
|
||||
raise exception.ClientConnectionError(msg)
|
||||
connect_kwargs['key_file'] = self.key_file
|
||||
|
||||
if (self.cert_file is not None and
|
||||
not os.path.exists(self.cert_file)):
|
||||
not os.path.exists(self.cert_file)):
|
||||
msg = _("The cert file you specified %s does not "
|
||||
"exist") % self.cert_file
|
||||
raise exception.ClientConnectionError(msg)
|
||||
connect_kwargs['cert_file'] = self.cert_file
|
||||
|
||||
if (self.ca_file is not None and
|
||||
not os.path.exists(self.ca_file)):
|
||||
not os.path.exists(self.ca_file)):
|
||||
msg = _("The CA file you specified %s does not "
|
||||
"exist") % self.ca_file
|
||||
raise exception.ClientConnectionError(msg)
|
||||
|
|
|
@ -34,80 +34,80 @@ DEFAULT_PORT = 8000
|
|||
paste_deploy_group = cfg.OptGroup('paste_deploy')
|
||||
paste_deploy_opts = [
|
||||
cfg.StrOpt('flavor'),
|
||||
cfg.StrOpt('config_file'),
|
||||
]
|
||||
cfg.StrOpt('config_file')]
|
||||
|
||||
|
||||
bind_opts = [cfg.IntOpt('bind_port', default=8000),
|
||||
cfg.StrOpt('bind_host', default='127.0.0.1')]
|
||||
bind_opts = [
|
||||
cfg.IntOpt('bind_port', default=8000),
|
||||
cfg.StrOpt('bind_host', default='127.0.0.1')]
|
||||
|
||||
service_opts = [
|
||||
cfg.IntOpt('report_interval',
|
||||
default=10,
|
||||
help='seconds between nodes reporting state to datastore'),
|
||||
cfg.IntOpt('periodic_interval',
|
||||
default=60,
|
||||
help='seconds between running periodic tasks'),
|
||||
cfg.StrOpt('ec2_listen',
|
||||
default="0.0.0.0",
|
||||
help='IP address for EC2 API to listen'),
|
||||
cfg.IntOpt('ec2_listen_port',
|
||||
default=8773,
|
||||
help='port for ec2 api to listen'),
|
||||
cfg.StrOpt('osapi_compute_listen',
|
||||
default="0.0.0.0",
|
||||
help='IP address for OpenStack API to listen'),
|
||||
cfg.IntOpt('osapi_compute_listen_port',
|
||||
default=8774,
|
||||
help='list port for osapi compute'),
|
||||
cfg.StrOpt('osapi_volume_listen',
|
||||
default="0.0.0.0",
|
||||
help='IP address for OpenStack Volume API to listen'),
|
||||
cfg.IntOpt('osapi_volume_listen_port',
|
||||
default=8776,
|
||||
help='port for os volume api to listen'),
|
||||
cfg.StrOpt('heat_metadata_server_url',
|
||||
default="",
|
||||
help='URL of the Heat metadata server'),
|
||||
cfg.StrOpt('heat_waitcondition_server_url',
|
||||
default="",
|
||||
help='URL of the Heat waitcondition server'),
|
||||
cfg.StrOpt('heat_watch_server_url',
|
||||
default="",
|
||||
help='URL of the Heat cloudwatch server'),
|
||||
cfg.StrOpt('heat_stack_user_role',
|
||||
default="heat_stack_user",
|
||||
help='Keystone role for heat template-defined users'),
|
||||
]
|
||||
cfg.IntOpt('report_interval',
|
||||
default=10,
|
||||
help='seconds between nodes reporting state to datastore'),
|
||||
cfg.IntOpt('periodic_interval',
|
||||
default=60,
|
||||
help='seconds between running periodic tasks'),
|
||||
cfg.StrOpt('ec2_listen',
|
||||
default="0.0.0.0",
|
||||
help='IP address for EC2 API to listen'),
|
||||
cfg.IntOpt('ec2_listen_port',
|
||||
default=8773,
|
||||
help='port for ec2 api to listen'),
|
||||
cfg.StrOpt('osapi_compute_listen',
|
||||
default="0.0.0.0",
|
||||
help='IP address for OpenStack API to listen'),
|
||||
cfg.IntOpt('osapi_compute_listen_port',
|
||||
default=8774,
|
||||
help='list port for osapi compute'),
|
||||
cfg.StrOpt('osapi_volume_listen',
|
||||
default="0.0.0.0",
|
||||
help='IP address for OpenStack Volume API to listen'),
|
||||
cfg.IntOpt('osapi_volume_listen_port',
|
||||
default=8776,
|
||||
help='port for os volume api to listen'),
|
||||
cfg.StrOpt('heat_metadata_server_url',
|
||||
default="",
|
||||
help='URL of the Heat metadata server'),
|
||||
cfg.StrOpt('heat_waitcondition_server_url',
|
||||
default="",
|
||||
help='URL of the Heat waitcondition server'),
|
||||
cfg.StrOpt('heat_watch_server_url',
|
||||
default="",
|
||||
help='URL of the Heat cloudwatch server'),
|
||||
cfg.StrOpt('heat_stack_user_role',
|
||||
default="heat_stack_user",
|
||||
help='Keystone role for heat template-defined users')]
|
||||
|
||||
db_opts = [
|
||||
cfg.StrOpt('sql_connection',
|
||||
default='mysql://heat:heat@localhost/heat',
|
||||
help='The SQLAlchemy connection string used to connect to the '
|
||||
'database'),
|
||||
cfg.IntOpt('sql_idle_timeout',
|
||||
default=3600,
|
||||
help='timeout before idle sql connections are reaped'),
|
||||
]
|
||||
cfg.StrOpt('sql_connection',
|
||||
default='mysql://heat:heat@localhost/heat',
|
||||
help='The SQLAlchemy connection string used to connect to the '
|
||||
'database'),
|
||||
cfg.IntOpt('sql_idle_timeout',
|
||||
default=3600,
|
||||
help='timeout before idle sql connections are reaped')]
|
||||
|
||||
engine_opts = [
|
||||
cfg.StrOpt('instance_driver',
|
||||
default='heat.engine.nova',
|
||||
help='Driver to use for controlling instances'),
|
||||
cfg.ListOpt('plugin_dirs',
|
||||
default=['/usr/lib64/heat', '/usr/lib/heat'],
|
||||
help='List of directories to search for Plugins'),
|
||||
]
|
||||
cfg.StrOpt('instance_driver',
|
||||
default='heat.engine.nova',
|
||||
help='Driver to use for controlling instances'),
|
||||
cfg.ListOpt('plugin_dirs',
|
||||
default=['/usr/lib64/heat', '/usr/lib/heat'],
|
||||
help='List of directories to search for Plugins')]
|
||||
|
||||
rpc_opts = [
|
||||
cfg.StrOpt('host',
|
||||
default=socket.gethostname(),
|
||||
help='Name of the engine node. This can be an opaque identifier.'
|
||||
'It is not necessarily a hostname, FQDN, or IP address.'),
|
||||
cfg.StrOpt('control_exchange',
|
||||
default='heat',
|
||||
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
|
||||
cfg.StrOpt('engine_topic',
|
||||
default='engine',
|
||||
help='the topic engine nodes listen on')
|
||||
]
|
||||
cfg.StrOpt('host',
|
||||
default=socket.gethostname(),
|
||||
help='Name of the engine node. '
|
||||
'This can be an opaque identifier.'
|
||||
'It is not necessarily a hostname, FQDN, or IP address.'),
|
||||
cfg.StrOpt('control_exchange',
|
||||
default='heat',
|
||||
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
|
||||
cfg.StrOpt('engine_topic',
|
||||
default='engine',
|
||||
help='the topic engine nodes listen on')]
|
||||
|
||||
|
||||
def register_api_opts():
|
||||
|
|
|
@ -114,10 +114,8 @@ def get_admin_context(read_deleted="no"):
|
|||
|
||||
class ContextMiddleware(wsgi.Middleware):
|
||||
|
||||
opts = [
|
||||
cfg.BoolOpt('owner_is_tenant', default=True),
|
||||
cfg.StrOpt('admin_role', default='admin'),
|
||||
]
|
||||
opts = [cfg.BoolOpt('owner_is_tenant', default=True),
|
||||
cfg.StrOpt('admin_role', default='admin')]
|
||||
|
||||
def __init__(self, app, conf, **local_conf):
|
||||
cfg.CONF.register_opts(self.opts)
|
||||
|
|
|
@ -73,7 +73,7 @@ class KeystoneClient(object):
|
|||
# deployed on an instance (hence are implicitly untrusted)
|
||||
roles = self.client.roles.list()
|
||||
stack_user_role = [r.id for r in roles
|
||||
if r.name == cfg.CONF.heat_stack_user_role]
|
||||
if r.name == cfg.CONF.heat_stack_user_role]
|
||||
if len(stack_user_role) == 1:
|
||||
role_id = stack_user_role[0]
|
||||
logger.debug("Adding user %s to role %s" % (user.id, role_id))
|
||||
|
|
|
@ -75,7 +75,7 @@ class HeatIdentifier(collections.Mapping):
|
|||
# Sanity check the URL
|
||||
urlp = urlparse.urlparse(url)
|
||||
if (urlp.scheme not in ('http', 'https') or
|
||||
not urlp.netloc or not urlp.path):
|
||||
not urlp.netloc or not urlp.path):
|
||||
raise ValueError('"%s" is not a valid URL' % url)
|
||||
|
||||
# Remove any query-string and extract the ARN
|
||||
|
|
|
@ -43,10 +43,10 @@ def parse(tmpl_str):
|
|||
except yaml.scanner.ScannerError as e:
|
||||
raise ValueError(e)
|
||||
else:
|
||||
if tpl == None:
|
||||
if tpl is None:
|
||||
tpl = {}
|
||||
default_for_missing(tpl, u'HeatTemplateFormatVersion',
|
||||
HEAT_VERSIONS)
|
||||
HEAT_VERSIONS)
|
||||
return tpl
|
||||
|
||||
|
||||
|
@ -73,7 +73,7 @@ def convert_json_to_yaml(json_str):
|
|||
global key_order
|
||||
# Replace AWS format version with Heat format version
|
||||
json_str = re.sub('"AWSTemplateFormatVersion"\s*:\s*"[^"]+"\s*,',
|
||||
'', json_str)
|
||||
'', json_str)
|
||||
|
||||
# insert a sortable order into the key to preserve file ordering
|
||||
key_order = 0
|
||||
|
|
|
@ -102,8 +102,8 @@ def get_socket(conf, default_port):
|
|||
# support IPv6 in getaddrinfo(). We need to get around this in the
|
||||
# future or monitor upstream for a fix
|
||||
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
|
||||
bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
|
||||
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
|
||||
bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
|
||||
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
|
||||
|
||||
conf.register_opts(socket_opts)
|
||||
|
||||
|
@ -239,10 +239,11 @@ class Server(object):
|
|||
eventlet.patcher.monkey_patch(all=False, socket=True)
|
||||
self.pool = eventlet.GreenPool(size=self.threads)
|
||||
try:
|
||||
eventlet_wsgi_server(self.sock, self.application,
|
||||
custom_pool=self.pool,
|
||||
url_length_limit=URL_LENGTH_LIMIT,
|
||||
log=WritableLogger(self.logger))
|
||||
eventlet_wsgi_server(self.sock,
|
||||
self.application,
|
||||
custom_pool=self.pool,
|
||||
url_length_limit=URL_LENGTH_LIMIT,
|
||||
log=WritableLogger(self.logger))
|
||||
except socket.error, err:
|
||||
if err[0] != errno.EINVAL:
|
||||
raise
|
||||
|
|
|
@ -34,8 +34,7 @@ SQL_IDLE_TIMEOUT = 3600
|
|||
db_opts = [
|
||||
cfg.StrOpt('db_backend',
|
||||
default='sqlalchemy',
|
||||
help='The backend to use for db'),
|
||||
]
|
||||
help='The backend to use for db')]
|
||||
|
||||
IMPL = utils.LazyPluggable('db_backend',
|
||||
sqlalchemy='heat.db.sqlalchemy.api')
|
||||
|
|
|
@ -69,8 +69,8 @@ def resource_get(context, resource_id):
|
|||
|
||||
def resource_get_by_name_and_stack(context, resource_name, stack_id):
|
||||
result = model_query(context, models.Resource).\
|
||||
filter_by(name=resource_name).\
|
||||
filter_by(stack_id=stack_id).first()
|
||||
filter_by(name=resource_name).\
|
||||
filter_by(stack_id=stack_id).first()
|
||||
|
||||
return result
|
||||
|
||||
|
@ -80,7 +80,7 @@ def resource_get_by_physical_resource_id(context, physical_resource_id):
|
|||
.filter_by(nova_instance=physical_resource_id)
|
||||
.first())
|
||||
if (result is not None and context is not None and
|
||||
result.stack.tenant != context.tenant_id):
|
||||
result.stack.tenant != context.tenant_id):
|
||||
return None
|
||||
return result
|
||||
|
||||
|
@ -103,7 +103,7 @@ def resource_create(context, values):
|
|||
|
||||
def resource_get_all_by_stack(context, stack_id):
|
||||
results = model_query(context, models.Resource).\
|
||||
filter_by(stack_id=stack_id).all()
|
||||
filter_by(stack_id=stack_id).all()
|
||||
|
||||
if not results:
|
||||
raise NotFound("no resources for stack_id %s were found" % stack_id)
|
||||
|
@ -113,9 +113,9 @@ def resource_get_all_by_stack(context, stack_id):
|
|||
|
||||
def stack_get_by_name(context, stack_name, owner_id=None):
|
||||
query = model_query(context, models.Stack).\
|
||||
filter_by(tenant=context.tenant_id).\
|
||||
filter_by(name=stack_name).\
|
||||
filter_by(owner_id=owner_id)
|
||||
filter_by(tenant=context.tenant_id).\
|
||||
filter_by(name=stack_name).\
|
||||
filter_by(owner_id=owner_id)
|
||||
|
||||
return query.first()
|
||||
|
||||
|
@ -129,7 +129,7 @@ def stack_get(context, stack_id, admin=False):
|
|||
return result
|
||||
|
||||
if (result is not None and context is not None and
|
||||
result.tenant != context.tenant_id):
|
||||
result.tenant != context.tenant_id):
|
||||
return None
|
||||
|
||||
return result
|
||||
|
@ -137,14 +137,14 @@ def stack_get(context, stack_id, admin=False):
|
|||
|
||||
def stack_get_all(context):
|
||||
results = model_query(context, models.Stack).\
|
||||
filter_by(owner_id=None).all()
|
||||
filter_by(owner_id=None).all()
|
||||
return results
|
||||
|
||||
|
||||
def stack_get_all_by_tenant(context):
|
||||
results = model_query(context, models.Stack).\
|
||||
filter_by(owner_id=None).\
|
||||
filter_by(tenant=context.tenant_id).all()
|
||||
filter_by(owner_id=None).\
|
||||
filter_by(tenant=context.tenant_id).all()
|
||||
return results
|
||||
|
||||
|
||||
|
@ -160,7 +160,7 @@ def stack_update(context, stack_id, values):
|
|||
|
||||
if not stack:
|
||||
raise NotFound('Attempt to update a stack with id: %s %s' %
|
||||
(stack_id, 'that does not exist'))
|
||||
(stack_id, 'that does not exist'))
|
||||
|
||||
old_template_id = stack.raw_template_id
|
||||
|
||||
|
@ -180,7 +180,7 @@ def stack_delete(context, stack_id):
|
|||
s = stack_get(context, stack_id)
|
||||
if not s:
|
||||
raise NotFound('Attempt to delete a stack with id: %s %s' %
|
||||
(stack_id, 'that does not exist'))
|
||||
(stack_id, 'that does not exist'))
|
||||
|
||||
session = Session.object_session(s)
|
||||
|
||||
|
@ -236,18 +236,18 @@ def event_get_all(context):
|
|||
|
||||
def event_get_all_by_tenant(context):
|
||||
stacks = model_query(context, models.Stack).\
|
||||
filter_by(tenant=context.tenant_id).all()
|
||||
filter_by(tenant=context.tenant_id).all()
|
||||
results = []
|
||||
for stack in stacks:
|
||||
results.extend(model_query(context, models.Event).
|
||||
filter_by(stack_id=stack.id).all())
|
||||
filter_by(stack_id=stack.id).all())
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def event_get_all_by_stack(context, stack_id):
|
||||
results = model_query(context, models.Event).\
|
||||
filter_by(stack_id=stack_id).all()
|
||||
filter_by(stack_id=stack_id).all()
|
||||
|
||||
return results
|
||||
|
||||
|
@ -261,13 +261,13 @@ def event_create(context, values):
|
|||
|
||||
def watch_rule_get(context, watch_rule_id):
|
||||
result = model_query(context, models.WatchRule).\
|
||||
filter_by(id=watch_rule_id).first()
|
||||
filter_by(id=watch_rule_id).first()
|
||||
return result
|
||||
|
||||
|
||||
def watch_rule_get_by_name(context, watch_rule_name):
|
||||
result = model_query(context, models.WatchRule).\
|
||||
filter_by(name=watch_rule_name).first()
|
||||
filter_by(name=watch_rule_name).first()
|
||||
return result
|
||||
|
||||
|
||||
|
@ -278,7 +278,7 @@ def watch_rule_get_all(context):
|
|||
|
||||
def watch_rule_get_all_by_stack(context, stack_id):
|
||||
results = model_query(context, models.WatchRule).\
|
||||
filter_by(stack_id=stack_id).all()
|
||||
filter_by(stack_id=stack_id).all()
|
||||
return results
|
||||
|
||||
|
||||
|
@ -294,7 +294,7 @@ def watch_rule_update(context, watch_id, values):
|
|||
|
||||
if not wr:
|
||||
raise NotFound('Attempt to update a watch with id: %s %s' %
|
||||
(watch_id, 'that does not exist'))
|
||||
(watch_id, 'that does not exist'))
|
||||
|
||||
wr.update(values)
|
||||
wr.save(_session(context))
|
||||
|
@ -302,11 +302,11 @@ def watch_rule_update(context, watch_id, values):
|
|||
|
||||
def watch_rule_delete(context, watch_name):
|
||||
wr = model_query(context, models.WatchRule).\
|
||||
filter_by(name=watch_name).first()
|
||||
filter_by(name=watch_name).first()
|
||||
|
||||
if not wr:
|
||||
raise NotFound('Attempt to delete a watch_rule with name: %s %s' %
|
||||
(watch_name, 'that does not exist'))
|
||||
(watch_name, 'that does not exist'))
|
||||
|
||||
session = Session.object_session(wr)
|
||||
|
||||
|
@ -331,11 +331,11 @@ def watch_data_get_all(context):
|
|||
|
||||
def watch_data_delete(context, watch_name):
|
||||
ds = model_query(context, models.WatchRule).\
|
||||
filter_by(name=watch_name).all()
|
||||
filter_by(name=watch_name).all()
|
||||
|
||||
if not ds:
|
||||
raise NotFound('Attempt to delete watch_data with name: %s %s' %
|
||||
(watch_name, 'that does not exist'))
|
||||
(watch_name, 'that does not exist'))
|
||||
|
||||
session = Session.object_session(ds)
|
||||
for d in ds:
|
||||
|
|
|
@ -19,9 +19,12 @@ def upgrade(migrate_engine):
|
|||
Column('id', Integer, primary_key=True),
|
||||
Column('created_at', DateTime(timezone=False)),
|
||||
Column('updated_at', DateTime(timezone=False)),
|
||||
Column('name', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None, _warn_on_bytestring=False)),
|
||||
Column('name', String(
|
||||
length=255,
|
||||
convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
Column('raw_template_id', Integer, ForeignKey("raw_template.id"),
|
||||
nullable=False),
|
||||
)
|
||||
|
@ -32,27 +35,37 @@ def upgrade(migrate_engine):
|
|||
Column('stack_id', Integer, ForeignKey("stack.id"), nullable=False),
|
||||
Column('created_at', DateTime(timezone=False)),
|
||||
Column('updated_at', DateTime(timezone=False)),
|
||||
Column('name', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None, _warn_on_bytestring=False)),
|
||||
Column('name', String(
|
||||
length=255,
|
||||
convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
)
|
||||
|
||||
resource = Table(
|
||||
'resource', meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('nova_instance', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None, _warn_on_bytestring=False)),
|
||||
Column('name', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None, _warn_on_bytestring=False)),
|
||||
|
||||
Column('nova_instance', String(
|
||||
length=255,
|
||||
convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
Column('name', String(
|
||||
length=255,
|
||||
convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
Column('created_at', DateTime(timezone=False)),
|
||||
Column('updated_at', DateTime(timezone=False)),
|
||||
Column('state', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
Column('state', String(
|
||||
length=255,
|
||||
convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
Column('state_description', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
|
|
|
@ -11,9 +11,11 @@ def upgrade(migrate_engine):
|
|||
Column('id', Integer, primary_key=True),
|
||||
Column('created_at', DateTime(timezone=False)),
|
||||
Column('updated_at', DateTime(timezone=False)),
|
||||
Column('stack_name', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None, _warn_on_bytestring=False)),
|
||||
Column('stack_name', String(length=255,
|
||||
convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
Column('name', String(length=255, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None, _warn_on_bytestring=False)),
|
||||
|
|
|
@ -33,9 +33,9 @@ def upgrade(migrate_engine):
|
|||
Column('auth_url', Text()),
|
||||
Column('aws_auth_url', Text()),
|
||||
Column('tenant_id', String(length=256, convert_unicode=False,
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
assert_unicode=None,
|
||||
unicode_error=None,
|
||||
_warn_on_bytestring=False)),
|
||||
Column('aws_creds', Text())
|
||||
)
|
||||
|
||||
|
|
|
@ -16,18 +16,21 @@ def upgrade(migrate_engine):
|
|||
fkeys = list(event.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
ForeignKeyConstraint(
|
||||
columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
|
||||
fkeys = list(resource.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
ForeignKeyConstraint(
|
||||
columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
|
||||
stack.c.id.alter(String(36), primary_key=True,
|
||||
stack.c.id.alter(
|
||||
String(36), primary_key=True,
|
||||
default=uuidutils.generate_uuid)
|
||||
event.c.stack_id.alter(String(36), nullable=False)
|
||||
resource.c.stack_id.alter(String(36), nullable=False)
|
||||
|
@ -35,16 +38,18 @@ def upgrade(migrate_engine):
|
|||
fkeys = list(event.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
ForeignKeyConstraint(
|
||||
columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
|
||||
fkeys = list(resource.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
ForeignKeyConstraint(
|
||||
columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
|
||||
|
@ -63,18 +68,21 @@ def downgrade(migrate_engine):
|
|||
fkeys = list(event.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
ForeignKeyConstraint(
|
||||
columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
|
||||
fkeys = list(resource.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
ForeignKeyConstraint(
|
||||
columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).drop()
|
||||
|
||||
stack.c.id.alter(Integer, primary_key=True,
|
||||
stack.c.id.alter(
|
||||
Integer, primary_key=True,
|
||||
default=utils.generate_uuid)
|
||||
event.c.stack_id.alter(Integer, nullable=False)
|
||||
resource.c.stack_id.alter(Integer, nullable=False)
|
||||
|
@ -82,13 +90,15 @@ def downgrade(migrate_engine):
|
|||
fkeys = list(event.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
ForeignKeyConstraint(
|
||||
columns=[event.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
|
||||
fkeys = list(resource.c.stack_id.foreign_keys)
|
||||
if fkeys:
|
||||
fkey_name = fkeys[0].constraint.name
|
||||
ForeignKeyConstraint(columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
ForeignKeyConstraint(
|
||||
columns=[resource.c.stack_id],
|
||||
refcolumns=[stack.c.id],
|
||||
name=fkey_name).create()
|
||||
|
|
|
@@ -13,18 +13,20 @@ def upgrade(migrate_engine):
fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).drop()
ForeignKeyConstraint(columns=[
stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).drop()

stack.c.owner_id.alter(String(36), nullable=True)

fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).create()
ForeignKeyConstraint(
columns=[stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).create()


def downgrade(migrate_engine):

@@ -41,22 +43,25 @@ def downgrade(migrate_engine):
fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).drop()
ForeignKeyConstraint(
columns=[stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).drop()

stack.c.owner_id.alter(Integer, nullable=True)

fkeys = list(event.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[event.c.stack_id],
refcolumns=[stack.c.id],
name=fkey_name).create()
ForeignKeyConstraint(
columns=[event.c.stack_id],
refcolumns=[stack.c.id],
name=fkey_name).create()

fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).create()
ForeignKeyConstraint(
columns=[stack.c.owner_id],
refcolumns=[stack.c.id],
name=fkey_name).create()

@@ -9,7 +9,7 @@ def upgrade(migrate_engine):
watch_rule = Table('watch_rule', meta, autoload=True)

Column('stack_id', String(length=36), ForeignKey("stack.id"),
nullable=False).create(watch_rule)
nullable=False).create(watch_rule)

watch_rule.c.stack_name.drop()

@@ -43,8 +43,8 @@ def patched_with_engine(f, *a, **kw):
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine


# NOTE(jkoelker) Delay importing migrate until we are patched

@@ -90,7 +90,7 @@ def db_version():
meta.reflect(bind=engine)
try:
for table in ('stack', 'resource', 'event',
'parsed_template', 'raw_template'):
'parsed_template', 'raw_template'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:

@@ -144,20 +144,22 @@ class Stack(BASE, HeatBase):

__tablename__ = 'stack'

id = Column(String, primary_key=True,
default=uuidutils.generate_uuid)
id = Column(String, primary_key=True, default=uuidutils.generate_uuid)
name = Column(String)
raw_template_id = Column(Integer, ForeignKey('raw_template.id'),
nullable=False)
raw_template = relationship(RawTemplate,
backref=backref('stack'))
raw_template_id = Column(
Integer,
ForeignKey('raw_template.id'),
nullable=False)
raw_template = relationship(RawTemplate, backref=backref('stack'))
username = Column(String)
tenant = Column(String)
status = Column('status', String)
status_reason = Column('status_reason', String)
parameters = Column('parameters', Json)
user_creds_id = Column(Integer, ForeignKey('user_creds.id'),
nullable=False)
user_creds_id = Column(
Integer,
ForeignKey('user_creds.id'),
nullable=False)
owner_id = Column(Integer, nullable=True)
timeout = Column(Integer)

@@ -180,8 +182,7 @@ class UserCreds(BASE, HeatBase):
aws_auth_url = Column(String)
tenant_id = Column(String)
aws_creds = Column(String)
stack = relationship(Stack,
backref=backref('user_creds'))
stack = relationship(Stack, backref=backref('user_creds'))


class Event(BASE, HeatBase):

@@ -190,10 +191,8 @@ class Event(BASE, HeatBase):
__tablename__ = 'event'

id = Column(Integer, primary_key=True)
stack_id = Column(String, ForeignKey('stack.id'),
nullable=False)
stack = relationship(Stack,
backref=backref('events'))
stack_id = Column(String, ForeignKey('stack.id'), nullable=False)
stack = relationship(Stack, backref=backref('events'))

name = Column(String)
logical_resource_id = Column(String)

@@ -216,8 +215,7 @@ class Resource(BASE, HeatBase):
# odd name as "metadata" is reserved
rsrc_metadata = Column('rsrc_metadata', Json)

stack_id = Column(String, ForeignKey('stack.id'),
nullable=False)
stack_id = Column(String, ForeignKey('stack.id'), nullable=False)
stack = relationship(Stack, backref=backref('resources'))

@@ -232,8 +230,7 @@ class WatchRule(BASE, HeatBase):
state = Column('state', String)
last_evaluated = Column(DateTime, default=timeutils.utcnow)

stack_id = Column(String, ForeignKey('stack.id'),
nullable=False)
stack_id = Column(String, ForeignKey('stack.id'), nullable=False)
stack = relationship(Stack, backref=backref('watch_rule'))

@@ -245,6 +242,8 @@ class WatchData(BASE, HeatBase):
id = Column(Integer, primary_key=True)
data = Column('data', Json)

watch_rule_id = Column(Integer, ForeignKey('watch_rule.id'),
nullable=False)
watch_rule_id = Column(
Integer,
ForeignKey('watch_rule.id'),
nullable=False)
watch_rule = relationship(WatchRule, backref=backref('watch_data'))
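Most of the hunks in this change follow the shape visible in the model definitions above: pep8 1.3 introduced the continuation-line checks (E12x), so a wrapped call must either align its continuation lines with the opening bracket or break immediately after the bracket and use a uniform hanging indent. A small sketch of the two accepted forms (the column names here are only for illustration):

    from sqlalchemy import Column, ForeignKey, Integer

    # Visual indent: continuation lines line up under the opening bracket.
    user_creds_id = Column(Integer, ForeignKey('user_creds.id'),
                           nullable=False)

    # Hanging indent: break right after the bracket, indent every argument.
    raw_template_id = Column(
        Integer,
        ForeignKey('raw_template.id'),
        nullable=False)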
@@ -87,9 +87,10 @@ def get_engine():

def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
ses = sqlalchemy.orm.sessionmaker(bind=engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
ses = sqlalchemy.orm.sessionmaker(
bind=engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
return sqlalchemy.orm.scoped_session(ses)

@@ -145,7 +145,7 @@ def format_watch(watch):
WATCH_STATE_REASON: watch.rule.get(RULE_STATE_REASON),
WATCH_STATE_REASON_DATA: watch.rule.get(RULE_STATE_REASON_DATA),
WATCH_STATE_UPDATED_TIME: timeutils.isotime(
watch.rule.get(RULE_STATE_UPDATED_TIME)),
watch.rule.get(RULE_STATE_UPDATED_TIME)),
WATCH_STATE_VALUE: watch.state,
WATCH_STATISTIC: watch.rule.get(RULE_STATISTIC),
WATCH_THRESHOLD: watch.rule.get(RULE_THRESHOLD),

@@ -116,7 +116,7 @@ class Clients(object):
# Lookup endpoint for object-store service type
service_type = 'object-store'
endpoints = self.keystone().service_catalog.get_endpoints(
service_type=service_type)
service_type=service_type)
if len(endpoints[service_type]) == 1:
args['preauthurl'] = endpoints[service_type][0]['publicURL']
else:

@@ -155,7 +155,7 @@ class Clients(object):
args['token'] = con.auth_token
else:
logger.error("Quantum connection failed, "
"no password or auth_token!")
"no password or auth_token!")
return None
logger.debug('quantum args %s', args)

@@ -262,8 +262,8 @@ class Parameters(collections.Mapping):
Map the supplied filter function onto each Parameter (with an
optional filter function) and return the resulting dictionary.
'''
return dict((n, func(p)) for n, p in self.params.iteritems()
if filter_func(p))
return dict((n, func(p))
for n, p in self.params.iteritems() if filter_func(p))

def user_parameters(self):
'''

@@ -277,7 +277,7 @@ class Stack(object):
for res in reversed(self):
if not res.name in newstack.keys():
logger.debug("resource %s not found in updated stack"
% res.name + " definition, deleting")
% res.name + " definition, deleting")
result = res.destroy()
if result:
failures.append('Resource %s delete failed'

@@ -289,7 +289,7 @@ class Stack(object):
for res in newstack:
if not res.name in self.keys():
logger.debug("resource %s not found in current stack"
% res.name + " definition, adding")
% res.name + " definition, adding")
res.stack = self
self[res.name] = res
result = self[res.name].create()

@@ -313,8 +313,8 @@ class Stack(object):
# Currently all resource have a default handle_update method
# which returns "requires replacement" (res.UPDATE_REPLACE)
for res in newstack:
if self.resolve_runtime_data(
self[res.name].t) != self.resolve_runtime_data(res.t):
if self.resolve_runtime_data(self[res.name].t) !=\
self.resolve_runtime_data(res.t):

# Can fail if underlying resource class does not
# implement update logic or update requires replacement

@@ -337,7 +337,7 @@ class Stack(object):
% res.name)
else:
logger.warning("Cannot update resource %s," %
res.name + " reason %s" % retval)
res.name + " reason %s" % retval)
failures.append('Resource %s update failed'
% res.name)

@@ -29,8 +29,8 @@ class Property(object):
for key in self.schema:
assert key in SCHEMA_KEYS, 'Unknown schema key "%s"' % key

assert self.type() in SCHEMA_TYPES, \
'Unknown property type "%s"' % self.type()
assert self.type() in SCHEMA_TYPES,\
'Unknown property type "%s"' % self.type()

def required(self):
return self.schema.get(REQUIRED, False)

@@ -103,7 +103,7 @@ class Property(object):

def _validate_list(self, value):
if (not isinstance(value, collections.Sequence) or
isinstance(value, basestring)):
isinstance(value, basestring)):
raise TypeError('"%s" is not a list' % repr(value))

for v in value:

@@ -45,7 +45,7 @@ def _register_class(resource_type, resource_class):
logger.info(_('Registering resource type %s') % resource_type)
if resource_type in _resource_classes:
logger.warning(_('Replacing existing resource type %s') %
resource_type)
resource_type)

_resource_classes[resource_type] = resource_class

@@ -28,28 +28,49 @@ class CloudWatchAlarm(resource.Resource):
'AllowedValues': ['GreaterThanOrEqualToThreshold',
'GreaterThanThreshold', 'LessThanThreshold',
'LessThanOrEqualToThreshold']},
'AlarmDescription': {'Type': 'String'},
'EvaluationPeriods': {'Type': 'String'},
'MetricName': {'Type': 'String'},
'Namespace': {'Type': 'String'},
'Period': {'Type': 'String'},
'Statistic': {'Type': 'String',
'AllowedValues': ['SampleCount', 'Average', 'Sum',
'Minimum', 'Maximum']},
'AlarmActions': {'Type': 'List'},
'OKActions': {'Type': 'List'},
'Dimensions': {'Type': 'List'},
'InsufficientDataActions': {'Type': 'List'},
'Threshold': {'Type': 'String'},
'Units': {'Type': 'String',
'AllowedValues': ['Seconds', 'Microseconds', 'Milliseconds',
'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits',
'Terabits', 'Percent', 'Count', 'Bytes/Second',
'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
'Terabytes/Second', 'Bits/Second', 'Kilobits/Second',
'Megabits/Second', 'Gigabits/Second', 'Terabits/Second',
'Count/Second', None]}}
'AlarmDescription': {'Type': 'String'},
'EvaluationPeriods': {'Type': 'String'},
'MetricName': {'Type': 'String'},
'Namespace': {'Type': 'String'},
'Period': {'Type': 'String'},
'Statistic': {'Type': 'String',
'AllowedValues': ['SampleCount',
'Average',
'Sum',
'Minimum',
'Maximum']},
'AlarmActions': {'Type': 'List'},
'OKActions': {'Type': 'List'},
'Dimensions': {'Type': 'List'},
'InsufficientDataActions': {'Type': 'List'},
'Threshold': {'Type': 'String'},
'Units': {'Type': 'String',
'AllowedValues': ['Seconds',
'Microseconds',
'Milliseconds',
'Bytes',
'Kilobytes',
'Megabytes',
'Gigabytes',
'Terabytes',
'Bits',
'Kilobits',
'Megabits',
'Gigabits',
'Terabits',
'Percent',
'Count',
'Bytes/Second',
'Kilobytes/Second',
'Megabytes/Second',
'Gigabytes/Second',
'Terabytes/Second',
'Bits/Second',
'Kilobits/Second',
'Megabits/Second',
'Gigabits/Second',
'Terabits/Second',
'Count/Second', None]}}

strict_dependency = False

@@ -51,7 +51,7 @@ class Restarter(resource.Resource):

if victim is None:
logger.info('%s Alarm, can not find instance %s' %
(self.name, self.properties['InstanceId']))
(self.name, self.properties['InstanceId']))
return

logger.info('%s Alarm, restarting resource: %s' %

@@ -67,9 +67,9 @@ class Instance(resource.Resource):
'Required': True}}

properties_schema = {'ImageId': {'Type': 'String',
'Required': True},
'Required': True},
'InstanceType': {'Type': 'String',
'Required': True},
'Required': True},
'KeyName': {'Type': 'String',
'Required': True},
'AvailabilityZone': {'Type': 'String',

@@ -92,7 +92,7 @@ class Instance(resource.Resource):
'SourceDestCheck': {'Type': 'Boolean',
'Implemented': False},
'SubnetId': {'Type': 'String',
'Implemented': False},
'Implemented': False},
'Tags': {'Type': 'List',
'Schema': {'Type': 'Map',
'Schema': tags_schema}},

@@ -210,11 +210,11 @@ class Instance(resource.Resource):
return self.mime_string

def handle_create(self):
if self.properties.get('SecurityGroups') == None:
if self.properties.get('SecurityGroups') is None:
security_groups = None
else:
security_groups = [self.physical_resource_name_find(sg) for sg in
self.properties.get('SecurityGroups')]
security_groups = [self.physical_resource_name_find(sg)
for sg in self.properties.get('SecurityGroups')]

userdata = self.properties['UserData'] or ''
userdata += '\ntouch /var/lib/cloud/instance/provision-finished\n'

@@ -286,7 +286,7 @@ class Instance(resource.Resource):
if res:
return res

#check validity of key
# check validity of key
try:
key_name = self.properties['KeyName']
except ValueError:
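Alongside the re-indentation, a few hunks (handle_create above, and the engine service and watch rule code further down) replace equality comparisons against None with identity tests, which is pep8's E711 check. A short sketch with a hypothetical property dict:

    # E711: comparisons to the None singleton should use 'is' / 'is not'.
    properties = {'SecurityGroups': None}
    groups = properties.get('SecurityGroups')
    if groups is None:        # was: if groups == None:
        security_groups = None
    else:
        security_groups = list(groups)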
@@ -178,7 +178,7 @@ class LoadBalancer(stack.Stack):
'Timeout': {'Type': 'Number',
'Required': True},
'UnhealthyThreshold': {'Type': 'Number',
'Required': True},
'Required': True},
}

properties_schema = {

@@ -22,12 +22,11 @@ logger = logging.getLogger(__name__)

class FloatingIP(quantum.QuantumResource):
properties_schema = {'floating_network_id': {'Type': 'String',
'Required': True},
'value_specs': {'Type': 'Map',
'Default': {}},
'port_id': {'Type': 'String'},
'fixed_ip_address': {'Type': 'String'},
}
'Required': True},
'value_specs': {'Type': 'Map',
'Default': {}},
'port_id': {'Type': 'String'},
'fixed_ip_address': {'Type': 'String'}}

def handle_create(self):
props = self.prepare_properties(self.properties, self.name)

@@ -47,11 +46,10 @@ class FloatingIP(quantum.QuantumResource):

class FloatingIPAssociation(quantum.QuantumResource):
properties_schema = {'floatingip_id': {'Type': 'String',
'Required': True},
'port_id': {'Type': 'String',
'Required': True},
'fixed_ip_address': {'Type': 'String'}
}
'Required': True},
'port_id': {'Type': 'String',
'Required': True},
'fixed_ip_address': {'Type': 'String'}}

def __init__(self, name, json_snippet, stack):
super(FloatingIPAssociation, self).__init__(name, json_snippet, stack)

@@ -69,7 +67,7 @@ class FloatingIPAssociation(quantum.QuantumResource):
client = self.quantum()
(floatingip_id, port_id) = self.resource_id.split(':')
client.update_floatingip(floatingip_id,
{'floatingip': {'port_id': None}})
{'floatingip': {'port_id': None}})


def resource_mapping():

@@ -22,11 +22,10 @@ logger = logging.getLogger(__name__)

class Net(quantum.QuantumResource):
properties_schema = {'name': {'Type': 'String'},
'value_specs': {'Type': 'Map',
'Default': {}},
'admin_state_up': {'Default': True,
'Type': 'Boolean'},
}
'value_specs': {'Type': 'Map',
'Default': {}},
'admin_state_up': {'Default': True,
'Type': 'Boolean'}}

def __init__(self, name, json_snippet, stack):
super(Net, self).__init__(name, json_snippet, stack)

@@ -23,23 +23,22 @@ logger = logging.getLogger(__name__)
class Port(quantum.QuantumResource):

fixed_ip_schema = {'subnet_id': {'Type': 'String',
'Required': True},
'ip_address': {'Type': 'String',
'Required': True}}
'Required': True},
'ip_address': {'Type': 'String',
'Required': True}}

properties_schema = {'network_id': {'Type': 'String',
'Required': True},
'name': {'Type': 'String'},
'value_specs': {'Type': 'Map',
'Default': {}},
'admin_state_up': {'Default': True,
'Type': 'Boolean'},
'fixed_ips': {'Type': 'List',
'Schema': {'Type': 'Map',
'Schema': fixed_ip_schema}},
'mac_address': {'Type': 'String'},
'device_id': {'Type': 'String'},
}
'Required': True},
'name': {'Type': 'String'},
'value_specs': {'Type': 'Map',
'Default': {}},
'admin_state_up': {'Default': True,
'Type': 'Boolean'},
'fixed_ips': {'Type': 'List',
'Schema': {'Type': 'Map',
'Schema': fixed_ip_schema}},
'mac_address': {'Type': 'String'},
'device_id': {'Type': 'String'}}

def __init__(self, name, json_snippet, stack):
super(Port, self).__init__(name, json_snippet, stack)

@@ -61,7 +61,7 @@ class QuantumResource(resource.Resource):
values.
'''
props = dict((k, v) for k, v in properties.items()
if v is not None and k != 'value_specs')
if v is not None and k != 'value_specs')

if 'name' in properties.keys():
props.setdefault('name', name)

@@ -82,8 +82,7 @@ class QuantumResource(resource.Resource):
if key in attributes.keys():
return attributes[key]

raise exception.InvalidTemplateAttribute(resource=name,
key=key)
raise exception.InvalidTemplateAttribute(resource=name, key=key)

def handle_update(self):
return self.UPDATE_REPLACE

@@ -23,11 +23,10 @@ logger = logging.getLogger(__name__)

class Router(quantum.QuantumResource):
properties_schema = {'name': {'Type': 'String'},
'value_specs': {'Type': 'Map',
'Default': {}},
'admin_state_up': {'Type': 'Boolean',
'Default': True},
}
'value_specs': {'Type': 'Map',
'Default': {}},
'admin_state_up': {'Type': 'Boolean',
'Default': True}}

def __init__(self, name, json_snippet, stack):
super(Router, self).__init__(name, json_snippet, stack)

@@ -49,10 +48,9 @@ class Router(quantum.QuantumResource):

class RouterInterface(quantum.QuantumResource):
properties_schema = {'router_id': {'Type': 'String',
'Required': True},
'subnet_id': {'Type': 'String',
'Required': True},
}
'Required': True},
'subnet_id': {'Type': 'String',
'Required': True}}

def __init__(self, name, json_snippet, stack):
super(RouterInterface, self).__init__(name, json_snippet, stack)

@@ -61,22 +59,21 @@ class RouterInterface(quantum.QuantumResource):
router_id = self.properties.get('router_id')
subnet_id = self.properties.get('subnet_id')
self.quantum().add_interface_router(router_id,
{'subnet_id': subnet_id})
{'subnet_id': subnet_id})
self.resource_id_set('%s:%s' % (router_id, subnet_id))

def handle_delete(self):
client = self.quantum()
(router_id, subnet_id) = self.resource_id.split(':')
client.remove_interface_router(router_id,
{'subnet_id': subnet_id})
{'subnet_id': subnet_id})


class RouterGateway(quantum.QuantumResource):
properties_schema = {'router_id': {'Type': 'String',
'Required': True},
'network_id': {'Type': 'String',
'Required': True},
}
'Required': True},
'network_id': {'Type': 'String',
'Required': True}}

def __init__(self, name, json_snippet, stack):
super(RouterGateway, self).__init__(name, json_snippet, stack)

@@ -85,7 +82,7 @@ class RouterGateway(quantum.QuantumResource):
router_id = self.properties.get('router_id')
network_id = self.properties.get('network_id')
self.quantum().add_gateway_router(router_id,
{'network_id': network_id})
{'network_id': network_id})
self.resource_id_set('%s:%s' % (router_id, network_id))

def handle_delete(self):

@@ -23,29 +23,28 @@ logger = logging.getLogger(__name__)
class Subnet(quantum.QuantumResource):

allocation_schema = {'start': {'Type': 'String',
'Required': True},
'end': {'Type': 'String',
'Required': True}}
'Required': True},
'end': {'Type': 'String',
'Required': True}}

properties_schema = {'network_id': {'Type': 'String',
'Required': True},
'cidr': {'Type': 'String',
'Required': True},
'value_specs': {'Type': 'Map',
'Default': {}},
'name': {'Type': 'String'},
'admin_state_up': {'Default': True,
'Type': 'Boolean'},
'ip_version': {'Type': 'Integer',
'AllowedValues': [4, 6],
'Default': 4},
'gateway_ip': {'Type': 'String'},
'allocation_pools': {'Type': 'List',
'Schema': {
'Type': 'Map',
'Schema': allocation_schema
}}
}
'Required': True},
'cidr': {'Type': 'String',
'Required': True},
'value_specs': {'Type': 'Map',
'Default': {}},
'name': {'Type': 'String'},
'admin_state_up': {'Default': True,
'Type': 'Boolean'},
'ip_version': {'Type': 'Integer',
'AllowedValues': [4, 6],
'Default': 4},
'gateway_ip': {'Type': 'String'},
'allocation_pools': {'Type': 'List',
'Schema': {
'Type': 'Map',
'Schema': allocation_schema
}}}

def __init__(self, name, json_snippet, stack):
super(Subnet, self).__init__(name, json_snippet, stack)
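The quantum resource hunks above show one more recurring pattern in this cleanup: the schema dictionaries used to leave a lone closing brace dangling on its own line, which pep8 1.3.x flags under the new bracket/continuation checks, so the closing brackets are folded onto the last entry instead. A sketch mirroring the RouterInterface schema above:

    # Sketch only: close the nested dict literal on its last entry.
    properties_schema = {'router_id': {'Type': 'String',
                                       'Required': True},
                         'subnet_id': {'Type': 'String',
                                       'Required': True}}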
@@ -29,19 +29,18 @@ class S3Bucket(resource.Resource):
website_schema = {'IndexDocument': {'Type': 'String'},
'ErrorDocument': {'Type': 'String'}}
properties_schema = {'AccessControl': {
'Type': 'String',
'AllowedValues': ['Private',
'PublicRead',
'PublicReadWrite',
'AuthenticatedRead',
'BucketOwnerRead',
'BucketOwnerFullControl']},
'DeletionPolicy': {
'Type': 'String',
'AllowedValues': ['Delete',
'Retain']},
'WebsiteConfiguration': {'Type': 'Map',
'Schema': website_schema}}
'Type': 'String',
'AllowedValues': ['Private',
'PublicRead',
'PublicReadWrite',
'AuthenticatedRead',
'BucketOwnerRead',
'BucketOwnerFullControl']},
'DeletionPolicy': {
'Type': 'String',
'AllowedValues': ['Delete', 'Retain']},
'WebsiteConfiguration': {'Type': 'Map',
'Schema': website_schema}}

def __init__(self, name, json_snippet, stack):
super(S3Bucket, self).__init__(name, json_snippet, stack)

@@ -63,7 +62,7 @@ class S3Bucket(resource.Resource):
def handle_create(self):
"""Create a bucket."""
container = S3Bucket._create_container_name(
self.physical_resource_name())
self.physical_resource_name())
headers = {}
logger.debug('S3Bucket create container %s with headers %s' %
(container, headers))

@@ -116,7 +115,7 @@ class S3Bucket(resource.Resource):
return parsed[1].split(':')[0]
elif key == 'WebsiteURL':
return '%s://%s%s/%s' % (parsed[0], parsed[1], parsed[2],
self.resource_id)
self.resource_id)
else:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)

@@ -28,7 +28,7 @@ class SecurityGroup(resource.Resource):
'Implemented': False},
'SecurityGroupIngress': {'Type': 'List'},
'SecurityGroupEgress': {'Type': 'List',
'Implemented': False}}
'Implemented': False}}

def __init__(self, name, json_snippet, stack):
super(SecurityGroup, self).__init__(name, json_snippet, stack)

@@ -44,8 +44,8 @@ class SecurityGroup(resource.Resource):

if not sec:
sec = self.nova().security_groups.create(
self.physical_resource_name(),
self.properties['GroupDescription'])
self.physical_resource_name(),
self.properties['GroupDescription'])

self.resource_id_set(sec.id)
if self.properties['SecurityGroupIngress']:

@@ -67,7 +67,7 @@ class Stack(resource.Resource):
stack = self.nested()
if op not in stack.outputs:
raise exception.InvalidTemplateAttribute(
resource=self.physical_resource_name(), key=key)
resource=self.physical_resource_name(), key=key)

return stack.output(op)

@@ -102,7 +102,7 @@ class NestedStack(Stack):
def FnGetAtt(self, key):
if not key.startswith('Outputs.'):
raise exception.InvalidTemplateAttribute(
resource=self.physical_resource_name(), key=key)
resource=self.physical_resource_name(), key=key)

prefix, dot, op = key.partition('.')
return self.get_output(op)

@@ -33,7 +33,7 @@ class User(resource.Resource):
'LoginProfile': {'Type': 'Map',
'Schema': {
'Password': {'Type': 'String'}
}},
}},
'Policies': {'Type': 'List'}}

def __init__(self, name, json_snippet, stack):

@@ -42,11 +42,11 @@ class User(resource.Resource):
def handle_create(self):
passwd = ''
if self.properties['LoginProfile'] and \
'Password' in self.properties['LoginProfile']:
passwd = self.properties['LoginProfile']['Password']
'Password' in self.properties['LoginProfile']:
passwd = self.properties['LoginProfile']['Password']

uid = self.keystone().create_stack_user(self.physical_resource_name(),
passwd)
passwd)
self.resource_id_set(uid)

def handle_update(self):

@@ -64,7 +64,7 @@ class User(resource.Resource):
def FnGetAtt(self, key):
#TODO Implement Arn attribute
raise exception.InvalidTemplateAttribute(
resource=self.physical_resource_name(), key=key)
resource=self.physical_resource_name(), key=key)


class AccessKey(resource.Resource):

@@ -159,7 +159,7 @@ class AccessKey(resource.Resource):
log_res = "<SANITIZED>"
else:
raise exception.InvalidTemplateAttribute(
resource=self.physical_resource_name(), key=key)
resource=self.physical_resource_name(), key=key)

logger.info('%s.GetAtt(%s) == %s' % (self.physical_resource_name(),
key, log_res))

@@ -34,9 +34,10 @@ class Volume(resource.Resource):
super(Volume, self).__init__(name, json_snippet, stack)

def handle_create(self):
vol = self.nova('volume').volumes.create(self.properties['Size'],
display_name=self.physical_resource_name(),
display_description=self.physical_resource_name())
vol = self.nova('volume').volumes.create(
self.properties['Size'],
display_name=self.physical_resource_name(),
display_description=self.physical_resource_name())

while vol.status == 'creating':
eventlet.sleep(1)

@@ -119,7 +120,7 @@ class VolumeAttachment(resource.Resource):
vol.get()
except clients.novaclient.exceptions.NotFound as e:
logger.warning('Deleting VolumeAttachment %s %s - not found' %
(server_id, volume_id))
(server_id, volume_id))


def resource_mapping():

@@ -22,10 +22,10 @@ logger = logging.getLogger(__name__)
class VPC(resource.Resource):
properties_schema = {'CidrBlock': {'Type': 'String'},
'InstanceTenancy': {'Type': 'String',
'AllowedValues': ['default', 'dedicated'],
'Default': 'default',
'Implemented': False}
}
'AllowedValues': ['default',
'dedicated'],
'Default': 'default',
'Implemented': False}}

def __init__(self, name, json_snippet, stack):
super(VPC, self).__init__(name, json_snippet, stack)

@@ -85,7 +85,7 @@ class WaitConditionHandle(resource.Resource):
def handle_create(self):
# Create a keystone user so we can create a signed URL via FnGetRefId
user_id = self.keystone().create_stack_user(
self.physical_resource_name())
self.physical_resource_name())
kp = self.keystone().get_ec2_keypair(user_id)
if not kp:
raise exception.Error("Error creating ec2 keypair for user %s" %

@@ -129,8 +129,8 @@ class WaitCondition(resource.Resource):
properties_schema = {'Handle': {'Type': 'String',
'Required': True},
'Timeout': {'Type': 'Number',
'Required': True,
'MinValue': '1'},
'Required': True,
'MinValue': '1'},
'Count': {'Type': 'Number',
'MinValue': '1'}}

@@ -168,7 +168,7 @@ class EngineService(service.Service):
for s in stacks:
try:
stack = parser.Stack.load(context, stack=s,
resolve_data=False)
resolve_data=False)
except exception.NotFound:
# The stack may have been deleted between listing
# and formatting

@@ -382,8 +382,8 @@ class EngineService(service.Service):
if stack_identity is not None:
s = self._get_stack(context, stack_identity)
else:
rs = db_api.resource_get_by_physical_resource_id(context,
physical_resource_id)
rs = db_api.resource_get_by_physical_resource_id(
context, physical_resource_id)
if not rs:
msg = "The specified PhysicalResourceId doesn't exist"
raise AttributeError(msg)

@@ -400,8 +400,8 @@ class EngineService(service.Service):
name_match = lambda r: True

return [api.format_stack_resource(resource)
for resource in stack if resource.id is not None and
name_match(resource)]
for resource in stack
if resource.id is not None and name_match(resource)]

@request_context
def list_stack_resources(self, context, stack_identity):

@@ -443,7 +443,7 @@ class EngineService(service.Service):
stack = db_api.stack_get(admin_context, sid, admin=True)
if not stack:
logger.error("Unable to retrieve stack %s for periodic task" %
sid)
sid)
return
user_creds = db_api.user_creds_get(stack.user_creds_id)
stack_context = context.RequestContext.from_dict(user_creds)

@@ -502,7 +502,7 @@ class EngineService(service.Service):
# DB API and schema does not yet allow us to easily query by
# namespace/metric, but we will want this at some point
# for now, the API can query all metric data and filter locally
if namespace != None or metric_name != None:
if namespace is not None or metric_name is not None:
logger.error("Filtering by namespace/metric not yet supported")
return

@@ -63,13 +63,13 @@ class WatchRule(object):
'''
Load the watchrule object, either by name or via an existing DB object
'''
if watch == None:
if watch is None:
try:
watch = db_api.watch_rule_get_by_name(context, watch_name)
except Exception as ex:
logger.warn('WatchRule.load (%s) db error %s' %
(watch_name, str(ex)))
if watch == None:
if watch is None:
raise AttributeError('Unknown watch name %s' % watch_name)
else:
return cls(context=context,

@@ -237,7 +237,7 @@ class WatchRule(object):
else:
s = db_api.stack_get(self.context, self.stack_id)
if s and s.status in (parser.Stack.CREATE_COMPLETE,
parser.Stack.UPDATE_COMPLETE):
parser.Stack.UPDATE_COMPLETE):
stack = parser.Stack.load(self.context, stack=s)
for a in self.rule[self.ACTION_MAP[new_state]]:
greenpool.spawn_n(stack[a].alarm)

@@ -274,7 +274,7 @@ class WatchRule(object):
if state != self.state:
if self.rule_action(state):
logger.debug("Overriding state %s for watch %s with %s" %
(self.state, self.name, state))
(self.state, self.name, state))
else:
logger.warning("Unable to override state %s for watch %s" %
(self.state, self.name))
(self.state, self.name))

@@ -54,7 +54,7 @@ class BaseParser(object):

value = value.strip()
if ((value and value[0] == value[-1]) and
(value[0] == "\"" or value[0] == "'")):
(value[0] == "\"" or value[0] == "'")):
value = value[1:-1]
return key.strip(), [value]

@@ -258,7 +258,7 @@ class JSONFormatter(logging.Formatter):
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('heat.openstack.common.notifier.log_notifier' in
CONF.notification_driver):
CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',

@@ -418,7 +418,7 @@ class LegacyFormatter(logging.Formatter):
self._fmt = CONF.logging_default_format_string

if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix

# Cache this on the record, Logger will respect our formated copy

@@ -19,9 +19,10 @@ from heat.openstack.common import importutils
from heat.openstack.common import log as logging


list_notifier_drivers_opt = cfg.MultiStrOpt('list_notifier_drivers',
default=['heat.openstack.common.notifier.no_op_notifier'],
help='List of drivers to send notifications')
list_notifier_drivers_opt = cfg.MultiStrOpt(
'list_notifier_drivers',
default=['heat.openstack.common.notifier.no_op_notifier'],
help='List of drivers to send notifications')

CONF = cfg.CONF
CONF.register_opt(list_notifier_drivers_opt)

@@ -52,8 +52,8 @@ class EngineClient(heat.openstack.common.rpc.proxy.RpcProxy):

def __init__(self):
super(EngineClient, self).__init__(
topic=FLAGS.engine_topic,
default_version=self.BASE_RPC_API_VERSION)
topic=FLAGS.engine_topic,
default_version=self.BASE_RPC_API_VERSION)

def identify_stack(self, ctxt, stack_name):
"""

@@ -160,9 +160,10 @@ class EngineClient(heat.openstack.common.rpc.proxy.RpcProxy):
:param params: Params passed from API.
"""
rpc_method = self.cast if cast else self.call
return rpc_method(ctxt, self.make_msg('delete_stack',
stack_identity=stack_identity),
topic=_engine_topic(self.topic, ctxt, None))
return rpc_method(ctxt,
self.make_msg('delete_stack',
stack_identity=stack_identity),
topic=_engine_topic(self.topic, ctxt, None))

def list_resource_types(self, ctxt):
"""

@@ -122,19 +122,18 @@ class _Win32Colorizer(object):
def __init__(self, stream):
import win32console as win
red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN,
win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY)
win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY
)
self.stream = stream
self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE)
self._colors = {
'normal': red | green | blue,
'red': red | bold,
'green': green | bold,
'blue': blue | bold,
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold
}
self._colors = {'normal': red | green | blue,
'red': red | bold,
'green': green | bold,
'blue': blue | bold,
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold}

def supported(cls, stream=sys.stdout):
try:

@@ -143,8 +143,8 @@ class Fedora(Distro):


def get_distro():
if os.path.exists('/etc/fedora-release') or \
os.path.exists('/etc/redhat-release'):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora()
else:
return Distro()

@@ -228,8 +228,9 @@ def parse_args():
"""Parse command-line arguments"""
parser = optparse.OptionParser()
parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
default=False, action="store_true",
help="Do not inherit packages from global Python install")
default=False, action="store_true",
help=
"Do not inherit packages from global Python install")
return parser.parse_args()
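The get_distro hunk near the end shows the last recurring pattern in this cleanup: a multi-line condition is wrapped in parentheses rather than continued with a backslash, so the continuation indent can satisfy the checker. A minimal, self-contained sketch of the same idiom (the function name is illustrative, not taken from the diff):

    import os

    def looks_like_fedora():
        # Parenthesised condition instead of a backslash continuation.
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return True
        return False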