Retire stackforge/rubick

Monty Taylor 2015-10-17 16:04:41 -04:00
parent 58a0832ac3
commit 011cc58b5b
112 changed files with 7 additions and 39804 deletions

21
.gitignore vendored

@@ -1,21 +0,0 @@
# Python stuff
*.pyc
.idea
ProcfileHonchoLocal
.venv
.tox
tags
.testrepository
# Vim stuff
.swp
.swo
.*.swp
.*.swo
.ropeproject
# Vagrant state
.vagrant
vagrant_ansible_inventory_dev
dev.out.txt
web.out.txt


@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/rubick.git


@@ -1,8 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
             ${PYTHON:-python} -m subunit.run discover -t ./ ./ $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list


@@ -1,32 +0,0 @@
# Rubick

Rubick is a tool to analyze an OpenStack installation for possible problems. It
is a library that provides a representation of the OpenStack configuration and
inspection/validation/analysis actions on that representation.

## Config representation

The first step in creating a representation of OpenStack architecture and
configuration is collecting data from an installation of the platform. There
are several ways to collect that data, including automated discovery from
different sources. The simplest way is to parse a pre-populated directory
structure that contains configuration files of OpenStack services from
different nodes in a cluster.

With more sophisticated discovery engines, those files can be collected
automatically via SSH, based on inspection of the process list at every node
listed in the hypervisor inventory of the OpenStack Compute service, and even
more complicated scenarios. However, that is the scope of a specialized
discovery service, which Rubick is not at the moment.

The next step is to organize all the collected data into a single data
structure, called the OpenStack configuration model. This is an object model
that includes physical nodes of the cluster, OpenStack services and their
instances, configuration parameters, etc. See the detailed description of the
proposed model in the documentation.

## Config analysis

Once the OpenStack configuration model is created, it can be used to validate
the correctness of static OpenStack settings, as well as the dynamic state of
the OpenStack cluster.
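
A minimal end-to-end sketch, based on the discovery and inspection entry points
used by the CLI script in this repository (`OpenstackDiscovery.discover` and
`Inspection.all_inspections`); the node address and key path are placeholders:

```python
from rubick.common import Inspection
from rubick.discovery import OpenstackDiscovery
import rubick.inspections  # noqa -- registers the bundled inspections
import rubick.schemas      # noqa -- registers the configuration schemas

# Read the SSH private key used to reach the cluster nodes (placeholder path)
with open('test_rsa') as f:
    private_key = f.read()

# Collect data over SSH and build the configuration model (placeholder address)
openstack = OpenstackDiscovery().discover(['10.0.0.1'],
                                          private_key=private_key)

# Run every registered inspection against the model and print the findings
for inspection in Inspection.all_inspections():
    inspection().inspect(openstack)

for issue in openstack.issues:
    print(issue)
```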

7
README.rst Normal file

@@ -0,0 +1,7 @@
This project is no longer maintained.

The contents of this repository are still available in the Git source code
management system. To see the contents of this repository before it reached
its end of life, please check out the previous commit with
"git checkout HEAD^1".

21
Vagrantfile vendored

@@ -1,21 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  config.vm.define "web" do |web|
    web.vm.box = "ubuntu12.04-server-amd64"
    web.vm.box_url = "http://goo.gl/8kWkm"

    web.vm.network "forwarded_port", guest: 8008, host: 8008, host_ip: '0.0.0.0'

    web.vm.provider "virtualbox" do |vb|
      vb.customize ["modifyvm", :id, "--memory", "1024"]
      vb.customize ["modifyvm", :id, "--cpus", "1"]
    end

    web.vm.provision :chef_solo do |chef|
      chef.log_level = :debug
      chef.cookbooks_path = ["vagrant/cookbooks"]
      chef.add_recipe "rubick"
    end
  end
end


@@ -1,136 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import json
from itertools import groupby
import logging
import sys

from rubick.common import MarkedIssue, Inspection
from rubick.discovery import OpenstackDiscovery
import rubick.inspections  # noqa
import rubick.schemas  # noqa
from rubick.json import openstack_for_json


def indent_prefix(indent=0):
    s = ''
    if indent > 0:
        for i in range(0, indent):
            s += '  '
    return s


def print_issue(issue, indent=0):
    prefix = indent_prefix(indent)

    if hasattr(issue, 'mark'):
        print(
            '%s[%s] %s (line %d column %d)' %
            (prefix, issue.type, issue.message,
             issue.mark.line + 1, issue.mark.column + 1))
    else:
        print('%s[%s] %s' % (prefix, issue.type, issue.message))


def print_issues(issues, indent=0):
    # group issues by source file; issues without a mark are grouped under None
    issue_source_f = lambda i: i.mark.source if isinstance(
        i, MarkedIssue) else None
    source_groupped_issues = groupby(
        sorted(issues, key=issue_source_f), key=issue_source_f)

    for source, issues in source_groupped_issues:
        if source:
            print('%sFile %s' % (indent_prefix(indent), source))
            for issue in sorted(issues, key=lambda i: i.mark.line):
                print_issue(issue, indent + 1)
        else:
            for issue in issues:
                print_issue(issue, indent)


def print_service(service):
    print('  ' + service.name)
    print_issues(service.issues, indent=3)


def print_path(path):
    print('  ' + path.path)
    print_issues(path.all_issues, indent=3)


def print_host(host):
    print(host)
    print_issues(host.issues, indent=1)

    print('  Services:')
    for service in sorted(host.components, key=lambda c: c.name):
        print_service(service)

    print('  Filesystem:')
    for path in sorted(host.filesystem.values(), key=lambda f: f.path):
        print_path(path)


def print_openstack(openstack):
    print_issues(openstack.issues)

    for host in openstack.hosts:
        print_host(host)


def parse_args(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--loglevel', default='INFO',
                        help='Loglevel to use')
    parser.add_argument('-j', '--json', dest='json', default=False,
                        action='store_true',
                        help='Output result in JSON format')
    args = parser.parse_args(argv[1:])

    return args


def main(argv):
    args = parse_args(argv)
    params = vars(args)

    logging.basicConfig(level=logging.WARNING)
    logging.getLogger('rubick').setLevel(params['loglevel'])

    discovery = OpenstackDiscovery()

    # read the SSH private key from a local file, falling back to stdin
    try:
        with open('test_rsa') as f:
            private_key = f.read()
    except Exception:
        private_key = sys.stdin.read()

    openstack = discovery.discover(
        ['172.18.65.179'],
        private_key=private_key)

    # run every registered inspection against the discovered model
    all_inspections = Inspection.all_inspections()
    for inspection in all_inspections:
        x = inspection()
        x.inspect(openstack)

    if params['json']:
        print(json.dumps(openstack_for_json(openstack)))
    else:
        print_openstack(openstack)


if __name__ == '__main__':
    main(sys.argv)

Nine binary files (documentation images) are not shown; sizes before deletion:
1.1 MiB, 35 KiB, 13 KiB, 44 KiB, 50 KiB, 22 KiB, 214 KiB, 8.6 KiB, 25 KiB.


@@ -1,132 +0,0 @@
digraph "classes_Rubick" {
charset="utf-8"
rankdir=BT
"4" [shape="record", label="{KeystoneEndpointsInspection|name : str\ldescription\l|inspect()\l}"];
"6" [shape="record", label="{SimpleNodeDiscovery|logger : NoneType, RootLogger\l|test_connection()\ldiscover()\l}"];
"7" [shape="record", label="{SshShell|\l|}"];
"8" [shape="record", label="{NodeClient|use_sudo\lshell\llogger : NoneType, RootLogger\l|open()\lrun()\l}"];
"9" [shape="record", label="{JokerNodeDiscovery|logger : NoneType, RootLogger\l|test_connection()\ldiscover()\l}"];
"10" [shape="record", label="{OpenstackDiscovery|logger : NoneType, RootLogger\lnode_discovery_klass\l|discover()\ltest_connection()\l}"];
"12" [shape="record", label="{KeystoneAuthtokenSettingsInspection|name : str\ldescription : str\l|inspect()\l}"];
"14" [shape="record", label="{LettuceRunnerInspection|base_path\l|rules()\linspect()\l}"];
"16" [shape="record", label="{Configuration|\l|set()\lget()\lkeys()\lsection()\lcontains()\lis_default()\litems()\lset_default()\l}"];
"17" [shape="record", label="{ConfigSection|name\lparameters\l|}"];
"18" [shape="record", label="{TextElement|text\l|}"];
"19" [shape="record", label="{Element|end_mark\lstart_mark\l|}"];
"20" [shape="record", label="{ComponentConfig|errors : list\lsections : list\lname\l|}"];
"21" [shape="record", label="{ConfigurationWrapper|state\lconfig\l|}"];
"22" [shape="record", label="{ConfigParameterName|\l|}"];
"23" [shape="record", label="{ConfigParameterValue|quotechar : NoneType\lvalue : NoneType\l|}"];
"24" [shape="record", label="{ConfigSectionName|\l|}"];
"25" [shape="record", label="{ConfigurationSection|section\lconfig\l|set()\lget()\lkeys()\lcontains()\lis_default()\litems()\lset_default()\l}"];
"26" [shape="record", label="{ConfigParameter|delimiter\lname\lvalue\l|}"];
"29" [shape="record", label="{InspectionRequest|username\lnodes\lpassword : NoneType\lprivate_key : NoneType\l|}"];
"30" [shape="record", label="{InspectionResult|request\lvalue\l|}"];
"38" [shape="record", label="{SchemaWriter|project\lversion\lfile\l|comment()\lparam()\lsection()\l}"];
"43" [shape="record", label="{Cluster|data : dict\l|as_doc()\lfor_json()\lfrom_doc()\l}"];
"44" [shape="record", label="{RuleGroup|all : list\lHA : str\lVALIDITY : str\lBEST_PRACTICES : str\l|}"];
"47" [fontcolor="red", shape="record", label="{RubickException|\l|}"];
"48" [fontcolor="red", shape="record", label="{SchemaException|\l|}"];
"49" [fontcolor="red", shape="record", label="{ValidatorException|\l|}"];
"55" [shape="record", label="{MarkTests|\l|test_merge()\ltest_creation()\l}"];
"57" [shape="record", label="{StringDictTypeValidatorTests|type_name : str\l|test_single_value()\ltest_empty_value()\ltest_list_of_values()\l}"];
"58" [shape="record", label="{StringTypeValidatorTests|type_name : str\l|test_validation_always_passes()\ltest_empty_string_passes()\ltest_should_return_same_string_if_valid()\l}"];
"59" [shape="record", label="{TypeValidatorTestHelper|validator\l|setUp()\lassertInvalid()\lassertValid()\l}"];
"60" [shape="record", label="{IntegerTypeValidatorTests|type_name : str\l|test_negative_values_are_valid()\ltest_positive_values_are_valid()\ltest_invalid_char_error_contains_proper_column_in_mark()\ltest_invalid_char_error_contains_proper_column_if_leading_whitespaces()\ltest_trailing_whitespace_is_ignored()\ltest_non_digits_are_invalid()\ltest_returns_integer_if_valid()\ltest_zero_is_valid()\ltest_leading_whitespace_is_ignored()\l}"];
"61" [shape="record", label="{NetworkAddressTypeValidatorTests|type_name : str\l|test_no_prefix_length()\ltest_non_integer_prefix_length()\ltest_prefix_greater_than_32()\ltest_ipv4_network()\ltest_value_with_less_than_4_numbers_separated_by_dots()\ltest_returns_address()\ltest_ipv4_like_string_with_numbers_greater_than_255()\l}"];
"62" [shape="record", label="{PortTypeValidatorTests|type_name : str\l|test_leading_and_or_trailing_whitespace_is_ignored()\ltest_high_boundary_is_valid()\ltest_returns_integer_if_valid()\ltest_zero_invalid()\ltest_negatives_are_invalid()\ltest_non_digits_are_invalid()\ltest_empty()\ltest_low_boundary_is_valid()\ltest_values_greater_than_65535_are_invalid()\ltest_positive_integer()\l}"];
"63" [shape="record", label="{BooleanTypeValidatorTests|type_name : str\l|test_True()\ltest_other_values_produce_error()\ltest_False()\l}"];
"64" [shape="record", label="{HostAndPortTypeValidatorTests|type_name : str\l|test_no_port()\ltest_port_is_not_an_integer()\ltest_port_is_greater_than_65535()\ltest_value_with_less_than_4_numbers_separated_by_dots()\ltest_returns_address()\ltest_ipv4_like_string_with_numbers_greater_than_255()\ltest_ipv4_address()\l}"];
"65" [shape="record", label="{HostAddressTypeValidatorTests|type_name : str\l|test_value_with_less_than_4_numbers_separated_by_dots()\ltest_host_with_empty_parts()\ltest_mark_should_point_to_incorrect_symbol()\ltest_host_parts_with_invalid_chars()\ltest_host_with_single_host_label()\ltest_host_name()\ltest_returns_address()\ltest_ipv4_like_string_with_numbers_greater_than_255()\ltest_host_that_ends_with_a_hyphen()\ltest_ipv4_address()\ltest_host_part_starting_with_non_letter()\l}"];
"66" [shape="record", label="{StringListTypeValidatorTests|type_name : str\l|test_single_value()\ltest_empty_value()\ltest_list_of_values()\l}"];
"68" [shape="record", label="{FileResource|owner\lpath\lgroup\lcontents\lpermissions\l|}"];
"69" [shape="record", label="{IssueReporter|issues : list\l|all_issues()\lreport_issue()\l}"];
"70" [shape="record", label="{CinderSchedulerComponent|config_files : list\lversion\lcomponent : str\lname : str\l|}"];
"71" [shape="record", label="{MysqlComponent|config_files : list\lversion\lcomponent : str\lname : str\l|}"];
"72" [shape="record", label="{Service|issues : list\l|report_issue()\lall_issues()\lhost()\lopenstack()\l}"];
"73" [shape="record", label="{Host|components : list\lname\lnetwork_addresses : list\lid\l|openstack()\ladd_component()\lall_issues()\l}"];
"74" [shape="record", label="{NovaApiComponent|config_files : list\lversion\lpaste_config_file : NoneType\lcomponent : str\lname : str\l|paste_config()\lall_issues()\l}"];
"75" [shape="record", label="{KeystoneComponent|config_files : list\lversion\ldb : dict\lcomponent : str\lname : str\l|}"];
"76" [shape="record", label="{GlanceApiComponent|config_files : list\lversion\lcomponent : str\lname : str\l|}"];
"77" [shape="record", label="{CinderApiComponent|config_files : list\lversion\lpaste_config_file : NoneType\lcomponent : str\lname : str\l|}"];
"78" [shape="record", label="{NovaComputeComponent|config_files : list\lversion\lcomponent : str\lname : str\l|}"];
"79" [shape="record", label="{NovaSchedulerComponent|config_files : list\lversion\lcomponent : str\lname : str\l|}"];
"80" [shape="record", label="{OpenstackComponent|logger : NoneType, RootLogger\lcomponent : NoneType\l|config()\l}"];
"81" [shape="record", label="{RabbitMqComponent|version : str\lname : str\l|}"];
"82" [shape="record", label="{GlanceRegistryComponent|config_files : list\lversion\lcomponent : str\lname : str\l|}"];
"83" [shape="record", label="{CinderVolumeComponent|config_files : list\lversion\lrootwrap_config : NoneType\lcomponent : str\lname : str\l|}"];
"84" [shape="record", label="{Openstack|hosts : list\l|components()\ladd_host()\lall_issues()\l}"];
"87" [shape="record", label="{IniConfigParser|key_value_re\l|parse()\l}"];
"90" [shape="record", label="{Inspection|\l|rules()\lall_inspections()\linspect()\l}"];
"91" [shape="record", label="{Issue|message\ltype\lINFO : str\lWARNING : str\lFATAL : str\lERROR : str\l|}"];
"92" [shape="record", label="{MarkedIssue|mark\l|offset_by()\l}"];
"93" [shape="record", label="{Mark|column : int\lsource\lline : int\l|merge()\l}"];
"94" [shape="record", label="{Version|parts : list\l|major()\lmaintenance()\lminor()\l}"];
"95" [shape="record", label="{Error|message\l|}"];
"97" [shape="record", label="{ConfigSchemaRegistryTests|\l|test_sample()\l}"];
"99" [shape="record", label="{IniConfigParserTests|parser\l|test_default_section_name()\ltest_multiline_value()\lparse()\ltest_use_equals_delimiter_if_it_comes_before_colon()\ltest_errors_doesnt_affect_valid_parameters()\ltest_colon_as_delimiter()\ltest_wrapping_value_with_double_quotes_and_trailing_whitespace()\ltest_parsing_with_same_section()\ltest_wrapping_value_with_single_quotes_and_trailing_whitespace()\ltest_hash_in_value_is_part_of_the_value()\ltest_whole_line_comments_starting_with_hash()\ltest_returning_multiple_errors()\lsetUp()\ltest_spaces_in_key_causes_error()\ltest_multiline_value_finished_by_other_parameter()\ltest_use_colon_delimiter_if_it_comes_before_equals_sign()\ltest_wrapping_value_with_single_quotes()\ltest_whole_line_comments_starting_with_semicolon()\ltest_unclosed_section_causes_error()\ltest_parsing_with_different_sections()\lassertAttributes()\ltest_parsing_with_section()\ltest_missing_equals_sign_or_colon_causes_error()\lassertParameter()\ltest_parsing_iolike_source()\ltest_wrapping_value_with_double_quotes()\ltest_multiline_value_finished_by_empty_line()\ltest_parsing()\l}"];
"101" [shape="record", label="{memoized|cache : dict\lfunc\l|}"];
"104" [shape="record", label="{ConfigurationTests|default_value : str\lsection : str\lvalue : str\lparam : str\lfullparam\l|test_explicit_default_on_get()\ltest_contains_default()\ltest_is_default_returns_true_if_only_default_value_set()\ltest_normal_overrides_default()\ltest_keys()\ltest_storage()\ltest_cycle_template_substitution_resolves_in_empty_string()\ltest_subsection_keys()\ltest_subsection_getitem()\ltest_subsection_contains()\ltest_subsection_get()\ltest_subsection_items()\ltest_default()\ltest_is_default_returns_false_if_param_missing()\ltest_returns_section_object_even_if_section_doesnot_exist()\ltest_template_substitution()\ltest_parameter_names_containing_sections()\ltest_is_default_returns_false_if_both_values_set()\ltest_getitem()\ltest_contains()\ltest_subsection_setitem()\ltest_subsection_set()\ltest_is_default_returns_false_if_normal_value_set()\ltest_parameter_with_default_section()\ltest_empty()\ltest_getting_raw_values()\ltest_setitem()\ltest_contains_normal()\l}"];
"106" [shape="record", label="{VersionTests|\l|test_equility()\ltest_creation_from_components()\ltest_non_equility()\ltest_creation_from_string()\ltest_creation_from_string_with_less_parts()\ltest_creation_from_other_version()\ltest_comparision()\l}"];
"109" [shape="record", label="{ParseError|\l|}"];
"114" [shape="record", label="{TypeValidatorRegistry|\l|register_validator()\lget_validator()\l}"];
"115" [shape="record", label="{SchemaVersionRecord|checkpoint\lremovals : list\lversion\ladds : list\l|section()\lparam()\lremove_param()\l}"];
"116" [shape="record", label="{ConfigParameterSchema|name\ldefault : NoneType\lsection : NoneType\lrequired : bool\ldeprecation_message : NoneType\ltype\ldescription : NoneType\l|}"];
"117" [shape="record", label="{TypeValidator|f\l|validate()\l}"];
"118" [shape="record", label="{ConfigSchema|version\lname\lparameters\lformat\l|get_parameter()\lhas_section()\l}"];
"119" [shape="record", label="{ConfigSchemaRegistry|\l|register_schema()\lget_schema()\l}"];
"120" [shape="record", label="{InvalidValueError|\l|}"];
"121" [shape="record", label="{SchemaBuilder|data\l|version()\l}"];
"122" [fontcolor="red", shape="record", label="{SchemaError|\l|}"];
"123" [shape="record", label="{SchemaIssue|\l|}"];
"4" -> "90" [arrowtail="none", arrowhead="empty"];
"12" -> "90" [arrowtail="none", arrowhead="empty"];
"14" -> "90" [arrowtail="none", arrowhead="empty"];
"17" -> "19" [arrowtail="none", arrowhead="empty"];
"18" -> "19" [arrowtail="none", arrowhead="empty"];
"20" -> "19" [arrowtail="none", arrowhead="empty"];
"22" -> "18" [arrowtail="none", arrowhead="empty"];
"23" -> "18" [arrowtail="none", arrowhead="empty"];
"24" -> "18" [arrowtail="none", arrowhead="empty"];
"26" -> "19" [arrowtail="none", arrowhead="empty"];
"48" -> "47" [arrowtail="none", arrowhead="empty"];
"49" -> "47" [arrowtail="none", arrowhead="empty"];
"57" -> "59" [arrowtail="none", arrowhead="empty"];
"58" -> "59" [arrowtail="none", arrowhead="empty"];
"60" -> "59" [arrowtail="none", arrowhead="empty"];
"61" -> "59" [arrowtail="none", arrowhead="empty"];
"62" -> "59" [arrowtail="none", arrowhead="empty"];
"63" -> "59" [arrowtail="none", arrowhead="empty"];
"64" -> "59" [arrowtail="none", arrowhead="empty"];
"65" -> "59" [arrowtail="none", arrowhead="empty"];
"66" -> "59" [arrowtail="none", arrowhead="empty"];
"68" -> "69" [arrowtail="none", arrowhead="empty"];
"70" -> "80" [arrowtail="none", arrowhead="empty"];
"71" -> "72" [arrowtail="none", arrowhead="empty"];
"72" -> "69" [arrowtail="none", arrowhead="empty"];
"73" -> "69" [arrowtail="none", arrowhead="empty"];
"74" -> "80" [arrowtail="none", arrowhead="empty"];
"75" -> "80" [arrowtail="none", arrowhead="empty"];
"76" -> "80" [arrowtail="none", arrowhead="empty"];
"77" -> "80" [arrowtail="none", arrowhead="empty"];
"78" -> "80" [arrowtail="none", arrowhead="empty"];
"79" -> "80" [arrowtail="none", arrowhead="empty"];
"80" -> "72" [arrowtail="none", arrowhead="empty"];
"81" -> "72" [arrowtail="none", arrowhead="empty"];
"82" -> "80" [arrowtail="none", arrowhead="empty"];
"83" -> "80" [arrowtail="none", arrowhead="empty"];
"84" -> "69" [arrowtail="none", arrowhead="empty"];
"92" -> "91" [arrowtail="none", arrowhead="empty"];
"109" -> "92" [arrowtail="none", arrowhead="empty"];
"120" -> "92" [arrowtail="none", arrowhead="empty"];
"122" -> "47" [arrowtail="none", arrowhead="empty"];
"123" -> "91" [arrowtail="none", arrowhead="empty"];
"7" -> "8" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="shell"];
"9" -> "10" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="node_discovery_klass"];
"68" -> "74" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="paste_config_file"];
"68" -> "77" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="paste_config_file"];
"68" -> "83" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="rootwrap_config"];
"87" -> "99" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="parser"];
"94" -> "115" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="version"];
"94" -> "118" [arrowhead="diamond", style="solid", arrowtail="none", fontcolor="green", label="version"];
}


@@ -1,30 +0,0 @@
@startuml
frame "Peter" {
    [network emulation]
    cloud {
        [demo scenario]
    }
}
frame "Sergey" {
    [network emulation] --> [salt bootstrap]
    [salt bootstrap] --> [nodes discovery]
}
frame "Max" {
    [config files collector]
    [config-inspector] -up-> [demo scenario]
}
frame "Ilya" {
    [tripleo-image-elements] --> [os-collect-config]
    [tripleo-heat-templates] --> [os-collect-config]
}
frame "Kirill" {
    [rules editing engine] <-- [config-inspector]
    [rules editing engine] --> [demo scenario]
}
[nodes discovery] --> nodelist
nodelist --> [config files collector]
[config files collector] --> JSON
JSON --> [config-inspector]
[os-collect-config] --> JSON
@enduml


@@ -1,7 +0,0 @@
@startuml
(*) -right-> [<i>OpenStack Services</i>\nNova, Keystone, Neutron,\nGlance, Heat, Swift] "Deployment"
"Deployment" -right-> [<i>OpenStack Deployment</i>\nFuel, TripleO, Devstack] "Operation\nMaintenance"
"Operation\nMaintenance" -right-> [<i>DRAGONS?</i>\nTuskar, <b>Rubick</b>] (*)
@enduml


@@ -1,61 +0,0 @@
@startuml
frame "Undercloud" {
    package "Metadata services" {
        [Heat]
        [CFN]
        [EC2]
    }
    frame "TripleO" {
        cloud {
            folder "tripleo-image-elements" {
                () "nova.conf"
                () "keystone.conf"
                () "glance.conf"
                () "..."
            }
            [diskimage-builder] -- nova.conf
            [diskimage-builder] -- keystone.conf
            [diskimage-builder] -- glance.conf
            [diskimage-builder] -- ...
        }
        [os-collect-config] --> JSON
        JSON --> [os-refresh-config]
    }
    frame "Tuskar" {
        [Tuskar]
    }
    frame "OpenStack Dashboard" {
        [Tuskar-UI]
        [Rubick-UI]
    }
    cloud {
        [OpenStack Diagnostics] << Rubick >>
    }
    () HOT
    () Templates
    frame "Glance" {
        [Images]
    }
}
[Heat] --> [os-collect-config]
[CFN] --> [os-collect-config]
[EC2] --> [os-collect-config]
[Tuskar] -- HOT
HOT -- [Heat]
HOT -- [OpenStack Diagnostics]
[OpenStack Diagnostics] -- [Rubick-UI]
[Tuskar] -- [Tuskar-UI]
[diskimage-builder] -right-> [Images]
[diskimage-builder] -up-> Templates
Templates --> [OpenStack Diagnostics]
@enduml


@@ -1,15 +0,0 @@
@startuml
User -> Tuskar: Create cluster (metadata)
Tuskar -> Heat: Create HOT (metadata)
Tuskar -> diskimagebuilder: Create images\n(config files templates)
Tuskar -> Rubick: Verify config
Rubick -> Heat: Get HOT
Heat -> Rubick: HOT (metadata)
Rubick -> diskimagebuilder: Get config\nfiles templates
diskimagebuilder -> Rubick: Templates
Rubick -> Rubick: Create data model\nInspect config
Rubick -> Tuskar: Config report
Tuskar -> User: Config report
@enduml


@@ -1,103 +0,0 @@
digraph "packages_Rubick" {
charset="utf-8"
rankdir=BT
"3" [shape="box", label="rubick.inspections.keystone_endpoints"];
"5" [shape="box", label="rubick.discovery"];
"11" [shape="box", label="rubick.inspections.keystone_authtoken"];
"13" [shape="box", label="rubick.inspections.lettuce_runner"];
"15" [shape="box", label="rubick.config_model"];
"27" [shape="box", label="rubick.main"];
"28" [shape="box", label="rubick.celery"];
"31" [shape="box", label="rubick"];
"32" [shape="box", label="rubick.config_formats"];
"33" [shape="box", label="rubick.schemas.glance"];
"34" [shape="box", label="rubick.schemas.swift.v2013_2"];
"35" [shape="box", label="rubick.schemas.glance.v2013_2"];
"36" [shape="box", label="rubick.json"];
"37" [shape="box", label="rubick.schemas.schema_generator"];
"39" [shape="box", label="rubick.schemas.keystone.v2013_2"];
"40" [shape="box", label="rubick.schemas.nova.v2013_2"];
"41" [shape="box", label="rubick.schemas.cinder"];
"42" [shape="box", label="rubick.database"];
"45" [shape="box", label="rubick.schemas.nova.v2013_1_4"];
"46" [shape="box", label="rubick.exceptions"];
"50" [shape="box", label="rubick.schemas.nova.v2013_1_3"];
"51" [shape="box", label="rubick.schemas.nova"];
"52" [shape="box", label="rubick.schemas.keystone.v2013_1_4"];
"53" [shape="box", label="rubick.schemas.keystone.v2013_1_3"];
"54" [shape="box", label="rubick.test_mark"];
"56" [shape="box", label="rubick.test_type_validators"];
"67" [shape="box", label="rubick.model"];
"85" [shape="box", label="rubick.inspections"];
"86" [shape="box", label="rubick.config_formats.ini"];
"88" [shape="box", label="rubick.schemas.neutron.v2013_2"];
"89" [shape="box", label="rubick.common"];
"96" [shape="box", label="rubick.test_config_schema_registry"];
"98" [shape="box", label="rubick.config_formats.test_ini"];
"100" [shape="box", label="rubick.utils"];
"102" [shape="box", label="rubick.schemas.keystone"];
"103" [shape="box", label="rubick.test_configuration"];
"105" [shape="box", label="rubick.test_version"];
"107" [shape="box", label="rubick.schemas.swift"];
"108" [shape="box", label="rubick.config_formats.common"];
"110" [shape="box", label="rubick.schemas.cinder.v2013_2"];
"111" [shape="box", label="rubick.schemas.neutron"];
"112" [shape="box", label="rubick.schemas"];
"113" [shape="box", label="rubick.schema"];
"124" [shape="box", label="rubick.schemas.cinder.v2013_1_3"];
"3" -> "89" [arrowtail="none", arrowhead="open"];
"5" -> "67" [arrowtail="none", arrowhead="open"];
"5" -> "46" [arrowtail="none", arrowhead="open"];
"5" -> "89" [arrowtail="none", arrowhead="open"];
"11" -> "89" [arrowtail="none", arrowhead="open"];
"13" -> "89" [arrowtail="none", arrowhead="open"];
"27" -> "31" [arrowtail="none", arrowhead="open"];
"28" -> "85" [arrowtail="none", arrowhead="open"];
"28" -> "28" [arrowtail="none", arrowhead="open"];
"28" -> "89" [arrowtail="none", arrowhead="open"];
"28" -> "42" [arrowtail="none", arrowhead="open"];
"28" -> "36" [arrowtail="none", arrowhead="open"];
"28" -> "5" [arrowtail="none", arrowhead="open"];
"31" -> "27" [arrowtail="none", arrowhead="open"];
"32" -> "89" [arrowtail="none", arrowhead="open"];
"32" -> "86" [arrowtail="none", arrowhead="open"];
"33" -> "35" [arrowtail="none", arrowhead="open"];
"34" -> "113" [arrowtail="none", arrowhead="open"];
"35" -> "113" [arrowtail="none", arrowhead="open"];
"39" -> "113" [arrowtail="none", arrowhead="open"];
"40" -> "113" [arrowtail="none", arrowhead="open"];
"41" -> "124" [arrowtail="none", arrowhead="open"];
"45" -> "113" [arrowtail="none", arrowhead="open"];
"50" -> "113" [arrowtail="none", arrowhead="open"];
"51" -> "50" [arrowtail="none", arrowhead="open"];
"52" -> "113" [arrowtail="none", arrowhead="open"];
"53" -> "113" [arrowtail="none", arrowhead="open"];
"54" -> "89" [arrowtail="none", arrowhead="open"];
"56" -> "113" [arrowtail="none", arrowhead="open"];
"56" -> "89" [arrowtail="none", arrowhead="open"];
"67" -> "113" [arrowtail="none", arrowhead="open"];
"67" -> "89" [arrowtail="none", arrowhead="open"];
"67" -> "15" [arrowtail="none", arrowhead="open"];
"67" -> "100" [arrowtail="none", arrowhead="open"];
"67" -> "32" [arrowtail="none", arrowhead="open"];
"85" -> "11" [arrowtail="none", arrowhead="open"];
"85" -> "13" [arrowtail="none", arrowhead="open"];
"85" -> "3" [arrowtail="none", arrowhead="open"];
"86" -> "15" [arrowtail="none", arrowhead="open"];
"86" -> "108" [arrowtail="none", arrowhead="open"];
"88" -> "113" [arrowtail="none", arrowhead="open"];
"96" -> "113" [arrowtail="none", arrowhead="open"];
"96" -> "89" [arrowtail="none", arrowhead="open"];
"98" -> "86" [arrowtail="none", arrowhead="open"];
"102" -> "53" [arrowtail="none", arrowhead="open"];
"103" -> "15" [arrowtail="none", arrowhead="open"];
"105" -> "113" [arrowtail="none", arrowhead="open"];
"107" -> "34" [arrowtail="none", arrowhead="open"];
"108" -> "89" [arrowtail="none", arrowhead="open"];
"110" -> "113" [arrowtail="none", arrowhead="open"];
"111" -> "88" [arrowtail="none", arrowhead="open"];
"112" -> "41" [arrowtail="none", arrowhead="open"];
"113" -> "89" [arrowtail="none", arrowhead="open"];
"113" -> "46" [arrowtail="none", arrowhead="open"];
"124" -> "113" [arrowtail="none", arrowhead="open"];
}


@@ -1,23 +0,0 @@
@startuml
package "common.py" {
    class "Inspection" {
    }
    class "Issue" {
    }
    class "Mark" {
    }
    class "Error" {
    }
    class "Version" {
    }
}
package "model.py" {
    class "Model" {
    }
}
Inspection --|> Issue
@enduml


@@ -1,36 +0,0 @@
@startuml
frame "Rubick" {
    [Rubick API]
    [Rule engine]
    [Config data\nextractor]
    [Heat metadata\n plugin]
    [SSH metadata\nplugin]
    [...]
    [Config data\nstore]
    () "openstack.model"
    folder "Rulesets" {
        [healthcheck\nruleset]
        [best practices\nruleset]
    }
}
frame "Heat" {
    [Heat API]
}
() Stack
[Rubick API] -- openstack.model
[Config data\nstore] -- openstack.model
[Heat API] -up-> Stack
Stack -up-> [Heat metadata\n plugin]
[Config data\nextractor] -up- openstack.model
[Rule engine] -- openstack.model
[Config data\nextractor] -- [Heat metadata\n plugin]
[Config data\nextractor] -- [...]
[Config data\nextractor] -- [SSH metadata\nplugin]
[Rule engine] -up- [healthcheck\nruleset]
[Rule engine] -up- [best practices\nruleset]
@enduml


@@ -1,93 +0,0 @@
Architecture Data Model
=======================

Overview
--------

We want to introduce a unified data structure which contains all information
necessary to inspect, analyze, describe and visualize OpenStack architecture.
The architecture data model serves multiple actual and potential use cases.

Diagnostics
^^^^^^^^^^^

The architecture data model provides the data necessary for the configuration
analysis and diagnostics tool (**Rubick**).

Deployment
^^^^^^^^^^

The architecture data model must include all information necessary for
deployment systems (e.g. **Fuel** or **TripleO**). We will implement simple
conversion tools which will allow configuring these deployment systems and
effectively support 'portable' clouds.

Benchmarking
^^^^^^^^^^^^

This model could be reused by the **Rally** project to compare benchmarking
results for different architectures. Definitions of architectures must be
comparable and portable, which is exactly what the architecture model aims to
solve.

Upgrade
^^^^^^^

An upgrade system could potentially utilize the model just in the way the
deployment systems do. In addition, existing clouds could be inspected and
described for subsequent upgrade using this model.

Tech Support
^^^^^^^^^^^^

The model serves as the base for a questionnaire to assess existing
installations for support contract pricing purposes.

Hardening
^^^^^^^^^

The model could be used to perform automated/guided hardening of OpenStack
architecture and configuration. This is achieved through the use of 'best
practice' rulesets for the inspection of the cloud.

Expert system
^^^^^^^^^^^^^

The model could be used as a part of a production/reactive rules system capable
of automated reporting and handling of operational errors, based on a
combination of the *base* status of the cloud, logging messages and
notifications.

Data Format
-----------

This section proposes a data model format which allows describing an OpenStack
installation. The model includes data regarding physical infrastructure,
logical topology of services and the mapping between the two.

The current model of OpenStack architecture used in Rubick is defined in module
``rubick/model.py``. This module contains the following classes in the
hierarchy below::

    OpenStack:
      hosts:
        - Host: hostname1
          components:
            - Service: NovaApiComponent
              config:
                - key: value
                - ...
            - Service: KeystoneComponent
              config:
                - key: value
                - ...
            - ...
          filesystem:
            - resource1: FileResource
            - resource2: DirectoryResource
            - ...
        - Host: hostname2
          components:
            - ...
          filesystem:
            - ...
        - ...
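
For illustration, a short traversal over this model could look like the
following sketch (it assumes ``openstack`` is an already-populated model and
uses only attributes referenced elsewhere in this repository, such as
``hosts``, ``components``, ``name`` and per-component ``config``)::

    # walk hosts and their services in a discovered installation
    for host in openstack.hosts:
        print(host.name)
        for service in host.components:
            print('  ' + service.name)
            # each component carries its parsed configuration
            print('    verbose = %s' % service.config['verbose'])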


@@ -1,108 +0,0 @@
==============================
OPENSTACK DIAGNOSTICS PROPOSAL
==============================

.. contents::

Project Name
============

**Official:** OpenStack Diagnostics

**Codename:** Rubick

OVERVIEW
========

The typical OpenStack cloud life cycle consists of 2 phases:

- initial deployment and
- operation maintenance

OpenStack cloud operators usually rely on deployment tools to configure all the
platform components correctly and efficiently in the **initial deployment**
phase. Multiple OpenStack projects cover that area: TripleO/Tuskar, Fuel and
Devstack, to name a few.

However, once you have installed and kicked off the cloud, platform
configurations and operational conditions begin to change. These changes could
break consistency and integration of cloud platform components. Keeping the
cloud up and running is the essence of the **operation maintenance** phase.

A cloud operator must quickly and efficiently identify and respond to the root
cause of such failures. To do so, the operator must check whether the OpenStack
configuration is sane and consistent. These checks could be thought of as the
rules of a diagnostic system.

There are not many projects in the OpenStack ecosystem aimed at increasing
reliability and resilience of the cloud at the operation stage. With this
proposal we want to introduce a project which will help operators diagnose
their OpenStack platform, reduce response time to known and unknown failures
and effectively support the desired SLA.

Mission
-------

Diagnostics' mission is to **provide OpenStack cloud operators with tools which
minimize the time and effort needed to identify and fix errors in the
operations maintenance phase of the cloud life cycle.**

User Stories
------------

- As a **cloud operator**, I want to make sure that my OpenStack architecture
  and configuration is sane and consistent across all platform components and
  services.
- As a **cloud architect**, I want to make sure that my OpenStack architecture
  and configuration are compliant with best practices.
- As a **cloud architect**, I need a knowledge base of sanity checks and best
  practices for troubleshooting my OpenStack cloud which I can reuse and update
  with my own checks and rules.
- As a **cloud operator**, I want to be able to automatically extract
  configuration parameters from all OpenStack components to verify their
  correctness, consistency and integrity.
- As a **cloud operator**, I want an automatic diagnostics tool which can
  inspect the configuration of my OpenStack cloud and report whether it is sane
  and/or compliant with community-defined best practices.
- As a **cloud operator**, I want to be able to define rules used to inspect
  and verify the configuration of OpenStack components and store them to use
  for verification of future configuration changes.

Roadmap
-------

Proof of concept implementation - end of October 2013. The PoC implementation
includes:

#. Open source code in a stackforge repository
#. Standalone service with REST API v0.1
#. Simple SSH-based configuration data extraction
#. Rules engine with grammatical analysis
#. Basic healthcheck ruleset v0.1 with example rules of different types
#. Filesystem-based ruleset store

The PoC scope does not include:

#. Basic integration with OpenStack Deployment program projects (Tuskar,
   TripleO)
#. Extraction of configuration data from Heat metadata
#. Extended ruleset with example best practices
#. Healthcheck ruleset v1.0
#. Ruleset store back-ends

Assumptions
-----------

We assume that we must reuse as much as possible from the OpenStack Deployment
program in terms of platform configuration and architecture definitions (i.e.
TripleO Heat and configuration file templates).

DESIGN
======

.. include:: service_architecture.rst
.. include:: rules_engine.rst
.. include:: openstack_integration.rst
.. include:: openstack_architecture_model.rst


@@ -1,57 +0,0 @@
Integration with OpenStack
==========================

Use Case #1. Validate initial configuration
-------------------------------------------

OpenStack Diagnostics could add value to OpenStack Deployment by providing
on-demand or automated verification of the OpenStack configuration created by
users of the deployment tools.

OpenStack Deployment (TripleO) allows the user to manage an OpenStack cloud
(called 'overcloud' in terms of TripleO) as a standard OpenStack environment.
This involves Heat, Nova with the baremetal driver (or the Ironic service) and
Tuskar as a user interface application, all installed in a small 'management'
environment called 'undercloud'.

When the user wants to install the 'overcloud', he uses the Tuskar UI to
configure bare metal in the cluster and set roles for all nodes. Tuskar then
creates a Heat Orchestration Template (HOT) which describes the overcloud
architecture. This template also contains node-specific configurations of
overcloud OpenStack components as node metadata. This template could be used by
Diagnostics as a source of information for analysis.

Currently (as of the Havana release) there is no support for automated creation
of images for overcloud nodes in TripleO. However, once such functionality is
added to the project, Diagnostics could fetch base configuration templates for
all overcloud components. Until then, the user will have to provide these
templates to the Diagnostics service via its API.

Combining node-specific metadata with configuration templates, Diagnostics will
have comprehensive configuration information for the new 'overcloud' and will
be able to match it against a ruleset to verify configuration consistency.

The following diagram illustrates the architecture of the described case:

.. image:: images/openstack_integration_tripleo_arch.png

The following sequence diagram shows the data exchange in dynamics:

.. image:: images/openstack_integration_tripleo_seq.png

This diagram shows integration points between the OpenStack TripleO (OpenStack
on OpenStack) program and the diagnostics system. The diagnostic system will
perform the following steps:

* extract the initial environment configuration from the **metadata services**
  of the 'undercloud' (in terms of TripleO). Heat Orchestration Templates for
  the OpenStack 'overcloud' describe nodes and their roles, as well as
  configuration parameters.
* populate an **architecture data model** with actual configuration
  parameters from the metadata services.
* run **inspections** through the architecture data model using the
  set of **production rules** defined by the user, or selected by the user from
  the list of all available rules, defined externally.
* report **results of inspection** as a list of rules that were checked, with
  an indication of matched and unmatched rules. For unmatched rules,
  diagnostics could give **recommendations and hints**.


@@ -1,96 +0,0 @@
Production Rules Engine
=======================

This document describes the rule engine used for inspection and diagnostics of
OpenStack configuration.

Summary
-------

Consistent configuration across all components is essential to OpenStack cloud
operation. If something is wrong with the configuration, you as an operator
will know it immediately, either from monitoring or from clients complaining.
But diagnosing the exact problem is always a challenge, given the number of
components and configuration options per component.

You could think of troubleshooting OpenStack as going through some scenarios
which can be expressed as sets of rules. Your configuration must comply with
all those rules to be operational. On the other hand, if you know which rules
your configuration breaks, you can identify incorrect parameters reliably and
easily. That is how production rule systems and diagnostic systems work.

Example production rule
-----------------------

An example production rule for an OpenStack system would be::

    Given (condition_parameter_1) is (value) and
          (condition_parameter_2) is (value)
    then (check_parameter_1) must be (value)
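
For instance, an illustrative rule of this shape, with parameter names borrowed
from the configuration notes elsewhere in this repository, could read::

    Given (nova.volume_api_class) is (nova.volume.cinder.API)
    then (cinder.auth_strategy) must be (keystone)
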
Rule-based inspection
---------------------

All rule-based inspections use pre-defined actions written in Python; currently
they are defined in the "steps.py" file in the directory
``rubick/inspections/lettuce``. They are based on the Lettuce framework, a BDD
framework for Python.

Store and reuse rules
---------------------

The first version of the Rubick project stores rules in text files and loads
them into memory at runtime. You can add your own rules to the set using the
web UI, and those rules can be saved to files for persistence.

In future versions, we plan to add a module which will save rules to a
database. It will also support migrating the existing rule set to the database.

You can store your rules wherever you want and add them through the UI, or
simply by putting them in text files in the directory
``rubick/inspections/lettuce``.

Rule files must have names in the following format::

    *.feature

The main requirement is that all rule conditions and actions in those files
must be written in accordance with the code of the rule steps in
``ostack-validator/inspections/lettuce/steps.py``.

Extending rules
---------------

You can also extend rule definitions by adding your own steps to steps.py. As
an example::

    # This decorator defines a step so it can be used in scenarios.
    @step(r'Nova has "(.+)" equal to "(.*)"')
    def nova_has_property(step, name, value):
        name = subst(name)
        value = subst(value)

        for nova in [c for c in world.openstack.components
                     if c.name.startswith('nova')]:
            if not nova.config[name] == value:
                stop()

New methods can use two classes from the inspections framework:
``rubick/model.py`` and ``rubick/common.py``. There you can find many adapters
to OpenStack services' configuration data and all additional information
collected from OpenStack nodes. After that, you can use your brand new rule in
scenarios as described above.
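
Putting it together, a hypothetical scenario file using the step defined above
might look like the following (everything except the ``Nova has ...`` step
phrasing is illustrative)::

    Feature: Nova configuration sanity
        Scenario: Keystone is used for authentication
            Then Nova has "auth_strategy" equal to "keystone"
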
In the module ``rubick/common.py`` you can find the ``Inspection``, ``Issue``,
``Mark``, ``Error`` and ``Version`` classes for your convenience in rule
definition. The module ``model.py`` contains the OpenStack model based on
configuration schemas.

.. image:: images/rules_engine_class_model.png

Default rule sets
-----------------

We plan to provide 2 rule sets with the initial version of Rubick:

* healthcheck or sanity rule set
* best practices rule set


@@ -1,24 +0,0 @@
Design & Architecture
=====================

This section describes the design and architecture of the OpenStack
Diagnostics (Rubick) service.

The service includes the following components:

* **openstack.model** is an OpenStack architecture model representation. It is
  a common format used by components of the system to exchange the
  configuration of the inspected environment.
* **Rubick API** is a web service which implements APIs for rules, inspections
  and the OpenStack architecture model.
* **Rule engine** is the logic which performs inspections on the data model.
  The rule engine will have an interface to the ruleset store in the future.
* **Config data store** is a storage for architecture models.
* **Config data extractor** creates the OpenStack model based on data collected
  from different sources, implemented as pluggable back-ends.
* **Heat metadata plugin** extracts configuration metadata from Heat stacks
  created by the TripleO/Tuskar service.
* **SSH metadata plugin** extracts configuration metadata from the actual nodes
  of an OpenStack cloud via a secure SSH connection.

.. image:: images/service_architecture.png


@@ -1,110 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from nodes import Node
import os

TMP_PATH = "/tmp/joker_%s_%d"


class Joker():
    def __init__(self, default_key, *args, **kwargs):
        self.useKey = False
        self.discoverQueue = []
        self.discoveryResult = []
        self.cleanUp = []
        self.name = "EntryPoint"
        self.seenNodes = {}
        self.default_key = None

        if (default_key):
            # treat the argument as a key file path, falling back to raw key data
            try:
                with open(default_key) as f:
                    self.default_key = f.read()
            except Exception:
                self.default_key = default_key

            self.useKey = True

    # keys temporary files
    def __del__(self):
        for filePath in self.cleanUp:
            if os.path.exists(filePath):
                os.remove(filePath)

    def addNode(self, name, host, port=22, user='root', password=None):
        node = Node(name, host, port)
        node.assignCredential(user, self.default_key, password)
        self.discoverQueue.append(node)

        if (self.useKey):
            self.cleanUp.append(node.keyPath)

        return node

    def addResult(self, hostname, ip, user, key, proxyCommand=None, port=22):
        return self.discoveryResult.append(
            self.dkOutput(hostname, ip, user, key, proxyCommand, port))

    def dkOutput(self, hostname, ip, user, key, proxyCommand=None, port=22):
        return {
            "name": hostname,
            "ip": ip,
            "user": user,
            "key": key,
            "port": port,
            "proxy_command": proxyCommand
        }

    def discover(self):
        while self.discoverQueue:
            point = self.discoverQueue.pop()
            nodes = point.discover()

            # this host can't be discovered by ssh method
            if nodes is None:
                continue

            self.addResult(
                hostname=point.hostName, ip=point.hostName, user=point.user,
                key=point.origKey, proxyCommand=point.proxyCommandTxt,
                port=point.accessPort)

            # merge already seen nodes with newly discovered nodes
            self.seenNodes = dict(self.seenNodes.items() + point.link.items())

            for node in nodes:
                if node['hwAddr'] not in self.seenNodes:
                    # add to discovering queue
                    newNode = self.addNode(
                        name=node['ip'],
                        host=node['ip'],
                        user=point.user)

                    # new node connection channel working through master node
                    newNode.setProxyCommand(
                        point.hostName,
                        point.accessPort,
                        point.user,
                        point.keyPath
                    )

        return self.discoveryResult


@@ -1,203 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import stat

import paramiko
from paramiko.dsskey import DSSKey
from paramiko.rsakey import RSAKey
from six import StringIO

TMP_KEY_PATH = "/tmp/joker_%s_%d"


class Node():
    def __init__(self, name, ip, port):
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        self.setHostName(ip)
        self.setName(name)
        self.setAccessPort(port)

        self.connected = False
        self.neighbours = []
        self.debug = True

        self.proxyCommandTxt = self.proxyCommand = None
        self.link = None
        self.origKey = self._pkey = None
        self.keyPath = TMP_KEY_PATH % (name, os.getpid())

    def dumpKey(self, path, key):
        if (key):
            # write the key with owner-only permissions (0600)
            fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)
            f = os.fdopen(fd, "w")
            f.write(key)
            f.close()

    # def __del__(self):
    #     print "Del %s" % self.keyPath
    #     if os.path.exists(self.keyPath):
    #         print "Remove %s" % self.keyPath
    #         os.remove(self.keyPath)

    def proxyCommandGen(self, masterHost, masterPort, masterUser,
                        masterKeyfile):
        return "ssh -i %s -o StrictHostKeyChecking=no -p%d %s@%s nc -q0 %s %d" % (
            masterKeyfile, masterPort, masterUser, masterHost,
            self.hostName, self.accessPort)

    def discoverHwAddr(self):
        # map MAC addresses of active interfaces to their IP addresses
        try:
            (stdout, stderr) = self.runCommand(
                "ip addr | grep -A2 BROADCAST,MULTICAST,UP,LOWER_UP | "
                "awk '/link\/ether/ {ether=$2} /inet/ {print $2 \" \" ether}'")
        except Exception:
            raise

        macDict = {}
        for line in stdout:
            (ip, hwAddr) = line.strip().split(" ")
            macDict[hwAddr] = ip

        return macDict

    def setUniqData(self):
        self.link = self.discoverHwAddr()

    def getUniqData(self):
        return self.link

    def debugLog(self, debugData):
        if self.debug is True:
            print debugData

    def prepare(self):
        # install arp-scan on node
        try:
            self.runCommand(
                "[ ! -x arp-scan ] && sudo apt-get --force-yes -y install "
                "arp-scan")
        except Exception:
            raise

        self.setUniqData()
        return True

    def infect(self):
        # infect node
        return True

    def setName(self, name):
        self.name = name

    def setHostName(self, hostname):
        self.hostName = hostname

    def setAccessPort(self, port):
        self.accessPort = port

    def assignKey(self, key):
        self.origKey = key

        # dump key to file
        self.dumpKey(self.keyPath, self.origKey)

        try:
            self._pkey = RSAKey.from_private_key(StringIO(self.origKey))
        except paramiko.SSHException:
            try:
                self._pkey = DSSKey.from_private_key(StringIO(self.origKey))
            except paramiko.SSHException:
                raise paramiko.SSHException("Unknown private key format")

    def assignCredential(self, user, key, password=None):
        self.user = user
        self.password = password

        if (key):
            self.assignKey(key)

    def setProxyCommand(self, masterHost, masterPort, masterUser,
                        masterKeyfile):
        self.proxyCommandTxt = self.proxyCommandGen(
            masterHost, masterPort, masterUser, masterKeyfile)
        self.proxyCommand = paramiko.ProxyCommand(self.proxyCommandTxt)

    def connect(self):
        if self.connected is True:
            raise AssertionError("already connected")

        try:
            self.ssh.connect(self.hostName, self.accessPort, self.user,
                             pkey=self._pkey, sock=self.proxyCommand,
                             timeout=5, password=self.password)
            self.connected = True
            return True
        except paramiko.BadHostKeyException as e:
            print "Host key could not be verified: ", e
            return False
        except paramiko.AuthenticationException as e:
            print "Error unable to authenticate: ", e
            return False
        except paramiko.SSHException:
            return False
        except EOFError:
            return False

    def runCommand(self, command):
        if (command == ""):
            raise AssertionError("empty command")

        if self.connected is False:
            self.connect()

        self.debugLog("---> " + self.hostName + " " + command)
        stdin, stdout, stderr = self.ssh.exec_command(command)
        self.debugLog("OK   " + self.hostName + " " + command)

        return (stdout.readlines(), stderr.readlines())

    def __discover__(self):
        # scan local networks with arp-scan, falling back to the ARP cache
        (data, _) = self.runCommand(
            "(test -x arp-scan && ip link |\
            awk -F: '/^[0-9]+?: eth/ {print $2}' |\
            sudo xargs -I% arp-scan -l -I % 2>&1 | grep -E '^[0-9]+?\.';\
            arp -an | awk -F\" \" '{ gsub(\"[^0-9\\.]\", \"\", $2);\
            printf(\"%s\\t%s\\t%s\\n\", $2, $4, $7)}'\
            )")

        for line in data:
            (ip, hwAddr, _) = line.strip().split("\t")
            self.neighbours.append({"hwAddr": hwAddr, "ip": ip})
            self.debugLog("%s -> %s" % (self.hostName, ip))

        return self.neighbours

    def discover(self):
        if self.connect() is False:
            return None

        self.prepare()
        return self.__discover__()


@@ -1,124 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os import environ

# import shlex
# import subprocess


class JokerSecureShell():
    def __init__(self, hostName):
        self.tempDir = "/tmp"

        # TODO(metacoma): implement password authentication scheme
        self.credentials = {
            "user": None,
            "host": None,
            "port": 22,
            "key": None,
        }

        self.options = {
            "proxyCommand": None,
            "StrictHostKeyChecking": "no"
        }

        self.haveMasterSocket = False
        self.masterSocketPid = None

    # FIXME use inspect.stack()[0][3] ?
    @property
    def host(self):
        print "called host getter"
        return self.credentials['host']

    @host.setter
    def host(self, value):
        print "called host setter"
        self.credentials['host'] = value

    @property
    def user(self):
        if self.credentials['user']:
            return self.credentials['user']
        else:
            return environ['USER']

    @user.setter
    def user(self, value):
        self.credentials['user'] = value

    @property
    def key(self):
        assert self.credentials['key'] is not None, \
            "Keyfile for %s@%s:%d not present" \
            % (self.user, self.host, self.port)
        return self.credentials['key']

    @key.setter
    def key(self, value):
        self.credentials['key'] = value

    @property
    def port(self):
        return self.credentials['port']

    @port.setter
    def port(self, value):
        self.credentials['port'] = value

    @property
    def proxyCommand(self):
        return self.options['proxyCommand']

    @proxyCommand.setter
    def proxyCommand(self, value):
        self.options['proxyCommand'] = value

    @property
    def masterSocketPath(self):
        return "%s/%s:%d" % (self.tempDir, self.host, self.port)

    @property
    def sshOptions(self):
        r = ""
        # compile ssh options into one space-separated string
        for i in self.options:
            if self.options[i] is not None:
                r = r + ('-o %s=%s ' % (i, self.options[i]))
        return r.strip()

    def createMasterSocket(self):
        self.haveMasterSocket = True
        # XXX we support only keys without password encryption
        # cmd = "ssh -i %s -p %d %s -M -S %s %s@%s" \
        #     % (self.key, self.port, self.sshOptions,
        #        self.masterSocketPath, self.user, self.host)
        # subprocess.Popen(shlex.split(cmd))

    def call(self, destinationCmd):
        if (not self.haveMasterSocket):
            self.createMasterSocket()

        # cmd = "ssh %s %s" % (self.host, destinationCmd)
        # stdout = stderr = None
        # exitCode = subprocess.call(shlex.split(destinationCmd),
        #                            stdout=stdout, stderr=stderr)


@@ -1,43 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import sys

from joker import Joker


def arg_parse():
    p = argparse.ArgumentParser(description='Joker cli interface')
    p.add_argument('-i', '--identity', help='Path to identity file',
                   default=None)
    p.add_argument('-H', '--host', help='destination host')
    p.add_argument('-p', '--port', help='destination port', default=22,
                   type=int)
    p.add_argument('-u', '--user', help='username', default="root")
    p.add_argument('-P', '--password', help='password', default=None)
    return p.parse_args()


def main():
    args = arg_parse()
    print args

    j = Joker(args.identity)
    j.addNode("EntryPoint", args.host, args.port, args.user, args.password)
    print j.discover()


if __name__ == '__main__':
    sys.exit(main())


@@ -1,252 +0,0 @@
= Configutation
== keystone (identity)
* MySQL database exists
* MySQL user exists and has proper permissions for keystone database
* /etc/keystone/keystone.conf:
* contains proper 'connection' setting
* 'admin_token' ???
* Keystone certificates exists (what config options control other communication methods?)
* /etc/keystone/* has user and group set to keystone user
== glance (image)
* /var/lib/glance/glance.sqlite
* MySQL database exists
* MySQL user exists and has proper permissions for glance database
* /etc/glance/glance-api.conf:
[keystone_authtoken]
auth_host = ...
auth_port = ...
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = glance
[paste_deploy]
config_file = /etc/glance/glance-api-paste.ini
flavor = keystone
* sql_connection = mysql://glance:glance-password@<mysql-ip>/glance
* /etc/glance/glance-registry.conf:
[keystone_authtoken]
auth_host = ...
auth_port = ...
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = glance
[paste_deploy]
config_file = /etc/glance/glance-registry-paste.ini
flavor = keystone
* /etc/glance/glance-registry-paste.ini:
# Use this pipeline for keystone auth
[pipeline:glance-registry-keystone]
pipeline = authtoken context registryapp
* sql_connection = mysql://glance:glance-password@<mysql-ip>/glance
= nova (compute)
* Enabling KVM:
/etc/nova/nova.conf:
compute_driver = libvirt.LibvirtDriver
libvirt_type = kvm
* Check for supported CPU features:
egrep '(vmx|svm)' --color=always /proc/cpuinfo
output:
flags : fpu vme de pse tsc msr pae mce ...
lsmod | grep kvm
* Enabling QEMU
/etc/nova/nova.conf:
compute_driver=libvirt.LibvirtDriver
libvirt_type=qemu
* Enabling Xen:
/etc/nova/nova.conf:
compute_driver=xenapi.XenAPIDriver
xenapi_connection_url=http://your_xenapi_management_ip_address
xenapi_connection_username=root
xenapi_connection_password=your_password
or
compute_driver=libvirt.LibvirtDriver
libvirt_type=xen
* Network configuration
* Network interface in promiscuous mode
ip link set eth0 promisc on
* /etc/qpidd.conf has "auth=no"
* SELinux in permissive mode
sudo setenforce permissive
* MySQL
* Database exists
* User exists and has proper permissions to access nova database
* PostgreSQL
* Database exists
* User exists and has proper permissions to access nova database
* /etc/nova/nova.conf has sql_connection=postgres://novadbadmin:[[YOUR_NOVADB_PASSWORD]]@127.0.0.1/nova
== cinder (block storage)
* /etc/cinder/api-paste.ini:
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 10.211.55.20
service_port = 5000
auth_host = 10.211.55.20
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = cinder
admin_password = openstack
* /etc/cinder/cinder.conf:
[DEFAULT]
rootwrap_config=/etc/cinder/rootwrap.conf
sql_connection = mysql://cinder:openstack@192.168.127.130/cinder
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper=tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
#osapi_volume_listen_port=5900
* If RabbitMQ:
rabbit_host = 10.10.10.10
rabbit_port = 5672
rabbit_userid = rabbit
rabbit_password = secure_password
rabbit_virtual_host = /nova
* If Qpid:
qpid_hostname=192.168.206.130
* /etc/nova/nova.conf:
volume_api_class=nova.volume.cinder.API
(continue from http://docs.openstack.org/grizzly/openstack-compute/install/yum/content/compute-minimum-configuration-settings.html)
* Ensure user 'nova' exists, group 'nova' exists, user 'nova' belongs to group 'nova'
* Ensure that '/etc/nova' has 'nova:nova' owners.
* Ensure that '/etc/nova/nova.conf' has 'root:nova' owners and 0640 permissions.
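A sketch of automating the ownership/permission checks above (POSIX only):
    import grp, os, pwd, stat
    st = os.stat('/etc/nova/nova.conf')
    assert pwd.getpwuid(st.st_uid).pw_name == 'root', 'owner must be root'
    assert grp.getgrgid(st.st_gid).gr_name == 'nova', 'group must be nova'
    assert stat.S_IMODE(st.st_mode) == 0o640, 'mode must be 0640'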
* Minimal /etc/nova/nova.conf:
auth_strategy=keystone
network_manager=nova.network.manager.FlatDHCPManager
fixed_range=192.168.100.0/24
public_interface=eth0
flat_interface=eth0
flat_network_bridge=br100
* Sample /etc/nova/nova.conf:
[DEFAULT]
# LOGS/STATE
verbose=True
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
rootwrap_config=/etc/nova/rootwrap.conf
# SCHEDULER
compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
# VOLUMES
volume_api_class=nova.volume.cinder.API
volume_driver=nova.volume.driver.ISCSIDriver
volume_group=cinder-volumes
volume_name_template=volume-%s
iscsi_helper=tgtadm
# DATABASE
sql_connection=mysql://nova:yourpassword@192.168.206.130/nova
# COMPUTE
libvirt_type=qemu
compute_driver=libvirt.LibvirtDriver
instance_name_template=instance-%08x
api_paste_config=/etc/nova/api-paste.ini
# COMPUTE/APIS: if you have separate configs for separate services
# this flag is required for both nova-api and nova-compute
allow_resize_to_same_host=True
# APIS
osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
ec2_dmz_host=192.168.206.130
s3_host=192.168.206.130
enabled_apis=ec2,osapi_compute,metadata
# QPID
qpid_hostname=192.168.206.130
# GLANCE
image_service=nova.image.glance.GlanceImageService
glance_api_servers=192.168.206.130:9292
# NETWORK
network_manager=nova.network.manager.FlatDHCPManager
force_dhcp_release=True
dhcpbridge_flagfile=/etc/nova/nova.conf
firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
# Change my_ip to match each host
my_ip=192.168.206.130
public_interface=eth100
vlan_interface=eth0
flat_network_bridge=br100
flat_interface=eth0
fixed_range=192.168.100.0/24
# NOVNC CONSOLE
novncproxy_base_url=http://192.168.206.130:6080/vnc_auto.html
# Change vncserver_proxyclient_address and vncserver_listen to match each compute host
vncserver_proxyclient_address=192.168.206.130
vncserver_listen=192.168.206.130
# AUTHENTICATION
auth_strategy=keystone
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = nova
signing_dirname = /tmp/keystone-signing-nova
* 'nova-manage version' to find out version of nova. The output will be something like '2013.1'.
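That output can be captured and compared programmatically, e.g. (sketch,
assumes nova-manage is on PATH):
    import subprocess
    out = subprocess.check_output(['nova-manage', 'version'],
                                  universal_newlines=True).strip()
    major, minor = [int(x) for x in out.split('.')[:2]]  # '2013.1' -> (2013, 1)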

View File

@ -1,13 +0,0 @@
spur==0.3.5
WTForms-JSON>=0.2.2
gunicorn==18.0
honcho==0.4.2
jinja2==2.7
lettuce>=0.2.19
pymongo==2.6.1
https://bitbucket.org/jstasiak/recordtype/get/default.tar.gz
paramiko==1.11.0
oslo.config==1.2.1
requests==1.2.0
PyYAML==3.10
six>=1.4.1

View File

@ -1,5 +0,0 @@
if __name__ == '__main__':
from rubick.main import main
import sys
main(sys.argv[1:])

View File

@ -1,261 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os.path
from recordtype import recordtype
def find(l, predicate):
results = [x for x in l if predicate(x)]
return results[0] if len(results) > 0 else None
def index(l, predicate):
i = 0
while i < len(l):
if predicate(l[i]):
return i
i += 1
return -1
def all_subclasses(klass):
subclasses = klass.__subclasses__()
for d in list(subclasses):
subclasses.extend(all_subclasses(d))
return subclasses
def path_relative_to(path, base_path):
if not path.startswith('/'):
path = os.path.join(base_path, path)
return path
class Version:
def __init__(self, major, minor=0, maintenance=0):
"Create Version object by either passing 3 integers,"
"one string or an another Version object"
if isinstance(major, str):
self.parts = [int(x) for x in major.split('.', 3)]
while len(self.parts) < 3:
self.parts.append(0)
elif isinstance(major, Version):
self.parts = major.parts
else:
self.parts = [int(major), int(minor), int(maintenance)]
@property
def major(self):
return self.parts[0]
@major.setter
def major(self, value):
self.parts[0] = int(value)
@property
def minor(self):
return self.parts[1]
@minor.setter
def minor(self, value):
self.parts[1] = int(value)
@property
def maintenance(self):
return self.parts[2]
@maintenance.setter
def maintenance(self, value):
self.parts[2] = value
def __str__(self):
return '.'.join([str(p) for p in self.parts])
def __repr__(self):
return '<Version %s>' % str(self)
def __cmp__(self, other):
for i in range(0, 3):
x = self.parts[i] - other.parts[i]
if x != 0:
return -1 if x < 0 else 1
return 0
def __lt__(self, other):
for i in range(0, 3):
x = self.parts[i] - other.parts[i]
if x != 0:
return True if x < 0 else False
return False
def __le__(self, other):
for i in range(0, 3):
x = self.parts[i] - other.parts[i]
if x != 0:
return True if x < 0 else False
return True
def __ne__(self, other):
for i in range(0, 3):
x = self.parts[i] - other.parts[i]
if x != 0:
return True
return False
def __eq__(self, other):
for i in range(0, 3):
x = self.parts[i] - other.parts[i]
if x != 0:
return False
return True
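# Illustrative usage of Version (a sketch, not part of the original module):
if __name__ == '__main__':
    assert Version('2013.1') < Version('2013.2.1')
    assert Version('2013.2') == Version(2013, 2, 0)
    assert str(Version(2013, 2, 1)) == '2013.2.1'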
class Mark(object):
def __init__(self, source, line=0, column=0):
self.source = source
self.line = line
self.column = column
def __eq__(self, other):
return (
(self.source == other.source) and
(self.line == other.line) and
(self.column == other.column)
)
def __ne__(self, other):
return not self == other
def merge(self, other):
return (
Mark(
self.source,
self.line +
other.line,
self.column +
other.column)
)
def __repr__(self):
return '%s line %d column %d' % (self.source, self.line, self.column)
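# Marks compose by offsetting, which the config parsers rely on when mapping
# parsed elements back to file positions (illustrative sketch):
if __name__ == '__main__':
    base = Mark('nova.conf', line=10, column=0)
    local = Mark('', line=2, column=5)
    assert repr(base.merge(local)) == 'nova.conf line 12 column 5'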
class Error:
def __init__(self, message):
self.message = message
def __repr__(self):
return (
'<%s "%s">' % (
str(self.__class__).split('.')[-1][:-2],
self.message)
)
def __str__(self):
return self.message
class Issue(object):
FATAL = 'FATAL'
ERROR = 'ERROR'
WARNING = 'WARNING'
INFO = 'INFO'
def __init__(self, type, message):
self.type = type
self.message = message
def __eq__(self, other):
if not isinstance(other, Issue):
return False
return self.type == other.type and self.message == other.message
def __ne__(self, other):
return not self == other
def __repr__(self):
return (
'<%s type=%s message=%s>' % (
str(self.__class__).split('.')[-1][:-2],
self.type,
self.message)
)
def __str__(self):
return '[%s] %s' % (self.type, self.message)
class MarkedIssue(Issue):
def __init__(self, type, message, mark):
super(MarkedIssue, self).__init__(type, message)
self.mark = mark
def offset_by(self, base_mark):
other = copy.copy(self)
other.mark = base_mark.merge(self.mark)
return other
def __eq__(self, other):
if not isinstance(other, MarkedIssue):
return False
return super(MarkedIssue, self).__eq__(other) and self.mark == other.mark
def __ne__(self, other):
return not self == other
def __repr__(self):
return (
'<%s type=%s message=%s mark=%s>' % (
str(self.__class__).split('.')[-1][:-2],
self.type,
self.message,
self.mark)
)
def __str__(self):
return (
super(MarkedIssue, self).__str__() +
(' (source "%s" line %d column %d)' %
(self.mark.source, self.mark.line + 1, self.mark.column + 1))
)
Rule = recordtype('Rule', ['name', 'description'])
class Inspection(object):
@classmethod
def all_inspections(klass):
return [c for c in all_subclasses(klass)]
@classmethod
def rules(klass):
if hasattr(klass, 'name') and hasattr(klass, 'description'):
return [Rule(klass.name, klass.description)]
else:
return []
def inspect(self, openstack):
pass
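# A minimal concrete inspection might look like this (illustrative sketch;
# assumes an Openstack model object as built by rubick.model, with untyped
# config values read back as strings):
class DebugFlagInspection(Inspection):
    name = 'Debug flag'
    description = 'Warn when debug logging is left enabled'

    def inspect(self, openstack):
        for component in openstack.components:
            config = getattr(component, 'config', None)
            if config and config.get('debug') == 'True':
                component.report_issue(
                    Issue(Issue.WARNING, 'debug logging is enabled'))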

View File

@ -1 +0,0 @@
from rubick.config_formats.ini import IniConfigParser # noqa

View File

@ -1,20 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rubick.common import Issue, MarkedIssue
class ParseError(MarkedIssue):
def __init__(self, message, mark):
super(ParseError, self).__init__(Issue.ERROR, message, mark)

View File

@ -1,177 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from six import StringIO
from rubick.common import Mark
from rubick.config_model import ComponentConfig, ConfigSection, \
ConfigSectionName, ConfigParameter, ConfigParameterName, \
ConfigParameterValue, TextElement
from rubick.config_formats.common import ParseError
class IniConfigParser:
key_value_re = re.compile("^(\S+?)\s*([:=])\s*('.*'|\".*\"|.*)\s*$")
def parse(self, name, base_mark, io):
if not hasattr(io, 'readlines'):
io = StringIO(io)
def mark(line, column=0):
return base_mark.merge(Mark('', line, column))
errors = []
current_section_name = ConfigSectionName(mark(0), mark(0), '')
current_param_name = None
current_param_value = None
current_param_delimiter = None
sections = []
parameters = []
line_number = -1
for line in io.readlines():
line = line.rstrip()
line_number += 1
if current_param_name \
and (current_param_value.quotechar
or (line == '' or not line[0].isspace())):
param = ConfigParameter(
current_param_name.start_mark,
current_param_value.end_mark,
current_param_name,
current_param_value,
current_param_delimiter)
parameters.append(param)
current_param_name = None
current_param_value = None
current_param_delimiter = None
if line == '':
continue
if line[0] in '#;':
continue
if line[0].isspace():
if current_param_name:
current_param_value.end_mark = mark(line_number, len(line))
current_param_value.text += line.lstrip()
continue
else:
errors.append(
ParseError('Unexpected multiline value continuation',
mark(line_number)))
continue
if line[0] == '[':
end_index = line.find(']')
if end_index == -1:
errors.append(
ParseError('Unclosed section', mark(line_number,
len(line))))
end_index = len(line)
while line[end_index - 1].isspace():
end_index -= 1
if end_index <= 1:
errors.append(
ParseError('Missing section name',
mark(line_number)))
continue
else:
i = end_index + 1
while i < len(line):
if not line[i].isspace():
errors.append(
ParseError('Extra chars after section name',
mark(line_number, i)))
break
i += 1
if current_section_name.text != '' or len(parameters) > 0:
section = ConfigSection(
current_section_name.start_mark,
mark(line_number),
current_section_name,
parameters)
sections.append(section)
parameters = []
current_section_name = ConfigSectionName(
mark(line_number, 0),
mark(line_number, end_index),
line[1:end_index]
)
else:
m = self.key_value_re.match(line)
if m:
current_param_name = ConfigParameterName(
mark(line_number, m.start(1)),
mark(line_number, m.end(1)),
m.group(1)
)
current_param_delimiter = TextElement(
mark(line_number, m.start(2)),
mark(line_number, m.end(2)),
m.group(2)
)
# Unquote value
value = m.group(3)
quotechar = None
if len(value) > 0 and (value[0] == value[-1]
and value[0] in "\"'"):
quotechar = value[0]
value = value[1:-1]
current_param_value = ConfigParameterValue(
mark(line_number, m.start(3)),
mark(line_number, m.end(3)),
value,
quotechar=quotechar
)
else:
errors.append(
ParseError('Syntax error in line "%s"' %
line, mark(line_number)))
if current_param_name:
param = ConfigParameter(
current_param_name.start_mark,
current_param_value.end_mark,
current_param_name,
current_param_value,
current_param_delimiter)
parameters.append(param)
if current_section_name.text != '' or len(parameters) > 0:
section = ConfigSection(
current_section_name.start_mark,
mark(line_number),
current_section_name,
parameters)
sections.append(section)
parameters = []
end_mark = base_mark
if len(sections) > 0:
end_mark = base_mark.merge(sections[-1].end_mark)
config = ComponentConfig(base_mark, end_mark, name, sections, errors)
return config
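# Example use of the parser (sketch; Mark is imported at the top of this
# module):
if __name__ == '__main__':
    parser = IniConfigParser()
    config = parser.parse('nova.conf', Mark('nova.conf'),
                          '[DEFAULT]\nverbose = True')
    assert len(config.errors) == 0
    assert config.sections[0].name.text == 'DEFAULT'
    assert config.sections[0].parameters[0].value.text == 'True'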

View File

@ -1,248 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from six import StringIO
from rubick.common import Mark
from rubick.config_formats.ini import IniConfigParser
class IniConfigParserTests(unittest.TestCase):
def setUp(self):
self.parser = IniConfigParser()
def _strip_margin(self, content):
lines = content.split("\n")
if lines[0] == '' and lines[-1].strip() == '':
lines = lines[1:-1]
first_line = lines[0]
margin_size = 0
while margin_size < len(first_line) \
and first_line[margin_size].isspace():
margin_size += 1
stripped_lines = [line[margin_size:] for line in lines]
return "\n".join(stripped_lines)
def parse(self, content, margin=False):
if margin:
content = self._strip_margin(content)
return self.parser.parse('test.conf', Mark(''), content)
def test_parsing(self):
config = self.parse("param1 = value1")
self.assertEqual(0, len(config.errors))
self.assertParameter(
'param1',
'value1',
config.sections[0].parameters[0])
self.assertEqual(1, len(config.sections[0].parameters))
def test_colon_as_delimiter(self):
c = self.parse('param1 : value1')
self.assertEqual(0, len(c.errors))
self.assertParameter('param1', 'value1', c.sections[0].parameters[0])
def test_use_colon_delimiter_if_it_comes_before_equals_sign(self):
c = self.parse('param1: value=123')
self.assertEqual(0, len(c.errors))
self.assertParameter(
'param1',
'value=123',
c.sections[0].parameters[0])
def test_use_equals_delimiter_if_it_comes_before_colon(self):
c = self.parse('param1=value:123')
self.assertEqual(0, len(c.errors))
self.assertParameter(
'param1',
'value:123',
c.sections[0].parameters[0])
def test_wrapping_value_with_single_quotes(self):
c = self.parse("param = 'foo bar'")
self.assertEqual(0, len(c.errors))
self.assertParameter('param', 'foo bar', c.sections[0].parameters[0])
self.assertEqual("'", c.sections[0].parameters[0].value.quotechar)
def test_wrapping_value_with_single_quotes_and_trailing_whitespace(self):
c = self.parse("param = 'foo bar' ")
self.assertEqual(0, len(c.errors))
self.assertParameter('param', 'foo bar', c.sections[0].parameters[0])
def test_wrapping_value_with_double_quotes(self):
c = self.parse("param = \"foo bar\"")
self.assertEqual(0, len(c.errors))
self.assertParameter('param', 'foo bar', c.sections[0].parameters[0])
self.assertEqual('"', c.sections[0].parameters[0].value.quotechar)
def test_wrapping_value_with_double_quotes_and_trailing_whitespace(self):
c = self.parse("param = \"foo bar\" ")
self.assertEqual(0, len(c.errors))
self.assertParameter('param', 'foo bar', c.sections[0].parameters[0])
def test_parsing_iolike_source(self):
c = self.parse(StringIO("param1 = value1"))
self.assertEqual(0, len(c.errors))
self.assertParameter('param1', 'value1', c.sections[0].parameters[0])
self.assertEqual(1, len(c.sections[0].parameters))
def test_default_section_name(self):
c = self.parse("param1 = value1")
self.assertEqual('', c.sections[0].name.text)
def test_parsing_with_section(self):
c = self.parse("""
[section1]
param1 = value1
""", margin=True)
self.assertEqual(0, len(c.errors))
self.assertEqual('section1', c.sections[0].name.text)
self.assertEqual(1, len(c.sections[0].parameters))
def test_parsing_with_same_section(self):
c = self.parse("""
[section1]
param1 = value1
param2 = value2
""", margin=True)
self.assertEqual(0, len(c.errors))
self.assertEqual(2, len(c.sections[0].parameters))
def test_parsing_with_different_sections(self):
c = self.parse("""
[section1]
param1 = value1
[section2]
param2 = value2
""", margin=True)
self.assertEqual(0, len(c.errors))
self.assertEqual('section1', c.sections[0].name.text)
self.assertParameter('param1', 'value1', c.sections[0].parameters[0])
self.assertEqual(1, len(c.sections[0].parameters))
self.assertEqual('section2', c.sections[1].name.text)
self.assertParameter('param2', 'value2', c.sections[1].parameters[0])
self.assertEqual(1, len(c.sections[1].parameters))
def test_whole_line_comments_starting_with_hash(self):
c = self.parse("#param=value")
self.assertEqual(0, len(c.errors))
self.assertEqual(0, len(c.sections))
def test_whole_line_comments_starting_with_semicolon(self):
c = self.parse(";param=value")
self.assertEqual(0, len(c.errors))
self.assertEqual(0, len(c.sections))
def test_hash_in_value_is_part_of_the_value(self):
c = self.parse("param=value#123")
self.assertEqual(0, len(c.errors))
self.assertParameter("param", "value#123", c.sections[0].parameters[0])
def test_multiline_value(self):
c = self.parse("""
param1 = line1
line2
""", margin=True)
self.assertEqual(0, len(c.errors))
self.assertParameter(
'param1',
'line1line2',
c.sections[0].parameters[0])
def test_multiline_value_finished_by_other_parameter(self):
c = self.parse("""
param1 = foo
bar
param2 = baz
""", margin=True)
self.assertEqual(0, len(c.errors))
self.assertParameter('param1', 'foobar', c.sections[0].parameters[0])
def test_multiline_value_finished_by_empty_line(self):
c = self.parse("""
param1 = foo
bar
param2 = baz
""", margin=True)
self.assertEqual(0, len(c.errors))
self.assertParameter('param1', 'foobar', c.sections[0].parameters[0])
def test_unclosed_section_causes_error(self):
c = self.parse("[section1\nparam1=123")
self.assertEqual(1, len(c.errors))
def test_missing_equals_sign_or_colon_causes_error(self):
c = self.parse("param1 value1")
self.assertEqual(1, len(c.errors))
def test_spaces_in_key_causes_error(self):
c = self.parse("param 1 = value1")
self.assertEqual(1, len(c.errors))
def test_returning_multiple_errors(self):
c = self.parse("[unclosed section\npararm 1 = value1")
self.assertEqual(2, len(c.errors))
def test_errors_doesnt_affect_valid_parameters(self):
c = self.parse('param1 value1\nparam2 = value2')
self.assertEqual(1, len(c.errors))
self.assertParameter('param2', 'value2', c.sections[0].parameters[0])
def _getattr(self, o, name):
if name.find('.') != -1:
parts = name.split('.')
o = getattr(o, parts[0])
if o is None:
return None
return self._getattr(o, '.'.join(parts[1:]))
else:
return getattr(o, name)
def assertAttributes(self, attribute_values, subject):
for attr, expected in attribute_values.items():
actual = self._getattr(subject, attr)
self.assertEqual(
expected, actual,
"%s expected to have %s = %s, but the value was %s" %
(subject, attr, expected, actual))
def assertParameter(self, name, value, o):
self.assertAttributes({'name.text': name, 'value.text': value}, o)
if __name__ == '__main__':
unittest.main()

View File

@ -1,390 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from rubick.schema import TypeValidatorRegistry, InvalidValueError
class ConfigurationSection(object):
def __init__(self, config, section):
super(ConfigurationSection, self).__init__()
self.config = config
self.section = section
def _combine_names(self, section, param):
if section == 'DEFAULT':
return param
return '%s.%s' % (section, param)
def get(self, name, *args, **kwargs):
return self.config.get(
self._combine_names(self.section, name), *args, **kwargs)
def set(self, name, *args, **kwargs):
self.config.set(
self._combine_names(self.section, name), *args, **kwargs)
def set_default(self, name, *args, **kwargs):
self.config.set_default(
self._combine_names(self.section, name), *args, **kwargs)
def set_cli(self, name, *args, **kwargs):
self.config.set_cli(
self._combine_names(self.section, name), *args, **kwargs)
def set_env(self, name, *args, **kwargs):
self.config.set_env(
self._combine_names(self.section, name), *args, **kwargs)
def contains(self, name, *args, **kwargs):
return self.config.contains(
self._combine_names(self.section, name), *args, **kwargs)
def is_default(self, name, *args, **kwargs):
return self.config.is_default(
self._combine_names(self.section, name), *args, **kwargs)
def __getitem__(self, key):
return self.config.get(self._combine_names(self.section, key))
def __setitem__(self, key, value):
return self.config.set(self._combine_names(self.section, key), value)
def __contains__(self, key):
return self.config.contains(self._combine_names(self.section, key))
def keys(self):
return self.config.keys(self.section)
def items(self, *args, **kwargs):
return self.config.items(self.section, *args, **kwargs)
class ConfigurationWrapper(object):
def __init__(self, config, state):
super(ConfigurationWrapper, self).__init__()
self.config = config
self.state = state
def __getitem__(self, key):
if key in self.state:
return ''
return self.config.get(key, _state=self.state)
class Configuration(object):
def __init__(self, schema=None):
super(Configuration, self).__init__()
self._defaults = dict()
self._normal = dict()
self._cli = dict()
self._env = dict()
self._cache = dict()
self.schema = schema
def _normalize_name(self, name):
if name.find('.') == -1:
section = 'DEFAULT'
else:
section, name = name.split('.', 1)
return (section, name)
def _combine_names(self, section, param):
if section == 'DEFAULT':
return param
return '%s.%s' % (section, param)
def get(self, fullname, default=None, raw=False, _state=[]):
if not raw and fullname in self._cache:
return self._cache[fullname]
section, name = self._normalize_name(fullname)
if section in self._cli and name in self._cli[section]:
value = self._cli[section][name]
elif section in self._env and name in self._env[section]:
value = self._env[section][name]
elif section in self._normal and name in self._normal[section]:
value = self._normal[section][name]
elif section in self._defaults and name in self._defaults[section]:
value = self._defaults[section][name]
else:
value = default
if not isinstance(value, str):
return value
if raw:
return value
tmpl = string.Template(value)
value = tmpl.safe_substitute(
ConfigurationWrapper(self, _state + [name]))
if self.schema:
param_schema = self.schema.get_parameter(name, section=section)
if param_schema:
type_validator = TypeValidatorRegistry.get_validator(
param_schema.type)
type_validation_result = type_validator.validate(value, **param_schema.type_args)
if not isinstance(type_validation_result, InvalidValueError):
value = type_validation_result
self._cache[fullname] = value
return value
def validate(self, fullname):
if not self.schema:
return None
section, name = self._normalize_name(fullname)
value = self.get(fullname, raw=True)
tmpl = string.Template(value)
value = tmpl.safe_substitute(
ConfigurationWrapper(self, [name]))
param_schema = self.schema.get_parameter(name, section=section)
if not param_schema:
return None
type_validator = TypeValidatorRegistry.get_validator(
param_schema.type)
type_validation_result = type_validator.validate(value, **param_schema.type_args)
if not isinstance(type_validation_result, InvalidValueError):
return None
return type_validation_result
def contains(self, name, ignoreDefault=False):
section, name = self._normalize_name(name)
if section in self._normal and name in self._normal[section]:
return True
if section in self._cli and name in self._cli[section]:
return True
if section in self._env and name in self._env[section]:
return True
if (not ignoreDefault and section in self._defaults and
name in self._defaults[section]):
return True
return False
def is_default(self, name):
section, name = self._normalize_name(name)
return (
not (section in self._normal and name in self._normal[section]) and
not (section in self._cli and name in self._cli[section]) and
not (section in self._env and name in self._env[section]) and
(section in self._defaults and name in self._defaults[section])
)
def set_env(self, fullname, value):
section, name = self._normalize_name(fullname)
self._env.setdefault(section, {})[name] = value
self._invalidate_cache(fullname)
def set_cli(self, fullname, value):
section, name = self._normalize_name(fullname)
self._cli.setdefault(section, {})[name] = value
self._invalidate_cache(fullname)
def set_default(self, fullname, value):
section, name = self._normalize_name(fullname)
self._defaults.setdefault(section, {})[name] = value
self._invalidate_cache(fullname)
def set(self, fullname, value):
section, name = self._normalize_name(fullname)
self._normal.setdefault(section, {})[name] = value
self._invalidate_cache(fullname)
def _invalidate_cache(self, fullname):
# We need to invalidate not only value of given parameter
# but also values that depend on that parameter
# Since this is hard, we'll just invalidate all cached values
self._cache = dict()
def section(self, section):
return ConfigurationSection(self, section)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.set(key, value)
def __contains__(self, section):
return ((section in self._defaults) or
(section in self._cli) or
(section in self._env) or
(section in self._normal))
def keys(self, section=None):
if section:
names = set()
for param in self._defaults.get(section, {}).keys():
names.add(param)
for param in self._normal.get(section, {}).keys():
names.add(param)
for param in self._cli.get(section, {}).keys():
names.add(param)
for param in self._env.get(section, {}).keys():
names.add(param)
return list(names)
else:
sections = set()
for section in self._defaults.keys():
sections.add(section)
for section in self._normal.keys():
sections.add(section)
for section in self._cli.keys():
sections.add(section)
for section in self._env.keys():
sections.add(section)
return list(sections)
def items(self, section=None):
if section:
return (
[(name, self.get(self._combine_names(section, name)))
for name in self.keys(section)]
)
else:
return (
[(name, ConfigurationSection(self, name))
for name in self.keys()]
)
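# Lookup precedence is CLI over environment over explicit values over
# defaults, with ${...} substitution resolved against other keys
# (illustrative sketch):
if __name__ == '__main__':
    config = Configuration()
    config.set_default('state_path', '/var/lib/nova')
    config.set('state_path', '/srv/nova')
    config.set_cli('state_path', '/mnt/nova')
    assert config['state_path'] == '/mnt/nova'
    config.set('lock_path', '${state_path}/lock')
    assert config['lock_path'] == '/mnt/nova/lock'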
class Element(object):
def __init__(self, start_mark, end_mark):
self.start_mark = start_mark
self.end_mark = end_mark
def __eq__(self, other):
return (
(self.__class__ == other.__class__)
and (self.start_mark == other.start_mark)
and (self.end_mark == other.end_mark)
)
def __ne__(self, other):
return not self == other
class ComponentConfig(Element):
def __init__(self, start_mark, end_mark, name, sections=[], errors=[]):
super(ComponentConfig, self).__init__(start_mark, end_mark)
self.name = name
self.sections = sections
for section in self.sections:
section.parent = self
self.errors = errors
class TextElement(Element):
def __init__(self, start_mark, end_mark, text):
super(TextElement, self).__init__(start_mark, end_mark)
self.text = text
class ConfigSection(Element):
def __init__(self, start_mark, end_mark, name, parameters):
super(ConfigSection, self).__init__(start_mark, end_mark)
self.name = name
self.parameters = parameters
for parameter in self.parameters:
parameter.parent = self
class ConfigSectionName(TextElement):
pass
class ConfigParameter(Element):
def __init__(self, start_mark, end_mark, name, value, delimiter):
super(ConfigParameter, self).__init__(start_mark, end_mark)
self.name = name
self.name.parent = self
self.value = value
self.value.parent = self
self.delimiter = delimiter
self.delimiter.parent = self
def __eq__(self, other):
return (
(self.name.text == other.name.text) and (
self.value.text == other.value.text)
)
def __ne__(self, other):
return not self == other
def __repr__(self):
return (
"<ConfigParameter %s=%s delimiter=%s>" % (
self.name.text,
self.value.text,
self.delimiter.text)
)
class ConfigParameterName(TextElement):
pass
class ConfigParameterValue(TextElement):
def __init__(self, start_mark, end_mark, text, value=None, quotechar=None):
super(ConfigParameterValue, self).__init__(start_mark, end_mark, text)
self.value = value
self.quotechar = quotechar

View File

@ -1,58 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bson.objectid import ObjectId
from copy import copy
import os
from pymongo import MongoClient
assert ObjectId
from recordtype import recordtype
def connect_to_db():
mongo_url = os.environ.get("MONGODB_URI") or "mongodb://localhost/rubick"
client = MongoClient(mongo_url)
return client[mongo_url.split('/')[-1]]
def get_db():
db = connect_to_db()
return db
class Cluster(recordtype('Cluster',
[('id', str(ObjectId())), 'name', 'description',
'status', 'nodes', 'private_key', 'data'],
default=None)):
@classmethod
def from_doc(klass, doc):
doc['id'] = str(doc['_id'])
del doc['_id']
return Cluster(**doc)
def as_doc(self):
doc = copy(self._asdict())
doc['_id'] = ObjectId(doc['id'])
del doc['id']
return doc
def for_json(self):
return copy(self._asdict())
class RuleGroup:
VALIDITY = 'validity'
HA = 'high-availability'
BEST_PRACTICES = 'best-practices'
all = [VALIDITY, HA, BEST_PRACTICES]

File diff suppressed because it is too large

View File

@ -1,25 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RubickException(BaseException):
pass
class ValidatorException(RubickException):
pass
class SchemaException(RubickException):
pass

View File

@ -1,3 +0,0 @@
from rubick.inspections.keystone_authtoken import KeystoneAuthtokenSettingsInspection # noqa
from rubick.inspections.keystone_endpoints import KeystoneEndpointsInspection # noqa
from rubick.inspections.lettuce_runner import LettuceRunnerInspection # noqa

View File

@ -1,121 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rubick.common import Inspection, Issue, find
AUTHTOKEN_FILTER_FACTORY = ('keystoneclient.middleware.auth_token:'
'filter_factory')
class KeystoneAuthtokenSettingsInspection(Inspection):
name = 'Keystone auth'
description = 'Validate correctness of keystone settings'
def inspect(self, openstack):
components = []
for host in openstack.hosts:
components.extend(host.components)
keystones = [c for c in components if c.name == 'keystone']
if len(keystones) == 0:
openstack.report_issue(
Issue(Issue.FATAL, 'No keystone service found'))
return
keystone = keystones[0]
keystone_addresses = [keystone.config['bind_host']]
if keystone_addresses == ['0.0.0.0']:
keystone_addresses = keystone.host.network_addresses
for nova in [c for c in components if c.name == 'nova-api']:
if nova.config['auth_strategy'] != 'keystone':
continue
authtoken_section = find(
nova.paste_config.items(),
lambda name_values: name_values[0].startswith('filter:') and
name_values[1].get(
'paste.filter_factory') == AUTHTOKEN_FILTER_FACTORY)
if not authtoken_section:
continue
authtoken_settings = authtoken_section[1]
def get_value(name):
return (
authtoken_settings[name] or
nova.config['keystone_authtoken.%s' % name]
)
auth_host = get_value('auth_host')
auth_port = int(get_value('auth_port'))
auth_protocol = get_value('auth_protocol')
admin_user = get_value('admin_user')
# admin_password = get_value('admin_password')
admin_tenant_name = get_value('admin_tenant_name')
admin_token = get_value('admin_token')
msg = 'Keystone authtoken config %s'
def missing_param_issue(param):
return Issue(Issue.ERROR,
msg % ('misses "%s" setting' % param))
def incorrect_param_issue(param):
return Issue(Issue.ERROR,
msg % ('has incorrect "%s" setting' % param))
if not auth_host:
nova.report_issue(missing_param_issue('auth_host'))
elif not auth_host in keystone_addresses:
nova.report_issue(incorrect_param_issue('auth_host'))
if not auth_port:
nova.report_issue(missing_param_issue('auth_port'))
elif auth_port != keystone.config['admin_port']:
nova.report_issue(incorrect_param_issue('auth_port'))
if not auth_protocol:
nova.report_issue(missing_param_issue('auth_protocol'))
elif not auth_protocol in ['http', 'https']:
nova.report_issue(incorrect_param_issue('auth_protocol'))
if not admin_user:
nova.report_issue(missing_param_issue('admin_user'))
else:
user = find(
keystone.db['users'],
lambda u: u['name'] == admin_user)
if not user:
nova.report_issue(
Issue(Issue.ERROR, msg %
' has "admin_user" that is missing'))
if not admin_tenant_name:
nova.report_issue(missing_param_issue('admin_tenant_name'))
else:
tenant = find(keystone.db['tenants'],
lambda t: t['name'] == admin_tenant_name)
if not tenant:
nova.report_issue(
Issue(Issue.ERROR, msg %
' has "admin_tenant_name" that is missing'))
if admin_token:
nova.report_issue(
Issue(
Issue.WARNING,
msg % 'uses insecure admin_token method '
'for authentication'))

View File

@ -1,82 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import urlparse
from rubick.common import Inspection, Issue, find
SERVICE_WITH_NO_ENDPOINT_MSG = """
Keystone catalog contains service "%s" that has no defined endpoints
""".strip()
SERVICE_ENDPOINT_MSG = """
Keystone catalog has endpoint for service "%s" (id %s) that has "%s"
""".strip()
UNKNOWN_HOST_ENDPOINT_MSG = (SERVICE_ENDPOINT_MSG +
' set pointing to unknown host')
UNKNOWN_SERVICE_ENDPOINT_MSG = (SERVICE_ENDPOINT_MSG +
' set pointing to no service')
class KeystoneEndpointsInspection(Inspection):
name = 'Keystone endpoints'
description = """
Validate that each keystone endpoint leads to proper service
""".strip()
def inspect(self, openstack):
keystone = find(openstack.components, lambda c: c.name == 'keystone')
if not keystone:
return
for service in keystone.db['services']:
if service['type'] == 'compute':
endpoint = find(
keystone.db['endpoints'],
lambda e: e['service_id'] == service['id'])
if not endpoint:
keystone.report_issue(
Issue(
Issue.WARNING, SERVICE_WITH_NO_ENDPOINT_MSG %
service['name']))
continue
for url_attr in ['adminurl', 'publicurl', 'internalurl']:
url = urlparse(endpoint[url_attr])
# TODO(someone): resolve endpoint url host address
host = find(
openstack.hosts,
lambda h: url.hostname in h.network_addresses)
if not host:
keystone.report_issue(
Issue(Issue.ERROR, UNKNOWN_HOST_ENDPOINT_MSG %
(service['name'], service['id'], url_attr)))
continue
nova_api = None
for c in host.components:
if c.name != 'nova-api':
continue
listen_address = c.config['osapi_compute_listen']
listen_port = c.config['osapi_compute_listen_port']
if (listen_address in ['0.0.0.0', url.hostname] and
listen_port == url.port):
nova_api = c
break
if not nova_api:
keystone.report_issue(
Issue(Issue.ERROR, UNKNOWN_SERVICE_ENDPOINT_MSG %
(service['name'], service['id'], url_attr)))

View File

@ -1,19 +0,0 @@
Feature: Configuration consistency
Scenario: Nova has proper Keystone host
Given I use OpenStack 2013.1
And Nova has "auth_strategy" equal to "keystone"
And Keystone addresses are @X
Then Nova should have keystone authtoken filter's "auth_host" in "$X"
Scenario: Nova has proper fixed_range settings for Grizzly release
Given I use OpenStack 2013.1
And Nova has "fixed_range" equal to ""
Then "nova" component have "fixed_range" parameter equal to ""
Scenario: Nova has proper settings for NoVNC
Given I use OpenStack 2013.1
And Controller addresses are @X
Then "nova" component have "novncproxy_base_url" parameter equal to "$X"
And "nova" component must have "sql_connection" parameter

View File

@ -1,22 +0,0 @@
Feature: Configuration consistency
Scenario: Nova has proper Keystone host
Given I use OpenStack 2013.2.1
And Nova has "auth_strategy" equal to "keystone"
And Keystone addresses are @X
Then Nova should have keystone authtoken filter's "auth_host" in "$X"
Scenario: Nova has proper fixed_range settings for Grizzly release
Given I use OpenStack 2013.2.1
And Nova has "fixed_range" equal to ""
Then "nova" component have "fixed_range" parameter equal to ""
Scenario: Nova has proper settings for NoVNC
Given I use OpenStack 2013.2.1
And Controller addresses are @X
Then "nova" component have "novncproxy_base_url" parameter equal to "$X"
And "nova" component must have "sql_connection" parameter
Scenario: Neutron check
Given I use OpenStack 2013.2.1
Then "neutron" component must have "sql_connection" parameter

View File

@ -1,198 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from lettuce import step, world
from rubick.common import Issue, Version, find
import rubick.model as model
AUTHTOKEN_FILTER_FACTORY = ('keystoneclient.middleware.auth_token:'
'filter_factory')
def get_variable(name):
if not hasattr(world, 'variables'):
return None
return world.variables.get(name)
def set_variable(name, value):
if not hasattr(world, 'variables'):
world.variables = {}
world.variables[name] = value
def subst(template):
if not hasattr(world, 'variables'):
return template
tmpl = string.Template(template)
return tmpl.safe_substitute(world.variables)
def stop():
assert False, "stop"
# Openstack general step description section
@step(r'I use OpenStack (\w+)')
def use_openstack_version(step, version):
version = Version(version)
for component in [c for c in world.openstack.components
if isinstance(c, model.OpenstackComponent)]:
if not Version(component.version) >= version:
stop()
@step(r'Controller addresses are @(\w+)')
def controller_addresses(self, variable):
controller = find(world.openstack.components, lambda c: c.name == 'nova')
if controller.config['s3_host'] == '0.0.0.0':
addresses = filter(
lambda ip: not ip.startswith('127.'),
controller.host.network_addresses)
else:
addresses = [controller.config['s3_host']]
set_variable(variable, addresses)
# Keystone steps section
@step(r'Keystone addresses are @(\w+)')
def keystone_addresses(self, variable):
keystone = find(world.openstack.components, lambda c: c.name == 'keystone')
if keystone.config['bind_host'] == '0.0.0.0':
addresses = filter(
lambda ip: not ip.startswith('127.'),
keystone.host.network_addresses)
else:
addresses = [keystone.config['bind_host']]
set_variable(variable, addresses)
# Nova steps section
@step(r'Nova has "(.+)" equal to "(.*)"')
def nova_has_property(step, name, value):
name = subst(name)
value = subst(value)
for nova in [c for c in world.openstack.components
if c.name.startswith('nova')]:
if not nova.config[name] == value:
stop()
@step(r'Nova should have "(.+)" in "(.*)"')
def nova_property_assertion(self, name, values):
name = subst(name)
values = subst(values)
if not values:
return
for nova in [c for c in world.openstack.components
if c.name.startswith('nova')]:
nova_value = nova.config[name]
if not (nova_value and nova_value in values):
nova.report_issue(
Issue(Issue.ERROR, 'Nova should have "%s" in %s' %
(name, values)))
@step(r"Nova should have keystone authtoken filter's \"(.+)\" in \"(.*)\"")
def nova_authtoken_property_assertion(self, name, values):
name = subst(name)
values = subst(values)
if not values:
return
for nova in [c for c in world.openstack.components
if c.name.startswith('nova')]:
# find() returns None when nothing matches; avoid tuple-unpacking it
authtoken_section = find(
nova.paste_config.items(),
lambda name_values: name_values[0].startswith('filter:')
and name_values[1].get('paste.filter_factory') ==
AUTHTOKEN_FILTER_FACTORY
)
if not authtoken_section:
nova.report_issue(
Issue(Issue.ERROR, 'Nova has keystone "auth" strategy '
'configured, but does not have authtoken '
'paste filter'))
continue
authtoken_settings = nova.paste_config.section(authtoken_section[0])
param_value = (authtoken_settings[name] or
nova.config['keystone_authtoken.%s' % name])
if not (param_value and param_value in values):
nova.report_issue(
Issue(Issue.ERROR, 'Nova should have "%s" in %s, '
'actual value is "%s"' % (
name, values, param_value)))
# Common steps section
@step(r'"(.+)" component must have "(.+)" parameter')
def component_has_non_none_property(step, component_name, parameter_name):
component_name = subst(component_name)
parameter_name = subst(parameter_name)
for component in [c for c in world.openstack.components
if c.name.startswith('%s' % component_name)]:
component_value = component.config[parameter_name]
if component_value is None:
component.report_issue(
Issue(Issue.ERROR,
'"%s" must have parameter "%s - version %s"' %
(c.name, parameter_name, component.version)))
@step(r'"(.+)" component have "(.+)" parameter equal to "(.*)"')
def component_has_property_with_value(step, component_name, parameter_name,
value):
component_name = subst(component_name)
parameter_name = subst(parameter_name)
value = subst(value)
for component in [c for c in world.openstack.components
if c.name.startswith('%s' % component_name)]:
component_value = component.config[parameter_name]
if not component_value == value:
component.report_issue(
Issue(Issue.ERROR,
'"%s" should have parameter "%s" equals "%s"'
'now its "%s"' % (component_name, parameter_name,
component_value, value)))
@step(r'Which package version do I use?')
def component_versions_list(self):
for component in world.openstack.components:
component.report_issue(Issue(Issue.INFO, "%s component has %s version" %
(component.name,
component.version)))

View File

@ -1,4 +0,0 @@
Feature: OpenStack component version finding
Scenario: All component version finding
Then Which package version do I use?

View File

@ -1,52 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import lettuce
import lettuce.fs
from rubick.common import Inspection, Rule, Issue
class LettuceRunnerInspection(Inspection):
base_path = os.path.join(os.path.dirname(__file__), 'lettuce')
@classmethod
def rules(klass):
rules = []
loader = lettuce.fs.FeatureLoader(klass.base_path)
for path in loader.find_feature_files():
feature = lettuce.Feature.from_file(path)
for scenario in feature.scenarios:
rules.append(Rule(scenario.name,
"\n".join(scenario.remaining_lines)))
return rules
def inspect(self, openstack):
runner = lettuce.Runner(base_path=self.base_path)
lettuce.world.openstack = openstack
result = runner.run()
del lettuce.world.openstack
for feature_result in result.feature_results:
for scenario_result in feature_result.scenario_results:
if scenario_result.passed:
continue
for step in scenario_result.steps_undefined:
openstack.report_issue(
Issue(Issue.ERROR, 'Undefined step "%s"' %
step.sentence))

View File

@ -1,53 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['openstack_for_json']
def json_issues(issues):
return [str(issue) for issue in issues]
def json_component(component):
result = dict(type='component', name=component.name)
if hasattr(component, 'version'):
result['version'] = component.version
if len(component.all_issues) > 0:
result['issues'] = json_issues(component.all_issues)
return result
def json_host(host):
result = dict(type='host', name=host.name,
addresses=host.network_addresses,
components=[json_component(c) for c in host.components])
if len(host.issues) > 0:
result['issues'] = json_issues(host.issues)
return result
def json_openstack(openstack):
result = dict(type='openstack',
hosts=[json_host(host) for host in openstack.hosts])
if len(openstack.issues) > 0:
result['issues'] = json_issues(openstack.issues)
return result
def openstack_for_json(openstack):
return json_openstack(openstack)
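# The result is plain dicts/lists ready for json.dumps; illustrative shape
# (host and component names are made up):
#
# {'type': 'openstack',
#  'hosts': [{'type': 'host', 'name': 'controller-1',
#             'addresses': ['10.0.0.2'],
#             'components': [{'type': 'component', 'name': 'nova-api',
#                             'version': '2013.1'}]}]}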

View File

@ -1,60 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
from rubick.inspection import MainConfigValidationInspection
from rubick.model_parser import ModelParser
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'-d',
'--debug',
help='set debug log level',
action='store_true')
parser.add_argument('path', help='Path to config snapshot')
args = parser.parse_args(args)
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARN)
model_parser = ModelParser()
print('Analyzing configs in "%s"' % args.path)
model = model_parser.parse(args.path)
inspections = [MainConfigValidationInspection()]
issues = []
for inspection in inspections:
issues.extend(inspection.inspect(model))
if len(issues) == 0:
print('No issues found')
else:
print('Found issues:')
for issue in issues:
print(issue)
if __name__ == '__main__':
main(sys.argv[1:])
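# Typical invocations (snapshot path is hypothetical; relies on the package
# __main__ entry point shown earlier):
#   python -m rubick /path/to/config/snapshot
#   python -m rubick --debug /path/to/config/snapshot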

View File

@ -1,451 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import groupby
import logging
from rubick.common import Mark, Issue, MarkedIssue, Version
from rubick.config_formats import IniConfigParser
from rubick.config_model import Configuration
from rubick.schema import ConfigSchemaRegistry
from rubick.utils import memoized
class IssueReporter(object):
def __init__(self):
super(IssueReporter, self).__init__()
self.issues = []
def report_issue(self, issue):
if issue not in self.issues:
issue.subject = self
self.issues.append(issue)
@property
def all_issues(self):
return list(self.issues)
class Resource(IssueReporter):
pass
class Openstack(Resource):
def __init__(self):
super(Openstack, self).__init__()
self.hosts = []
def add_host(self, host):
if not host:
return
self.hosts.append(host)
host.parent = self
@property
def all_issues(self):
result = super(Openstack, self).all_issues
for host in self.hosts:
result.extend(host.all_issues)
return result
@property
def components(self):
components = []
for host in self.hosts:
components.extend(host.components)
return components
class HostResource(Resource):
def __init__(self, name):
super(HostResource, self).__init__()
self.name = name
self.components = []
self.filesystem = {}
def __str__(self):
return 'Host "%s"' % self.name
def add_component(self, component):
if not component:
return
self.components.append(component)
component.parent = self
def add_fs_resource(self, resource):
if not resource:
return
self.filesystem[resource.path] = resource
resource.parent = self
@property
def openstack(self):
return self.parent
@property
def all_issues(self):
result = super(HostResource, self).all_issues
for component in self.components:
result.extend(component.all_issues)
return result
class ProcessResource(Resource):
def __init__(self, pid, cmdline, cwd):
super(ProcessResource, self).__init__()
self.pid = pid
self.cmdline = cmdline
self.cwd = cwd
class Service(Resource):
def __init__(self):
super(Service, self).__init__()
self.issues = []
def report_issue(self, issue):
self.issues.append(issue)
@property
def host(self):
return self.parent
@property
def openstack(self):
return self.host.openstack
@property
def all_issues(self):
result = super(Service, self).all_issues
if hasattr(self, 'config_files') and self.config_files:
[result.extend(config_file.all_issues)
for config_file in self.config_files]
return result
def __str__(self):
return 'Service "%s"' % self.name
class OpenstackComponent(Service):
logger = logging.getLogger('rubick.model.openstack_component')
component = None
@property
@memoized
def config(self):
schema = ConfigSchemaRegistry.get_schema(self.component, self.version)
if not schema:
self.logger.debug(
'No schema for component "%s" main config version %s. '
'Using untyped parameters (everything is string)' %
(self.component, self.version))
return self._parse_config_resources(self.config_files, schema)
def _parse_config_resources(self, resources, schema=None):
config = Configuration(schema)
# Apply defaults
if schema:
for parameter in filter(lambda p: p.default, schema.parameters):
if not parameter.section or parameter.section == 'DEFAULT':
config.set_default(parameter.name, parameter.default)
else:
config.set_default(
'%s.%s' %
(parameter.section, parameter.name), parameter.default)
for resource in reversed(resources):
self._parse_config_file(
Mark(resource.path), resource.contents, config, schema,
issue_reporter=resource)
return config
def _parse_config_file(self, base_mark, config_contents,
config=Configuration(), schema=None,
issue_reporter=None):
if issue_reporter:
def report_issue(issue):
issue_reporter.report_issue(issue)
else:
def report_issue(issue):
pass
# Parse config file
config_parser = IniConfigParser()
parsed_config = config_parser.parse('', base_mark, config_contents)
for error in parsed_config.errors:
report_issue(error)
# Validate config parameters and store them
section_name_text_f = lambda s: s.name.text
sections_by_name = groupby(
sorted(
parsed_config.sections,
key=section_name_text_f),
key=section_name_text_f)
for section_name, sections in sections_by_name:
sections = list(sections)
if len(sections) > 1:
report_issue(
Issue(
Issue.INFO,
'Section "%s" appears multiple times' %
section_name))
seen_parameters = set()
for section in sections:
unknown_section = False
if schema:
unknown_section = not schema.has_section(section.name.text)
if unknown_section:
report_issue(
MarkedIssue(Issue.WARNING, 'Unknown section "%s"' %
(section_name), section.start_mark))
continue
for parameter in section.parameters:
parameter_schema = None
if schema:
parameter_schema = schema.get_parameter(
name=parameter.name.text,
section=section.name.text)
if not (parameter_schema or unknown_section):
report_issue(
MarkedIssue(
Issue.WARNING,
'Unknown parameter: section "%s" name "%s"'
% (section_name, parameter.name.text),
parameter.start_mark))
if parameter.name.text in seen_parameters:
report_issue(
MarkedIssue(
Issue.WARNING,
'Parameter "%s" in section "%s" redeclared' %
(parameter.name.text, section_name),
parameter.start_mark))
else:
seen_parameters.add(parameter.name.text)
parameter_fullname = parameter.name.text
if section_name != 'DEFAULT':
parameter_fullname = section_name + \
'.' + parameter_fullname
config.set(parameter_fullname, parameter.value.text)
validation_error = config.validate(parameter_fullname)
if validation_error:
validation_error.mark = parameter\
.value.start_mark.merge(validation_error.mark)
validation_error.message = \
'Property "%s" in section "%s": %s' % (
parameter.name.text, section_name,
validation_error.message)
report_issue(validation_error)
if (parameter_schema and
parameter_schema.deprecation_message):
report_issue(
MarkedIssue(
Issue.WARNING,
'Deprecated parameter: section "%s" name '
'"%s". %s' %
(section_name, parameter.name.text,
parameter_schema.deprecation_message),
parameter.start_mark))
return config
class KeystoneComponent(OpenstackComponent):
component = 'keystone'
name = 'keystone'
class NovaApiComponent(OpenstackComponent):
component = 'nova'
name = 'nova-api'
@property
@memoized
def paste_config(self):
return self._parse_config_resources([self.paste_config_file])
@property
def all_issues(self):
result = super(NovaApiComponent, self).all_issues
if hasattr(self, 'paste_config_file') and self.paste_config_file:
result.extend(self.paste_config_file.all_issues)
return result
class NovaComputeComponent(OpenstackComponent):
component = 'nova'
name = 'nova-compute'
class NovaSchedulerComponent(OpenstackComponent):
component = 'nova'
name = 'nova-scheduler'
class CinderApiComponent(OpenstackComponent):
component = 'cinder'
name = 'cinder-api'
class CinderVolumeComponent(OpenstackComponent):
component = 'cinder'
name = 'cinder-volume'
class CinderSchedulerComponent(OpenstackComponent):
component = 'cinder'
name = 'cinder-scheduler'
class MysqlComponent(Service):
component = 'mysql'
name = 'mysql'
class RabbitMqComponent(Service):
name = 'rabbitmq'
@property
@memoized
def config(self):
config = Configuration()
schema = ConfigSchemaRegistry.get_schema('rabbitmq', Version(1000000))
if schema:
for parameter in schema.parameters:
                if parameter.default is None:
                    continue
config.set_default(parameter.name, parameter.default)
else:
print("RabbitMQ schema not found")
return config
class GlanceApiComponent(OpenstackComponent):
component = 'glance_api'
name = 'glance-api'
class GlanceRegistryComponent(OpenstackComponent):
component = 'glance_registry'
name = 'glance-registry'
class NeutronServerComponent(OpenstackComponent):
component = 'neutron_server'
name = 'neutron-server'
class NeutronOpenvswitchAgentComponent(OpenstackComponent):
component = 'neutron_openvswitch_agent'
name = 'neutron-openvswitch-agent'
class NeutronDhcpAgentComponent(OpenstackComponent):
component = 'neutron_dhcp_agent'
name = 'neutron-dhcp-agent'
class NeutronL3AgentComponent(OpenstackComponent):
component = 'neutron_l3_agent'
name = 'neutron-l3-agent'
class NeutronMetadataAgentComponent(OpenstackComponent):
component = 'neutron_metadata_agent'
name = 'neutron-metadata-agent'
class SwiftProxyServerComponent(OpenstackComponent):
component = 'swift_proxy_server'
name = 'swift-proxy-server'
class SwiftContainerServerComponent(OpenstackComponent):
component = 'swift_container_server'
name = 'swift-container-server'
class SwiftAccountServerComponent(OpenstackComponent):
component = 'swift_account_server'
name = 'swift-account-server'
class SwiftObjectServerComponent(OpenstackComponent):
component = 'swift_object_server'
name = 'swift-object-server'
class FileSystemResource(Resource):
def __init__(self, path, owner, group, permissions):
super(FileSystemResource, self).__init__()
self.path = path
self.owner = owner
self.group = group
self.permissions = permissions
def __str__(self):
return '%s "%s"' % (
self.__class__.__name__.split('.')[-1].replace('Resource', ''),
self.path)
def __repr__(self):
return (
'%s(path=%s, owner=%s, group=%s, permissions=%s)' %
(self.__class__.__name__.split('.')[-1], repr(self.path),
repr(self.owner), repr(self.group), repr(self.permissions))
)
class FileResource(FileSystemResource):
def __init__(self, path, contents, owner, group, permissions):
super(FileResource, self).__init__(
path, owner, group, permissions)
self.contents = contents
class DirectoryResource(FileSystemResource):
pass

View File

@@ -1,614 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import re
import yaml
from rubick.common import Issue, MarkedIssue, Mark, Version, find, index
from rubick.exceptions import RubickException
class SchemaError(RubickException):
pass
class ConfigSchemaLoader(object):
db_path = os.path.join(os.path.dirname(__file__), 'schemas')
def load(self, project, configname):
path = os.path.join(self.db_path, project, configname + '.yml')
if not os.path.exists(path):
return None
with open(path) as f:
records = yaml.load(f.read())
return records
class ConfigSchemaRegistry:
@classmethod
    def get_schema(cls, project, version, configname=None,
                   schema_loader=ConfigSchemaLoader()):
if not configname:
configname = '%s.conf' % project
fullname = '%s/%s' % (project, configname)
version = Version(version)
records = schema_loader.load(project, configname)
if not records:
return None
i = len(records) - 1
# Find latest checkpoint prior given version
while i >= 0 and not (records[i].get('checkpoint', False)
and Version(records[i]['version']) <= version):
i -= 1
if i < 0:
if Version(records[0]['version']) > version:
# Reached the earliest record yet haven't found version
return None
            # Haven't found checkpoint but earliest version is less than given
# Assuming first record is checkpoint
i = 0
parameters = []
seen_parameters = set()
last_version = None
while i < len(records) and Version(records[i]['version']) <= version:
last_version = records[i]['version']
for param_data in records[i].get('added', []):
name = param_data['name']
section = None
if '.' in name:
section, name = name.split('.', 1)
param = ConfigParameterSchema(
name, param_data['type'], section=section,
type_args=param_data.get('type_args', {}),
default=param_data.get('default', None),
description=param_data.get('help', None),
required=param_data.get('required', False),
deprecation_message=param_data.get('deprecated', None))
if param.name in seen_parameters:
old_param_index = index(
parameters,
lambda p: p.name == param.name)
if old_param_index != -1:
parameters[old_param_index] = param
else:
parameters.append(param)
seen_parameters.add(param.name)
for param_name in records[i].get('removed', []):
param_index = index(
parameters,
lambda p: p.name == param_name)
                if param_index != -1:
parameters.pop(param_index)
seen_parameters.discard(param_name)
i += 1
return ConfigSchema(fullname, last_version, 'ini', parameters)
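# Illustrative sketch (an addition, not original Rubick code) of how the
# record walk above resolves a schema: start from the latest checkpoint at or
# before the requested version, then replay the 'added'/'removed' deltas.
# The records below are hypothetical; real ones come from the per-project
# YAML files loaded by ConfigSchemaLoader.
def _demo_schema_resolution():
    records = [
        {'version': '1.0', 'checkpoint': True,
         'added': [{'name': 'bind_host', 'type': 'host'}], 'removed': []},
        {'version': '1.1',
         'added': [{'name': 'bind_port', 'type': 'port'}], 'removed': []},
        {'version': '2.0', 'added': [], 'removed': ['bind_host']},
    ]
    # Resolve version 1.1: replay the 1.0 checkpoint plus the 1.1 delta
    parameters = {}
    for record in records:
        if Version(record['version']) > Version('1.1'):
            break
        for param in record.get('added', []):
            parameters[param['name']] = param
        for name in record.get('removed', []):
            parameters.pop(name, None)
    assert sorted(parameters) == ['bind_host', 'bind_port']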
def param_fullname(name, section=None):
fullname = name
if section and section != 'DEFAULT':
fullname = '%s.%s' % (section, name)
return fullname
class ConfigSchema:
def __init__(self, name, version, format, parameters):
self.name = name
self.version = Version(version)
self.format = format
self.parameters = parameters
self._parameterByName = {}
for param in self.parameters:
self._parameterByName[param.fullname] = param
def has_section(self, section):
return (
find(self.parameters, lambda p: p.section == section) is not None
)
def get_parameter(self, name, section=None):
fullname = param_fullname(name, section)
return self._parameterByName.get(fullname, None)
def __len__(self):
return len(self.parameters)
def __iter__(self):
for param in self.parameters:
yield param
def __getitem__(self, key):
return self.get_parameter(key)
def __contains__(self, item):
return item in self._parameterByName
def __repr__(self):
return ('<ConfigSchema name=%s version=%s format=%s parameters=%s>' %
(self.name, self.version, self.format, self.parameters))
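# Illustrative usage of the lookup helpers above (hypothetical parameters;
# ConfigParameterSchema itself is defined just below):
def _demo_config_schema_lookup():
    schema = ConfigSchema('nova/nova.conf', '2013.2.1', 'ini', [
        ConfigParameterSchema('bind_port', 'port'),
        ConfigParameterSchema('auth_host', 'host',
                              section='keystone_authtoken'),
    ])
    assert 'bind_port' in schema
    assert schema.has_section('keystone_authtoken')
    param = schema.get_parameter('auth_host', section='keystone_authtoken')
    assert param.type == 'host'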
class ConfigParameterSchema:
def __init__(self, name, type, type_args={}, section=None, description=None,
default=None, required=False, deprecation_message=None):
self.section = section or 'DEFAULT'
self.name = name
self.type = type
self.type_args = type_args
self.fullname = param_fullname(name, section)
self.description = description
self.default = default
self.required = required
self.deprecation_message = deprecation_message
def __repr__(self):
return (
'<ConfigParameterSchema %s>' % ' '.join(
['%s=%s' % (attr, getattr(self, attr))
for attr in ['section', 'name', 'type', 'description',
'default', 'required']])
)
class TypeValidatorRegistry:
__validators = {}
__default_validator = None
@classmethod
    def register_validator(cls, type_name, type_validator, default=False):
        cls.__validators[type_name] = type_validator
        if default:
            cls.__default_validator = type_name
    @classmethod
    def get_validator(cls, name):
        return cls.__validators.get(
            name, cls.__validators[cls.__default_validator])
class SchemaIssue(Issue):
def __init__(self, message):
super(SchemaIssue, self).__init__(Issue.ERROR, message)
class InvalidValueError(MarkedIssue):
def __init__(self, message, mark=Mark('', 0, 0)):
super(InvalidValueError, self).__init__(
Issue.ERROR, 'Invalid value: ' + message, mark)
class TypeValidator(object):
def __init__(self, base_type, f):
super(TypeValidator, self).__init__()
self.base_type = base_type
self.f = f
def validate(self, value, **kwargs):
if value is None:
return value
        return self.f(value, **kwargs)
def type_validator(name, base_type=None, default=False, **kwargs):
if not base_type:
base_type = name
def wrap(fn):
def wrapped(s, **immediate_kwargs):
return fn(s, **dict(kwargs, **immediate_kwargs))
o = TypeValidator(base_type, wrapped)
TypeValidatorRegistry.register_validator(name, o, default=default)
return fn
return wrap
def isissue(o):
return isinstance(o, Issue)
@type_validator('boolean')
def validate_boolean(s):
if isinstance(s, bool):
return s
s = s.lower()
if s == 'true':
return True
elif s == 'false':
return False
else:
return InvalidValueError('Value should be "true" or "false"')
@type_validator('enum')
def validate_enum(s, values=[]):
if s in values:
return None
if len(values) == 0:
message = 'There should be no value, but found %s' % repr(s)
    elif len(values) == 1:
        message = 'The only valid value is %s, but found %s' % (
            repr(values[0]), repr(s))
else:
message = 'Valid values are %s and %s, but found %s' % (
', '.join([repr(v) for v in values[:-1]]),
repr(values[-1]), repr(s))
return InvalidValueError('%s' % message)
def validate_ipv4_address(s):
s = s.strip()
parts = s.split('.')
if len(parts) == 4:
if all([all([c.isdigit() for c in part]) for part in parts]):
parts = [int(part) for part in parts]
if all([part < 256 for part in parts]):
return '.'.join([str(part) for part in parts])
return InvalidValueError('Value should be ipv4 address')
def validate_ipv4_network(s):
s = s.strip()
parts = s.split('/')
if len(parts) != 2:
return (
InvalidValueError(
'Should have "/" character separating address and prefix '
'length')
)
address, prefix = parts
prefix = prefix.strip()
if prefix.strip() == '':
return InvalidValueError('Prefix length is required')
address = validate_ipv4_address(address)
if isissue(address):
return address
if not all([c.isdigit() for c in prefix]):
return InvalidValueError('Prefix length should be an integer')
prefix = int(prefix)
if prefix > 32:
return (
InvalidValueError(
'Prefix length should be less than or equal to 32')
)
return '%s/%d' % (address, prefix)
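# Expected behaviour of the two address helpers above, sketched as
# assertions (an illustrative addition, not original Rubick code):
def _demo_ipv4_validation():
    assert validate_ipv4_address(' 10.0.0.1 ') == '10.0.0.1'
    assert isissue(validate_ipv4_address('10.0.0.256'))
    assert validate_ipv4_network('10.0.0.0/8') == '10.0.0.0/8'
    assert isissue(validate_ipv4_network('10.0.0.0'))  # no prefix length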
def validate_host_label(s):
if len(s) == 0:
return InvalidValueError(
'Host label should have at least one character')
if not s[0].isalpha():
return InvalidValueError(
'Host label should start with a letter, but it starts with '
'"%s"' % s[0])
if len(s) == 1:
return s
if not (s[-1].isdigit() or s[-1].isalpha()):
return InvalidValueError(
'Host label should end with letter or digit, but it ends '
'with "%s"' %
s[-1], Mark('', 0, len(s) - 1))
if len(s) == 2:
return s
for i, c in enumerate(s[1:-1]):
if not (c.isalpha() or c.isdigit() or c == '-'):
return InvalidValueError(
                'Host label should contain only letters, digits or hyphens,'
' but it contains "%s"' %
c, Mark('', 0, i + 1))
return s
@type_validator('host', base_type='string')
@type_validator('host_address', base_type='string')
@type_validator('old_network', base_type='string')
def validate_host_address(s):
result = validate_ipv4_address(s)
if not isissue(result):
return result
offset = len(s) - len(s.lstrip())
parts = s.strip().split('.')
part_offset = offset
labels = []
for part in parts:
host_label = validate_host_label(part)
if isissue(host_label):
return host_label.offset_by(Mark('', 0, part_offset))
part_offset += len(part) + 1
labels.append(host_label)
return '.'.join(labels)
@type_validator('network', base_type='string')
@type_validator('network_address', base_type='string')
def validate_network_address(s):
return validate_ipv4_network(s)
@type_validator('network_mask', base_type='string')
def validate_network_mask(s):
# TODO(someone): implement proper checking
result = validate_ipv4_address(s)
if isissue(result):
return result
parts = [int(p) for p in result.split('.', 3)]
x = index(parts, lambda p: p != 255)
if x == -1:
return result
if parts[x] not in [0, 128, 192, 224, 240, 248, 252, 254]:
return InvalidValueError('Invalid netmask')
    x += 1
    while x < 4:
        if parts[x] != 0:
            return InvalidValueError('Invalid netmask')
        x += 1
return result
@type_validator('host_and_port', base_type='string')
def validate_host_and_port(s, default_port=None):
    parts = s.strip().split(':', 1)
host_address = validate_host_address(parts[0])
if isissue(host_address):
return host_address
if len(parts) == 2:
port = validate_port(parts[1])
if isissue(port):
return port
elif default_port:
port = default_port
else:
return InvalidValueError('No port specified')
return (host_address, port)
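# Sketch of the host-and-port parsing above (illustrative addition):
def _demo_host_and_port():
    assert validate_host_and_port('192.168.0.1:8080') == ('192.168.0.1', 8080)
    assert validate_host_and_port('example.org',
                                  default_port=80) == ('example.org', 80)
    assert isissue(validate_host_and_port('example.org'))  # no port anywhere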
@type_validator('string', base_type='string', default=True)
@type_validator('list', base_type='list')
@type_validator('multi', base_type='multi')
@type_validator('file', base_type='string')
@type_validator('directory', base_type='string')
@type_validator('host_v6', base_type='string')
def validate_string(s):
return s
@type_validator('regex', base_type='string')
@type_validator('regexp', base_type='string')
def validate_regex(s):
try:
re.compile(s)
except re.error as e:
return InvalidValueError(str(e))
return s
@type_validator('integer')
def validate_integer(s, min=None, max=None):
if isinstance(s, int):
return s
leading_whitespace_len = 0
while leading_whitespace_len < len(s) \
and s[leading_whitespace_len].isspace():
leading_whitespace_len += 1
s = s.strip()
if s == '':
return InvalidValueError('Should not be empty')
for i, c in enumerate(s):
if not c.isdigit() and not ((c == '-') and (i == 0)):
return (
InvalidValueError(
'Only digits are allowed, but found char "%s"' %
c, Mark('', 1, i + 1 + leading_whitespace_len))
)
v = int(s)
if min and v < min:
return (
InvalidValueError(
'Should be greater than or equal to %d' %
min, Mark('', 1, leading_whitespace_len))
)
if max and v > max:
return (
InvalidValueError(
'Should be less than or equal to %d' %
max, Mark('', 1, leading_whitespace_len))
)
return v
@type_validator('file_mode')
def validate_file_mode(s):
return validate_integer(s)
@type_validator('float')
def validate_float(s):
if isinstance(s, float):
return s
# TODO(someone): Implement proper validation
return float(s)
@type_validator('port', base_type='integer')
def validate_port(s, min=1, max=65535):
return validate_integer(s, min=min, max=max)
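# Hypothetical example of registering a project-specific type through the
# type_validator decorator defined above; 'positive_integer' is not a real
# Rubick type, it only shows the registration pattern.
@type_validator('positive_integer', base_type='integer')
def validate_positive_integer(s):
    return validate_integer(s, min=1)
# TypeValidatorRegistry.get_validator('positive_integer').validate('5') -> 5
# TypeValidatorRegistry.get_validator('positive_integer').validate('0')
#     -> InvalidValueError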
def validate_list(s, element_type):
if isinstance(s, list):
return s
element_type_validator = TypeValidatorRegistry.get_validator(element_type)
if not element_type_validator:
return SchemaIssue('Invalid element type "%s"' % element_type)
result = []
s = s.strip()
if s == '':
return result
values = s.split(',')
while len(values) > 0:
value = values.pop(0)
while True:
validated_value = element_type_validator.validate(value.strip())
if not isinstance(validated_value, Issue):
break
if len(values) == 0:
# TODO(someone): provide better position reporting
return validated_value
            value += ',' + values.pop(0)
result.append(validated_value)
return result
@type_validator('string_list', base_type='list')
def validate_string_list(s):
return validate_list(s, element_type='string')
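# Illustrative behaviour of the list validators above (sketch):
def _demo_string_list():
    assert validate_string_list('a, b, c') == ['a', 'b', 'c']
    assert validate_string_list('') == []
    assert validate_string_list(['x']) == ['x']  # parsed values pass through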
@type_validator('string_dict', base_type='multi')
def validate_dict(s, element_type='string'):
if isinstance(s, dict):
return s
element_type_validator = TypeValidatorRegistry.get_validator(element_type)
if not element_type_validator:
return SchemaIssue('Invalid element type "%s"' % element_type)
result = {}
s = s.strip()
if s == '':
return result
pairs = s.split(',')
for pair in pairs:
        key_value = pair.split(':', 1)
if len(key_value) < 2:
return (
InvalidValueError(
'Value should be NAME:VALUE pairs separated by ","')
)
key, value = key_value
key = key.strip()
value = value.strip()
if key == '':
# TODO(someone): provide better position reporting
return InvalidValueError('Key name should not be empty')
validated_value = element_type_validator.validate(value)
if isinstance(validated_value, Issue):
# TODO(someone): provide better position reporting
return validated_value
result[key] = validated_value
return result
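# Illustrative behaviour of validate_dict above (sketch):
def _demo_string_dict():
    assert validate_dict('az1:host1, az2:host2') == {'az1': 'host1',
                                                     'az2': 'host2'}
    assert isissue(validate_dict('missing-separator'))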
@type_validator('rabbitmq_bind', base_type='string')
def validate_rabbitmq_bind(s):
    m = re.match(r'\d+', s)
if m:
port = validate_port(s)
if isinstance(port, Issue):
return port
return ('0.0.0.0', port)
m = re.match('{\s*\"(.+)\"\s*,\s*(\d+)\s*}', s)
if m:
host = validate_host_address(m.group(1))
port = validate_port(m.group(2))
if isinstance(host, Issue):
return host
if isinstance(port, Issue):
return port
return (host, port)
return SchemaIssue("Unrecognized bind format")
def validate_rabbitmq_list(s, element_type):
if isinstance(s, list):
return s
if not (s.startswith('[') and s.endswith(']')):
return SchemaIssue('List should be surrounded by [ and ]')
return validate_list(s[1:-1], element_type=element_type)
@type_validator('rabbitmq_bind_list', base_type='list')
def validate_rabbitmq_bind_list(s):
return validate_rabbitmq_list(s, element_type='rabbitmq_bind')

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,482 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from copy import copy
from lib2to3.pgen2 import driver
from lib2to3.pgen2 import token
from lib2to3.pygram import python_grammar, python_symbols as py
from lib2to3.pytree import Node, Leaf
import os
import re
import sys
import traceback
from oslo.config import cfg
from rubick.schemas.yaml_utils import yaml_string, yaml_value
def identity(x):
return x
# Provide a no-op gettext-style '_' so option help strings survive the
# exec of extracted module code below
__builtins__._ = identity
class SchemaBuilderSchemaWriter(object):
def __init__(self, file, project, version):
super(SchemaBuilderSchemaWriter, self).__init__()
self.file = file
self.project = project
self.version = version
self._started = False
self._conf_variable = '%s_%s' % (self.project,
self.version.replace('.', '_'))
def _ensure_header(self):
if not self._started:
self._output_header()
self._started = True
def _output_header(self):
self.file.write("""from rubick.schema import ConfigSchemaRegistry
{0} = ConfigSchemaRegistry.register_schema(project='{0}')
with {0}.version('{1}') as {2}:""".format(self.project, self.version,
self._conf_variable))
def section(self, name):
self._ensure_header()
self.file.write("\n\n %s.section('%s')" % (
self._conf_variable, name))
def param(self, name, type, default_value=None, description=None):
self._ensure_header()
self.file.write("\n\n %s.param('%s', type='%s', default=%s" % (
self._conf_variable, name, type, repr(default_value)))
if description:
self.file.write(", description=\"%s\"" % (
description.replace('"', '\'')))
self.file.write(")")
def comment(self, text):
self.file.write("\n\n # %s" % text)
class YamlSchemaWriter(object):
def __init__(self, file, project, version):
super(YamlSchemaWriter, self).__init__()
self.file = file
self.project = project
self.version = version
self._output_header()
def _output_header(self):
self.file.write("project: %s\n" % self.project)
self.file.write("version: %s\n" % self.version)
self.file.write("parameters:\n")
def section(self, name):
self._current_section = name
def param(self, name, type, default_value=None, description=None):
fullname = name
if self._current_section and self._current_section != 'DEFAULT':
fullname = '%s.%s' % (self._current_section, name)
self.file.write(" - name: %s\n"
% yaml_string(fullname, allowSimple=True))
self.file.write(" type: %s\n" % yaml_string(type, allowSimple=True))
self.file.write(" default: %s\n" % yaml_value(default_value))
if description:
self.file.write(" help: %s\n" % yaml_string(description))
self.file.write("\n")
def comment(self, text):
self.file.write("\n# %s\n" % text)
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('project',
help='Name of the project (e.g. "nova")')
parser.add_argument('version',
help='Version of the project (e.g. "2013.1.3")')
parser.add_argument('config_or_module',
help='Config file sample or Python module to process')
args = parser.parse_args(argv[1:])
return args
def sanitize_type_and_value(param_name, param_type, param_value):
if param_value == '<None>':
param_value = None
elif param_type == 'boolean':
if param_value.lower() == 'false':
param_value = False
elif param_value.lower() == 'true':
param_value = True
elif param_type == 'integer':
param_value = int(param_value)
if param_name.endswith('_port'):
param_type = 'port'
elif param_type == 'float':
param_value = float(param_value)
elif param_type == 'list':
param_type = 'string_list'
if param_value == '':
param_value = []
else:
param_value = param_value.split(',')
elif (param_type == 'string' and
param_name.endswith('_host') and
param_value in ['0.0.0.0', 'localhost', '127.0.0.1']):
param_type = 'host'
elif param_type == 'string' and param_name.endswith('_listen'):
param_type = 'host'
return (param_type, param_value)
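# A few illustrative sanitizations (a sketch of the rules above):
def _demo_sanitize():
    assert sanitize_type_and_value('osapi_port', 'integer', '8774') == \
        ('port', 8774)
    assert sanitize_type_and_value('debug', 'boolean', 'False') == \
        ('boolean', False)
    assert sanitize_type_and_value('backends', 'list', 'a,b') == \
        ('string_list', ['a', 'b'])
    assert sanitize_type_and_value('my_host', 'string', '0.0.0.0') == \
        ('host', '0.0.0.0')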
def generate_schema_from_sample_config(project, version, config_file, writer):
with open(config_file, 'r') as f:
config_lines = f.readlines()
description_lines = []
for line in config_lines:
if line.startswith('['):
section_name = line.strip('[]\n')
writer.section(section_name)
description_lines = []
continue
if line.strip() in ['', '#']:
description_lines = []
continue
if line.startswith('# '):
description_lines.append(line[2:].strip())
continue
description = ' '.join(description_lines)
        match = re.search(r'^(.*)\((.*?) value\)$', description)
if match:
description = match.group(1)
param_type = match.group(2).strip()
if param_type == 'floating point':
param_type = 'float'
else:
param_type = 'string'
line = line.strip('#\n')
param_name, param_value = [
s.strip() for s in re.split('[:=]', line, 1)]
(param_type, param_value) = \
sanitize_type_and_value(param_name, param_type, param_value)
writer.param(param_name, param_type, param_value, description)
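# End-to-end sketch of the sample-config path above. The file path and
# contents are hypothetical; the '(integer value)' comment suffix selects the
# type, and sanitize_type_and_value() then refines it to 'port' via the
# '_port' name suffix.
def _demo_sample_config(path='/tmp/demo.conf'):
    with open(path, 'w') as f:
        f.write('[DEFAULT]\n'
                '# Port to bind the API server to (integer value)\n'
                '#bind_port=8774\n')
    writer = YamlSchemaWriter(sys.stdout, 'demo', '1.0')
    generate_schema_from_sample_config('demo', '1.0', path, writer)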
OPT_TYPE_MAPPING = {
'StrOpt': 'string',
'BoolOpt': 'boolean',
'IntOpt': 'integer',
'FloatOpt': 'float',
'ListOpt': 'list',
'MultiStrOpt': 'multi'
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join(OPT_TYPE_MAPPING.keys()))
def convert(gr, raw_node):
type, value, context, children = raw_node
# if has children or correspond to nonterminal
if children or type in gr.number2symbol:
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
def walk_tree(root):
while True:
yield root
# Optimize traversing single-child nodes
if len(root.children) == 1:
root = root.children[0]
continue
break
for child in copy(root.children):
for node in walk_tree(child):
yield node
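# Small sketch of walk_tree on a parsed module, using the same driver setup
# as extract_config_from_file below (illustrative addition):
def _demo_walk_tree():
    d = driver.Driver(python_grammar, convert=convert)
    tree = d.parse_string('x = 1\n')
    names = [n.value for n in walk_tree(tree)
             if isinstance(n, Leaf) and n.type == token.NAME]
    assert names == ['x']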
def extract_config_from_file(path):
with open(path) as f:
contents = f.read()
d = driver.Driver(python_grammar, convert=convert)
tree = d.parse_string(contents)
def mark_stmt(node):
n = node
while n:
if n.type == py.stmt:
n.marked = True
break
n = n.parent
fullnames = {}
# Process imports and renames
for node in walk_tree(tree):
if node.type == py.import_from:
mod = str(node.children[1]).strip()
for node2 in walk_tree(node.children[3]):
if node2.type == py.import_as_name:
n = str(node2).strip()
f = '.'.join([mod, n])
fullnames[n] = f
elif node.type == py.expr_stmt:
if len(node.children) > 1 and node.children[1].type == token.EQUAL:
lhs = str(node.children[0]).strip()
rhs = str(node.children[2]).strip()
                if re.match(r'\S+(\.\S+)*', rhs):
parts = rhs.split('.')
if parts[0] in fullnames:
rhs = '.'.join([fullnames[parts[0]]] + parts[1:])
fullnames[lhs] = rhs
if any([rhs.startswith(s) for s in ['oslo.', 'oslo.config.', 'oslo.config.cfg.']]):
mark_stmt(node)
# Process all callsites CONF.register*
for node in walk_tree(tree):
if node.type == py.power and node.children[0].children[0].type == token.NAME:
s = str(node.children[0]).strip()
if s in fullnames:
s = fullnames[s]
cs = node.children
i = 1
while i < len(cs) and cs[i].type == py.trailer:
c = cs[i]
if c.children[0].type != token.DOT:
break
s += '.' + c.children[1].value
i += 1
if i < len(cs) and cs[i].type == py.trailer and cs[i].children[0].type == token.LPAR:
# call site
if s.startswith('oslo.config.cfg.CONF.'):
rest = s[len('oslo.config.cfg.CONF.'):]
if rest.startswith('register_'):
mark_stmt(node)
if s.startswith('oslo.config.cfg.'):
rest = s[len('oslo.config.cfg.'):]
if rest.endswith('Opt'):
mark_stmt(node)
# Traverse code and find all var references
seen_vars = set()
referenced_vars_queue = []
def find_definition(tree, name):
for node in walk_tree(tree):
if node.type == py.classdef and node.children[1].value == name:
return node
elif node.type == py.funcdef and node.children[1].value == name:
return node
elif node.type == py.import_name:
imported_name = str(node.children[1]).strip()
if imported_name == name:
return node
elif node.type == py.import_from:
for n in walk_tree(node):
if n.type == py.import_as_name:
i = 0
if len(n.children) == 3:
i = 2
if n.children[i].value == name:
return node
elif node.type == py.expr_stmt:
if len(node.children) > 1 and node.children[1].type == token.EQUAL:
for n in walk_tree(node):
if n.type == py.power:
assignment_name = str(n.children[0]).strip()
if assignment_name == name:
return node
return None
def collect_refs(root):
for n2 in walk_tree(root):
if n2.type == py.power and n2.children[0].children[0].type == token.NAME:
name = n2.children[0].children[0].value
x = 1
while (x < len(n2.children) and
n2.children[x].type == py.trailer and
n2.children[x].children[0].type == token.DOT):
name += str(n2.children[x]).strip()
x += 1
if '.' not in name:
isKWArgName = False
n = n2
while n.parent:
if n.parent.type == py.argument:
arg = n.parent
if len(arg.children) > 1 and arg.children[1].type == token.EQUAL and n == arg.children[0]:
isKWArgName = True
n = n.parent
if isKWArgName:
continue
if name in dir(__builtins__):
continue
if name not in seen_vars:
seen_vars.add(name)
referenced_vars_queue.append(name)
for node in tree.children:
if node.type == py.stmt and (hasattr(node, 'marked') and node.marked):
collect_refs(node)
for name in referenced_vars_queue:
node = find_definition(tree, name)
if node:
mark_stmt(node)
collect_refs(node)
else:
while '.' in name:
name = '.'.join(name.split('.')[:-1])
node = find_definition(tree, name)
if node:
mark_stmt(node)
collect_refs(node)
# Remove all unmarked top-level statements
for node in walk_tree(tree):
if node.type == py.stmt and node.parent.type == py.file_input:
if not (hasattr(node, 'marked') and node.marked):
node.remove()
code = str(tree)
try:
exec code in {'__file__': path}
except Exception:
sys.stderr.write("Error processing file %s\n" % path)
traceback.print_exc()
sys.stderr.write(code)
def generate_schema_from_code(project, version, module_path, writer):
old_sys_path = copy(sys.path)
filepaths = []
module_directory = ''
if os.path.isdir(module_path):
module_directory = module_path
while module_directory != '':
# TODO(mkulkin): handle .pyc and .pyo
if not os.path.isfile(
os.path.join(module_directory, '__init__.py')):
break
module_directory = os.path.dirname(module_directory)
if module_directory not in sys.path:
sys.path.insert(0, module_directory)
for (dirpath, _, filenames) in os.walk(module_path):
for filename in filenames:
if not filename.endswith('.py'):
continue
filepath = os.path.join(dirpath, filename)
with open(filepath) as f:
content = f.read()
                if not re.search(r'Opt\(', content):
continue
filepaths.append(filepath)
else:
filepaths.append(module_path)
for filepath in filepaths:
extract_config_from_file(filepath)
print_group_opts(writer, 'DEFAULT', cfg.CONF._opts.values())
for group_name in cfg.CONF._groups:
print_group_opts(writer, group_name, cfg.CONF._groups[group_name]._opts.values())
sys.path = old_sys_path
def print_group_opts(writer, group, opts):
writer.section(group)
for opt in opts:
print_opt(writer, opt['opt'])
def print_opt(writer, opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPT_TYPE_MAPPING.get(
OPTION_REGEX.search(str(type(opt))).group(0))
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
opt_type = 'string'
writer.param(opt_name, opt_type, opt_default, opt_help)
def main(argv):
args = parse_args(argv)
params = vars(args)
project = params.pop('project')
version = params.pop('version')
path = params.pop('config_or_module')
writer = YamlSchemaWriter(sys.stdout, project, version)
if os.path.isdir(path) or path.endswith('.py'):
generate_schema_from_code(project, version, path,
writer=writer)
else:
generate_schema_from_sample_config(project, version, path,
writer=writer)
if __name__ == '__main__':
main(sys.argv)

View File

@@ -1,307 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import OrderedDict
import glob
import logging
import os.path
import yaml
from rubick.common import index, Version, Issue
from rubick.schema import TypeValidatorRegistry as TypeRegistry
from rubick.schemas.yaml_utils import yaml_string, yaml_value
DIFF_THRESHOLD = 0.5
logger = logging.getLogger('rubick.schemas.generator')
def yaml_dump_schema_records(records):
lines = []
for record in records:
if len(record['added']) == 0 and len(record['removed']) == 0:
continue
lines.append('- version: %s' % yaml_string(str(record['version'])))
if 'checkpoint' in record:
lines.append(' checkpoint: %s' % yaml_value(record['checkpoint']))
if 'added' in record and len(record['added']) > 0:
lines.append(' added:')
for param in record['added']:
lines.append('')
lines.append(' - name: %s' % yaml_string(param['name'],
allowSimple=True))
lines.append(' type: %s' % yaml_string(param['type'],
allowSimple=True))
if 'default' in param:
lines.append(' default: %s'
% yaml_value(param['default']))
if 'help' in param:
lines.append(' help: %s'
% yaml_string(param['help']))
extra_data = [k for k in param.keys()
if k not in ['name', 'type', 'default', 'help']]
for attr in extra_data:
lines.append(' %s: %s'
% (attr, yaml_value(param[attr])))
if 'removed' in record and len(record['removed']) > 0:
lines.append(' removed:')
for removed in record['removed']:
lines.append(' - %s' % yaml_string(removed, allowSimple=True))
lines.append('')
lines.append('# ====================================================')
lines.append('')
return "\n".join(lines)
def generate_project_schema(project):
logger.info('Processing project %s' % project)
project_path = os.path.join(os.path.dirname(__file__), project)
files = glob.glob(os.path.join(project_path, '*.yml'))
if files == []:
logger.info("Found no YAML files in project %s. Skipping it" % project)
return
x = index(files, lambda f: f.endswith('.conf.yml'))
if x != -1:
database_file = files[x]
del files[x]
else:
database_file = os.path.join(project_path, project + '.conf.yml')
schema_records = []
if os.path.exists(database_file):
logger.debug("Processing database file %s" % database_file)
with open(database_file) as f:
schema_records.extend(yaml.load(f.read()))
schema_versions = []
for version_file in files:
logger.debug("Processing version file %s" % version_file)
with open(version_file) as f:
schema_versions.append(yaml.load(f.read()))
schema_versions = sorted(schema_versions,
key=lambda s: Version(s['version']))
parameters = OrderedDict()
for schema in schema_versions:
added = []
seen = set()
logger.debug('Processing schema version %s' % schema['version'])
for param in schema['parameters']:
# TODO(mkulkin): reduce the level of nesting
prev_param = parameters.get(param['name'], None)
if not prev_param:
logger.debug('Parameter %s does not exist yet,'
' adding it as new'
% param['name'])
added.append(param)
else:
seen.add(param['name'])
if param['type'] != prev_param['type']:
validator = TypeRegistry.get_validator(prev_param['type'])
if param['type'] == validator.base_type:
param['type'] = prev_param['type']
if param.get('default', None) is not None:
type_args = param.get('type_args', {})
value = validator.validate(param['default'], **type_args)
if not isinstance(value, Issue):
param['default'] = value
else:
logger.error("In project '%s' version %s"
" default value for parameter"
" '%s' is not valid value of"
" type %s: %s"
% (project, schema['version'],
param['name'], param['type'],
repr(param['default'])))
else:
logger.debug('Parameter %s type has'
' changed from %s to %s' %
(param['name'], prev_param['type'],
param['type']))
param['comment'] = 'Type has changed'
added.append(param)
continue
if param.get('default', None) != \
prev_param.get('default', None):
logger.debug('Parameter %s default value'
' has changed from %s to %s' %
(param['name'], prev_param['default'],
param['default']))
param['comment'] = 'Default value has changed'
added.append(param)
continue
if param.get('help', None) != prev_param.get('help', None):
param['comment'] = 'Help string has changed'
added.append(param)
removed = [name for name in parameters.keys() if name not in seen]
if len(removed) > 0:
logger.debug('Following parameters from previous'
' schema version are not present in'
' current version, marking as removed: %s'
% ','.join(removed))
# Decide either to use full schema update or incremental
changes_count = sum(map(len, [added, removed]))
logger.debug('Found %d change(s) from previous version schema'
% changes_count)
if changes_count > int(len(parameters) * DIFF_THRESHOLD):
logger.debug('Using full schema update')
new_parameters = parameters.copy()
for param in added:
new_parameters[param['name']] = param
for name in removed:
del new_parameters[name]
new_schema_record = dict(version=schema['version'],
added=new_parameters.values(),
removed=[],
checkpoint=True)
else:
logger.debug('Using incremental schema update')
new_schema_record = dict(version=schema['version'],
added=added, removed=removed)
# Place schema record either replacing existing one or appending as new
old_schema_record_idx = index(schema_records, lambda r:
str(r['version']) ==
str(new_schema_record['version']))
if old_schema_record_idx != -1:
old_schema_record = schema_records[old_schema_record_idx]
# Collect information from existing records
old_schema_parameters = {}
for param in old_schema_record.get('added', []):
old_schema_parameters[param['name']] = param
for param in added:
old_param = old_schema_parameters.get(param['name'], None)
if not old_param:
param.setdefault('comment', 'New param')
continue
extra_data = [(k, v) for k, v in old_param.items()
if k not in ['name', 'type', 'default', 'help']]
param.update(extra_data)
validator = TypeRegistry.get_validator(old_param['type'])
if param['type'] not in [old_param['type'],
validator.base_type]:
param['comment'] = 'Type has changed'
# Type has changed, enforcing old type to prevent
# accidental data loss
param['type'] = old_param['type']
if 'default' in old_param:
param['default'] = old_param['default']
if param.get('default', None) is not None:
type_args = old_param.get('type_args', {})
value = validator.validate(old_param['default'], **type_args)
if not isinstance(value, Issue):
param['default'] = value
else:
logger.error("In project '%s' version %s default value"
" for parameter '%s' is not valid value"
" of type %s: %s" %
(project, schema['version'],
param['name'], param['type'],
repr(param['default'])))
if param.get('default', None) != old_param.get('default',
None):
param['comment'] = 'Default value has changed'
continue
logger.debug('Replacing schema record %s'
% repr(new_schema_record))
schema_records[old_schema_record_idx] = new_schema_record
else:
for param in added:
param.setdefault('comment', 'New param')
logger.debug('Appending schema record %s'
% repr(new_schema_record))
schema_records.append(new_schema_record)
# Update parameter info
for param in new_schema_record.get('added', []):
parameters[param['name']] = param
for name in new_schema_record.get('removed', []):
del parameters[name]
schema_records = sorted(schema_records,
key=lambda r: Version(r['version']))
with open(database_file, 'w') as f:
f.write(yaml_dump_schema_records(schema_records))
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--loglevel', default='INFO',
help='Loglevel to use')
parser.add_argument('projects', nargs='*',
help='Name of the projects (e.g. "nova")')
args = parser.parse_args(argv[1:])
return args
def main(argv):
args = parse_args(argv)
params = vars(args)
logging.basicConfig(level=params['loglevel'])
    if params['projects']:
        projects = params['projects']
    else:
projects = []
for project_path in glob.glob(os.path.join(os.path.dirname(__file__),
'*')):
if not os.path.isdir(project_path):
continue
projects.append(os.path.basename(project_path))
for project in projects:
generate_project_schema(project)
if __name__ == '__main__':
import sys
main(sys.argv)

View File

@@ -1,556 +0,0 @@
project: glance_api
version: '2013.2.1'
parameters:
- name: verbose
type: string
default: 'False'
help: 'Show more verbose log output (sets INFO log level output)'
- name: debug
type: string
default: 'False'
help: 'Show debugging output in logs (sets DEBUG log level output)'
- name: default_store
type: string
default: 'file'
help: "Which backend scheme should Glance use by default is not specified in a request to add a new image to Glance? Known schemes are determined by the known_stores option below. Default: 'file'"
- name: known_stores
type: string
default: 'glance.store.filesystem.Store,'
help: 'List of which store classes and store class locations are currently known to glance at startup.'
- name: image_size_cap
type: string
default: '1099511627776'
help: 'Maximum image size (in bytes) that may be uploaded through the Glance API server. Defaults to 1 TB. WARNING: this value should only be increased after careful consideration and must be set to a value under 8 EB (9223372036854775808).'
- name: bind_host
type: host
default: '0.0.0.0'
help: 'Address to bind the API server'
- name: bind_port
type: string
default: '9292'
help: 'Port the bind the API server to'
- name: log_file
type: string
default: '/var/log/glance/api.log'
help: 'Log to this file. Make sure you do not set the same log file for both the API and registry servers!'
- name: backlog
type: string
default: '4096'
help: 'Backlog requests when creating socket'
- name: tcp_keepidle
type: string
default: '600'
help: 'TCP_KEEPIDLE value in seconds when creating socket. Not supported on OS X.'
- name: sql_connection
type: string
default: 'sqlite:///glance.sqlite'
help: 'SQLAlchemy connection string for the reference implementation registry server. Any valid SQLAlchemy connection string is fine. See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine'
- name: sql_idle_timeout
type: string
default: '3600'
help: "MySQL uses a default `wait_timeout` of 8 hours, after which it will drop idle connections. This can result in 'MySQL Gone Away' exceptions. If you notice this, you can lower this value to ensure that SQLAlchemy reconnects before MySQL can drop the connection."
- name: workers
type: string
default: '1'
help: 'Number of Glance API worker processes to start. On machines with more than one CPU increasing this value may improve performance (especially if using SSL with compression turned on). It is typically recommended to set this value to the number of CPUs present on your machine.'
- name: admin_role
type: string
default: 'admin'
help: 'Role used to identify an authenticated user as administrator'
- name: allow_anonymous_access
type: string
default: 'False'
help: 'Allow unauthenticated users to access the API with read-only privileges. This only applies when using ContextMiddleware.'
- name: enable_v1_api
type: string
default: 'True'
help: 'Allow access to version 1 of glance api'
- name: enable_v2_api
type: string
default: 'True'
help: 'Allow access to version 2 of glance api'
- name: show_image_direct_url
type: string
default: 'False'
help: "Return the URL that references where the data is stored on the backend storage system. For example, if using the file system store a URL of 'file:///path/to/image' will be returned to the user in the 'direct_url' meta-data field. The default value is false."
- name: send_identity_headers
type: string
default: 'False'
help: 'Send headers containing user and tenant information when making requests to the v1 glance registry. This allows the registry to function as if a user is authenticated without the need to authenticate a user itself using the auth_token middleware. The default value is false.'
- name: container_formats
type: string
default: 'ami,ari,aki,bare,ovf'
help: "Supported values for the 'container_format' image attribute"
- name: disk_formats
type: string
default: 'ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso'
help: "Supported values for the 'disk_format' image attribute"
- name: lock_path
type: string
default: None
help: 'Directory to use for lock files. Default to a temp directory (string value). This setting needs to be the same for both glance-scrubber and glance-api.'
- name: property_protection_file
type: string
default: ''
help: "Property Protections config file This file contains the rules for property protections and the roles associated with it. If this config value is not specified, by default, property protections won't be enforced. If a value is specified and the file is not found, then an HTTPInternalServerError will be thrown."
- name: user_storage_quota
type: string
default: '0'
help: 'Set a system wide quota for every user. This value is the total number of bytes that a user can use across all storage systems. A value of 0 means unlimited.'
- name: use_syslog
type: string
default: 'False'
help: 'Send logs to syslog (/dev/log) instead of to file specified by `log_file`'
- name: syslog_log_facility
type: string
default: 'LOG_LOCAL0'
help: 'Facility to use. If unset defaults to LOG_USER.'
- name: cert_file
type: string
default: '/path/to/certfile'
help: 'Certificate file to use when starting API server securely'
- name: key_file
type: string
default: '/path/to/keyfile'
help: 'Private key file to use when starting API server securely'
- name: ca_file
type: string
default: '/path/to/cafile'
help: 'CA certificate file to use to verify connecting clients'
- name: metadata_encryption_key
type: string
default: '<16, 24 or 32 char registry metadata key>'
help: "AES key for encrypting store 'location' metadata, including -- if used -- Swift or S3 credentials Should be set to a random string of length 16, 24 or 32 bytes"
- name: registry_host
type: host
default: '0.0.0.0'
help: 'Address to find the registry server'
- name: registry_port
type: string
default: '9191'
help: 'Port the registry server is listening on'
- name: registry_client_protocol
type: string
default: 'http'
help: 'What protocol to use when connecting to the registry server? Set to https for secure HTTP communication'
- name: registry_client_key_file
type: string
default: '/path/to/key/file'
help: 'The path to the key file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file'
- name: registry_client_cert_file
type: string
default: '/path/to/cert/file'
help: 'The path to the cert file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file'
- name: registry_client_ca_file
type: string
default: '/path/to/ca/file'
help: 'The path to the certifying authority cert file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file'
- name: registry_client_insecure
type: string
default: 'False'
help: "When using SSL in connections to the registry server, do not require validation via a certifying authority. This is the registry's equivalent of specifying --insecure on the command line using glanceclient for the API Default: False"
- name: registry_client_timeout
type: string
default: '600'
help: "The period of time, in seconds, that the API server will wait for a registry request to complete. A value of '0' implies no timeout. Default: 600"
- name: db_auto_create
type: string
default: 'False'
help: 'Whether to automatically create the database tables. Default: False'
- name: sqlalchemy_debug
type: string
default: 'True'
help: 'Enable DEBUG log messages from sqlalchemy which prints every database query and response. Default: False'
- name: notifier_strategy
type: string
default: 'noop'
help: 'Notifications can be sent when images are create, updated or deleted. There are three methods of sending notifications, logging (via the log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid message queue), or noop (no notifications sent, the default)'
- name: rabbit_host
type: host
default: 'localhost'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_port
type: string
default: '5672'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_use_ssl
type: string
default: 'false'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_userid
type: string
default: 'guest'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_password
type: string
default: 'guest'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_virtual_host
type: string
default: '/'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_notification_exchange
type: string
default: 'glance'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_notification_topic
type: string
default: 'notifications'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_durable_queues
type: string
default: 'False'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: qpid_notification_exchange
type: string
default: 'glance'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_notification_topic
type: string
default: 'notifications'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_host
type: host
default: 'localhost'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_port
type: string
default: '5672'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_username
type: string
default: ''
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_password
type: string
default: ''
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_sasl_mechanisms
type: string
default: ''
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_timeout
type: string
default: '0'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_limit
type: string
default: '0'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_interval_min
type: string
default: '0'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_interval_max
type: string
default: '0'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_interval
type: string
default: '0'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_heartbeat
type: string
default: '5'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_protocol
type: string
default: 'tcp'
help: "Configuration options if sending notifications via Qpid (these are the defaults) Set to 'ssl' to enable SSL"
- name: qpid_tcp_nodelay
type: string
default: 'True'
help: "Configuration options if sending notifications via Qpid (these are the defaults) Set to 'ssl' to enable SSL"
- name: filesystem_store_datadir
type: string
default: '/var/lib/glance/images/'
help: 'Directory that the Filesystem backend store writes image data to'
- name: filesystem_store_metadata_file
type: string
default: 'None'
help: 'A path to a JSON file that contains metadata describing the storage system. When show_multiple_locations is True the information in this file will be returned with any location that is contained in this store.'
- name: swift_store_auth_version
type: string
default: '2'
help: "Version of the authentication service to use Valid versions are '2' for keystone and '1' for swauth and rackspace"
- name: swift_store_auth_address
type: string
default: '127.0.0.1:5000/v2.0/'
help: "Address where the Swift authentication service lives Valid schemes are 'http://' and 'https://' If no scheme specified, default to 'https://' For swauth, use something like '127.0.0.1:8080/v1.0/'"
- name: swift_store_user
type: string
default: 'jdoe:jdoe'
help: "User to authenticate against the Swift authentication service If you use Swift authentication service, set it to 'account':'user' where 'account' is a Swift storage account and 'user' is a user in that account"
- name: swift_store_key
type: string
default: 'a86850deb2742ec3cb41518e26aa2d89'
help: 'Auth key for the user authenticating against the Swift authentication service'
- name: swift_store_container
type: string
default: 'glance'
help: 'Container within the account that the account should use for storing images in Swift'
- name: swift_store_create_container_on_put
type: string
default: 'False'
help: 'Do we create the container if it does not exist?'
- name: swift_store_large_object_size
type: string
default: '5120'
help: 'What size, in MB, should Glance start chunking image files and do a large object manifest in Swift? By default, this is the maximum object size in Swift, which is 5GB'
- name: swift_store_large_object_chunk_size
type: string
default: '200'
help: 'When doing a large object manifest, what size, in MB, should Glance write chunks to Swift? This amount of data is written to a temporary disk buffer during the process of chunking the image file, and the default is 200MB'
- name: swift_enable_snet
type: string
default: 'False'
help: "To use ServiceNET for authentication, prefix hostname of `swift_store_auth_address` with 'snet-'. Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/"
- name: swift_store_multi_tenant
type: string
default: 'False'
help: 'If set to True enables multi-tenant storage mode which causes Glance images to be stored in tenant specific Swift accounts.'
- name: swift_store_admin_tenants
type: string
default: ''
help: 'A list of swift_proxy_server ACL strings that will be applied as both read and write ACLs to the containers created by Glance in multi-tenant mode. This grants the specified tenants/users read and write access to all newly created image objects. The standard swift_proxy_server ACL string formats are allowed, including: <tenant_id>:<username> <tenant_name>:<username> *:<username> Multiple ACLs can be combined using a comma separated list, for example: swift_store_admin_tenants = service:glance,*:admin'
- name: swift_store_region
type: string
default: ''
help: 'The region of the swift_proxy_server endpoint to be used for single tenant. This setting is only necessary if the tenant has multiple swift_proxy_server endpoints.'
- name: swift_store_ssl_compression
type: string
default: 'True'
help: "If set to False, disables SSL layer compression of https swift_proxy_server requests. Setting to 'False' may improve performance for images which are already in a compressed format, eg qcow2. If set to True, enables SSL layer compression (provided it is supported by the target swift_proxy_server proxy)."
- name: s3_store_host
type: string
default: '127.0.0.1:8080/v1.0/'
help: "Address where the S3 authentication service lives Valid schemes are 'http://' and 'https://' If no scheme specified, default to 'http://'"
- name: s3_store_access_key
type: string
default: '<20-char AWS access key>'
help: 'User to authenticate against the S3 authentication service'
- name: s3_store_secret_key
type: string
default: '<40-char AWS secret key>'
help: 'Auth key for the user authenticating against the S3 authentication service'
- name: s3_store_bucket
type: string
default: '<lowercased 20-char aws access key>glance'
help: "Container within the account that the account should use for storing images in S3. Note that S3 has a flat namespace, so you need a unique bucket name for your glance images. An easy way to do this is append your AWS access key to 'glance'. S3 buckets in AWS *must* be lowercased, so remember to lowercase your AWS access key if you use it in your bucket name below!"
- name: s3_store_create_bucket_on_put
type: string
default: 'False'
help: 'Do we create the bucket if it does not exist?'
- name: s3_store_object_buffer_dir
type: string
default: '/path/to/dir'
help: "When sending images to S3, the data will first be written to a temporary buffer on disk. By default the platform's temporary directory will be used. If required, an alternative directory can be specified here."
- name: s3_store_bucket_url_format
type: string
default: 'subdomain'
help: "When forming a bucket url, boto will either set the bucket name as the subdomain or as the first token of the path. Amazon's S3 service will accept it as the subdomain, but Swift's S3 middleware requires it be in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'."
- name: rbd_store_ceph_conf
type: string
default: '/etc/ceph/ceph.conf'
help: 'Ceph configuration file path If using cephx authentication, this file should include a reference to the right keyring in a client.<USER> section'
- name: rbd_store_user
type: string
default: 'glance'
help: 'RADOS user to authenticate as (only applicable if using cephx)'
- name: rbd_store_pool
type: string
default: 'images'
help: 'RADOS pool in which images are stored'
- name: rbd_store_chunk_size
type: string
default: '8'
help: 'Images will be chunked into objects of this size (in megabytes). For best performance, this should be a power of two'
- name: sheepdog_store_address
type: string
default: 'localhost'
- name: sheepdog_store_port
type: string
default: '7000'
- name: sheepdog_store_chunk_size
type: string
default: '64'
help: 'Images will be chunked into objects of this size (in megabytes). For best performance, this should be a power of two'
- name: cinder_catalog_info
type: string
default: 'volume:cinder:publicURL'
help: 'Info to match when looking for cinder in the service catalog. Format is colon-separated values of the form <service_type>:<service_name>:<endpoint_type>'
- name: cinder_endpoint_template
type: string
default: None
help: 'Override service catalog lookup with a template for the cinder endpoint, e.g. http://localhost:8776/v1/%(project_id)s'
- name: os_region_name
type: string
default: None
help: 'Region name of this node'
- name: cinder_ca_certificates_file
type: string
default: None
help: 'Location of the CA certificates file to use for cinder client requests'
- name: cinder_http_retries
type: integer
default: 3
help: 'Number of cinderclient retries on failed http calls'
- name: cinder_api_insecure
type: boolean
default: False
help: 'Allow insecure SSL requests to cinder'
- name: delayed_delete
type: string
default: 'False'
help: 'Turn on/off delayed delete'
- name: scrub_time
type: string
default: '43200'
help: 'Delayed delete time in seconds'
- name: scrubber_datadir
type: string
default: '/var/lib/glance/scrubber'
help: 'Directory that the scrubber will use to remind itself of what to delete. Make sure this is also set in glance-scrubber.conf'
- name: image_cache_dir
type: string
default: '/var/lib/glance/image-cache/'
help: 'Base directory that the Image Cache uses'
- name: keystone_authtoken.auth_host
type: host
default: '127.0.0.1'
- name: keystone_authtoken.auth_port
type: string
default: '35357'
- name: keystone_authtoken.auth_protocol
type: string
default: 'http'
- name: keystone_authtoken.admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
- name: keystone_authtoken.admin_user
type: string
default: '%SERVICE_USER%'
- name: keystone_authtoken.admin_password
type: string
default: '%SERVICE_PASSWORD%'
- name: paste_deploy.config_file
type: string
default: 'glance-api-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
- name: paste_deploy.flavor
type: string
default: ''
help: "Partial name of a pipeline in your paste configuration file with the service name removed. For example, if your paste section name is [pipeline:glance-api-keystone], you would configure the flavor below as 'keystone'."


@ -1,560 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: verbose
type: boolean
default: false
help: 'Show more verbose log output (sets INFO log level output)'
- name: debug
type: boolean
default: false
help: 'Show debugging output in logs (sets DEBUG log level output)'
- name: default_store
type: string
default: 'file'
help: "Which backend scheme should Glance use by default is not specified in a request to add a new image to Glance? Known schemes are determined by the known_stores option below. Default: 'file'"
- name: known_stores
type: string_list
default: ['glance.store.filesystem.Store']
help: 'List of which store classes and store class locations are currently known to glance at startup.'
- name: image_size_cap
type: integer
default: 1099511627776
help: 'Maximum image size (in bytes) that may be uploaded through the Glance API server. Defaults to 1 TB. WARNING: this value should only be increased after careful consideration and must be set to a value under 8 EB (9223372036854775808).'
- name: bind_host
type: host
default: '0.0.0.0'
help: 'Address to bind the API server'
- name: bind_port
type: port
default: 9292
help: 'Port to bind the API server to'
- name: log_file
type: file
default: '/var/log/glance/api.log'
help: 'Log to this file. Make sure you do not set the same log file for both the API and registry servers!'
- name: backlog
type: integer
default: 4096
help: 'Backlog requests when creating socket'
- name: tcp_keepidle
type: integer
default: 600
help: 'TCP_KEEPIDLE value in seconds when creating socket. Not supported on OS X.'
- name: sql_connection
type: string
default: 'sqlite:///glance.sqlite'
help: 'SQLAlchemy connection string for the reference implementation registry server. Any valid SQLAlchemy connection string is fine. See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine'
- name: sql_idle_timeout
type: integer
default: 3600
help: "MySQL uses a default `wait_timeout` of 8 hours, after which it will drop idle connections. This can result in 'MySQL Gone Away' exceptions. If you notice this, you can lower this value to ensure that SQLAlchemy reconnects before MySQL can drop the connection."
- name: workers
type: integer
default: 1
help: 'Number of Glance API worker processes to start. On machines with more than one CPU increasing this value may improve performance (especially if using SSL with compression turned on). It is typically recommended to set this value to the number of CPUs present on your machine.'
- name: admin_role
type: string
default: 'admin'
help: 'Role used to identify an authenticated user as administrator'
- name: allow_anonymous_access
type: boolean
default: false
help: 'Allow unauthenticated users to access the API with read-only privileges. This only applies when using ContextMiddleware.'
- name: enable_v1_api
type: boolean
default: true
help: 'Allow access to version 1 of glance api'
- name: enable_v2_api
type: boolean
default: true
help: 'Allow access to version 2 of glance api'
- name: show_image_direct_url
type: boolean
default: false
help: "Return the URL that references where the data is stored on the backend storage system. For example, if using the file system store a URL of 'file:///path/to/image' will be returned to the user in the 'direct_url' meta-data field. The default value is false."
- name: send_identity_headers
type: boolean
default: false
help: 'Send headers containing user and tenant information when making requests to the v1 glance registry. This allows the registry to function as if a user is authenticated without the need to authenticate a user itself using the auth_token middleware. The default value is false.'
- name: container_formats
type: string_list
default: ['ami', 'ari', 'aki', 'bare', 'ovf']
help: "Supported values for the 'container_format' image attribute"
- name: disk_formats
type: string_list
default: ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']
help: "Supported values for the 'disk_format' image attribute"
- name: lock_path
type: directory
default: ~
help: 'Directory to use for lock files. Defaults to a temp directory (string value). This setting needs to be the same for both glance-scrubber and glance-api.'
comment: 'New param'
- name: property_protection_file
type: file
default: ~
help: "Property Protections config file This file contains the rules for property protections and the roles associated with it. If this config value is not specified, by default, property protections won't be enforced. If a value is specified and the file is not found, then an HTTPInternalServerError will be thrown."
- name: user_storage_quota
type: integer
default: 0
help: 'Set a system wide quota for every user. This value is the total number of bytes that a user can use across all storage systems. A value of 0 means unlimited.'
- name: use_syslog
type: boolean
default: false
help: 'Send logs to syslog (/dev/log) instead of to file specified by `log_file`'
- name: syslog_log_facility
type: string
default: 'LOG_LOCAL0'
help: 'Facility to use. If unset defaults to LOG_USER.'
- name: cert_file
type: file
default: '/path/to/certfile'
help: 'Certificate file to use when starting API server securely'
- name: key_file
type: file
default: '/path/to/keyfile'
help: 'Private key file to use when starting API server securely'
- name: ca_file
type: file
default: '/path/to/cafile'
help: 'CA certificate file to use to verify connecting clients'
- name: metadata_encryption_key
type: string
default: '<16, 24 or 32 char registry metadata key>'
help: "AES key for encrypting store 'location' metadata, including -- if used -- Swift or S3 credentials Should be set to a random string of length 16, 24 or 32 bytes"
- name: registry_host
type: host
default: '0.0.0.0'
help: 'Address to find the registry server'
- name: registry_port
type: port
default: 9191
help: 'Port the registry server is listening on'
- name: registry_client_protocol
type: string
default: 'http'
help: 'What protocol to use when connecting to the registry server? Set to https for secure HTTP communication'
- name: registry_client_key_file
type: file
default: '/path/to/key/file'
help: 'The path to the key file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment variable to a filepath of the key file'
- name: registry_client_cert_file
type: file
default: '/path/to/cert/file'
help: 'The path to the cert file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment variable to a filepath of the cert file'
- name: registry_client_ca_file
type: file
default: '/path/to/ca/file'
help: 'The path to the certifying authority cert file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file'
- name: registry_client_insecure
type: boolean
default: false
help: "When using SSL in connections to the registry server, do not require validation via a certifying authority. This is the registry's equivalent of specifying --insecure on the command line using glanceclient for the API Default: False"
- name: registry_client_timeout
type: integer
default: 600
help: "The period of time, in seconds, that the API server will wait for a registry request to complete. A value of '0' implies no timeout. Default: 600"
- name: db_auto_create
type: boolean
default: false
help: 'Whether to automatically create the database tables. Default: False'
- name: sqlalchemy_debug
type: boolean
default: true
help: 'Enable DEBUG log messages from sqlalchemy which prints every database query and response. Default: False'
- name: notifier_strategy
type: string
default: 'noop'
help: 'Notifications can be sent when images are created, updated or deleted. There are four methods of sending notifications: logging (via the log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid message queue), or noop (no notifications sent, the default)'
- name: rabbit_host
type: host
default: 'localhost'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_port
type: port
default: 5672
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_use_ssl
type: boolean
default: false
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_userid
type: string
default: 'guest'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_password
type: string
default: 'guest'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_virtual_host
type: string
default: '/'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_notification_exchange
type: string
default: 'glance'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_notification_topic
type: string
default: 'notifications'
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: rabbit_durable_queues
type: boolean
default: false
help: 'Configuration options if sending notifications via rabbitmq (these are the defaults)'
- name: qpid_notification_exchange
type: string
default: 'glance'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_notification_topic
type: string
default: 'notifications'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_host
type: host
default: 'localhost'
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_port
type: port
default: 5672
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_username
type: string
default: ''
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_password
type: string
default: ''
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_sasl_mechanisms
type: string
default: ''
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_timeout
type: integer
default: 0
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_limit
type: integer
default: 0
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_interval_min
type: integer
default: 0
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_interval_max
type: integer
default: 0
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_reconnect_interval
type: integer
default: 0
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_heartbeat
type: integer
default: 5
help: 'Configuration options if sending notifications via Qpid (these are the defaults)'
- name: qpid_protocol
type: string
default: 'tcp'
help: "Configuration options if sending notifications via Qpid (these are the defaults) Set to 'ssl' to enable SSL"
- name: qpid_tcp_nodelay
type: boolean
default: true
help: "Configuration options if sending notifications via Qpid (these are the defaults) Set to 'ssl' to enable SSL"
- name: filesystem_store_datadir
type: directory
default: '/var/lib/glance/images/'
help: 'Directory that the Filesystem backend store writes image data to'
- name: filesystem_store_metadata_file
type: file
default: ~
help: 'A path to a JSON file that contains metadata describing the storage system. When show_multiple_locations is True the information in this file will be returned with any location that is contained in this store.'
comment: 'New param'
- name: swift_store_auth_version
type: string
default: '2'
help: "Version of the authentication service to use Valid versions are '2' for keystone and '1' for swauth and rackspace"
- name: swift_store_auth_address
type: string
default: '127.0.0.1:5000/v2.0/'
help: "Address where the Swift authentication service lives Valid schemes are 'http://' and 'https://' If no scheme specified, default to 'https://' For swauth, use something like '127.0.0.1:8080/v1.0/'"
- name: swift_store_user
type: string
default: 'jdoe:jdoe'
help: "User to authenticate against the Swift authentication service If you use Swift authentication service, set it to 'account':'user' where 'account' is a Swift storage account and 'user' is a user in that account"
- name: swift_store_key
type: string
default: 'a86850deb2742ec3cb41518e26aa2d89'
help: 'Auth key for the user authenticating against the Swift authentication service'
- name: swift_store_container
type: string
default: 'glance'
help: 'Container within the account that the account should use for storing images in Swift'
- name: swift_store_create_container_on_put
type: boolean
default: false
help: 'Do we create the container if it does not exist?'
- name: swift_store_large_object_size
type: integer
default: 5120
help: 'At what size, in MB, should Glance start chunking image files and doing a large object manifest in Swift? By default, this is the maximum object size in Swift, which is 5GB'
- name: swift_store_large_object_chunk_size
type: integer
default: 200
help: 'When doing a large object manifest, what size chunks, in MB, should Glance write to Swift? This amount of data is written to a temporary disk buffer during the process of chunking the image file, and the default is 200MB'
- name: swift_enable_snet
type: boolean
default: false
help: "To use ServiceNET for authentication, prefix hostname of `swift_store_auth_address` with 'snet-'. Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/"
- name: swift_store_multi_tenant
type: boolean
default: false
help: 'If set to True enables multi-tenant storage mode which causes Glance images to be stored in tenant specific Swift accounts.'
- name: swift_store_admin_tenants
type: string_list
default: []
help: 'A list of swift_proxy_server ACL strings that will be applied as both read and write ACLs to the containers created by Glance in multi-tenant mode. This grants the specified tenants/users read and write access to all newly created image objects. The standard swift_proxy_server ACL string formats are allowed, including: <tenant_id>:<username> <tenant_name>:<username> *:<username> Multiple ACLs can be combined using a comma separated list, for example: swift_store_admin_tenants = service:glance,*:admin'
- name: swift_store_region
type: string
default: ''
help: 'The region of the swift_proxy_server endpoint to be used for single tenant. This setting is only necessary if the tenant has multiple swift_proxy_server endpoints.'
- name: swift_store_ssl_compression
type: boolean
default: true
help: "If set to False, disables SSL layer compression of https swift_proxy_server requests. Setting to 'False' may improve performance for images which are already in a compressed format, eg qcow2. If set to True, enables SSL layer compression (provided it is supported by the target swift_proxy_server proxy)."
- name: s3_store_host
type: string
default: '127.0.0.1:8080/v1.0/'
help: "Address where the S3 authentication service lives Valid schemes are 'http://' and 'https://' If no scheme specified, default to 'http://'"
- name: s3_store_access_key
type: string
default: '<20-char AWS access key>'
help: 'User to authenticate against the S3 authentication service'
- name: s3_store_secret_key
type: string
default: '<40-char AWS secret key>'
help: 'Auth key for the user authenticating against the S3 authentication service'
- name: s3_store_bucket
type: string
default: '<lowercased 20-char aws access key>glance'
help: "Container within the account that the account should use for storing images in S3. Note that S3 has a flat namespace, so you need a unique bucket name for your glance images. An easy way to do this is append your AWS access key to 'glance'. S3 buckets in AWS *must* be lowercased, so remember to lowercase your AWS access key if you use it in your bucket name below!"
- name: s3_store_create_bucket_on_put
type: boolean
default: false
help: 'Do we create the bucket if it does not exist?'
- name: s3_store_object_buffer_dir
type: directory
default: '/path/to/dir'
help: "When sending images to S3, the data will first be written to a temporary buffer on disk. By default the platform's temporary directory will be used. If required, an alternative directory can be specified here."
- name: s3_store_bucket_url_format
type: string
default: 'subdomain'
help: "When forming a bucket url, boto will either set the bucket name as the subdomain or as the first token of the path. Amazon's S3 service will accept it as the subdomain, but Swift's S3 middleware requires it be in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'."
- name: rbd_store_ceph_conf
type: file
default: '/etc/ceph/ceph.conf'
help: 'Ceph configuration file path. If using cephx authentication, this file should include a reference to the right keyring in a client.<USER> section'
- name: rbd_store_user
type: string
default: 'glance'
help: 'RADOS user to authenticate as (only applicable if using cephx)'
- name: rbd_store_pool
type: string
default: 'images'
help: 'RADOS pool in which images are stored'
- name: rbd_store_chunk_size
type: integer
default: 8
help: 'Images will be chunked into objects of this size (in megabytes). For best performance, this should be a power of two'
- name: sheepdog_store_address
type: host
default: 'localhost'
- name: sheepdog_store_port
type: port
default: 7000
- name: sheepdog_store_chunk_size
type: integer
default: 64
help: 'Images will be chunked into objects of this size (in megabytes). For best performance, this should be a power of two'
- name: cinder_catalog_info
type: string
default: 'volume:cinder:publicURL'
help: 'Info to match when looking for cinder in the service catalog. Format is colon-separated values of the form <service_type>:<service_name>:<endpoint_type>'
- name: cinder_endpoint_template
type: string
default: ~
help: 'Override service catalog lookup with a template for the cinder endpoint, e.g. http://localhost:8776/v1/%(project_id)s'
- name: os_region_name
type: string
default: ~
help: 'Region name of this node'
- name: cinder_ca_certificates_file
type: string
default: ~
help: 'Location of the CA certificates file to use for cinder client requests'
- name: cinder_http_retries
type: integer
default: 3
help: 'Number of cinderclient retries on failed http calls'
- name: cinder_api_insecure
type: boolean
default: false
help: 'Allow insecure SSL requests to cinder'
- name: delayed_delete
type: boolean
default: false
help: 'Turn on/off delayed delete'
- name: scrub_time
type: integer
default: 43200
help: 'Delayed delete time in seconds'
- name: scrubber_datadir
type: directory
default: '/var/lib/glance/scrubber'
help: 'Directory that the scrubber will use to remind itself of what to delete. Make sure this is also set in glance-scrubber.conf'
- name: image_cache_dir
type: directory
default: '/var/lib/glance/image-cache/'
help: 'Base directory that the Image Cache uses'
- name: keystone_authtoken.auth_host
type: host
default: '127.0.0.1'
comment: 'New param'
- name: keystone_authtoken.auth_port
type: port
default: 35357
- name: keystone_authtoken.auth_protocol
type: string
default: 'http'
- name: keystone_authtoken.admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
- name: keystone_authtoken.admin_user
type: string
default: '%SERVICE_USER%'
- name: keystone_authtoken.admin_password
type: string
default: '%SERVICE_PASSWORD%'
- name: paste_deploy.config_file
type: file
default: 'glance-api-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
- name: paste_deploy.flavor
type: string
default: ''
help: "Partial name of a pipeline in your paste configuration file with the service name removed. For example, if your paste section name is [pipeline:glance-api-keystone], you would configure the flavor below as 'keystone'."
# ====================================================
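The swift_store_large_object_size and swift_store_large_object_chunk_size entries above describe when and how Glance switches to a chunked, manifest-based Swift upload. A small illustrative sketch of the arithmetic under the defaults shown (not Glance's actual upload code):

```python
import math

MB = 1024 * 1024

def swift_chunk_count(image_bytes, large_object_size_mb=5120, chunk_size_mb=200):
    """Number of Swift objects an upload would produce: a single object up
    to the large-object threshold, otherwise fixed-size chunks plus a manifest."""
    if image_bytes <= large_object_size_mb * MB:
        return 1
    return math.ceil(image_bytes / (chunk_size_mb * MB))

print(swift_chunk_count(4 * 1024 * MB))   # 1  -- under the 5 GB threshold
print(swift_chunk_count(12 * 1024 * MB))  # 62 -- 12 GB split into 200 MB chunks
```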


@ -1,133 +0,0 @@
project: glance_registry
version: '2013.2.1'
parameters:
- name: verbose
type: string
default: 'False'
help: 'Show more verbose log output (sets INFO log level output)'
- name: debug
type: string
default: 'False'
help: 'Show debugging output in logs (sets DEBUG log level output)'
- name: bind_host
type: host
default: '0.0.0.0'
help: 'Address to bind the registry server'
- name: bind_port
type: string
default: '9191'
help: 'Port to bind the registry server to'
- name: log_file
type: string
default: '/var/log/glance/registry.log'
help: 'Log to this file. Make sure you do not set the same log file for both the API and registry servers!'
- name: backlog
type: string
default: '4096'
help: 'Backlog requests when creating socket'
- name: tcp_keepidle
type: string
default: '600'
help: 'TCP_KEEPIDLE value in seconds when creating socket. Not supported on OS X.'
- name: sql_connection
type: string
default: 'sqlite:///glance.sqlite'
help: 'SQLAlchemy connection string for the reference implementation registry server. Any valid SQLAlchemy connection string is fine. See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine'
- name: sql_idle_timeout
type: string
default: '3600'
help: "MySQL uses a default `wait_timeout` of 8 hours, after which it will drop idle connections. This can result in 'MySQL Gone Away' exceptions. If you notice this, you can lower this value to ensure that SQLAlchemy reconnects before MySQL can drop the connection."
- name: api_limit_max
type: string
default: '1000'
help: 'Limit the api to return `api_limit_max` items in a call to a container. If a larger `limit` query param is provided, it will be reduced to this value.'
- name: limit_param_default
type: string
default: '25'
help: 'If a `limit` query param is not provided in an api request, it will default to `limit_param_default`'
- name: admin_role
type: string
default: 'admin'
help: 'Role used to identify an authenticated user as administrator'
- name: db_auto_create
type: string
default: 'False'
help: 'Whether to automatically create the database tables. Default: False'
- name: sqlalchemy_debug
type: string
default: 'True'
help: 'Enable DEBUG log messages from sqlalchemy which prints every database query and response. Default: False'
- name: use_syslog
type: string
default: 'False'
help: 'Send logs to syslog (/dev/log) instead of to file specified by `log_file`'
- name: syslog_log_facility
type: string
default: 'LOG_LOCAL1'
help: 'Facility to use. If unset defaults to LOG_USER.'
- name: cert_file
type: string
default: '/path/to/certfile'
help: 'Certificate file to use when starting registry server securely'
- name: key_file
type: string
default: '/path/to/keyfile'
help: 'Private key file to use when starting registry server securely'
- name: ca_file
type: string
default: '/path/to/cafile'
help: 'CA certificate file to use to verify connecting clients'
- name: keystone_authtoken.auth_host
type: host
default: '127.0.0.1'
- name: keystone_authtoken.auth_port
type: string
default: '35357'
- name: keystone_authtoken.auth_protocol
type: string
default: 'http'
- name: keystone_authtoken.admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
- name: keystone_authtoken.admin_user
type: string
default: '%SERVICE_USER%'
- name: keystone_authtoken.admin_password
type: string
default: '%SERVICE_PASSWORD%'
- name: paste_deploy.config_file
type: string
default: 'glance-registry-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
- name: paste_deploy.flavor
type: string
default: ''
help: "Partial name of a pipeline in your paste configuration file with the service name removed. For example, if your paste section name is [pipeline:glance-registry-keystone], you would configure the flavor below as 'keystone'."


@ -1,161 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: verbose
type: string
default: 'False'
help: 'Show more verbose log output (sets INFO log level output)'
comment: 'New param'
- name: debug
type: string
default: 'False'
help: 'Show debugging output in logs (sets DEBUG log level output)'
comment: 'New param'
- name: bind_host
type: host
default: '0.0.0.0'
help: 'Address to bind the registry server'
comment: 'New param'
- name: bind_port
type: string
default: '9191'
help: 'Port to bind the registry server to'
comment: 'New param'
- name: log_file
type: string
default: '/var/log/glance/registry.log'
help: 'Log to this file. Make sure you do not set the same log file for both the API and registry servers!'
comment: 'New param'
- name: backlog
type: string
default: '4096'
help: 'Backlog requests when creating socket'
comment: 'New param'
- name: tcp_keepidle
type: string
default: '600'
help: 'TCP_KEEPIDLE value in seconds when creating socket. Not supported on OS X.'
comment: 'New param'
- name: sql_connection
type: string
default: 'sqlite:///glance.sqlite'
help: 'SQLAlchemy connection string for the reference implementation registry server. Any valid SQLAlchemy connection string is fine. See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine'
comment: 'New param'
- name: sql_idle_timeout
type: string
default: '3600'
help: "MySQL uses a default `wait_timeout` of 8 hours, after which it will drop idle connections. This can result in 'MySQL Gone Away' exceptions. If you notice this, you can lower this value to ensure that SQLAlchemy reconnects before MySQL can drop the connection."
comment: 'New param'
- name: api_limit_max
type: string
default: '1000'
help: 'Limit the api to return `api_limit_max` items in a call to a container. If a larger `limit` query param is provided, it will be reduced to this value.'
comment: 'New param'
- name: limit_param_default
type: string
default: '25'
help: 'If a `limit` query param is not provided in an api request, it will default to `limit_param_default`'
comment: 'New param'
- name: admin_role
type: string
default: 'admin'
help: 'Role used to identify an authenticated user as administrator'
comment: 'New param'
- name: db_auto_create
type: string
default: 'False'
help: 'Whether to automatically create the database tables. Default: False'
comment: 'New param'
- name: sqlalchemy_debug
type: string
default: 'True'
help: 'Enable DEBUG log messages from sqlalchemy which prints every database query and response. Default: False'
comment: 'New param'
- name: use_syslog
type: string
default: 'False'
help: 'Send logs to syslog (/dev/log) instead of to file specified by `log_file`'
comment: 'New param'
- name: syslog_log_facility
type: string
default: 'LOG_LOCAL1'
help: 'Facility to use. If unset defaults to LOG_USER.'
comment: 'New param'
- name: cert_file
type: string
default: '/path/to/certfile'
help: 'Certificate file to use when starting registry server securely'
comment: 'New param'
- name: key_file
type: string
default: '/path/to/keyfile'
help: 'Private key file to use when starting registry server securely'
comment: 'New param'
- name: ca_file
type: string
default: '/path/to/cafile'
help: 'CA certificate file to use to verify connecting clients'
comment: 'New param'
- name: keystone_authtoken.auth_host
type: host
default: '127.0.0.1'
comment: 'New param'
- name: keystone_authtoken.auth_port
type: string
default: '35357'
comment: 'New param'
- name: keystone_authtoken.auth_protocol
type: string
default: 'http'
comment: 'New param'
- name: keystone_authtoken.admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
comment: 'New param'
- name: keystone_authtoken.admin_user
type: string
default: '%SERVICE_USER%'
comment: 'New param'
- name: keystone_authtoken.admin_password
type: string
default: '%SERVICE_PASSWORD%'
comment: 'New param'
- name: paste_deploy.config_file
type: string
default: 'glance-registry-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
comment: 'New param'
- name: paste_deploy.flavor
type: string
default: ''
help: "Partial name of a pipeline in your paste configuration file with the service name removed. For example, if your paste section name is [pipeline:glance-registry-keystone], you would configure the flavor below as 'keystone'."
comment: 'New param'
# ====================================================
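These versioned schema files all share one shape: a list of version records in which `checkpoint: true` marks a full restatement and `added` lists the parameters introduced by that version. A hedged guess at how such records could be folded into a flat parameter table (this is only a sketch of the data model, not Rubick's actual loader; all names here are illustrative):

```python
def collapse_schema(records):
    """Fold version records into {name: parameter}: restart at each
    checkpoint (a full restatement), then layer each `added` list on top."""
    params = {}
    for record in records:
        if record.get('checkpoint'):
            params = {}
        for param in record.get('added', []):
            params[param['name']] = param
    return params

records = [
    {'version': '2013.2.1', 'checkpoint': True, 'added': [
        {'name': 'verbose', 'type': 'string', 'default': 'False'},
        {'name': 'debug', 'type': 'string', 'default': 'False'},
    ]},
]
print(sorted(collapse_schema(records)))  # ['debug', 'verbose']
```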


@ -1,642 +0,0 @@
project: keystone
version: '2013.1.3'
parameters:
- name: admin_token
type: string
default: 'ADMIN'
help: "A 'shared secret' between keystone and other openstack services"
- name: bind_host
type: host
default: '0.0.0.0'
help: 'The IP address of the network interface to listen on'
- name: public_port
type: port
default: 5000
help: 'The port number which the public service listens on'
- name: admin_port
type: port
default: 35357
help: 'The port number which the admin service listens on'
- name: public_endpoint
type: string
default: 'http://localhost:%(public_port)s/'
help: 'The base endpoint URLs for keystone that are advertised to clients (NOTE: this does NOT affect how keystone listens for connections)'
- name: admin_endpoint
type: string
default: 'http://localhost:%(admin_port)s/'
- name: compute_port
type: port
default: 8774
help: 'The port number which the OpenStack Compute service listens on'
- name: policy_file
type: string
default: 'policy.json'
help: 'Path to your policy definition containing identity actions'
- name: policy_default_rule
type: string
default: 'admin_required'
help: 'Rule to check if no matching policy definition is found. FIXME(dolph): This should really be defined as [policy] default_rule'
- name: member_role_id
type: string
default: '9fe2ff9ee4384b1894a90878d3e92bab'
help: 'Role for migrating membership relationships During a SQL upgrade, the following values will be used to create a new role that will replace records in the user_tenant_membership table with explicit role grants. After migration, the member_role_id will be used in the API add_user_to_project, and member_role_name will be ignored.'
- name: member_role_name
type: string
default: '_member_'
- name: max_request_body_size
type: integer
default: 114688
help: 'enforced by optional sizelimit middleware (keystone.middleware:RequestBodySizeLimiter)'
- name: max_param_size
type: integer
default: 64
help: 'limit the sizes of user & tenant ID/names'
- name: max_token_size
type: integer
default: 8192
help: 'similar to max_param_size, but provides an exception for token values'
- name: debug
type: boolean
default: false
help: '=== Logging Options === Print debugging output (includes plaintext request logging, potentially including passwords)'
- name: verbose
type: boolean
default: false
help: 'Print more verbose output'
- name: log_file
type: string
default: 'keystone.log'
help: 'Name of log file to output to. If not set, logging will go to stdout.'
- name: log_dir
type: string
default: '/var/log/keystone'
help: 'The directory to keep log files in (will be prepended to --logfile)'
- name: use_syslog
type: boolean
default: false
help: 'Use syslog for logging.'
- name: syslog_log_facility
type: string
default: 'LOG_USER'
help: 'syslog facility to receive log lines'
- name: log_config
type: string
default: 'logging.conf'
help: 'If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files.'
- name: log_format
type: string
default: '%(asctime)s %(levelname)8s [%(name)s] %(message)s'
help: 'A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes.'
- name: log_date_format
type: string
default: '%Y-%m-%d %H:%M:%S'
help: 'Format string for %(asctime)s in log records.'
- name: onready
type: string
default: 'keystone.common.systemd'
help: "onready allows you to send a notification when the process is ready to serve. For example, to have it notify using systemd, one could set the shell command 'onready = systemd-notify --ready', or specify a module with a notify() method"
- name: default_notification_level
type: string
default: 'INFO'
help: 'Default notification level for outgoing notifications'
- name: default_publisher_id
type: string
default: ''
help: 'Default publisher_id for outgoing notifications; included in the payload.'
- name: rpc_backend
type: string
default: 'keystone.openstack.common.rpc.impl_kombu'
help: 'The messaging module to use, defaults to kombu.'
- name: rpc_thread_pool_size
type: integer
default: 64
help: 'Size of RPC thread pool'
- name: rpc_conn_pool_size
type: integer
default: 30
help: 'Size of RPC connection pool'
- name: rpc_response_timeout
type: integer
default: 60
help: 'Seconds to wait for a response from call or multicall'
- name: rpc_cast_timeout
type: integer
default: 30
help: 'Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.'
- name: fake_rabbit
type: boolean
default: false
help: 'If True, use a fake RabbitMQ provider'
- name: control_exchange
type: string
default: 'openstack'
help: 'AMQP exchange to connect to if using RabbitMQ or Qpid'
- name: sql.connection
type: string
default: 'sqlite:///keystone.db'
help: 'The SQLAlchemy connection string used to connect to the database'
- name: sql.idle_timeout
type: integer
default: 200
help: 'the timeout before idle sql connections are reaped'
- name: oauth1.driver
type: string
default: 'keystone.contrib.oauth1.backends.sql.OAuth1'
- name: identity.default_domain_id
type: string
default: 'default'
help: 'This references the domain to use for all Identity API v2 requests (which are not aware of domains). A domain with this ID will be created for you by keystone-manage db_sync in migration 008. The domain referenced by this ID cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. There is nothing special about this domain, other than the fact that it must exist in order to maintain support for your v2 clients.'
- name: identity.domain_specific_drivers_enabled
type: boolean
default: false
help: 'A subset (or all) of domains can have their own identity driver, each with their own partial configuration file in a domain configuration directory.'
- name: identity.domain_config_dir
type: string
default: '/etc/keystone/domains'
- name: identity.max_password_length
type: integer
default: 4096
help: 'Maximum supported length for user passwords; decrease to improve performance.'
- name: cache.enabled
type: boolean
default: false
help: 'Global cache functionality toggle.'
- name: catalog.template_file
type: string
default: 'default_catalog.templates'
- name: endpoint_filter.return_all_endpoints_if_no_filter
type: boolean
default: true
- name: token.provider
type: string
default: ''
help: 'Controls the token construction, validation, and revocation operations. Core providers are keystone.token.providers.[pki|uuid].Provider'
- name: token.expiration
type: integer
default: 86400
help: 'Amount of time a token should remain valid (in seconds)'
- name: token.bind
type: string
default: ''
help: 'External auth mechanisms that should add bind information to token. eg kerberos, x509'
- name: token.enforce_token_bind
type: string
default: 'permissive'
help: 'Enforcement policy on tokens presented to keystone with bind information. One of disabled, permissive, strict, required or a specifically required bind mode e.g. kerberos or x509 to require binding to that authentication.'
- name: assignment.caching
type: boolean
default: true
help: 'Assignment specific caching toggle. This has no effect unless the global caching option is set to True'
- name: assignment.cache_time
type: integer
default: 0
help: 'Assignment specific cache time-to-live (TTL) in seconds.'
- name: token.revocation_cache_time
type: integer
default: 3600
help: 'Revocation-List specific cache time-to-live (TTL) in seconds.'
- name: cache.config_prefix
type: string
default: 'cache.keystone'
help: 'Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name'
- name: cache.backend
type: string
default: 'keystone.common.cache.noop'
help: 'Dogpile.cache backend module. It is recommended that Memcache (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend.'
- name: cache.backend_argument
type: string
default: ''
help: 'Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: <argname>:<value>'
- name: cache.proxies
type: string
default: ''
help: 'Proxy Classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior. Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2'
- name: cache.use_key_mangler
type: boolean
default: true
help: 'Use a key-mangling function (sha1) to ensure fixed length cache-keys. This is toggle-able for debugging purposes, it is highly recommended to always leave this set to True.'
- name: cache.debug_cache_backend
type: boolean
default: false
help: 'Extra debugging from the cache backend (cache keys, get/set/delete/etc calls) This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to False.'
- name: oauth1.request_token_duration
type: integer
default: 28800
help: 'The Identity service may include expire attributes. If no such attribute is included, then the token lasts indefinitely. Specify how quickly the request token will expire (in seconds)'
- name: oauth1.access_token_duration
type: integer
default: 86400
help: 'Specify how quickly the access token will expire (in seconds)'
- name: ssl.enable
type: boolean
default: true
- name: signing.certfile
type: string
default: '/etc/keystone/pki/certs/signing_cert.pem'
- name: signing.keyfile
type: string
default: '/etc/keystone/pki/private/signing_key.pem'
- name: signing.ca_certs
type: string
default: '/etc/keystone/pki/certs/cacert.pem'
- name: signing.ca_key
type: string
default: '/etc/keystone/pki/private/cakey.pem'
- name: signing.key_size
type: integer
default: 2048
- name: signing.valid_days
type: integer
default: 3650
- name: ssl.cert_required
type: boolean
default: false
- name: signing.cert_subject
type: string
default: '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com'
- name: signing.token_format
type: string
default: ''
help: 'Deprecated in favor of provider in the [token] section. Allowed values are PKI or UUID.'
- name: ldap.url
type: string
default: 'ldap://localhost'
- name: ldap.user
type: string
default: 'dc=Manager,dc=example,dc=com'
- name: auth.password
type: string
default: 'keystone.auth.plugins.password.Password'
- name: ldap.suffix
type: string
default: 'cn=example,cn=com'
- name: ldap.use_dumb_member
type: boolean
default: false
- name: ldap.allow_subtree_delete
type: boolean
default: false
- name: ldap.dumb_member
type: string
default: 'cn=dumb,dc=example,dc=com'
- name: ldap.page_size
type: integer
default: 0
help: "Maximum results per page; a value of zero ('0') disables paging (default)"
- name: ldap.alias_dereferencing
type: string
default: 'default'
help: "The LDAP dereferencing option for queries. This can be either 'never', 'searching', 'always', 'finding' or 'default'. The 'default' option falls back to using default dereferencing configured by your ldap.conf."
- name: ldap.query_scope
type: string
default: 'one'
help: "The LDAP scope for queries, this can be either 'one' (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)"
- name: ldap.user_tree_dn
type: string
default: 'ou=Users,dc=example,dc=com'
- name: ldap.user_filter
type: string
default: ''
- name: ldap.user_objectclass
type: string
default: 'inetOrgPerson'
- name: ldap.user_domain_id_attribute
type: string
default: 'businessCategory'
- name: ldap.user_id_attribute
type: string
default: 'cn'
- name: ldap.user_name_attribute
type: string
default: 'sn'
- name: ldap.user_mail_attribute
type: string
default: 'email'
- name: ldap.user_pass_attribute
type: string
default: 'userPassword'
- name: ldap.user_enabled_attribute
type: string
default: 'enabled'
- name: ldap.user_enabled_mask
type: integer
default: 0
- name: ldap.user_enabled_default
type: boolean
default: true
- name: ldap.user_attribute_ignore
type: string
default: 'tenant_id,tenants'
- name: ldap.user_allow_create
type: boolean
default: true
- name: ldap.user_allow_update
type: boolean
default: true
- name: ldap.user_allow_delete
type: boolean
default: true
- name: ldap.user_enabled_emulation
type: boolean
default: false
- name: ldap.user_enabled_emulation_dn
type: string
default: ''
- name: ldap.tenant_tree_dn
type: string
default: 'ou=Projects,dc=example,dc=com'
- name: ldap.tenant_filter
type: string
default: ''
- name: ldap.tenant_objectclass
type: string
default: 'groupOfNames'
- name: ldap.tenant_domain_id_attribute
type: string
default: 'businessCategory'
- name: ldap.tenant_id_attribute
type: string
default: 'cn'
- name: ldap.tenant_member_attribute
type: string
default: 'member'
- name: ldap.tenant_name_attribute
type: string
default: 'ou'
- name: ldap.tenant_desc_attribute
type: string
default: 'desc'
- name: ldap.tenant_enabled_attribute
type: string
default: 'enabled'
- name: ldap.tenant_attribute_ignore
type: string
default: ''
- name: ldap.tenant_allow_create
type: boolean
default: true
- name: ldap.tenant_allow_update
type: boolean
default: true
- name: ldap.tenant_allow_delete
type: boolean
default: true
- name: ldap.tenant_enabled_emulation
type: boolean
default: false
- name: ldap.tenant_enabled_emulation_dn
type: string
default: ''
- name: ldap.role_tree_dn
type: string
default: 'ou=Roles,dc=example,dc=com'
- name: ldap.role_filter
type: string
default: ''
- name: ldap.role_objectclass
type: string
default: 'organizationalRole'
- name: ldap.role_id_attribute
type: string
default: 'cn'
- name: ldap.role_name_attribute
type: string
default: 'ou'
- name: ldap.role_member_attribute
type: string
default: 'roleOccupant'
- name: ldap.role_attribute_ignore
type: string
default: ''
- name: ldap.role_allow_create
type: boolean
default: true
- name: ldap.role_allow_update
type: boolean
default: true
- name: ldap.role_allow_delete
type: boolean
default: true
- name: ldap.group_tree_dn
type: string
default: ''
- name: ldap.group_filter
type: string
default: ''
- name: ldap.group_objectclass
type: string
default: 'groupOfNames'
- name: ldap.group_id_attribute
type: string
default: 'cn'
- name: ldap.group_name_attribute
type: string
default: 'ou'
- name: ldap.group_member_attribute
type: string
default: 'member'
- name: ldap.group_desc_attribute
type: string
default: 'desc'
- name: ldap.group_attribute_ignore
type: string
default: ''
- name: ldap.group_allow_create
type: boolean
default: true
- name: ldap.group_allow_update
type: boolean
default: true
- name: ldap.group_allow_delete
type: boolean
default: true
- name: ldap.use_tls
type: boolean
default: false
help: 'LDAP TLS options. If both tls_cacertfile and tls_cacertdir are set, then tls_cacertfile will be used and tls_cacertdir is ignored. Valid options for tls_req_cert are demand, never, and allow.'
- name: ldap.tls_cacertfile
type: string
default: ''
- name: ldap.tls_cacertdir
type: string
default: ''
- name: ldap.tls_req_cert
type: string
default: 'demand'
- name: ldap.user_additional_attribute_mapping
type: string
default: ''
- name: ldap.domain_additional_attribute_mapping
type: string
default: ''
- name: ldap.group_additional_attribute_mapping
type: string
default: ''
- name: ldap.role_additional_attribute_mapping
type: string
default: ''
- name: ldap.project_additional_attribute_mapping
type: string
default: ''
- name: auth.methods
type: string
default: 'external,password,token,oauth1'
- name: auth.external
type: string
default: 'keystone.auth.plugins.external.ExternalDefault'
- name: auth.token
type: string
default: 'keystone.auth.plugins.token.Token'
- name: auth.oauth1
type: string
default: 'keystone.auth.plugins.oauth1.OAuth'
- name: paste_deploy.config_file
type: string
default: 'keystone-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
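The cache.backend_argument option above is specified once per argument in the form <argname>:<value>. A minimal parsing sketch (not keystone's implementation; the sample values are made up):

```python
def parse_backend_arguments(raw_args):
    """Split each '<argname>:<value>' item at the first colon so values
    that themselves contain ':' (e.g. a host:port URL) survive intact."""
    parsed = {}
    for item in raw_args:
        name, _, value = item.partition(':')
        parsed[name] = value
    return parsed

print(parse_backend_arguments(['url:127.0.0.1:11211', 'distributed_lock:True']))
# {'url': '127.0.0.1:11211', 'distributed_lock': 'True'}
```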


@ -1,642 +0,0 @@
project: keystone
version: '2013.1.4'
parameters:
- name: admin_token
type: string
default: 'ADMIN'
help: "A 'shared secret' between keystone and other openstack services"
- name: bind_host
type: host
default: '0.0.0.0'
help: 'The IP address of the network interface to listen on'
- name: public_port
type: port
default: 5000
help: 'The port number which the public service listens on'
- name: admin_port
type: port
default: 35357
help: 'The port number which the admin service listens on'
- name: public_endpoint
type: string
default: 'http://localhost:%(public_port)s/'
help: 'The base endpoint URLs for keystone that are advertised to clients (NOTE: this does NOT affect how keystone listens for connections)'
- name: admin_endpoint
type: string
default: 'http://localhost:%(admin_port)s/'
- name: compute_port
type: port
default: 8774
help: 'The port number which the OpenStack Compute service listens on'
- name: policy_file
type: string
default: 'policy.json'
help: 'Path to your policy definition containing identity actions'
- name: policy_default_rule
type: string
default: 'admin_required'
help: 'Rule to check if no matching policy definition is found. FIXME(dolph): This should really be defined as [policy] default_rule'
- name: member_role_id
type: string
default: '9fe2ff9ee4384b1894a90878d3e92bab'
help: 'Role for migrating membership relationships During a SQL upgrade, the following values will be used to create a new role that will replace records in the user_tenant_membership table with explicit role grants. After migration, the member_role_id will be used in the API add_user_to_project, and member_role_name will be ignored.'
- name: member_role_name
type: string
default: '_member_'
- name: max_request_body_size
type: integer
default: 114688
help: 'enforced by optional sizelimit middleware (keystone.middleware:RequestBodySizeLimiter)'
- name: max_param_size
type: integer
default: 64
help: 'limit the sizes of user & tenant ID/names'
- name: max_token_size
type: integer
default: 8192
help: 'similar to max_param_size, but provides an exception for token values'
- name: debug
type: boolean
default: False
help: '=== Logging Options === Print debugging output (includes plaintext request logging, potentially including passwords)'
- name: verbose
type: boolean
default: False
help: 'Print more verbose output'
- name: log_file
type: string
default: 'keystone.log'
help: 'Name of log file to output to. If not set, logging will go to stdout.'
- name: log_dir
type: string
default: '/var/log/keystone'
help: 'The directory to keep log files in (will be prepended to --logfile)'
- name: use_syslog
type: boolean
default: False
help: 'Use syslog for logging.'
- name: syslog_log_facility
type: string
default: 'LOG_USER'
help: 'syslog facility to receive log lines'
- name: log_config
type: string
default: 'logging.conf'
help: 'If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files.'
- name: log_format
type: string
default: '%(asctime)s %(levelname)8s [%(name)s] %(message)s'
help: 'A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes.'
- name: log_date_format
type: string
default: '%Y-%m-%d %H:%M:%S'
help: 'Format string for %(asctime)s in log records.'
- name: onready
type: string
default: 'keystone.common.systemd'
help: "onready allows you to send a notification when the process is ready to serve. For example, to have it notify using systemd, one could set the shell command 'onready = systemd-notify --ready', or specify a module with a notify() method"
- name: default_notification_level
type: string
default: 'INFO'
help: 'Default notification level for outgoing notifications'
- name: default_publisher_id
type: string
default: ''
help: 'Default publisher_id for outgoing notifications; included in the payload.'
- name: rpc_backend
type: string
default: 'keystone.openstack.common.rpc.impl_kombu'
help: 'The messaging module to use, defaults to kombu.'
- name: rpc_thread_pool_size
type: integer
default: 64
help: 'Size of RPC thread pool'
- name: rpc_conn_pool_size
type: integer
default: 30
help: 'Size of RPC connection pool'
- name: rpc_response_timeout
type: integer
default: 60
help: 'Seconds to wait for a response from call or multicall'
- name: rpc_cast_timeout
type: integer
default: 30
help: 'Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.'
- name: fake_rabbit
type: boolean
default: False
help: 'If True, use a fake RabbitMQ provider'
- name: control_exchange
type: string
default: 'openstack'
help: 'AMQP exchange to connect to if using RabbitMQ or Qpid'
- name: sql.connection
type: string
default: 'sqlite:///keystone.db'
help: 'The SQLAlchemy connection string used to connect to the database'
- name: sql.idle_timeout
type: integer
default: 200
help: 'the timeout before idle sql connections are reaped'
- name: oauth1.driver
type: string
default: 'keystone.contrib.oauth1.backends.sql.OAuth1'
- name: identity.default_domain_id
type: string
default: 'default'
help: 'This references the domain to use for all Identity API v2 requests (which are not aware of domains). A domain with this ID will be created for you by keystone-manage db_sync in migration 008. The domain referenced by this ID cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. There is nothing special about this domain, other than the fact that it must exist in order to maintain support for your v2 clients.'
- name: identity.domain_specific_drivers_enabled
type: boolean
default: False
help: 'A subset (or all) of domains can have their own identity driver, each with their own partial configuration file in a domain configuration directory.'
- name: identity.domain_config_dir
type: string
default: '/etc/keystone/domains'
- name: identity.max_password_length
type: integer
default: 4096
help: 'Maximum supported length for user passwords; decrease to improve performance.'
- name: cache.enabled
type: boolean
default: False
help: 'Global cache functionality toggle.'
- name: catalog.template_file
type: string
default: 'default_catalog.templates'
- name: endpoint_filter.return_all_endpoints_if_no_filter
type: boolean
default: True
- name: token.provider
type: string
default: ''
help: 'Controls the token construction, validation, and revocation operations. Core providers are keystone.token.providers.[pki|uuid].Provider'
- name: token.expiration
type: integer
default: 86400
help: 'Amount of time a token should remain valid (in seconds)'
- name: token.bind
type: string
default: ''
help: 'External auth mechanisms that should add bind information to token. eg kerberos, x509'
- name: token.enforce_token_bind
type: string
default: 'permissive'
help: 'Enforcement policy on tokens presented to keystone with bind information. One of disabled, permissive, strict, required or a specifically required bind mode e.g. kerberos or x509 to require binding to that authentication.'
- name: assignment.caching
type: boolean
default: True
help: 'Assignment specific caching toggle. This has no effect unless the global caching option is set to True'
- name: assignment.cache_time
type: integer
default: 0
help: 'Assignment specific cache time-to-live (TTL) in seconds.'
- name: token.revocation_cache_time
type: integer
default: 3600
help: 'Revocation-List specific cache time-to-live (TTL) in seconds.'
- name: cache.config_prefix
type: string
default: 'cache.keystone'
help: 'Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name'
- name: cache.backend
type: string
default: 'keystone.common.cache.noop'
help: 'Dogpile.cache backend module. It is recommended that Memcache (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend.'
- name: cache.backend_argument
type: string
default: ''
help: 'Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: <argname>:<value>'
- name: cache.proxies
type: string
default: ''
help: 'Proxy Classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior. Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2'
- name: cache.use_key_mangler
type: boolean
default: True
help: 'Use a key-mangling function (sha1) to ensure fixed length cache-keys. This is toggle-able for debugging purposes, it is highly recommended to always leave this set to True.'
- name: cache.debug_cache_backend
type: boolean
default: False
help: 'Extra debugging from the cache backend (cache keys, get/set/delete/etc calls) This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to False.'
- name: oauth1.request_token_duration
type: integer
default: 28800
help: 'The Identity service may include expire attributes. If no such attribute is included, then the token lasts indefinitely. Specify how quickly the request token will expire (in seconds)'
- name: oauth1.access_token_duration
type: integer
default: 86400
help: 'Specify how quickly the access token will expire (in seconds)'
- name: ssl.enable
type: boolean
default: True
- name: signing.certfile
type: string
default: '/etc/keystone/pki/certs/signing_cert.pem'
- name: signing.keyfile
type: string
default: '/etc/keystone/pki/private/signing_key.pem'
- name: signing.ca_certs
type: string
default: '/etc/keystone/pki/certs/cacert.pem'
- name: signing.ca_key
type: string
default: '/etc/keystone/pki/private/cakey.pem'
- name: signing.key_size
type: integer
default: 2048
- name: signing.valid_days
type: integer
default: 3650
- name: ssl.cert_required
type: boolean
default: False
- name: signing.cert_subject
type: string
default: '/CUS/STUnset/LUnset/OUnset/CNwww.example.com'
- name: signing.token_format
type: string
default: ''
help: 'Deprecated in favor of provider in the [token] section Allowed values are PKI or UUID'
- name: ldap.url
type: string
default: 'ldap://localhost'
- name: ldap.user
type: string
default: 'dc=Manager,dc=example,dc=com'
- name: auth.password
type: string
default: 'keystone.auth.plugins.password.Password'
- name: ldap.suffix
type: string
default: 'cn=example,cn=com'
- name: ldap.use_dumb_member
type: boolean
default: False
- name: ldap.allow_subtree_delete
type: boolean
default: False
- name: ldap.dumb_member
type: string
default: 'cn=dumb,dc=example,dc=com'
- name: ldap.page_size
type: integer
default: 0
help: "Maximum results per page; a value of zero ('0') disables paging (default)"
- name: ldap.alias_dereferencing
type: string
default: 'default'
help: "The LDAP dereferencing option for queries. This can be either 'never', 'searching', 'always', 'finding' or 'default'. The 'default' option falls back to using default dereferencing configured by your ldap.conf."
- name: ldap.query_scope
type: string
default: 'one'
help: "The LDAP scope for queries, this can be either 'one' (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)"
- name: ldap.user_tree_dn
type: string
default: 'ou=Users,dc=example,dc=com'
- name: ldap.user_filter
type: string
default: ''
- name: ldap.user_objectclass
type: string
default: 'inetOrgPerson'
- name: ldap.user_domain_id_attribute
type: string
default: 'businessCategory'
- name: ldap.user_id_attribute
type: string
default: 'cn'
- name: ldap.user_name_attribute
type: string
default: 'sn'
- name: ldap.user_mail_attribute
type: string
default: 'email'
- name: ldap.user_pass_attribute
type: string
default: 'userPassword'
- name: ldap.user_enabled_attribute
type: string
default: 'enabled'
- name: ldap.user_enabled_mask
type: integer
default: 0
- name: ldap.user_enabled_default
type: boolean
default: True
- name: ldap.user_attribute_ignore
type: string
default: 'tenant_id,tenants'
- name: ldap.user_allow_create
type: boolean
default: True
- name: ldap.user_allow_update
type: boolean
default: True
- name: ldap.user_allow_delete
type: boolean
default: True
- name: ldap.user_enabled_emulation
type: boolean
default: False
- name: ldap.user_enabled_emulation_dn
type: string
default: ''
- name: ldap.tenant_tree_dn
type: string
default: 'ou=Projects,dc=example,dc=com'
- name: ldap.tenant_filter
type: string
default: ''
- name: ldap.tenant_objectclass
type: string
default: 'groupOfNames'
- name: ldap.tenant_domain_id_attribute
type: string
default: 'businessCategory'
- name: ldap.tenant_id_attribute
type: string
default: 'cn'
- name: ldap.tenant_member_attribute
type: string
default: 'member'
- name: ldap.tenant_name_attribute
type: string
default: 'ou'
- name: ldap.tenant_desc_attribute
type: string
default: 'desc'
- name: ldap.tenant_enabled_attribute
type: string
default: 'enabled'
- name: ldap.tenant_attribute_ignore
type: string
default: ''
- name: ldap.tenant_allow_create
type: boolean
default: True
- name: ldap.tenant_allow_update
type: boolean
default: True
- name: ldap.tenant_allow_delete
type: boolean
default: True
- name: ldap.tenant_enabled_emulation
type: boolean
default: False
- name: ldap.tenant_enabled_emulation_dn
type: string
default: ''
- name: ldap.role_tree_dn
type: string
default: 'ou=Roles,dc=example,dc=com'
- name: ldap.role_filter
type: string
default: ''
- name: ldap.role_objectclass
type: string
default: 'organizationalRole'
- name: ldap.role_id_attribute
type: string
default: 'cn'
- name: ldap.role_name_attribute
type: string
default: 'ou'
- name: ldap.role_member_attribute
type: string
default: 'roleOccupant'
- name: ldap.role_attribute_ignore
type: string
default: ''
- name: ldap.role_allow_create
type: boolean
default: True
- name: ldap.role_allow_update
type: boolean
default: True
- name: ldap.role_allow_delete
type: boolean
default: True
- name: ldap.group_tree_dn
type: string
default: ''
- name: ldap.group_filter
type: string
default: ''
- name: ldap.group_objectclass
type: string
default: 'groupOfNames'
- name: ldap.group_id_attribute
type: string
default: 'cn'
- name: ldap.group_name_attribute
type: string
default: 'ou'
- name: ldap.group_member_attribute
type: string
default: 'member'
- name: ldap.group_desc_attribute
type: string
default: 'desc'
- name: ldap.group_attribute_ignore
type: string
default: ''
- name: ldap.group_allow_create
type: boolean
default: True
- name: ldap.group_allow_update
type: boolean
default: True
- name: ldap.group_allow_delete
type: boolean
default: True
- name: ldap.use_tls
type: boolean
default: False
help: 'LDAP TLS options. If both tls_cacertfile and tls_cacertdir are set, then tls_cacertfile will be used and tls_cacertdir is ignored. Valid options for tls_req_cert are demand, never, and allow.'
- name: ldap.tls_cacertfile
type: string
default: ''
- name: ldap.tls_cacertdir
type: string
default: ''
- name: ldap.tls_req_cert
type: string
default: 'demand'
- name: ldap.user_additional_attribute_mapping
type: string
default: ''
- name: ldap.domain_additional_attribute_mapping
type: string
default: ''
- name: ldap.group_additional_attribute_mapping
type: string
default: ''
- name: ldap.role_additional_attribute_mapping
type: string
default: ''
- name: ldap.project_additional_attribute_mapping
type: string
default: ''
- name: auth.methods
type: string
default: 'external,password,token,oauth1'
- name: auth.external
type: string
default: 'keystone.auth.plugins.external.ExternalDefault'
- name: auth.token
type: string
default: 'keystone.auth.plugins.token.Token'
- name: auth.oauth1
type: string
default: 'keystone.auth.plugins.oauth1.OAuth'
- name: paste_deploy.config_file
type: string
default: 'keystone-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
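
A schema file like the one above is plain YAML: a mapping with a project, a version, and a parameters list of {name, type, default, help} entries. A minimal sketch of loading one with PyYAML and indexing parameters by name follows; the file path and helper name are illustrative, not part of Rubick's actual API.

import yaml

def load_schema(path):
    # A schema file is a mapping with 'project', 'version' and a
    # 'parameters' list of {name, type, default, help} entries.
    with open(path) as f:
        schema = yaml.safe_load(f)
    # Index parameters by name for direct lookup.
    params = {p['name']: p for p in schema['parameters']}
    return schema, params

schema, params = load_schema('keystone.yml')  # hypothetical path
print(params['token.expiration']['default'])  # 86400
print(params['ldap.url']['default'])          # ldap://localhost
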

View File

@ -1,666 +0,0 @@
project: keystone
version: '2013.2.0'
parameters:
- name: admin_token
type: string
default: 'ADMIN'
help: "A 'shared secret' between keystone and other openstack services"
- name: bind_host
type: host
default: '0.0.0.0'
help: 'The IP address of the network interface to listen on'
- name: public_port
type: port
default: 5000
help: 'The port number which the public service listens on'
- name: admin_port
type: port
default: 35357
help: 'The port number which the public admin listens on'
- name: public_endpoint
type: string
default: 'http://localhost:%(public_port)s/'
help: 'The base endpoint URLs for keystone that are advertised to clients (NOTE: this does NOT affect how keystone listens for connections)'
- name: admin_endpoint
type: string
default: 'http://localhost:%(admin_port)s/'
help: 'The base endpoint URLs for keystone that are advertised to clients (NOTE: this does NOT affect how keystone listens for connections)'
- name: compute_port
type: port
default: 8774
help: 'The port number which the OpenStack Compute service listens on'
- name: policy_file
type: string
default: 'policy.json'
help: 'Path to your policy definition containing identity actions'
- name: policy_default_rule
type: string
default: 'admin_required'
help: 'Rule to check if no matching policy definition is found. FIXME(dolph): This should really be defined as [policy] default_rule'
- name: member_role_id
type: string
default: '9fe2ff9ee4384b1894a90878d3e92bab'
help: 'Role for migrating membership relationships During a SQL upgrade, the following values will be used to create a new role that will replace records in the user_tenant_membership table with explicit role grants. After migration, the member_role_id will be used in the API add_user_to_project, and member_role_name will be ignored.'
- name: member_role_name
type: string
default: '_member_'
help: 'Role for migrating membership relationships During a SQL upgrade, the following values will be used to create a new role that will replace records in the user_tenant_membership table with explicit role grants. After migration, the member_role_id will be used in the API add_user_to_project, and member_role_name will be ignored.'
- name: max_request_body_size
type: integer
default: 114688
help: 'enforced by optional sizelimit middleware (keystone.middleware:RequestBodySizeLimiter)'
- name: max_param_size
type: integer
default: 64
help: 'limit the sizes of user & tenant ID/names'
- name: max_token_size
type: integer
default: 8192
help: 'similar to max_param_size, but provides an exception for token values'
- name: debug
type: boolean
default: false
help: '=== Logging Options === Print debugging output (includes plaintext request logging, potentially including passwords)'
- name: verbose
type: boolean
default: false
help: 'Print more verbose output'
- name: log_file
type: string
default: 'keystone.log'
help: 'Name of log file to output to. If not set, logging will go to stdout.'
- name: log_dir
type: string
default: '/var/log/keystone'
help: 'The directory to keep log files in (will be prepended to --logfile)'
- name: use_syslog
type: boolean
default: false
help: 'Use syslog for logging.'
- name: syslog_log_facility
type: string
default: 'LOG_USER'
help: 'syslog facility to receive log lines'
- name: log_config
type: string
default: 'logging.conf'
help: 'If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files.'
- name: log_format
type: string
default: '%(asctime)s %(levelname)8s [%(name)s] %(message)s'
help: 'A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes.'
- name: log_date_format
type: string
default: '%Y-%m-%d %H:%M:%S'
help: 'Format string for %(asctime)s in log records.'
- name: onready
type: string
default: 'keystone.common.systemd'
help: 'onready allows you to send a notification when the process is ready to serve. For example, to have it notify using systemd, one could set a shell command (onready = systemd-notify --ready) or a module with a notify() method.'
- name: default_notification_level
type: string
default: 'INFO'
help: 'Default notification level for outgoing notifications'
- name: default_publisher_id
type: string
default: ''
help: 'Default publisher_id for outgoing notifications; included in the payload.'
- name: rpc_backend
type: string
default: 'keystone.openstack.common.rpc.impl_kombu'
help: 'The messaging module to use, defaults to kombu.'
- name: rpc_thread_pool_size
type: integer
default: 64
help: 'Size of RPC thread pool'
- name: rpc_conn_pool_size
type: integer
default: 30
help: 'Size of RPC connection pool'
- name: rpc_response_timeout
type: integer
default: 60
help: 'Seconds to wait for a response from call or multicall'
- name: rpc_cast_timeout
type: integer
default: 30
help: 'Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.'
- name: fake_rabbit
type: boolean
default: false
help: 'If True, use a fake RabbitMQ provider'
- name: control_exchange
type: string
default: 'openstack'
help: 'AMQP exchange to connect to if using RabbitMQ or Qpid'
- name: sql.connection
type: string
default: 'sqlite:///keystone.db'
help: 'The SQLAlchemy connection string used to connect to the database'
- name: sql.idle_timeout
type: integer
default: 200
help: 'the timeout before idle sql connections are reaped'
- name: oauth1.driver
type: string
default: 'keystone.contrib.oauth1.backends.sql.OAuth1'
- name: identity.default_domain_id
type: string
default: 'default'
help: 'This references the domain to use for all Identity API v2 requests (which are not aware of domains). A domain with this ID will be created for you by keystone-manage db_sync in migration 008. The domain referenced by this ID cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. There is nothing special about this domain, other than the fact that it must exist in order to maintain support for your v2 clients.'
- name: identity.domain_specific_drivers_enabled
type: boolean
default: false
help: 'A subset (or all) of domains can have their own identity driver, each with their own partial configuration file in a domain configuration directory. Only values specific to the domain need to be placed in the domain specific configuration file. This feature is disabled by default; set domain_specific_drivers_enabled to True to enable.'
- name: identity.domain_config_dir
type: string
default: '/etc/keystone/domains'
help: 'A subset (or all) of domains can have their own identity driver, each with their own partial configuration file in a domain configuration directory. Only values specific to the domain need to be placed in the domain specific configuration file. This feature is disabled by default; set domain_specific_drivers_enabled to True to enable.'
- name: identity.max_password_length
type: integer
default: 4096
help: 'Maximum supported length for user passwords; decrease to improve performance.'
- name: cache.enabled
type: boolean
default: false
help: 'Global cache functionality toggle.'
- name: catalog.template_file
type: string
default: 'default_catalog.templates'
- name: endpoint_filter.return_all_endpoints_if_no_filter
type: boolean
default: true
help: 'Extension for creating associations between projects and endpoints in order to provide a tailored catalog for project-scoped token requests.'
- name: token.provider
type: string
default: ''
help: 'Controls the token construction, validation, and revocation operations. Core providers are keystone.token.providers.[pki|uuid].Provider'
- name: token.expiration
type: integer
default: 86400
help: 'Amount of time a token should remain valid (in seconds)'
- name: token.bind
type: string
default: ''
help: 'External auth mechanisms that should add bind information to the token, e.g. kerberos or x509'
- name: token.enforce_token_bind
type: string
default: 'permissive'
help: 'Enforcement policy on tokens presented to keystone with bind information. One of disabled, permissive, strict, required or a specifically required bind mode e.g. kerberos or x509 to require binding to that authentication.'
- name: assignment.caching
type: boolean
default: true
help: 'Assignment specific caching toggle. This has no effect unless the global caching option is set to True'
- name: assignment.cache_time
type: integer
default: ~
help: 'Assignment specific cache time-to-live (TTL) in seconds.'
- name: token.revocation_cache_time
type: integer
default: 3600
help: 'Revocation-List specific cache time-to-live (TTL) in seconds.'
- name: cache.config_prefix
type: string
default: 'cache.keystone'
help: 'Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name'
- name: cache.backend
type: string
default: 'keystone.common.cache.noop'
help: 'Dogpile.cache backend module. It is recommended that Memcache (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend.'
- name: cache.backend_argument
type: string
default: ''
help: 'Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: <argname>:<value>'
- name: cache.proxies
type: string
default: ''
help: 'Proxy Classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior. Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2'
- name: cache.use_key_mangler
type: boolean
default: true
help: 'Use a key-mangling function (sha1) to ensure fixed length cache-keys. This is toggleable for debugging purposes, but it is highly recommended to always leave this set to True.'
- name: cache.debug_cache_backend
type: boolean
default: false
help: 'Extra debugging from the cache backend (cache keys, get/set/delete/etc calls) This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to False.'
- name: oauth1.request_token_duration
type: integer
default: 28800
help: 'The Identity service may include expire attributes. If no such attribute is included, then the token lasts indefinitely. Specify how quickly the request token will expire (in seconds)'
- name: oauth1.access_token_duration
type: integer
default: 86400
help: 'The Identity service may include expire attributes. If no such attribute is included, then the token lasts indefinitely. Specify how quickly the request token will expire (in seconds) Specify how quickly the access token will expire (in seconds)'
- name: ssl.enable
type: boolean
default: true
- name: signing.certfile
type: string
default: '/etc/keystone/pki/certs/signing_cert.pem'
- name: signing.keyfile
type: string
default: '/etc/keystone/pki/private/signing_key.pem'
- name: signing.ca_certs
type: string
default: '/etc/keystone/pki/certs/cacert.pem'
- name: signing.ca_key
type: string
default: '/etc/keystone/pki/private/cakey.pem'
- name: signing.key_size
type: integer
default: 2048
- name: signing.valid_days
type: integer
default: 3650
- name: ssl.cert_required
type: boolean
default: false
- name: signing.cert_subject
type: string
default: '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com'
- name: signing.token_format
type: string
default: ''
help: 'Deprecated in favor of provider in the [token] section Allowed values are PKI or UUID'
- name: ldap.url
type: string
default: 'ldap://localhost'
- name: ldap.user
type: string
default: 'dc=Manager,dc=example,dc=com'
- name: auth.password
type: string
default: 'keystone.auth.plugins.password.Password'
- name: ldap.suffix
type: string
default: 'cn=example,cn=com'
- name: ldap.use_dumb_member
type: boolean
default: false
- name: ldap.allow_subtree_delete
type: boolean
default: false
- name: ldap.dumb_member
type: string
default: 'cn=dumb,dc=example,dc=com'
- name: ldap.page_size
type: integer
default: 0
help: "Maximum results per page; a value of zero ('0') disables paging (default)"
- name: ldap.alias_dereferencing
type: string
default: 'default'
help: "The LDAP dereferencing option for queries. This can be either 'never', 'searching', 'always', 'finding' or 'default'. The 'default' option falls back to using default dereferencing configured by your ldap.conf."
- name: ldap.query_scope
type: string
default: 'one'
help: "The LDAP scope for queries, this can be either 'one' (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)"
- name: ldap.user_tree_dn
type: string
default: 'ou=Users,dc=example,dc=com'
- name: ldap.user_filter
type: string
default: ''
- name: ldap.user_objectclass
type: string
default: 'inetOrgPerson'
- name: ldap.user_domain_id_attribute
type: string
default: 'businessCategory'
- name: ldap.user_id_attribute
type: string
default: 'cn'
- name: ldap.user_name_attribute
type: string
default: 'sn'
- name: ldap.user_mail_attribute
type: string
default: 'email'
- name: ldap.user_pass_attribute
type: string
default: 'userPassword'
- name: ldap.user_enabled_attribute
type: string
default: 'enabled'
- name: ldap.user_enabled_mask
type: integer
default: 0
- name: ldap.user_enabled_default
type: boolean
default: true
- name: ldap.user_attribute_ignore
type: string
default: 'tenant_id,tenants'
- name: ldap.user_allow_create
type: boolean
default: true
- name: ldap.user_allow_update
type: boolean
default: true
- name: ldap.user_allow_delete
type: boolean
default: true
- name: ldap.user_enabled_emulation
type: boolean
default: false
- name: ldap.user_enabled_emulation_dn
type: string
default: ''
- name: ldap.tenant_tree_dn
type: string
default: 'ou=Projects,dc=example,dc=com'
- name: ldap.tenant_filter
type: string
default: ''
- name: ldap.tenant_objectclass
type: string
default: 'groupOfNames'
- name: ldap.tenant_domain_id_attribute
type: string
default: 'businessCategory'
- name: ldap.tenant_id_attribute
type: string
default: 'cn'
- name: ldap.tenant_member_attribute
type: string
default: 'member'
- name: ldap.tenant_name_attribute
type: string
default: 'ou'
- name: ldap.tenant_desc_attribute
type: string
default: 'desc'
- name: ldap.tenant_enabled_attribute
type: string
default: 'enabled'
- name: ldap.tenant_attribute_ignore
type: string
default: ''
- name: ldap.tenant_allow_create
type: boolean
default: true
- name: ldap.tenant_allow_update
type: boolean
default: true
- name: ldap.tenant_allow_delete
type: boolean
default: true
- name: ldap.tenant_enabled_emulation
type: boolean
default: false
- name: ldap.tenant_enabled_emulation_dn
type: string
default: ''
- name: ldap.role_tree_dn
type: string
default: 'ou=Roles,dc=example,dc=com'
- name: ldap.role_filter
type: string
default: ''
- name: ldap.role_objectclass
type: string
default: 'organizationalRole'
- name: ldap.role_id_attribute
type: string
default: 'cn'
- name: ldap.role_name_attribute
type: string
default: 'ou'
- name: ldap.role_member_attribute
type: string
default: 'roleOccupant'
- name: ldap.role_attribute_ignore
type: string
default: ''
- name: ldap.role_allow_create
type: boolean
default: true
- name: ldap.role_allow_update
type: boolean
default: true
- name: ldap.role_allow_delete
type: boolean
default: true
- name: ldap.group_tree_dn
type: string
default: ''
- name: ldap.group_filter
type: string
default: ''
- name: ldap.group_objectclass
type: string
default: 'groupOfNames'
- name: ldap.group_id_attribute
type: string
default: 'cn'
- name: ldap.group_name_attribute
type: string
default: 'ou'
- name: ldap.group_member_attribute
type: string
default: 'member'
- name: ldap.group_desc_attribute
type: string
default: 'desc'
- name: ldap.group_attribute_ignore
type: string
default: ''
- name: ldap.group_allow_create
type: boolean
default: true
- name: ldap.group_allow_update
type: boolean
default: true
- name: ldap.group_allow_delete
type: boolean
default: true
- name: ldap.use_tls
type: boolean
default: false
help: 'LDAP TLS options. If both tls_cacertfile and tls_cacertdir are set, then tls_cacertfile will be used and tls_cacertdir is ignored. Valid options for tls_req_cert are demand, never, and allow.'
- name: ldap.tls_cacertfile
type: string
default: ''
- name: ldap.tls_cacertdir
type: string
default: ''
- name: ldap.tls_req_cert
type: string
default: 'demand'
- name: ldap.user_additional_attribute_mapping
type: string
default: ''
- name: ldap.domain_additional_attribute_mapping
type: string
default: ''
- name: ldap.group_additional_attribute_mapping
type: string
default: ''
- name: ldap.role_additional_attribute_mapping
type: string
default: ''
- name: ldap.project_additional_attribute_mapping
type: string
default: ''
- name: auth.methods
type: string
default: 'external,password,token,oauth1'
- name: auth.external
type: string
default: 'keystone.auth.plugins.external.ExternalDefault'
- name: auth.token
type: string
default: 'keystone.auth.plugins.token.Token'
- name: auth.oauth1
type: string
default: 'keystone.auth.plugins.oauth1.OAuth'
- name: paste_deploy.config_file
type: string
default: 'keystone-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
- name: notification_driver
type: string
default: 'keystone.openstack.common.notifier.rpc_notifier'
help: 'notification_driver can be defined multiple times. Do nothing driver (the default): notification_driver = keystone.openstack.common.notifier.no_op_notifier. Logging driver example (not enabled by default): notification_driver = keystone.openstack.common.notifier.log_notifier. RPC driver example (not enabled by default).'
- name: notification_topics
type: string
default: 'notifications'
help: 'AMQP topics to publish to when using the RPC notification driver. Multiple values can be specified by separating with commas. The actual topic names will be %s.%(default_notification_level)s'
- name: allowed_rpc_exception_modules
type: string
default: 'keystone.openstack.common.exception,nova.exception,cinder.exception,exceptions'
help: 'Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call.'
- name: cache.expiration_time
type: integer
default: 600
help: "Default TTL, in seconds, for any cached item in the dogpile.cache region. This applies to any cached method that doesn't have an explicit cache expiration time defined for it."

View File

@ -1,864 +0,0 @@
- version: '2013.1.3'
checkpoint: true
added:
- name: admin_token
type: string
default: 'ADMIN'
help: "A 'shared secret' between keystone and other openstack services"
comment: 'New param'
- name: bind_host
type: host
default: '0.0.0.0'
help: 'The IP address of the network interface to listen on'
comment: 'New param'
- name: public_port
type: port
default: 5000
help: 'The port number which the public service listens on'
comment: 'New param'
- name: admin_port
type: port
default: 35357
help: 'The port number which the public admin listens on'
comment: 'New param'
- name: public_endpoint
type: string
default: 'http://localhost:%(public_port)s/'
help: 'The base endpoint URLs for keystone that are advertised to clients (NOTE: this does NOT affect how keystone listens for connections)'
comment: 'New param'
- name: admin_endpoint
type: string
default: 'http://localhost:%(admin_port)s/'
comment: 'New param'
- name: compute_port
type: port
default: 8774
help: 'The port number which the OpenStack Compute service listens on'
comment: 'New param'
- name: policy_file
type: string
default: 'policy.json'
help: 'Path to your policy definition containing identity actions'
comment: 'New param'
- name: policy_default_rule
type: string
default: 'admin_required'
help: 'Rule to check if no matching policy definition is found. FIXME(dolph): This should really be defined as [policy] default_rule'
comment: 'New param'
- name: member_role_id
type: string
default: '9fe2ff9ee4384b1894a90878d3e92bab'
help: 'Role for migrating membership relationships During a SQL upgrade, the following values will be used to create a new role that will replace records in the user_tenant_membership table with explicit role grants. After migration, the member_role_id will be used in the API add_user_to_project, and member_role_name will be ignored.'
comment: 'New param'
- name: member_role_name
type: string
default: '_member_'
comment: 'New param'
- name: max_request_body_size
type: integer
default: 114688
help: 'enforced by optional sizelimit middleware (keystone.middleware:RequestBodySizeLimiter)'
comment: 'New param'
- name: max_param_size
type: integer
default: 64
help: 'limit the sizes of user & tenant ID/names'
comment: 'New param'
- name: max_token_size
type: integer
default: 8192
help: 'similar to max_param_size, but provides an exception for token values'
comment: 'New param'
- name: debug
type: boolean
default: false
help: '=== Logging Options === Print debugging output (includes plaintext request logging, potentially including passwords)'
comment: 'New param'
- name: verbose
type: boolean
default: false
help: 'Print more verbose output'
comment: 'New param'
- name: log_file
type: string
default: 'keystone.log'
help: 'Name of log file to output to. If not set, logging will go to stdout.'
comment: 'New param'
- name: log_dir
type: string
default: '/var/log/keystone'
help: 'The directory to keep log files in (will be prepended to --logfile)'
comment: 'New param'
- name: use_syslog
type: boolean
default: false
help: 'Use syslog for logging.'
comment: 'New param'
- name: syslog_log_facility
type: string
default: 'LOG_USER'
help: 'syslog facility to receive log lines'
comment: 'New param'
- name: log_config
type: string
default: 'logging.conf'
help: 'If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files.'
comment: 'New param'
- name: log_format
type: string
default: '%(asctime)s %(levelname)8s [%(name)s] %(message)s'
help: 'A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes.'
comment: 'New param'
- name: log_date_format
type: string
default: '%Y-%m-%d %H:%M:%S'
help: 'Format string for %(asctime)s in log records.'
comment: 'New param'
- name: onready
type: string
default: 'keystone.common.systemd'
help: 'onready allows you to send a notification when the process is ready to serve. For example, to have it notify using systemd, one could set a shell command (onready = systemd-notify --ready) or a module with a notify() method.'
comment: 'New param'
- name: default_notification_level
type: string
default: 'INFO'
help: 'Default notification level for outgoing notifications'
comment: 'New param'
- name: default_publisher_id
type: string
default: ''
help: 'Default publisher_id for outgoing notifications; included in the payload.'
comment: 'New param'
- name: rpc_backend
type: string
default: 'keystone.openstack.common.rpc.impl_kombu'
help: 'The messaging module to use, defaults to kombu.'
comment: 'New param'
- name: rpc_thread_pool_size
type: integer
default: 64
help: 'Size of RPC thread pool'
comment: 'New param'
- name: rpc_conn_pool_size
type: integer
default: 30
help: 'Size of RPC connection pool'
comment: 'New param'
- name: rpc_response_timeout
type: integer
default: 60
help: 'Seconds to wait for a response from call or multicall'
comment: 'New param'
- name: rpc_cast_timeout
type: integer
default: 30
help: 'Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.'
comment: 'New param'
- name: fake_rabbit
type: boolean
default: false
help: 'If True, use a fake RabbitMQ provider'
comment: 'New param'
- name: control_exchange
type: string
default: 'openstack'
help: 'AMQP exchange to connect to if using RabbitMQ or Qpid'
comment: 'New param'
- name: sql.connection
type: string
default: 'sqlite:///keystone.db'
help: 'The SQLAlchemy connection string used to connect to the database'
comment: 'New param'
- name: sql.idle_timeout
type: integer
default: 200
help: 'the timeout before idle sql connections are reaped'
comment: 'New param'
- name: oauth1.driver
type: string
default: 'keystone.contrib.oauth1.backends.sql.OAuth1'
comment: 'New param'
- name: identity.default_domain_id
type: string
default: 'default'
help: 'This references the domain to use for all Identity API v2 requests (which are not aware of domains). A domain with this ID will be created for you by keystone-manage db_sync in migration 008. The domain referenced by this ID cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. There is nothing special about this domain, other than the fact that it must exist in order to maintain support for your v2 clients.'
comment: 'New param'
- name: identity.domain_specific_drivers_enabled
type: boolean
default: false
help: 'A subset (or all) of domains can have their own identity driver, each with their own partial configuration file in a domain configuration directory. Only'
comment: 'New param'
- name: identity.domain_config_dir
type: string
default: '/etc/keystone/domains'
comment: 'New param'
- name: identity.max_password_length
type: integer
default: 4096
help: 'Maximum supported length for user passwords; decrease to improve performance.'
comment: 'New param'
- name: cache.enabled
type: boolean
default: false
help: 'Global cache functionality toggle.'
comment: 'New param'
- name: catalog.template_file
type: string
default: 'default_catalog.templates'
comment: 'New param'
- name: endpoint_filter.return_all_endpoints_if_no_filter
type: boolean
default: true
comment: 'New param'
- name: token.provider
type: string
default: ''
help: 'Controls the token construction, validation, and revocation operations. Core providers are keystone.token.providers.[pki|uuid].Provider'
comment: 'New param'
- name: token.expiration
type: integer
default: 86400
help: 'Amount of time a token should remain valid (in seconds)'
comment: 'New param'
- name: token.bind
type: string
default: ''
help: 'External auth mechanisms that should add bind information to the token, e.g. kerberos or x509'
comment: 'New param'
- name: token.enforce_token_bind
type: string
default: 'permissive'
help: 'Enforcement policy on tokens presented to keystone with bind information. One of disabled, permissive, strict, required or a specifically required bind mode e.g. kerberos or x509 to require binding to that authentication.'
comment: 'New param'
- name: assignment.caching
type: boolean
default: true
help: 'Assignment specific caching toggle. This has no effect unless the global caching option is set to True'
comment: 'New param'
- name: assignment.cache_time
type: integer
default: false
help: 'Assignment specific cache time-to-live (TTL) in seconds.'
comment: 'New param'
- name: token.revocation_cache_time
type: integer
default: 3600
help: 'Revocation-List specific cache time-to-live (TTL) in seconds.'
comment: 'New param'
- name: cache.config_prefix
type: string
default: 'cache.keystone'
help: 'Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name'
comment: 'New param'
- name: cache.backend
type: string
default: 'keystone.common.cache.noop'
help: 'Dogpile.cache backend module. It is recommended that Memcache (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend.'
comment: 'New param'
- name: cache.backend_argument
type: string
default: ''
help: 'Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: <argname>:<value>'
comment: 'New param'
- name: cache.proxies
type: string
default: ''
help: 'Proxy Classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior. Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2'
comment: 'New param'
- name: cache.use_key_mangler
type: boolean
default: true
help: 'Use a key-mangling function (sha1) to ensure fixed length cache-keys. This is toggleable for debugging purposes, but it is highly recommended to always leave this set to True.'
comment: 'New param'
- name: cache.debug_cache_backend
type: boolean
default: false
help: 'Extra debugging from the cache backend (cache keys, get/set/delete/etc calls) This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to False.'
comment: 'New param'
- name: oauth1.request_token_duration
type: integer
default: 28800
help: 'The Identity service may include expire attributes. If no such attribute is included, then the token lasts indefinitely. Specify how quickly the request token will expire (in seconds)'
comment: 'New param'
- name: oauth1.access_token_duration
type: integer
default: 86400
help: 'Specify how quickly the access token will expire (in seconds)'
comment: 'New param'
- name: ssl.enable
type: boolean
default: true
comment: 'New param'
- name: signing.certfile
type: string
default: '/etc/keystone/pki/certs/signing_cert.pem'
comment: 'New param'
- name: signing.keyfile
type: string
default: '/etc/keystone/pki/private/signing_key.pem'
comment: 'New param'
- name: signing.ca_certs
type: string
default: '/etc/keystone/pki/certs/cacert.pem'
comment: 'New param'
- name: signing.ca_key
type: string
default: '/etc/keystone/pki/private/cakey.pem'
comment: 'New param'
- name: signing.key_size
type: integer
default: 2048
comment: 'New param'
- name: signing.valid_days
type: integer
default: 3650
comment: 'New param'
- name: ssl.cert_required
type: boolean
default: false
comment: 'New param'
- name: signing.cert_subject
type: string
default: '/CUS/STUnset/LUnset/OUnset/CNwww.example.com'
comment: 'New param'
- name: signing.token_format
type: string
default: ''
help: 'Deprecated in favor of provider in the [token] section Allowed values are PKI or UUID'
comment: 'New param'
- name: ldap.url
type: string
default: 'ldap://localhost'
comment: 'New param'
- name: ldap.user
type: string
default: 'dc=Manager,dc=example,dc=com'
comment: 'New param'
- name: auth.password
type: string
default: 'keystone.auth.plugins.password.Password'
comment: 'New param'
- name: ldap.suffix
type: string
default: 'cn=example,cn=com'
comment: 'New param'
- name: ldap.use_dumb_member
type: boolean
default: false
comment: 'New param'
- name: ldap.allow_subtree_delete
type: boolean
default: false
comment: 'New param'
- name: ldap.dumb_member
type: string
default: 'cn=dumb,dc=example,dc=com'
comment: 'New param'
- name: ldap.page_size
type: integer
default: false
help: "Maximum results per page; a value of zero ('0') disables paging (default)"
comment: 'New param'
- name: ldap.alias_dereferencing
type: string
default: 'default'
help: "The LDAP dereferencing option for queries. This can be either 'never', 'searching', 'always', 'finding' or 'default'. The 'default' option falls back to using default dereferencing configured by your ldap.conf."
comment: 'New param'
- name: ldap.query_scope
type: string
default: 'one'
help: "The LDAP scope for queries, this can be either 'one' (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)"
comment: 'New param'
- name: ldap.user_tree_dn
type: string
default: 'ou=Users,dc=example,dc=com'
comment: 'New param'
- name: ldap.user_filter
type: string
default: ''
comment: 'New param'
- name: ldap.user_objectclass
type: string
default: 'inetOrgPerson'
comment: 'New param'
- name: ldap.user_domain_id_attribute
type: string
default: 'businessCategory'
comment: 'New param'
- name: ldap.user_id_attribute
type: string
default: 'cn'
comment: 'New param'
- name: ldap.user_name_attribute
type: string
default: 'sn'
comment: 'New param'
- name: ldap.user_mail_attribute
type: string
default: 'email'
comment: 'New param'
- name: ldap.user_pass_attribute
type: string
default: 'userPassword'
comment: 'New param'
- name: ldap.user_enabled_attribute
type: string
default: 'enabled'
comment: 'New param'
- name: ldap.user_enabled_mask
type: integer
default: false
comment: 'New param'
- name: ldap.user_enabled_default
type: boolean
default: true
comment: 'New param'
- name: ldap.user_attribute_ignore
type: string
default: 'tenant_id,tenants'
comment: 'New param'
- name: ldap.user_allow_create
type: boolean
default: true
comment: 'New param'
- name: ldap.user_allow_update
type: boolean
default: true
comment: 'New param'
- name: ldap.user_allow_delete
type: boolean
default: true
comment: 'New param'
- name: ldap.user_enabled_emulation
type: boolean
default: false
comment: 'New param'
- name: ldap.user_enabled_emulation_dn
type: string
default: ''
comment: 'New param'
- name: ldap.tenant_tree_dn
type: string
default: 'ou=Projects,dc=example,dc=com'
comment: 'New param'
- name: ldap.tenant_filter
type: string
default: ''
comment: 'New param'
- name: ldap.tenant_objectclass
type: string
default: 'groupOfNames'
comment: 'New param'
- name: ldap.tenant_domain_id_attribute
type: string
default: 'businessCategory'
comment: 'New param'
- name: ldap.tenant_id_attribute
type: string
default: 'cn'
comment: 'New param'
- name: ldap.tenant_member_attribute
type: string
default: 'member'
comment: 'New param'
- name: ldap.tenant_name_attribute
type: string
default: 'ou'
comment: 'New param'
- name: ldap.tenant_desc_attribute
type: string
default: 'desc'
comment: 'New param'
- name: ldap.tenant_enabled_attribute
type: string
default: 'enabled'
comment: 'New param'
- name: ldap.tenant_attribute_ignore
type: string
default: ''
comment: 'New param'
- name: ldap.tenant_allow_create
type: boolean
default: true
comment: 'New param'
- name: ldap.tenant_allow_update
type: boolean
default: true
comment: 'New param'
- name: ldap.tenant_allow_delete
type: boolean
default: true
comment: 'New param'
- name: ldap.tenant_enabled_emulation
type: boolean
default: false
comment: 'New param'
- name: ldap.tenant_enabled_emulation_dn
type: string
default: ''
comment: 'New param'
- name: ldap.role_tree_dn
type: string
default: 'ou=Roles,dc=example,dc=com'
comment: 'New param'
- name: ldap.role_filter
type: string
default: ''
comment: 'New param'
- name: ldap.role_objectclass
type: string
default: 'organizationalRole'
comment: 'New param'
- name: ldap.role_id_attribute
type: string
default: 'cn'
comment: 'New param'
- name: ldap.role_name_attribute
type: string
default: 'ou'
comment: 'New param'
- name: ldap.role_member_attribute
type: string
default: 'roleOccupant'
comment: 'New param'
- name: ldap.role_attribute_ignore
type: string
default: ''
comment: 'New param'
- name: ldap.role_allow_create
type: boolean
default: true
comment: 'New param'
- name: ldap.role_allow_update
type: boolean
default: true
comment: 'New param'
- name: ldap.role_allow_delete
type: boolean
default: true
comment: 'New param'
- name: ldap.group_tree_dn
type: string
default: ''
comment: 'New param'
- name: ldap.group_filter
type: string
default: ''
comment: 'New param'
- name: ldap.group_objectclass
type: string
default: 'groupOfNames'
comment: 'New param'
- name: ldap.group_id_attribute
type: string
default: 'cn'
comment: 'New param'
- name: ldap.group_name_attribute
type: string
default: 'ou'
comment: 'New param'
- name: ldap.group_member_attribute
type: string
default: 'member'
comment: 'New param'
- name: ldap.group_desc_attribute
type: string
default: 'desc'
comment: 'New param'
- name: ldap.group_attribute_ignore
type: string
default: ''
comment: 'New param'
- name: ldap.group_allow_create
type: boolean
default: true
comment: 'New param'
- name: ldap.group_allow_update
type: boolean
default: true
comment: 'New param'
- name: ldap.group_allow_delete
type: boolean
default: true
comment: 'New param'
- name: ldap.use_tls
type: boolean
default: false
help: 'LDAP TLS options. If both tls_cacertfile and tls_cacertdir are set, then tls_cacertfile will be used and tls_cacertdir is ignored. Valid options for tls_req_cert are demand, never, and allow.'
comment: 'New param'
- name: ldap.tls_cacertfile
type: string
default: ''
comment: 'New param'
- name: ldap.tls_cacertdir
type: string
default: ''
comment: 'New param'
- name: ldap.tls_req_cert
type: string
default: 'demand'
comment: 'New param'
- name: ldap.user_additional_attribute_mapping
type: string
default: ''
comment: 'New param'
- name: ldap.domain_additional_attribute_mapping
type: string
default: ''
comment: 'New param'
- name: ldap.group_additional_attribute_mapping
type: string
default: ''
comment: 'New param'
- name: ldap.role_additional_attribute_mapping
type: string
default: ''
comment: 'New param'
- name: ldap.project_additional_attribute_mapping
type: string
default: ''
comment: 'New param'
- name: auth.methods
type: string
default: 'external,password,token,oauth1'
comment: 'New param'
- name: auth.external
type: string
default: 'keystone.auth.plugins.external.ExternalDefault'
comment: 'New param'
- name: auth.token
type: string
default: 'keystone.auth.plugins.token.Token'
comment: 'New param'
- name: auth.oauth1
type: string
default: 'keystone.auth.plugins.oauth1.OAuth'
comment: 'New param'
- name: paste_deploy.config_file
type: string
default: 'keystone-paste.ini'
help: 'Name of the paste configuration file that defines the available pipelines'
comment: 'New param'
# ====================================================
- version: '2013.2.0'
added:
- name: admin_endpoint
type: string
default: 'http://localhost:%(admin_port)s/'
help: 'The base endpoint URLs for keystone that are advertised to clients (NOTE: this does NOT affect how keystone listens for connections)'
comment: 'Help string has changed'
- name: member_role_name
type: string
default: '_member_'
help: 'Role for migrating membership relationships During a SQL upgrade, the following values will be used to create a new role that will replace records in the user_tenant_membership table with explicit role grants. After migration, the member_role_id will be used in the API add_user_to_project, and member_role_name will be ignored.'
comment: 'Help string has changed'
- name: identity.domain_specific_drivers_enabled
type: boolean
default: false
help: 'A subset (or all) of domains can have their own identity driver, each with their own partial configuration file in a domain configuration directory. Only values specific to the domain need to be placed in the domain specific configuration file. This feature is disabled by default; set domain_specific_drivers_enabled to True to enable.'
comment: 'Help string has changed'
- name: identity.domain_config_dir
type: string
default: '/etc/keystone/domains'
help: 'A subset (or all) of domains can have their own identity driver, each with their own partial configuration file in a domain configuration directory. Only values specific to the domain need to be placed in the domain specific configuration file. This feature is disabled by default; set domain_specific_drivers_enabled to True to enable.'
comment: 'Help string has changed'
- name: endpoint_filter.return_all_endpoints_if_no_filter
type: boolean
default: true
help: 'Extension for creating associations between projects and endpoints in order to provide a tailored catalog for project-scoped token requests.'
comment: 'Help string has changed'
- name: assignment.cache_time
type: integer
default: ~
help: 'Assignment specific cache time-to-live (TTL) in seconds.'
comment: 'Default value has changed'
- name: oauth1.access_token_duration
type: integer
default: 86400
help: 'The Identity service may include expire attributes. If no such attribute is included, then the token lasts indefinitely. Specify how quickly the request token will expire (in seconds) Specify how quickly the access token will expire (in seconds)'
comment: 'Help string has changed'
- name: signing.cert_subject
type: string
default: '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com'
comment: 'Default value has changed'
- name: notification_driver
type: string
default: 'keystone.openstack.common.notifier.rpc_notifier'
help: 'notification_driver can be defined multiple times. Do nothing driver (the default): notification_driver = keystone.openstack.common.notifier.no_op_notifier. Logging driver example (not enabled by default): notification_driver = keystone.openstack.common.notifier.log_notifier. RPC driver example (not enabled by default).'
comment: 'New param'
- name: notification_topics
type: string
default: 'notifications'
help: 'AMQP topics to publish to when using the RPC notification driver. Multiple values can be specified by separating with commas. The actual topic names will be %s.%(default_notification_level)s'
comment: 'New param'
- name: allowed_rpc_exception_modules
type: string
default: 'keystone.openstack.common.exception,nova.exception,cinder.exception,exceptions'
help: 'Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call.'
comment: 'New param'
- name: cache.expiration_time
type: integer
default: 600
help: "Default TTL, in seconds, for any cached item in the dogpile.cache region. This applies to any cached method that doesn't have an explicit cache expiration time defined for it."
comment: 'New param'
# ====================================================
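
The history file above complements the full schema: it stores a complete checkpoint for 2013.1.3 and, for 2013.2.0, only the parameters that were added or whose default or help string changed. Assuming entries are kept in chronological order, the effective parameter set at a given version can be rebuilt by replaying them; a sketch under that assumption, not Rubick's actual resolver:

import yaml

def schema_at(history_path, target_version):
    with open(history_path) as f:
        history = yaml.safe_load(f)
    params = {}
    for entry in history:
        # A checkpoint carries the full set; later entries override
        # individual parameters by name.
        for p in entry.get('added', []):
            params[p['name']] = p
        if entry['version'] == target_version:
            break
    return params

params = schema_at('keystone_history.yml', '2013.2.0')  # hypothetical path
print(params['assignment.cache_time']['default'])  # None (the '~' default)
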

View File

@ -1,84 +0,0 @@
project: neutron_dhcp_agent
version: '2013.2.1'
parameters:
- name: debug
type: string
default: 'False'
help: 'Show debugging output in log (sets DEBUG log level output)'
- name: resync_interval
type: string
default: '5'
help: 'The DHCP agent will resync its state with Neutron to recover from any transient notification or rpc errors. The interval is the number of seconds between attempts.'
- name: interface_driver
type: string
default: 'neutron.agent.linux.interface.BridgeInterfaceDriver'
help: 'Example of interface_driver option for LinuxBridge'
- name: ovs_use_veth
type: string
default: 'False'
help: 'Whether to use veth for an OVS interface. Set ovs_use_veth to True to support kernels with limited namespace support (e.g. RHEL 6.5).'
- name: dhcp_driver
type: string
default: 'neutron.agent.linux.dhcp.Dnsmasq'
help: 'The agent can use other DHCP drivers. Dnsmasq is the simplest and requires no additional setup of the DHCP server.'
- name: use_namespaces
type: string
default: 'True'
help: 'Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and an iproute2 package that supports namespaces).'
- name: enable_isolated_metadata
type: string
default: 'False'
help: 'The DHCP server can assist with providing metadata support on isolated networks. Setting this value to True will cause the DHCP server to append specific host routes to the DHCP request. The metadata service will only be activated when the subnet gateway_ip is None. The guest instance must be configured to request host routes via DHCP (Option 121).'
- name: enable_metadata_network
type: string
default: 'False'
help: 'Allows for serving metadata requests coming from a dedicated metadata access network whose cidr is 169.254.169.254/16 (or larger prefix), and is connected to a Neutron router from which the VMs send metadata requests. In this case DHCP Option 121 will not be injected in VMs, as they will be able to reach 169.254.169.254 through a router. This option requires enable_isolated_metadata = True'
- name: num_sync_threads
type: string
default: '4'
help: 'Number of threads to use during sync process. Should not exceed connection pool size configured on server.'
- name: dhcp_confs
type: string
default: '$state_path/dhcp'
help: 'Location to store DHCP server config files'
- name: dhcp_domain
type: string
default: 'openstacklocal'
help: 'Domain to use for building the hostnames'
- name: dnsmasq_config_file
type: string
default: ''
help: 'Override the default dnsmasq settings with this file'
- name: dnsmasq_dns_server
type: string
default: ''
help: 'Use another DNS server before any in /etc/resolv.conf.'
- name: dnsmasq_lease_max
type: string
default: '16777216'
help: 'Limit number of leases to prevent a denial-of-service.'
- name: dhcp_lease_relay_socket
type: string
default: '$state_path/dhcp/lease_relay'
help: 'Location to DHCP lease relay UNIX domain socket'
- name: metadata_proxy_socket
type: string
default: '$state_path/metadata_proxy'
help: 'Location of Metadata Proxy UNIX domain socket'
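
Note that, unlike the keystone schema, every parameter here is declared as type string, with defaults such as 'False', '5' and '16777216'; a consumer that wants real types has to coerce the values itself. A small, assumed helper in that spirit:

def coerce(raw):
    # Best-effort conversion of string-typed defaults to Python values.
    if raw in ('True', 'False'):
        return raw == 'True'
    try:
        return int(raw)
    except ValueError:
        return raw  # paths, driver class names, etc. stay strings

assert coerce('False') is False
assert coerce('4') == 4
assert coerce('$state_path/dhcp') == '$state_path/dhcp'
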

View File

@ -1,101 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: debug
type: string
default: 'False'
help: 'Show debugging output in log (sets DEBUG log level output)'
comment: 'New param'
- name: resync_interval
type: string
default: '5'
help: 'The DHCP agent will resync its state with Neutron to recover from any transient notification or rpc errors. The interval is the number of seconds between attempts.'
comment: 'New param'
- name: interface_driver
type: string
default: 'neutron.agent.linux.interface.BridgeInterfaceDriver'
help: 'Example of interface_driver option for LinuxBridge'
comment: 'New param'
- name: ovs_use_veth
type: string
default: 'False'
help: 'Whether to use veth for an OVS interface. Set ovs_use_veth to True to support kernels with limited namespace support (e.g. RHEL 6.5).'
comment: 'New param'
- name: dhcp_driver
type: string
default: 'neutron.agent.linux.dhcp.Dnsmasq'
help: 'The agent can use other DHCP drivers. Dnsmasq is the simplest and requires no additional setup of the DHCP server.'
comment: 'New param'
- name: use_namespaces
type: string
default: 'True'
help: 'Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and an iproute2 package that supports namespaces).'
comment: 'New param'
- name: enable_isolated_metadata
type: string
default: 'False'
help: 'The DHCP server can assist with providing metadata support on isolated networks. Setting this value to True will cause the DHCP server to append specific host routes to the DHCP request. The metadata service will only be activated when the subnet gateway_ip is None. The guest instance must be configured to request host routes via DHCP (Option 121).'
comment: 'New param'
- name: enable_metadata_network
type: string
default: 'False'
help: 'Allows for serving metadata requests coming from a dedicated metadata access network whose cidr is 169.254.169.254/16 (or larger prefix), and is connected to a Neutron router from which the VMs send metadata requests. In this case DHCP Option 121 will not be injected in VMs, as they will be able to reach 169.254.169.254 through a router. This option requires enable_isolated_metadata = True'
comment: 'New param'
- name: num_sync_threads
type: string
default: '4'
help: 'Number of threads to use during sync process. Should not exceed connection pool size configured on server.'
comment: 'New param'
- name: dhcp_confs
type: string
default: '$state_path/dhcp'
help: 'Location to store DHCP server config files'
comment: 'New param'
- name: dhcp_domain
type: string
default: 'openstacklocal'
help: 'Domain to use for building the hostnames'
comment: 'New param'
- name: dnsmasq_config_file
type: string
default: ''
help: 'Override the default dnsmasq settings with this file'
comment: 'New param'
- name: dnsmasq_dns_server
type: string
default: ''
help: 'Use another DNS server before any in /etc/resolv.conf.'
comment: 'New param'
- name: dnsmasq_lease_max
type: string
default: '16777216'
help: 'Limit number of leases to prevent a denial-of-service.'
comment: 'New param'
- name: dhcp_lease_relay_socket
type: string
default: '$state_path/dhcp/lease_relay'
help: 'Location to DHCP lease relay UNIX domain socket'
comment: 'New param'
- name: metadata_proxy_socket
type: string
default: '$state_path/metadata_proxy'
help: 'Location of Metadata Proxy UNIX domain socket'
comment: 'New param'
# ====================================================
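
Help strings such as the one for enable_metadata_network encode a cross-parameter constraint ('This option requires enable_isolated_metadata = True') that a validator can check mechanically. A hypothetical inspection rule in that spirit, over a dict of effective agent option values:

def check_metadata_network(conf):
    # Flag enable_metadata_network set without its prerequisite.
    if (conf.get('enable_metadata_network') == 'True'
            and conf.get('enable_isolated_metadata') != 'True'):
        return ['enable_metadata_network requires '
                'enable_isolated_metadata = True']
    return []

print(check_metadata_network({'enable_metadata_network': 'True',
                              'enable_isolated_metadata': 'False'}))
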

View File

@ -1,74 +0,0 @@
project: neutron_l3_agent
version: '2013.2.1'
parameters:
- name: debug
type: string
default: 'False'
help: 'Show debugging output in log (sets DEBUG log level output)'
- name: interface_driver
type: string
default: 'neutron.agent.linux.interface.BridgeInterfaceDriver'
help: 'Example of interface_driver option for LinuxBridge'
- name: ovs_use_veth
type: string
default: 'False'
help: 'Whether to use veth for an OVS interface. Set ovs_use_veth to True to support kernels with limited namespace support (e.g. RHEL 6.5).'
- name: use_namespaces
type: string
default: 'True'
help: 'Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and an iproute2 package that supports namespaces).'
- name: router_id
type: string
default: ''
help: 'If use_namespaces is set to False, the agent can only configure one router; that router is selected by setting this router_id.'
- name: gateway_external_network_id
type: string
default: ''
help: 'Each L3 agent can be associated with at most one external network. This value should be set to the UUID of that external network. If empty, the agent will enforce that only a single external network exists and will use that external network id'
- name: handle_internal_only_routers
type: string
default: 'True'
help: 'Indicates that this L3 agent should also handle routers that do not have an external network gateway configured. This option should be True only for a single agent in a Neutron deployment, and may be False for all agents if all routers must have an external network gateway'
- name: external_network_bridge
type: string
default: 'br-ex'
help: 'Name of bridge used for external network traffic. This should be set to an empty value for the Linux bridge'
- name: metadata_port
type: string
default: '9697'
help: 'TCP Port used by Neutron metadata server'
- name: send_arp_for_ha
type: string
default: '3'
help: 'Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 to disable this feature.'
- name: periodic_interval
type: string
default: '40'
help: "seconds between re-sync routers' data if needed"
- name: periodic_fuzzy_delay
type: string
default: '5'
help: "seconds to start to sync routers' data after starting agent"
- name: enable_metadata_proxy
type: string
default: 'True'
help: 'enable_metadata_proxy, which is true by default, can be set to False if the Nova metadata server is not available'
- name: metadata_proxy_socket
type: string
default: '$state_path/metadata_proxy'
help: 'Location of Metadata Proxy UNIX domain socket'
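
Defaults like '$state_path/metadata_proxy' embed oslo-style $variable references that only make sense once state_path is known. Python's string.Template handles the common case; the state_path value below is just an example:

from string import Template

def expand(value, variables):
    # Substitute $var references; unknown variables are left as-is.
    return Template(value).safe_substitute(variables)

print(expand('$state_path/metadata_proxy',
             {'state_path': '/var/lib/neutron'}))
# /var/lib/neutron/metadata_proxy
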

@@ -1,89 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: debug
type: string
default: 'False'
help: 'Show debugging output in log (sets DEBUG log level output)'
comment: 'New param'
- name: interface_driver
type: string
default: 'neutron.agent.linux.interface.BridgeInterfaceDriver'
help: 'The driver used to manage the virtual interface; the default shown is the interface_driver example for LinuxBridge'
comment: 'New param'
- name: ovs_use_veth
type: string
default: 'False'
help: 'Use veth for an OVS interface or not. Set to True to support kernels with limited namespace support (e.g. RHEL 6.5).'
comment: 'New param'
- name: use_namespaces
type: string
default: 'True'
help: 'Allow overlapping IP (must have a kernel built with CONFIG_NET_NS=y and an iproute2 package that supports namespaces).'
comment: 'New param'
- name: router_id
type: string
default: ''
help: 'When namespaces are disabled, the L3 agent can only configure one router; this is done by setting the specific router_id.'
comment: 'New param'
- name: gateway_external_network_id
type: string
default: ''
help: 'Each L3 agent can be associated with at most one external network. This value should be set to the UUID of that external network. If empty, the agent will enforce that only a single external network exists and will use that external network id'
comment: 'New param'
- name: handle_internal_only_routers
type: string
default: 'True'
help: 'Indicates that this L3 agent should also handle routers that do not have an external network gateway configured. This option should be True only for a single agent in a Neutron deployment, and may be False for all agents if all routers must have an external network gateway'
comment: 'New param'
- name: external_network_bridge
type: string
default: 'br-ex'
help: 'Name of bridge used for external network traffic. This should be set to an empty value for Linux Bridge.'
comment: 'New param'
- name: metadata_port
type: string
default: '9697'
help: 'TCP Port used by Neutron metadata server'
comment: 'New param'
- name: send_arp_for_ha
type: string
default: '3'
help: 'Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 to disable this feature.'
comment: 'New param'
- name: periodic_interval
type: string
default: '40'
help: "seconds between re-sync routers' data if needed"
comment: 'New param'
- name: periodic_fuzzy_delay
type: string
default: '5'
help: "seconds to start to sync routers' data after starting agent"
comment: 'New param'
- name: enable_metadata_proxy
type: string
default: 'True'
help: 'enable_metadata_proxy, which is true by default, can be set to False if the Nova metadata server is not available'
comment: 'New param'
- name: metadata_proxy_socket
type: string
default: '$state_path/metadata_proxy'
help: 'Location of Metadata Proxy UNIX domain socket'
comment: 'New param'
# ====================================================

@@ -1,59 +0,0 @@
project: neutron_metadata_agent
version: '2013.2.1'
parameters:
- name: debug
type: string
default: 'True'
help: 'Show debugging output in log (sets DEBUG log level output)'
- name: auth_url
type: string
default: 'http://localhost:5000/v2.0'
help: 'The Neutron user information for accessing the Neutron API.'
- name: auth_region
type: string
default: 'RegionOne'
help: 'The Neutron user information for accessing the Neutron API.'
- name: admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
help: 'The Neutron user information for accessing the Neutron API.'
- name: admin_user
type: string
default: '%SERVICE_USER%'
help: 'The Neutron user information for accessing the Neutron API.'
- name: admin_password
type: string
default: '%SERVICE_PASSWORD%'
help: 'The Neutron user information for accessing the Neutron API.'
- name: endpoint_type
type: string
default: 'adminURL'
help: 'Network service endpoint type to pull from the keystone catalog'
- name: nova_metadata_ip
type: string
default: '127.0.0.1'
help: 'IP address used by Nova metadata server'
- name: nova_metadata_port
type: string
default: '8775'
help: 'TCP Port used by Nova metadata server'
- name: metadata_proxy_shared_secret
type: string
default: ''
help: 'When proxying metadata requests, Neutron signs the Instance-ID header with a shared secret to prevent spoofing. You may select any string for a secret, but it must match here and in the configuration used by the Nova Metadata Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret'
- name: metadata_proxy_socket
type: string
default: '$state_path/metadata_proxy'
help: 'Location of Metadata Proxy UNIX domain socket'
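
The easiest parameter to get wrong here is the shared secret, which must agree with Nova's neutron_metadata_proxy_shared_secret. A metadata_agent.ini sketch built from the defaults above; the %SERVICE_*% placeholders are filled with hypothetical values purely for illustration:

    [DEFAULT]
    auth_url = http://localhost:5000/v2.0
    auth_region = RegionOne
    # Hypothetical substitutes for %SERVICE_TENANT_NAME%, %SERVICE_USER%, %SERVICE_PASSWORD%
    admin_tenant_name = service
    admin_user = neutron
    admin_password = secret
    nova_metadata_ip = 127.0.0.1
    nova_metadata_port = 8775
    # Must equal neutron_metadata_proxy_shared_secret on the Nova side
    metadata_proxy_shared_secret = shared-secret-value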

@@ -1,71 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: debug
type: string
default: 'True'
help: 'Show debugging output in log (sets DEBUG log level output)'
comment: 'New param'
- name: auth_url
type: string
default: 'http://localhost:5000/v2.0'
help: 'The Neutron user information for accessing the Neutron API.'
comment: 'New param'
- name: auth_region
type: string
default: 'RegionOne'
help: 'The Neutron user information for accessing the Neutron API.'
comment: 'New param'
- name: admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
help: 'The Neutron user information for accessing the Neutron API.'
comment: 'New param'
- name: admin_user
type: string
default: '%SERVICE_USER%'
help: 'The Neutron user information for accessing the Neutron API.'
comment: 'New param'
- name: admin_password
type: string
default: '%SERVICE_PASSWORD%'
help: 'The Neutron user information for accessing the Neutron API.'
comment: 'New param'
- name: endpoint_type
type: string
default: 'adminURL'
help: 'Network service endpoint type to pull from the keystone catalog'
comment: 'New param'
- name: nova_metadata_ip
type: string
default: '127.0.0.1'
help: 'IP address used by Nova metadata server'
comment: 'New param'
- name: nova_metadata_port
type: string
default: '8775'
help: 'TCP Port used by Nova metadata server'
comment: 'New param'
- name: metadata_proxy_shared_secret
type: string
default: ''
help: 'When proxying metadata requests, Neutron signs the Instance-ID header with a shared secret to prevent spoofing. You may select any string for a secret, but it must match here and in the configuration used by the Nova Metadata Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret'
comment: 'New param'
- name: metadata_proxy_socket
type: string
default: '$state_path/metadata_proxy'
help: 'Location of Metadata Proxy UNIX domain socket'
comment: 'New param'
# ====================================================

@@ -1,38 +0,0 @@
project: neutron_openvswitch_agent
version: '2013.2.1'
parameters:
- name: ml2.type_drivers
type: string
default: 'local,flat,vlan,gre,vxlan'
- name: ml2.tenant_network_types
type: string
default: 'local'
- name: ml2.mechanism_drivers
type: string
default: ''
help: '(ListOpt) Ordered list of networking mechanism driver entrypoints to be loaded from the neutron.ml2.mechanism_drivers namespace.'
- name: ml2_type_flat.flat_networks
type: string
default: ''
- name: ml2_type_vlan.network_vlan_ranges
type: string
default: ''
- name: ml2_type_gre.tunnel_id_ranges
type: string
default: ''
help: '(ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation'
- name: ml2_type_vxlan.vni_ranges
type: string
default: ''
- name: ml2_type_vxlan.vxlan_group
type: string
default: ''
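
The dotted names in this schema encode INI sections: ml2.type_drivers, for example, is the type_drivers option in the [ml2] section. A corresponding ml2_conf.ini sketch; the mechanism driver and tunnel range shown are hypothetical, since the schema defaults for both are empty:

    [ml2]
    type_drivers = local,flat,vlan,gre,vxlan
    tenant_network_types = local
    # Hypothetical choice; the schema default is empty
    mechanism_drivers = openvswitch

    [ml2_type_gre]
    # Hypothetical <tun_min>:<tun_max> range; the schema default is empty
    tunnel_id_ranges = 1:1000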

@@ -1,47 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: ml2.type_drivers
type: string
default: 'local,flat,vlan,gre,vxlan'
comment: 'New param'
- name: ml2.tenant_network_types
type: string
default: 'local'
comment: 'New param'
- name: ml2.mechanism_drivers
type: string
default: ''
help: '(ListOpt) Ordered list of networking mechanism driver entrypoints to be loaded from the neutron.ml2.mechanism_drivers namespace.'
comment: 'New param'
- name: ml2_type_flat.flat_networks
type: string
default: ''
comment: 'New param'
- name: ml2_type_vlan.network_vlan_ranges
type: string
default: ''
comment: 'New param'
- name: ml2_type_gre.tunnel_id_ranges
type: string
default: ''
help: '(ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation'
comment: 'New param'
- name: ml2_type_vxlan.vni_ranges
type: string
default: ''
comment: 'New param'
- name: ml2_type_vxlan.vxlan_group
type: string
default: ''
comment: 'New param'
# ====================================================

@@ -1,526 +0,0 @@
project: neutron_server
version: '2013.2.1'
parameters:
- name: verbose
type: string
default: 'False'
help: 'Default log level is INFO; verbose and debug have the same result: either one sets the DEBUG log level output'
- name: state_path
type: string
default: '/var/lib/neutron'
help: 'Where to store Neutron state files. This directory must be writable by the user executing the agent.'
- name: lock_path
type: string
default: '$state_path/lock'
help: 'Where to store lock files'
- name: log_format
type: string
default: '%(asctime)s %(levelname)8s [%(name)s] %(message)s'
- name: log_date_format
type: string
default: '%Y-%m-%d %H:%M:%S'
- name: use_syslog
type: string
default: 'False'
- name: syslog_log_facility
type: string
default: 'LOG_USER'
- name: use_stderr
type: string
default: 'True'
- name: publish_errors
type: string
default: 'False'
- name: bind_host
type: host
default: '0.0.0.0'
help: 'Address to bind the API server'
- name: bind_port
type: string
default: '9696'
help: 'Port to bind the API server to'
- name: api_extensions_path
type: string
default: ''
help: "Path to the extensions. Note that this can be a colon-separated list of paths. For example: api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions The __path__ of neutron.extensions is appended to this, so if your extensions are in there you don't need to specify them here"
- name: core_plugin
type: string
default: ''
help: 'Neutron plugin provider module'
- name: service_plugins
type: string
default: ''
help: 'Advanced service modules'
- name: api_paste_config
type: string
default: 'api-paste.ini'
help: 'Paste configuration file'
- name: auth_strategy
type: string
default: 'keystone'
help: "The strategy to be used for auth. Supported values are 'keystone'(default), 'noauth'."
- name: mac_generation_retries
type: string
default: '16'
help: 'Maximum number of retries to generate a unique MAC address'
- name: dhcp_lease_duration
type: string
default: '86400'
help: 'DHCP Lease duration (in seconds)'
- name: dhcp_agent_notification
type: string
default: 'True'
help: 'Allow sending resource operation notification to DHCP agent'
- name: allow_bulk
type: string
default: 'True'
help: 'Enable or disable bulk create/update/delete operations'
- name: allow_pagination
type: string
default: 'False'
help: 'Enable or disable pagination'
- name: allow_sorting
type: string
default: 'False'
help: 'Enable or disable sorting'
- name: allow_overlapping_ips
type: string
default: 'False'
help: 'Enable or disable overlapping IPs for subnets. Attention: this parameter MUST be set to False if Neutron is being used in conjunction with nova security groups'
- name: force_gateway_on_subnet
type: string
default: 'False'
help: 'Ensure that the configured gateway is on the subnet'
- name: rpc_backend
type: string
default: 'neutron.openstack.common.rpc.impl_zmq'
help: 'The messaging module to use; defaults to kombu (set here to the ZeroMQ implementation)'
- name: rpc_thread_pool_size
type: string
default: '64'
help: 'Size of RPC thread pool'
- name: rpc_conn_pool_size
type: string
default: '30'
help: 'Size of RPC connection pool'
- name: rpc_response_timeout
type: string
default: '60'
help: 'Seconds to wait for a response from call or multicall'
- name: rpc_cast_timeout
type: string
default: '30'
help: 'Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.'
- name: allowed_rpc_exception_modules
type: string
default: 'neutron.openstack.common.exception, nova.exception'
help: 'Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call.'
- name: control_exchange
type: string
default: 'neutron'
help: 'AMQP exchange to connect to if using RabbitMQ or QPID'
- name: fake_rabbit
type: string
default: 'False'
help: 'If passed, use a fake RabbitMQ provider'
- name: kombu_ssl_version
type: string
default: ''
help: 'SSL version to use (valid only if SSL enabled)'
- name: kombu_ssl_keyfile
type: string
default: ''
help: 'SSL key file (valid only if SSL enabled)'
- name: kombu_ssl_certfile
type: string
default: ''
help: 'SSL cert file (valid only if SSL enabled)'
- name: kombu_ssl_ca_certs
type: string
default: ''
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)'"
- name: rabbit_host
type: host
default: 'localhost'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation"
- name: rabbit_password
type: string
default: 'guest'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server"
- name: rabbit_port
type: string
default: '5672'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening"
- name: rabbit_hosts
type: string
default: 'localhost:5672'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'"
- name: rabbit_userid
type: string
default: 'guest'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections"
- name: rabbit_virtual_host
type: string
default: '/'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation."
- name: rabbit_max_retries
type: string
default: '0'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation. Maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count)"
- name: rabbit_retry_interval
type: string
default: '1'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation. Maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count) RabbitMQ connection retry interval"
- name: rabbit_ha_queues
type: boolean
default: False
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation. Maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count) RabbitMQ connection retry interval Use HA queues in RabbitMQ (x-ha-policy: all).You need to wipe RabbitMQ database when changing this option. "
- name: qpid_hostname
type: string
default: 'localhost'
help: 'Qpid broker hostname'
- name: qpid_port
type: string
default: '5672'
help: 'Qpid broker port'
- name: qpid_hosts
type: string
default: 'localhost:5672'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'"
- name: qpid_username
type: string
default: "''"
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection"
- name: qpid_password
type: string
default: "''"
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection"
- name: qpid_sasl_mechanisms
type: string
default: "''"
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth"
- name: qpid_heartbeat
type: string
default: '60'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth Seconds between connection keepalive heartbeats"
- name: qpid_protocol
type: string
default: 'tcp'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth Seconds between connection keepalive heartbeats Transport to use, either 'tcp' or 'ssl'"
- name: qpid_tcp_nodelay
type: string
default: 'True'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth Seconds between connection keepalive heartbeats Transport to use, either 'tcp' or 'ssl' Disable Nagle algorithm"
- name: rpc_zmq_bind_address
type: string
default: '*'
help: "ZMQ ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The 'host' option should point or resolve to this address."
- name: notification_driver
type: string
default: 'neutron.openstack.common.notifier.rpc_notifier'
help: 'Notification driver; can be defined multiple times. Besides this RPC driver, which DHCP agents need, there are a do-nothing driver (neutron.openstack.common.notifier.no_op_notifier) and a logging driver (neutron.openstack.common.notifier.log_notifier).'
- name: default_notification_level
type: string
default: 'INFO'
help: 'default_notification_level is used to form actual topic name(s) or to set logging level'
- name: host
type: string
default: 'myhost.com'
help: 'default_publisher_id is a part of the notification payload'
- name: default_publisher_id
type: string
default: '$host'
help: 'default_publisher_id is a part of the notification payload'
- name: notification_topics
type: string
default: 'notifications'
help: 'Defined in rpc_notifier, can be comma separated values. The actual topic names will be %s.%(default_notification_level)s'
- name: pagination_max_limit
type: string
default: '-1'
help: "Default maximum number of items returned in a single response. The value 'infinite' or a negative value means no max limit; otherwise the value must be greater than 0. If more items are requested than pagination_max_limit, the server will return only pagination_max_limit items."
- name: max_dns_nameservers
type: string
default: '5'
help: 'Maximum number of DNS nameservers per subnet'
- name: max_subnet_host_routes
type: string
default: '20'
help: 'Maximum number of host routes per subnet'
- name: max_fixed_ips_per_port
type: string
default: '5'
help: 'Maximum number of fixed ips per port'
- name: agent_down_time
type: string
default: '5'
help: 'Seconds to regard the agent as down (agent management extension).'
- name: network_scheduler_driver
type: string
default: 'neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler'
help: 'Driver to use for scheduling network to DHCP agent (agent scheduler extension)'
- name: router_scheduler_driver
type: string
default: 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
help: 'Driver to use for scheduling router to a default L3 agent'
- name: loadbalancer_pool_scheduler_driver
type: string
default: 'neutron.services.loadbalancer.agent_scheduler.ChanceScheduler'
help: 'Driver to use for scheduling a loadbalancer pool to an lbaas agent'
- name: network_auto_schedule
type: string
default: 'True'
help: 'Allow auto scheduling networks to DHCP agent. It will schedule non-hosted networks to the first DHCP agent which sends a get_active_networks message to the neutron server'
- name: router_auto_schedule
type: string
default: 'True'
help: 'Allow auto scheduling routers to L3 agent. It will schedule non-hosted routers to the first L3 agent which sends a sync_routers message to the neutron server'
- name: dhcp_agents_per_network
type: string
default: '1'
help: 'Number of DHCP agents scheduled to host a network. This enables redundant DHCP agents for configured networks.'
- name: tcp_keepidle
type: string
default: '600'
help: 'Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when starting the API server. Not supported on OS X.'
- name: retry_until_window
type: string
default: '30'
help: 'Number of seconds to keep retrying to listen'
- name: backlog
type: string
default: '4096'
help: 'Number of backlog requests to configure the socket with.'
- name: use_ssl
type: string
default: 'False'
help: 'Enable SSL on the API server'
- name: ssl_cert_file
type: string
default: '/path/to/certfile'
help: 'Certificate file to use when starting API server securely'
- name: ssl_key_file
type: string
default: '/path/to/keyfile'
help: 'Private key file to use when starting API server securely'
- name: ssl_ca_file
type: string
default: '/path/to/cafile'
help: 'CA certificate file to use when starting API server securely to verify connecting clients. This is an optional parameter only required if API clients need to authenticate to the API server using SSL certificates signed by a trusted CA'
- name: quotas.quota_items
type: string
default: 'network,subnet,port'
help: 'resource name(s) that are supported in quota features'
- name: quotas.default_quota
type: string
default: '-1'
help: 'default number of resource allowed per tenant, minus for unlimited'
- name: quotas.quota_network
type: string
default: '10'
help: 'number of networks allowed per tenant, and minus means unlimited'
- name: quotas.quota_subnet
type: string
default: '10'
help: 'number of subnets allowed per tenant, and minus means unlimited'
- name: quotas.quota_port
type: string
default: '50'
help: 'number of ports allowed per tenant, and minus means unlimited'
- name: quotas.quota_security_group
type: string
default: '10'
help: 'number of security groups allowed per tenant, and minus means unlimited'
- name: quotas.quota_security_group_rule
type: string
default: '100'
help: 'number of security group rules allowed per tenant, and minus means unlimited'
- name: quotas.quota_driver
type: string
default: 'neutron.db.quota_db.DbQuotaDriver'
help: 'default driver to use for quota checks'
- name: agent.root_helper
type: string
default: 'sudo'
help: "Use 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to 'sudo' to skip the filtering and just run the comand directly"
- name: agent.report_interval
type: string
default: '4'
help: 'Seconds between nodes reporting state to server; should be less than agent_down_time'
- name: keystone_authtoken.auth_host
type: host
default: '127.0.0.1'
- name: keystone_authtoken.auth_port
type: string
default: '35357'
- name: keystone_authtoken.auth_protocol
type: string
default: 'http'
- name: keystone_authtoken.admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
- name: keystone_authtoken.admin_user
type: string
default: '%SERVICE_USER%'
- name: keystone_authtoken.admin_password
type: string
default: '%SERVICE_PASSWORD%'
- name: keystone_authtoken.signing_dir
type: string
default: '$state_path/keystone-signing'
- name: database.connection
type: string
default: 'mysql://root:pass@127.0.0.1:3306/neutron'
help: 'The SQLAlchemy connection string used to connect to the database. This line MUST be changed to actually run the plugin.'
- name: database.slave_connection
type: string
default: ''
help: 'The SQLAlchemy connection string used to connect to the slave database'
- name: database.max_retries
type: string
default: '10'
help: 'Database reconnection retry times in the event connectivity is lost; a value of -1 implies an infinite retry count'
- name: database.retry_interval
type: string
default: '10'
help: 'Database reconnection interval in seconds - if the initial connection to the database fails'
- name: database.min_pool_size
type: string
default: '1'
help: 'Minimum number of SQL connections to keep open in a pool'
- name: database.max_pool_size
type: string
default: '10'
help: 'Maximum number of SQL connections to keep open in a pool'
- name: database.idle_timeout
type: string
default: '3600'
help: 'Timeout in seconds before idle sql connections are reaped'
- name: database.max_overflow
type: string
default: '20'
help: 'If set, use this value for max_overflow with sqlalchemy'
- name: database.connection_debug
type: string
default: '0'
help: 'Verbosity of SQL debugging information. 0=None, 100=Everything'
- name: database.connection_trace
type: string
default: 'False'
help: 'Add python stack traces to SQL as comment strings'
- name: database.pool_timeout
type: string
default: '10'
help: 'If set, use this value for pool_timeout with sqlalchemy'
- name: service_providers.service_provider
type: string
default: 'LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default'
help: 'Specify service providers (drivers) for advanced services like loadbalancer, VPN, firewall. Must be in the form service_provider=<service_type>:<name>:<driver>[:default]. Allowed service types include LOADBALANCER, FIREWALL and VPN. The combination of <service type> and <name> must be unique; <driver> must also be unique. This is a multiline option; example for a default provider: service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default; example of a non-default provider: service_provider=FIREWALL:name2:firewall_driver_path'
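
As in the ML2 schema, dotted names map to INI sections, so database.connection becomes the connection option of the [database] section in neutron.conf. A sketch using the defaults above; the core_plugin value is hypothetical, since the schema default is empty:

    [DEFAULT]
    # Hypothetical plugin path; the schema default is empty
    core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
    auth_strategy = keystone
    allow_overlapping_ips = False

    [database]
    # MUST be changed to actually run the plugin
    connection = mysql://root:pass@127.0.0.1:3306/neutron

    [service_providers]
    service_provider = LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default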

@@ -1,634 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: verbose
type: string
default: 'False'
help: 'Default log level is INFO; verbose and debug have the same result: either one sets the DEBUG log level output'
comment: 'New param'
- name: state_path
type: string
default: '/var/lib/neutron'
help: 'Where to store Neutron state files. This directory must be writable by the user executing the agent.'
comment: 'New param'
- name: lock_path
type: string
default: '$state_path/lock'
help: 'Where to store lock files'
comment: 'New param'
- name: log_format
type: string
default: '%(asctime)s %(levelname)8s [%(name)s] %(message)s'
comment: 'New param'
- name: log_date_format
type: string
default: '%Y-%m-%d %H:%M:%S'
comment: 'New param'
- name: use_syslog
type: string
default: 'False'
comment: 'New param'
- name: syslog_log_facility
type: string
default: 'LOG_USER'
comment: 'New param'
- name: use_stderr
type: string
default: 'True'
comment: 'New param'
- name: publish_errors
type: string
default: 'False'
comment: 'New param'
- name: bind_host
type: host
default: '0.0.0.0'
help: 'Address to bind the API server'
comment: 'New param'
- name: bind_port
type: string
default: '9696'
help: 'Port to bind the API server to'
comment: 'New param'
- name: api_extensions_path
type: string
default: ''
help: "Path to the extensions. Note that this can be a colon-separated list of paths. For example: api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions The __path__ of neutron.extensions is appended to this, so if your extensions are in there you don't need to specify them here"
comment: 'New param'
- name: core_plugin
type: string
default: ''
help: 'Neutron plugin provider module'
comment: 'New param'
- name: service_plugins
type: string
default: ''
help: 'Advanced service modules'
comment: 'New param'
- name: api_paste_config
type: string
default: 'api-paste.ini'
help: 'Paste configuration file'
comment: 'New param'
- name: auth_strategy
type: string
default: 'keystone'
help: "The strategy to be used for auth. Supported values are 'keystone'(default), 'noauth'."
comment: 'New param'
- name: mac_generation_retries
type: string
default: '16'
help: 'Maximum number of retries to generate a unique MAC address'
comment: 'New param'
- name: dhcp_lease_duration
type: string
default: '86400'
help: 'DHCP Lease duration (in seconds)'
comment: 'New param'
- name: dhcp_agent_notification
type: string
default: 'True'
help: 'Allow sending resource operation notification to DHCP agent'
comment: 'New param'
- name: allow_bulk
type: string
default: 'True'
help: 'Enable or disable bulk create/update/delete operations'
comment: 'New param'
- name: allow_pagination
type: string
default: 'False'
help: 'Enable or disable pagination'
comment: 'New param'
- name: allow_sorting
type: string
default: 'False'
help: 'Enable or disable sorting'
comment: 'New param'
- name: allow_overlapping_ips
type: string
default: 'False'
help: 'Enable or disable overlapping IPs for subnets. Attention: this parameter MUST be set to False if Neutron is being used in conjunction with nova security groups'
comment: 'New param'
- name: force_gateway_on_subnet
type: string
default: 'False'
help: 'Ensure that the configured gateway is on the subnet'
comment: 'New param'
- name: rpc_backend
type: string
default: 'neutron.openstack.common.rpc.impl_zmq'
help: 'The messaging module to use; defaults to kombu (set here to the ZeroMQ implementation)'
comment: 'New param'
- name: rpc_thread_pool_size
type: string
default: '64'
help: 'Size of RPC thread pool'
comment: 'New param'
- name: rpc_conn_pool_size
type: string
default: '30'
help: 'Size of RPC connection pool'
comment: 'New param'
- name: rpc_response_timeout
type: string
default: '60'
help: 'Seconds to wait for a response from call or multicall'
comment: 'New param'
- name: rpc_cast_timeout
type: string
default: '30'
help: 'Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.'
comment: 'New param'
- name: allowed_rpc_exception_modules
type: string
default: 'neutron.openstack.common.exception, nova.exception'
help: 'Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call.'
comment: 'New param'
- name: control_exchange
type: string
default: 'neutron'
help: 'AMQP exchange to connect to if using RabbitMQ or QPID'
comment: 'New param'
- name: fake_rabbit
type: string
default: 'False'
help: 'If passed, use a fake RabbitMQ provider'
comment: 'New param'
- name: kombu_ssl_version
type: string
default: ''
help: 'SSL version to use (valid only if SSL enabled)'
comment: 'New param'
- name: kombu_ssl_keyfile
type: string
default: ''
help: 'SSL key file (valid only if SSL enabled)'
comment: 'New param'
- name: kombu_ssl_certfile
type: string
default: ''
help: 'SSL cert file (valid only if SSL enabled)'
comment: 'New param'
- name: kombu_ssl_ca_certs
type: string
default: ''
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)'"
comment: 'New param'
- name: rabbit_host
type: host
default: 'localhost'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation"
comment: 'New param'
- name: rabbit_password
type: string
default: 'guest'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server"
comment: 'New param'
- name: rabbit_port
type: string
default: '5672'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening"
comment: 'New param'
- name: rabbit_hosts
type: string
default: 'localhost:5672'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'"
comment: 'New param'
- name: rabbit_userid
type: string
default: 'guest'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections"
comment: 'New param'
- name: rabbit_virtual_host
type: string
default: '/'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation."
comment: 'New param'
- name: rabbit_max_retries
type: string
default: '0'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation. Maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count)"
comment: 'New param'
- name: rabbit_retry_interval
type: string
default: '1'
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation. Maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count) RabbitMQ connection retry interval"
comment: 'New param'
- name: rabbit_ha_queues
type: boolean
default: false
help: "Configuration options if sending notifications via kombu rpc (these are the defaults) SSL version to use (valid only if SSL enabled) SSL key file (valid only if SSL enabled) SSL cert file (valid only if SSL enabled) SSL certification authority file (valid only if SSL enabled)' IP address of the RabbitMQ installation Password of the RabbitMQ server Port where RabbitMQ server is running/listening RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' User ID used for RabbitMQ connections Location of a virtual RabbitMQ installation. Maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count) RabbitMQ connection retry interval Use HA queues in RabbitMQ (x-ha-policy: all).You need to wipe RabbitMQ database when changing this option. "
comment: 'New param'
- name: qpid_hostname
type: string
default: 'localhost'
help: 'Qpid broker hostname'
comment: 'New param'
- name: qpid_port
type: string
default: '5672'
help: 'Qpid broker port'
comment: 'New param'
- name: qpid_hosts
type: string
default: 'localhost:5672'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'"
comment: 'New param'
- name: qpid_username
type: string
default: "''"
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection"
comment: 'New param'
- name: qpid_password
type: string
default: "''"
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection"
comment: 'New param'
- name: qpid_sasl_mechanisms
type: string
default: "''"
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth"
comment: 'New param'
- name: qpid_heartbeat
type: string
default: '60'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth Seconds between connection keepalive heartbeats"
comment: 'New param'
- name: qpid_protocol
type: string
default: 'tcp'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth Seconds between connection keepalive heartbeats Transport to use, either 'tcp' or 'ssl'"
comment: 'New param'
- name: qpid_tcp_nodelay
type: string
default: 'True'
help: "QPID Qpid broker hostname Qpid broker port Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' Username for qpid connection Password for qpid connection Space separated list of SASL mechanisms to use for auth Seconds between connection keepalive heartbeats Transport to use, either 'tcp' or 'ssl' Disable Nagle algorithm"
comment: 'New param'
- name: rpc_zmq_bind_address
type: string
default: '*'
help: "ZMQ ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The 'host' option should point or resolve to this address."
comment: 'New param'
- name: notification_driver
type: string
default: 'neutron.openstack.common.notifier.rpc_notifier'
help: 'Notification driver; can be defined multiple times. Besides this RPC driver, which DHCP agents need, there are a do-nothing driver (neutron.openstack.common.notifier.no_op_notifier) and a logging driver (neutron.openstack.common.notifier.log_notifier).'
comment: 'New param'
- name: default_notification_level
type: string
default: 'INFO'
help: 'default_notification_level is used to form actual topic name(s) or to set logging level'
comment: 'New param'
- name: host
type: string
default: 'myhost.com'
help: 'default_publisher_id is a part of the notification payload'
comment: 'New param'
- name: default_publisher_id
type: string
default: '$host'
help: 'default_publisher_id is a part of the notification payload'
comment: 'New param'
- name: notification_topics
type: string
default: 'notifications'
help: 'Defined in rpc_notifier, can be comma separated values. The actual topic names will be %s.%(default_notification_level)s'
comment: 'New param'
- name: pagination_max_limit
type: string
default: '-1'
help: "Default maximum number of items returned in a single response. The value 'infinite' or a negative value means no max limit; otherwise the value must be greater than 0. If more items are requested than pagination_max_limit, the server will return only pagination_max_limit items."
comment: 'New param'
- name: max_dns_nameservers
type: string
default: '5'
help: 'Maximum number of DNS nameservers per subnet'
comment: 'New param'
- name: max_subnet_host_routes
type: string
default: '20'
help: 'Maximum number of host routes per subnet'
comment: 'New param'
- name: max_fixed_ips_per_port
type: string
default: '5'
help: 'Maximum number of fixed ips per port'
comment: 'New param'
- name: agent_down_time
type: string
default: '5'
help: 'Seconds to regard the agent as down (agent management extension).'
comment: 'New param'
- name: network_scheduler_driver
type: string
default: 'neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler'
help: 'Driver to use for scheduling network to DHCP agent (agent scheduler extension)'
comment: 'New param'
- name: router_scheduler_driver
type: string
default: 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
help: 'Driver to use for scheduling router to a default L3 agent'
comment: 'New param'
- name: loadbalancer_pool_scheduler_driver
type: string
default: 'neutron.services.loadbalancer.agent_scheduler.ChanceScheduler'
help: 'Driver to use for scheduling a loadbalancer pool to an lbaas agent'
comment: 'New param'
- name: network_auto_schedule
type: string
default: 'True'
help: 'Allow auto scheduling networks to DHCP agent. It will schedule non-hosted networks to the first DHCP agent which sends a get_active_networks message to the neutron server'
comment: 'New param'
- name: router_auto_schedule
type: string
default: 'True'
help: 'Allow auto scheduling routers to L3 agent. It will schedule non-hosted routers to the first L3 agent which sends a sync_routers message to the neutron server'
comment: 'New param'
- name: dhcp_agents_per_network
type: string
default: '1'
help: 'Number of DHCP agents scheduled to host a network. This enables redundant DHCP agents for configured networks.'
comment: 'New param'
- name: tcp_keepidle
type: string
default: '600'
help: 'Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when starting the API server. Not supported on OS X.'
comment: 'New param'
- name: retry_until_window
type: string
default: '30'
help: 'Number of seconds to keep retrying to listen'
comment: 'New param'
- name: backlog
type: string
default: '4096'
help: 'Number of backlog requests to configure the socket with.'
comment: 'New param'
- name: use_ssl
type: string
default: 'False'
help: 'Enable SSL on the API server'
comment: 'New param'
- name: ssl_cert_file
type: string
default: '/path/to/certfile'
help: 'Certificate file to use when starting API server securely'
comment: 'New param'
- name: ssl_key_file
type: string
default: '/path/to/keyfile'
help: 'Private key file to use when starting API server securely'
comment: 'New param'
- name: ssl_ca_file
type: string
default: '/path/to/cafile'
help: 'CA certificate file to use when starting API server securely to verify connecting clients. This is an optional parameter only required if API clients need to authenticate to the API server using SSL certificates signed by a trusted CA'
comment: 'New param'
- name: quotas.quota_items
type: string
default: 'network,subnet,port'
help: 'resource name(s) that are supported in quota features'
comment: 'New param'
- name: quotas.default_quota
type: string
default: '-1'
help: 'default number of resource allowed per tenant, minus for unlimited'
comment: 'New param'
- name: quotas.quota_network
type: string
default: '10'
help: 'number of networks allowed per tenant, and minus means unlimited'
comment: 'New param'
- name: quotas.quota_subnet
type: string
default: '10'
help: 'number of subnets allowed per tenant, and minus means unlimited'
comment: 'New param'
- name: quotas.quota_port
type: string
default: '50'
help: 'number of ports allowed per tenant, and minus means unlimited'
comment: 'New param'
- name: quotas.quota_security_group
type: string
default: '10'
help: 'number of security groups allowed per tenant, and minus means unlimited'
comment: 'New param'
- name: quotas.quota_security_group_rule
type: string
default: '100'
help: 'number of security group rules allowed per tenant, and minus means unlimited'
comment: 'New param'
- name: quotas.quota_driver
type: string
default: 'neutron.db.quota_db.DbQuotaDriver'
help: 'default driver to use for quota checks'
comment: 'New param'
- name: agent.root_helper
type: string
default: 'sudo'
help: "Use 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to 'sudo' to skip the filtering and just run the comand directly"
comment: 'New param'
- name: agent.report_interval
type: string
default: '4'
help: 'Seconds between nodes reporting state to server; should be less than agent_down_time'
comment: 'New param'
- name: keystone_authtoken.auth_host
type: host
default: '127.0.0.1'
comment: 'New param'
- name: keystone_authtoken.auth_port
type: string
default: '35357'
comment: 'New param'
- name: keystone_authtoken.auth_protocol
type: string
default: 'http'
comment: 'New param'
- name: keystone_authtoken.admin_tenant_name
type: string
default: '%SERVICE_TENANT_NAME%'
comment: 'New param'
- name: keystone_authtoken.admin_user
type: string
default: '%SERVICE_USER%'
comment: 'New param'
- name: keystone_authtoken.admin_password
type: string
default: '%SERVICE_PASSWORD%'
comment: 'New param'
- name: keystone_authtoken.signing_dir
type: string
default: '$state_path/keystone-signing'
comment: 'New param'
- name: database.connection
type: string
default: 'mysql://root:pass@127.0.0.1:3306/neutron'
help: 'The SQLAlchemy connection string used to connect to the database. This line MUST be changed to actually run the plugin.'
comment: 'New param'
- name: database.slave_connection
type: string
default: ''
help: 'The SQLAlchemy connection string used to connect to the slave database'
comment: 'New param'
- name: database.max_retries
type: string
default: '10'
help: 'Database reconnection retry times in the event connectivity is lost; a value of -1 implies an infinite retry count'
comment: 'New param'
- name: database.retry_interval
type: string
default: '10'
help: 'Database reconnection interval in seconds - if the initial connection to the database fails'
comment: 'New param'
- name: database.min_pool_size
type: string
default: '1'
help: 'Minimum number of SQL connections to keep open in a pool'
comment: 'New param'
- name: database.max_pool_size
type: string
default: '10'
help: 'Maximum number of SQL connections to keep open in a pool'
comment: 'New param'
- name: database.idle_timeout
type: string
default: '3600'
help: 'Timeout in seconds before idle sql connections are reaped'
comment: 'New param'
- name: database.max_overflow
type: string
default: '20'
help: 'If set, use this value for max_overflow with sqlalchemy'
comment: 'New param'
- name: database.connection_debug
type: string
default: '0'
help: 'Verbosity of SQL debugging information. 0=None, 100=Everything'
comment: 'New param'
- name: database.connection_trace
type: string
default: 'False'
help: 'Add python stack traces to SQL as comment strings'
comment: 'New param'
- name: database.pool_timeout
type: string
default: '10'
help: 'If set, use this value for pool_timeout with sqlalchemy'
comment: 'New param'
- name: service_providers.service_provider
type: string
default: 'LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default'
help: 'Specify service providers (drivers) for advanced services like loadbalancer, VPN, firewall. Must be in the form service_provider=<service_type>:<name>:<driver>[:default]. Allowed service types include LOADBALANCER, FIREWALL, and VPN. The combination of <service_type> and <name> must be unique; <driver> must also be unique. This is a multiline option. Example for a default provider: service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default. Example of a non-default provider: service_provider=FIREWALL:name2:firewall_driver_path'
comment: 'New param'
# ====================================================
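As a rough illustration (a sketch, not part of the original files), each dotted parameter name above splits at the first dot into an INI section and option in neutron.conf, using the listed defaults:

[quotas]
default_quota = -1
quota_network = 10
quota_driver = neutron.db.quota_db.DbQuotaDriver

[database]
connection = mysql://root:pass@127.0.0.1:3306/neutron
max_retries = 10

[service_providers]
# form: <service_type>:<name>:<driver>[:default]
service_provider = LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default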

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,54 +0,0 @@
project: rabbitmq
version: '3.0.0'
parameters:
- name: tcp_listeners
type: rabbitmq_bind_list
default: [5672]
help: 'List of ports on which to listen for AMQP connections (without SSL)'
- name: ssl_listeners
type: rabbitmq_bind_list
default: []
help: 'List of ports on which to listen for AMQP connections (SSL)'
- name: ssl_options
type: string_list
default: []
- name: vm_memory_high_watermark
type: float
default: 0.4
- name: vm_memory_high_watermark_paging_ratio
type: float
default: 0.5
- name: disk_free_limit
type: integer
default: 50000000
- name: log_levels
type: string_list
default: ['{connection, info}']
- name: frame_max
type: integer
default: 131072
- name: heartbeat
type: integer
default: 600
- name: default_vhost
type: string
default: '/'
- name: default_user
type: string
default: 'guest'
- name: default_pass
type: string
default: 'guest'
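
For orientation (an illustrative sketch, not from the original repo), these parameters correspond to the {rabbit, ...} section of rabbitmq.config, which uses Erlang term syntax rather than INI:

[
  {rabbit, [
    {tcp_listeners, [5672]},
    {vm_memory_high_watermark, 0.4},
    {disk_free_limit, 50000000},
    {log_levels, [{connection, info}]},
    {frame_max, 131072},
    {heartbeat, 600},
    {default_vhost, <<"/">>},
    {default_user, <<"guest">>},
    {default_pass, <<"guest">>}
  ]}
].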

View File

@ -1,67 +0,0 @@
- version: '3.0.0'
checkpoint: true
added:
- name: tcp_listeners
type: rabbitmq_bind_list
default: [5672]
help: 'List of ports on which to listen for AMQP connections (without SSL)'
comment: 'New param'
- name: ssl_listeners
type: rabbitmq_bind_list
default: []
help: 'List of ports on which to listen for AMQP connections (SSL)'
comment: 'New param'
- name: ssl_options
type: string_list
default: []
comment: 'New param'
- name: vm_memory_high_watermark
type: float
default: 0.4
comment: 'New param'
- name: vm_memory_high_watermark_paging_ratio
type: float
default: 0.5
comment: 'New param'
- name: disk_free_limit
type: integer
default: 50000000
comment: 'New param'
- name: log_levels
type: string_list
default: ['{connection, info}']
comment: 'New param'
- name: frame_max
type: integer
default: 131072
comment: 'New param'
- name: heartbeat
type: integer
default: 600
comment: 'New param'
- name: default_vhost
type: string
default: '/'
comment: 'New param'
- name: default_user
type: string
default: 'guest'
comment: 'New param'
- name: default_pass
type: string
default: 'guest'
comment: 'New param'
# ====================================================

View File

@ -1,3 +0,0 @@
Examples for schema generator execution:
python rubick/schemas/collector.py cinder 2013.1.3 /usr/lib/python/dist-packages/cinder
python rubick/schemas/collector.py nova 2013.1.3 /usr/lib/python/dist-packages/nova

View File

@ -1,237 +0,0 @@
project: swift_account_server
version: '2013.2.1'
parameters:
- name: bind_ip
type: string
default: '0.0.0.0'
- name: bind_port
type: string
default: '6002'
- name: bind_timeout
type: string
default: '30'
- name: backlog
type: string
default: '4096'
- name: user
type: string
default: 'swift'
- name: swift_dir
type: string
default: '/etc/swift'
- name: devices
type: string
default: '/srv/node'
- name: mount_check
type: string
default: 'true'
- name: disable_fallocate
type: string
default: 'false'
- name: workers
type: string
default: 'auto'
help: 'Use an integer to override the number of pre-forked processes that will accept connections.'
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
- name: account-reaper.log_name
type: string
default: 'account-reaper'
help: "You can override the default log routing for this app here (don't use set!):"
- name: account-reaper.log_facility
type: string
default: 'LOG_LOCAL0'
help: "You can override the default log routing for this app here (don't use set!):"
- name: account-reaper.log_level
type: string
default: 'INFO'
help: "You can override the default log routing for this app here (don't use set!):"
- name: account-reaper.log_address
type: string
default: '/dev/log'
help: "You can override the default log routing for this app here (don't use set!):"
- name: log_custom_handlers
type: string
default: ''
help: 'Comma-separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
- name: db_preallocation
type: string
default: 'off'
help: "If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation."
- name: eventlet_debug
type: string
default: 'false'
- name: fallocate_reserve
type: string
default: '0'
help: "You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not."
- name: pipeline:main.pipeline
type: string
default: 'healthcheck recon account-server'
- name: filter:recon.use
type: string
default: 'egg:swift#recon'
- name: app:account-server.set log_name
type: string
default: 'account-server'
help: 'You can override the default log routing for this app here:'
- name: app:account-server.set log_facility
type: string
default: 'LOG_LOCAL0'
help: 'You can override the default log routing for this app here:'
- name: app:account-server.set log_level
type: string
default: 'INFO'
help: 'You can override the default log routing for this app here:'
- name: app:account-server.set log_requests
type: string
default: 'true'
help: 'You can override the default log routing for this app here:'
- name: app:account-server.set log_address
type: string
default: '/dev/log'
help: 'You can override the default log routing for this app here:'
- name: app:account-server.auto_create_account_prefix
type: string
default: '.'
- name: app:account-server.replication_server
type: string
default: 'false'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'."
- name: filter:healthcheck.disable_path
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'"
- name: account-auditor.recon_cache_path
type: string
default: '/var/cache/swift'
- name: account-replicator.vm_test_mode
type: string
default: 'no'
- name: account-replicator.per_diff
type: string
default: '1000'
- name: account-replicator.max_diffs
type: string
default: '100'
- name: account-reaper.concurrency
type: string
default: '25'
- name: account-reaper.interval
type: string
default: '3600'
- name: account-replicator.error_suppression_interval
type: string
default: '60'
help: "How long without an error before a node's error count is reset. This will also be how long before a node is reenabled after suppression is triggered."
- name: account-replicator.error_suppression_limit
type: string
default: '10'
help: 'How many errors can accumulate before a node is temporarily ignored.'
- name: account-reaper.node_timeout
type: string
default: '10'
- name: account-reaper.conn_timeout
type: string
default: '0.5'
- name: account-replicator.reclaim_age
type: string
default: '604800'
help: 'The replicator also performs reclamation'
- name: account-replicator.run_pause
type: string
default: '30'
help: 'Time in seconds to wait between replication passes'
- name: account-auditor.accounts_per_second
type: string
default: '200'
- name: account-reaper.delay_reaping
type: string
default: '0'
help: 'Normally, the reaper begins deleting account information for deleted accounts immediately; you can set this to delay its work however. The value is in seconds; 2592000 = 30 days for example.'
- name: account-reaper.reap_warn_after
type: string
default: '2592000'
help: 'If the account fails to be reaped due to a persistent error, the account reaper will log a message such as: Account <name> has not been reaped since <date>. You can search logs for this message if space is not being reclaimed after you delete account(s). Default is 2592000 seconds (30 days). This is in addition to any time requested by delay_reaping.'
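
A minimal sketch (assumed layout, not from the original files) of how these names map onto account-server.conf: bare names live in [DEFAULT], while dotted prefixes such as 'app:account-server.' and 'account-reaper.' name their own INI sections:

[DEFAULT]
bind_ip = 0.0.0.0
bind_port = 6002
workers = auto

[pipeline:main]
pipeline = healthcheck recon account-server

[app:account-server]
# the 'use' line is assumed; it is not captured in the schema above
use = egg:swift#account
set log_name = account-server

[account-reaper]
concurrency = 25
delay_reaping = 0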

View File

@ -1,289 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: bind_ip
type: string
default: '0.0.0.0'
comment: 'New param'
- name: bind_port
type: string
default: '6002'
comment: 'New param'
- name: bind_timeout
type: string
default: '30'
comment: 'New param'
- name: backlog
type: string
default: '4096'
comment: 'New param'
- name: user
type: string
default: 'swift'
comment: 'New param'
- name: swift_dir
type: string
default: '/etc/swift'
comment: 'New param'
- name: devices
type: string
default: '/srv/node'
comment: 'New param'
- name: mount_check
type: string
default: 'true'
comment: 'New param'
- name: disable_fallocate
type: string
default: 'false'
comment: 'New param'
- name: workers
type: string
default: 'auto'
help: 'Use an integer to override the number of pre-forked processes that will accept connections.'
comment: 'New param'
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
comment: 'New param'
- name: account-reaper.log_name
type: string
default: 'account-reaper'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: account-reaper.log_facility
type: string
default: 'LOG_LOCAL0'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: account-reaper.log_level
type: string
default: 'INFO'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: account-reaper.log_address
type: string
default: '/dev/log'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: log_custom_handlers
type: string
default: ''
help: 'Comma-separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
comment: 'New param'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: db_preallocation
type: string
default: 'off'
help: "If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation."
comment: 'New param'
- name: eventlet_debug
type: string
default: 'false'
comment: 'New param'
- name: fallocate_reserve
type: string
default: '0'
help: "You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not."
comment: 'New param'
- name: 'pipeline:main.pipeline'
type: string
default: 'healthcheck recon account-server'
comment: 'New param'
- name: 'filter:recon.use'
type: string
default: 'egg:swift#recon'
comment: 'New param'
- name: 'app:account-server.set log_name'
type: string
default: 'account-server'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:account-server.set log_facility'
type: string
default: 'LOG_LOCAL0'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:account-server.set log_level'
type: string
default: 'INFO'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:account-server.set log_requests'
type: string
default: 'true'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:account-server.set log_address'
type: string
default: '/dev/log'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:account-server.auto_create_account_prefix'
type: string
default: '.'
comment: 'New param'
- name: 'app:account-server.replication_server'
type: string
default: 'false'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'."
comment: 'New param'
- name: 'filter:healthcheck.disable_path'
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'"
comment: 'New param'
- name: account-auditor.recon_cache_path
type: string
default: '/var/cache/swift'
comment: 'New param'
- name: account-replicator.vm_test_mode
type: string
default: 'no'
comment: 'New param'
- name: account-replicator.per_diff
type: string
default: '1000'
comment: 'New param'
- name: account-replicator.max_diffs
type: string
default: '100'
comment: 'New param'
- name: account-reaper.concurrency
type: string
default: '25'
comment: 'New param'
- name: account-reaper.interval
type: string
default: '3600'
comment: 'New param'
- name: account-replicator.error_suppression_interval
type: string
default: '60'
help: "How long without an error before a node's error count is reset. This will also be how long before a node is reenabled after suppression is triggered."
comment: 'New param'
- name: account-replicator.error_suppression_limit
type: string
default: '10'
help: 'How many errors can accumulate before a node is temporarily ignored.'
comment: 'New param'
- name: account-reaper.node_timeout
type: string
default: '10'
comment: 'New param'
- name: account-reaper.conn_timeout
type: string
default: '0.5'
comment: 'New param'
- name: account-replicator.reclaim_age
type: string
default: '604800'
help: 'The replicator also performs reclamation'
comment: 'New param'
- name: account-replicator.run_pause
type: string
default: '30'
help: 'Time in seconds to wait between replication passes'
comment: 'New param'
- name: account-auditor.accounts_per_second
type: string
default: '200'
comment: 'New param'
- name: account-reaper.delay_reaping
type: string
default: '0'
help: 'Normally, the reaper begins deleting account information for deleted accounts immediately; you can set this to delay its work however. The value is in seconds; 2592000 = 30 days for example.'
comment: 'New param'
- name: account-reaper.reap_warn_after
type: string
default: '2592000'
help: 'If the account fails to be reaped due to a persistent error, the account reaper will log a message such as: Account <name> has not been reaped since <date>. You can search logs for this message if space is not being reclaimed after you delete account(s). Default is 2592000 seconds (30 days). This is in addition to any time requested by delay_reaping.'
comment: 'New param'
# ====================================================

View File

@ -1,244 +0,0 @@
project: swift_container_server
version: '2013.2.1'
parameters:
- name: bind_ip
type: string
default: '0.0.0.0'
- name: bind_port
type: string
default: '6001'
- name: bind_timeout
type: string
default: '30'
- name: backlog
type: string
default: '4096'
- name: user
type: string
default: 'swift'
- name: swift_dir
type: string
default: '/etc/swift'
- name: devices
type: string
default: '/srv/node'
- name: mount_check
type: string
default: 'true'
- name: disable_fallocate
type: string
default: 'false'
- name: workers
type: string
default: 'auto'
help: 'Use an integer to override the number of pre-forked processes that will accept connections.'
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
- name: allowed_sync_hosts
type: string
default: '127.0.0.1'
help: 'This is a comma separated list of hosts allowed in the X-Container-Sync-To field for containers.'
- name: container-sync.log_name
type: string
default: 'container-sync'
help: "You can override the default log routing for this app here (don't use set!):"
- name: container-sync.log_facility
type: string
default: 'LOG_LOCAL0'
help: "You can override the default log routing for this app here (don't use set!):"
- name: container-sync.log_level
type: string
default: 'INFO'
help: "You can override the default log routing for this app here (don't use set!):"
- name: container-sync.log_address
type: string
default: '/dev/log'
help: "You can override the default log routing for this app here (don't use set!):"
- name: log_custom_handlers
type: string
default: ''
help: 'Comma-separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
- name: db_preallocation
type: string
default: 'off'
help: "If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation."
- name: eventlet_debug
type: string
default: 'false'
- name: fallocate_reserve
type: string
default: '0'
help: "You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not."
- name: pipeline:main.pipeline
type: string
default: 'healthcheck recon container-server'
- name: filter:recon.use
type: string
default: 'egg:swift#recon'
- name: app:container-server.set log_name
type: string
default: 'container-server'
help: 'You can override the default log routing for this app here:'
- name: app:container-server.set log_facility
type: string
default: 'LOG_LOCAL0'
help: 'You can override the default log routing for this app here:'
- name: app:container-server.set log_level
type: string
default: 'INFO'
help: 'You can override the default log routing for this app here:'
- name: app:container-server.set log_requests
type: string
default: 'true'
help: 'You can override the default log routing for this app here:'
- name: app:container-server.set log_address
type: string
default: '/dev/log'
help: 'You can override the default log routing for this app here:'
- name: container-updater.node_timeout
type: string
default: '3'
- name: container-updater.conn_timeout
type: string
default: '0.5'
- name: app:container-server.allow_versions
type: string
default: 'false'
- name: app:container-server.auto_create_account_prefix
type: string
default: '.'
- name: app:container-server.replication_server
type: string
default: 'false'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'."
- name: filter:healthcheck.disable_path
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'"
- name: container-auditor.recon_cache_path
type: string
default: '/var/cache/swift'
help: 'containers_per_second = 200'
- name: container-replicator.vm_test_mode
type: string
default: 'no'
- name: container-replicator.per_diff
type: string
default: '1000'
- name: container-replicator.max_diffs
type: string
default: '100'
- name: container-updater.concurrency
type: string
default: '4'
- name: container-sync.interval
type: string
default: '300'
help: 'Will sync each container at most once per interval'
- name: container-replicator.reclaim_age
type: string
default: '604800'
help: 'The replicator also performs reclamation'
- name: container-replicator.run_pause
type: string
default: '30'
help: 'Time in seconds to wait between replication passes'
- name: container-updater.slowdown
type: string
default: '0.01'
help: 'slowdown will sleep that amount between containers'
- name: container-updater.account_suppression_time
type: string
default: '60'
help: 'Seconds to suppress updating an account that has generated an error'
- name: container-sync.sync_proxy
type: string
default: 'http://127.0.0.1:8888'
help: 'If you need to use an HTTP Proxy, set it here; defaults to no proxy.'
- name: container-sync.container_time
type: string
default: '60'
help: 'Maximum amount of time to spend syncing each container per pass'
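
The container-server.conf layout follows the same pattern (a sketch using the defaults listed above); the per-daemon sections are worth noting:

[DEFAULT]
bind_port = 6001
allowed_sync_hosts = 127.0.0.1

[container-sync]
interval = 300
container_time = 60

[container-updater]
node_timeout = 3
account_suppression_time = 60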

View File

@ -1,297 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: bind_ip
type: string
default: '0.0.0.0'
comment: 'New param'
- name: bind_port
type: string
default: '6001'
comment: 'New param'
- name: bind_timeout
type: string
default: '30'
comment: 'New param'
- name: backlog
type: string
default: '4096'
comment: 'New param'
- name: user
type: string
default: 'swift'
comment: 'New param'
- name: swift_dir
type: string
default: '/etc/swift'
comment: 'New param'
- name: devices
type: string
default: '/srv/node'
comment: 'New param'
- name: mount_check
type: string
default: 'true'
comment: 'New param'
- name: disable_fallocate
type: string
default: 'false'
comment: 'New param'
- name: workers
type: string
default: 'auto'
help: 'Use an integer to override the number of pre-forked processes that will accept connections.'
comment: 'New param'
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
comment: 'New param'
- name: allowed_sync_hosts
type: string
default: '127.0.0.1'
help: 'This is a comma separated list of hosts allowed in the X-Container-Sync-To field for containers.'
comment: 'New param'
- name: container-sync.log_name
type: string
default: 'container-sync'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: container-sync.log_facility
type: string
default: 'LOG_LOCAL0'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: container-sync.log_level
type: string
default: 'INFO'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: container-sync.log_address
type: string
default: '/dev/log'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: log_custom_handlers
type: string
default: ''
help: 'Comma-separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
comment: 'New param'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: db_preallocation
type: string
default: 'off'
help: "If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation."
comment: 'New param'
- name: eventlet_debug
type: string
default: 'false'
comment: 'New param'
- name: fallocate_reserve
type: string
default: '0'
help: "You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not."
comment: 'New param'
- name: 'pipeline:main.pipeline'
type: string
default: 'healthcheck recon container-server'
comment: 'New param'
- name: 'filter:recon.use'
type: string
default: 'egg:swift#recon'
comment: 'New param'
- name: 'app:container-server.set log_name'
type: string
default: 'container-server'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:container-server.set log_facility'
type: string
default: 'LOG_LOCAL0'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:container-server.set log_level'
type: string
default: 'INFO'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:container-server.set log_requests'
type: string
default: 'true'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:container-server.set log_address'
type: string
default: '/dev/log'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: container-updater.node_timeout
type: string
default: '3'
comment: 'New param'
- name: container-updater.conn_timeout
type: string
default: '0.5'
comment: 'New param'
- name: 'app:container-server.allow_versions'
type: string
default: 'false'
comment: 'New param'
- name: 'app:container-server.auto_create_account_prefix'
type: string
default: '.'
comment: 'New param'
- name: 'app:container-server.replication_server'
type: string
default: 'false'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'."
comment: 'New param'
- name: 'filter:healthcheck.disable_path'
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'"
comment: 'New param'
- name: container-auditor.recon_cache_path
type: string
default: '/var/cache/swift'
help: 'containers_per_second = 200'
comment: 'New param'
- name: container-replicator.vm_test_mode
type: string
default: 'no'
comment: 'New param'
- name: container-replicator.per_diff
type: string
default: '1000'
comment: 'New param'
- name: container-replicator.max_diffs
type: string
default: '100'
comment: 'New param'
- name: container-updater.concurrency
type: string
default: '4'
comment: 'New param'
- name: container-sync.interval
type: string
default: '300'
help: 'Will sync each container at most once per interval'
comment: 'New param'
- name: container-replicator.reclaim_age
type: string
default: '604800'
help: 'The replicator also performs reclamation'
comment: 'New param'
- name: container-replicator.run_pause
type: string
default: '30'
help: 'Time in seconds to wait between replication passes'
comment: 'New param'
- name: container-updater.slowdown
type: string
default: '0.01'
help: 'slowdown will sleep that amount between containers'
comment: 'New param'
- name: container-updater.account_suppression_time
type: string
default: '60'
help: 'Seconds to suppress updating an account that has generated an error'
comment: 'New param'
- name: container-sync.sync_proxy
type: string
default: 'http://127.0.0.1:8888'
help: 'If you need to use an HTTP Proxy, set it here; defaults to no proxy.'
comment: 'New param'
- name: container-sync.container_time
type: string
default: '60'
help: 'Maximum amount of time to spend syncing each container per pass'
comment: 'New param'
# ====================================================

View File

@ -1,316 +0,0 @@
project: swift_object_server
version: '2013.2.1'
parameters:
- name: bind_ip
type: string
default: '0.0.0.0'
- name: bind_port
type: string
default: '6000'
- name: bind_timeout
type: string
default: '30'
- name: backlog
type: string
default: '4096'
- name: user
type: string
default: 'swift'
- name: swift_dir
type: string
default: '/etc/swift'
- name: devices
type: string
default: '/srv/node'
- name: mount_check
type: string
default: 'true'
- name: disable_fallocate
type: string
default: 'false'
- name: expiring_objects_container_divisor
type: string
default: '86400'
- name: workers
type: string
default: 'auto'
help: 'Use an integer to override the number of pre-forked processes that will accept connections.'
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
- name: object-auditor.log_name
type: string
default: 'object-auditor'
help: "You can override the default log routing for this app here (don't use set!):"
- name: object-auditor.log_facility
type: string
default: 'LOG_LOCAL0'
help: "You can override the default log routing for this app here (don't use set!):"
- name: object-auditor.log_level
type: string
default: 'INFO'
help: "You can override the default log routing for this app here (don't use set!):"
- name: object-auditor.log_address
type: string
default: '/dev/log'
help: "You can override the default log routing for this app here (don't use set!):"
- name: log_custom_handlers
type: string
default: ''
help: 'Comma-separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
- name: eventlet_debug
type: string
default: 'false'
- name: fallocate_reserve
type: string
default: '0'
help: "You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not."
- name: pipeline:main.pipeline
type: string
default: 'healthcheck recon object-server'
- name: filter:recon.use
type: string
default: 'egg:swift#recon'
- name: app:object-server.set log_name
type: string
default: 'object-server'
help: 'You can override the default log routing for this app here:'
- name: app:object-server.set log_facility
type: string
default: 'LOG_LOCAL0'
help: 'You can override the default log routing for this app here:'
- name: app:object-server.set log_level
type: string
default: 'INFO'
help: 'You can override the default log routing for this app here:'
- name: app:object-server.set log_requests
type: string
default: 'true'
help: 'You can override the default log routing for this app here:'
- name: app:object-server.set log_address
type: string
default: '/dev/log'
help: 'You can override the default log routing for this app here:'
- name: object-updater.node_timeout
type: string
default: '10'
- name: object-updater.conn_timeout
type: string
default: '0.5'
- name: app:object-server.network_chunk_size
type: string
default: '65536'
- name: app:object-server.disk_chunk_size
type: string
default: '65536'
- name: app:object-server.max_upload_time
type: string
default: '86400'
- name: app:object-server.slow
type: string
default: '0'
- name: app:object-server.keep_cache_size
type: string
default: '5424880'
help: 'Objects smaller than this are not evicted from the buffer cache once read'
- name: app:object-server.keep_cache_private
type: string
default: 'false'
help: 'If true, objects for authenticated GET requests may be kept in buffer cache if small enough'
- name: app:object-server.mb_per_sync
type: string
default: '512'
help: 'on PUTs, sync data every n MB'
- name: app:object-server.allowed_headers
type: string
default: 'Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object'
help: 'Comma separated list of headers that can be set in metadata on an object. This list is in addition to X-Object-Meta-* headers and cannot include Content-Type, etag, Content-Length, or deleted'
- name: app:object-server.auto_create_account_prefix
type: string
default: '.'
- name: app:object-server.replication_server
type: string
default: 'false'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'."
- name: app:object-server.threads_per_disk
type: string
default: '0'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'. A value of 0 means 'don't use thread pools'. A reasonable starting point is 4."
- name: filter:healthcheck.disable_path
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'"
- name: object-auditor.recon_cache_path
type: string
default: '/var/cache/swift'
- name: filter:recon.recon_lock_path
type: string
default: '/var/lock'
- name: object-replicator.vm_test_mode
type: string
default: 'no'
- name: object-replicator.daemonize
type: string
default: 'on'
- name: object-replicator.run_pause
type: string
default: '30'
- name: object-updater.concurrency
type: string
default: '1'
- name: object-replicator.stats_interval
type: string
default: '300'
- name: object-replicator.rsync_timeout
type: string
default: '900'
help: 'max duration of a partition rsync'
- name: object-replicator.rsync_bwlimit
type: string
default: '0'
help: 'bandwidth limit for rsync in kB/s; 0 means unlimited'
- name: object-replicator.rsync_io_timeout
type: string
default: '30'
help: 'passed to rsync for io op timeout'
- name: object-replicator.http_timeout
type: string
default: '60'
help: 'max duration of an http request'
- name: object-replicator.lockup_timeout
type: string
default: '1800'
help: 'attempts to kill all workers if nothing replicates for lockup_timeout seconds'
- name: object-replicator.reclaim_age
type: string
default: '604800'
help: 'The replicator also performs reclamation'
- name: object-replicator.ring_check_interval
type: string
default: '15'
- name: object-replicator.rsync_error_log_line_length
type: string
default: '0'
help: 'limits how long rsync error log lines are; 0 means to log the entire line'
- name: object-updater.interval
type: string
default: '300'
- name: object-updater.slowdown
type: string
default: '0.01'
help: 'slowdown will sleep that amount between objects'
- name: object-auditor.files_per_second
type: string
default: '20'
- name: object-auditor.bytes_per_second
type: string
default: '10000000'
- name: object-auditor.log_time
type: string
default: '3600'
- name: object-auditor.zero_byte_files_per_second
type: string
default: '50'
- name: object-auditor.object_size_stats
type: string
default: ''
help: 'Takes a comma-separated list of ints. If set, the object auditor will increment a counter for every object whose size is <= the given break points and report the result after a full scan.'
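
Likewise for object-server.conf (a sketch with the defaults listed above), where most of the tuning lives in the replicator and auditor sections:

[DEFAULT]
bind_port = 6000

[object-replicator]
rsync_timeout = 900
rsync_bwlimit = 0
reclaim_age = 604800

[object-auditor]
files_per_second = 20
zero_byte_files_per_second = 50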

View File

@ -1,386 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: bind_ip
type: string
default: '0.0.0.0'
comment: 'New param'
- name: bind_port
type: string
default: '6000'
comment: 'New param'
- name: bind_timeout
type: string
default: '30'
comment: 'New param'
- name: backlog
type: string
default: '4096'
comment: 'New param'
- name: user
type: string
default: 'swift'
comment: 'New param'
- name: swift_dir
type: string
default: '/etc/swift'
comment: 'New param'
- name: devices
type: string
default: '/srv/node'
comment: 'New param'
- name: mount_check
type: string
default: 'true'
comment: 'New param'
- name: disable_fallocate
type: string
default: 'false'
comment: 'New param'
- name: expiring_objects_container_divisor
type: string
default: '86400'
comment: 'New param'
- name: workers
type: string
default: 'auto'
help: 'Use an integer to override the number of pre-forked processes that will accept connections.'
comment: 'New param'
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
comment: 'New param'
- name: object-auditor.log_name
type: string
default: 'object-auditor'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: object-auditor.log_facility
type: string
default: 'LOG_LOCAL0'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: object-auditor.log_level
type: string
default: 'INFO'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: object-auditor.log_address
type: string
default: '/dev/log'
help: "You can override the default log routing for this app here (don't use set!):"
comment: 'New param'
- name: log_custom_handlers
type: string
default: ''
help: 'Comma-separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
comment: 'New param'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: eventlet_debug
type: string
default: 'false'
comment: 'New param'
- name: fallocate_reserve
type: string
default: '0'
help: "You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not."
comment: 'New param'
- name: 'pipeline:main.pipeline'
type: string
default: 'healthcheck recon object-server'
comment: 'New param'
- name: 'filter:recon.use'
type: string
default: 'egg:swift#recon'
comment: 'New param'
- name: 'app:object-server.set log_name'
type: string
default: 'object-server'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:object-server.set log_facility'
type: string
default: 'LOG_LOCAL0'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:object-server.set log_level'
type: string
default: 'INFO'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:object-server.set log_requests'
type: string
default: 'true'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: 'app:object-server.set log_address'
type: string
default: '/dev/log'
help: 'You can override the default log routing for this app here:'
comment: 'New param'
- name: object-updater.node_timeout
type: string
default: '10'
comment: 'New param'
- name: object-updater.conn_timeout
type: string
default: '0.5'
comment: 'New param'
- name: 'app:object-server.network_chunk_size'
type: string
default: '65536'
comment: 'New param'
- name: 'app:object-server.disk_chunk_size'
type: string
default: '65536'
comment: 'New param'
- name: 'app:object-server.max_upload_time'
type: string
default: '86400'
comment: 'New param'
- name: 'app:object-server.slow'
type: string
default: '0'
comment: 'New param'
- name: 'app:object-server.keep_cache_size'
type: string
default: '5424880'
help: 'Objects smaller than this are not evicted from the buffer cache once read'
comment: 'New param'
- name: 'app:object-server.keep_cache_private'
type: string
default: 'false'
help: 'If true, objects for authenticated GET requests may be kept in buffer cache if small enough'
comment: 'New param'
- name: 'app:object-server.mb_per_sync'
type: string
default: '512'
help: 'on PUTs, sync data every n MB'
comment: 'New param'
- name: 'app:object-server.allowed_headers'
type: string
default: 'Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object'
help: 'Comma separated list of headers that can be set in metadata on an object. This list is in addition to X-Object-Meta-* headers and cannot include Content-Type, etag, Content-Length, or deleted'
comment: 'New param'
- name: 'app:object-server.auto_create_account_prefix'
type: string
default: '.'
comment: 'New param'
- name: 'app:object-server.replication_server'
type: string
default: 'false'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'."
comment: 'New param'
- name: 'app:object-server.threads_per_disk'
type: string
default: '0'
help: "Configure parameter for creating specific server To handle all verbs, including replication verbs, do not specify 'replication_server' (this is the default). To only handle replication, set to a True value (e.g. 'True' or '1'). To handle only non-replication verbs, set to 'False'. Unless you have a separate replication network, you should not specify any value for 'replication_server'. A value of 0 means 'don't use thread pools'. A reasonable starting point is 4."
comment: 'New param'
- name: 'filter:healthcheck.disable_path'
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'"
comment: 'New param'
- name: object-auditor.recon_cache_path
type: string
default: '/var/cache/swift'
comment: 'New param'
- name: 'filter:recon.recon_lock_path'
type: string
default: '/var/lock'
comment: 'New param'
- name: object-replicator.vm_test_mode
type: string
default: 'no'
comment: 'New param'
- name: object-replicator.daemonize
type: string
default: 'on'
comment: 'New param'
- name: object-replicator.run_pause
type: string
default: '30'
comment: 'New param'
- name: object-updater.concurrency
type: string
default: '1'
comment: 'New param'
- name: object-replicator.stats_interval
type: string
default: '300'
comment: 'New param'
- name: object-replicator.rsync_timeout
type: string
default: '900'
help: 'max duration of a partition rsync'
comment: 'New param'
- name: object-replicator.rsync_bwlimit
type: string
default: '0'
help: 'bandwidth limit for rsync in kB/s; 0 means unlimited'
comment: 'New param'
- name: object-replicator.rsync_io_timeout
type: string
default: '30'
help: 'passed to rsync for io op timeout'
comment: 'New param'
- name: object-replicator.http_timeout
type: string
default: '60'
help: 'max duration of an http request'
comment: 'New param'
- name: object-replicator.lockup_timeout
type: string
default: '1800'
help: 'attempts to kill all workers if nothing replicates for lockup_timeout seconds'
comment: 'New param'
- name: object-replicator.reclaim_age
type: string
default: '604800'
help: 'The replicator also performs reclamation'
comment: 'New param'
- name: object-replicator.ring_check_interval
type: string
default: '15'
comment: 'New param'
- name: object-replicator.rsync_error_log_line_length
type: string
default: '0'
help: 'limits how long rsync error log lines are; 0 means to log the entire line'
comment: 'New param'
- name: object-updater.interval
type: string
default: '300'
comment: 'New param'
- name: object-updater.slowdown
type: string
default: '0.01'
help: 'slowdown will sleep that amount between objects'
comment: 'New param'
- name: object-auditor.files_per_second
type: string
default: '20'
comment: 'New param'
- name: object-auditor.bytes_per_second
type: string
default: '10000000'
comment: 'New param'
- name: object-auditor.log_time
type: string
default: '3600'
comment: 'New param'
- name: object-auditor.zero_byte_files_per_second
type: string
default: '50'
comment: 'New param'
- name: object-auditor.object_size_stats
type: string
default: ''
help: 'Takes a comma-separated list of ints. If set, the object auditor will increment a counter for every object whose size is <= the given break points and report the result after a full scan.'
comment: 'New param'
# ====================================================

View File

@ -1,625 +0,0 @@
project: swift_proxy_server
version: '2013.2.1'
parameters:
- name: bind_ip
type: string
default: '0.0.0.0'
- name: bind_port
type: string
default: '80'
- name: bind_timeout
type: string
default: '30'
- name: backlog
type: string
default: '4096'
- name: swift_dir
type: string
default: '/etc/swift'
- name: user
type: string
default: 'swift'
- name: workers
type: string
default: 'auto'
help: "Use an integer to override the number of pre-forked processes that will accept connections. Should default to the number of effective cpu cores in the system. It's worth noting that individual workers will use many eventlet co-routines to service multiple concurrent requests."
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
- name: cert_file
type: string
default: '/etc/swift/proxy.crt'
help: 'Set the following two lines to enable SSL. This is for testing only.'
- name: key_file
type: string
default: '/etc/swift/proxy.key'
help: 'Set the following two lines to enable SSL. This is for testing only.'
- name: log_name
type: string
default: 'swift'
help: 'You can specify default log routing here if you want:'
- name: log_facility
type: string
default: 'LOG_LOCAL0'
help: 'You can specify default log routing here if you want:'
- name: log_level
type: string
default: 'INFO'
help: 'You can specify default log routing here if you want:'
- name: log_headers
type: string
default: 'false'
help: 'You can specify default log routing here if you want:'
- name: log_address
type: string
default: '/dev/log'
help: 'You can specify default log routing here if you want:'
- name: trans_id_suffix
type: string
default: ''
help: 'This optional suffix (default is empty) is appended to the swift transaction id, allowing one to easily figure out which cluster an X-Trans-Id belongs to. This is very useful when one is managing more than one swift cluster.'
- name: log_custom_handlers
type: string
default: ''
help: 'Comma-separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
- name: cors_allow_origin
type: string
default: ''
help: 'Use a comma-separated list of full URLs (http://foo.bar:1234,https://foo.bar)'
- name: client_timeout
type: string
default: '60'
- name: eventlet_debug
type: string
default: 'false'
- name: pipeline:main.pipeline
type: string
default: 'catch_errors healthcheck proxy-logging cache bulk slo ratelimit tempauth container-quotas account-quotas proxy-logging proxy-server'
- name: filter:account-quotas.use
type: string
default: 'egg:swift#account_quotas'
- name: filter:cname_lookup.set log_name
type: string
default: 'cname_lookup'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
- name: filter:cname_lookup.set log_facility
type: string
default: 'LOG_LOCAL0'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
- name: filter:cname_lookup.set log_level
type: string
default: 'INFO'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
- name: filter:cname_lookup.set log_address
type: string
default: '/dev/log'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
- name: app:proxy-server.log_handoffs
type: string
default: 'true'
- name: app:proxy-server.recheck_account_existence
type: string
default: '60'
- name: app:proxy-server.recheck_container_existence
type: string
default: '60'
- name: app:proxy-server.object_chunk_size
type: string
default: '8192'
- name: app:proxy-server.client_chunk_size
type: string
default: '8192'
- name: app:proxy-server.node_timeout
type: string
default: '10'
- name: app:proxy-server.conn_timeout
type: string
default: '0.5'
- name: app:proxy-server.error_suppression_interval
type: string
default: '60'
help: "How long without an error before a node's error count is reset. This will also be how long before a node is reenabled after suppression is triggered."
- name: app:proxy-server.error_suppression_limit
type: string
default: '10'
help: 'How many errors can accumulate before a node is temporarily ignored.'
- name: app:proxy-server.allow_account_management
type: string
default: 'false'
help: "If set to 'true' any authorized user may create and delete accounts; if 'false' no one, even authorized, can."
- name: app:proxy-server.object_post_as_copy
type: string
default: 'true'
help: "Set object_post_as_copy = false to turn on fast posts where only the metadata changes are stored anew and the original data file is kept in place. This makes for quicker posts; but since the container metadata isn't updated in this mode, features like container sync won't be able to sync posts."
- name: app:proxy-server.account_autocreate
type: string
default: 'false'
help: "If set to 'true' authorized accounts that do not yet exist within the Swift cluster will be automatically created."
- name: app:proxy-server.max_containers_per_account
type: string
default: '0'
help: 'If set to a positive value, trying to create a container when the account already has at least this maximum containers will result in a 403 Forbidden. Note: This is a soft limit, meaning a user might exceed the cap for recheck_account_existence before the 403s kick in.'
- name: app:proxy-server.max_containers_whitelist
type: string
default: ''
help: 'This is a comma separated list of account hashes that ignore the max_containers_per_account cap.'
- name: app:proxy-server.deny_host_headers
type: string
default: ''
help: 'Comma separated list of Host headers to which the proxy will deny requests.'
- name: app:proxy-server.auto_create_account_prefix
type: string
default: '.'
help: 'Prefix used when automatically creating accounts.'
- name: app:proxy-server.put_queue_depth
type: string
default: '10'
help: 'Depth of the proxy put queue.'
- name: app:proxy-server.rate_limit_after_segment
type: string
default: '10'
help: 'Start rate-limiting object segment serving after the Nth segment of a segmented object.'
- name: app:proxy-server.rate_limit_segments_per_sec
type: string
default: '1'
help: 'Once segment rate-limiting kicks in for an object, limit segments served to N per second.'
- name: app:proxy-server.sorting_method
type: string
default: 'shuffle'
help: "Storage nodes can be chosen at random (shuffle), by using timing measurements (timing), or by using an explicit match (affinity). Using timing measurements may allow for lower overall latency, while using affinity allows for finer control. In both the timing and affinity cases, equally-sorting nodes are still randomly chosen to spread load. The valid values for sorting_method are 'affinity', 'shuffle', and 'timing'."
- name: app:proxy-server.timing_expiry
type: string
default: '300'
help: "If the 'timing' sorting_method is used, the timings will only be valid for the number of seconds configured by timing_expiry."
- name: app:proxy-server.allow_static_large_object
type: string
default: 'true'
help: "If set to false will treat objects with X-Static-Large-Object header set as a regular object on GETs, i.e. will return that object's contents. Should be set to false if slo is not used in pipeline."
- name: app:proxy-server.max_large_object_get_time
type: string
default: '86400'
help: 'The maximum time (seconds) that a large object connection is allowed to last.'
- name: app:proxy-server.request_node_count
type: string
default: '2 * replicas'
help: "Set to the number of nodes to contact for a normal request. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request."
- name: app:proxy-server.read_affinity
type: string
default: ''
help: 'Example: first read from region 1 zone 1, then region 1 zone 2, then anything in region 2, then everything else: read_affinity = r1z1=100, r1z2=200, r2=300 Default is empty, meaning no preference.'
- name: app:proxy-server.write_affinity
type: string
default: ''
help: 'Example: try to write to regions 1 and 2 before writing to any other nodes: write_affinity = r1, r2 Default is empty, meaning no preference.'
- name: app:proxy-server.write_affinity_node_count
type: string
default: '2 * replicas'
help: "The number of local (as governed by the write_affinity setting) nodes to attempt to contact first, before any non-local ones. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request."
- name: app:proxy-server.swift_owner_headers
type: string
default: 'x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2'
help: 'These are the headers whose values will only be shown to swift_owners. The exact definition of a swift_owner is up to the auth system in use, but usually indicates administrative responsibilities.'
- name: filter:cname_lookup.set log_headers
type: string
default: 'false'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
- name: filter:tempauth.reseller_prefix
type: string
default: 'AUTH'
help: 'The reseller prefix will verify a token begins with this prefix before even attempting to validate it. Also, with authorization, only Swift storage accounts with this prefix will be authorized by this middleware. Useful if multiple auth systems are in use for one Swift cluster.'
- name: filter:tempauth.auth_prefix
type: string
default: '/auth/'
help: 'The auth prefix will cause requests beginning with this prefix to be routed to the auth subsystem, for granting tokens, etc.'
- name: filter:tempauth.token_life
type: string
default: '86400'
help: 'The number of seconds a tempauth token remains valid after being granted.'
- name: filter:tempauth.allow_overrides
type: string
default: 'true'
help: "This allows middleware higher in the WSGI pipeline to override auth processing, useful for middleware such as tempurl and formpost. If you know you're not going to use such middleware and you want a bit of extra security, you can set this to false."
- name: filter:tempauth.storage_url_scheme
type: string
default: 'default'
help: 'This specifies what scheme to return with storage urls: http, https, or default (chooses based on what the server is running as) This can be useful with an SSL load balancer in front of a non-SSL server.'
- name: filter:tempauth.user_admin_admin
type: string
default: 'admin .admin .reseller_admin'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
- name: filter:tempauth.user_test_tester
type: string
default: 'testing .admin'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
- name: filter:tempauth.user_test2_tester2
type: string
default: 'testing2 .admin'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
- name: filter:tempauth.user_test_tester3
type: string
default: 'testing3'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
- name: filter:authtoken.paste.filter_factory
type: string
default: 'keystoneclient.middleware.auth_token:filter_factory'
help: '[filter:authtoken]'
- name: filter:authtoken.auth_host
type: string
default: 'keystonehost'
help: '[filter:authtoken]'
- name: filter:authtoken.auth_port
type: string
default: '35357'
help: '[filter:authtoken]'
- name: filter:authtoken.auth_protocol
type: string
default: 'http'
help: '[filter:authtoken]'
- name: filter:authtoken.auth_uri
type: string
default: 'http://keystonehost:5000/'
help: '[filter:authtoken]'
- name: filter:authtoken.admin_tenant_name
type: string
default: 'service'
help: '[filter:authtoken]'
- name: filter:authtoken.admin_user
type: string
default: 'swift'
help: '[filter:authtoken]'
- name: filter:authtoken.admin_password
type: string
default: 'password'
help: '[filter:authtoken]'
- name: filter:authtoken.delay_auth_decision
type: string
default: '1'
help: '[filter:authtoken]'
- name: filter:authtoken.cache
type: string
default: 'swift.cache'
help: '[filter:authtoken]'
- name: filter:keystoneauth.operator_roles
type: string
default: 'admin, swiftoperator'
help: '[filter:keystoneauth] Operator roles are the roles whose members are allowed to manage a tenant: create containers and grant ACLs to others.'
- name: filter:keystoneauth.reseller_admin_role
type: string
default: 'ResellerAdmin'
help: '[filter:keystoneauth] Operator roles are the roles whose members are allowed to manage a tenant: create containers and grant ACLs to others. The reseller admin role additionally has the ability to create and delete accounts.'
- name: filter:healthcheck.disable_path
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'. This facility may be used to temporarily remove a Swift node from a load balancer pool during maintenance or upgrade (remove the file to allow the node back into the load balancer pool)."
- name: filter:cache.memcache_servers
type: string
default: '127.0.0.1:11211'
help: 'If not set here, the value for memcache_servers will be read from memcache.conf (see memcache.conf-sample) or lacking that file, it will default to the value below. You can specify multiple servers separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211'
- name: filter:cache.memcache_serialization_support
type: string
default: '2'
help: 'Sets how memcache values are serialized and deserialized: 0 = older, insecure pickle serialization 1 = json serialization but pickles can still be read (still insecure) 2 = json serialization only (secure and the default) If not set here, the value for memcache_serialization_support will be read from /etc/swift/memcache.conf (see memcache.conf-sample). To avoid an instant full cache flush, existing installations should upgrade with 0, then set to 1 and reload, then after some time (24 hours) set to 2 and reload. In the future, the ability to use pickle serialization will be removed.'
- name: filter:ratelimit.clock_accuracy
type: string
default: '1000'
help: "clock_accuracy should represent how accurate the proxy servers' system clocks are with each other. 1000 means that all the proxies' clocks are accurate to each other within 1 millisecond. No ratelimit should be higher than the clock accuracy."
- name: filter:ratelimit.max_sleep_time_seconds
type: string
default: '60'
- name: filter:ratelimit.log_sleep_time_seconds
type: string
default: '0'
help: 'log_sleep_time_seconds of 0 means disabled'
- name: filter:ratelimit.rate_buffer_seconds
type: string
default: '5'
help: 'allows for slow rates (e.g. running up to 5 seconds behind) to catch up.'
- name: filter:ratelimit.account_ratelimit
type: string
default: '0'
help: 'account_ratelimit of 0 means disabled'
- name: filter:ratelimit.account_whitelist
type: string
default: 'a,b'
help: 'these are comma separated lists of account names'
- name: filter:ratelimit.account_blacklist
type: string
default: 'c,d'
help: 'these are comma separated lists of account names'
- name: filter:ratelimit.container_ratelimit_x
type: string
default: 'r'
help: 'Template form of the per-container write ratelimit: for containers of size x, limit write requests per second to r. See the concrete container_ratelimit_* entries below.'
- name: filter:ratelimit.container_ratelimit_0
type: string
default: '100'
help: 'for containers of size x limit write requests per second to r. The container rate will be linearly interpolated from the values given. With the values below, a container of size 5 will get a rate of 75.'
- name: filter:ratelimit.container_ratelimit_10
type: string
default: '50'
help: 'for containers of size x limit write requests per second to r. The container rate will be linearly interpolated from the values given. With the values below, a container of size 5 will get a rate of 75.'
- name: filter:ratelimit.container_ratelimit_50
type: string
default: '20'
help: 'for containers of size x limit write requests per second to r. The container rate will be linearly interpolated from the values given. With the values below, a container of size 5 will get a rate of 75.'
- name: filter:ratelimit.container_listing_ratelimit_0
type: string
default: '100'
help: 'Similarly to the above container-level write limits, the following will limit container GET (listing) requests.'
- name: filter:ratelimit.container_listing_ratelimit_10
type: string
default: '50'
help: 'Similarly to the above container-level write limits, the following will limit container GET (listing) requests.'
- name: filter:ratelimit.container_listing_ratelimit_50
type: string
default: '20'
help: 'Similarly to the above container-level write limits, the following will limit container GET (listing) requests.'
- name: filter:cname_lookup.storage_domain
type: string
default: 'example.com'
- name: filter:domain_remap.path_root
type: string
default: 'v1'
- name: filter:domain_remap.reseller_prefixes
type: string
default: 'AUTH'
- name: filter:cname_lookup.lookup_depth
type: string
default: '1'
- name: filter:tempurl.methods
type: string
default: 'GET HEAD PUT'
help: 'The methods allowed with Temp URLs.'
- name: filter:tempurl.incoming_remove_headers
type: string
default: 'x-timestamp'
help: "The headers to remove from incoming requests. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. incoming_allow_headers is a list of exceptions to these removals."
- name: filter:tempurl.incoming_allow_headers
type: string
default: ''
help: "The headers allowed as exceptions to incoming_remove_headers. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match."
- name: filter:tempurl.outgoing_remove_headers
type: string
default: 'x-object-meta-*'
help: "The headers to remove from outgoing responses. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. outgoing_allow_headers is a list of exceptions to these removals."
- name: filter:name_check.forbidden_chars
type: string
default: "\\'\"`<>"
- name: filter:name_check.maximum_length
type: string
default: '255'
- name: filter:name_check.forbidden_regexp
type: string
default: '/\\./|/\\.\\./|/\\.$|/\\.\\.$'
- name: filter:list-endpoints.list_endpoints_path
type: string
default: '/endpoints/'
- name: filter:proxy-logging.access_log_name
type: string
default: 'swift'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
- name: filter:proxy-logging.access_log_facility
type: string
default: 'LOG_LOCAL0'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
- name: filter:proxy-logging.access_log_level
type: string
default: 'INFO'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
- name: filter:proxy-logging.access_log_address
type: string
default: '/dev/log'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
- name: filter:proxy-logging.access_log_udp_host
type: string
default: ''
help: 'If set, access_log_udp_host will override access_log_address'
- name: filter:proxy-logging.access_log_udp_port
type: string
default: '514'
help: 'If set, access_log_udp_host will override access_log_address'
- name: filter:proxy-logging.access_log_statsd_host
type: host
default: 'localhost'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
- name: filter:proxy-logging.access_log_statsd_port
type: string
default: '8125'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
- name: filter:proxy-logging.access_log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
- name: filter:proxy-logging.access_log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
- name: filter:proxy-logging.access_log_statsd_metric_prefix
type: string
default: ''
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
- name: filter:proxy-logging.access_log_headers
type: string
default: 'false'
help: 'If true, request headers are logged in each access log line.'
- name: filter:bulk.max_containers_per_extraction
type: string
default: '10000'
- name: filter:bulk.max_failed_extractions
type: string
default: '1000'
- name: filter:bulk.max_deletes_per_request
type: string
default: '10000'
- name: filter:bulk.yield_frequency
type: string
default: '60'
- name: filter:slo.max_manifest_segments
type: string
default: '1000'
- name: filter:slo.max_manifest_size
type: string
default: '2097152'
- name: filter:slo.min_segment_size
type: string
default: '1048576'

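The container_ratelimit_x entries above describe a piecewise-linear limit: with write ratelimits of 100, 50 and 20 at container sizes 0, 10 and 50, a container of size 5 gets a rate of 75, exactly as the help text says. A minimal sketch of that interpolation in Python (illustrative only; this is not Swift's actual ratelimit code):

    def interpolated_ratelimit(size, points):
        # points: sorted (size, rate) pairs taken from container_ratelimit_<size>.
        if size <= points[0][0]:
            return points[0][1]
        for (s0, r0), (s1, r1) in zip(points, points[1:]):
            if size <= s1:
                return r0 + (r1 - r0) * (size - s0) / float(s1 - s0)
        return points[-1][1]

    # Defaults from the schema above: 100 req/s at size 0, 50 at 10, 20 at 50.
    print(interpolated_ratelimit(5, [(0, 100), (10, 50), (50, 20)]))  # 75.0
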
View File

@ -1,757 +0,0 @@
- version: '2013.2.1'
checkpoint: true
added:
- name: bind_ip
type: string
default: '0.0.0.0'
comment: 'New param'
- name: bind_port
type: string
default: '80'
comment: 'New param'
- name: bind_timeout
type: string
default: '30'
comment: 'New param'
- name: backlog
type: string
default: '4096'
comment: 'New param'
- name: swift_dir
type: string
default: '/etc/swift'
comment: 'New param'
- name: user
type: string
default: 'swift'
comment: 'New param'
- name: workers
type: string
default: 'auto'
help: "Use an integer to override the number of pre-forked processes that will accept connections. Should default to the number of effective cpu cores in the system. It's worth noting that individual workers will use many eventlet co-routines to service multiple concurrent requests."
comment: 'New param'
- name: max_clients
type: string
default: '1024'
help: 'Maximum concurrent requests per worker'
comment: 'New param'
- name: cert_file
type: string
default: '/etc/swift/proxy.crt'
help: 'Set the following two lines to enable SSL. This is for testing only.'
comment: 'New param'
- name: key_file
type: string
default: '/etc/swift/proxy.key'
help: 'Set the following two lines to enable SSL. This is for testing only.'
comment: 'New param'
- name: log_name
type: string
default: 'swift'
help: 'You can specify default log routing here if you want:'
comment: 'New param'
- name: log_facility
type: string
default: 'LOG_LOCAL0'
help: 'You can specify default log routing here if you want:'
comment: 'New param'
- name: log_level
type: string
default: 'INFO'
help: 'You can specify default log routing here if you want:'
comment: 'New param'
- name: log_headers
type: string
default: 'false'
help: 'You can specify default log routing here if you want:'
comment: 'New param'
- name: log_address
type: string
default: '/dev/log'
help: 'You can specify default log routing here if you want:'
comment: 'New param'
- name: trans_id_suffix
type: string
default: ''
help: 'This optional suffix (empty by default) is appended to the swift transaction id, making it easy to tell which cluster an X-Trans-Id belongs to. This is very useful when managing more than one swift cluster.'
comment: 'New param'
- name: log_custom_handlers
type: string
default: ''
help: 'comma separated list of functions to call to setup custom log handlers. functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger'
comment: 'New param'
- name: log_udp_host
type: string
default: ''
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_udp_port
type: string
default: '514'
help: 'If set, log_udp_host will override log_address'
comment: 'New param'
- name: log_statsd_host
type: host
default: 'localhost'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_port
type: string
default: '8125'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_default_sample_rate
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_sample_rate_factor
type: string
default: '1.0'
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: log_statsd_metric_prefix
type: string
default: ''
help: 'You can enable StatsD logging here:'
comment: 'New param'
- name: cors_allow_origin
type: string
default: ''
help: 'Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)'
comment: 'New param'
- name: client_timeout
type: string
default: '60'
comment: 'New param'
- name: eventlet_debug
type: string
default: 'false'
comment: 'New param'
- name: 'pipeline:main.pipeline'
type: string
default: 'catch_errors healthcheck proxy-logging cache bulk slo ratelimit tempauth container-quotas account-quotas proxy-logging proxy-server'
comment: 'New param'
- name: 'filter:account-quotas.use'
type: string
default: 'egg:swift#account_quotas'
comment: 'New param'
- name: 'filter:cname_lookup.set log_name'
type: string
default: 'cname_lookup'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
comment: 'New param'
- name: 'filter:cname_lookup.set log_facility'
type: string
default: 'LOG_LOCAL0'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
comment: 'New param'
- name: 'filter:cname_lookup.set log_level'
type: string
default: 'INFO'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
comment: 'New param'
- name: 'filter:cname_lookup.set log_address'
type: string
default: '/dev/log'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
comment: 'New param'
- name: 'app:proxy-server.log_handoffs'
type: string
default: 'true'
comment: 'New param'
- name: 'app:proxy-server.recheck_account_existence'
type: string
default: '60'
comment: 'New param'
- name: 'app:proxy-server.recheck_container_existence'
type: string
default: '60'
comment: 'New param'
- name: 'app:proxy-server.object_chunk_size'
type: string
default: '8192'
comment: 'New param'
- name: 'app:proxy-server.client_chunk_size'
type: string
default: '8192'
comment: 'New param'
- name: 'app:proxy-server.node_timeout'
type: string
default: '10'
comment: 'New param'
- name: 'app:proxy-server.conn_timeout'
type: string
default: '0.5'
comment: 'New param'
- name: 'app:proxy-server.error_suppression_interval'
type: string
default: '60'
help: "How long without an error before a node's error count is reset. This will also be how long before a node is reenabled after suppression is triggered."
comment: 'New param'
- name: 'app:proxy-server.error_suppression_limit'
type: string
default: '10'
help: 'How many errors can accumulate before a node is temporarily ignored.'
comment: 'New param'
- name: 'app:proxy-server.allow_account_management'
type: string
default: 'false'
help: "If set to 'true' any authorized user may create and delete accounts; if 'false' no one, even authorized, can."
comment: 'New param'
- name: 'app:proxy-server.object_post_as_copy'
type: string
default: 'true'
help: "Set object_post_as_copy = false to turn on fast posts where only the metadata changes are stored anew and the original data file is kept in place. This makes for quicker posts; but since the container metadata isn't updated in this mode, features like container sync won't be able to sync posts."
comment: 'New param'
- name: 'app:proxy-server.account_autocreate'
type: string
default: 'false'
help: "If set to 'true' authorized accounts that do not yet exist within the Swift cluster will be automatically created."
comment: 'New param'
- name: 'app:proxy-server.max_containers_per_account'
type: string
default: '0'
help: 'If set to a positive value, trying to create a container when the account already holds at least this many containers will result in a 403 Forbidden. Note: this is a soft limit, meaning a user might exceed the cap for recheck_account_existence seconds before the 403s kick in.'
comment: 'New param'
- name: 'app:proxy-server.max_containers_whitelist'
type: string
default: ''
help: 'This is a comma separated list of account hashes that ignore the max_containers_per_account cap.'
comment: 'New param'
- name: 'app:proxy-server.deny_host_headers'
type: string
default: ''
help: 'Comma separated list of Host headers to which the proxy will deny requests.'
comment: 'New param'
- name: 'app:proxy-server.auto_create_account_prefix'
type: string
default: '.'
help: 'Prefix used when automatically creating accounts.'
comment: 'New param'
- name: 'app:proxy-server.put_queue_depth'
type: string
default: '10'
help: 'Depth of the proxy put queue.'
comment: 'New param'
- name: 'app:proxy-server.rate_limit_after_segment'
type: string
default: '10'
help: 'Start rate-limiting object segment serving after the Nth segment of a segmented object.'
comment: 'New param'
- name: 'app:proxy-server.rate_limit_segments_per_sec'
type: string
default: '1'
help: 'Once segment rate-limiting kicks in for an object, limit segments served to N per second.'
comment: 'New param'
- name: 'app:proxy-server.sorting_method'
type: string
default: 'shuffle'
help: "Storage nodes can be chosen at random (shuffle), by using timing measurements (timing), or by using an explicit match (affinity). Using timing measurements may allow for lower overall latency, while using affinity allows for finer control. In both the timing and affinity cases, equally-sorting nodes are still randomly chosen to spread load. The valid values for sorting_method are 'affinity', 'shuffle', and 'timing'."
comment: 'New param'
- name: 'app:proxy-server.timing_expiry'
type: string
default: '300'
help: "If the 'timing' sorting_method is used, the timings will only be valid for the number of seconds configured by timing_expiry."
comment: 'New param'
- name: 'app:proxy-server.allow_static_large_object'
type: string
default: 'true'
help: "If set to false will treat objects with X-Static-Large-Object header set as a regular object on GETs, i.e. will return that object's contents. Should be set to false if slo is not used in pipeline."
comment: 'New param'
- name: 'app:proxy-server.max_large_object_get_time'
type: string
default: '86400'
help: 'The maximum time (seconds) that a large object connection is allowed to last.'
comment: 'New param'
- name: 'app:proxy-server.request_node_count'
type: string
default: '2 * replicas'
help: "Set to the number of nodes to contact for a normal request. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request."
comment: 'New param'
- name: 'app:proxy-server.read_affinity'
type: string
default: ''
help: 'Example: first read from region 1 zone 1, then region 1 zone 2, then anything in region 2, then everything else: read_affinity = r1z1=100, r1z2=200, r2=300 Default is empty, meaning no preference.'
comment: 'New param'
- name: 'app:proxy-server.write_affinity'
type: string
default: ''
help: 'Example: try to write to regions 1 and 2 before writing to any other nodes: write_affinity = r1, r2 Default is empty, meaning no preference.'
comment: 'New param'
- name: 'app:proxy-server.write_affinity_node_count'
type: string
default: '2 * replicas'
help: "The number of local (as governed by the write_affinity setting) nodes to attempt to contact first, before any non-local ones. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request."
comment: 'New param'
- name: 'app:proxy-server.swift_owner_headers'
type: string
default: 'x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2'
help: 'These are the headers whose values will only be shown to swift_owners. The exact definition of a swift_owner is up to the auth system in use, but usually indicates administrative responsibilities.'
comment: 'New param'
- name: 'filter:cname_lookup.set log_headers'
type: string
default: 'false'
help: 'Note: this middleware requires python-dnspython You can override the default log routing for this filter here:'
comment: 'New param'
- name: 'filter:tempauth.reseller_prefix'
type: string
default: 'AUTH'
help: 'The reseller prefix will verify a token begins with this prefix before even attempting to validate it. Also, with authorization, only Swift storage accounts with this prefix will be authorized by this middleware. Useful if multiple auth systems are in use for one Swift cluster.'
comment: 'New param'
- name: 'filter:tempauth.auth_prefix'
type: string
default: '/auth/'
help: 'The auth prefix will cause requests beginning with this prefix to be routed to the auth subsystem, for granting tokens, etc.'
comment: 'New param'
- name: 'filter:tempauth.token_life'
type: string
default: '86400'
help: 'The number of seconds a tempauth token remains valid after being granted.'
comment: 'New param'
- name: 'filter:tempauth.allow_overrides'
type: string
default: 'true'
help: "This allows middleware higher in the WSGI pipeline to override auth processing, useful for middleware such as tempurl and formpost. If you know you're not going to use such middleware and you want a bit of extra security, you can set this to false."
comment: 'New param'
- name: 'filter:tempauth.storage_url_scheme'
type: string
default: 'default'
help: 'This specifies what scheme to return with storage urls: http, https, or default (chooses based on what the server is running as) This can be useful with an SSL load balancer in front of a non-SSL server.'
comment: 'New param'
- name: 'filter:tempauth.user_admin_admin'
type: string
default: 'admin .admin .reseller_admin'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
comment: 'New param'
- name: 'filter:tempauth.user_test_tester'
type: string
default: 'testing .admin'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
comment: 'New param'
- name: 'filter:tempauth.user_test2_tester2'
type: string
default: 'testing2 .admin'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
comment: 'New param'
- name: 'filter:tempauth.user_test_tester3'
type: string
default: 'testing3'
help: 'Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: .reseller_admin = can do anything to any account for this auth .admin = can do anything within the account If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a .admin or .reseller_admin. The trailing optional storage_url allows you to specify an alternate url to hand back to the user upon authentication. If not specified, this defaults to $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve to what the requester would need to use to reach this host. Here are example entries, required for running the tests:'
comment: 'New param'
- name: 'filter:authtoken.paste.filter_factory'
type: string
default: 'keystoneclient.middleware.auth_token:filter_factory'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.auth_host'
type: string
default: 'keystonehost'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.auth_port'
type: string
default: '35357'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.auth_protocol'
type: string
default: 'http'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.auth_uri'
type: string
default: 'http://keystonehost:5000/'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.admin_tenant_name'
type: string
default: 'service'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.admin_user'
type: string
default: 'swift'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.admin_password'
type: string
default: 'password'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.delay_auth_decision'
type: string
default: '1'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:authtoken.cache'
type: string
default: 'swift.cache'
help: '[filter:authtoken]'
comment: 'New param'
- name: 'filter:keystoneauth.operator_roles'
type: string
default: 'admin, swiftoperator'
help: '[filter:keystoneauth] Operator roles are the roles whose members are allowed to manage a tenant: create containers and grant ACLs to others.'
comment: 'New param'
- name: 'filter:keystoneauth.reseller_admin_role'
type: string
default: 'ResellerAdmin'
help: '[filter:keystoneauth] Operator roles are the roles whose members are allowed to manage a tenant: create containers and grant ACLs to others. The reseller admin role additionally has the ability to create and delete accounts.'
comment: 'New param'
- name: 'filter:healthcheck.disable_path'
type: string
default: ''
help: "An optional filesystem path, which if present, will cause the healthcheck URL to return '503 Service Unavailable' with a body of 'DISABLED BY FILE'. This facility may be used to temporarily remove a Swift node from a load balancer pool during maintenance or upgrade (remove the file to allow the node back into the load balancer pool)."
comment: 'New param'
- name: 'filter:cache.memcache_servers'
type: string
default: '127.0.0.1:11211'
help: 'If not set here, the value for memcache_servers will be read from memcache.conf (see memcache.conf-sample) or lacking that file, it will default to the value below. You can specify multiple servers separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211'
comment: 'New param'
- name: 'filter:cache.memcache_serialization_support'
type: string
default: '2'
help: 'Sets how memcache values are serialized and deserialized: 0 = older, insecure pickle serialization 1 = json serialization but pickles can still be read (still insecure) 2 = json serialization only (secure and the default) If not set here, the value for memcache_serialization_support will be read from /etc/swift/memcache.conf (see memcache.conf-sample). To avoid an instant full cache flush, existing installations should upgrade with 0, then set to 1 and reload, then after some time (24 hours) set to 2 and reload. In the future, the ability to use pickle serialization will be removed.'
comment: 'New param'
- name: 'filter:ratelimit.clock_accuracy'
type: string
default: '1000'
help: "clock_accuracy should represent how accurate the proxy servers' system clocks are with each other. 1000 means that all the proxies' clocks are accurate to each other within 1 millisecond. No ratelimit should be higher than the clock accuracy."
comment: 'New param'
- name: 'filter:ratelimit.max_sleep_time_seconds'
type: string
default: '60'
comment: 'New param'
- name: 'filter:ratelimit.log_sleep_time_seconds'
type: string
default: '0'
help: 'log_sleep_time_seconds of 0 means disabled'
comment: 'New param'
- name: 'filter:ratelimit.rate_buffer_seconds'
type: string
default: '5'
help: 'allows for slow rates (e.g. running up to 5 seconds behind) to catch up.'
comment: 'New param'
- name: 'filter:ratelimit.account_ratelimit'
type: string
default: '0'
help: 'account_ratelimit of 0 means disabled'
comment: 'New param'
- name: 'filter:ratelimit.account_whitelist'
type: string
default: 'a,b'
help: 'these are comma separated lists of account names'
comment: 'New param'
- name: 'filter:ratelimit.account_blacklist'
type: string
default: 'c,d'
help: 'these are comma separated lists of account names'
comment: 'New param'
- name: 'filter:ratelimit.container_ratelimit_x'
type: string
default: 'r'
help: 'Template form of the per-container write ratelimit: for containers of size x, limit write requests per second to r. See the concrete container_ratelimit_* entries below.'
comment: 'New param'
- name: 'filter:ratelimit.container_ratelimit_0'
type: string
default: '100'
help: 'for containers of size x limit write requests per second to r. The container rate will be linearly interpolated from the values given. With the values below, a container of size 5 will get a rate of 75.'
comment: 'New param'
- name: 'filter:ratelimit.container_ratelimit_10'
type: string
default: '50'
help: 'for containers of size x limit write requests per second to r. The container rate will be linearly interpolated from the values given. With the values below, a container of size 5 will get a rate of 75.'
comment: 'New param'
- name: 'filter:ratelimit.container_ratelimit_50'
type: string
default: '20'
help: 'for containers of size x limit write requests per second to r. The container rate will be linearly interpolated from the values given. With the values below, a container of size 5 will get a rate of 75.'
comment: 'New param'
- name: 'filter:ratelimit.container_listing_ratelimit_0'
type: string
default: '100'
help: 'Similarly to the above container-level write limits, the following will limit container GET (listing) requests.'
comment: 'New param'
- name: 'filter:ratelimit.container_listing_ratelimit_10'
type: string
default: '50'
help: 'Similarly to the above container-level write limits, the following will limit container GET (listing) requests.'
comment: 'New param'
- name: 'filter:ratelimit.container_listing_ratelimit_50'
type: string
default: '20'
help: 'Similarly to the above container-level write limits, the following will limit container GET (listing) requests.'
comment: 'New param'
- name: 'filter:cname_lookup.storage_domain'
type: string
default: 'example.com'
comment: 'New param'
- name: 'filter:domain_remap.path_root'
type: string
default: 'v1'
comment: 'New param'
- name: 'filter:domain_remap.reseller_prefixes'
type: string
default: 'AUTH'
comment: 'New param'
- name: 'filter:cname_lookup.lookup_depth'
type: string
default: '1'
comment: 'New param'
- name: 'filter:tempurl.methods'
type: string
default: 'GET HEAD PUT'
help: 'The methods allowed with Temp URLs.'
comment: 'New param'
- name: 'filter:tempurl.incoming_remove_headers'
type: string
default: 'x-timestamp'
help: "The headers to remove from incoming requests. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. incoming_allow_headers is a list of exceptions to these removals."
comment: 'New param'
- name: 'filter:tempurl.incoming_allow_headers'
type: string
default: ''
help: "The headers allowed as exceptions to incoming_remove_headers. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match."
comment: 'New param'
- name: 'filter:tempurl.outgoing_remove_headers'
type: string
default: 'x-object-meta-*'
help: "The headers to remove from outgoing responses. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. outgoing_allow_headers is a list of exceptions to these removals."
comment: 'New param'
- name: 'filter:name_check.forbidden_chars'
type: string
default: "\\'\"`<>"
comment: 'New param'
- name: 'filter:name_check.maximum_length'
type: string
default: '255'
comment: 'New param'
- name: 'filter:name_check.forbidden_regexp'
type: string
default: '/\\./|/\\.\\./|/\\.$|/\\.\\.$'
comment: 'New param'
- name: 'filter:list-endpoints.list_endpoints_path'
type: string
default: '/endpoints/'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_name'
type: string
default: 'swift'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
comment: 'New param'
- name: 'filter:proxy-logging.access_log_facility'
type: string
default: 'LOG_LOCAL0'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
comment: 'New param'
- name: 'filter:proxy-logging.access_log_level'
type: string
default: 'INFO'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
comment: 'New param'
- name: 'filter:proxy-logging.access_log_address'
type: string
default: '/dev/log'
help: "If not set, logging directives from [DEFAULT] without 'access_' will be used"
comment: 'New param'
- name: 'filter:proxy-logging.access_log_udp_host'
type: string
default: ''
help: 'If set, access_log_udp_host will override access_log_address'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_udp_port'
type: string
default: '514'
help: 'If set, access_log_udp_host will override access_log_address'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_statsd_host'
type: host
default: 'localhost'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_statsd_port'
type: string
default: '8125'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_statsd_default_sample_rate'
type: string
default: '1.0'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_statsd_sample_rate_factor'
type: string
default: '1.0'
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_statsd_metric_prefix'
type: string
default: ''
help: 'You can use log_statsd_* from [DEFAULT] or override them here:'
comment: 'New param'
- name: 'filter:proxy-logging.access_log_headers'
type: string
default: 'false'
help: 'If true, request headers are logged in each access log line.'
comment: 'New param'
- name: 'filter:bulk.max_containers_per_extraction'
type: string
default: '10000'
comment: 'New param'
- name: 'filter:bulk.max_failed_extractions'
type: string
default: '1000'
comment: 'New param'
- name: 'filter:bulk.max_deletes_per_request'
type: string
default: '10000'
comment: 'New param'
- name: 'filter:bulk.yield_frequency'
type: string
default: '60'
comment: 'New param'
- name: 'filter:slo.max_manifest_segments'
type: string
default: '1000'
comment: 'New param'
- name: 'filter:slo.max_manifest_size'
type: string
default: '2097152'
comment: 'New param'
- name: 'filter:slo.min_segment_size'
type: string
default: '1048576'
comment: 'New param'
# ====================================================

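Several of the proxy-server values above use small expression syntaxes: request_node_count and write_affinity_node_count accept either a plain integer or '<n> * replicas', and read_affinity maps region/zone selectors to priorities, as in r1z1=100, r1z2=200, r2=300. A sketch of how such values can be parsed (these helpers are illustrative; they are neither rubick's nor Swift's actual code):

    def parse_node_count(value, replicas):
        # '2 * replicas' scales by the ring's replica count; otherwise the
        # value is taken as a literal integer.
        value = value.strip()
        if value.endswith('* replicas'):
            return int(value[:-len('* replicas')].strip()) * replicas
        return int(value)

    def parse_read_affinity(value):
        # 'r1z1=100, r1z2=200, r2=300' -> {'r1z1': 100, 'r1z2': 200, 'r2': 300}
        # Lower priorities are read first; an empty string means no preference.
        result = {}
        for part in filter(None, (p.strip() for p in value.split(','))):
            selector, _, priority = part.partition('=')
            result[selector.strip()] = int(priority)
        return result

    print(parse_node_count('2 * replicas', replicas=3))       # 6
    print(parse_read_affinity('r1z1=100, r1z2=200, r2=300'))
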
View File

@ -1,36 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def yaml_string(s, allowSimple=False):
    # Strings containing an apostrophe are emitted double-quoted, with
    # backslashes and double quotes escaped; everything else is
    # single-quoted unless allowSimple is set and the string contains no
    # character that is significant to YAML (space, colon, comma).
    if "'" in s:
        return '"%s"' % s.replace('\\', '\\\\').replace('"', '\\"')
    if not allowSimple or any(c in s for c in " :,"):
        return "'%s'" % s
    return s


def yaml_value(x):
    # Render a Python value as a YAML scalar: '~' for None, 'true'/'false'
    # for booleans, a quoted string for str, and repr() for anything else
    # (covering ints and floats).
    if x is None:
        return '~'
    elif x is True:
        return 'true'
    elif x is False:
        return 'false'
    elif isinstance(x, str):
        return yaml_string(x)
    else:
        return repr(x)

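A quick demonstration of how the two helpers above quote values; the expected outputs in the comments follow directly from tracing the code (the import line assumes a module name, which is not part of the original source):

    from py2yaml_helpers import yaml_string, yaml_value  # hypothetical module name

    print(yaml_value(None))                        # ~
    print(yaml_value(True))                        # true
    print(yaml_value(8192))                        # 8192
    print(yaml_string('swift'))                    # 'swift' (single-quoted by default)
    print(yaml_string('swift', allowSimple=True))  # swift (bare: no space, colon or comma)
    print(yaml_string("node's"))                   # "node's" (apostrophe forces double quotes)
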
View File

@ -1,29 +0,0 @@
{% extends "bootstrap/base.html" %}
{% import "bootstrap/wtf.html" as wtf %}
{% block title %}OpenStack Validator Result{% endblock %}
{% block content %}
<div class="container">
  <h1>OpenStack Validation Error</h1>
  <p>
    {{ message }}
  </p>
  <form action="/validation" method="POST">
    <div style="display: none">
      {{ form.hidden_tag() }}
      {{ wtf.form_field(form.nodes) }}
      {{ wtf.form_field(form.username) }}
      {{ wtf.form_field(form.private_key) }}
    </div>
    <button type="submit" class="btn btn-info">Re-run inspection</button>
    <a class="btn btn-default" href="/validation">New inspection</a>
  </form>
</div>
{% endblock %}

View File

@ -1,13 +0,0 @@
{% extends "bootstrap/base.html" %}
{% import "bootstrap/wtf.html" as wtf %}
{% block title %}OpenStack Validator{% endblock %}
{% block content %}
<div class="container">
  <h1>OpenStack Validator</h1>
  {{ wtf.quick_form(form, action='/validation', method='POST', button_map={'launch': 'primary'}) }}
</div>
{% endblock %}

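The index template above renders a form with nodes, username and private_key fields and a launch submit button (note the button_map). A hedged reconstruction of what that Flask-WTF form could have looked like; only the field names come from the templates, while the field types, labels and validators are assumptions:

    from flask_wtf import FlaskForm  # the era's flask.ext.wtf.Form would also work
    from wtforms import StringField, SubmitField, TextAreaField
    from wtforms.validators import DataRequired

    class InspectionForm(FlaskForm):
        # Field names match those referenced by the templates.
        nodes = TextAreaField('Nodes', validators=[DataRequired()])
        username = StringField('SSH username', validators=[DataRequired()])
        private_key = TextAreaField('SSH private key')
        launch = SubmitField('Launch')
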
View File

@ -1,65 +0,0 @@
{% extends "bootstrap/base.html" %}
{% import "bootstrap/wtf.html" as wtf %}
{% block title %}OpenStack Validator Result{% endblock %}
{% block content %}
<div class="container">
  <h1>OpenStack Validation Result</h1>
  <h2>Hosts</h2>
  <ul>
    {% for host in openstack.hosts %}
    <li>
      <span class="node-name">{{ host.name }}</span>
      <ul>
        {% for component in host.components %}
        <li>{{ component.name }} version {{ component.version }}</li>
        {% endfor %}
      </ul>
    </li>
    {% endfor %}
  </ul>
  <h2>Issues</h2>
  <ul>
    {% for group, issues in grouped_issues %}
    <li>
      <span>
        {% if group %}
        {{ group }}
        {% else %}
        General issues
        {% endif %}
      </span>
      <ul>
        {% for issue in issues %}
        <li>
          <span class="label {{ issue.type | to_label }}">{{ issue.type | capitalize }}</span>
          {{ issue.message }}
          {% if issue.mark %}
          (line {{ issue.mark.line+1 }} column {{ issue.mark.column+1 }})
          {% endif %}
        </li>
        {% endfor %}
      </ul>
    </li>
    {% endfor %}
  </ul>
  <form action="/validation" method="POST">
    <div style="display: none">
      {{ form.hidden_tag() }}
      {{ wtf.form_field(form.nodes) }}
      {{ wtf.form_field(form.username) }}
      {{ wtf.form_field(form.private_key) }}
    </div>
    <button type="submit" class="btn btn-info">Re-run inspection</button>
    <a class="btn btn-default" href="/validation">New inspection</a>
  </form>
</div>
{% endblock %}

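The result template relies on a custom to_label Jinja filter and a grouped_issues value supplied by the view. A sketch of plausible supporting code (the Bootstrap class mapping and the grouping key are assumptions; only the names to_label and grouped_issues come from the template):

    from itertools import groupby

    def to_label(issue_type):
        # Map an issue type to a Bootstrap label class (assumed mapping).
        return {
            'error': 'label-danger',
            'warning': 'label-warning',
            'info': 'label-info',
        }.get(issue_type, 'label-default')

    def group_issues(issues):
        # Build the (group, issues) pairs the template iterates over;
        # issues whose group is None fall under the 'General issues' heading.
        key = lambda issue: issue.group
        return [(group, list(items))
                for group, items in groupby(sorted(issues, key=lambda i: key(i) or ''), key)]

    # Registration with a Flask app object (hypothetical):
    # app.jinja_env.filters['to_label'] = to_label
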
Some files were not shown because too many files have changed in this diff.