Initial version of Savanna v0.2
* pluggable provisioning mechanism
* a lot of old code has been removed
* new version of REST API (1.0)
* image registry draft implemented as novaclient extension
* oslo updated
* using oslo.db instead of flask-sqlalchemy
* some hacking fixes
* using alembic for db migrations

Partially implements blueprint pluggable-cluster-provisioning.
Partially implements blueprint savanna-rest-api-1-0.
Implements blueprint hadoop-image-registry.
Implements blueprint fulfill-openstack-requirements.
Implements blueprint db-migrate-support.

Change-Id: I5df80d67e25c2f4f8367f78f67fb9e9e76fc3647
Parent: e2eba36648
Commit: b6829d9d8b
@@ -47,7 +47,7 @@ def main():
     if os.path.exists(dev_conf):
         config_files = [dev_conf]
 
-    config.parse_args(sys.argv[1:], config_files)
+    config.parse_configs(sys.argv[1:], config_files)
     logging.setup("savanna")
 
     app = server.make_app()
bin/savanna-db-manage (Executable file, 27 lines)
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+
+sys.path.insert(0, os.getcwd())
+
+from savanna.db.migration.cli import main
+
+if __name__ == '__main__':
+    main()
@ -1,44 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# If ../savanna/__init__.py exists, add ../ to Python search path, so that
|
||||
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
os.pardir,
|
||||
os.pardir))
|
||||
if os.path.exists(os.path.join(possible_topdir,
|
||||
'savanna',
|
||||
'__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
|
||||
from savanna import cli
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
dev_conf = os.path.join(possible_topdir,
|
||||
'etc',
|
||||
'savanna',
|
||||
'savanna.conf')
|
||||
config_files = None
|
||||
if os.path.exists(dev_conf):
|
||||
config_files = [dev_conf]
|
||||
|
||||
cli.main(argv=sys.argv, config_files=config_files)
|
@ -15,6 +15,8 @@
|
||||
# logging will go to stdout. (string value)
|
||||
#log_file=<None>
|
||||
|
||||
plugins=vanilla
|
||||
|
||||
[cluster_node]
|
||||
|
||||
# An existing user on Hadoop image (string value)
|
||||
@ -35,3 +37,6 @@
|
||||
|
||||
# URL for sqlalchemy database (string value)
|
||||
#database_uri=sqlite:////tmp/savanna-server.db
|
||||
|
||||
[plugin:vanilla]
|
||||
plugin_class=savanna.plugins.vanilla.plugin:VanillaProvider
|
||||
|
@ -58,13 +58,9 @@
|
||||
# Log output to standard error (boolean value)
|
||||
#use_stderr=true
|
||||
|
||||
# Default file mode used when creating log files (string
|
||||
# value)
|
||||
#logfile_mode=0644
|
||||
|
||||
# format string to use for log messages with context (string
|
||||
# value)
|
||||
#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
|
||||
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
|
||||
|
||||
# format string to use for log messages without context
|
||||
# (string value)
|
||||
@ -111,12 +107,12 @@
|
||||
# %(default)s (string value)
|
||||
#log_date_format=%Y-%m-%d %H:%M:%S
|
||||
|
||||
# (Optional) Name of log file to output to. If not set,
|
||||
# logging will go to stdout. (string value)
|
||||
# (Optional) Name of log file to output to. If no default is
|
||||
# set, logging will go to stdout. (string value)
|
||||
#log_file=<None>
|
||||
|
||||
# (Optional) The directory to keep log files in (will be
|
||||
# prepended to --log-file) (string value)
|
||||
# (Optional) The base directory used for relative --log-file
|
||||
# paths (string value)
|
||||
#log_dir=<None>
|
||||
|
||||
# Use syslog for logging. (boolean value)
|
||||
@ -143,6 +139,14 @@
|
||||
#default_publisher_id=$host
|
||||
|
||||
|
||||
#
|
||||
# Options defined in savanna.plugins.base
|
||||
#
|
||||
|
||||
# List of plugins to be loaded (list value)
|
||||
#plugins=
|
||||
|
||||
|
||||
[cluster_node]
|
||||
|
||||
#
|
||||
|
@ -1,5 +1,5 @@
|
||||
[DEFAULT]
|
||||
modules=setup, jsonutils, xmlutils, timeutils, exception, gettextutils, log, local, notifier/api, notifier/log_notifier, notifier/no_op_notifier, notifier/test_notifier, notifier/__init__, importutils, context, uuidutils, version
|
||||
modules=setup, jsonutils, xmlutils, timeutils, exception, gettextutils, log, local, notifier/api, notifier/log_notifier, notifier/no_op_notifier, notifier/test_notifier, notifier/__init__, importutils, context, uuidutils, version, threadgroup, db, db.sqlalchemy
|
||||
base=savanna
|
||||
|
||||
# The following code from 'wsgi' is needed:
|
||||
|
@ -1,109 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from flask import request
|
||||
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.service import api
|
||||
import savanna.service.validation as v
|
||||
import savanna.utils.api as api_u
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
rest = api_u.Rest('v02', __name__)
|
||||
|
||||
|
||||
@rest.get('/node-templates')
|
||||
def templates_list():
|
||||
try:
|
||||
return api_u.render(
|
||||
node_templates=[nt.dict for nt in api.get_node_templates()])
|
||||
except Exception, e:
|
||||
return api_u.internal_error(500,
|
||||
"Exception while listing NodeTemplates", e)
|
||||
|
||||
|
||||
@rest.post('/node-templates')
|
||||
@v.validate(v.validate_node_template_create)
|
||||
def templates_create():
|
||||
data = api_u.request_data()
|
||||
headers = request.headers
|
||||
|
||||
return api_u.render(api.create_node_template(data, headers).wrapped_dict)
|
||||
|
||||
|
||||
@rest.get('/node-templates/<template_id>')
|
||||
@v.exists_by_id(api.get_node_template, 'template_id')
|
||||
def templates_get(template_id):
|
||||
nt = api.get_node_template(id=template_id)
|
||||
return api_u.render(nt.wrapped_dict)
|
||||
|
||||
|
||||
@rest.put('/node-templates/<template_id>')
|
||||
def templates_update(template_id):
|
||||
return api_u.internal_error(501, NotImplementedError(
|
||||
"Template update op isn't implemented (id '%s')"
|
||||
% template_id))
|
||||
|
||||
|
||||
@rest.delete('/node-templates/<template_id>')
|
||||
@v.exists_by_id(api.get_node_template, 'template_id')
|
||||
@v.validate(v.validate_node_template_terminate)
|
||||
def templates_delete(template_id):
|
||||
api.terminate_node_template(id=template_id)
|
||||
return api_u.render()
|
||||
|
||||
|
||||
@rest.get('/clusters')
|
||||
def clusters_list():
|
||||
tenant_id = request.headers['X-Tenant-Id']
|
||||
try:
|
||||
return api_u.render(
|
||||
clusters=[c.dict for c in api.get_clusters(tenant_id=tenant_id)])
|
||||
except Exception, e:
|
||||
return api_u.internal_error(500, 'Exception while listing Clusters', e)
|
||||
|
||||
|
||||
@rest.post('/clusters')
|
||||
@v.validate(v.validate_cluster_create)
|
||||
def clusters_create():
|
||||
data = api_u.request_data()
|
||||
headers = request.headers
|
||||
|
||||
return api_u.render(api.create_cluster(data, headers).wrapped_dict)
|
||||
|
||||
|
||||
@rest.get('/clusters/<cluster_id>')
|
||||
@v.exists_by_id(api.get_cluster, 'cluster_id', tenant_specific=True)
|
||||
def clusters_get(cluster_id):
|
||||
tenant_id = request.headers['X-Tenant-Id']
|
||||
c = api.get_cluster(id=cluster_id, tenant_id=tenant_id)
|
||||
return api_u.render(c.wrapped_dict)
|
||||
|
||||
|
||||
@rest.put('/clusters/<cluster_id>')
|
||||
def clusters_update(cluster_id):
|
||||
return api_u.internal_error(501, NotImplementedError(
|
||||
"Cluster update op isn't implemented (id '%s')"
|
||||
% cluster_id))
|
||||
|
||||
|
||||
@rest.delete('/clusters/<cluster_id>')
|
||||
@v.exists_by_id(api.get_cluster, 'cluster_id', tenant_specific=True)
|
||||
def clusters_delete(cluster_id):
|
||||
headers = request.headers
|
||||
tenant_id = headers['X-Tenant-Id']
|
||||
api.terminate_cluster(headers, id=cluster_id, tenant_id=tenant_id)
|
||||
|
||||
return api_u.render()
|
savanna/api/v10.py (Normal file, 168 lines)
@ -0,0 +1,168 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.service import api
|
||||
import savanna.utils.api as u
|
||||
from savanna.utils.openstack.nova import novaclient
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
rest = u.Rest('v10', __name__)
|
||||
|
||||
|
||||
## Cluster ops
|
||||
|
||||
@rest.get('/clusters')
|
||||
def clusters_list(ctx):
|
||||
return u.render(clusters=[c.dict for c in api.get_clusters()])
|
||||
|
||||
|
||||
@rest.post('/clusters')
|
||||
def clusters_create(data):
|
||||
return u.render(api.create_cluster(data).wrapped_dict)
|
||||
|
||||
|
||||
@rest.get('/clusters/<cluster_id>')
|
||||
def clusters_get(cluster_id):
|
||||
return u.render(api.get_cluster(id=cluster_id).wrapped_dict)
|
||||
|
||||
|
||||
@rest.put('/clusters/<cluster_id>')
|
||||
def clusters_update(cluster_id):
|
||||
return u.internal_error(501, NotImplementedError(
|
||||
"Cluster update op isn't implemented (id '%s')"
|
||||
% cluster_id))
|
||||
|
||||
|
||||
@rest.delete('/clusters/<cluster_id>')
|
||||
def clusters_delete(cluster_id):
|
||||
api.terminate_cluster(id=cluster_id)
|
||||
return u.render()
|
||||
|
||||
|
||||
## ClusterTemplate ops
|
||||
|
||||
@rest.get('/cluster-templates')
|
||||
def cluster_templates_list():
|
||||
return u.render(
|
||||
cluster_templates=[t.dict for t in api.get_cluster_templates()])
|
||||
|
||||
|
||||
@rest.post('/cluster-templates')
|
||||
def cluster_templates_create(data):
|
||||
return u.render(api.create_cluster_template(data).wrapped_dict)
|
||||
|
||||
|
||||
@rest.get('/cluster-templates/<cluster_template_id>')
|
||||
def cluster_templates_get(cluster_template_id):
|
||||
return u.render(
|
||||
api.get_cluster_template(id=cluster_template_id).wrapped_dict)
|
||||
|
||||
|
||||
@rest.put('/cluster-templates/<cluster_template_id>')
|
||||
def cluster_templates_update(_cluster_template_id):
|
||||
pass
|
||||
|
||||
|
||||
@rest.delete('/cluster-templates/<cluster_template_id>')
|
||||
def cluster_templates_delete(cluster_template_id):
|
||||
api.terminate_cluster_template(id=cluster_template_id)
|
||||
return u.render()
|
||||
|
||||
|
||||
## NodeGroupTemplate ops
|
||||
|
||||
@rest.get('/node-group-templates')
|
||||
def node_group_templates_list():
|
||||
return u.render(
|
||||
node_group_template=[t.dict for t in api.get_node_group_templates()])
|
||||
|
||||
|
||||
@rest.post('/node-group-templates')
|
||||
def node_group_templates_create(data):
|
||||
return u.render(api.create_node_group_template(data).wrapped_dict)
|
||||
|
||||
|
||||
@rest.get('/node-group-templates/<node_group_template_id>')
|
||||
def node_group_templates_get(node_group_template_id):
|
||||
return u.render(
|
||||
api.get_node_group_template(id=node_group_template_id).wrapped_dict)
|
||||
|
||||
|
||||
@rest.put('/node-group-templates/<node_group_template_id>')
|
||||
def node_group_templates_update(_node_group_template_id):
|
||||
pass
|
||||
|
||||
|
||||
@rest.delete('/node-group-templates/<node_group_template_id>')
|
||||
def node_group_templates_delete(node_group_template_id):
|
||||
api.terminate_node_group_template(id=node_group_template_id)
|
||||
return u.render()
|
||||
|
||||
|
||||
## Plugins ops
|
||||
|
||||
@rest.get('/plugins')
|
||||
def plugins_list():
|
||||
return u.render(plugins=[p.dict for p in api.get_plugins()])
|
||||
|
||||
|
||||
@rest.get('/plugins/<plugin_name>')
|
||||
def plugins_get(plugin_name):
|
||||
return u.render(api.get_plugin(plugin_name).wrapped_dict)
|
||||
|
||||
|
||||
@rest.get('/plugins/<plugin_name>/<version>')
|
||||
def plugins_get_version(plugin_name, version):
|
||||
return u.render(api.get_plugin(plugin_name, version).wrapped_dict)
|
||||
|
||||
|
||||
## Image Registry ops
|
||||
|
||||
@rest.get('/images')
|
||||
def images_list():
|
||||
return u.render(
|
||||
images=[i.dict for i in novaclient().images.list_registered()])
|
||||
|
||||
|
||||
@rest.get('/images/<image_id>')
|
||||
def images_get(image_id):
|
||||
return u.render(novaclient().images.get(image_id).dict)
|
||||
|
||||
|
||||
def _render_image(image_id, nova):
|
||||
return u.render(nova.images.get(image_id).wrapped_dict)
|
||||
|
||||
|
||||
@rest.post('/images/<image_id>')
|
||||
def images_set(image_id, data):
|
||||
nova = novaclient()
|
||||
nova.images.set_description(image_id, **data)
|
||||
return _render_image(image_id, nova)
|
||||
|
||||
|
||||
@rest.post('/images/<image_id>/tag')
|
||||
def image_tags_add(image_id, data):
|
||||
nova = novaclient()
|
||||
nova.images.tag(image_id, **data)
|
||||
return _render_image(image_id, nova)
|
||||
|
||||
|
||||
@rest.post('/images/<image_id>/untag')
|
||||
def image_tags_delete(image_id, data):
|
||||
nova = novaclient()
|
||||
nova.images.untag(image_id, **data)
|
||||
return _render_image(image_id, nova)
|
@ -1,89 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from flask import Flask
|
||||
|
||||
from oslo.config import cfg
|
||||
from savanna.openstack.common import log
|
||||
from savanna.storage.db import DB
|
||||
from savanna.storage.db import setup_storage
|
||||
from savanna.storage.defaults import setup_defaults
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseCmd(object):
|
||||
name = None
|
||||
|
||||
@classmethod
|
||||
def add_argument_parser(cls, subparsers):
|
||||
parser = subparsers.add_parser(cls.name, help=cls.__doc__)
|
||||
parser.set_defaults(cmd_class=cls)
|
||||
return parser
|
||||
|
||||
|
||||
class ResetDbCmd(BaseCmd):
|
||||
"""Reset the database."""
|
||||
|
||||
name = 'reset-db'
|
||||
|
||||
@classmethod
|
||||
def add_argument_parser(cls, subparsers):
|
||||
parser = super(ResetDbCmd, cls).add_argument_parser(subparsers)
|
||||
parser.add_argument('--with-gen-templates', action='store_true')
|
||||
return parser
|
||||
|
||||
@staticmethod
|
||||
def main():
|
||||
gen = CONF.command.with_gen_templates
|
||||
|
||||
app = Flask('savanna.manage')
|
||||
setup_storage(app)
|
||||
|
||||
DB.drop_all()
|
||||
DB.create_all()
|
||||
|
||||
setup_defaults(True, gen)
|
||||
|
||||
LOG.info("DB has been removed and created from scratch, "
|
||||
"gen templates: %s", gen)
|
||||
|
||||
|
||||
CLI_COMMANDS = [
|
||||
ResetDbCmd,
|
||||
]
|
||||
|
||||
|
||||
def add_command_parsers(subparsers):
|
||||
for cmd in CLI_COMMANDS:
|
||||
cmd.add_argument_parser(subparsers)
|
||||
|
||||
|
||||
command_opt = cfg.SubCommandOpt('command',
|
||||
title='Commands',
|
||||
help='Available commands',
|
||||
handler=add_command_parsers)
|
||||
|
||||
|
||||
def main(argv=None, config_files=None):
|
||||
CONF.register_cli_opt(command_opt)
|
||||
CONF(args=argv[1:],
|
||||
project='savanna',
|
||||
usage='%(prog)s [' + '|'.join(
|
||||
[cmd.name for cmd in CLI_COMMANDS]) + ']',
|
||||
default_config_files=config_files)
|
||||
log.setup("savanna")
|
||||
CONF.command.cmd_class.main()
|
@ -20,15 +20,21 @@ cli_opts = [
|
||||
help='set host'),
|
||||
cfg.IntOpt('port', default=8080,
|
||||
help='set port'),
|
||||
cfg.BoolOpt('allow-cluster-ops', default=True,
|
||||
help='without that option'
|
||||
' the application operates in dry run mode and does not '
|
||||
' send any requests to the OpenStack cluster')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_cli_opts(cli_opts)
|
||||
|
||||
ARGV = []
|
||||
|
||||
def parse_args(argv, conf_files):
|
||||
CONF(argv, project='savanna', default_config_files=conf_files)
|
||||
|
||||
def parse_configs(argv=None, conf_files=None):
|
||||
if argv is not None:
|
||||
global ARGV
|
||||
ARGV = argv
|
||||
try:
|
||||
CONF(ARGV, project='savanna', default_config_files=conf_files)
|
||||
except cfg.RequiredOptError as roe:
|
||||
# todo replace RuntimeError with Savanna-specific exception
|
||||
raise RuntimeError("Option '%s' is required for config group "
|
||||
"'%s'" % (roe.opt_name, roe.group.name))
|
||||
|
savanna/context.py (Normal file, 63 lines)
@ -0,0 +1,63 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import threading
|
||||
|
||||
from savanna.db import api as db_api
|
||||
from savanna.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO(slukjanov): it'll be better to use common_context.RequestContext as base
|
||||
class Context(object):
|
||||
def __init__(self, user_id, tenant_id, auth_token, headers, **kwargs):
|
||||
if kwargs:
|
||||
LOG.warn('Arguments dropped when creating context: %s', kwargs)
|
||||
|
||||
self.user_id = user_id
|
||||
self.tenant_id = tenant_id
|
||||
self.auth_token = auth_token
|
||||
self.headers = headers
|
||||
self._db_session = None
|
||||
|
||||
@property
|
||||
def session(self):
|
||||
if self._db_session is None:
|
||||
self._db_session = db_api.get_session()
|
||||
return self._db_session
|
||||
|
||||
|
||||
_CTXS = threading.local()
|
||||
|
||||
|
||||
def ctx():
|
||||
if not hasattr(_CTXS, '_curr_ctx'):
|
||||
# todo replace with specific error
|
||||
raise RuntimeError("Context isn't available here")
|
||||
return _CTXS._curr_ctx
|
||||
|
||||
|
||||
def set_ctx(new_ctx):
|
||||
if not new_ctx and hasattr(_CTXS, '_curr_ctx'):
|
||||
del _CTXS._curr_ctx
|
||||
elif new_ctx:
|
||||
_CTXS._curr_ctx = new_ctx
|
||||
|
||||
|
||||
def model_query(model, context=None):
|
||||
context = context or ctx()
|
||||
return context.session.query(model)
|
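The file above introduces a per-thread request context. As a rough usage sketch (not part of this change), request-handling code could bind and use it as below; the ids and token are placeholder values, and the database engine is assumed to be already configured via savanna.db.api:

```python
# Hypothetical sketch of the new thread-local context; values are placeholders
# and the DB engine is assumed to be configured already.
from savanna import context
from savanna.db import models as m

req_ctx = context.Context(user_id='demo-user',
                          tenant_id='demo-tenant',
                          auth_token='demo-token',
                          headers={})
context.set_ctx(req_ctx)           # bind the context to the current thread
try:
    # model_query() builds a query on the session lazily created by the context
    clusters = context.model_query(m.Cluster).filter_by(
        tenant_id=context.ctx().tenant_id).all()
finally:
    context.set_ctx(None)          # drop the thread-local context
```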
savanna/db/api.py (Normal file, 70 lines)
@ -0,0 +1,70 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sqlalchemy as sql
|
||||
|
||||
from savanna.db import model_base
|
||||
from savanna.openstack.common.db.sqlalchemy import session
|
||||
from savanna.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_DB_ENGINE = None
|
||||
|
||||
|
||||
def configure_db():
|
||||
"""Configure database.
|
||||
|
||||
Establish the database, create an engine if needed, and register
|
||||
the models.
|
||||
"""
|
||||
global _DB_ENGINE
|
||||
if not _DB_ENGINE:
|
||||
_DB_ENGINE = session.get_engine(sqlite_fk=True)
|
||||
register_models()
|
||||
|
||||
|
||||
def clear_db(base=model_base.SavannaBase):
|
||||
global _DB_ENGINE
|
||||
unregister_models(base)
|
||||
session.cleanup()
|
||||
_DB_ENGINE = None
|
||||
|
||||
|
||||
def get_session(autocommit=True, expire_on_commit=False):
|
||||
"""Helper method to grab session."""
|
||||
return session.get_session(autocommit=autocommit,
|
||||
expire_on_commit=expire_on_commit,
|
||||
sqlite_fk=True)
|
||||
|
||||
|
||||
def register_models(base=model_base.SavannaBase):
|
||||
"""Register Models and create properties."""
|
||||
try:
|
||||
engine = session.get_engine(sqlite_fk=True)
|
||||
base.metadata.create_all(engine)
|
||||
except sql.exc.OperationalError as e:
|
||||
LOG.info("Database registration exception: %s", e)
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def unregister_models(base=model_base.SavannaBase):
|
||||
"""Unregister Models, useful clearing out data before testing."""
|
||||
try:
|
||||
engine = session.get_engine(sqlite_fk=True)
|
||||
base.metadata.drop_all(engine)
|
||||
except Exception:
|
||||
LOG.exception("Database exception")
|
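A brief, hedged sketch of how this oslo.db-based session layer could be exercised directly, assuming a reachable `[database] connection` (for example the sqlite default) and that the model imports succeed; the template values below are made up for illustration:

```python
# Illustrative only: drive the oslo.db-backed helpers added above.
from savanna.db import api as db_api
from savanna.db import models as m

db_api.configure_db()              # create the engine and register the models

session = db_api.get_session()     # autocommit session, sqlite FKs enabled
with session.begin():
    ngt = m.NodeGroupTemplate(name='workers',            # placeholder data
                              tenant_id='demo-tenant',
                              flavor_id='42',
                              plugin_name='vanilla',
                              hadoop_version='1.1.2',
                              node_processes=['datanode', 'tasktracker'])
    session.add(ngt)

print(db_api.get_session().query(m.NodeGroupTemplate).count())
```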
savanna/db/migration/README (Normal file, 50 lines)
@ -0,0 +1,50 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
You can then upgrade to the latest database version via:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf upgrade head
|
||||
|
||||
To check the current database version:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf current
|
||||
|
||||
To create a script to run the migration offline:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf upgrade head --sql
|
||||
|
||||
To run the offline migration between specific migration versions:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf upgrade \
|
||||
<start version>:<end version> --sql
|
||||
|
||||
Upgrade the database incrementally:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf upgrade --delta \
|
||||
<# of revs>
|
||||
|
||||
Downgrade the database by a certain number of revisions:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf downgrade --delta \
|
||||
<# of revs>
|
||||
|
||||
|
||||
Create new revision:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf revision \
|
||||
-m "description of revision" --autogenerate
|
||||
|
||||
Create a blank file:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf revision \
|
||||
-m "description of revision"
|
||||
|
||||
To verify that the timeline does branch, you can run this command:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf check_migration
|
||||
|
||||
If the migration path does branch, you can find the branch point via:
|
||||
$ savanna-db-manage --config-file /path/to/savanna.conf history
|
savanna/db/migration/alembic.ini (Normal file, 66 lines)
@ -0,0 +1,66 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A generic, single database configuration.
|
||||
|
||||
[alembic]
|
||||
# path to migration scripts
|
||||
script_location = %(here)s/alembic
|
||||
|
||||
# template used to generate migration files
|
||||
# file_template = %%(rev)s_%%(slug)s
|
||||
|
||||
# set to 'true' to run the environment during
|
||||
# the 'revision' command, regardless of autogenerate
|
||||
# revision_environment = false
|
||||
|
||||
# default to an empty string because the Savanna migration cli will extract
|
||||
# the correct value and set it programatically before alembic is fully invoked.
|
||||
sqlalchemy.url =
|
||||
|
||||
# Logging configuration
|
||||
[loggers]
|
||||
keys = root,sqlalchemy,alembic
|
||||
|
||||
[handlers]
|
||||
keys = console
|
||||
|
||||
[formatters]
|
||||
keys = generic
|
||||
|
||||
[logger_root]
|
||||
level = WARN
|
||||
handlers = console
|
||||
qualname =
|
||||
|
||||
[logger_sqlalchemy]
|
||||
level = WARN
|
||||
handlers =
|
||||
qualname = sqlalchemy.engine
|
||||
|
||||
[logger_alembic]
|
||||
level = INFO
|
||||
handlers =
|
||||
qualname = alembic
|
||||
|
||||
[handler_console]
|
||||
class = StreamHandler
|
||||
args = (sys.stderr,)
|
||||
level = NOTSET
|
||||
formatter = generic
|
||||
|
||||
[formatter_generic]
|
||||
format = %(levelname)-5.5s [%(name)s] %(message)s
|
||||
datefmt = %H:%M:%S
|
savanna/db/migration/alembic_migrations/env.py (Normal file, 62 lines)
@ -0,0 +1,62 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from alembic import context
|
||||
from logging.config import fileConfig
|
||||
from savanna.openstack.common import importutils
|
||||
from sqlalchemy import create_engine, pool
|
||||
|
||||
from savanna.db import model_base
|
||||
|
||||
|
||||
importutils.import_module('savanna.db.models')
|
||||
|
||||
config = context.config
|
||||
savanna_config = config.savanna_config
|
||||
|
||||
fileConfig(config.config_file_name)
|
||||
|
||||
# set the target for 'autogenerate' support
|
||||
target_metadata = model_base.SavannaBase.metadata
|
||||
|
||||
|
||||
def run_migrations_offline():
|
||||
context.configure(url=savanna_config.database.connection)
|
||||
|
||||
with context.begin_transaction():
|
||||
context.run_migrations()
|
||||
|
||||
|
||||
def run_migrations_online():
|
||||
engine = create_engine(savanna_config.database.connection,
|
||||
poolclass=pool.NullPool)
|
||||
|
||||
connection = engine.connect()
|
||||
context.configure(
|
||||
connection=connection,
|
||||
target_metadata=target_metadata
|
||||
)
|
||||
|
||||
try:
|
||||
with context.begin_transaction():
|
||||
context.run_migrations()
|
||||
finally:
|
||||
connection.close()
|
||||
|
||||
|
||||
if context.is_offline_mode():
|
||||
run_migrations_offline()
|
||||
else:
|
||||
run_migrations_online()
|
savanna/db/migration/alembic_migrations/script.py.mako (Normal file, 41 lines)
@ -0,0 +1,41 @@
|
||||
# Copyright (c) ${create_date.year} Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""${message}
|
||||
|
||||
Revision ID: ${up_revision}
|
||||
Revises: ${down_revision}
|
||||
Create Date: ${create_date}
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = ${repr(up_revision)}
|
||||
down_revision = ${repr(down_revision)}
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
${imports if imports else ""}
|
||||
|
||||
from savanna.utils.sqlatypes import JSONEncoded
|
||||
sa.JSONEncoded = JSONEncoded
|
||||
|
||||
|
||||
def upgrade():
|
||||
${upgrades if upgrades else "pass"}
|
||||
|
||||
|
||||
def downgrade():
|
||||
${downgrades if downgrades else "pass"}
|
savanna/db/migration/alembic_migrations/versions/README (Normal file, 14 lines)
@ -0,0 +1,14 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
savanna/db/migration/alembic_migrations/versions/v02_initial.py (Normal file, 167 lines)
@ -0,0 +1,167 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""v02_initial
|
||||
|
||||
Revision ID: 2e1cdcf1dff1
|
||||
Revises: None
|
||||
Create Date: 2013-05-31 11:57:18.181738
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '2e1cdcf1dff1'
|
||||
down_revision = None
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
from savanna.utils.sqlatypes import JSONEncoded
|
||||
|
||||
sa.JSONEncoded = JSONEncoded
|
||||
|
||||
|
||||
def upgrade():
|
||||
op.create_table('NodeGroupTemplate',
|
||||
sa.Column('created', sa.DateTime(), nullable=False),
|
||||
sa.Column('updated', sa.DateTime(), nullable=False),
|
||||
sa.Column('id', sa.String(length=36), nullable=False),
|
||||
sa.Column('tenant_id', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.Column('plugin_name', sa.String(length=80),
|
||||
nullable=False),
|
||||
sa.Column('hadoop_version', sa.String(length=80),
|
||||
nullable=False),
|
||||
sa.Column('name', sa.String(length=80), nullable=False),
|
||||
sa.Column('description', sa.String(length=200),
|
||||
nullable=True),
|
||||
sa.Column('flavor_id', sa.String(length=36),
|
||||
nullable=False),
|
||||
sa.Column('node_processes', sa.JSONEncoded(),
|
||||
nullable=True),
|
||||
sa.Column('node_configs', sa.JSONEncoded(), nullable=True),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('name', 'tenant_id'))
|
||||
|
||||
op.create_table('ClusterTemplate',
|
||||
sa.Column('created', sa.DateTime(), nullable=False),
|
||||
sa.Column('updated', sa.DateTime(), nullable=False),
|
||||
sa.Column('id', sa.String(length=36), nullable=False),
|
||||
sa.Column('tenant_id', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.Column('plugin_name', sa.String(length=80),
|
||||
nullable=False),
|
||||
sa.Column('hadoop_version', sa.String(length=80),
|
||||
nullable=False),
|
||||
sa.Column('name', sa.String(length=80), nullable=False),
|
||||
sa.Column('description', sa.String(length=200),
|
||||
nullable=True),
|
||||
sa.Column('cluster_configs', sa.JSONEncoded(),
|
||||
nullable=True),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('name', 'tenant_id'))
|
||||
|
||||
op.create_table('Cluster',
|
||||
sa.Column('created', sa.DateTime(), nullable=False),
|
||||
sa.Column('updated', sa.DateTime(), nullable=False),
|
||||
sa.Column('id', sa.String(length=36), nullable=False),
|
||||
sa.Column('tenant_id', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.Column('plugin_name', sa.String(length=80),
|
||||
nullable=False),
|
||||
sa.Column('hadoop_version', sa.String(length=80),
|
||||
nullable=False),
|
||||
sa.Column('extra', sa.JSONEncoded(), nullable=True),
|
||||
sa.Column('name', sa.String(length=80), nullable=False),
|
||||
sa.Column('default_image_id', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.Column('cluster_configs', sa.JSONEncoded(),
|
||||
nullable=True),
|
||||
sa.Column('status', sa.String(length=80), nullable=True),
|
||||
sa.Column('status_description', sa.String(length=200),
|
||||
nullable=True),
|
||||
sa.Column('base_cluster_template_id', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.ForeignKeyConstraint(['base_cluster_template_id'],
|
||||
['ClusterTemplate.id'], ),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('name', 'tenant_id'))
|
||||
|
||||
op.create_table('TemplatesRelation',
|
||||
sa.Column('created', sa.DateTime(), nullable=False),
|
||||
sa.Column('updated', sa.DateTime(), nullable=False),
|
||||
sa.Column('cluster_template_id', sa.String(length=36),
|
||||
nullable=False),
|
||||
sa.Column('node_group_template_id', sa.String(length=36),
|
||||
nullable=False),
|
||||
sa.Column('node_group_name', sa.String(length=80),
|
||||
nullable=False),
|
||||
sa.Column('count', sa.Integer(), nullable=False),
|
||||
sa.ForeignKeyConstraint(['cluster_template_id'],
|
||||
['ClusterTemplate.id'], ),
|
||||
sa.ForeignKeyConstraint(['node_group_template_id'],
|
||||
['NodeGroupTemplate.id'], ),
|
||||
sa.PrimaryKeyConstraint('cluster_template_id',
|
||||
'node_group_template_id'))
|
||||
|
||||
op.create_table('NodeGroup',
|
||||
sa.Column('created', sa.DateTime(), nullable=False),
|
||||
sa.Column('updated', sa.DateTime(), nullable=False),
|
||||
sa.Column('id', sa.String(length=36), nullable=False),
|
||||
sa.Column('extra', sa.JSONEncoded(), nullable=True),
|
||||
sa.Column('cluster_id', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.Column('name', sa.String(length=80), nullable=False),
|
||||
sa.Column('flavor_id', sa.String(length=36),
|
||||
nullable=False),
|
||||
sa.Column('image_id', sa.String(length=36),
|
||||
nullable=False),
|
||||
sa.Column('node_processes', sa.JSONEncoded(),
|
||||
nullable=True),
|
||||
sa.Column('node_configs', sa.JSONEncoded(), nullable=True),
|
||||
sa.Column('anti_affinity_group', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.Column('count', sa.Integer(), nullable=False),
|
||||
sa.Column('base_node_group_template_id',
|
||||
sa.String(length=36), nullable=True),
|
||||
sa.ForeignKeyConstraint(['base_node_group_template_id'],
|
||||
['NodeGroupTemplate.id'], ),
|
||||
sa.ForeignKeyConstraint(['cluster_id'], ['Cluster.id'], ),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('name', 'cluster_id'))
|
||||
|
||||
op.create_table('Instance',
|
||||
sa.Column('created', sa.DateTime(), nullable=False),
|
||||
sa.Column('updated', sa.DateTime(), nullable=False),
|
||||
sa.Column('extra', sa.JSONEncoded(), nullable=True),
|
||||
sa.Column('node_group_id', sa.String(length=36),
|
||||
nullable=True),
|
||||
sa.Column('instance_id', sa.String(length=36),
|
||||
nullable=False),
|
||||
sa.Column('management_ip', sa.String(length=15),
|
||||
nullable=False),
|
||||
sa.ForeignKeyConstraint(['node_group_id'],
|
||||
['NodeGroup.id'], ),
|
||||
sa.PrimaryKeyConstraint('instance_id'),
|
||||
sa.UniqueConstraint('instance_id', 'node_group_id'))
|
||||
|
||||
|
||||
def downgrade():
|
||||
op.drop_table('Instance')
|
||||
op.drop_table('NodeGroup')
|
||||
op.drop_table('TemplatesRelation')
|
||||
op.drop_table('Cluster')
|
||||
op.drop_table('ClusterTemplate')
|
||||
op.drop_table('NodeGroupTemplate')
|
savanna/db/migration/cli.py (Normal file, 116 lines)
@ -0,0 +1,116 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Based on Quantum's migration/cli.py
|
||||
|
||||
import os
|
||||
|
||||
from alembic import command as alembic_cmd
|
||||
from alembic import config as alembic_cfg
|
||||
from alembic import util as alembic_u
|
||||
from oslo.config import cfg
|
||||
|
||||
|
||||
_db_opts = [
|
||||
cfg.StrOpt('connection', default='', help='URL to database'),
|
||||
]
|
||||
|
||||
CONF = cfg.ConfigOpts()
|
||||
CONF.register_opts(_db_opts, 'database')
|
||||
|
||||
|
||||
def do_alembic_command(config, cmd, *args, **kwargs):
|
||||
try:
|
||||
getattr(alembic_cmd, cmd)(config, *args, **kwargs)
|
||||
except alembic_u.CommandError as e:
|
||||
alembic_u.err(str(e))
|
||||
|
||||
|
||||
def do_check_migration(config, _cmd):
|
||||
do_alembic_command(config, 'branches')
|
||||
|
||||
|
||||
def do_upgrade_downgrade(config, cmd):
|
||||
if not CONF.command.revision and not CONF.command.delta:
|
||||
raise SystemExit('You must provide a revision or relative delta')
|
||||
|
||||
revision = CONF.command.revision
|
||||
|
||||
if CONF.command.delta:
|
||||
sign = '+' if CONF.command.name == 'upgrade' else '-'
|
||||
revision = sign + str(CONF.command.delta)
|
||||
|
||||
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
|
||||
|
||||
|
||||
def do_stamp(config, cmd):
|
||||
do_alembic_command(config, cmd,
|
||||
CONF.command.revision,
|
||||
sql=CONF.command.sql)
|
||||
|
||||
|
||||
def do_revision(config, cmd):
|
||||
do_alembic_command(config, cmd,
|
||||
message=CONF.command.message,
|
||||
autogenerate=CONF.command.autogenerate,
|
||||
sql=CONF.command.sql)
|
||||
|
||||
|
||||
def add_command_parsers(subparsers):
|
||||
for name in ['current', 'history', 'branches']:
|
||||
parser = subparsers.add_parser(name)
|
||||
parser.set_defaults(func=do_alembic_command)
|
||||
|
||||
parser = subparsers.add_parser('check_migration')
|
||||
parser.set_defaults(func=do_check_migration)
|
||||
|
||||
for name in ['upgrade', 'downgrade']:
|
||||
parser = subparsers.add_parser(name)
|
||||
parser.add_argument('--delta', type=int)
|
||||
parser.add_argument('--sql', action='store_true')
|
||||
parser.add_argument('revision', nargs='?')
|
||||
parser.set_defaults(func=do_upgrade_downgrade)
|
||||
|
||||
parser = subparsers.add_parser('stamp')
|
||||
parser.add_argument('--sql', action='store_true')
|
||||
parser.add_argument('revision')
|
||||
parser.set_defaults(func=do_stamp)
|
||||
|
||||
parser = subparsers.add_parser('revision')
|
||||
parser.add_argument('-m', '--message')
|
||||
parser.add_argument('--autogenerate', action='store_true')
|
||||
parser.add_argument('--sql', action='store_true')
|
||||
parser.set_defaults(func=do_revision)
|
||||
|
||||
|
||||
command_opt = cfg.SubCommandOpt('command',
|
||||
title='Command',
|
||||
help='Available commands',
|
||||
handler=add_command_parsers)
|
||||
|
||||
CONF.register_cli_opt(command_opt)
|
||||
|
||||
|
||||
def main():
|
||||
config = alembic_cfg.Config(
|
||||
os.path.join(os.path.dirname(__file__), 'alembic.ini')
|
||||
)
|
||||
config.set_main_option('script_location',
|
||||
'savanna.db.migration:alembic_migrations')
|
||||
# attach the Savanna conf to the Alembic conf
|
||||
config.savanna_config = CONF
|
||||
|
||||
CONF()
|
||||
CONF.command.func(config, CONF.command.name)
|
savanna/db/model_base.py (Normal file, 143 lines)
@ -0,0 +1,143 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import re
|
||||
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.ext import declarative
|
||||
from sqlalchemy import orm
|
||||
|
||||
from savanna.openstack.common import timeutils
|
||||
from savanna.openstack.common import uuidutils
|
||||
from savanna.utils.resources import BaseResource
|
||||
from savanna.utils.sqlatypes import JsonDictType
|
||||
|
||||
|
||||
class _SavannaBase(BaseResource):
|
||||
"""Base class for all Savanna Models."""
|
||||
|
||||
created = sa.Column(sa.DateTime, default=timeutils.utcnow,
|
||||
nullable=False)
|
||||
updated = sa.Column(sa.DateTime, default=timeutils.utcnow,
|
||||
nullable=False, onupdate=timeutils.utcnow)
|
||||
|
||||
__protected_attributes__ = ["created", "updated"]
|
||||
|
||||
@declarative.declared_attr
|
||||
def __tablename__(cls):
|
||||
# Table name is equals to the class name
|
||||
return cls.__name__
|
||||
|
||||
@property
|
||||
def __resource_name__(self):
|
||||
# convert CamelCase class name to camel_case
|
||||
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
|
||||
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
setattr(self, key, value)
|
||||
|
||||
def __getitem__(self, key):
|
||||
return getattr(self, key)
|
||||
|
||||
def get(self, key, default=None):
|
||||
return getattr(self, key, default)
|
||||
|
||||
def __iter__(self):
|
||||
self._i = iter(orm.object_mapper(self).columns)
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
n = self._i.next().name
|
||||
return n, getattr(self, n)
|
||||
|
||||
def update(self, values):
|
||||
"""Make the model object behave like a dict."""
|
||||
for k, v in values.iteritems():
|
||||
setattr(self, k, v)
|
||||
|
||||
def iteritems(self):
|
||||
"""Make the model object behave like a dict.
|
||||
|
||||
Includes attributes from joins.
|
||||
"""
|
||||
local = dict(self)
|
||||
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
|
||||
if not k[0] == '_'])
|
||||
local.update(joined)
|
||||
return local.iteritems()
|
||||
|
||||
def keys(self):
|
||||
return self.__dict__.keys()
|
||||
|
||||
def values(self):
|
||||
return self.__dict__.values()
|
||||
|
||||
def items(self):
|
||||
return self.__dict__.items()
|
||||
|
||||
def __repr__(self):
|
||||
"""sqlalchemy based automatic __repr__ method."""
|
||||
items = ['%s=%r' % (col.name, getattr(self, col.name))
|
||||
for col in self.__table__.columns]
|
||||
return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__,
|
||||
self.__class__.__name__,
|
||||
id(self), ', '.join(items))
|
||||
|
||||
def to_dict(self):
|
||||
"""sqlalchemy based automatic to_dict method."""
|
||||
d = {}
|
||||
for col in self.__table__.columns:
|
||||
if self._filter_field(col.name):
|
||||
continue
|
||||
d[col.name] = getattr(self, col.name)
|
||||
return d
|
||||
|
||||
|
||||
SavannaBase = declarative.declarative_base(cls=_SavannaBase)
|
||||
|
||||
|
||||
def _generate_unicode_uuid():
|
||||
return unicode(uuidutils.generate_uuid())
|
||||
|
||||
|
||||
class IdMixin(object):
|
||||
"""Id mixin, add to subclasses that have an id."""
|
||||
|
||||
id = sa.Column(sa.String(36),
|
||||
primary_key=True,
|
||||
default=_generate_unicode_uuid)
|
||||
|
||||
|
||||
class TenantMixin(object):
|
||||
"""Tenant mixin, add to subclasses that have a tenant."""
|
||||
|
||||
__filter_cols__ = ['tenant_id']
|
||||
|
||||
tenant_id = sa.Column(sa.String(36))
|
||||
|
||||
|
||||
class PluginSpecificMixin(object):
|
||||
"""Plugin specific info mixin, add to subclass that plugin specific."""
|
||||
|
||||
plugin_name = sa.Column(sa.String(80), nullable=False)
|
||||
hadoop_version = sa.Column(sa.String(80), nullable=False)
|
||||
|
||||
|
||||
class ExtraMixin(object):
|
||||
"""Extra info mixin, add to subclass that stores extra data w/o schema."""
|
||||
|
||||
__filter_cols__ = ['extra']
|
||||
|
||||
extra = sa.Column(JsonDictType())
|
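To show how the base class and mixins above are meant to compose, here is a minimal hypothetical model that is not part of this commit (models.py below contains the real ones):

```python
# Hypothetical example: a model assembled from SavannaBase plus the mixins.
import sqlalchemy as sa

from savanna.db import model_base as mb
from savanna.utils.sqlatypes import JsonDictType


class ExampleThing(mb.SavannaBase, mb.IdMixin, mb.TenantMixin, mb.ExtraMixin):
    """Inherits created/updated timestamps, a UUID id, tenant_id and extra."""

    name = sa.Column(sa.String(80), nullable=False)
    settings = sa.Column(JsonDictType())

# The table name is derived from the class name ('ExampleThing'); to_dict()
# runs each column through _filter_field(), which appears to honor the
# __filter_cols__ lists declared by the mixins.
```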
savanna/db/models.py (Normal file, 225 lines)
@ -0,0 +1,225 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.ext.associationproxy import association_proxy
|
||||
from sqlalchemy.orm import relationship
|
||||
|
||||
from savanna.db import model_base as mb
|
||||
from savanna.utils.openstack.nova import novaclient
|
||||
from savanna.utils.sqlatypes import JsonDictType
|
||||
from savanna.utils.sqlatypes import JsonListType
|
||||
|
||||
|
||||
CLUSTER_STATUSES = ['Starting', 'Active', 'Stopping', 'Error']
|
||||
|
||||
|
||||
class Cluster(mb.SavannaBase, mb.IdMixin, mb.TenantMixin,
|
||||
mb.PluginSpecificMixin, mb.ExtraMixin):
|
||||
"""Contains all info about cluster."""
|
||||
|
||||
__table_args__ = (
|
||||
sa.UniqueConstraint('name', 'tenant_id'),
|
||||
)
|
||||
|
||||
name = sa.Column(sa.String(80), nullable=False)
|
||||
default_image_id = sa.Column(sa.String(36))
|
||||
cluster_configs = sa.Column(JsonDictType())
|
||||
node_groups = relationship('NodeGroup', cascade="all,delete",
|
||||
backref='cluster')
|
||||
# todo replace String type with sa.Enum(*CLUSTER_STATUSES)
|
||||
status = sa.Column(sa.String(80))
|
||||
status_description = sa.Column(sa.String(200))
|
||||
# todo instances' credentials should be stored in cluster
|
||||
base_cluster_template_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('ClusterTemplate.id'))
|
||||
base_cluster_template = relationship('ClusterTemplate',
|
||||
backref="clusters")
|
||||
|
||||
def __init__(self, name, tenant_id, plugin_name, hadoop_version,
|
||||
status=None, status_description=None, default_image_id=None,
|
||||
cluster_configs=None, base_cluster_template_id=None,
|
||||
extra=None):
|
||||
self.name = name
|
||||
self.tenant_id = tenant_id
|
||||
self.plugin_name = plugin_name
|
||||
self.hadoop_version = hadoop_version
|
||||
self.status = status
|
||||
self.status_description = status_description
|
||||
self.default_image_id = default_image_id
|
||||
self.cluster_configs = cluster_configs or {}
|
||||
self.base_cluster_template_id = base_cluster_template_id
|
||||
self.extra = extra or {}
|
||||
|
||||
def to_dict(self):
|
||||
d = super(Cluster, self).to_dict()
|
||||
d['node_groups'] = [ng.dict for ng in self.node_groups]
|
||||
return d
|
||||
|
||||
|
||||
class NodeGroup(mb.SavannaBase, mb.IdMixin, mb.ExtraMixin):
|
||||
"""Specifies group of nodes within a cluster."""
|
||||
|
||||
__filter_cols__ = ['cluster_id']
|
||||
__table_args__ = (
|
||||
sa.UniqueConstraint('name', 'cluster_id'),
|
||||
)
|
||||
|
||||
cluster_id = sa.Column(sa.String(36), sa.ForeignKey('Cluster.id'))
|
||||
name = sa.Column(sa.String(80), nullable=False)
|
||||
flavor_id = sa.Column(sa.String(36), nullable=False)
|
||||
image_id = sa.Column(sa.String(36), nullable=False)
|
||||
node_processes = sa.Column(JsonListType())
|
||||
node_configs = sa.Column(JsonDictType())
|
||||
anti_affinity_group = sa.Column(sa.String(36))
|
||||
count = sa.Column(sa.Integer, nullable=False)
|
||||
instances = relationship('Instance', cascade="all,delete",
|
||||
backref='node_group')
|
||||
base_node_group_template_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey(
|
||||
'NodeGroupTemplate.id'))
|
||||
base_node_group_template = relationship('NodeGroupTemplate',
|
||||
backref="node_groups")
|
||||
|
||||
def __init__(self, name, flavor_id, image_id, node_processes, count,
|
||||
node_configs=None, anti_affinity_group=None, extra=None,
|
||||
base_node_group_template_id=None):
|
||||
self.name = name
|
||||
self.flavor_id = flavor_id
|
||||
self.image_id = image_id
|
||||
self.node_processes = node_processes
|
||||
self.count = count
|
||||
self.node_configs = node_configs or {}
|
||||
self.anti_affinity_group = anti_affinity_group
|
||||
self.extra = extra or {}
|
||||
self.base_node_group_template_id = base_node_group_template_id
|
||||
|
||||
|
||||
class Instance(mb.SavannaBase, mb.ExtraMixin):
|
||||
"""An OpenStack instance created for the cluster."""
|
||||
|
||||
__filter_cols__ = ['node_group_id']
|
||||
__table_args__ = (
|
||||
sa.UniqueConstraint('instance_id', 'node_group_id'),
|
||||
)
|
||||
|
||||
node_group_id = sa.Column(sa.String(36), sa.ForeignKey('NodeGroup.id'))
|
||||
instance_id = sa.Column(sa.String(36), primary_key=True)
|
||||
management_ip = sa.Column(sa.String(15), nullable=False)
|
||||
|
||||
def info(self):
|
||||
"""Returns info from nova about instance."""
|
||||
return novaclient().servers.get(self.instance_id)
|
||||
|
||||
def __init__(self, node_group_id, instance_id, management_ip, extra=None):
|
||||
self.node_group_id = node_group_id
|
||||
self.instance_id = instance_id
|
||||
self.management_ip = management_ip
|
||||
self.extra = extra or {}
|
||||
|
||||
|
||||
class ClusterTemplate(mb.SavannaBase, mb.IdMixin, mb.TenantMixin,
|
||||
mb.PluginSpecificMixin):
|
||||
"""Template for Cluster."""
|
||||
|
||||
__table_args__ = (
|
||||
sa.UniqueConstraint('name', 'tenant_id'),
|
||||
)
|
||||
|
||||
name = sa.Column(sa.String(80), nullable=False)
|
||||
description = sa.Column(sa.String(200))
|
||||
cluster_configs = sa.Column(JsonDictType())
|
||||
|
||||
# todo add node_groups_suggestion helper
|
||||
|
||||
def __init__(self, name, tenant_id, plugin_name, hadoop_version,
|
||||
cluster_configs=None, description=None):
|
||||
self.name = name
|
||||
self.tenant_id = tenant_id
|
||||
self.plugin_name = plugin_name
|
||||
self.hadoop_version = hadoop_version
|
||||
self.cluster_configs = cluster_configs or {}
|
||||
self.description = description
|
||||
|
||||
def add_node_group_template(self, node_group_template_id, name, count):
|
||||
relation = TemplatesRelation(self.id, node_group_template_id, name,
|
||||
count)
|
||||
self.templates_relations.append(relation)
|
||||
return relation
|
||||
|
||||
def to_dict(self):
|
||||
d = super(ClusterTemplate, self).to_dict()
|
||||
d['node_group_templates'] = [tr.dict for tr in
|
||||
self.templates_relations]
|
||||
return d
|
||||
|
||||
|
||||
class NodeGroupTemplate(mb.SavannaBase, mb.IdMixin, mb.TenantMixin,
|
||||
mb.PluginSpecificMixin):
|
||||
"""Template for NodeGroup."""
|
||||
|
||||
__table_args__ = (
|
||||
sa.UniqueConstraint('name', 'tenant_id'),
|
||||
)
|
||||
|
||||
name = sa.Column(sa.String(80), nullable=False)
|
||||
description = sa.Column(sa.String(200))
|
||||
flavor_id = sa.Column(sa.String(36), nullable=False)
|
||||
node_processes = sa.Column(JsonListType())
|
||||
node_configs = sa.Column(JsonDictType())
|
||||
|
||||
def __init__(self, name, tenant_id, flavor_id, plugin_name,
|
||||
hadoop_version, node_processes, node_configs=None,
|
||||
description=None):
|
||||
self.name = name
|
||||
self.tenant_id = tenant_id
|
||||
self.flavor_id = flavor_id
|
||||
self.plugin_name = plugin_name
|
||||
self.hadoop_version = hadoop_version
|
||||
self.node_processes = node_processes
|
||||
self.node_configs = node_configs or {}
|
||||
self.description = description
|
||||
|
||||
|
||||
class TemplatesRelation(mb.SavannaBase):
|
||||
"""NodeGroupTemplate - ClusterTemplate relationship."""
|
||||
|
||||
__filter_cols__ = ['cluster_template_id', 'created', 'updated']
|
||||
|
||||
cluster_template_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('ClusterTemplate.id'),
|
||||
primary_key=True)
|
||||
node_group_template_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('NodeGroupTemplate.id'),
|
||||
primary_key=True)
|
||||
cluster_template = relationship(ClusterTemplate,
|
||||
backref='templates_relations')
|
||||
node_group_template = relationship(NodeGroupTemplate,
|
||||
backref='templates_relations')
|
||||
node_group_name = sa.Column(sa.String(80), nullable=False)
|
||||
count = sa.Column(sa.Integer, nullable=False)
|
||||
|
||||
def __init__(self, cluster_template_id, node_group_template_id,
|
||||
node_group_name, count):
|
||||
self.cluster_template_id = cluster_template_id
|
||||
self.node_group_template_id = node_group_template_id
|
||||
self.node_group_name = node_group_name
|
||||
self.count = count
|
||||
|
||||
|
||||
ClusterTemplate.node_group_templates = association_proxy("templates_relations",
|
||||
"node_group_template")
|
||||
NodeGroupTemplate.cluster_templates = association_proxy("templates_relations",
|
||||
"cluster_template")
|
104
savanna/db/storage.py
Normal file
@ -0,0 +1,104 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.context import ctx
|
||||
from savanna.context import model_query
|
||||
import savanna.db.models as m
|
||||
|
||||
|
||||
## Cluster ops
|
||||
# todo: check tenant_id, etc.
|
||||
|
||||
def get_clusters(**args):
|
||||
return model_query(m.Cluster).filter_by(**args).all()
|
||||
|
||||
|
||||
def get_cluster(**args):
|
||||
return model_query(m.Cluster).filter_by(**args).first()
|
||||
|
||||
|
||||
def create_cluster(values):
|
||||
session = ctx().session
|
||||
with session.begin():
|
||||
values['tenant_id'] = ctx().tenant_id
|
||||
ngs_vals = values.pop('node_groups', [])
|
||||
cluster = m.Cluster(**values)
|
||||
for ng in ngs_vals:
|
||||
node_group = m.NodeGroup(**ng)
|
||||
cluster.node_groups.append(node_group)
|
||||
session.add(node_group)
|
||||
session.add(cluster)
|
||||
|
||||
return cluster
|
||||
|
||||
|
||||
def terminate_cluster(cluster):
|
||||
with ctx().session.begin():
|
||||
ctx().session.delete(cluster)
|
||||
|
||||
|
||||
## ClusterTemplate ops
|
||||
|
||||
def get_cluster_templates(**args):
|
||||
return model_query(m.ClusterTemplate).filter_by(**args).all()
|
||||
|
||||
|
||||
def get_cluster_template(**args):
|
||||
return model_query(m.ClusterTemplate).filter_by(**args).first()
|
||||
|
||||
|
||||
def create_cluster_template(values):
|
||||
session = ctx().session
|
||||
with session.begin():
|
||||
values['tenant_id'] = ctx().tenant_id
|
||||
ngts_vals = values.pop('node_group_templates', [])
|
||||
cluster_template = m.ClusterTemplate(**values)
|
||||
for ngt in ngts_vals:
|
||||
relation = cluster_template.add_node_group_template(
|
||||
ngt['node_group_template_id'], ngt['node_group_name'],
|
||||
ngt['count'])
|
||||
session.add(relation)
|
||||
session.add(cluster_template)
|
||||
|
||||
return cluster_template
|
||||
|
||||
|
||||
def terminate_cluster_template(**args):
|
||||
with ctx().session.begin():
|
||||
ctx().session.delete(get_cluster_template(**args))
|
||||
|
||||
|
||||
## NodeGroupTemplate ops
|
||||
|
||||
def get_node_group_templates(**args):
|
||||
return model_query(m.NodeGroupTemplate).filter_by(**args).all()
|
||||
|
||||
|
||||
def get_node_group_template(**args):
|
||||
return model_query(m.NodeGroupTemplate).filter_by(**args).first()
|
||||
|
||||
|
||||
def create_node_group_template(values):
|
||||
session = ctx().session
|
||||
with session.begin():
|
||||
values['tenant_id'] = ctx().tenant_id
|
||||
node_group_template = m.NodeGroupTemplate(**values)
|
||||
session.add(node_group_template)
|
||||
return node_group_template
|
||||
|
||||
|
||||
def terminate_node_group_template(**args):
|
||||
with ctx().session.begin():
|
||||
ctx().session.delete(get_node_group_template(**args))
|
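For orientation, here is a minimal sketch of how these storage helpers could be driven from API code. It is illustrative only: it assumes the REST layer has already set up the per-request context (tenant and session) via savanna.context, and the plugin name, version and flavor id values are hypothetical.

    # Illustrative sketch, not part of this change.
    from savanna.db import storage as s

    # Values mirror the NodeGroupTemplate constructor in models.py;
    # tenant_id is filled in by the storage layer from the context.
    ngt = s.create_node_group_template({
        'name': 'worker',
        'flavor_id': '42',                        # hypothetical flavor id
        'plugin_name': 'vanilla',                 # hypothetical plugin
        'hadoop_version': '1.1.2',
        'node_processes': ['datanode', 'tasktracker'],
    })

    # node_group_templates entries become TemplatesRelation rows.
    ct = s.create_cluster_template({
        'name': 'demo-template',
        'plugin_name': 'vanilla',
        'hadoop_version': '1.1.2',
        'node_group_templates': [
            {'node_group_template_id': ngt.id,
             'node_group_name': 'worker',
             'count': 2},
        ],
    })

    # Lookups filter by arbitrary model columns.
    templates = s.get_cluster_templates(tenant_id=ct.tenant_id)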
@ -37,102 +37,3 @@ class NotFoundException(SavannaException):
|
||||
def __init__(self, value):
|
||||
self.code = "NOT_FOUND"
|
||||
self.value = value
|
||||
|
||||
|
||||
## Cluster operations exceptions
|
||||
|
||||
class NotEnoughResourcesException(SavannaException):
|
||||
def __init__(self, list):
|
||||
self.message = "Nova available instances=%s, VCPUs=%s, RAM=%s. " \
|
||||
"Requested instances=%s, VCPUs=%s, RAM=%s" % tuple(list)
|
||||
self.code = "NOT_ENOUGH_RESOURCES"
|
||||
|
||||
|
||||
class ClusterNameExistedException(SavannaException):
|
||||
def __init__(self, value):
|
||||
self.message = "Cluster with name '%s' already exists" % value
|
||||
self.code = "CLUSTER_NAME_ALREADY_EXISTS"
|
||||
|
||||
|
||||
class ImageNotFoundException(SavannaException):
|
||||
def __init__(self, value):
|
||||
self.message = "Cannot find image with id '%s'" % value
|
||||
self.code = "IMAGE_NOT_FOUND"
|
||||
|
||||
|
||||
class NotSingleNameNodeException(SavannaException):
|
||||
def __init__(self, nn_count):
|
||||
self.message = "Hadoop cluster should contain only 1 NameNode. " \
|
||||
"Actual NN count is %s" % nn_count
|
||||
self.code = "NOT_SINGLE_NAME_NODE"
|
||||
|
||||
|
||||
class NotSingleJobTrackerException(SavannaException):
|
||||
def __init__(self, jt_count):
|
||||
self.message = "Hadoop cluster should contain only 1 JobTracker. " \
|
||||
"Actual JT count is %s" % jt_count
|
||||
self.code = "NOT_SINGLE_JOB_TRACKER"
|
||||
|
||||
|
||||
class ClusterNotFoundException(NotFoundException):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
self.message = "Cluster '%s' not found" % self.value
|
||||
self.code = "CLUSTER_NOT_FOUND"
|
||||
|
||||
|
||||
## NodeTemplates operations exceptions
|
||||
|
||||
class NodeTemplateNotFoundException(NotFoundException):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
self.message = "NodeTemplate '%s' not found" % self.value
|
||||
self.code = "NODE_TEMPLATE_NOT_FOUND"
|
||||
|
||||
|
||||
class NodeTemplateExistedException(SavannaException):
|
||||
def __init__(self, value):
|
||||
self.message = "NodeTemplate with name '%s' already exists" % value
|
||||
self.code = "NODE_TEMPLATE_ALREADY_EXISTS"
|
||||
|
||||
|
||||
class FlavorNotFoundException(SavannaException):
|
||||
def __init__(self, value):
|
||||
self.message = "Cannot find flavor with name '%s'" % value
|
||||
self.code = "FLAVOR_NOT_FOUND"
|
||||
|
||||
|
||||
class DiscrepancyNodeProcessException(SavannaException):
|
||||
def __init__(self, value):
|
||||
self.message = "Discrepancies in Node Processes. Required: %s" % value
|
||||
self.code = "NODE_PROCESS_DISCREPANCY"
|
||||
|
||||
|
||||
class RequiredParamMissedException(SavannaException):
|
||||
def __init__(self, process, param):
|
||||
self.message = "Required parameter '%s' of process '%s' should be " \
|
||||
"specified" % (param, process)
|
||||
self.code = "REQUIRED_PARAM_MISSED"
|
||||
|
||||
|
||||
class AssociatedNodeTemplateTerminationException(SavannaException):
|
||||
def __init__(self, value):
|
||||
self.message = ("The are active nodes created using NodeTemplate '%s'"
|
||||
" you trying to terminate") % value
|
||||
self.code = "ASSOCIATED_NODE_TEMPLATE_TERMINATION"
|
||||
|
||||
|
||||
class ParamNotAllowedException(SavannaException):
|
||||
def __init__(self, param, process):
|
||||
self.message = "Parameter '%s' of process '%s' is not " \
|
||||
"allowed to change" % (param, process)
|
||||
self.code = "PARAM_IS_NOT_ALLOWED"
|
||||
|
||||
|
||||
## NodeTypes operations exceptions
|
||||
|
||||
class NodeTypeNotFoundException(NotFoundException):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
self.message = "NodeType '%s' not found" % self.value
|
||||
self.code = "NODE_TYPE_NOT_FOUND"
|
||||
|
@ -17,12 +17,14 @@ from eventlet import monkey_patch
|
||||
from flask import Flask
|
||||
from keystoneclient.middleware.auth_token import filter_factory as auth_token
|
||||
from oslo.config import cfg
|
||||
from savanna.context import ctx
|
||||
from savanna.plugins.base import setup_plugins
|
||||
from werkzeug.exceptions import default_exceptions
|
||||
from werkzeug.exceptions import HTTPException
|
||||
|
||||
from savanna.api import v02 as api_v02
|
||||
from savanna.api import v10 as api_v10
|
||||
from savanna.db import api as db_api
|
||||
from savanna.middleware.auth_valid import filter_factory as auth_valid
|
||||
from savanna.storage.db import setup_storage
|
||||
from savanna.utils.api import render
|
||||
from savanna.utils.scheduler import setup_scheduler
|
||||
|
||||
@ -71,14 +73,22 @@ def make_app():
|
||||
def version_list():
|
||||
return render({
|
||||
"versions": [
|
||||
{"id": "v0.2", "status": "CURRENT"}
|
||||
{"id": "v1.0", "status": "CURRENT"}
|
||||
]
|
||||
})
|
||||
|
||||
app.register_blueprint(api_v02.rest, url_prefix='/v0.2')
|
||||
@app.teardown_request
|
||||
def teardown_request(_ex=None):
|
||||
# todo: how will this work in case of an exception?
|
||||
session = ctx().session
|
||||
if session.transaction:
|
||||
session.transaction.commit()
|
||||
|
||||
setup_storage(app)
|
||||
app.register_blueprint(api_v10.rest, url_prefix='/v1.0')
|
||||
|
||||
db_api.configure_db()
|
||||
setup_scheduler(app)
|
||||
setup_plugins()
|
||||
|
||||
def make_json_error(ex):
|
||||
status_code = (ex.code
|
||||
|
16
savanna/openstack/common/db/__init__.py
Normal file
@ -0,0 +1,16 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Cloudscaling Group, Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
106
savanna/openstack/common/db/api.py
Normal file
@ -0,0 +1,106 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2013 Rackspace Hosting
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Multiple DB API backend support.
|
||||
|
||||
Supported configuration options:
|
||||
|
||||
The following two parameters are in the 'database' group:
|
||||
`backend`: DB backend name or full module path to DB backend module.
|
||||
`use_tpool`: Enable thread pooling of DB API calls.
|
||||
|
||||
A DB backend module should implement a method named 'get_backend' which
|
||||
takes no arguments. The method can return any object that implements DB
|
||||
API methods.
|
||||
|
||||
*NOTE*: There are bugs in eventlet when using tpool combined with
|
||||
threading locks. The python logging module happens to use such locks. To
|
||||
work around this issue, be sure to specify thread=False with
|
||||
eventlet.monkey_patch().
|
||||
|
||||
A bug for eventlet has been filed here:
|
||||
|
||||
https://bitbucket.org/eventlet/eventlet/issue/137/
|
||||
"""
|
||||
import functools
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from savanna.openstack.common import importutils
|
||||
from savanna.openstack.common import lockutils
|
||||
|
||||
|
||||
db_opts = [
|
||||
cfg.StrOpt('backend',
|
||||
default='sqlalchemy',
|
||||
deprecated_name='db_backend',
|
||||
deprecated_group='DEFAULT',
|
||||
help='The backend to use for db'),
|
||||
cfg.BoolOpt('use_tpool',
|
||||
default=False,
|
||||
deprecated_name='dbapi_use_tpool',
|
||||
deprecated_group='DEFAULT',
|
||||
help='Enable the experimental use of thread pooling for '
|
||||
'all DB API calls')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(db_opts, 'database')
|
||||
|
||||
|
||||
class DBAPI(object):
|
||||
def __init__(self, backend_mapping=None):
|
||||
if backend_mapping is None:
|
||||
backend_mapping = {}
|
||||
self.__backend = None
|
||||
self.__backend_mapping = backend_mapping
|
||||
|
||||
@lockutils.synchronized('dbapi_backend', 'savanna-')
|
||||
def __get_backend(self):
|
||||
"""Get the actual backend. May be a module or an instance of
|
||||
a class. Doesn't matter to us. We do this synchronized as it's
|
||||
possible multiple greenthreads started very quickly trying to do
|
||||
DB calls and eventlet can switch threads before self.__backend gets
|
||||
assigned.
|
||||
"""
|
||||
if self.__backend:
|
||||
# Another thread assigned it
|
||||
return self.__backend
|
||||
backend_name = CONF.database.backend
|
||||
self.__use_tpool = CONF.database.use_tpool
|
||||
if self.__use_tpool:
|
||||
from eventlet import tpool
|
||||
self.__tpool = tpool
|
||||
# Import the untranslated name if we don't have a
|
||||
# mapping.
|
||||
backend_path = self.__backend_mapping.get(backend_name,
|
||||
backend_name)
|
||||
backend_mod = importutils.import_module(backend_path)
|
||||
self.__backend = backend_mod.get_backend()
|
||||
return self.__backend
|
||||
|
||||
def __getattr__(self, key):
|
||||
backend = self.__backend or self.__get_backend()
|
||||
attr = getattr(backend, key)
|
||||
if not self.__use_tpool or not hasattr(attr, '__call__'):
|
||||
return attr
|
||||
|
||||
def tpool_wrapper(*args, **kwargs):
|
||||
return self.__tpool.execute(attr, *args, **kwargs)
|
||||
|
||||
functools.update_wrapper(tpool_wrapper, attr)
|
||||
return tpool_wrapper
|
45
savanna/openstack/common/db/exception.py
Normal file
@ -0,0 +1,45 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""DB related custom exceptions."""
|
||||
|
||||
from savanna.openstack.common.gettextutils import _
|
||||
|
||||
|
||||
class DBError(Exception):
|
||||
"""Wraps an implementation specific exception."""
|
||||
def __init__(self, inner_exception=None):
|
||||
self.inner_exception = inner_exception
|
||||
super(DBError, self).__init__(str(inner_exception))
|
||||
|
||||
|
||||
class DBDuplicateEntry(DBError):
|
||||
"""Wraps an implementation specific exception."""
|
||||
def __init__(self, columns=[], inner_exception=None):
|
||||
self.columns = columns
|
||||
super(DBDuplicateEntry, self).__init__(inner_exception)
|
||||
|
||||
|
||||
class DBDeadlock(DBError):
|
||||
def __init__(self, inner_exception=None):
|
||||
super(DBDeadlock, self).__init__(inner_exception)
|
||||
|
||||
|
||||
class DBInvalidUnicodeParameter(Exception):
|
||||
message = _("Invalid Parameter: "
|
||||
"Unicode is not supported by the current database.")
|
16
savanna/openstack/common/db/sqlalchemy/__init__.py
Normal file
@ -0,0 +1,16 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Cloudscaling Group, Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
105
savanna/openstack/common/db/sqlalchemy/models.py
Normal file
@ -0,0 +1,105 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Piston Cloud Computing, Inc.
|
||||
# Copyright 2012 Cloudscaling Group, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
SQLAlchemy models.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer
|
||||
from sqlalchemy import DateTime
|
||||
from sqlalchemy.orm import object_mapper
|
||||
|
||||
from savanna.openstack.common.db.sqlalchemy.session import get_session
|
||||
from savanna.openstack.common import timeutils
|
||||
|
||||
|
||||
class ModelBase(object):
|
||||
"""Base class for models."""
|
||||
__table_initialized__ = False
|
||||
|
||||
def save(self, session=None):
|
||||
"""Save this object."""
|
||||
if not session:
|
||||
session = get_session()
|
||||
# NOTE(boris-42): This part of code should be look like:
|
||||
# session.add(self)
|
||||
# session.flush()
|
||||
# But there is a bug in sqlalchemy and eventlet that
|
||||
# raises NoneType exception if there is no running
|
||||
# transaction and rollback is called. As long as
|
||||
# sqlalchemy has this bug we have to create transaction
|
||||
# explicitly.
|
||||
with session.begin(subtransactions=True):
|
||||
session.add(self)
|
||||
session.flush()
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
setattr(self, key, value)
|
||||
|
||||
def __getitem__(self, key):
|
||||
return getattr(self, key)
|
||||
|
||||
def get(self, key, default=None):
|
||||
return getattr(self, key, default)
|
||||
|
||||
def __iter__(self):
|
||||
columns = dict(object_mapper(self).columns).keys()
|
||||
# NOTE(russellb): Allow models to specify other keys that can be looked
|
||||
# up, beyond the actual db columns. An example would be the 'name'
|
||||
# property for an Instance.
|
||||
if hasattr(self, '_extra_keys'):
|
||||
columns.extend(self._extra_keys())
|
||||
self._i = iter(columns)
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
n = self._i.next()
|
||||
return n, getattr(self, n)
|
||||
|
||||
def update(self, values):
|
||||
"""Make the model object behave like a dict."""
|
||||
for k, v in values.iteritems():
|
||||
setattr(self, k, v)
|
||||
|
||||
def iteritems(self):
|
||||
"""Make the model object behave like a dict.
|
||||
|
||||
Includes attributes from joins."""
|
||||
local = dict(self)
|
||||
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
|
||||
if not k[0] == '_'])
|
||||
local.update(joined)
|
||||
return local.iteritems()
|
||||
|
||||
|
||||
class TimestampMixin(object):
|
||||
created_at = Column(DateTime, default=timeutils.utcnow)
|
||||
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
|
||||
|
||||
|
||||
class SoftDeleteMixin(object):
|
||||
deleted_at = Column(DateTime)
|
||||
deleted = Column(Integer, default=0)
|
||||
|
||||
def soft_delete(self, session=None):
|
||||
"""Mark this object as deleted."""
|
||||
self.deleted = self.id
|
||||
self.deleted_at = timeutils.utcnow()
|
||||
self.save(session=session)
|
698
savanna/openstack/common/db/sqlalchemy/session.py
Normal file
@ -0,0 +1,698 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Session Handling for SQLAlchemy backend.
|
||||
|
||||
Initializing:
|
||||
|
||||
* Call set_defaults with at least the following kwargs:
|
||||
sql_connection, sqlite_db
|
||||
|
||||
Example:
|
||||
|
||||
session.set_defaults(
|
||||
sql_connection="sqlite:///var/lib/savanna/sqlite.db",
|
||||
sqlite_db="/var/lib/savanna/sqlite.db")
|
||||
|
||||
Recommended ways to use sessions within this framework:
|
||||
|
||||
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
|
||||
model_query() will implicitly use a session when called without one
|
||||
supplied. This is the ideal situation because it will allow queries
|
||||
to be automatically retried if the database connection is interrupted.
|
||||
|
||||
Note: Automatic retry will be enabled in a future patch.
|
||||
|
||||
It is generally fine to issue several queries in a row like this. Even though
|
||||
they may be run in separate transactions and/or separate sessions, each one
|
||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
||||
functionality should be handled at a logical level. For an example, look at
|
||||
the code around quotas and reservation_rollback().
|
||||
|
||||
Examples:
|
||||
|
||||
def get_foo(context, foo):
|
||||
return model_query(context, models.Foo).\
|
||||
filter_by(foo=foo).\
|
||||
first()
|
||||
|
||||
def update_foo(context, id, newfoo):
|
||||
model_query(context, models.Foo).\
|
||||
filter_by(id=id).\
|
||||
update({'foo': newfoo})
|
||||
|
||||
def create_foo(context, values):
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(values)
|
||||
foo_ref.save()
|
||||
return foo_ref
|
||||
|
||||
|
||||
* Within the scope of a single method, keeping all the reads and writes within
|
||||
the context managed by a single session. In this way, the session's __exit__
|
||||
handler will take care of calling flush() and commit() for you.
|
||||
If using this approach, you should not explicitly call flush() or commit().
|
||||
Any error within the context of the session will cause the session to emit
|
||||
a ROLLBACK. If the connection is dropped before this is possible, the
|
||||
database will implicitly rollback the transaction.
|
||||
|
||||
Note: statements in the session scope will not be automatically retried.
|
||||
|
||||
If you create models within the session, they need to be added, but you
|
||||
do not need to call model.save()
|
||||
|
||||
def create_many_foo(context, foos):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
for foo in foos:
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(foo)
|
||||
session.add(foo_ref)
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
foo_ref = model_query(context, models.Foo, session).\
|
||||
filter_by(id=foo_id).\
|
||||
first()
|
||||
model_query(context, models.Bar, session).\
|
||||
filter_by(id=foo_ref['bar_id']).\
|
||||
update({'bar': newbar})
|
||||
|
||||
Note: update_bar is a trivially simple example of using "with session.begin".
|
||||
Whereas create_many_foo is a good example of when a transaction is needed,
|
||||
it is always best to use as few queries as possible. The two queries in
|
||||
update_bar can be better expressed using a single query which avoids
|
||||
the need for an explicit transaction. It can be expressed like so:
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
subq = model_query(context, models.Foo.id).\
|
||||
filter_by(id=foo_id).\
|
||||
limit(1).\
|
||||
subquery()
|
||||
model_query(context, models.Bar).\
|
||||
filter_by(id=subq.as_scalar()).\
|
||||
update({'bar': newbar})
|
||||
|
||||
For reference, this emits approximately the following SQL statement:
|
||||
|
||||
UPDATE bar SET bar = ${newbar}
|
||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
||||
|
||||
* Passing an active session between methods. Sessions should only be passed
|
||||
to private methods. The private method must use a subtransaction; otherwise
|
||||
SQLAlchemy will throw an error when you call session.begin() on an existing
|
||||
transaction. Public methods should not accept a session parameter and should
|
||||
not be involved in sessions within the caller's scope.
|
||||
|
||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
||||
due to nesting transactions, and it is not possible to implicitly retry
|
||||
failed database operations when using this approach.
|
||||
|
||||
This also makes code somewhat more difficult to read and debug, because a
|
||||
single database transaction spans more than one method. Error handling
|
||||
becomes less clear in this situation. When this is needed for code clarity,
|
||||
it should be clearly documented.
|
||||
|
||||
def myfunc(foo):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
# do some database things
|
||||
bar = _private_func(foo, session)
|
||||
return bar
|
||||
|
||||
def _private_func(foo, session=None):
|
||||
if not session:
|
||||
session = get_session()
|
||||
with session.begin(subtransactions=True):
|
||||
# do some other database things
|
||||
return bar
|
||||
|
||||
|
||||
There are some things which it is best to avoid:
|
||||
|
||||
* Don't keep a transaction open any longer than necessary.
|
||||
|
||||
This means that your "with session.begin()" block should be as short
|
||||
as possible, while still containing all the related calls for that
|
||||
transaction.
|
||||
|
||||
* Avoid "with_lockmode('UPDATE')" when possible.
|
||||
|
||||
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
|
||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
||||
"gap" where no rows exist, and prevents any other writes to that space.
|
||||
This can effectively prevent any INSERT into a table by locking the gap
|
||||
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
||||
has an overly broad WHERE clause, or doesn't properly use an index.
|
||||
|
||||
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
||||
number of rows matching a query, and if only one row is returned,
|
||||
then issue the SELECT FOR UPDATE.
|
||||
|
||||
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
|
||||
However, this can not be done until the "deleted" columns are removed and
|
||||
proper UNIQUE constraints are added to the tables.
|
||||
|
||||
|
||||
Enabling soft deletes:
|
||||
|
||||
* To use/enable soft-deletes, the SoftDeleteMixin must be added
|
||||
to your model class. For example:
|
||||
|
||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
||||
pass
|
||||
|
||||
|
||||
Efficient use of soft deletes:
|
||||
|
||||
* There are two possible ways to mark a record as deleted:
|
||||
model.soft_delete() and query.soft_delete().
|
||||
|
||||
model.soft_delete() method works with single already fetched entry.
|
||||
query.soft_delete() makes only one db request for all entries that correspond
|
||||
to query.
|
||||
|
||||
* In almost all cases you should use query.soft_delete(). Some examples:
|
||||
|
||||
def soft_delete_bar():
|
||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
||||
if session is None:
|
||||
session = get_session()
|
||||
with session.begin(subtransactions=True):
|
||||
count = model_query(BarModel).\
|
||||
find(some_condition).\
|
||||
soft_delete(synchronize_session=True)
|
||||
# Here synchronize_session is required, because we
|
||||
# don't know what is going on in outer session.
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
* There is only one situation where model.soft_delete() is appropriate: when
|
||||
you fetch a single record, work with it, and mark it as deleted in the same
|
||||
transaction.
|
||||
|
||||
def soft_delete_bar_model():
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
||||
# Work with bar_ref
|
||||
bar_ref.soft_delete(session=session)
|
||||
|
||||
However, if you need to work with all entries that correspond to query and
|
||||
then soft delete them you should use query.soft_delete() method:
|
||||
|
||||
def soft_delete_multi_models():
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
query = model_query(BarModel, session=session).\
|
||||
find(some_condition)
|
||||
model_refs = query.all()
|
||||
# Work with model_refs
|
||||
query.soft_delete(synchronize_session=False)
|
||||
# synchronize_session=False should be set if there is no outer
|
||||
# session and these entries are not used after this.
|
||||
|
||||
When working with many rows, it is very important to use query.soft_delete,
|
||||
which issues a single query. Using model.soft_delete(), as in the following
|
||||
example, is very inefficient.
|
||||
|
||||
for bar_ref in bar_refs:
|
||||
bar_ref.soft_delete(session=session)
|
||||
# This will produce count(bar_refs) db requests.
|
||||
"""
|
||||
|
||||
import os.path
|
||||
import re
|
||||
import time
|
||||
|
||||
from eventlet import greenthread
|
||||
from oslo.config import cfg
|
||||
import six
|
||||
from sqlalchemy import exc as sqla_exc
|
||||
import sqlalchemy.interfaces
|
||||
from sqlalchemy.interfaces import PoolListener
|
||||
import sqlalchemy.orm
|
||||
from sqlalchemy.pool import NullPool, StaticPool
|
||||
from sqlalchemy.sql.expression import literal_column
|
||||
|
||||
from savanna.openstack.common.db import exception
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.openstack.common.gettextutils import _
|
||||
from savanna.openstack.common import timeutils
|
||||
|
||||
DEFAULT = 'DEFAULT'
|
||||
|
||||
sqlite_db_opts = [
|
||||
cfg.StrOpt('sqlite_db',
|
||||
default='savanna.sqlite',
|
||||
help='the filename to use with sqlite'),
|
||||
cfg.BoolOpt('sqlite_synchronous',
|
||||
default=True,
|
||||
help='If true, use synchronous mode for sqlite'),
|
||||
]
|
||||
|
||||
database_opts = [
|
||||
cfg.StrOpt('connection',
|
||||
default='sqlite:///' +
|
||||
os.path.abspath(os.path.join(os.path.dirname(__file__),
|
||||
'../', '$sqlite_db')),
|
||||
help='The SQLAlchemy connection string used to connect to the '
|
||||
'database',
|
||||
deprecated_name='sql_connection',
|
||||
deprecated_group=DEFAULT,
|
||||
secret=True),
|
||||
cfg.IntOpt('idle_timeout',
|
||||
default=3600,
|
||||
deprecated_name='sql_idle_timeout',
|
||||
deprecated_group=DEFAULT,
|
||||
help='timeout before idle sql connections are reaped'),
|
||||
cfg.IntOpt('min_pool_size',
|
||||
default=1,
|
||||
deprecated_name='sql_min_pool_size',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Minimum number of SQL connections to keep open in a '
|
||||
'pool'),
|
||||
cfg.IntOpt('max_pool_size',
|
||||
default=5,
|
||||
deprecated_name='sql_max_pool_size',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Maximum number of SQL connections to keep open in a '
|
||||
'pool'),
|
||||
cfg.IntOpt('max_retries',
|
||||
default=10,
|
||||
deprecated_name='sql_max_retries',
|
||||
deprecated_group=DEFAULT,
|
||||
help='maximum db connection retries during startup. '
|
||||
'(setting -1 implies an infinite retry count)'),
|
||||
cfg.IntOpt('retry_interval',
|
||||
default=10,
|
||||
deprecated_name='sql_retry_interval',
|
||||
deprecated_group=DEFAULT,
|
||||
help='interval between retries of opening a sql connection'),
|
||||
cfg.IntOpt('max_overflow',
|
||||
default=None,
|
||||
deprecated_name='sql_max_overflow',
|
||||
deprecated_group=DEFAULT,
|
||||
help='If set, use this value for max_overflow with sqlalchemy'),
|
||||
cfg.IntOpt('connection_debug',
|
||||
default=0,
|
||||
deprecated_name='sql_connection_debug',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Verbosity of SQL debugging information. 0=None, '
|
||||
'100=Everything'),
|
||||
cfg.BoolOpt('connection_trace',
|
||||
default=False,
|
||||
deprecated_name='sql_connection_trace',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Add python stack traces to SQL as comment strings'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(sqlite_db_opts)
|
||||
CONF.register_opts(database_opts, 'database')
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_ENGINE = None
|
||||
_MAKER = None
|
||||
|
||||
|
||||
def set_defaults(sql_connection, sqlite_db):
|
||||
"""Set defaults for configuration variables."""
|
||||
cfg.set_defaults(database_opts,
|
||||
connection=sql_connection)
|
||||
cfg.set_defaults(sqlite_db_opts,
|
||||
sqlite_db=sqlite_db)
|
||||
|
||||
|
||||
def cleanup():
|
||||
global _ENGINE, _MAKER
|
||||
|
||||
if _MAKER:
|
||||
_MAKER.close_all()
|
||||
_MAKER = None
|
||||
if _ENGINE:
|
||||
_ENGINE.dispose()
|
||||
_ENGINE = None
|
||||
|
||||
|
||||
class SqliteForeignKeysListener(PoolListener):
|
||||
"""
|
||||
Ensures that the foreign key constraints are enforced in SQLite.
|
||||
|
||||
The foreign key constraints are disabled by default in SQLite,
|
||||
so the foreign key constraints will be enabled here for every
|
||||
database connection.
|
||||
"""
|
||||
def connect(self, dbapi_con, con_record):
|
||||
dbapi_con.execute('pragma foreign_keys=ON')
|
||||
|
||||
|
||||
def get_session(autocommit=True, expire_on_commit=False,
|
||||
sqlite_fk=False):
|
||||
"""Return a SQLAlchemy session."""
|
||||
global _MAKER
|
||||
|
||||
if _MAKER is None:
|
||||
engine = get_engine(sqlite_fk=sqlite_fk)
|
||||
_MAKER = get_maker(engine, autocommit, expire_on_commit)
|
||||
|
||||
session = _MAKER()
|
||||
return session
|
||||
|
||||
|
||||
# note(boris-42): In current versions of DB backends unique constraint
|
||||
# violation messages follow the structure:
|
||||
#
|
||||
# sqlite:
|
||||
# 1 column - (IntegrityError) column c1 is not unique
|
||||
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
||||
#
|
||||
# postgres:
|
||||
# 1 column - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "users_c1_key"
|
||||
# N columns - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "name_of_our_constraint"
|
||||
#
|
||||
# mysql:
|
||||
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
||||
# 'c1'")
|
||||
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
||||
# with -' for key 'name_of_our_constraint'")
|
||||
_DUP_KEY_RE_DB = {
|
||||
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
|
||||
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
|
||||
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
|
||||
}
|
||||
|
||||
|
||||
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
|
||||
"""
|
||||
Raise a DBDuplicateEntry exception if the given integrity error wraps a
|
||||
unique constraint violation.
|
||||
"""
|
||||
|
||||
def get_columns_from_uniq_cons_or_name(columns):
|
||||
# note(vsergeyev): UniqueConstraint name convention: "uniq_t$c1$c2"
|
||||
# where `t` it is table name and columns `c1`, `c2`
|
||||
# are in UniqueConstraint.
|
||||
uniqbase = "uniq_"
|
||||
if not columns.startswith(uniqbase):
|
||||
if engine_name == "postgresql":
|
||||
return [columns[columns.index("_") + 1:columns.rindex("_")]]
|
||||
return [columns]
|
||||
return columns[len(uniqbase):].split("$")[1:]
|
||||
|
||||
if engine_name not in ["mysql", "sqlite", "postgresql"]:
|
||||
return
|
||||
|
||||
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
|
||||
if not m:
|
||||
return
|
||||
columns = m.group(1)
|
||||
|
||||
if engine_name == "sqlite":
|
||||
columns = columns.strip().split(", ")
|
||||
else:
|
||||
columns = get_columns_from_uniq_cons_or_name(columns)
|
||||
raise exception.DBDuplicateEntry(columns, integrity_error)
|
||||
|
||||
|
||||
# NOTE(comstud): In current versions of DB backends, Deadlock violation
|
||||
# messages follow the structure:
|
||||
#
|
||||
# mysql:
|
||||
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
|
||||
# 'restarting transaction') <query_str> <query_args>
|
||||
_DEADLOCK_RE_DB = {
|
||||
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
|
||||
}
|
||||
|
||||
|
||||
def _raise_if_deadlock_error(operational_error, engine_name):
|
||||
"""
|
||||
Raise DBDeadlock exception if OperationalError contains a Deadlock
|
||||
condition.
|
||||
"""
|
||||
re = _DEADLOCK_RE_DB.get(engine_name)
|
||||
if re is None:
|
||||
return
|
||||
m = re.match(operational_error.message)
|
||||
if not m:
|
||||
return
|
||||
raise exception.DBDeadlock(operational_error)
|
||||
|
||||
|
||||
def _wrap_db_error(f):
|
||||
def _wrap(*args, **kwargs):
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except UnicodeEncodeError:
|
||||
raise exception.DBInvalidUnicodeParameter()
|
||||
# note(boris-42): We should catch unique constraint violation and
|
||||
# wrap it by our own DBDuplicateEntry exception. Unique constraint
|
||||
# violation is wrapped by IntegrityError.
|
||||
except sqla_exc.OperationalError as e:
|
||||
_raise_if_deadlock_error(e, get_engine().name)
|
||||
# NOTE(comstud): A lot of code is checking for OperationalError
|
||||
# so let's not wrap it for now.
|
||||
raise
|
||||
except sqla_exc.IntegrityError as e:
|
||||
# note(boris-42): SqlAlchemy doesn't unify errors from different
|
||||
# DBs so we must do this. Also in some tables (for example
|
||||
# instance_types) there are more than one unique constraint. This
|
||||
# means we should get names of columns, which values violate
|
||||
# unique constraint, from error message.
|
||||
_raise_if_duplicate_entry_error(e, get_engine().name)
|
||||
raise exception.DBError(e)
|
||||
except Exception as e:
|
||||
LOG.exception(_('DB exception wrapped.'))
|
||||
raise exception.DBError(e)
|
||||
_wrap.func_name = f.func_name
|
||||
return _wrap
|
||||
|
||||
|
||||
def get_engine(sqlite_fk=False):
|
||||
"""Return a SQLAlchemy engine."""
|
||||
global _ENGINE
|
||||
if _ENGINE is None:
|
||||
_ENGINE = create_engine(CONF.database.connection,
|
||||
sqlite_fk=sqlite_fk)
|
||||
return _ENGINE
|
||||
|
||||
|
||||
def _synchronous_switch_listener(dbapi_conn, connection_rec):
|
||||
"""Switch sqlite connections to non-synchronous mode."""
|
||||
dbapi_conn.execute("PRAGMA synchronous = OFF")
|
||||
|
||||
|
||||
def _add_regexp_listener(dbapi_con, con_record):
|
||||
"""Add REGEXP function to sqlite connections."""
|
||||
|
||||
def regexp(expr, item):
|
||||
reg = re.compile(expr)
|
||||
return reg.search(six.text_type(item)) is not None
|
||||
dbapi_con.create_function('regexp', 2, regexp)
|
||||
|
||||
|
||||
def _greenthread_yield(dbapi_con, con_record):
|
||||
"""
|
||||
Ensure other greenthreads get a chance to execute by forcing a context
|
||||
switch. With common database backends (eg MySQLdb and sqlite), there is
|
||||
no implicit yield caused by network I/O since they are implemented by
|
||||
C libraries that eventlet cannot monkey patch.
|
||||
"""
|
||||
greenthread.sleep(0)
|
||||
|
||||
|
||||
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
|
||||
"""
|
||||
Ensures that MySQL connections checked out of the
|
||||
pool are alive.
|
||||
|
||||
Borrowed from:
|
||||
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
|
||||
"""
|
||||
try:
|
||||
dbapi_conn.cursor().execute('select 1')
|
||||
except dbapi_conn.OperationalError as ex:
|
||||
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
|
||||
LOG.warn(_('Got mysql server has gone away: %s'), ex)
|
||||
raise sqla_exc.DisconnectionError("Database server went away")
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def _is_db_connection_error(args):
|
||||
"""Return True if error in connecting to db."""
|
||||
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
|
||||
# to support Postgres and others.
|
||||
conn_err_codes = ('2002', '2003', '2006')
|
||||
for err_code in conn_err_codes:
|
||||
if args.find(err_code) != -1:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def create_engine(sql_connection, sqlite_fk=False):
|
||||
"""Return a new SQLAlchemy engine."""
|
||||
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
|
||||
|
||||
engine_args = {
|
||||
"pool_recycle": CONF.database.idle_timeout,
|
||||
"echo": False,
|
||||
'convert_unicode': True,
|
||||
}
|
||||
|
||||
# Map our SQL debug level to SQLAlchemy's options
|
||||
if CONF.database.connection_debug >= 100:
|
||||
engine_args['echo'] = 'debug'
|
||||
elif CONF.database.connection_debug >= 50:
|
||||
engine_args['echo'] = True
|
||||
|
||||
if "sqlite" in connection_dict.drivername:
|
||||
if sqlite_fk:
|
||||
engine_args["listeners"] = [SqliteForeignKeysListener()]
|
||||
engine_args["poolclass"] = NullPool
|
||||
|
||||
if CONF.database.connection == "sqlite://":
|
||||
engine_args["poolclass"] = StaticPool
|
||||
engine_args["connect_args"] = {'check_same_thread': False}
|
||||
else:
|
||||
engine_args['pool_size'] = CONF.database.max_pool_size
|
||||
if CONF.database.max_overflow is not None:
|
||||
engine_args['max_overflow'] = CONF.database.max_overflow
|
||||
|
||||
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
|
||||
|
||||
sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)
|
||||
|
||||
if 'mysql' in connection_dict.drivername:
|
||||
sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
|
||||
elif 'sqlite' in connection_dict.drivername:
|
||||
if not CONF.sqlite_synchronous:
|
||||
sqlalchemy.event.listen(engine, 'connect',
|
||||
_synchronous_switch_listener)
|
||||
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
|
||||
|
||||
if (CONF.database.connection_trace and
|
||||
engine.dialect.dbapi.__name__ == 'MySQLdb'):
|
||||
_patch_mysqldb_with_stacktrace_comments()
|
||||
|
||||
try:
|
||||
engine.connect()
|
||||
except sqla_exc.OperationalError as e:
|
||||
if not _is_db_connection_error(e.args[0]):
|
||||
raise
|
||||
|
||||
remaining = CONF.database.max_retries
|
||||
if remaining == -1:
|
||||
remaining = 'infinite'
|
||||
while True:
|
||||
msg = _('SQL connection failed. %s attempts left.')
|
||||
LOG.warn(msg % remaining)
|
||||
if remaining != 'infinite':
|
||||
remaining -= 1
|
||||
time.sleep(CONF.database.retry_interval)
|
||||
try:
|
||||
engine.connect()
|
||||
break
|
||||
except sqla_exc.OperationalError as e:
|
||||
if (remaining != 'infinite' and remaining == 0) or \
|
||||
not _is_db_connection_error(e.args[0]):
|
||||
raise
|
||||
return engine
|
||||
|
||||
|
||||
class Query(sqlalchemy.orm.query.Query):
|
||||
"""Subclass of sqlalchemy.query with soft_delete() method."""
|
||||
def soft_delete(self, synchronize_session='evaluate'):
|
||||
return self.update({'deleted': literal_column('id'),
|
||||
'updated_at': literal_column('updated_at'),
|
||||
'deleted_at': timeutils.utcnow()},
|
||||
synchronize_session=synchronize_session)
|
||||
|
||||
|
||||
class Session(sqlalchemy.orm.session.Session):
|
||||
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
|
||||
@_wrap_db_error
|
||||
def query(self, *args, **kwargs):
|
||||
return super(Session, self).query(*args, **kwargs)
|
||||
|
||||
@_wrap_db_error
|
||||
def flush(self, *args, **kwargs):
|
||||
return super(Session, self).flush(*args, **kwargs)
|
||||
|
||||
@_wrap_db_error
|
||||
def execute(self, *args, **kwargs):
|
||||
return super(Session, self).execute(*args, **kwargs)
|
||||
|
||||
|
||||
def get_maker(engine, autocommit=True, expire_on_commit=False):
|
||||
"""Return a SQLAlchemy sessionmaker using the given engine."""
|
||||
return sqlalchemy.orm.sessionmaker(bind=engine,
|
||||
class_=Session,
|
||||
autocommit=autocommit,
|
||||
expire_on_commit=expire_on_commit,
|
||||
query_cls=Query)
|
||||
|
||||
|
||||
def _patch_mysqldb_with_stacktrace_comments():
|
||||
"""Adds current stack trace as a comment in queries by patching
|
||||
MySQLdb.cursors.BaseCursor._do_query.
|
||||
"""
|
||||
import MySQLdb.cursors
|
||||
import traceback
|
||||
|
||||
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
|
||||
|
||||
def _do_query(self, q):
|
||||
stack = ''
|
||||
for file, line, method, function in traceback.extract_stack():
|
||||
# exclude various common things from trace
|
||||
if file.endswith('session.py') and method == '_do_query':
|
||||
continue
|
||||
if file.endswith('api.py') and method == 'wrapper':
|
||||
continue
|
||||
if file.endswith('utils.py') and method == '_inner':
|
||||
continue
|
||||
if file.endswith('exception.py') and method == '_wrap':
|
||||
continue
|
||||
# db/api is just a wrapper around db/sqlalchemy/api
|
||||
if file.endswith('db/api.py'):
|
||||
continue
|
||||
# only trace inside savanna
|
||||
index = file.rfind('savanna')
|
||||
if index == -1:
|
||||
continue
|
||||
stack += "File:%s:%s Method:%s() Line:%s | " \
|
||||
% (file[index:], line, method, function)
|
||||
|
||||
# strip trailing " | " from stack
|
||||
if stack:
|
||||
stack = stack[:-3]
|
||||
qq = "%s /* %s */" % (q, stack)
|
||||
else:
|
||||
qq = q
|
||||
old_mysql_do_query(self, qq)
|
||||
|
||||
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
|
132
savanna/openstack/common/db/sqlalchemy/utils.py
Normal file
@ -0,0 +1,132 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2010-2011 OpenStack Foundation.
|
||||
# Copyright 2012 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Implementation of paginate query."""
|
||||
|
||||
import sqlalchemy
|
||||
|
||||
from savanna.openstack.common.gettextutils import _
|
||||
from savanna.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InvalidSortKey(Exception):
|
||||
message = _("Sort key supplied was not valid.")
|
||||
|
||||
|
||||
# copy from glance/db/sqlalchemy/api.py
|
||||
def paginate_query(query, model, limit, sort_keys, marker=None,
|
||||
sort_dir=None, sort_dirs=None):
|
||||
"""Returns a query with sorting / pagination criteria added.
|
||||
|
||||
Pagination works by requiring a unique sort_key, specified by sort_keys.
|
||||
(If sort_keys is not unique, then we risk looping through values.)
|
||||
We use the last row in the previous page as the 'marker' for pagination.
|
||||
So we must return values that follow the passed marker in the order.
|
||||
With a single-valued sort_key, this would be easy: sort_key > X.
|
||||
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
|
||||
the lexicographical ordering:
|
||||
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
|
||||
|
||||
We also have to cope with different sort_directions.
|
||||
|
||||
Typically, the id of the last row is used as the client-facing pagination
|
||||
marker, then the actual marker object must be fetched from the db and
|
||||
passed in to us as marker.
|
||||
|
||||
:param query: the query object to which we should add paging/sorting
|
||||
:param model: the ORM model class
|
||||
:param limit: maximum number of items to return
|
||||
:param sort_keys: array of attributes by which results should be sorted
|
||||
:param marker: the last item of the previous page; we return the next
|
||||
results after this value.
|
||||
:param sort_dir: direction in which results should be sorted (asc, desc)
|
||||
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
|
||||
|
||||
:rtype: sqlalchemy.orm.query.Query
|
||||
:return: The query with sorting/pagination added.
|
||||
"""
|
||||
|
||||
if 'id' not in sort_keys:
|
||||
# TODO(justinsb): If this ever gives a false-positive, check
|
||||
# the actual primary key, rather than assuming it is 'id'
|
||||
LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
|
||||
|
||||
assert(not (sort_dir and sort_dirs))
|
||||
|
||||
# Default the sort direction to ascending
|
||||
if sort_dirs is None and sort_dir is None:
|
||||
sort_dir = 'asc'
|
||||
|
||||
# Ensure a per-column sort direction
|
||||
if sort_dirs is None:
|
||||
sort_dirs = [sort_dir for _sort_key in sort_keys]
|
||||
|
||||
assert(len(sort_dirs) == len(sort_keys))
|
||||
|
||||
# Add sorting
|
||||
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
|
||||
sort_dir_func = {
|
||||
'asc': sqlalchemy.asc,
|
||||
'desc': sqlalchemy.desc,
|
||||
}[current_sort_dir]
|
||||
|
||||
try:
|
||||
sort_key_attr = getattr(model, current_sort_key)
|
||||
except AttributeError:
|
||||
raise InvalidSortKey()
|
||||
query = query.order_by(sort_dir_func(sort_key_attr))
|
||||
|
||||
# Add pagination
|
||||
if marker is not None:
|
||||
marker_values = []
|
||||
for sort_key in sort_keys:
|
||||
v = getattr(marker, sort_key)
|
||||
marker_values.append(v)
|
||||
|
||||
# Build up an array of sort criteria as in the docstring
|
||||
criteria_list = []
|
||||
for i in range(0, len(sort_keys)):
|
||||
crit_attrs = []
|
||||
for j in range(0, i):
|
||||
model_attr = getattr(model, sort_keys[j])
|
||||
crit_attrs.append((model_attr == marker_values[j]))
|
||||
|
||||
model_attr = getattr(model, sort_keys[i])
|
||||
if sort_dirs[i] == 'desc':
|
||||
crit_attrs.append((model_attr < marker_values[i]))
|
||||
elif sort_dirs[i] == 'asc':
|
||||
crit_attrs.append((model_attr > marker_values[i]))
|
||||
else:
|
||||
raise ValueError(_("Unknown sort direction, "
|
||||
"must be 'desc' or 'asc'"))
|
||||
|
||||
criteria = sqlalchemy.sql.and_(*crit_attrs)
|
||||
criteria_list.append(criteria)
|
||||
|
||||
f = sqlalchemy.sql.or_(*criteria_list)
|
||||
query = query.filter(f)
|
||||
|
||||
if limit is not None:
|
||||
query = query.limit(limit)
|
||||
|
||||
return query
|
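A short usage example for paginate_query (hypothetical: it assumes a plain SQLAlchemy session and the Cluster model from savanna/db/models.py, which has 'name' and 'id' columns):

    # Illustrative only.
    from savanna.openstack.common.db.sqlalchemy import utils
    import savanna.db.models as m

    def list_clusters_page(session, marker=None, limit=20):
        query = session.query(m.Cluster)
        # Sort by name, then id; including 'id' keeps the compound
        # sort key unique, as the docstring above recommends.
        query = utils.paginate_query(query, m.Cluster, limit,
                                     sort_keys=['name', 'id'],
                                     marker=marker, sort_dir='asc')
        return query.all()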
35
savanna/openstack/common/fileutils.py
Normal file
@ -0,0 +1,35 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import errno
|
||||
import os
|
||||
|
||||
|
||||
def ensure_tree(path):
|
||||
"""Create a directory (and any ancestor directories required)
|
||||
|
||||
:param path: Directory to create
|
||||
"""
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as exc:
|
||||
if exc.errno == errno.EEXIST:
|
||||
if not os.path.isdir(path):
|
||||
raise
|
||||
else:
|
||||
raise
|
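A typical call site for ensure_tree would look like this (the path is purely illustrative):

    from savanna.openstack.common import fileutils

    # Create the directory tree if it does not exist yet; an existing
    # directory is not an error.
    fileutils.ensure_tree('/var/lib/savanna/locks')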
@ -41,6 +41,8 @@ import json
|
||||
import types
|
||||
import xmlrpclib
|
||||
|
||||
import six
|
||||
|
||||
from savanna.openstack.common import timeutils
|
||||
|
||||
|
||||
@ -93,7 +95,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
|
||||
# value of itertools.count doesn't get caught by nasty_type_tests
|
||||
# and results in infinite loop when list(value) is called.
|
||||
if type(value) == itertools.count:
|
||||
return unicode(value)
|
||||
return six.text_type(value)
|
||||
|
||||
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
|
||||
# tests that raise an exception in a mocked method that
|
||||
@ -137,12 +139,12 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
|
||||
return recursive(value.__dict__, level=level + 1)
|
||||
else:
|
||||
if any(test(value) for test in _nasty_type_tests):
|
||||
return unicode(value)
|
||||
return six.text_type(value)
|
||||
return value
|
||||
except TypeError:
|
||||
# Class objects are tricky since they may define something like
|
||||
# __iter__ defined but it isn't callable as list().
|
||||
return unicode(value)
|
||||
return six.text_type(value)
|
||||
|
||||
|
||||
def dumps(value, default=to_primitive, **kwargs):
|
||||
|
@ -15,7 +15,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Greenthread local storage of variables using weak references"""
|
||||
"""Greenthread local db of variables using weak references"""
|
||||
|
||||
import weakref
|
||||
|
||||
@ -41,7 +41,7 @@ class WeakLocal(corolocal.local):
|
||||
store = WeakLocal()
|
||||
|
||||
# A "weak" store uses weak references and allows an object to fall out of scope
|
||||
# when it falls out of scope in the code that uses the thread local storage. A
|
||||
# when it falls out of scope in the code that uses the thread local db. A
|
||||
# "strong" store will hold a reference to the object so that it never falls out
|
||||
# of scope.
|
||||
weak_store = WeakLocal()
|
||||
|
278
savanna/openstack/common/lockutils.py
Normal file
@ -0,0 +1,278 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import errno
|
||||
import functools
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import time
|
||||
import weakref
|
||||
|
||||
from eventlet import semaphore
|
||||
from oslo.config import cfg
|
||||
|
||||
from savanna.openstack.common import fileutils
|
||||
from savanna.openstack.common.gettextutils import _
|
||||
from savanna.openstack.common import local
|
||||
from savanna.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
util_opts = [
|
||||
cfg.BoolOpt('disable_process_locking', default=False,
|
||||
help='Whether to disable inter-process locks'),
|
||||
cfg.StrOpt('lock_path',
|
||||
help=('Directory to use for lock files. Default to a '
|
||||
'temp directory'))
|
||||
]
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(util_opts)
|
||||
|
||||
|
||||
def set_defaults(lock_path):
|
||||
cfg.set_defaults(util_opts, lock_path=lock_path)
|
||||
|
||||
|
||||
class _InterProcessLock(object):
|
||||
"""Lock implementation which allows multiple locks, working around
|
||||
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
|
||||
not require any cleanup. Since the lock is always held on a file
|
||||
descriptor rather than outside of the process, the lock gets dropped
|
||||
automatically if the process crashes, even if __exit__ is not executed.
|
||||
|
||||
There are no guarantees regarding usage by multiple green threads in a
|
||||
single process here. This lock works only between processes. Exclusive
|
||||
access between local threads should be achieved using the semaphores
|
||||
in the @synchronized decorator.
|
||||
|
||||
Note these locks are released when the descriptor is closed, so it's not
|
||||
safe to close the file descriptor while another green thread holds the
|
||||
lock. Just opening and closing the lock file can break synchronisation,
|
||||
so lock files must be accessed only using this abstraction.
|
||||
"""
|
||||
|
||||
def __init__(self, name):
|
||||
self.lockfile = None
|
||||
self.fname = name
|
||||
|
||||
def __enter__(self):
|
||||
self.lockfile = open(self.fname, 'w')
|
||||
|
||||
while True:
|
||||
try:
|
||||
# Using non-blocking locks since green threads are not
|
||||
# patched to deal with blocking locking calls.
|
||||
# Also upon reading the MSDN docs for locking(), it seems
|
||||
# to have a laughable 10 attempts "blocking" mechanism.
|
||||
self.trylock()
|
||||
return self
|
||||
except IOError as e:
|
||||
if e.errno in (errno.EACCES, errno.EAGAIN):
|
||||
# external locks synchronise things like iptables
|
||||
# updates - give it some time to prevent busy spinning
|
||||
time.sleep(0.01)
|
||||
else:
|
||||
raise
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
try:
|
||||
self.unlock()
|
||||
self.lockfile.close()
|
||||
except IOError:
|
||||
LOG.exception(_("Could not release the acquired lock `%s`"),
|
||||
self.fname)
|
||||
|
||||
def trylock(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def unlock(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class _WindowsLock(_InterProcessLock):
|
||||
def trylock(self):
|
||||
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
|
||||
|
||||
def unlock(self):
|
||||
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
|
||||
|
||||
|
||||
class _PosixLock(_InterProcessLock):
|
||||
def trylock(self):
|
||||
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
|
||||
def unlock(self):
|
||||
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
|
||||
|
||||
|
||||
if os.name == 'nt':
|
||||
import msvcrt
|
||||
InterProcessLock = _WindowsLock
|
||||
else:
|
||||
import fcntl
|
||||
InterProcessLock = _PosixLock
|
||||
|
||||
_semaphores = weakref.WeakValueDictionary()
|
||||
|
||||
|
||||
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
|
||||
"""Synchronization decorator.
|
||||
|
||||
Decorating a method like so::
|
||||
|
||||
@synchronized('mylock')
|
||||
def foo(self, *args):
|
||||
...
|
||||
|
||||
ensures that only one thread will execute the foo method at a time.
|
||||
|
||||
Different methods can share the same lock::
|
||||
|
||||
@synchronized('mylock')
|
||||
def foo(self, *args):
|
||||
...
|
||||
|
||||
@synchronized('mylock')
|
||||
def bar(self, *args):
|
||||
...
|
||||
|
||||
This way only one of either foo or bar can be executing at a time.
|
||||
|
||||
The lock_file_prefix argument is used to provide lock files on disk with a
|
||||
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
|
||||
|
||||
The external keyword argument denotes whether this lock should work across
|
||||
multiple processes. This means that if two different workers both run a
|
||||
method decorated with @synchronized('mylock', external=True), only one
|
||||
of them will execute at a time.
|
||||
|
||||
The lock_path keyword argument is used to specify a special location for
|
||||
external lock files to live. If nothing is set, then CONF.lock_path is
|
||||
used as a default.
|
||||
"""
|
||||
|
||||
def wrap(f):
|
||||
@functools.wraps(f)
|
||||
def inner(*args, **kwargs):
|
||||
# NOTE(soren): If we ever go natively threaded, this will be racy.
|
||||
# See http://stackoverflow.com/questions/5390569/dyn
|
||||
# amically-allocating-and-destroying-mutexes
|
||||
sem = _semaphores.get(name, semaphore.Semaphore())
|
||||
if name not in _semaphores:
|
||||
# this check is not racy - we're already holding ref locally
|
||||
# so GC won't remove the item and there was no IO switch
|
||||
# (only valid in greenthreads)
|
||||
_semaphores[name] = sem
|
||||
|
||||
with sem:
|
||||
LOG.debug(_('Got semaphore "%(lock)s" for method '
|
||||
'"%(method)s"...'), {'lock': name,
|
||||
'method': f.__name__})
|
||||
|
||||
# NOTE(mikal): I know this looks odd
|
||||
if not hasattr(local.strong_store, 'locks_held'):
|
||||
local.strong_store.locks_held = []
|
||||
local.strong_store.locks_held.append(name)
|
||||
|
||||
try:
|
||||
if external and not CONF.disable_process_locking:
|
||||
LOG.debug(_('Attempting to grab file lock "%(lock)s" '
|
||||
'for method "%(method)s"...'),
|
||||
{'lock': name, 'method': f.__name__})
|
||||
cleanup_dir = False
|
||||
|
||||
# We need a copy of lock_path because it is non-local
|
||||
local_lock_path = lock_path
|
||||
if not local_lock_path:
|
||||
local_lock_path = CONF.lock_path
|
||||
|
||||
if not local_lock_path:
|
||||
cleanup_dir = True
|
||||
local_lock_path = tempfile.mkdtemp()
|
||||
|
||||
if not os.path.exists(local_lock_path):
|
||||
fileutils.ensure_tree(local_lock_path)
|
||||
|
||||
# NOTE(mikal): the lock name cannot contain directory
|
||||
# separators
|
||||
safe_name = name.replace(os.sep, '_')
|
||||
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
|
||||
lock_file_path = os.path.join(local_lock_path,
|
||||
lock_file_name)
|
||||
|
||||
try:
|
||||
lock = InterProcessLock(lock_file_path)
|
||||
with lock:
|
||||
LOG.debug(_('Got file lock "%(lock)s" at '
|
||||
'%(path)s for method '
|
||||
'"%(method)s"...'),
|
||||
{'lock': name,
|
||||
'path': lock_file_path,
|
||||
'method': f.__name__})
|
||||
retval = f(*args, **kwargs)
|
||||
finally:
|
||||
LOG.debug(_('Released file lock "%(lock)s" at '
|
||||
'%(path)s for method "%(method)s"...'),
|
||||
{'lock': name,
|
||||
'path': lock_file_path,
|
||||
'method': f.__name__})
|
||||
# NOTE(vish): This removes the tempdir if we needed
|
||||
# to create one. This is used to
|
||||
# cleanup the locks left behind by unit
|
||||
# tests.
|
||||
if cleanup_dir:
|
||||
shutil.rmtree(local_lock_path)
|
||||
else:
|
||||
retval = f(*args, **kwargs)
|
||||
|
||||
finally:
|
||||
local.strong_store.locks_held.remove(name)
|
||||
|
||||
return retval
|
||||
return inner
|
||||
return wrap
|
||||
|
||||
|
||||
def synchronized_with_prefix(lock_file_prefix):
|
||||
"""Partial object generator for the synchronization decorator.
|
||||
|
||||
Redefine @synchronized in each project like so::
|
||||
|
||||
(in nova/utils.py)
|
||||
from nova.openstack.common import lockutils
|
||||
|
||||
synchronized = lockutils.synchronized_with_prefix('nova-')
|
||||
|
||||
|
||||
(in nova/foo.py)
|
||||
from nova import utils
|
||||
|
||||
@utils.synchronized('mylock')
|
||||
def bar(self, *args):
|
||||
...
|
||||
|
||||
The lock_file_prefix argument is used to provide lock files on disk with a
|
||||
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
|
||||
"""
|
||||
|
||||
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
|
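A hedged sketch of how Savanna code could adopt the lockutils module added above, following the synchronized_with_prefix() docstring; the caller module and lock name are illustrative only:

    from savanna.openstack.common import lockutils

    synchronized = lockutils.synchronized_with_prefix('savanna-')

    @synchronized('cluster-ops', external=True)
    def update_cluster_state(cluster_id, state):
        # Only one worker process at a time runs this body; the lock file is
        # created under CONF.lock_path (or a temporary directory if unset).
        pass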
@ -43,12 +43,11 @@ import traceback
|
||||
from oslo.config import cfg
|
||||
|
||||
from savanna.openstack.common.gettextutils import _
|
||||
from savanna.openstack.common import importutils
|
||||
from savanna.openstack.common import jsonutils
|
||||
from savanna.openstack.common import local
|
||||
from savanna.openstack.common import notifier
|
||||
|
||||
|
||||
_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
|
||||
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
|
||||
|
||||
common_cli_opts = [
|
||||
@ -73,11 +72,13 @@ logging_cli_opts = [
|
||||
'documentation for details on logging configuration '
|
||||
'files.'),
|
||||
cfg.StrOpt('log-format',
|
||||
default=_DEFAULT_LOG_FORMAT,
|
||||
default=None,
|
||||
metavar='FORMAT',
|
||||
help='A logging.Formatter log message format string which may '
|
||||
'use any of the available logging.LogRecord attributes. '
|
||||
'Default: %(default)s'),
|
||||
'This option is deprecated. Please use '
|
||||
'logging_context_format_string and '
|
||||
'logging_default_format_string instead.'),
|
||||
cfg.StrOpt('log-date-format',
|
||||
default=_DEFAULT_LOG_DATE_FORMAT,
|
||||
metavar='DATE_FORMAT',
|
||||
@ -207,7 +208,27 @@ def _get_log_file_path(binary=None):
|
||||
return '%s.log' % (os.path.join(logdir, binary),)
|
||||
|
||||
|
||||
class ContextAdapter(logging.LoggerAdapter):
|
||||
class BaseLoggerAdapter(logging.LoggerAdapter):
|
||||
|
||||
def audit(self, msg, *args, **kwargs):
|
||||
self.log(logging.AUDIT, msg, *args, **kwargs)
|
||||
|
||||
|
||||
class LazyAdapter(BaseLoggerAdapter):
|
||||
def __init__(self, name='unknown', version='unknown'):
|
||||
self._logger = None
|
||||
self.extra = {}
|
||||
self.name = name
|
||||
self.version = version
|
||||
|
||||
@property
|
||||
def logger(self):
|
||||
if not self._logger:
|
||||
self._logger = getLogger(self.name, self.version)
|
||||
return self._logger
|
||||
|
||||
|
||||
class ContextAdapter(BaseLoggerAdapter):
|
||||
warn = logging.LoggerAdapter.warning
|
||||
|
||||
def __init__(self, logger, project_name, version_string):
|
||||
@ -215,8 +236,9 @@ class ContextAdapter(logging.LoggerAdapter):
|
||||
self.project = project_name
|
||||
self.version = version_string
|
||||
|
||||
def audit(self, msg, *args, **kwargs):
|
||||
self.log(logging.AUDIT, msg, *args, **kwargs)
|
||||
@property
|
||||
def handlers(self):
|
||||
return self.logger.handlers
|
||||
|
||||
def deprecated(self, msg, *args, **kwargs):
|
||||
stdmsg = _("Deprecated: %s") % msg
|
||||
@ -300,17 +322,6 @@ class JSONFormatter(logging.Formatter):
|
||||
return jsonutils.dumps(message)
|
||||
|
||||
|
||||
class PublishErrorsHandler(logging.Handler):
|
||||
def emit(self, record):
|
||||
if ('savanna.openstack.common.notifier.log_notifier' in
|
||||
CONF.notification_driver):
|
||||
return
|
||||
notifier.api.notify(None, 'error.publisher',
|
||||
'error_notification',
|
||||
notifier.api.ERROR,
|
||||
dict(error=record.msg))
|
||||
|
||||
|
||||
def _create_logging_excepthook(product_name):
|
||||
def logging_excepthook(type, value, tb):
|
||||
extra = {}
|
||||
@ -406,15 +417,22 @@ def _setup_logging_from_conf():
|
||||
log_root.addHandler(streamlog)
|
||||
|
||||
if CONF.publish_errors:
|
||||
log_root.addHandler(PublishErrorsHandler(logging.ERROR))
|
||||
handler = importutils.import_object(
|
||||
"savanna.openstack.common.log_handler.PublishErrorsHandler",
|
||||
logging.ERROR)
|
||||
log_root.addHandler(handler)
|
||||
|
||||
datefmt = CONF.log_date_format
|
||||
for handler in log_root.handlers:
|
||||
datefmt = CONF.log_date_format
|
||||
# NOTE(alaski): CONF.log_format overrides everything currently. This
|
||||
# should be deprecated in favor of context aware formatting.
|
||||
if CONF.log_format:
|
||||
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
|
||||
datefmt=datefmt))
|
||||
log_root.info('Deprecated: log_format is now deprecated and will '
|
||||
'be removed in the next release')
|
||||
else:
|
||||
handler.setFormatter(LegacyFormatter(datefmt=datefmt))
|
||||
handler.setFormatter(ContextFormatter(datefmt=datefmt))
|
||||
|
||||
if CONF.debug:
|
||||
log_root.setLevel(logging.DEBUG)
|
||||
@ -440,6 +458,15 @@ def getLogger(name='unknown', version='unknown'):
|
||||
return _loggers[name]
|
||||
|
||||
|
||||
def getLazyLogger(name='unknown', version='unknown'):
|
||||
"""
|
||||
create a pass-through logger that does not create the real logger
|
||||
until it is really needed and delegates all calls to the real logger
|
||||
once it is created
|
||||
"""
|
||||
return LazyAdapter(name, version)
|
||||
|
||||
|
||||
class WritableLogger(object):
|
||||
"""A thin wrapper that responds to `write` and logs."""
|
||||
|
||||
@ -451,7 +478,7 @@ class WritableLogger(object):
|
||||
self.logger.log(self.level, msg)
|
||||
|
||||
|
||||
class LegacyFormatter(logging.Formatter):
|
||||
class ContextFormatter(logging.Formatter):
|
||||
"""A context.RequestContext aware formatter configured through flags.
|
||||
|
||||
The flags used to set format strings are: logging_context_format_string
|
||||
|
147
savanna/openstack/common/loopingcall.py
Normal file
@ -0,0 +1,147 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from eventlet import event
|
||||
from eventlet import greenthread
|
||||
|
||||
from savanna.openstack.common.gettextutils import _
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.openstack.common import timeutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoopingCallDone(Exception):
|
||||
"""Exception to break out and stop a LoopingCall.
|
||||
|
||||
The poll-function passed to LoopingCall can raise this exception to
|
||||
break out of the loop normally. This is somewhat analogous to
|
||||
StopIteration.
|
||||
|
||||
An optional return-value can be included as the argument to the exception;
|
||||
this return-value will be returned by LoopingCall.wait()
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, retvalue=True):
|
||||
""":param retvalue: Value that LoopingCall.wait() should return."""
|
||||
self.retvalue = retvalue
|
||||
|
||||
|
||||
class LoopingCallBase(object):
|
||||
def __init__(self, f=None, *args, **kw):
|
||||
self.args = args
|
||||
self.kw = kw
|
||||
self.f = f
|
||||
self._running = False
|
||||
self.done = None
|
||||
|
||||
def stop(self):
|
||||
self._running = False
|
||||
|
||||
def wait(self):
|
||||
return self.done.wait()
|
||||
|
||||
|
||||
class FixedIntervalLoopingCall(LoopingCallBase):
|
||||
"""A fixed interval looping call."""
|
||||
|
||||
def start(self, interval, initial_delay=None):
|
||||
self._running = True
|
||||
done = event.Event()
|
||||
|
||||
def _inner():
|
||||
if initial_delay:
|
||||
greenthread.sleep(initial_delay)
|
||||
|
||||
try:
|
||||
while self._running:
|
||||
start = timeutils.utcnow()
|
||||
self.f(*self.args, **self.kw)
|
||||
end = timeutils.utcnow()
|
||||
if not self._running:
|
||||
break
|
||||
delay = interval - timeutils.delta_seconds(start, end)
|
||||
if delay <= 0:
|
||||
LOG.warn(_('task run outlasted interval by %s sec') %
|
||||
-delay)
|
||||
greenthread.sleep(delay if delay > 0 else 0)
|
||||
except LoopingCallDone as e:
|
||||
self.stop()
|
||||
done.send(e.retvalue)
|
||||
except Exception:
|
||||
LOG.exception(_('in fixed duration looping call'))
|
||||
done.send_exception(*sys.exc_info())
|
||||
return
|
||||
else:
|
||||
done.send(True)
|
||||
|
||||
self.done = done
|
||||
|
||||
greenthread.spawn_n(_inner)
|
||||
return self.done
|
||||
|
||||
|
||||
# TODO(mikal): this class name is deprecated in Havana and should be removed
|
||||
# in the I release
|
||||
LoopingCall = FixedIntervalLoopingCall
|
||||
|
||||
|
||||
class DynamicLoopingCall(LoopingCallBase):
|
||||
"""A looping call which sleeps until the next known event.
|
||||
|
||||
The function called should return how long to sleep for before being
|
||||
called again.
|
||||
"""
|
||||
|
||||
def start(self, initial_delay=None, periodic_interval_max=None):
|
||||
self._running = True
|
||||
done = event.Event()
|
||||
|
||||
def _inner():
|
||||
if initial_delay:
|
||||
greenthread.sleep(initial_delay)
|
||||
|
||||
try:
|
||||
while self._running:
|
||||
idle = self.f(*self.args, **self.kw)
|
||||
if not self._running:
|
||||
break
|
||||
|
||||
if periodic_interval_max is not None:
|
||||
idle = min(idle, periodic_interval_max)
|
||||
LOG.debug(_('Dynamic looping call sleeping for %.02f '
|
||||
'seconds'), idle)
|
||||
greenthread.sleep(idle)
|
||||
except LoopingCallDone as e:
|
||||
self.stop()
|
||||
done.send(e.retvalue)
|
||||
except Exception:
|
||||
LOG.exception(_('in dynamic looping call'))
|
||||
done.send_exception(*sys.exc_info())
|
||||
return
|
||||
else:
|
||||
done.send(True)
|
||||
|
||||
self.done = done
|
||||
|
||||
greenthread.spawn(_inner)
|
||||
return self.done
|
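A usage sketch for the looping-call helpers above (callback and interval values are illustrative):

    from savanna.openstack.common import loopingcall

    def report_health():
        # Raise loopingcall.LoopingCallDone(result) here to stop the loop;
        # the value becomes the result of timer.wait().
        pass

    timer = loopingcall.FixedIntervalLoopingCall(report_health)
    timer.start(interval=5, initial_delay=10)
    # ... later, from another greenthread:
    timer.stop()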
121
savanna/openstack/common/threadgroup.py
Normal file
@ -0,0 +1,121 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from eventlet import greenlet
|
||||
from eventlet import greenpool
|
||||
from eventlet import greenthread
|
||||
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.openstack.common import loopingcall
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _thread_done(gt, *args, **kwargs):
|
||||
""" Callback function to be passed to GreenThread.link() when we spawn()
|
||||
Calls the :class:`ThreadGroup` to notify it.
|
||||
|
||||
"""
|
||||
kwargs['group'].thread_done(kwargs['thread'])
|
||||
|
||||
|
||||
class Thread(object):
|
||||
""" Wrapper around a greenthread, that holds a reference to the
|
||||
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
|
||||
it has finished so it can be removed from the threads list.
|
||||
"""
|
||||
def __init__(self, thread, group):
|
||||
self.thread = thread
|
||||
self.thread.link(_thread_done, group=group, thread=self)
|
||||
|
||||
def stop(self):
|
||||
self.thread.kill()
|
||||
|
||||
def wait(self):
|
||||
return self.thread.wait()
|
||||
|
||||
|
||||
class ThreadGroup(object):
|
||||
""" The point of the ThreadGroup classis to:
|
||||
|
||||
* keep track of timers and greenthreads (making it easier to stop them
|
||||
when need be).
|
||||
* provide an easy API to add timers.
|
||||
"""
|
||||
def __init__(self, thread_pool_size=10):
|
||||
self.pool = greenpool.GreenPool(thread_pool_size)
|
||||
self.threads = []
|
||||
self.timers = []
|
||||
|
||||
def add_dynamic_timer(self, callback, initial_delay=None,
|
||||
periodic_interval_max=None, *args, **kwargs):
|
||||
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
|
||||
timer.start(initial_delay=initial_delay,
|
||||
periodic_interval_max=periodic_interval_max)
|
||||
self.timers.append(timer)
|
||||
|
||||
def add_timer(self, interval, callback, initial_delay=None,
|
||||
*args, **kwargs):
|
||||
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
|
||||
pulse.start(interval=interval,
|
||||
initial_delay=initial_delay)
|
||||
self.timers.append(pulse)
|
||||
|
||||
def add_thread(self, callback, *args, **kwargs):
|
||||
gt = self.pool.spawn(callback, *args, **kwargs)
|
||||
th = Thread(gt, self)
|
||||
self.threads.append(th)
|
||||
|
||||
def thread_done(self, thread):
|
||||
self.threads.remove(thread)
|
||||
|
||||
def stop(self):
|
||||
current = greenthread.getcurrent()
|
||||
for x in self.threads:
|
||||
if x is current:
|
||||
# don't kill the current thread.
|
||||
continue
|
||||
try:
|
||||
x.stop()
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
|
||||
for x in self.timers:
|
||||
try:
|
||||
x.stop()
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
self.timers = []
|
||||
|
||||
def wait(self):
|
||||
for x in self.timers:
|
||||
try:
|
||||
x.wait()
|
||||
except greenlet.GreenletExit:
|
||||
pass
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
current = greenthread.getcurrent()
|
||||
for x in self.threads:
|
||||
if x is current:
|
||||
continue
|
||||
try:
|
||||
x.wait()
|
||||
except greenlet.GreenletExit:
|
||||
pass
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
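A usage sketch for the ThreadGroup added above (the callbacks are placeholders):

    from savanna.openstack.common import threadgroup

    def periodic_task():
        pass

    def worker(message):
        pass

    tg = threadgroup.ThreadGroup(thread_pool_size=10)
    tg.add_timer(60, periodic_task)   # fixed-interval timer, tracked in tg.timers
    tg.add_thread(worker, 'payload')  # greenthread spawned on the shared pool
    # On service shutdown:
    tg.stop()
    tg.wait()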
0
savanna/plugins/__init__.py
Normal file
167
savanna/plugins/base.py
Normal file
@ -0,0 +1,167 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from abc import ABCMeta
|
||||
from abc import abstractmethod
|
||||
import inspect
|
||||
from oslo.config import cfg
|
||||
from savanna.config import parse_configs
|
||||
from savanna.openstack.common import importutils
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.utils.resources import BaseResource
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
opts = [
|
||||
cfg.ListOpt('plugins',
|
||||
default=[],
|
||||
help='List of plugins to be loaded'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(opts)
|
||||
|
||||
|
||||
class PluginInterface(BaseResource):
|
||||
__metaclass__ = ABCMeta
|
||||
|
||||
name = 'plugin_interface'
|
||||
|
||||
def get_plugin_opts(self):
|
||||
"""Plugin can expose some options that should be specified in conf file
|
||||
|
||||
For example:
|
||||
|
||||
def get_plugin_opts(self):
|
||||
return [
|
||||
cfg.StrOpt('mandatory-conf', required=True),
|
||||
cfg.StrOpt('optional_conf', default="42"),
|
||||
]
|
||||
"""
|
||||
return []
|
||||
|
||||
def setup(self, conf):
|
||||
"""Plugin initialization
|
||||
|
||||
:param conf: plugin-specific configurations
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_title(self):
|
||||
"""Plugin title
|
||||
|
||||
For example:
|
||||
|
||||
"Vanilla Provisioning"
|
||||
"""
|
||||
pass
|
||||
|
||||
def get_description(self):
|
||||
"""Optional description of the plugin
|
||||
|
||||
This information is targeted to be displayed in UI.
|
||||
"""
|
||||
pass
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
'name': self.name,
|
||||
'title': self.get_title(),
|
||||
'description': self.get_description(),
|
||||
}
|
||||
|
||||
|
||||
class PluginManager(object):
|
||||
def __init__(self):
|
||||
self.plugins = {}
|
||||
self._load_all_plugins()
|
||||
|
||||
def _load_all_plugins(self):
|
||||
LOG.debug("List of requested plugins: %s" % CONF.plugins)
|
||||
|
||||
if len(CONF.plugins) > len(set(CONF.plugins)):
|
||||
raise RuntimeError("plugins config contains non-unique entries")
|
||||
|
||||
# register required 'plugin_factory' property for each plugin
|
||||
for plugin in CONF.plugins:
|
||||
opts = [
|
||||
cfg.StrOpt('plugin_class', required=True),
|
||||
]
|
||||
CONF.register_opts(opts, group='plugin:%s' % plugin)
|
||||
|
||||
parse_configs()
|
||||
|
||||
# register plugin-specific configs
|
||||
for plugin_name in CONF.plugins:
|
||||
self.plugins[plugin_name] = self._get_plugin_instance(plugin_name)
|
||||
|
||||
parse_configs()
|
||||
|
||||
titles = []
|
||||
for plugin_name in CONF.plugins:
|
||||
plugin = self.plugins[plugin_name]
|
||||
plugin.setup(CONF['plugin:%s' % plugin_name])
|
||||
|
||||
title = plugin.get_title()
|
||||
if title in titles:
|
||||
# replace with specific error
|
||||
raise RuntimeError(
|
||||
"Title of plugin '%s' isn't unique" % plugin_name)
|
||||
titles.append(title)
|
||||
|
||||
LOG.info("Plugin '%s' defined and loaded" % plugin_name)
|
||||
|
||||
def _get_plugin_instance(self, plugin_name):
|
||||
plugin_path = CONF['plugin:%s' % plugin_name].plugin_class
|
||||
module_path, klass = [s.strip() for s in plugin_path.split(':')]
|
||||
if not module_path or not klass:
|
||||
# todo replace with specific error
|
||||
raise RuntimeError("Incorrect plugin_class: '%s'" %
|
||||
plugin_path)
|
||||
module = importutils.try_import(module_path)
|
||||
if not hasattr(module, klass):
|
||||
# todo replace with specific error
|
||||
raise RuntimeError("Class not found: '%s'" % plugin_path)
|
||||
|
||||
plugin_class = getattr(module, klass)
|
||||
if not inspect.isclass(plugin_class):
|
||||
# todo replace with specific error
|
||||
raise RuntimeError("'%s' isn't a class" % plugin_path)
|
||||
|
||||
plugin = plugin_class()
|
||||
plugin.name = plugin_name
|
||||
|
||||
CONF.register_opts(plugin.get_plugin_opts(),
|
||||
group='plugin:%s' % plugin_name)
|
||||
|
||||
return plugin
|
||||
|
||||
def get_plugins(self, base):
|
||||
return [
|
||||
self.plugins[plugin] for plugin in self.plugins
|
||||
if not base or issubclass(self.plugins[plugin].__class__, base)
|
||||
]
|
||||
|
||||
def get_plugin(self, plugin_name):
|
||||
return self.plugins.get(plugin_name)
|
||||
|
||||
|
||||
PLUGINS = None
|
||||
|
||||
|
||||
def setup_plugins():
|
||||
global PLUGINS
|
||||
PLUGINS = PluginManager()
|
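To tie the loader above together: PluginManager reads the plugins list from [DEFAULT] and then, for every entry, a plugin:<name> section whose required plugin_class option points at 'module.path:ClassName'. A sketch of the corresponding savanna.conf fragment, using the vanilla plugin added later in this commit:

    [DEFAULT]
    plugins=vanilla

    [plugin:vanilla]
    plugin_class=savanna.plugins.vanilla.plugin:VanillaProvider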
118
savanna/plugins/provisioning.py
Normal file
@ -0,0 +1,118 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from abc import abstractmethod
|
||||
import functools
|
||||
from savanna.plugins.base import PluginInterface
|
||||
import savanna.utils.openstack.nova as nova
|
||||
from savanna.utils.resources import BaseResource
|
||||
|
||||
|
||||
class ProvisioningPluginContext(object):
|
||||
def __init__(self, headers):
|
||||
self.headers = headers
|
||||
self.nova = self._autoheaders(nova.novaclient)
|
||||
|
||||
def _autoheaders(self, func):
|
||||
return functools.partial(func, headers=self.headers)
|
||||
|
||||
|
||||
class ProvisioningPluginBase(PluginInterface):
|
||||
@abstractmethod
|
||||
def get_versions(self):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_configs(self, ctx, hadoop_version):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_node_processes(self, ctx, hadoop_version):
|
||||
pass
|
||||
|
||||
def validate(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
def update_infra(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def configure_cluster(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def start_cluster(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
def convert(self, ctx, cluster, input_file):
|
||||
pass
|
||||
|
||||
def on_terminate_cluster(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
def to_dict(self):
|
||||
res = super(ProvisioningPluginBase, self).to_dict()
|
||||
res['versions'] = self.get_versions()
|
||||
return res
|
||||
|
||||
|
||||
class Config(BaseResource):
|
||||
"""Describes a single config parameter.
|
||||
|
||||
For example:
|
||||
|
||||
"some_conf", "jot_tracker", is_optional=True
|
||||
"""
|
||||
|
||||
def __init__(self, name, applicable_target, config_type="str",
|
||||
config_values=None, default_value=None, is_optional=False,
|
||||
description=None):
|
||||
self.name = name
|
||||
self.applicable_target = applicable_target
|
||||
self.config_type = config_type
|
||||
self.config_values = config_values
|
||||
self.default_value = default_value
|
||||
self.is_optional = is_optional
|
||||
self.description = description
|
||||
|
||||
def to_dict(self):
|
||||
res = super(Config, self).to_dict()
|
||||
# todo all custom fields from res
|
||||
return res
|
||||
|
||||
def __repr__(self):
|
||||
return '<Config %s in %s>' % (self.name, self.applicable_target)
|
||||
|
||||
|
||||
class UserInput(object):
|
||||
"""Value provided by the Savanna user for a specific config entry."""
|
||||
|
||||
def __init__(self, config, value):
|
||||
self.config = config
|
||||
self.value = value
|
||||
|
||||
def __repr__(self):
|
||||
return '<UserInput %s = %s>' % (self.config.name, self.value)
|
||||
|
||||
|
||||
class ValidationError(object):
|
||||
"""Describes what is wrong with one of the values provided by user."""
|
||||
|
||||
def __init__(self, config, message):
|
||||
self.config = config
|
||||
self.message = message
|
||||
|
||||
def __repr__(self):
|
||||
return "<ValidationError %s>" % self.config.name
|
0
savanna/plugins/vanilla/__init__.py
Normal file
59
savanna/plugins/vanilla/plugin.py
Normal file
@ -0,0 +1,59 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.plugins.provisioning import Config
|
||||
from savanna.plugins.provisioning import ProvisioningPluginBase
|
||||
|
||||
|
||||
class VanillaProvider(ProvisioningPluginBase):
|
||||
def get_plugin_opts(self):
|
||||
return []
|
||||
|
||||
def setup(self, conf):
|
||||
self.conf = conf
|
||||
|
||||
def get_title(self):
|
||||
return "Vanilla Apache Hadoop"
|
||||
|
||||
def get_description(self):
|
||||
return (
|
||||
"This plugin provides an ability to launch vanilla Apache Hadoop "
|
||||
"cluster without any management consoles.")
|
||||
|
||||
def get_versions(self):
|
||||
return ['Hadoop 1.1.1']
|
||||
|
||||
def get_configs(self, ctx, hadoop_version):
|
||||
return [
|
||||
Config('heap_size', 'tasktracker', default_value='1024M')
|
||||
]
|
||||
|
||||
def get_node_processes(self, ctx, hadoop_version):
|
||||
return [
|
||||
'jobtracker', 'tasktracker',
|
||||
'namenode', 'datanode',
|
||||
]
|
||||
|
||||
def validate(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
def update_infra(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
def configure_cluster(self, ctx, cluster):
|
||||
pass
|
||||
|
||||
def start_cluster(self, ctx, cluster):
|
||||
pass
|
@ -1,580 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
|
||||
<!-- Do not modify this file directly. Instead, copy entries that you -->
|
||||
<!-- wish to modify from this file into core-site.xml and change them -->
|
||||
<!-- there. If core-site.xml does not already exist, create it. -->
|
||||
|
||||
<configuration>
|
||||
|
||||
<!--- global properties -->
|
||||
|
||||
<property>
|
||||
<name>hadoop.tmp.dir</name>
|
||||
<value>/tmp/hadoop-${user.name}</value>
|
||||
<description>A base for other temporary directories.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.native.lib</name>
|
||||
<value>true</value>
|
||||
<description>Should native hadoop libraries, if present, be used.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.filter.initializers</name>
|
||||
<value></value>
|
||||
<description>A comma separated list of class names. Each class in the list
|
||||
must extend org.apache.hadoop.http.FilterInitializer. The corresponding
|
||||
Filter will be initialized. Then, the Filter will be applied to all user
|
||||
facing jsp and servlet web pages. The ordering of the list defines the
|
||||
ordering of the filters.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.group.mapping</name>
|
||||
<value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
|
||||
<description>Class for user to group mapping (get groups for a given user)
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.authorization</name>
|
||||
<value>false</value>
|
||||
<description>Is service-level authorization enabled?</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.authentication</name>
|
||||
<value>simple</value>
|
||||
<description>Possible values are simple (no authentication), and kerberos
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.token.service.use_ip</name>
|
||||
<value>true</value>
|
||||
<description>Controls whether tokens always use IP addresses. DNS changes
|
||||
will not be detected if this option is enabled. Existing client connections
|
||||
that break will always reconnect to the IP of the original host. New clients
|
||||
will connect to the host's new IP but fail to locate a token. Disabling
|
||||
this option will allow existing and new clients to detect an IP change and
|
||||
continue to locate the new host's token.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.use-weak-http-crypto</name>
|
||||
<value>false</value>
|
||||
<description>If enabled, use KSSL to authenticate HTTP connections to the
|
||||
NameNode. Due to a bug in JDK6, using KSSL requires one to configure
|
||||
Kerberos tickets to use encryption types that are known to be
|
||||
cryptographically weak. If disabled, SPNEGO will be used for HTTP
|
||||
authentication, which supports stronger encryption types.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<!--
|
||||
<property>
|
||||
<name>hadoop.security.service.user.name.key</name>
|
||||
<value></value>
|
||||
<description>Name of the kerberos principal of the user that owns
|
||||
a given service daemon
|
||||
</description>
|
||||
</property>
|
||||
-->
|
||||
|
||||
<!--- logging properties -->
|
||||
|
||||
<property>
|
||||
<name>hadoop.logfile.size</name>
|
||||
<value>10000000</value>
|
||||
<description>The max size of each log file</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.logfile.count</name>
|
||||
<value>10</value>
|
||||
<description>The max number of log files</description>
|
||||
</property>
|
||||
|
||||
<!-- i/o properties -->
|
||||
<property>
|
||||
<name>io.file.buffer.size</name>
|
||||
<value>4096</value>
|
||||
<description>The size of buffer for use in sequence files.
|
||||
The size of this buffer should probably be a multiple of hardware
|
||||
page size (4096 on Intel x86), and it determines how much data is
|
||||
buffered during read and write operations.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.bytes.per.checksum</name>
|
||||
<value>512</value>
|
||||
<description>The number of bytes per checksum. Must not be larger than
|
||||
io.file.buffer.size.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.skip.checksum.errors</name>
|
||||
<value>false</value>
|
||||
<description>If true, when a checksum error is encountered while
|
||||
reading a sequence file, entries are skipped, instead of throwing an
|
||||
exception.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.compression.codecs</name>
|
||||
<value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec</value>
|
||||
<description>A list of the compression codec classes that can be used
|
||||
for compression/decompression.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.serializations</name>
|
||||
<value>org.apache.hadoop.io.serializer.WritableSerialization</value>
|
||||
<description>A list of serialization classes that can be used for
|
||||
obtaining serializers and deserializers.</description>
|
||||
</property>
|
||||
|
||||
<!-- file system properties -->
|
||||
|
||||
<property>
|
||||
<name>fs.default.name</name>
|
||||
<value>file:///</value>
|
||||
<description>The name of the default file system. A URI whose
|
||||
scheme and authority determine the FileSystem implementation. The
|
||||
uri's scheme determines the config property (fs.SCHEME.impl) naming
|
||||
the FileSystem implementation class. The uri's authority is used to
|
||||
determine the host, port, etc. for a filesystem.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.trash.interval</name>
|
||||
<value>0</value>
|
||||
<description>Number of minutes between trash checkpoints.
|
||||
If zero, the trash feature is disabled.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.file.impl</name>
|
||||
<value>org.apache.hadoop.fs.LocalFileSystem</value>
|
||||
<description>The FileSystem for file: uris.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.hdfs.impl</name>
|
||||
<value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
|
||||
<description>The FileSystem for hdfs: uris.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3.impl</name>
|
||||
<value>org.apache.hadoop.fs.s3.S3FileSystem</value>
|
||||
<description>The FileSystem for s3: uris.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3n.impl</name>
|
||||
<value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
|
||||
<description>The FileSystem for s3n: (Native S3) uris.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.kfs.impl</name>
|
||||
<value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
|
||||
<description>The FileSystem for kfs: uris.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.hftp.impl</name>
|
||||
<value>org.apache.hadoop.hdfs.HftpFileSystem</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.hsftp.impl</name>
|
||||
<value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.webhdfs.impl</name>
|
||||
<value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.ftp.impl</name>
|
||||
<value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
|
||||
<description>The FileSystem for ftp: uris.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.ramfs.impl</name>
|
||||
<value>org.apache.hadoop.fs.InMemoryFileSystem</value>
|
||||
<description>The FileSystem for ramfs: uris.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.har.impl</name>
|
||||
<value>org.apache.hadoop.fs.HarFileSystem</value>
|
||||
<description>The filesystem for Hadoop archives. </description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.har.impl.disable.cache</name>
|
||||
<value>true</value>
|
||||
<description>Don't cache 'har' filesystem instances.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.checkpoint.dir</name>
|
||||
<value>${hadoop.tmp.dir}/dfs/namesecondary</value>
|
||||
<description>Determines where on the local filesystem the DFS secondary
|
||||
name node should store the temporary images to merge.
|
||||
If this is a comma-delimited list of directories then the image is
|
||||
replicated in all of the directories for redundancy.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.checkpoint.edits.dir</name>
|
||||
<value>${fs.checkpoint.dir}</value>
|
||||
<description>Determines where on the local filesystem the DFS secondary
|
||||
name node should store the temporary edits to merge.
|
||||
If this is a comma-delimited list of directories then the edits are
|
||||
replicated in all of the directories for redundancy.
|
||||
Default value is same as fs.checkpoint.dir
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.checkpoint.period</name>
|
||||
<value>3600</value>
|
||||
<description>The number of seconds between two periodic checkpoints.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.checkpoint.size</name>
|
||||
<value>67108864</value>
|
||||
<description>The size of the current edit log (in bytes) that triggers
|
||||
a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
||||
|
||||
<property>
|
||||
<name>fs.s3.block.size</name>
|
||||
<value>67108864</value>
|
||||
<description>Block size to use when writing files to S3.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3.buffer.dir</name>
|
||||
<value>${hadoop.tmp.dir}/s3</value>
|
||||
<description>Determines where on the local filesystem the S3 filesystem
|
||||
should store files before sending them to S3
|
||||
(or after retrieving them from S3).
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3.maxRetries</name>
|
||||
<value>4</value>
|
||||
<description>The maximum number of retries for reading or writing files to S3,
|
||||
before we signal failure to the application.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3.sleepTimeSeconds</name>
|
||||
<value>10</value>
|
||||
<description>The number of seconds to sleep between each S3 retry.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
||||
<property>
|
||||
<name>local.cache.size</name>
|
||||
<value>10737418240</value>
|
||||
<description>The limit on the size of cache you want to keep, set by default
|
||||
to 10GB. This will act as a soft limit on the cache directory for out of band data.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.seqfile.compress.blocksize</name>
|
||||
<value>1000000</value>
|
||||
<description>The minimum block size for compression in block compressed
|
||||
SequenceFiles.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.seqfile.lazydecompress</name>
|
||||
<value>true</value>
|
||||
<description>Should values of block-compressed SequenceFiles be decompressed
|
||||
only when necessary.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.seqfile.sorter.recordlimit</name>
|
||||
<value>1000000</value>
|
||||
<description>The limit on number of records to be kept in memory in a spill
|
||||
in SequenceFiles.Sorter
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.mapfile.bloom.size</name>
|
||||
<value>1048576</value>
|
||||
<description>The size of BloomFilter-s used in BloomMapFile. Each time this many
|
||||
keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter).
|
||||
Larger values minimize the number of filters, which slightly increases the performance,
|
||||
but may waste too much space if the total number of keys is usually much smaller
|
||||
than this number.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.mapfile.bloom.error.rate</name>
|
||||
<value>0.005</value>
|
||||
<description>The rate of false positives in BloomFilter-s used in BloomMapFile.
|
||||
As this value decreases, the size of BloomFilter-s increases exponentially. This
|
||||
value is the probability of encountering false positives (default is 0.5%).
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.util.hash.type</name>
|
||||
<value>murmur</value>
|
||||
<description>The default implementation of Hash. Currently this can take one of the
|
||||
two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
||||
<!-- ipc properties -->
|
||||
|
||||
<property>
|
||||
<name>ipc.client.idlethreshold</name>
|
||||
<value>4000</value>
|
||||
<description>Defines the threshold number of connections after which
|
||||
connections will be inspected for idleness.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>ipc.client.kill.max</name>
|
||||
<value>10</value>
|
||||
<description>Defines the maximum number of clients to disconnect in one go.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>ipc.client.connection.maxidletime</name>
|
||||
<value>10000</value>
|
||||
<description>The maximum time in msec after which a client will bring down the
|
||||
connection to the server.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>ipc.client.connect.max.retries</name>
|
||||
<value>10</value>
|
||||
<description>Indicates the number of retries a client will make to establish
|
||||
a server connection.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>ipc.server.listen.queue.size</name>
|
||||
<value>128</value>
|
||||
<description>Indicates the length of the listen queue for servers accepting
|
||||
client connections.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>ipc.server.tcpnodelay</name>
|
||||
<value>false</value>
|
||||
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
|
||||
the server. Setting to true disables the algorithm and may decrease latency
|
||||
with a cost of more/smaller packets.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>ipc.client.tcpnodelay</name>
|
||||
<value>false</value>
|
||||
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
|
||||
the client. Setting to true disables the algorithm and may decrease latency
|
||||
with a cost of more/smaller packets.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
||||
<!-- Web Interface Configuration -->
|
||||
|
||||
<property>
|
||||
<name>webinterface.private.actions</name>
|
||||
<value>false</value>
|
||||
<description> If set to true, the web interfaces of JT and NN may contain
|
||||
actions, such as kill job, delete file, etc., that should
|
||||
not be exposed to public. Enable this option if the interfaces
|
||||
are only reachable by those who have the right authorization.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<!-- Proxy Configuration -->
|
||||
|
||||
<property>
|
||||
<name>hadoop.rpc.socket.factory.class.default</name>
|
||||
<value>org.apache.hadoop.net.StandardSocketFactory</value>
|
||||
<description> Default SocketFactory to use. This parameter is expected to be
|
||||
formatted as "package.FactoryClassName".
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
|
||||
<value></value>
|
||||
<description> SocketFactory to use to connect to a DFS. If null or empty, use
|
||||
hadoop.rpc.socket.class.default. This socket factory is also used by
|
||||
DFSClient to create sockets to DataNodes.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
||||
|
||||
<property>
|
||||
<name>hadoop.socks.server</name>
|
||||
<value></value>
|
||||
<description> Address (host:port) of the SOCKS server to be used by the
|
||||
SocksSocketFactory.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<!-- Rack Configuration -->
|
||||
|
||||
<property>
|
||||
<name>topology.node.switch.mapping.impl</name>
|
||||
<value>org.apache.hadoop.net.ScriptBasedMapping</value>
|
||||
<description> The default implementation of the DNSToSwitchMapping. It
|
||||
invokes a script specified in topology.script.file.name to resolve
|
||||
node names. If the value for topology.script.file.name is not set, the
|
||||
default value of DEFAULT_RACK is returned for all node names.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>topology.script.file.name</name>
|
||||
<value></value>
|
||||
<description> The script name that should be invoked to resolve DNS names to
|
||||
NetworkTopology names. Example: the script would take host.foo.bar as an
|
||||
argument, and return /rack1 as the output.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>topology.script.number.args</name>
|
||||
<value>100</value>
|
||||
<description> The max number of args that the script configured with
|
||||
topology.script.file.name should be run with. Each arg is an
|
||||
IP address.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.uid.cache.secs</name>
|
||||
<value>14400</value>
|
||||
<description> NativeIO maintains a cache from UID to UserName. This is
|
||||
the timeout for an entry in that cache. </description>
|
||||
</property>
|
||||
|
||||
<!-- HTTP web-consoles Authentication -->
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.authentication.type</name>
|
||||
<value>simple</value>
|
||||
<description>
|
||||
Defines authentication used for Oozie HTTP endpoint.
|
||||
Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.authentication.token.validity</name>
|
||||
<value>36000</value>
|
||||
<description>
|
||||
Indicates how long (in seconds) an authentication token is valid before it has
|
||||
to be renewed.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.authentication.signature.secret.file</name>
|
||||
<value>${user.home}/hadoop-http-auth-signature-secret</value>
|
||||
<description>
|
||||
The signature secret for signing the authentication tokens.
|
||||
If not set a random secret is generated at startup time.
|
||||
The same secret should be used for JT/NN/DN/TT configurations.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.authentication.cookie.domain</name>
|
||||
<value></value>
|
||||
<description>
|
||||
The domain to use for the HTTP cookie that stores the authentication token.
|
||||
In order for authentication to work correctly across all Hadoop nodes' web-consoles
|
||||
the domain must be correctly set.
|
||||
IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings.
|
||||
For this setting to work properly all nodes in the cluster must be configured
|
||||
to generate URLs with hostname.domain names on it.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.authentication.simple.anonymous.allowed</name>
|
||||
<value>true</value>
|
||||
<description>
|
||||
Indicates if anonymous requests are allowed when using 'simple' authentication.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.authentication.kerberos.principal</name>
|
||||
<value>HTTP/localhost@LOCALHOST</value>
|
||||
<description>
|
||||
Indicates the Kerberos principal to be used for HTTP endpoint.
|
||||
The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.http.authentication.kerberos.keytab</name>
|
||||
<value>${user.home}/hadoop.keytab</value>
|
||||
<description>
|
||||
Location of the keytab file with the credentials for the principal.
|
||||
Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.relaxed.worker.version.check</name>
|
||||
<value>false</value>
|
||||
<description>
|
||||
By default datanodes refuse to connect to namenodes if their build
|
||||
revision (svn revision) do not match, and tasktrackers refuse to
|
||||
connect to jobtrackers if their build version (version, revision,
|
||||
user, and source checksum) do not match. This option changes the
|
||||
behavior of hadoop workers to only check for a version match (eg
|
||||
"1.0.2") but ignore the other build fields (revision, user, and
|
||||
source checksum).
|
||||
</description>
|
||||
</property>
|
||||
|
||||
</configuration>
|
@ -1,547 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
|
||||
<!-- Do not modify this file directly. Instead, copy entries that you -->
|
||||
<!-- wish to modify from this file into hdfs-site.xml and change them -->
|
||||
<!-- there. If hdfs-site.xml does not already exist, create it. -->
|
||||
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.logging.level</name>
|
||||
<value>info</value>
|
||||
<description>The logging level for dfs namenode. Other values are "dir"(trac
|
||||
e namespace mutations), "block"(trace block under/over replications and block
|
||||
creations/deletions), or "all".</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.secondary.http.address</name>
|
||||
<value>0.0.0.0:50090</value>
|
||||
<description>
|
||||
The secondary namenode http server address and port.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.address</name>
|
||||
<value>0.0.0.0:50010</value>
|
||||
<description>
|
||||
The datanode server address and port for data transfer.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.http.address</name>
|
||||
<value>0.0.0.0:50075</value>
|
||||
<description>
|
||||
The datanode http server address and port.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.ipc.address</name>
|
||||
<value>0.0.0.0:50020</value>
|
||||
<description>
|
||||
The datanode ipc server address and port.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.handler.count</name>
|
||||
<value>3</value>
|
||||
<description>The number of server threads for the datanode.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.http.address</name>
|
||||
<value>0.0.0.0:50070</value>
|
||||
<description>
|
||||
The address and the base port where the dfs namenode web ui will listen on.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.https.enable</name>
|
||||
<value>false</value>
|
||||
<description>Decide if HTTPS(SSL) is supported on HDFS
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.https.need.client.auth</name>
|
||||
<value>false</value>
|
||||
<description>Whether SSL client certificate authentication is required
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.https.server.keystore.resource</name>
|
||||
<value>ssl-server.xml</value>
|
||||
<description>Resource file from which ssl server keystore
|
||||
information will be extracted
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.https.client.keystore.resource</name>
|
||||
<value>ssl-client.xml</value>
|
||||
<description>Resource file from which ssl client keystore
|
||||
information will be extracted
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.https.address</name>
|
||||
<value>0.0.0.0:50475</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.https.address</name>
|
||||
<value>0.0.0.0:50470</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.dns.interface</name>
|
||||
<value>default</value>
|
||||
<description>The name of the Network Interface from which a data node should
|
||||
report its IP address.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.dns.nameserver</name>
|
||||
<value>default</value>
|
||||
<description>The host name or IP address of the name server (DNS)
|
||||
which a DataNode should use to determine the host name used by the
|
||||
NameNode for communication and display purposes.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
||||
|
||||
<property>
|
||||
<name>dfs.replication.considerLoad</name>
|
||||
<value>true</value>
|
||||
<description>Decide if chooseTarget considers the target's load or not
|
||||
</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.default.chunk.view.size</name>
|
||||
<value>32768</value>
|
||||
<description>The number of bytes to view for a file on the browser.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.du.reserved</name>
|
||||
<value>0</value>
|
||||
<description>Reserved space in bytes per volume. Always leave this much space free for non-dfs use.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.name.dir</name>
|
||||
<value>${hadoop.tmp.dir}/dfs/name</value>
|
||||
<description>Determines where on the local filesystem the DFS name node
|
||||
should store the name table(fsimage). If this is a comma-delimited list
|
||||
of directories then the name table is replicated in all of the
|
||||
directories, for redundancy. </description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.name.edits.dir</name>
|
||||
<value>${dfs.name.dir}</value>
|
||||
<description>Determines where on the local filesystem the DFS name node
|
||||
should store the transaction (edits) file. If this is a comma-delimited list
|
||||
of directories then the transaction file is replicated in all of the
|
||||
directories, for redundancy. Default value is same as dfs.name.dir
|
||||
</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.web.ugi</name>
|
||||
<value>webuser,webgroup</value>
|
||||
<description>The user account used by the web interface.
|
||||
Syntax: USERNAME,GROUP1,GROUP2, ...
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.permissions</name>
|
||||
<value>true</value>
|
||||
<description>
|
||||
If "true", enable permission checking in HDFS.
|
||||
If "false", permission checking is turned off,
|
||||
but all other behavior is unchanged.
|
||||
Switching from one parameter value to the other does not change the mode,
|
||||
owner or group of files or directories.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.permissions.supergroup</name>
|
||||
<value>supergroup</value>
|
||||
<description>The name of the group of super-users.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.block.access.token.enable</name>
|
||||
<value>false</value>
|
||||
<description>
|
||||
If "true", access tokens are used as capabilities for accessing datanodes.
|
||||
If "false", no access tokens are checked on accessing datanodes.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.block.access.key.update.interval</name>
|
||||
<value>600</value>
|
||||
<description>
|
||||
Interval in minutes at which namenode updates its access keys.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.block.access.token.lifetime</name>
|
||||
<value>600</value>
|
||||
<description>The lifetime of access tokens in minutes.</description>
|
||||
</property>
|
||||
|
||||
|
||||
<property>
|
||||
<name>dfs.data.dir</name>
|
||||
<value>${hadoop.tmp.dir}/dfs/data</value>
|
||||
<description>Determines where on the local filesystem a DFS data node
|
||||
should store its blocks. If this is a comma-delimited
|
||||
list of directories, then data will be stored in all named
|
||||
directories, typically on different devices.
|
||||
Directories that do not exist are ignored.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.data.dir.perm</name>
|
||||
<value>755</value>
|
||||
<description>Permissions for the directories on the local filesystem where
|
||||
the DFS data node stores its blocks. The permissions can either be octal or
|
||||
symbolic.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.replication</name>
|
||||
<value>3</value>
|
||||
<description>Default block replication.
|
||||
The actual number of replications can be specified when the file is created.
|
||||
The default is used if replication is not specified at create time.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.replication.max</name>
|
||||
<value>512</value>
|
||||
<description>Maximal block replication.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.replication.min</name>
|
||||
<value>1</value>
|
||||
<description>Minimal block replication.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.block.size</name>
|
||||
<value>67108864</value>
|
||||
<description>The default block size for new files.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.df.interval</name>
|
||||
<value>60000</value>
|
||||
<description>Disk usage statistics refresh interval in msec.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.client.block.write.retries</name>
|
||||
<value>3</value>
|
||||
<description>The number of retries for writing blocks to the data nodes,
|
||||
before we signal failure to the application.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.blockreport.intervalMsec</name>
|
||||
<value>3600000</value>
|
||||
<description>Determines block reporting interval in milliseconds.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.blockreport.initialDelay</name> <value>0</value>
|
||||
<description>Delay for first block report in seconds.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.heartbeat.interval</name>
|
||||
<value>3</value>
|
||||
<description>Determines datanode heartbeat interval in seconds.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.handler.count</name>
|
||||
<value>10</value>
|
||||
<description>The number of server threads for the namenode.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.safemode.threshold.pct</name>
|
||||
<value>0.999f</value>
|
||||
<description>
|
||||
Specifies the percentage of blocks that should satisfy
|
||||
the minimal replication requirement defined by dfs.replication.min.
|
||||
Values less than or equal to 0 mean not to wait for any particular
|
||||
percentage of blocks before exiting safemode.
|
||||
Values greater than 1 will make safe mode permanent.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.safemode.min.datanodes</name>
|
||||
<value>0</value>
|
||||
<description>
|
||||
Specifies the number of datanodes that must be considered alive
|
||||
before the name node exits safemode.
|
||||
Values less than or equal to 0 mean not to take the number of live
|
||||
datanodes into account when deciding whether to remain in safe mode
|
||||
during startup.
|
||||
Values greater than the number of datanodes in the cluster
|
||||
will make safe mode permanent.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.safemode.extension</name>
|
||||
<value>30000</value>
|
||||
<description>
|
||||
Determines extension of safe mode in milliseconds
|
||||
after the threshold level is reached.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.balance.bandwidthPerSec</name>
|
||||
<value>1048576</value>
|
||||
<description>
|
||||
Specifies the maximum amount of bandwidth that each datanode
|
||||
can utilize for balancing purposes, in terms of
|
||||
the number of bytes per second.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.hosts</name>
|
||||
<value></value>
|
||||
<description>Names a file that contains a list of hosts that are
|
||||
permitted to connect to the namenode. The full pathname of the file
|
||||
must be specified. If the value is empty, all hosts are
|
||||
permitted.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.hosts.exclude</name>
|
||||
<value></value>
|
||||
<description>Names a file that contains a list of hosts that are
|
||||
not permitted to connect to the namenode. The full pathname of the
|
||||
file must be specified. If the value is empty, no hosts are
|
||||
excluded.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.max.objects</name>
|
||||
<value>0</value>
|
||||
<description>The maximum number of files, directories and blocks
|
||||
dfs supports. A value of zero indicates no limit to the number
|
||||
of objects that dfs supports.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.decommission.interval</name>
|
||||
<value>30</value>
|
||||
<description>Namenode periodicity in seconds to check if decommission is
|
||||
complete.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.decommission.nodes.per.interval</name>
|
||||
<value>5</value>
|
||||
<description>The number of nodes namenode checks if decommission is complete
|
||||
in each dfs.namenode.decommission.interval.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.replication.interval</name>
|
||||
<value>3</value>
|
||||
<description>The periodicity in seconds with which the namenode computes
|
||||
replication work for datanodes.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.access.time.precision</name>
|
||||
<value>3600000</value>
|
||||
<description>The access time for an HDFS file is precise up to this value.
|
||||
The default value is 1 hour. Setting a value of 0 disables
|
||||
access times for HDFS.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.support.append</name>
|
||||
<description>
|
||||
This option is no longer supported. HBase no longer requires that
|
||||
this option be enabled as sync is now enabled by default. See
|
||||
HADOOP-8230 for additional information.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.delegation.key.update-interval</name>
|
||||
<value>86400000</value>
|
||||
<description>The update interval for master key for delegation tokens
|
||||
in the namenode in milliseconds.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.delegation.token.max-lifetime</name>
|
||||
<value>604800000</value>
|
||||
<description>The maximum lifetime in milliseconds for which a delegation
|
||||
token is valid.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.delegation.token.renew-interval</name>
|
||||
<value>86400000</value>
|
||||
<description>The renewal interval for delegation token in milliseconds.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.failed.volumes.tolerated</name>
|
||||
<value>0</value>
|
||||
<description>The number of volumes that are allowed to
|
||||
fail before a datanode stops offering service. By default
|
||||
any volume failure will cause a datanode to shut down.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.max.xcievers</name>
|
||||
<value>4096</value>
|
||||
<description>Specifies the maximum number of threads to use for transferring data
|
||||
in and out of the DN.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.client.use.datanode.hostname</name>
|
||||
<value>false</value>
|
||||
<description>Whether clients should use datanode hostnames when
|
||||
connecting to datanodes.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.use.datanode.hostname</name>
|
||||
<value>false</value>
|
||||
<description>Whether datanodes should use datanode hostnames when
|
||||
connecting to other datanodes for data transfer.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.client.local.interfaces</name>
|
||||
<value></value>
|
||||
<description>A comma separated list of network interface names to use
|
||||
for data transfer between the client and datanodes. When creating
|
||||
a connection to read from or write to a datanode, the client
|
||||
chooses one of the specified interfaces at random and binds its
|
||||
socket to the IP of that interface. Individual names may be
|
||||
specified as either an interface name (eg "eth0"), a subinterface
|
||||
name (eg "eth0:0"), or an IP address (which may be specified using
|
||||
CIDR notation to match a range of IPs).
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.kerberos.internal.spnego.principal</name>
|
||||
<value>${dfs.web.authentication.kerberos.principal}</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
|
||||
<value>${dfs.web.authentication.kerberos.principal}</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.invalidate.work.pct.per.iteration</name>
|
||||
<value>0.32f</value>
|
||||
<description>
|
||||
*Note*: Advanced property. Change with caution.
|
||||
This determines the percentage amount of block
|
||||
invalidations (deletes) to do over a single DN heartbeat
|
||||
deletion command. The final deletion count is determined by applying this
|
||||
percentage to the number of live nodes in the system.
|
||||
The resultant number is the number of blocks from the deletion list
|
||||
chosen for proper invalidation over a single heartbeat of a single DN.
|
||||
Value should be a positive, non-zero percentage in float notation (X.Yf),
|
||||
with 1.0f meaning 100%.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.replication.work.multiplier.per.iteration</name>
|
||||
<value>2</value>
|
||||
<description>
|
||||
*Note*: Advanced property. Change with caution.
|
||||
This determines the total amount of block transfers to begin in
|
||||
parallel at a DN, for replication, when such a command list is being
|
||||
sent over a DN heartbeat by the NN. The actual number is obtained by
|
||||
multiplying this multiplier with the total number of live nodes in the
|
||||
cluster. The result number is the number of blocks to begin transfers
|
||||
immediately for, per DN heartbeat. This number can be any positive,
|
||||
non-zero integer.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.check.stale.datanode</name>
|
||||
<value>false</value>
|
||||
<description>
|
||||
Indicate whether or not to check "stale" datanodes whose
|
||||
heartbeat messages have not been received by the namenode
|
||||
for more than a specified time interval. If this configuration
|
||||
parameter is set as true, the stale datanodes will be moved to
|
||||
the end of the target node list for reading. The writing will
|
||||
also try to avoid stale nodes.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.stale.datanode.interval</name>
|
||||
<value>30000</value>
|
||||
<description>
|
||||
Default time interval for marking a datanode as "stale", i.e., if
|
||||
the namenode has not received heartbeat msg from a datanode for
|
||||
more than this time interval, the datanode will be marked and treated
|
||||
as "stale" by default.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
</configuration>
|
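As an aside, these bundled *-default.xml files are never handed to Hadoop as-is; the removed provisioning code only parses them to learn which property names are recognized (see _load_xml_default_configs further below). A minimal, self-contained sketch of that extraction, with the file path supplied by the caller:

    import xml.dom.minidom as xml

    def load_default_property_names(path):
        """Return every <name> value from a Hadoop *-default.xml file."""
        doc = xml.parse(path)
        return [node.childNodes[0].data
                for node in doc.getElementsByTagName("name")]

    # Example (the path is illustrative):
    # hdfs_names = load_default_property_names("savanna/resources/hdfs-default.xml")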
File diff suppressed because it is too large
@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "----- Setting up Hadoop enviroment config"
|
||||
|
||||
{% for envconf in env_configs -%}
|
||||
echo "{{envconf}}" >> /tmp/hadoop-env.sh
|
||||
{% endfor %}
|
||||
|
||||
cat /etc/hadoop/hadoop-env.sh >> /tmp/hadoop-env.sh
|
||||
mv /tmp/hadoop-env.sh /etc/hadoop/hadoop-env.sh
|
||||
|
||||
|
||||
echo "----- Creating directories permissions"
|
||||
|
||||
# TODO(aignatov): Pass /mnt via args in the future, once the HDFS placement feature is ready
|
||||
chown -R hadoop:hadoop /mnt
|
||||
chmod -R 755 /mnt
|
||||
|
||||
{% block master %}
|
||||
{% endblock %}
|
@ -1,21 +0,0 @@
|
||||
{% extends "setup-general.sh.template" %}
|
||||
|
||||
{% block master %}
|
||||
echo "----- Populating slaves file"
|
||||
|
||||
echo -e '
|
||||
{%- for slave in slaves -%}
|
||||
{{slave}}\n
|
||||
{%- endfor -%}
|
||||
' | tee /etc/hadoop/slaves
|
||||
|
||||
|
||||
echo "----- Populating master file"
|
||||
|
||||
echo {{master_hostname}} | tee /etc/hadoop/masters
|
||||
|
||||
|
||||
echo "----- Formatting Hadoop NameNode"
|
||||
|
||||
su -c 'hadoop namenode -format' hadoop
|
||||
{% endblock %}
|
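The two setup-*.sh files removed above are plain Jinja2 templates. A minimal sketch of how they are rendered into per-node setup scripts, mirroring the _render_template helper in the removed cluster_ops module (argument values below are illustrative):

    from jinja2 import Environment, PackageLoader

    env = Environment(loader=PackageLoader('savanna', 'resources'))

    def render_setup_script(is_master, master_hostname, slaves, env_configs):
        # Master nodes get the slaves/masters files and the NameNode format step;
        # all other nodes only run the common environment setup.
        name = 'setup-master.sh' if is_master else 'setup-general.sh'
        templ = env.get_template('%s.template' % name)
        return templ.render(master_hostname=master_hostname,
                            slaves=slaves,
                            env_configs=env_configs)

    # script = render_setup_script(True, 'demo-master', ['demo-1', 'demo-2'],
    #                              ['HADOOP_NAMENODE_OPTS=\\"-Xmx896m\\"'])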
@ -13,257 +13,57 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import eventlet
|
||||
from oslo.config import cfg
|
||||
|
||||
from savanna import exceptions as ex
|
||||
import savanna.db.storage as s
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.service import cluster_ops
|
||||
import savanna.storage.storage as storage
|
||||
import savanna.plugins.base as plugin_base
|
||||
from savanna.plugins.provisioning import ProvisioningPluginBase
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('allow_cluster_ops', 'savanna.config')
|
||||
|
||||
## Cluster ops
|
||||
|
||||
## Node Template ops:
|
||||
get_clusters = s.get_clusters
|
||||
get_cluster = s.get_cluster
|
||||
|
||||
def get_node_template(**args):
|
||||
return _node_template(storage.get_node_template(**args))
|
||||
|
||||
def create_cluster(values):
|
||||
# todo initiate cluster creation here :)
|
||||
return s.create_cluster(values)
|
||||
|
||||
def get_node_templates(**args):
|
||||
return [_node_template(tmpl) for tmpl
|
||||
in storage.get_node_templates(**args)]
|
||||
|
||||
def terminate_cluster(**args):
|
||||
# todo initiate cluster termination here :)
|
||||
cluster = get_cluster(**args)
|
||||
s.terminate_cluster(cluster)
|
||||
|
||||
def is_node_template_associated(**args):
|
||||
return storage.is_node_template_associated(**args)
|
||||
|
||||
## ClusterTemplate ops
|
||||
|
||||
def create_node_template(values, headers):
|
||||
"""Creates new node template from values dict.
|
||||
get_cluster_templates = s.get_cluster_templates
|
||||
get_cluster_template = s.get_cluster_template
|
||||
create_cluster_template = s.create_cluster_template
|
||||
terminate_cluster_template = s.terminate_cluster_template
|
||||
|
||||
:param values: dict
|
||||
:return: created node template resource
|
||||
"""
|
||||
values = values.pop('node_template')
|
||||
|
||||
name = values.pop('name')
|
||||
node_type_id = storage.get_node_type(name=values.pop('node_type')).id
|
||||
flavor_id = values.pop('flavor_id')
|
||||
## NodeGroupTemplate ops
|
||||
|
||||
nt = storage.create_node_template(name, node_type_id, flavor_id, values)
|
||||
get_node_group_templates = s.get_node_group_templates
|
||||
get_node_group_template = s.get_node_group_template
|
||||
create_node_group_template = s.create_node_group_template
|
||||
terminate_node_group_template = s.terminate_node_group_template
|
||||
|
||||
return get_node_template(id=nt.id)
|
||||
|
||||
## Plugins ops
|
||||
|
||||
def terminate_node_template(**args):
|
||||
return storage.terminate_node_template(**args)
|
||||
def get_plugins():
|
||||
return plugin_base.PLUGINS.get_plugins(base=ProvisioningPluginBase)
|
||||
|
||||
|
||||
## Cluster ops:
|
||||
|
||||
def get_cluster(**args):
|
||||
return _cluster(storage.get_cluster(**args))
|
||||
|
||||
|
||||
def get_clusters(**args):
|
||||
return [_cluster(cluster) for cluster in
|
||||
storage.get_clusters(**args)]
|
||||
|
||||
|
||||
def create_cluster(values, headers):
|
||||
values = values.pop('cluster')
|
||||
|
||||
name = values.pop('name')
|
||||
base_image_id = values.pop('base_image_id')
|
||||
tenant_id = headers['X-Tenant-Id']
|
||||
templates = values.pop('node_templates')
|
||||
|
||||
# todo(slukjanov): check that we can create objects in the specified tenant
|
||||
|
||||
cluster = storage.create_cluster(name, base_image_id, tenant_id, templates)
|
||||
|
||||
eventlet.spawn(_cluster_creation_job, headers, cluster.id)
|
||||
|
||||
return get_cluster(id=cluster.id)
|
||||
|
||||
|
||||
def _cluster_creation_job(headers, cluster_id):
|
||||
cluster = storage.get_cluster(id=cluster_id)
|
||||
LOG.debug("Starting cluster '%s' creation: %s", cluster_id,
|
||||
_cluster(cluster).dict)
|
||||
|
||||
if CONF.allow_cluster_ops:
|
||||
launched = cluster_ops.launch_cluster(headers, cluster)
|
||||
else:
|
||||
LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag")
|
||||
launched = True
|
||||
|
||||
if launched:
|
||||
storage.update_cluster_status('Active', id=cluster.id)
|
||||
|
||||
|
||||
def terminate_cluster(headers, **args):
|
||||
cluster = storage.update_cluster_status('Stopping', **args)
|
||||
|
||||
eventlet.spawn(_cluster_termination_job, headers, cluster.id)
|
||||
|
||||
|
||||
def _cluster_termination_job(headers, cluster_id):
|
||||
cluster = storage.get_cluster(id=cluster_id)
|
||||
LOG.debug("Stopping cluster '%s' creation: %s", cluster_id,
|
||||
_cluster(cluster).dict)
|
||||
|
||||
if CONF.allow_cluster_ops:
|
||||
cluster_ops.stop_cluster(headers, cluster)
|
||||
else:
|
||||
LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag")
|
||||
|
||||
storage.terminate_cluster(id=cluster.id)
|
||||
|
||||
|
||||
## Node Type ops:
|
||||
|
||||
def get_node_type(**args):
|
||||
return _node_type(storage.get_node_type(**args))
|
||||
|
||||
|
||||
def get_node_types(**args):
|
||||
return [_node_type(t) for t in storage.get_node_types(**args)]
|
||||
|
||||
|
||||
def get_node_type_required_params(**args):
|
||||
result = {}
|
||||
for process in storage.get_node_type(**args).processes:
|
||||
result[process.name] = []
|
||||
for prop in process.node_process_properties:
|
||||
if prop.required and not prop.default:
|
||||
result[process.name] += [prop.name]
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def get_node_type_all_params(**args):
|
||||
result = {}
|
||||
for process in storage.get_node_type(**args).processes:
|
||||
result[process.name] = [prop.name
|
||||
for prop in process.node_process_properties]
|
||||
return result
|
||||
|
||||
|
||||
## Utils and DB object to Resource converters
|
||||
|
||||
def _clean_nones(obj):
|
||||
if not isinstance(obj, dict) and not isinstance(obj, list):
|
||||
return obj
|
||||
|
||||
if isinstance(obj, dict):
|
||||
remove = []
|
||||
for key, value in obj.iteritems():
|
||||
if value is None:
|
||||
remove.append(key)
|
||||
for key in remove:
|
||||
obj.pop(key)
|
||||
for value in obj.values():
|
||||
_clean_nones(value)
|
||||
elif isinstance(obj, list):
|
||||
new_list = []
|
||||
for elem in obj:
|
||||
elem = _clean_nones(elem)
|
||||
if elem is not None:
|
||||
new_list.append(elem)
|
||||
return new_list
|
||||
|
||||
return obj
|
||||
|
||||
|
||||
class Resource(object):
|
||||
def __init__(self, _name, _info):
|
||||
self._name = _name
|
||||
self._info = _clean_nones(_info)
|
||||
|
||||
def __getattr__(self, k):
|
||||
if k not in self.__dict__:
|
||||
return self._info.get(k)
|
||||
return self.__dict__[k]
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %s>' % (self._name, self._info)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self._name == other._name and self._info == other._info
|
||||
|
||||
@property
|
||||
def dict(self):
|
||||
return self._info
|
||||
|
||||
@property
|
||||
def wrapped_dict(self):
|
||||
return {self._name: self._info}
|
||||
|
||||
|
||||
def _node_template(nt):
|
||||
if not nt:
|
||||
raise ex.NodeTemplateNotFoundException(nt)
|
||||
|
||||
d = {
|
||||
'id': nt.id,
|
||||
'name': nt.name,
|
||||
'node_type': {
|
||||
'name': nt.node_type.name,
|
||||
'processes': [p.name for p in nt.node_type.processes]},
|
||||
'flavor_id': nt.flavor_id
|
||||
}
|
||||
|
||||
for conf in nt.node_template_configs:
|
||||
c_section = conf.node_process_property.node_process.name
|
||||
c_name = conf.node_process_property.name
|
||||
c_value = conf.value
|
||||
if c_section not in d:
|
||||
d[c_section] = dict()
|
||||
d[c_section][c_name] = c_value
|
||||
|
||||
return Resource('node_template', d)
|
||||
|
||||
|
||||
def _cluster(cluster):
|
||||
if not cluster:
|
||||
raise ex.ClusterNotFoundException(cluster)
|
||||
|
||||
d = {
|
||||
'id': cluster.id,
|
||||
'name': cluster.name,
|
||||
'base_image_id': cluster.base_image_id,
|
||||
'status': cluster.status,
|
||||
'service_urls': {},
|
||||
'node_templates': {},
|
||||
'nodes': [{'vm_id': n.vm_id,
|
||||
'node_template': {
|
||||
'id': n.node_template.id,
|
||||
'name': n.node_template.name
|
||||
}}
|
||||
for n in cluster.nodes]
|
||||
}
|
||||
for ntc in cluster.node_counts:
|
||||
d['node_templates'][ntc.node_template.name] = ntc.count
|
||||
|
||||
for service in cluster.service_urls:
|
||||
d['service_urls'][service.name] = service.url
|
||||
|
||||
return Resource('cluster', d)
|
||||
|
||||
|
||||
def _node_type(nt):
|
||||
if not nt:
|
||||
raise ex.NodeTypeNotFoundException(nt)
|
||||
|
||||
d = {
|
||||
'id': nt.id,
|
||||
'name': nt.name,
|
||||
'processes': [p.name for p in nt.processes]
|
||||
}
|
||||
|
||||
return Resource('node_type', d)
|
||||
def get_plugin(plugin_name, version=None):
|
||||
plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
|
||||
res = plugin.as_resource()
|
||||
if version:
|
||||
res._info['configs'] = [c.dict for c in plugin.get_configs(version)]
|
||||
res._info['node_processes'] = plugin.get_node_processes(version)
|
||||
return res
|
||||
|
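For context, the plugin ops introduced above are meant to be consumed roughly as follows; the plugin name and version strings are placeholders rather than values defined by this change:

    all_plugins = get_plugins()                # every registered provisioning plugin
    plugin = get_plugin('some-plugin', '1.0')  # one plugin, with configs and node
                                               # processes filled in for that version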
@ -1,510 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import eventlet
|
||||
from jinja2 import Environment
|
||||
from jinja2 import PackageLoader
|
||||
from oslo.config import cfg
|
||||
import paramiko
|
||||
from pkg_resources import resource_filename
|
||||
import xml.dom.minidom as xml
|
||||
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.storage.db import DB
|
||||
from savanna.storage.models import Node, ServiceUrl
|
||||
from savanna.storage.storage import update_cluster_status
|
||||
from savanna.utils.openstack.nova import novaclient
|
||||
from savanna.utils.patches import patch_minidom_writexml
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
cluster_node_opts = [
|
||||
cfg.StrOpt('username',
|
||||
default='root',
|
||||
help='An existing user on Hadoop image'),
|
||||
cfg.StrOpt('password',
|
||||
default='swordfish',
|
||||
help='User\'s password'),
|
||||
cfg.BoolOpt('use_floating_ips',
|
||||
default=True,
|
||||
help='When set to false, Savanna uses only internal IPs of VMs.'
|
||||
' When set to true, Savanna expects OpenStack to auto-'
|
||||
'assign floating IPs to cluster nodes. Internal IPs will '
|
||||
'be used for inter-cluster communication, while floating '
|
||||
'ones will be used by Savanna to configure nodes. Also '
|
||||
'floating IPs will be exposed in service URLs')
|
||||
]
|
||||
|
||||
CONF.register_opts(cluster_node_opts, 'cluster_node')
|
||||
|
||||
|
||||
def _find_by_id(lst, id):
|
||||
for entity in lst:
|
||||
if entity.id == id:
|
||||
return entity
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _find_by_name(lst, name):
|
||||
for entity in lst:
|
||||
if entity.name == name:
|
||||
return entity
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _check_finding(entity, attr, value):
|
||||
if entity is None:
|
||||
raise RuntimeError("Unable to find entity with %s "
|
||||
"\'%s\'" % (attr, value))
|
||||
|
||||
|
||||
def _ensure_zero(ret):
|
||||
if ret != 0:
|
||||
raise RuntimeError('Command returned non-zero status code - %i' % ret)
|
||||
|
||||
|
||||
def _setup_ssh_connection(host, ssh):
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
ssh.connect(
|
||||
host,
|
||||
username=CONF.cluster_node.username,
|
||||
password=CONF.cluster_node.password
|
||||
)
|
||||
|
||||
|
||||
def _open_channel_and_execute(ssh, cmd):
|
||||
chan = ssh.get_transport().open_session()
|
||||
chan.exec_command(cmd)
|
||||
return chan.recv_exit_status()
|
||||
|
||||
|
||||
def _execute_command_on_node(host, cmd):
|
||||
ssh = paramiko.SSHClient()
|
||||
try:
|
||||
_setup_ssh_connection(host, ssh)
|
||||
return _open_channel_and_execute(ssh, cmd)
|
||||
finally:
|
||||
ssh.close()
|
||||
|
||||
|
||||
def launch_cluster(headers, cluster):
|
||||
nova = novaclient(headers)
|
||||
|
||||
clmap = dict()
|
||||
clmap['id'] = cluster.id
|
||||
clmap['name'] = cluster.name
|
||||
clmap['image'] = _find_by_id(nova.images.list(),
|
||||
cluster.base_image_id)
|
||||
_check_finding(clmap['image'], 'id', cluster.base_image_id)
|
||||
|
||||
clmap['nodes'] = []
|
||||
num = 1
|
||||
|
||||
for nc in cluster.node_counts:
|
||||
configs = dict()
|
||||
for cf in nc.node_template.node_template_configs:
|
||||
proc_name = cf.node_process_property.node_process.name
|
||||
if proc_name not in configs:
|
||||
configs[proc_name] = dict()
|
||||
|
||||
name = cf.node_process_property.name
|
||||
configs[proc_name][name] = cf.value
|
||||
|
||||
ntype = nc.node_template.node_type.name
|
||||
templ_id = nc.node_template.id
|
||||
flv_id = nc.node_template.flavor_id
|
||||
flv = _find_by_name(nova.flavors.list(), flv_id)
|
||||
_check_finding(flv, 'id', flv_id)
|
||||
|
||||
for _ in xrange(0, nc.count):
|
||||
node = dict()
|
||||
node['id'] = None
|
||||
if ntype == 'JT+NN':
|
||||
node['name'] = '%s-master' % cluster.name
|
||||
else:
|
||||
node['name'] = '%s-%i' % (cluster.name, num)
|
||||
num += 1
|
||||
node['type'] = ntype
|
||||
node['templ_id'] = templ_id
|
||||
node['flavor'] = flv
|
||||
node['configs'] = configs
|
||||
node['is_up'] = False
|
||||
clmap['nodes'].append(node)
|
||||
|
||||
try:
|
||||
for node in clmap['nodes']:
|
||||
LOG.debug("Starting node for cluster '%s', node: %s, image: %s",
|
||||
cluster.name, node, clmap['image'])
|
||||
_launch_node(nova, node, clmap['image'])
|
||||
except Exception, e:
|
||||
_rollback_cluster_creation(cluster, clmap, nova, e)
|
||||
return False
|
||||
|
||||
all_set = False
|
||||
|
||||
LOG.debug("All nodes for cluster '%s' have been started, "
|
||||
"waiting for them to come up", cluster.name)
|
||||
|
||||
while not all_set:
|
||||
all_set = True
|
||||
|
||||
for node in clmap['nodes']:
|
||||
_check_if_up(nova, node)
|
||||
|
||||
if not node['is_up']:
|
||||
all_set = False
|
||||
|
||||
eventlet.sleep(1)
|
||||
|
||||
LOG.debug("All nodes of cluster '%s' are up: %s",
|
||||
cluster.name, all_set)
|
||||
|
||||
_pre_cluster_setup(clmap)
|
||||
for node in clmap['nodes']:
|
||||
_setup_node(node, clmap)
|
||||
_register_node(node, cluster)
|
||||
|
||||
LOG.debug("All nodes of cluster '%s' are configured and registered, "
|
||||
"starting the cluster...", cluster.name)
|
||||
|
||||
_start_cluster(cluster, clmap)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def _launch_node(nova, node, image):
|
||||
srv = nova.servers.create(node['name'], image, node['flavor'])
|
||||
node['id'] = srv.id
|
||||
|
||||
|
||||
def _rollback_cluster_creation(cluster, clmap, nova, error):
|
||||
update_cluster_status("Error", id=cluster.id)
|
||||
|
||||
LOG.warn("Can't launch all vms for cluster '%s': %s", cluster.id, error)
|
||||
for node in clmap['nodes']:
|
||||
if node['id']:
|
||||
_stop_node_silently(nova, cluster, node['id'])
|
||||
|
||||
LOG.info("All vms of cluster '%s' has been stopped", cluster.id)
|
||||
|
||||
|
||||
def _stop_node_silently(nova, cluster, vm_id):
|
||||
LOG.debug("Stopping vm '%s' of cluster '%s'", vm_id, cluster.id)
|
||||
try:
|
||||
nova.servers.delete(vm_id)
|
||||
except Exception, e:
|
||||
LOG.error("Can't silently remove node '%s': %s", vm_id, e)
|
||||
|
||||
|
||||
def _check_if_up(nova, node):
|
||||
if node['is_up']:
|
||||
# all set
|
||||
return
|
||||
|
||||
if 'ip' not in node:
|
||||
srv = _find_by_id(nova.servers.list(), node['id'])
|
||||
nets = srv.networks
|
||||
|
||||
if len(nets) == 0:
|
||||
# VM's networking is not configured yet
|
||||
return
|
||||
|
||||
ips = nets.values()[0]
|
||||
|
||||
if CONF.cluster_node.use_floating_ips:
|
||||
if len(ips) < 2:
|
||||
# floating IP is not assigned yet
|
||||
return
|
||||
|
||||
# we assume that floating IP comes last in the list
|
||||
node['ip'] = ips[-1]
|
||||
node['internal_ip'] = ips[0]
|
||||
else:
|
||||
if len(ips) < 1:
|
||||
# private IP is not assigned yet
|
||||
return
|
||||
node['ip'] = ips[0]
|
||||
node['internal_ip'] = ips[0]
|
||||
|
||||
try:
|
||||
ret = _execute_command_on_node(node['ip'], 'ls -l /')
|
||||
_ensure_zero(ret)
|
||||
except Exception:
|
||||
# ssh is not up yet
|
||||
return
|
||||
|
||||
node['is_up'] = True
|
||||
|
||||
|
||||
env = Environment(loader=PackageLoader('savanna', 'resources'))
|
||||
|
||||
|
||||
def _render_template(template_name, **kwargs):
|
||||
templ = env.get_template('%s.template' % template_name)
|
||||
return templ.render(**kwargs)
|
||||
|
||||
|
||||
ENV_CONFS = {
|
||||
'job_tracker': {
|
||||
'heap_size': 'HADOOP_JOBTRACKER_OPTS=\\"-Xmx%sm\\"'
|
||||
},
|
||||
'name_node': {
|
||||
'heap_size': 'HADOOP_NAMENODE_OPTS=\\"-Xmx%sm\\"'
|
||||
},
|
||||
'task_tracker': {
|
||||
'heap_size': 'HADOOP_TASKTRACKER_OPTS=\\"-Xmx%sm\\"'
|
||||
},
|
||||
'data_node': {
|
||||
'heap_size': 'HADOOP_DATANODE_OPTS=\\"-Xmx%sm\\"'
|
||||
}
|
||||
}
|
||||
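# For example, a node config {'name_node': {'heap_size': '896'}} is turned by
# _extract_environment_confs() below into 'HADOOP_NAMENODE_OPTS=\"-Xmx896m\"',
# which ends up in /etc/hadoop/hadoop-env.sh via the setup script template;
# parameters not listed in ENV_CONFS are routed to the XML config files by
# _extract_xml_confs() instead.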
|
||||
|
||||
def _load_xml_default_configs(file_name):
|
||||
doc = xml.parse(resource_filename("savanna", 'resources/%s' % file_name))
|
||||
properties = doc.getElementsByTagName("name")
|
||||
return [prop.childNodes[0].data for prop in properties]
|
||||
|
||||
CORE_CONF = _load_xml_default_configs('core-default.xml')
|
||||
MAPRED_CONF = _load_xml_default_configs('mapred-default.xml')
|
||||
HDFS_CONF = _load_xml_default_configs('hdfs-default.xml')
|
||||
|
||||
|
||||
def _generate_xml_configs(node, clmap):
|
||||
# inserting common configs depends on provisioned VMs and HDFS placement
|
||||
cfg = {
|
||||
'fs.default.name': 'hdfs://%s:8020' % clmap['master_hostname'],
|
||||
'mapred.job.tracker': '%s:8021' % clmap['master_hostname'],
|
||||
'dfs.name.dir': '/mnt/lib/hadoop/hdfs/namenode',
|
||||
'dfs.data.dir': '/mnt/lib/hadoop/hdfs/datanode',
|
||||
'mapred.system.dir': '/mnt/mapred/mapredsystem',
|
||||
'mapred.local.dir': '/mnt/lib/hadoop/mapred'
|
||||
}
|
||||
|
||||
# inserting user-defined configs from NodeTemplates
|
||||
for key, value in _extract_xml_confs(node['configs']):
|
||||
cfg[key] = value
|
||||
|
||||
# invoking applied configs to appropriate xml files
|
||||
xml_configs = {
|
||||
'core-site': _create_xml(cfg, CORE_CONF),
|
||||
'mapred-site': _create_xml(cfg, MAPRED_CONF),
|
||||
'hdfs-site': _create_xml(cfg, HDFS_CONF)
|
||||
}
|
||||
|
||||
return xml_configs
|
||||
|
||||
|
||||
# Patches minidom's writexml to avoid excess whitespace in generated xml
|
||||
# configuration files, which breaks Hadoop.
|
||||
patch_minidom_writexml()
|
||||
|
||||
|
||||
def _create_xml(configs, global_conf):
|
||||
doc = xml.Document()
|
||||
|
||||
pi = doc.createProcessingInstruction('xml-stylesheet',
|
||||
'type="text/xsl" '
|
||||
'href="configuration.xsl"')
|
||||
doc.insertBefore(pi, doc.firstChild)
|
||||
|
||||
# Create the <configuration> base element
|
||||
configuration = doc.createElement("configuration")
|
||||
doc.appendChild(configuration)
|
||||
|
||||
for prop_name, prop_value in configs.items():
|
||||
if prop_name in global_conf:
|
||||
# Create the <property> element
|
||||
property = doc.createElement("property")
|
||||
configuration.appendChild(property)
|
||||
|
||||
# Create a <name> element in <property>
|
||||
name = doc.createElement("name")
|
||||
property.appendChild(name)
|
||||
|
||||
# Give the <name> element some hadoop config name
|
||||
name_text = doc.createTextNode(prop_name)
|
||||
name.appendChild(name_text)
|
||||
|
||||
# Create a <value> element in <property>
|
||||
value = doc.createElement("value")
|
||||
property.appendChild(value)
|
||||
|
||||
# Give the <value> element some hadoop config value
|
||||
value_text = doc.createTextNode(prop_value)
|
||||
value.appendChild(value_text)
|
||||
|
||||
# Return newly created XML
|
||||
return doc.toprettyxml(indent=" ")
|
||||
|
||||
|
||||
def _keys_exist(map, key1, key2):
|
||||
return key1 in map and key2 in map[key1]
|
||||
|
||||
|
||||
def _extract_environment_confs(node_configs):
|
||||
"""Returns list of Hadoop parameters which should be passed via environment
|
||||
"""
|
||||
|
||||
lst = []
|
||||
|
||||
for process, proc_confs in ENV_CONFS.items():
|
||||
for param_name, param_format_str in proc_confs.items():
|
||||
if (_keys_exist(node_configs, process, param_name) and
|
||||
not node_configs[process][param_name] is None):
|
||||
lst.append(param_format_str %
|
||||
node_configs[process][param_name])
|
||||
|
||||
return lst
|
||||
|
||||
|
||||
def _extract_xml_confs(node_configs):
|
||||
"""Returns list of Hadoop parameters which should be passed into general
|
||||
configs like core-site.xml
|
||||
"""
|
||||
|
||||
# For now we assume that all parameters outside of ENV_CONFS
|
||||
# are passed to xml files
|
||||
|
||||
lst = []
|
||||
|
||||
for process, proc_confs in node_configs.items():
|
||||
for param_name, param_value in proc_confs.items():
|
||||
if (not _keys_exist(ENV_CONFS, process, param_name) and
|
||||
not param_value is None):
|
||||
lst.append((param_name, param_value))
|
||||
|
||||
return lst
|
||||
|
||||
|
||||
def _analyze_templates(clmap):
|
||||
clmap['master_ip'] = None
|
||||
clmap['slaves'] = []
|
||||
for node in clmap['nodes']:
|
||||
if node['type'] == 'JT+NN':
|
||||
clmap['master_ip'] = node['ip']
|
||||
clmap['master_hostname'] = node['name']
|
||||
node['is_master'] = True
|
||||
elif node['type'] == 'TT+DN':
|
||||
clmap['slaves'].append(node['name'])
|
||||
node['is_master'] = False
|
||||
|
||||
if clmap['master_ip'] is None:
|
||||
raise RuntimeError("No master node is defined in the cluster")
|
||||
|
||||
|
||||
def _generate_hosts(clmap):
|
||||
hosts = "127.0.0.1 localhost\n"
|
||||
for node in clmap['nodes']:
|
||||
hosts += "%s %s\n" % (node['internal_ip'], node['name'])
|
||||
|
||||
clmap['hosts'] = hosts
|
||||
|
||||
|
||||
def _pre_cluster_setup(clmap):
|
||||
_analyze_templates(clmap)
|
||||
_generate_hosts(clmap)
|
||||
|
||||
for node in clmap['nodes']:
|
||||
if node['is_master']:
|
||||
script_file = 'setup-master.sh'
|
||||
else:
|
||||
script_file = 'setup-general.sh'
|
||||
|
||||
templ_args = {
|
||||
'slaves': clmap['slaves'],
|
||||
'master_hostname': clmap['master_hostname'],
|
||||
'env_configs': _extract_environment_confs(node['configs'])
|
||||
}
|
||||
|
||||
node['setup_script'] = _render_template(script_file, **templ_args)
|
||||
node['xml'] = _generate_xml_configs(node, clmap)
|
||||
|
||||
|
||||
def _setup_node(node, clmap):
|
||||
ssh = paramiko.SSHClient()
|
||||
try:
|
||||
_setup_ssh_connection(node['ip'], ssh)
|
||||
sftp = ssh.open_sftp()
|
||||
|
||||
fl = sftp.file('/etc/hosts', 'w')
|
||||
fl.write(clmap['hosts'])
|
||||
fl.close()
|
||||
|
||||
fl = sftp.file('/etc/hadoop/core-site.xml', 'w')
|
||||
fl.write(node['xml']['core-site'])
|
||||
fl.close()
|
||||
|
||||
fl = sftp.file('/etc/hadoop/hdfs-site.xml', 'w')
|
||||
fl.write(node['xml']['hdfs-site'])
|
||||
fl.close()
|
||||
|
||||
fl = sftp.file('/etc/hadoop/mapred-site.xml', 'w')
|
||||
fl.write(node['xml']['mapred-site'])
|
||||
fl.close()
|
||||
|
||||
fl = sftp.file('/tmp/savanna-hadoop-init.sh', 'w')
|
||||
fl.write(node['setup_script'])
|
||||
fl.close()
|
||||
|
||||
sftp.chmod('/tmp/savanna-hadoop-init.sh', 0500)
|
||||
|
||||
ret = _open_channel_and_execute(ssh,
|
||||
'/tmp/savanna-hadoop-init.sh '
|
||||
'>> /tmp/savanna-hadoop-init.log 2>&1')
|
||||
_ensure_zero(ret)
|
||||
finally:
|
||||
ssh.close()
|
||||
|
||||
|
||||
def _register_node(node, cluster):
|
||||
node_obj = Node(node['id'], cluster.id, node['templ_id'])
|
||||
DB.session.add(node_obj)
|
||||
|
||||
if node['is_master']:
|
||||
srv_url_jt = ServiceUrl(cluster.id, 'jobtracker', 'http://%s:50030'
|
||||
% node['ip'])
|
||||
srv_url_nn = ServiceUrl(cluster.id, 'namenode', 'http://%s:50070'
|
||||
% node['ip'])
|
||||
|
||||
DB.session.add(srv_url_jt)
|
||||
DB.session.add(srv_url_nn)
|
||||
|
||||
DB.session.commit()
|
||||
|
||||
|
||||
def _start_cluster(cluster, clmap):
|
||||
ret = _execute_command_on_node(
|
||||
clmap['master_ip'],
|
||||
'su -c start-all.sh hadoop >> /tmp/savanna-hadoop-start-all.log')
|
||||
_ensure_zero(ret)
|
||||
|
||||
LOG.info("Cluster '%s' successfully started!", cluster.name)
|
||||
|
||||
|
||||
def stop_cluster(headers, cluster):
|
||||
nova = novaclient(headers)
|
||||
|
||||
for node in cluster.nodes:
|
||||
try:
|
||||
nova.servers.delete(node.vm_id)
|
||||
LOG.debug("vm '%s' has been stopped", node.vm_id)
|
||||
except Exception, e:
|
||||
LOG.info("Can't stop vm '%s': %s", node.vm_id, e)
|
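To make the configuration flow above concrete, a short sketch of how the removed helpers combine for a single node; the hostname and config values are placeholders:

    node = {'configs': {'name_node': {'heap_size': '896'},
                        'data_node': {'dfs.replication': '2'}}}
    clmap = {'master_hostname': 'demo-master'}

    xml_configs = _generate_xml_configs(node, clmap)
    # xml_configs['core-site'] points fs.default.name at hdfs://demo-master:8020 and
    # xml_configs['hdfs-site'] carries dfs.replication=2; heap_size is an environment
    # parameter, so it is emitted by _extract_environment_confs() instead:
    env_lines = _extract_environment_confs(node['configs'])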
@ -1,296 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from flask import request
|
||||
import functools
|
||||
import jsonschema
|
||||
from oslo.config import cfg
|
||||
|
||||
from savanna import exceptions as ex
|
||||
import savanna.openstack.common.exception as os_ex
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.service import api
|
||||
import savanna.utils.api as api_u
|
||||
from savanna.utils.openstack import nova
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('allow_cluster_ops', 'savanna.config')
|
||||
|
||||
# Base validation schema of cluster creation operation
|
||||
CLUSTER_CREATE_SCHEMA = {
|
||||
"title": "Cluster creation schema",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cluster": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string",
|
||||
"minLength": 1,
|
||||
"maxLength": 50,
|
||||
"pattern": r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]"
|
||||
r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]"
|
||||
r"[A-Za-z0-9\-]*[A-Za-z0-9])$"},
|
||||
"base_image_id": {"type": "string",
|
||||
"minLength": 1,
|
||||
"maxLength": 240},
|
||||
"node_templates": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": ["name", "base_image_id", "node_templates"]
|
||||
}
|
||||
},
|
||||
"required": ["cluster"]
|
||||
}
|
||||
|
||||
# Base validation schema of node template creation operation
|
||||
TEMPLATE_CREATE_SCHEMA = {
|
||||
"title": "Node Template creation schema",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"node_template": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string",
|
||||
"minLength": 1,
|
||||
"maxLength": 240,
|
||||
"pattern": r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-_]"
|
||||
r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]"
|
||||
r"[A-Za-z0-9\-_]*[A-Za-z0-9])$"},
|
||||
"node_type": {"type": "string",
|
||||
"minLength": 1,
|
||||
"maxLength": 240},
|
||||
"flavor_id": {"type": "string",
|
||||
"minLength": 1,
|
||||
"maxLength": 240},
|
||||
"task_tracker": {
|
||||
"type": "object"
|
||||
},
|
||||
"job_tracker": {
|
||||
"type": "object"
|
||||
},
|
||||
"name_node": {
|
||||
"type": "object"
|
||||
},
|
||||
"data_node": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": ["name", "node_type", "flavor_id"]
|
||||
}
|
||||
},
|
||||
"required": ["node_template"]
|
||||
}
|
||||
|
||||
|
||||
def validate(validate_func):
|
||||
def decorator(func):
|
||||
@functools.wraps(func)
|
||||
def handler(*args, **kwargs):
|
||||
try:
|
||||
validate_func(api_u.request_data(), **kwargs)
|
||||
except jsonschema.ValidationError, e:
|
||||
e.code = "VALIDATION_ERROR"
|
||||
return api_u.bad_request(e)
|
||||
except ex.SavannaException, e:
|
||||
return api_u.bad_request(e)
|
||||
except os_ex.MalformedRequestBody, e:
|
||||
e.code = "MALFORMED_REQUEST_BODY"
|
||||
return api_u.bad_request(e)
|
||||
except Exception, e:
|
||||
return api_u.internal_error(
|
||||
500, "Error occurred during validation", e)
|
||||
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return handler
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def exists_by_id(service_func, id_prop, tenant_specific=False):
|
||||
def decorator(func):
|
||||
@functools.wraps(func)
|
||||
def handler(*args, **kwargs):
|
||||
try:
|
||||
if tenant_specific:
|
||||
tenant = request.headers['X-Tenant-Id']
|
||||
service_func(*args, id=kwargs[id_prop], tenant_id=tenant)
|
||||
else:
|
||||
service_func(*args, id=kwargs[id_prop])
|
||||
return func(*args, **kwargs)
|
||||
except ex.NotFoundException, e:
|
||||
e.__init__(kwargs[id_prop])
|
||||
return api_u.not_found(e)
|
||||
except Exception, e:
|
||||
return api_u.internal_error(
|
||||
500, "Unexpected error occurred", e)
|
||||
|
||||
return handler
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def validate_cluster_create(cluster_values):
|
||||
jsonschema.validate(cluster_values, CLUSTER_CREATE_SCHEMA)
|
||||
values = cluster_values['cluster']
|
||||
|
||||
# check that requested cluster name is unique
|
||||
unique_names = [cluster.name for cluster in api.get_clusters()]
|
||||
if values['name'] in unique_names:
|
||||
raise ex.ClusterNameExistedException(values['name'])
|
||||
|
||||
# check that requested templates are from already defined values
|
||||
node_templates = values['node_templates']
|
||||
possible_node_templates = [nt.name for nt in api.get_node_templates()]
|
||||
for nt in node_templates:
|
||||
if nt not in possible_node_templates:
|
||||
raise ex.NodeTemplateNotFoundException(nt)
|
||||
# check node count is integer and non-zero value
|
||||
jsonschema.validate(node_templates[nt],
|
||||
{"type": "integer", "minimum": 1})
|
||||
|
||||
# check that requested cluster contains only 1 instance of NameNode
|
||||
# and 1 instance of JobTracker
|
||||
jt_count = 0
|
||||
nn_count = 0
|
||||
|
||||
for nt_name in node_templates:
|
||||
processes = api.get_node_template(name=nt_name).dict['node_type'][
|
||||
'processes']
|
||||
if "job_tracker" in processes:
|
||||
jt_count += node_templates[nt_name]
|
||||
if "name_node" in processes:
|
||||
nn_count += node_templates[nt_name]
|
||||
|
||||
if nn_count != 1:
|
||||
raise ex.NotSingleNameNodeException(nn_count)
|
||||
|
||||
if jt_count != 1:
|
||||
raise ex.NotSingleJobTrackerException(jt_count)
|
||||
|
||||
if CONF.allow_cluster_ops:
|
||||
image_id = values['base_image_id']
|
||||
nova_images = nova.get_images(request.headers)
|
||||
if image_id not in nova_images:
|
||||
LOG.debug("Could not find %s image in %s", image_id, nova_images)
|
||||
raise ex.ImageNotFoundException(values['base_image_id'])
|
||||
|
||||
# check available Nova absolute limits
|
||||
_check_limits(nova.get_limits(request.headers),
|
||||
values['node_templates'])
|
||||
else:
|
||||
LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag")
|
||||
|
||||
|
||||
def validate_node_template_create(nt_values):
|
||||
jsonschema.validate(nt_values, TEMPLATE_CREATE_SCHEMA)
|
||||
values = nt_values['node_template']
|
||||
|
||||
# check that requested node_template name is unique
|
||||
unique_names = [nt.name for nt in api.get_node_templates()]
|
||||
if values['name'] in unique_names:
|
||||
raise ex.NodeTemplateExistedException(values['name'])
|
||||
|
||||
node_types = [nt.name for nt in api.get_node_types()]
|
||||
|
||||
if values['node_type'] not in node_types:
|
||||
raise ex.NodeTypeNotFoundException(values['node_type'])
|
||||
|
||||
req_procs = []
|
||||
if "TT" in values['node_type']:
|
||||
req_procs.append("task_tracker")
|
||||
if "DN" in values['node_type']:
|
||||
req_procs.append("data_node")
|
||||
if "NN" in values['node_type']:
|
||||
req_procs.append("name_node")
|
||||
if "JT" in values['node_type']:
|
||||
req_procs.append("job_tracker")
|
||||
|
||||
LOG.debug("Required properties are: %s", req_procs)
|
||||
|
||||
jsonschema.validate(values, {"required": req_procs})
|
||||
|
||||
processes = values.copy()
|
||||
del processes['name']
|
||||
del processes['node_type']
|
||||
del processes['flavor_id']
|
||||
|
||||
LOG.debug("Incoming properties are: %s", processes)
|
||||
|
||||
for proc in processes:
|
||||
if proc not in req_procs:
|
||||
raise ex.DiscrepancyNodeProcessException(req_procs)
|
||||
|
||||
req_params = api.get_node_type_required_params(name=values['node_type'])
|
||||
for process in req_params:
|
||||
for param in req_params[process]:
|
||||
if param not in values[process] or not values[process][param]:
|
||||
raise ex.RequiredParamMissedException(process, param)
|
||||
|
||||
all_params = api.get_node_type_all_params(name=values['node_type'])
|
||||
for process in all_params:
|
||||
for param in processes[process]:
|
||||
if param not in all_params[process]:
|
||||
raise ex.ParamNotAllowedException(param, process)
|
||||
|
||||
if api.CONF.allow_cluster_ops:
|
||||
flavor = values['flavor_id']
|
||||
nova_flavors = nova.get_flavors(request.headers)
|
||||
if flavor not in nova_flavors:
|
||||
LOG.debug("Could not find %s flavor in %s", flavor, nova_flavors)
|
||||
raise ex.FlavorNotFoundException(flavor)
|
||||
else:
|
||||
LOG.info("Cluster ops are disabled, use --allow-cluster-ops flag")
|
||||
|
||||
|
||||
def _check_limits(limits, node_templates):
|
||||
all_vcpus = limits['maxTotalCores'] - limits['totalCoresUsed']
|
||||
all_ram = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
|
||||
all_inst = limits['maxTotalInstances'] - limits['totalInstancesUsed']
|
||||
LOG.info("List of available VCPUs: %d, RAM: %d, Instances: %d",
|
||||
all_vcpus, all_ram, all_inst)
|
||||
|
||||
need_vcpus = 0
|
||||
need_ram = 0
|
||||
need_inst = 0
|
||||
for nt_name in node_templates:
|
||||
nt_flavor_name = api.get_node_template(name=nt_name).dict['flavor_id']
|
||||
nt_flavor_count = node_templates[nt_name]
|
||||
LOG.debug("User requested flavor: %s, count: %s",
|
||||
nt_flavor_name, nt_flavor_count)
|
||||
nova_flavor = nova.get_flavor(request.headers, name=nt_flavor_name)
|
||||
LOG.debug("Nova has flavor %s with VCPUs=%d, RAM=%d",
|
||||
nova_flavor.name, nova_flavor.vcpus, nova_flavor.ram)
|
||||
|
||||
need_vcpus += nova_flavor.vcpus * nt_flavor_count
|
||||
need_ram += nova_flavor.ram * nt_flavor_count
|
||||
need_inst += nt_flavor_count
|
||||
|
||||
LOG.info("User requested %d instances with total VCPUs=%d and RAM=%d",
|
||||
need_inst, need_vcpus, need_ram)
|
||||
|
||||
if need_inst > all_inst or need_vcpus > all_vcpus or need_ram > all_ram:
|
||||
raise ex.NotEnoughResourcesException([all_inst, all_vcpus, all_ram,
|
||||
need_inst, need_vcpus, need_ram])
|
||||
|
||||
|
||||
def validate_node_template_terminate(_, template_id):
|
||||
if api.is_node_template_associated(id=template_id):
|
||||
name = api.get_node_template(id=template_id).name
|
||||
raise ex.AssociatedNodeTemplateTerminationException(name)
|
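Outside of the Flask decorators, the removed schema can also be exercised directly with jsonschema; a minimal sketch, with a made-up request body:

    import jsonschema

    body = {
        "cluster": {
            "name": "demo-cluster",
            "base_image_id": "0e0e0e0e-0000-0000-0000-000000000000",
            "node_templates": {"jt_nn.small": 1, "tt_dn.small": 2}
        }
    }

    # Raises jsonschema.ValidationError if the body does not match the schema;
    # validate_cluster_create() layers the uniqueness and NN/JT count checks on top.
    jsonschema.validate(body, CLUSTER_CREATE_SCHEMA)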
@ -1,40 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from flask.ext.sqlalchemy import SQLAlchemy
|
||||
from oslo.config import cfg
|
||||
|
||||
DB = SQLAlchemy()
|
||||
|
||||
opts = [
|
||||
cfg.StrOpt('database_uri',
|
||||
default='sqlite:////tmp/savanna.db',
|
||||
help='URL for sqlalchemy database'),
|
||||
cfg.BoolOpt('echo',
|
||||
default=False,
|
||||
help='Sqlalchemy echo')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(opts, group='sqlalchemy')
|
||||
|
||||
|
||||
def setup_storage(app):
|
||||
app.config['SQLALCHEMY_DATABASE_URI'] = CONF.sqlalchemy.database_uri
|
||||
app.config['SQLALCHEMY_ECHO'] = CONF.sqlalchemy.echo
|
||||
|
||||
DB.app = app
|
||||
DB.init_app(app)
|
||||
DB.create_all()
|
@ -1,119 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.storage.storage import create_node_type, \
|
||||
create_node_template, create_node_process
|
||||
from savanna.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def setup_defaults(reset_db=False, gen_templates=False):
|
||||
nt_jt_nn = None
|
||||
nt_jt = None
|
||||
nt_nn = None
|
||||
nt_tt_dn = None
|
||||
|
||||
if reset_db:
|
||||
# setup default processes
|
||||
p_jt = create_node_process('job_tracker',
|
||||
[('heap_size', True, None),
|
||||
('mapred.job.tracker.handler.count',
|
||||
False, None)])
|
||||
p_nn = create_node_process('name_node',
|
||||
[('heap_size', True, None),
|
||||
('dfs.namenode.handler.count',
|
||||
False, None),
|
||||
('dfs.block.size', False, None),
|
||||
('dfs.replication', False, None)])
|
||||
p_tt = create_node_process('task_tracker',
|
||||
[('heap_size', True, None),
|
||||
('mapred.child.java.opts', False, None),
|
||||
('mapred.map.tasks', False, None),
|
||||
('mapred.tasktracker.map.tasks.maximum',
|
||||
False, None),
|
||||
('mapred.reduce.tasks', False, None),
|
||||
('mapred.tasktracker.reduce.tasks.maximum',
|
||||
False, None)])
|
||||
|
||||
p_dn = create_node_process('data_node',
|
||||
[('heap_size', True, None),
|
||||
('dfs.datanode.max.xcievers', False, None),
|
||||
('dfs.block.size', False, None),
|
||||
('dfs.replication', False, None),
|
||||
('dfs.datanode.handler.count',
|
||||
False, None)])
|
||||
|
||||
for p in [p_jt, p_nn, p_tt, p_dn]:
|
||||
LOG.info('New NodeProcess: \'%s\'', p.name)
|
||||
|
||||
# setup default node types
|
||||
nt_jt_nn = create_node_type('JT+NN', [p_jt, p_nn])
|
||||
nt_jt = create_node_type('JT', [p_jt])
|
||||
nt_nn = create_node_type('NN', [p_nn])
|
||||
nt_tt_dn = create_node_type('TT+DN', [p_tt, p_dn])
|
||||
|
||||
for nt in [nt_jt_nn, nt_jt, nt_nn, nt_tt_dn]:
|
||||
LOG.info('New NodeType: \'%s\' %s',
|
||||
nt.name, [p.name.__str__() for p in nt.processes])
|
||||
|
||||
if gen_templates:
|
||||
_generate_templates(nt_jt_nn, nt_jt, nt_nn, nt_tt_dn)
|
||||
|
||||
LOG.info('All defaults have been inserted')
|
||||
|
||||
|
||||
def _generate_templates(nt_jt_nn, nt_jt, nt_nn, nt_tt_dn):
|
||||
jt_nn_small = create_node_template('jt_nn.small', nt_jt_nn.id, 'm1.small',
|
||||
{
|
||||
'job_tracker': {
|
||||
'heap_size': '896'
|
||||
},
|
||||
'name_node': {
|
||||
'heap_size': '896'
|
||||
}
|
||||
})
|
||||
jt_nn_medium = create_node_template('jt_nn.medium', nt_jt_nn.id,
|
||||
'm1.medium',
|
||||
{
|
||||
'job_tracker': {
|
||||
'heap_size': '1792'
|
||||
},
|
||||
'name_node': {
|
||||
'heap_size': '1792'
|
||||
}
|
||||
})
|
||||
tt_dn_small = create_node_template('tt_dn.small', nt_tt_dn.id, 'm1.small',
|
||||
{
|
||||
'task_tracker': {
|
||||
'heap_size': '896'
|
||||
},
|
||||
'data_node': {
|
||||
'heap_size': '896'
|
||||
}
|
||||
})
|
||||
tt_dn_medium = create_node_template('tt_dn.medium', nt_tt_dn.id,
|
||||
'm1.medium',
|
||||
{
|
||||
'task_tracker': {
|
||||
'heap_size': '1792'
|
||||
},
|
||||
'data_node': {
|
||||
'heap_size': '1792'
|
||||
}
|
||||
})
|
||||
|
||||
for tmpl in [jt_nn_small, jt_nn_medium, tt_dn_small, tt_dn_medium]:
|
||||
LOG.info('New NodeTemplate: \'%s\' %s', tmpl.name, tmpl.flavor_id)
|
@ -1,232 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import uuid
|
||||
|
||||
from savanna.storage.db import DB
|
||||
|
||||
|
||||
class NodeTemplate(DB.Model):
|
||||
__tablename__ = 'NodeTemplate'
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
name = DB.Column(DB.String(80), unique=True, nullable=False)
|
||||
node_type_id = DB.Column(DB.String(36), DB.ForeignKey('NodeType.id'),
|
||||
nullable=False)
|
||||
flavor_id = DB.Column(DB.String(36), nullable=False)
|
||||
|
||||
node_template_configs = DB.relationship('NodeTemplateConfig',
|
||||
cascade="all,delete",
|
||||
backref='node_template')
|
||||
cluster_node_counts = DB.relationship('ClusterNodeCount',
|
||||
cascade="all,delete",
|
||||
backref='node_template')
|
||||
nodes = DB.relationship('Node', cascade="all,delete",
|
||||
backref='node_template')
|
||||
|
||||
def __init__(self, name, node_type_id, flavor_id):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.name = name
|
||||
self.node_type_id = node_type_id
|
||||
self.flavor_id = flavor_id
|
||||
|
||||
def __repr__(self):
|
||||
return '<NodeTemplate %s / %s>' % (self.name, self.node_type_id)
|
||||
|
||||
|
||||
class Cluster(DB.Model):
|
||||
__tablename__ = 'Cluster'
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
name = DB.Column(DB.String(80), unique=True, nullable=False)
|
||||
base_image_id = DB.Column(DB.String(36), nullable=False)
|
||||
status = DB.Column(DB.String(80))
|
||||
tenant_id = DB.Column(DB.String(36), nullable=False)
|
||||
|
||||
nodes = DB.relationship('Node', cascade="all,delete", backref='cluster')
|
||||
service_urls = DB.relationship('ServiceUrl', cascade="all,delete",
|
||||
backref='cluster')
|
||||
node_counts = DB.relationship('ClusterNodeCount', cascade="all,delete",
|
||||
backref='cluster')
|
||||
|
||||
# node_templates: [(node_template_id, count), ...]
|
||||
|
||||
def __init__(self, name, base_image_id, tenant_id, status=None):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.name = name
|
||||
self.base_image_id = base_image_id
|
||||
if not status:
|
||||
status = 'Starting'
|
||||
self.status = status
|
||||
self.tenant_id = tenant_id
|
||||
|
||||
def __repr__(self):
|
||||
return '<Cluster %s / %s>' % (self.name, self.status)
|
||||
|
||||
|
||||
NODE_TYPE_NODE_PROCESS = DB.Table('NodeType_NodeProcess', DB.metadata,
|
||||
DB.Column('node_type_id', DB.String(36),
|
||||
DB.ForeignKey('NodeType.id')),
|
||||
DB.Column('node_process_id', DB.String(36),
|
||||
DB.ForeignKey('NodeProcess.id')))
|
||||
|
||||
|
||||
class NodeType(DB.Model):
|
||||
__tablename__ = 'NodeType'
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
name = DB.Column(DB.String(80), unique=True, nullable=False)
|
||||
processes = DB.relationship('NodeProcess',
|
||||
cascade="all,delete",
|
||||
secondary=NODE_TYPE_NODE_PROCESS,
|
||||
backref='node_types')
|
||||
node_templates = DB.relationship('NodeTemplate', cascade="all,delete",
|
||||
backref='node_type')
|
||||
|
||||
def __init__(self, name):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.name = name
|
||||
|
||||
def __repr__(self):
|
||||
return '<NodeType %s>' % self.name
|
||||
|
||||
|
||||
class NodeProcess(DB.Model):
|
||||
__tablename__ = 'NodeProcess'
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
name = DB.Column(DB.String(80), unique=True, nullable=False)
|
||||
node_process_properties = DB.relationship('NodeProcessProperty',
|
||||
cascade="all,delete",
|
||||
backref='node_process')
|
||||
|
||||
def __init__(self, name):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.name = name
|
||||
|
||||
def __repr__(self):
|
||||
return '<NodeProcess %s>' % self.name
|
||||
|
||||
|
||||
class NodeProcessProperty(DB.Model):
|
||||
__tablename__ = 'NodeProcessProperty'
|
||||
__table_args__ = (
|
||||
DB.UniqueConstraint('node_process_id', 'name'),
|
||||
)
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
node_process_id = DB.Column(DB.String(36), DB.ForeignKey('NodeProcess.id'))
|
||||
name = DB.Column(DB.String(80), nullable=False)
|
||||
required = DB.Column(DB.Boolean, nullable=False)
|
||||
default = DB.Column(DB.String(36))
|
||||
node_template_configs = DB.relationship('NodeTemplateConfig',
|
||||
cascade="all,delete",
|
||||
backref='node_process_property')
|
||||
|
||||
def __init__(self, node_process_id, name, required=True, default=None):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.node_process_id = node_process_id
|
||||
self.name = name
|
||||
self.required = required
|
||||
self.default = default
|
||||
|
||||
def __repr__(self):
|
||||
return '<NodeProcessProperty %s>' % self.name
|
||||
|
||||
|
||||
class NodeTemplateConfig(DB.Model):
|
||||
__tablename__ = 'NodeTemplateConfig'
|
||||
__table_args__ = (
|
||||
DB.UniqueConstraint('node_template_id', 'node_process_property_id'),
|
||||
)
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
node_template_id = DB.Column(
|
||||
DB.String(36),
|
||||
DB.ForeignKey('NodeTemplate.id'))
|
||||
node_process_property_id = DB.Column(
|
||||
DB.String(36),
|
||||
DB.ForeignKey('NodeProcessProperty.id'))
|
||||
value = DB.Column(DB.String(36))
|
||||
|
||||
def __init__(self, node_template_id, node_process_property_id, value):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.node_template_id = node_template_id
|
||||
self.node_process_property_id = node_process_property_id
|
||||
self.value = value
|
||||
|
||||
def __repr__(self):
|
||||
return '<NodeTemplateConfig %s.%s / %s>' \
|
||||
% (self.node_template_id, self.node_process_property_id,
|
||||
self.value)
|
||||
|
||||
|
||||
class ClusterNodeCount(DB.Model):
|
||||
__tablename__ = 'ClusterNodeCount'
|
||||
__table_args__ = (
|
||||
DB.UniqueConstraint('cluster_id', 'node_template_id'),
|
||||
)
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
cluster_id = DB.Column(DB.String(36), DB.ForeignKey('Cluster.id'))
|
||||
node_template_id = DB.Column(DB.String(36),
|
||||
DB.ForeignKey('NodeTemplate.id'))
|
||||
count = DB.Column(DB.Integer, nullable=False)
|
||||
|
||||
def __init__(self, cluster_id, node_template_id, count):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.cluster_id = cluster_id
|
||||
self.node_template_id = node_template_id
|
||||
self.count = count
|
||||
|
||||
def __repr__(self):
|
||||
return '<ClusterNodeCount %s / %s>' \
|
||||
% (self.node_template_id, self.count)
|
||||
|
||||
|
||||
class Node(DB.Model):
|
||||
__tablename__ = 'Node'
|
||||
|
||||
# do we need own id?
|
||||
vm_id = DB.Column(DB.String(36), primary_key=True)
|
||||
cluster_id = DB.Column(DB.String(36), DB.ForeignKey('Cluster.id'))
|
||||
node_template_id = DB.Column(DB.String(36),
|
||||
DB.ForeignKey('NodeTemplate.id'))
|
||||
|
||||
def __init__(self, vm_id, cluster_id, node_template_id):
|
||||
self.vm_id = vm_id
|
||||
self.cluster_id = cluster_id
|
||||
self.node_template_id = node_template_id
|
||||
|
||||
def __repr__(self):
|
||||
return '<Node based on %s>' % self.node_template.name
|
||||
|
||||
|
||||
class ServiceUrl(DB.Model):
|
||||
__tablename__ = 'ServiceUrl'
|
||||
|
||||
id = DB.Column(DB.String(36), primary_key=True)
|
||||
cluster_id = DB.Column(DB.String(36), DB.ForeignKey('Cluster.id'))
|
||||
name = DB.Column(DB.String(80))
|
||||
url = DB.Column(DB.String(80), nullable=False)
|
||||
|
||||
def __init__(self, cluster_id, name, url):
|
||||
self.id = uuid.uuid4().hex
|
||||
self.cluster_id = cluster_id
|
||||
self.name = name
|
||||
self.url = url
|
||||
|
||||
def __repr__(self):
|
||||
return '<ServiceUrl %s / %s>' % (self.name, self.url)
|
@ -1,163 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.storage.db import DB
|
||||
|
||||
from savanna.storage.models import NodeTemplate, NodeProcess, Cluster, \
|
||||
ClusterNodeCount, NodeTemplateConfig, NodeType, NodeProcessProperty
|
||||
|
||||
|
||||
## Node Template ops:
|
||||
|
||||
def get_node_template(**args):
|
||||
return NodeTemplate.query.filter_by(**args).first()
|
||||
|
||||
|
||||
def get_node_templates(**args):
|
||||
return NodeTemplate.query.filter_by(**args).all()
|
||||
|
||||
|
||||
def is_node_template_associated(**args):
|
||||
nt = get_node_template(**args)
|
||||
return nt and (len(nt.nodes) or len(nt.cluster_node_counts))
|
||||
|
||||
|
||||
def create_node_template(name, node_type_id, flavor_id, configs):
|
||||
"""Creates new node templates.
|
||||
|
||||
:param name: template name
|
||||
:param node_type_id: node type
|
||||
:param flavor_id: flavor
|
||||
:param configs: dict of process->property->value
|
||||
:return: created node template
|
||||
"""
|
||||
node_template = NodeTemplate(name, node_type_id, flavor_id)
|
||||
DB.session.add(node_template)
|
||||
for process_name in configs:
|
||||
process = NodeProcess.query.filter_by(name=process_name).first()
|
||||
conf = configs.get(process_name)
|
||||
for prop in process.node_process_properties:
|
||||
val = conf.get(prop.name, None)
|
||||
if not val and prop.required:
|
||||
if not prop.default:
|
||||
raise RuntimeError('Template \'%s\', value missing '
|
||||
'for required param: %s %s'
|
||||
% (name, process.name, prop.name))
|
||||
val = prop.default
|
||||
DB.session.add(NodeTemplateConfig(node_template.id, prop.id, val))
|
||||
DB.session.commit()
|
||||
|
||||
return node_template
|
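# Illustrative usage sketch (not part of the original module; the template and
# flavor names below are hypothetical): configs follows the documented
# process -> property -> value shape.
def _example_create_jt_nn_template():
    jt_nn_type = get_node_type(name='JT+NN')
    return create_node_template('jt_nn.example', jt_nn_type.id, 'm1.large',
                                {
                                    'job_tracker': {'heap_size': '2048'},
                                    'name_node': {'heap_size': '2048'}
                                })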
||||
|
||||
|
||||
def terminate_node_template(**args):
|
||||
template = get_node_template(**args)
|
||||
if template:
|
||||
DB.session.delete(template)
|
||||
DB.session.commit()
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
## Cluster ops:
|
||||
|
||||
def get_cluster(**args):
|
||||
return Cluster.query.filter_by(**args).first()
|
||||
|
||||
|
||||
def get_clusters(**args):
|
||||
return Cluster.query.filter_by(**args).all()
|
||||
|
||||
|
||||
def create_cluster(name, base_image_id, tenant_id, templates):
|
||||
"""Creates new cluster.
|
||||
|
||||
:param name: cluster name
|
||||
:param base_image_id: base image
|
||||
:param tenant_id: tenant
|
||||
:param templates: dict of template->count
|
||||
:return: created cluster
|
||||
"""
|
||||
cluster = Cluster(name, base_image_id, tenant_id)
|
||||
DB.session.add(cluster)
|
||||
for template in templates:
|
||||
count = templates.get(template)
|
||||
template_id = get_node_template(name=template).id
|
||||
cnc = ClusterNodeCount(cluster.id, template_id, int(count))
|
||||
DB.session.add(cnc)
|
||||
DB.session.commit()
|
||||
|
||||
return cluster
|
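# Illustrative usage sketch (the cluster name, image id and tenant id are
# hypothetical): templates maps an existing node template name to the number
# of instances to start, as documented above.
def _example_create_small_cluster():
    return create_cluster('example-cluster', 'base-image-id', 'tenant-id-1',
                          {'jt_nn.small': 1, 'tt_dn.small': 2})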
||||
|
||||
|
||||
def terminate_cluster(**args):
|
||||
cluster = get_cluster(**args)
|
||||
DB.session.delete(cluster)
|
||||
DB.session.commit()
|
||||
|
||||
|
||||
def update_cluster_status(new_status, **args):
|
||||
cluster = Cluster.query.filter_by(**args).first()
|
||||
cluster.status = new_status
|
||||
DB.session.add(cluster)
|
||||
DB.session.commit()
|
||||
|
||||
return cluster
|
||||
|
||||
|
||||
## Node Process ops:
|
||||
|
||||
def create_node_process(name, properties):
|
||||
"""Creates new node process and node process properties.
|
||||
|
||||
:param name: process name
|
||||
:param properties: array of triples (name, required, default)
|
||||
:return: created node process
|
||||
"""
|
||||
process = NodeProcess(name)
|
||||
DB.session.add(process)
|
||||
DB.session.commit()
|
||||
for p in properties:
|
||||
prop = NodeProcessProperty(process.id, p[0], p[1], p[2])
|
||||
DB.session.add(prop)
|
||||
DB.session.commit()
|
||||
|
||||
return process
|
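# Illustrative usage sketch (the process and property names are examples only):
# each property is a (name, required, default) triple as documented above.
def _example_create_secondary_nn_process():
    return create_node_process('secondary_name_node',
                               [('heap_size', True, None),
                                ('fs.checkpoint.period', False, '3600')])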
||||
|
||||
|
||||
## Node Type ops:
|
||||
|
||||
def get_node_type(**args):
|
||||
return NodeType.query.filter_by(**args).first()
|
||||
|
||||
|
||||
def get_node_types(**args):
|
||||
return NodeType.query.filter_by(**args).all()
|
||||
|
||||
|
||||
def create_node_type(name, processes):
|
||||
"""Creates new node type using specified list of processes
|
||||
|
||||
:param name: node type name
|
||||
:param processes: list of NodeProcess objects
|
||||
:return: created node type
|
||||
"""
|
||||
node_type = NodeType(name)
|
||||
node_type.processes = processes
|
||||
DB.session.add(node_type)
|
||||
DB.session.commit()
|
||||
|
||||
return node_type
|
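# Illustrative usage sketch (hypothetical type name): processes is a list of
# NodeProcess objects, typically looked up or created via create_node_process.
def _example_create_nn_only_type():
    p_nn = NodeProcess.query.filter_by(name='name_node').first()
    return create_node_type('NN-only', [p_nn])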
@ -1,17 +0,0 @@
|
||||
Integration tests for Savanna project
|
||||
=====================================
|
||||
|
||||
How to run
|
||||
----------
|
||||
|
||||
Create config file for integration tests - `/savanna/tests/integration/config.py`.
|
||||
You can take a look at the sample config file - `/savanna/tests/integration/config.py.sample`.
|
||||
All values used in the sample config file are defaults, so if they are applicable for your
|
||||
environment you can skip creating the config file.
|
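A minimal `config.py` only needs to override the values that differ from the defaults
picked up by `savanna/tests/integration/parameters.py`. A short sketch (the credentials,
addresses and ids below are placeholders, not real values):

    OS_USERNAME = 'admin'
    OS_PASSWORD = 'secret'
    OS_AUTH_URL = 'http://127.0.0.1:35357/v2.0/'
    SAVANNA_HOST = '127.0.0.1'
    IMAGE_ID = 'your-image-id'
    FLAVOR_ID = '2'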
||||
|
||||
To run integration tests you should use the corresponding tox env: `tox -e integration`.
|
||||
|
||||
Contents
|
||||
--------
|
||||
|
||||
TBD
|
@ -1,24 +0,0 @@
|
||||
OS_USERNAME = 'admin' # username for nova
|
||||
OS_PASSWORD = 'password' # password for nova
|
||||
OS_TENANT_NAME = 'admin'
|
||||
OS_AUTH_URL = 'http://192.168.1.1:35357/v2.0/' # URL for keystone
|
||||
|
||||
SAVANNA_HOST = '192.168.1.1' # IP for Savanna API
|
||||
SAVANNA_PORT = '8080' # port for Savanna API
|
||||
|
||||
IMAGE_ID = '42' # ID for instance image
|
||||
FLAVOR_ID = 'abc'
|
||||
|
||||
IP_PREFIX = '172.' # prefix of the IP addresses used to ssh into worker nodes
|
||||
|
||||
NODE_USERNAME = 'username' # username for master node
|
||||
NODE_PASSWORD = 'password' # password for master node
|
||||
|
||||
CLUSTER_NAME_CRUD = 'cluster-name-crud' # cluster name for crud operations
|
||||
CLUSTER_NAME_HADOOP = 'cluster-name-hadoop' # cluster name for hadoop testing
|
||||
|
||||
TIMEOUT = 15 # cluster creation timeout (in minutes)
|
||||
|
||||
HADOOP_VERSION = '1.1.1'
|
||||
HADOOP_DIRECTORY = '/usr/share/hadoop'
|
||||
HADOOP_LOG_DIRECTORY = '/mnt/log/hadoop/hadoop/userlogs'
|
@ -1,244 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import eventlet
|
||||
import json
|
||||
from keystoneclient.v2_0 import Client as keystone_client
|
||||
import requests
|
||||
import savanna.tests.integration.parameters as param
|
||||
import unittest
|
||||
|
||||
|
||||
class ITestCase(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.port = param.SAVANNA_PORT
|
||||
self.host = param.SAVANNA_HOST
|
||||
|
||||
self.maxDiff = None
|
||||
|
||||
self.baseurl = 'http://' + self.host + ':' + self.port
|
||||
|
||||
self.keystone = keystone_client(
|
||||
username=param.OS_USERNAME,
|
||||
password=param.OS_PASSWORD,
|
||||
tenant_name=param.OS_TENANT_NAME,
|
||||
auth_url=param.OS_AUTH_URL
|
||||
)
|
||||
|
||||
self.tenant = self.keystone.tenant_id
|
||||
self.token = self.keystone.auth_token
|
||||
|
||||
self.flavor_id = param.FLAVOR_ID
|
||||
self.image_id = param.IMAGE_ID
|
||||
|
||||
self.url_nt = '/v0.2/%s/node-templates' % self.tenant
|
||||
self.url_nt_with_slash = '/v0.2/%s/node-templates/' % self.tenant
|
||||
self.url_cluster = '/v0.2/%s/clusters' % self.tenant
|
||||
self.url_cl_with_slash = '/v0.2/%s/clusters/' % self.tenant
|
||||
|
||||
#----------------------CRUD_commands-------------------------------------------
|
||||
|
||||
def post(self, url, body):
|
||||
URL = self.baseurl + url
|
||||
resp = requests.post(URL, data=body, headers={
|
||||
'x-auth-token': self.token, 'Content-Type': 'application/json'})
|
||||
data = json.loads(resp.content) if resp.status_code == 202 \
|
||||
else resp.content
|
||||
print('URL = %s\ndata = %s\nresponse = %s\ndata = %s\n'
|
||||
% (URL, body, resp.status_code, data))
|
||||
return resp
|
||||
|
||||
def put(self, url, body):
|
||||
URL = self.baseurl + url
|
||||
resp = requests.put(URL, data=body, headers={
|
||||
'x-auth-token': self.token, 'Content-Type': 'application/json'})
|
||||
data = json.loads(resp.content)
|
||||
print('URL = %s\ndata = %s\nresponse = %s\ndata = %s\n'
|
||||
% (URL, body, resp.status_code, data))
|
||||
return resp
|
||||
|
||||
def get(self, url, printing):
|
||||
URL = self.baseurl + url
|
||||
resp = requests.get(URL, headers={'x-auth-token': self.token})
|
||||
if printing:
|
||||
print('URL = %s\nresponse = %s\n' % (URL, resp.status_code))
|
||||
if resp.status_code != 200:
|
||||
data = json.loads(resp.content)
|
||||
print('data= %s\n' % data)
|
||||
return resp
|
||||
|
||||
def delete(self, url):
|
||||
URL = self.baseurl + url
|
||||
resp = requests.delete(URL, headers={'x-auth-token': self.token})
|
||||
print('URL = %s\nresponse = %s\n' % (URL, resp.status_code))
|
||||
if resp.status_code != 204:
|
||||
data = json.loads(resp.content)
|
||||
print('data= %s\n' % data)
|
||||
return resp
|
||||
|
||||
def _post_object(self, url, body, code):
|
||||
post = self.post(url, json.dumps(body))
|
||||
self.assertEquals(post.status_code, code)
|
||||
data = json.loads(post.content)
|
||||
return data
|
||||
|
||||
def _get_object(self, url, obj_id, code, printing=False):
|
||||
rv = self.get(url + obj_id, printing)
|
||||
self.assertEquals(rv.status_code, code)
|
||||
data = json.loads(rv.content)
|
||||
return data
|
||||
|
||||
def _del_object(self, url, obj_id, code):
|
||||
rv = self.delete(url + obj_id)
|
||||
self.assertEquals(rv.status_code, code)
|
||||
if rv.status_code != 204:
|
||||
data = json.loads(rv.content)
|
||||
return data
|
||||
else:
|
||||
code = self.delete(url + obj_id).status_code
|
||||
while code != 404:
|
||||
eventlet.sleep(1)
|
||||
code = self.delete(url + obj_id).status_code
|
||||
|
||||
#----------------------other_commands------------------------------------------
|
||||
|
||||
def _get_body_nt(self, name, nt_type, hs1, hs2):
|
||||
node = 'name' if nt_type in ['JT+NN', 'NN'] else 'data'
|
||||
tracker = 'job' if nt_type in ['JT+NN', 'JT'] else 'task'
|
||||
processes_name = nt_type
|
||||
nt = {
|
||||
u'name': u'%s.%s' % (name, param.FLAVOR_ID),
|
||||
u'%s_node' % node: {u'heap_size': u'%d' % hs1},
|
||||
u'%s_tracker' % tracker: {u'heap_size': u'%d' % hs2},
|
||||
u'node_type': {
|
||||
u'processes': [u'%s_tracker' % tracker,
|
||||
u'%s_node' % node],
|
||||
u'name': u'%s' % processes_name},
|
||||
u'flavor_id': u'%s' % self.flavor_id
|
||||
}
|
||||
if nt_type == 'NN':
|
||||
del nt[u'%s_tracker' % tracker]
|
||||
nt[u'node_type'][u'processes'] = [u'%s_node' % node]
|
||||
elif nt_type == 'JT':
|
||||
del nt[u'%s_node' % node]
|
||||
nt[u'node_type'][u'processes'] = [u'%s_tracker' % tracker]
|
||||
return nt
|
||||
|
||||
def _get_body_cluster(self, name, master_name, worker_name, node_number):
|
||||
return {
|
||||
u'status': u'Starting',
|
||||
u'service_urls': {},
|
||||
u'name': u'%s' % name,
|
||||
u'base_image_id': u'%s' % self.image_id,
|
||||
u'node_templates':
|
||||
{
|
||||
u'%s.%s' % (master_name, param.FLAVOR_ID): 1,
|
||||
u'%s.%s' % (worker_name, param.FLAVOR_ID): node_number
|
||||
},
|
||||
u'nodes': []
|
||||
}
|
||||
|
||||
def change_field_nt(self, data, old_field, new_field):
|
||||
val = data['node_template'][old_field]
|
||||
del data['node_template'][old_field]
|
||||
data['node_template'][new_field] = val
|
||||
return data
|
||||
|
||||
def make_nt(self, nt_name, node_type, jt_heap_size, nn_heap_size):
|
||||
nt = dict(
|
||||
node_template=dict(
|
||||
name='%s.%s' % (nt_name, param.FLAVOR_ID),
|
||||
node_type='JT+NN',
|
||||
flavor_id=self.flavor_id,
|
||||
job_tracker={
|
||||
'heap_size': '%d' % jt_heap_size
|
||||
},
|
||||
name_node={
|
||||
'heap_size': '%d' % nn_heap_size
|
||||
}
|
||||
))
|
||||
if node_type == 'TT+DN':
|
||||
nt['node_template']['node_type'] = 'TT+DN'
|
||||
nt = self.change_field_nt(nt, 'job_tracker', 'task_tracker')
|
||||
nt = self.change_field_nt(nt, 'name_node', 'data_node')
|
||||
elif node_type == 'NN':
|
||||
nt['node_template']['node_type'] = 'NN'
|
||||
del nt['node_template']['job_tracker']
|
||||
elif node_type == 'JT':
|
||||
nt['node_template']['node_type'] = 'JT'
|
||||
del nt['node_template']['name_node']
|
||||
return nt
|
||||
|
||||
def make_cluster_body(self, cluster_name, name_master_node,
|
||||
name_worker_node, number_workers):
|
||||
body = dict(
|
||||
cluster=dict(
|
||||
name=cluster_name,
|
||||
base_image_id=self.image_id,
|
||||
node_templates={
|
||||
'%s.%s' % (name_master_node, param.FLAVOR_ID): 1,
|
||||
'%s.%s' %
|
||||
(name_worker_node, param.FLAVOR_ID): number_workers
|
||||
}
|
||||
))
|
||||
return body
|
||||
|
||||
def delete_node_template(self, data):
|
||||
data = data['node_template']
|
||||
object_id = data.pop(u'id')
|
||||
self._del_object(self.url_nt_with_slash, object_id, 204)
|
||||
|
||||
def _crud_object(self, body, get_body, url):
|
||||
data = self._post_object(url, body, 202)
|
||||
get_url = None
|
||||
object_id = None
|
||||
try:
|
||||
obj = 'node_template' if url == self.url_nt else 'cluster'
|
||||
get_url = self.url_nt_with_slash if url == self.url_nt \
|
||||
else self.url_cl_with_slash
|
||||
data = data['%s' % obj]
|
||||
object_id = data.pop(u'id')
|
||||
self.assertEquals(data, get_body)
|
||||
get_data = self._get_object(get_url, object_id, 200)
|
||||
get_data = get_data['%s' % obj]
|
||||
del get_data[u'id']
|
||||
if obj == 'cluster':
|
||||
self._await_cluster_active(
|
||||
get_body, get_data, get_url, object_id)
|
||||
except Exception as e:
|
||||
self.fail('failure: ' + str(e))
|
||||
finally:
|
||||
self._del_object(get_url, object_id, 204)
|
||||
return object_id
|
||||
|
||||
def _await_cluster_active(self, get_body, get_data, get_url, object_id):
|
||||
get_body[u'status'] = u'Active'
|
||||
del get_body[u'service_urls']
|
||||
del get_body[u'nodes']
|
||||
i = 1
|
||||
while get_data[u'status'] != u'Active':
|
||||
if i > int(param.TIMEOUT) * 6:
|
||||
self.fail(
|
||||
'cluster did not go from Starting to Active within %s minutes'
|
||||
% param.TIMEOUT)
|
||||
get_data = self._get_object(get_url, object_id, 200)
|
||||
get_data = get_data['cluster']
|
||||
del get_data[u'id']
|
||||
del get_data[u'service_urls']
|
||||
del get_data[u'nodes']
|
||||
eventlet.sleep(10)
|
||||
i += 1
|
||||
self.assertEquals(get_data, get_body)
|
@ -1,33 +0,0 @@
|
||||
import savanna.openstack.common.importutils as importutils
|
||||
|
||||
_CONF = importutils.try_import('savanna.tests.integration.config')
|
||||
|
||||
|
||||
def _get_conf(key, default):
|
||||
return getattr(_CONF, key) if _CONF and hasattr(_CONF, key) else default
|
||||
|
||||
OS_USERNAME = _get_conf('OS_USERNAME', 'admin')
|
||||
OS_PASSWORD = _get_conf('OS_PASSWORD', 'password')
|
||||
OS_TENANT_NAME = _get_conf('OS_TENANT_NAME', 'admin')
|
||||
OS_AUTH_URL = _get_conf('OS_AUTH_URL', 'http://localhost:35357/v2.0/')
|
||||
|
||||
SAVANNA_HOST = _get_conf('SAVANNA_HOST', '192.168.1.1')
|
||||
SAVANNA_PORT = _get_conf('SAVANNA_PORT', '8080')
|
||||
|
||||
IMAGE_ID = _get_conf('IMAGE_ID', '42')
|
||||
FLAVOR_ID = _get_conf('FLAVOR_ID', 'abc')
|
||||
|
||||
NODE_USERNAME = _get_conf('NODE_USERNAME', 'username')
|
||||
NODE_PASSWORD = _get_conf('NODE_PASSWORD', 'password')
|
||||
|
||||
CLUSTER_NAME_CRUD = _get_conf('CLUSTER_NAME_CRUD', 'cluster-crud')
|
||||
CLUSTER_NAME_HADOOP = _get_conf('CLUSTER_NAME_HADOOP', 'cluster-hadoop')
|
||||
|
||||
IP_PREFIX = _get_conf('IP_PREFIX', '10.')
|
||||
|
||||
TIMEOUT = _get_conf('TIMEOUT', '15')
|
||||
|
||||
HADOOP_VERSION = _get_conf('HADOOP_VERSION', '1.1.1')
|
||||
HADOOP_DIRECTORY = _get_conf('HADOOP_DIRECTORY', '/usr/share/hadoop')
|
||||
HADOOP_LOG_DIRECTORY = _get_conf('HADOOP_LOG_DIRECTORY',
|
||||
'/mnt/log/hadoop/hadoop/userlogs')
|
@ -1,170 +0,0 @@
|
||||
#!/bin/bash
|
||||
#touch script.sh && chmod +x script.sh && vim script.sh
|
||||
|
||||
dir=/outputTestMapReduce
|
||||
log=$dir/log.txt
|
||||
|
||||
case $1 in
|
||||
mr)
|
||||
FUNC="map_reduce"
|
||||
;;
|
||||
pi)
|
||||
FUNC="run_pi_job"
|
||||
;;
|
||||
gn)
|
||||
FUNC="get_job_name"
|
||||
;;
|
||||
lt)
|
||||
FUNC="get_list_active_trackers"
|
||||
;;
|
||||
ld)
|
||||
FUNC="get_list_active_datanodes"
|
||||
;;
|
||||
ed)
|
||||
FUNC=" check_exist_directory"
|
||||
;;
|
||||
esac
|
||||
|
||||
shift
|
||||
|
||||
until [ -z $1 ]
|
||||
do
|
||||
if [ "$1" = "-nc" ]
|
||||
then
|
||||
NODE_COUNT="$2"
|
||||
shift
|
||||
fi
|
||||
|
||||
if [ "$1" = "-jn" ]
|
||||
then
|
||||
JOB_NAME="$2"
|
||||
shift
|
||||
fi
|
||||
|
||||
if [ "$1" = "-hv" ]
|
||||
then
|
||||
HADOOP_VERSION="$2"
|
||||
shift
|
||||
fi
|
||||
|
||||
if [ "$1" = "-hd" ]
|
||||
then
|
||||
HADOOP_DIRECTORY="$2"
|
||||
shift
|
||||
fi
|
||||
|
||||
if [ "$1" = "-hld" ]
|
||||
then
|
||||
HADOOP_LOG_DIRECTORY="$2"
|
||||
shift
|
||||
fi
|
||||
|
||||
|
||||
shift
|
||||
done
|
||||
|
||||
f_var_check() {
|
||||
case "$1" in
|
||||
v_node_count)
|
||||
if [ -z "$NODE_COUNT" ]
|
||||
then
|
||||
echo "count_of_node_not_specified"
|
||||
exit 0
|
||||
fi
|
||||
;;
|
||||
v_job_name)
|
||||
if [ -z "$JOB_NAME" ]
|
||||
then
|
||||
echo "job_name_not_specified"
|
||||
exit 0
|
||||
fi
|
||||
;;
|
||||
v_hadoop_version)
|
||||
if [ -z "$HADOOP_VERSION" ]
|
||||
then
|
||||
echo "hadoop_version_not_specified"
|
||||
exit 0
|
||||
fi
|
||||
;;
|
||||
v_hadoop_directory)
|
||||
if [ -z "$HADOOP_DIRECTORY" ]
|
||||
then
|
||||
echo "hadoop_directory_not_specified"
|
||||
exit 0
|
||||
fi
|
||||
;;
|
||||
v_hadoop_log_directory)
|
||||
if [ -z "$HADOOP_LOG_DIRECTORY" ]
|
||||
then
|
||||
echo "hadoop_log_directory_not_specified"
|
||||
exit 0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
f_create_log_dir() {
|
||||
rm -r $dir 2>/dev/null
|
||||
mkdir $dir
|
||||
chmod -R 777 $dir
|
||||
touch $log
|
||||
}
|
||||
|
||||
map_reduce() {
|
||||
f_create_log_dir
|
||||
f_var_check v_hadoop_version
|
||||
f_var_check v_hadoop_directory
|
||||
echo "[------ dpkg------]">>$log
|
||||
echo `dpkg --get-selections | grep hadoop` >>$log
|
||||
echo "[------jps------]">>$log
|
||||
echo `jps | grep -v Jps` >>$log
|
||||
echo "[------netstat------]">>$log
|
||||
echo `sudo netstat -plten | grep java` &>>$log
|
||||
echo "[------test for hdfs------]">>$log
|
||||
echo `dmesg > $dir/input` 2>>$log
|
||||
su -c "hadoop dfs -ls /" hadoop &&
|
||||
su -c "hadoop dfs -mkdir /test" hadoop &&
|
||||
su -c "hadoop dfs -copyFromLocal $dir/input /test/mydata" hadoop 2>>$log
|
||||
echo "[------start job------]">>$log &&
|
||||
su -c "cd $HADOOP_DIRECTORY && hadoop jar hadoop-examples-$HADOOP_VERSION.jar wordcount /test/mydata /test/output" hadoop 2>>$log &&
|
||||
su -c "hadoop dfs -copyToLocal /test/output/ $dir/out/" hadoop 2>>$log &&
|
||||
su -c "hadoop dfs -rmr /test" hadoop 2>>$log
|
||||
}
|
||||
|
||||
run_pi_job() {
|
||||
f_var_check v_node_count
|
||||
f_var_check v_hadoop_version
|
||||
f_var_check v_hadoop_directory
|
||||
f_create_log_dir
|
||||
directory=/usr/share/hadoop
|
||||
logdir=/var/log/hadoop/hadoop/userlogs
|
||||
su -c "cd $HADOOP_DIRECTORY && hadoop jar hadoop-examples-$HADOOP_VERSION.jar pi $[$NODE_COUNT*10] 1000" hadoop 2>>$log
|
||||
}
|
||||
|
||||
get_job_name() {
|
||||
f_var_check v_hadoop_directory
|
||||
su -c "cd $HADOOP_DIRECTORY && hadoop job -list all | tail -n1" hadoop | awk '{print $1}' 2>>$log
|
||||
}
|
||||
|
||||
get_list_active_trackers() {
|
||||
f_create_log_dir
|
||||
f_var_check v_hadoop_directory
|
||||
sleep 30 &&
|
||||
su -c "cd $HADOOP_DIRECTORY && hadoop job -list-active-trackers" hadoop | wc -l 2>>$log
|
||||
}
|
||||
|
||||
get_list_active_datanodes() {
|
||||
f_create_log_dir
|
||||
f_var_check v_hadoop_directory
|
||||
su -c "hadoop dfsadmin -report" hadoop | grep "Datanodes available:.*" | awk '{print $3}' 2>>$log
|
||||
}
|
||||
|
||||
check_exist_directory() {
|
||||
f_var_check v_job_name
|
||||
f_var_check v_hadoop_log_directory
|
||||
if ! [ -d $HADOOP_LOG_DIRECTORY/$JOB_NAME ];
|
||||
then echo "directory_not_found" && exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
$FUNC
|
@ -1,44 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.tests.integration.db import ITestCase
|
||||
import savanna.tests.integration.parameters as param
|
||||
from telnetlib import Telnet
|
||||
|
||||
|
||||
class ITestClusterApi(ITestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ITestClusterApi, self).setUp()
|
||||
Telnet(self.host, self.port)
|
||||
|
||||
def test_cluster_crud_operations(self):
|
||||
nt_body = self.make_nt('master-node', 'JT+NN', 1234, 2345)
|
||||
data_nt_master = self._post_object(self.url_nt, nt_body, 202)
|
||||
|
||||
nt_body = self.make_nt('worker-node', 'TT+DN', 1234, 2345)
|
||||
data_nt_worker = self._post_object(self.url_nt, nt_body, 202)
|
||||
|
||||
try:
|
||||
cluster_body = self.make_cluster_body(
|
||||
param.CLUSTER_NAME_CRUD, 'master-node', 'worker-node', 2)
|
||||
get_cluster_body = self._get_body_cluster(
|
||||
param.CLUSTER_NAME_CRUD, 'master-node', 'worker-node', 2)
|
||||
|
||||
self._crud_object(cluster_body, get_cluster_body, self.url_cluster)
|
||||
|
||||
finally:
|
||||
self.delete_node_template(data_nt_master)
|
||||
self.delete_node_template(data_nt_worker)
|
@ -1,235 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
from novaclient import client as nc
|
||||
from os import getcwd
|
||||
import paramiko
|
||||
from re import search
|
||||
from savanna.service.cluster_ops import _setup_ssh_connection
|
||||
from savanna.tests.integration.db import ITestCase
|
||||
import savanna.tests.integration.parameters as param
|
||||
from telnetlib import Telnet
|
||||
|
||||
|
||||
def _open_transport_chanel(transport):
|
||||
transport.connect(
|
||||
username=param.NODE_USERNAME, password=param.NODE_PASSWORD)
|
||||
return paramiko.SFTPClient.from_transport(transport)
|
||||
|
||||
|
||||
def _execute_transfer_to_node(host, locfile, nodefile):
|
||||
try:
|
||||
transport = paramiko.Transport(host)
|
||||
sftp = _open_transport_chanel(transport)
|
||||
sftp.put(locfile, nodefile)
|
||||
|
||||
finally:
|
||||
sftp.close()
|
||||
transport.close()
|
||||
|
||||
|
||||
def _execute_transfer_from_node(host, nodefile, localfile):
|
||||
try:
|
||||
transport = paramiko.Transport(host)
|
||||
sftp = _open_transport_chanel(transport)
|
||||
sftp.get(nodefile, localfile)
|
||||
|
||||
finally:
|
||||
sftp.close()
|
||||
transport.close()
|
||||
|
||||
|
||||
def _open_channel_and_execute(ssh, cmd, print_output):
|
||||
chan = ssh.get_transport().open_session()
|
||||
chan.exec_command(cmd)
|
||||
stdout = chan.makefile('rb', -1)
|
||||
chan.set_combine_stderr(True)
|
||||
if print_output:
|
||||
return stdout.read()
|
||||
return chan.recv_exit_status()
|
||||
|
||||
|
||||
def _execute_command_on_node(host, cmd, print_output=False):
|
||||
ssh = paramiko.SSHClient()
|
||||
try:
|
||||
_setup_ssh_connection(host, ssh)
|
||||
return _open_channel_and_execute(ssh, cmd, print_output)
|
||||
finally:
|
||||
ssh.close()
|
||||
|
||||
|
||||
def _transfer_script_to_node(host, directory):
|
||||
_execute_transfer_to_node(
|
||||
str(host), '%s/integration/script.sh' % directory, 'script.sh')
|
||||
_execute_command_on_node(str(host), 'chmod 777 script.sh')
|
||||
|
||||
|
||||
class TestHadoop(ITestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestHadoop, self).setUp()
|
||||
Telnet(self.host, self.port)
|
||||
|
||||
def _hadoop_testing(self, cluster_name, nt_name_master,
|
||||
nt_name_worker, number_workers):
|
||||
object_id = None
|
||||
cluster_body = self.make_cluster_body(
|
||||
cluster_name, nt_name_master, nt_name_worker, number_workers)
|
||||
data = self._post_object(self.url_cluster, cluster_body, 202)
|
||||
|
||||
try:
|
||||
data = data['cluster']
|
||||
object_id = data.pop(u'id')
|
||||
get_body = self._get_body_cluster(
|
||||
cluster_name, nt_name_master, nt_name_worker, number_workers)
|
||||
get_data = self._get_object(self.url_cl_with_slash, object_id, 200)
|
||||
get_data = get_data['cluster']
|
||||
del get_data[u'id']
|
||||
self._await_cluster_active(
|
||||
get_body, get_data, self.url_cl_with_slash, object_id)
|
||||
|
||||
get_data = self._get_object(
|
||||
self.url_cl_with_slash, object_id, 200, True)
|
||||
get_data = get_data['cluster']
|
||||
namenode = get_data[u'service_urls'][u'namenode']
|
||||
jobtracker = get_data[u'service_urls'][u'jobtracker']
|
||||
nodes = get_data[u'nodes']
|
||||
worker_ips = []
|
||||
nova = nc.Client(version='2',
|
||||
username=param.OS_USERNAME,
|
||||
api_key=param.OS_PASSWORD,
|
||||
auth_url=param.OS_AUTH_URL,
|
||||
project_id=param.OS_TENANT_NAME)
|
||||
for node in nodes:
|
||||
if node[u'node_template'][u'name'] == '%s.%s'\
|
||||
% (nt_name_worker, param.FLAVOR_ID):
|
||||
v = nova.servers.get('%s' % node[u'vm_id'])
|
||||
for network, address in v.addresses.items():
|
||||
instance_ips = json.dumps(address)
|
||||
instance_ips = json.loads(instance_ips)
|
||||
for instance_ip in instance_ips:
|
||||
if instance_ip[u'addr'][:len(param.IP_PREFIX)]\
|
||||
== param.IP_PREFIX:
|
||||
worker_ips.append(instance_ip[u'addr'])
|
||||
|
||||
p = '(?:http.*://)?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
|
||||
m = search(p, namenode)
|
||||
t = search(p, jobtracker)
|
||||
|
||||
namenode_ip = m.group('host')
|
||||
namenode_port = m.group('port')
|
||||
jobtracker_ip = t.group('host')
|
||||
jobtracker_port = t.group('port')
|
||||
|
||||
try:
|
||||
Telnet(str(namenode_ip), str(namenode_port))
|
||||
Telnet(str(jobtracker_ip), str(jobtracker_port))
|
||||
except Exception as e:
|
||||
self.fail('telnet to namenode or jobtracker failed: ' + e.message)
|
||||
|
||||
this_dir = getcwd()
|
||||
|
||||
try:
|
||||
_transfer_script_to_node(namenode_ip, this_dir)
|
||||
for worker_ip in worker_ips:
|
||||
_transfer_script_to_node(worker_ip, this_dir)
|
||||
except Exception as e:
|
||||
self.fail('failed to transfer the script: ' + e.message)
|
||||
|
||||
try:
|
||||
self.assertEqual(int(_execute_command_on_node(
|
||||
namenode_ip, './script.sh lt -hd %s'
|
||||
% param.HADOOP_DIRECTORY, True)),
|
||||
number_workers)
|
||||
#TODO(vrovachev): remove the sleep from the script after bug 1183387 is fixed
|
||||
except Exception as e:
|
||||
self.fail('comparison of the number of active trackers failed: '
|
||||
+ e.message)
|
||||
|
||||
try:
|
||||
self.assertEqual(int(_execute_command_on_node(
|
||||
namenode_ip, './script.sh ld -hd %s'
|
||||
% param.HADOOP_DIRECTORY, True)),
|
||||
number_workers)
|
||||
except Exception as e:
|
||||
self.fail('comparison of the number of active datanodes failed: '
|
||||
+ e.message)
|
||||
|
||||
try:
|
||||
_execute_command_on_node(
|
||||
namenode_ip, './script.sh pi -nc %s -hv %s -hd %s'
|
||||
% (number_workers, param.HADOOP_VERSION,
|
||||
param.HADOOP_DIRECTORY))
|
||||
except Exception as e:
|
||||
_execute_transfer_from_node(
|
||||
namenode_ip,
|
||||
'/outputTestMapReduce/log.txt', '%s/errorLog' % this_dir)
|
||||
self.fail(
|
||||
'running the pi job on the active trackers failed: '
|
||||
+ e.message)
|
||||
|
||||
try:
|
||||
job_name = _execute_command_on_node(
|
||||
namenode_ip, './script.sh gn -hd %s'
|
||||
% param.HADOOP_DIRECTORY, True)
|
||||
except Exception as e:
|
||||
self.fail('failed to get the job name: ' + e.message)
|
||||
|
||||
try:
|
||||
for worker_ip in worker_ips:
|
||||
self.assertEquals(
|
||||
_execute_command_on_node(
|
||||
worker_ip,
|
||||
'./script.sh ed -jn %s -hld %s'
|
||||
% (job_name[:-1], param.HADOOP_LOG_DIRECTORY)), 0)
|
||||
except Exception as e:
|
||||
self.fail('failed to verify the job ran on the worker nodes: '
|
||||
+ e.message)
|
||||
|
||||
try:
|
||||
self.assertEquals(
|
||||
_execute_command_on_node(
|
||||
namenode_ip, './script.sh mr -hv %s -hd %s'
|
||||
% (param.HADOOP_VERSION,
|
||||
param.HADOOP_DIRECTORY)), 0)
|
||||
except Exception as e:
|
||||
_execute_transfer_from_node(
|
||||
namenode_ip,
|
||||
'/outputTestMapReduce/log.txt', '%s/errorLog' % this_dir)
|
||||
self.fail('map/reduce (hdfs) test script failed: ' + e.message)
|
||||
except Exception as e:
|
||||
self.fail(e.message)
|
||||
|
||||
finally:
|
||||
self._del_object(self.url_cl_with_slash, object_id, 204)
|
||||
|
||||
def test_hadoop_single_master(self):
|
||||
data_nt_master = self._post_object(
|
||||
self.url_nt, self.make_nt('master_node', 'JT+NN',
|
||||
1234, 1234), 202)
|
||||
data_nt_worker = self._post_object(
|
||||
self.url_nt, self.make_nt('worker_node', 'TT+DN',
|
||||
1234, 1234), 202)
|
||||
|
||||
try:
|
||||
self._hadoop_testing(
|
||||
param.CLUSTER_NAME_HADOOP, 'master_node', 'worker_node', 2)
|
||||
except Exception as e:
|
||||
self.fail(e.message)
|
||||
|
||||
finally:
|
||||
self.delete_node_template(data_nt_master)
|
||||
self.delete_node_template(data_nt_worker)
|
@ -1,48 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.tests.integration.db import ITestCase
|
||||
from telnetlib import Telnet
|
||||
|
||||
|
||||
class ITestNodeTemplateApi(ITestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ITestNodeTemplateApi, self).setUp()
|
||||
Telnet(self.host, self.port)
|
||||
|
||||
def test_crud_nt_jtnn(self):
|
||||
nt_jtnn = self.make_nt('jtnn', 'JT+NN', 1024, 1024)
|
||||
get_jtnn = self._get_body_nt('jtnn', 'JT+NN', 1024, 1024)
|
||||
|
||||
self._crud_object(nt_jtnn, get_jtnn, self.url_nt)
|
||||
|
||||
def test_crud_nt_ttdn(self):
|
||||
nt_ttdn = self.make_nt('ttdn', 'TT+DN', 1024, 1024)
|
||||
get_ttdn = self._get_body_nt('ttdn', 'TT+DN', 1024, 1024)
|
||||
|
||||
self._crud_object(nt_ttdn, get_ttdn, self.url_nt)
|
||||
|
||||
def test_crud_nt_nn(self):
|
||||
nt_nn = self.make_nt('nn', 'NN', 1024, 1024)
|
||||
get_nn = self._get_body_nt('nn', 'NN', 1024, 1024)
|
||||
|
||||
self._crud_object(nt_nn, get_nn, self.url_nt)
|
||||
|
||||
def test_crud_nt_jt(self):
|
||||
nt_jt = self.make_nt('jt', 'JT', 1024, 1024)
|
||||
get_jt = self._get_body_nt('jt', 'JT', 1024, 1024)
|
||||
|
||||
self._crud_object(nt_jt, get_jt, self.url_nt)
|
@ -1,190 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
import uuid
|
||||
|
||||
import eventlet
|
||||
from oslo.config import cfg
|
||||
|
||||
import savanna.main
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.service import api
|
||||
from savanna.storage.db import DB
|
||||
from savanna.storage.defaults import setup_defaults
|
||||
from savanna.storage.models import Node, NodeTemplate
|
||||
from savanna.utils.openstack import nova
|
||||
from savanna.utils import scheduler
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _stub_vm_creation_job(template_id):
|
||||
template = NodeTemplate.query.filter_by(id=template_id).first()
|
||||
eventlet.sleep(2)
|
||||
return 'ip-address', uuid.uuid4().hex, template.id
|
||||
|
||||
|
||||
def _stub_launch_cluster(headers, cluster):
|
||||
LOG.debug('stub launch_cluster called with %s, %s', headers, cluster)
|
||||
pile = eventlet.GreenPile(scheduler.POOL)
|
||||
|
||||
for elem in cluster.node_counts:
|
||||
node_count = elem.count
|
||||
for _ in xrange(0, node_count):
|
||||
pile.spawn(_stub_vm_creation_job, elem.node_template_id)
|
||||
|
||||
for (ip, vm_id, elem) in pile:
|
||||
DB.session.add(Node(vm_id, cluster.id, elem))
|
||||
LOG.debug("VM '%s/%s/%s' created", ip, vm_id, elem)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def _stub_stop_cluster(headers, cluster):
|
||||
LOG.debug("stub stop_cluster called with %s, %s", headers, cluster)
|
||||
|
||||
|
||||
def _stub_auth_token(*args, **kwargs):
|
||||
LOG.debug('stub token filter called with %s, %s', args, kwargs)
|
||||
|
||||
def _filter(app):
|
||||
def _handler(env, start_response):
|
||||
env['HTTP_X_TENANT_ID'] = 'tenant-id-1'
|
||||
return app(env, start_response)
|
||||
|
||||
return _handler
|
||||
|
||||
return _filter
|
||||
|
||||
|
||||
def _stub_auth_valid(*args, **kwargs):
|
||||
LOG.debug('stub token validation called with %s, %s', args, kwargs)
|
||||
|
||||
def _filter(app):
|
||||
def _handler(env, start_response):
|
||||
return app(env, start_response)
|
||||
|
||||
return _handler
|
||||
|
||||
return _filter
|
||||
|
||||
|
||||
def _stub_get_flavors(headers):
|
||||
LOG.debug('Stub get_flavors called with %s', headers)
|
||||
return [u'test_flavor', u'test_flavor_2']
|
||||
|
||||
|
||||
def _stub_get_images(headers):
|
||||
LOG.debug('Stub get_images called with %s', headers)
|
||||
return [u'base-image-id', u'base-image-id_2']
|
||||
|
||||
|
||||
def _stub_get_limits(headers):
|
||||
limits = dict(maxTotalCores=100,
|
||||
maxTotalRAMSize=51200,
|
||||
maxTotalInstances=100,
|
||||
totalCoresUsed=0,
|
||||
totalRAMUsed=0,
|
||||
totalInstancesUsed=0)
|
||||
|
||||
LOG.debug('Stub get_limits called with headers %s and limits %s',
|
||||
headers, limits)
|
||||
|
||||
return limits
|
||||
|
||||
|
||||
class StubFlavor:
|
||||
def __init__(self, name, vcpus, ram):
|
||||
self.name = name
|
||||
self.vcpus = int(vcpus)
|
||||
self.ram = int(ram)
|
||||
|
||||
|
||||
def _stub_get_flavor(headers, **kwargs):
|
||||
return StubFlavor('test_flavor', 1, 512)
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('debug', 'savanna.openstack.common.log')
|
||||
CONF.import_opt('allow_cluster_ops', 'savanna.config')
|
||||
CONF.import_opt('database_uri', 'savanna.storage.db', group='sqlalchemy')
|
||||
CONF.import_opt('echo', 'savanna.storage.db', group='sqlalchemy')
|
||||
|
||||
|
||||
class SavannaTestCase(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.db_fd, self.db_path = tempfile.mkstemp()
|
||||
self.maxDiff = 10000
|
||||
|
||||
# override configs
|
||||
CONF.set_override('debug', True)
|
||||
CONF.set_override('allow_cluster_ops', True) # stub process
|
||||
CONF.set_override('database_uri', 'sqlite:///' + self.db_path,
|
||||
group='sqlalchemy')
|
||||
CONF.set_override('echo', False, group='sqlalchemy')
|
||||
|
||||
# store functions that will be stubbed
|
||||
self._prev_auth_token = savanna.main.auth_token
|
||||
self._prev_auth_valid = savanna.main.auth_valid
|
||||
self._prev_cluster_launch = api.cluster_ops.launch_cluster
|
||||
self._prev_cluster_stop = api.cluster_ops.stop_cluster
|
||||
self._prev_get_flavors = nova.get_flavors
|
||||
self._prev_get_images = nova.get_images
|
||||
self._prev_get_limits = nova.get_limits
|
||||
self._prev_get_flavor = nova.get_flavor
|
||||
|
||||
# stub functions
|
||||
savanna.main.auth_token = _stub_auth_token
|
||||
savanna.main.auth_valid = _stub_auth_valid
|
||||
api.cluster_ops.launch_cluster = _stub_launch_cluster
|
||||
api.cluster_ops.stop_cluster = _stub_stop_cluster
|
||||
nova.get_flavors = _stub_get_flavors
|
||||
nova.get_images = _stub_get_images
|
||||
nova.get_limits = _stub_get_limits
|
||||
nova.get_flavor = _stub_get_flavor
|
||||
|
||||
app = savanna.main.make_app()
|
||||
|
||||
DB.drop_all()
|
||||
DB.create_all()
|
||||
setup_defaults(True, True)
|
||||
|
||||
LOG.debug('Test db path: %s', self.db_path)
|
||||
LOG.debug('Test app.config: %s', app.config)
|
||||
|
||||
self.app = app.test_client()
|
||||
|
||||
def tearDown(self):
|
||||
# unstub functions
|
||||
savanna.main.auth_token = self._prev_auth_token
|
||||
savanna.main.auth_valid = self._prev_auth_valid
|
||||
api.cluster_ops.launch_cluster = self._prev_cluster_launch
|
||||
api.cluster_ops.stop_cluster = self._prev_cluster_stop
|
||||
nova.get_flavors = self._prev_get_flavors
|
||||
nova.get_images = self._prev_get_images
|
||||
nova.get_limits = self._prev_get_limits
|
||||
nova.get_flavor = self._prev_get_flavor
|
||||
|
||||
os.close(self.db_fd)
|
||||
os.unlink(self.db_path)
|
||||
|
||||
# place back default configs
|
||||
CONF.clear_override('debug')
|
||||
CONF.clear_override('allow_cluster_ops')
|
||||
CONF.clear_override('database_uri', group='sqlalchemy')
|
||||
CONF.clear_override('echo', group='sqlalchemy')
|
0
savanna/tests/unit/db/__init__.py
Normal file
0
savanna/tests/unit/db/models/__init__.py
Normal file
63
savanna/tests/unit/db/models/base.py
Normal file
@ -0,0 +1,63 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import os
|
||||
import tempfile
|
||||
import unittest2
|
||||
|
||||
from savanna.context import Context
|
||||
from savanna.context import set_ctx
|
||||
from savanna.db.api import clear_db
|
||||
from savanna.db.api import configure_db
|
||||
from savanna.openstack.common.db.sqlalchemy import session
|
||||
from savanna.openstack.common import timeutils
|
||||
from savanna.openstack.common import uuidutils
|
||||
|
||||
|
||||
class ModelTestCase(unittest2.TestCase):
|
||||
def setUp(self):
|
||||
set_ctx(Context('test_user', 'test_tenant', 'test_auth_token', {}))
|
||||
self.db_fd, self.db_path = tempfile.mkstemp()
|
||||
session.set_defaults('sqlite:///' + self.db_path, self.db_path)
|
||||
configure_db()
|
||||
|
||||
def tearDown(self):
|
||||
clear_db()
|
||||
os.close(self.db_fd)
|
||||
os.unlink(self.db_path)
|
||||
set_ctx(None)
|
||||
|
||||
def assertIsValidModelObject(self, res):
|
||||
self.assertIsNotNone(res)
|
||||
self.assertIsNotNone(res.dict)
|
||||
self.assertTrue(uuidutils.is_uuid_like(res.id))
|
||||
|
||||
# check created/updated
|
||||
delta = datetime.timedelta(seconds=2)
|
||||
now = timeutils.utcnow()
|
||||
|
||||
self.assertAlmostEqual(res.created, now, delta=delta)
|
||||
self.assertAlmostEqual(res.updated, now, delta=delta)
|
||||
|
||||
def get_clean_dict(self, res):
|
||||
res_dict = res.dict
|
||||
del res_dict['created']
|
||||
del res_dict['updated']
|
||||
del res_dict['id']
|
||||
if 'tenant_id' in res_dict:
|
||||
del res_dict['tenant_id']
|
||||
|
||||
return res_dict
|
45
savanna/tests/unit/db/models/test_clusters.py
Normal file
@ -0,0 +1,45 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from savanna.context import ctx
|
||||
import savanna.db.models as m
|
||||
from savanna.tests.unit.db.models.base import ModelTestCase
|
||||
|
||||
|
||||
class ClusterModelTest(ModelTestCase):
|
||||
def testCreateCluster(self):
|
||||
session = ctx().session
|
||||
with session.begin():
|
||||
c = m.Cluster('c-1', 't-1', 'p-1', 'hv-1')
|
||||
session.add(c)
|
||||
|
||||
with session.begin():
|
||||
res = session.query(m.Cluster).filter_by().first()
|
||||
|
||||
self.assertIsValidModelObject(res)
|
||||
|
||||
def testCreateClusterFromDict(self):
|
||||
c = m.Cluster('c-1', 't-1', 'p-1', 'hv-1')
|
||||
c_dict = c.dict
|
||||
del c_dict['created']
|
||||
del c_dict['updated']
|
||||
del c_dict['id']
|
||||
del c_dict['node_groups']
|
||||
|
||||
c_dict.update({
|
||||
'tenant_id': 't-1'
|
||||
})
|
||||
self.assertEqual(self.get_clean_dict(c),
|
||||
self.get_clean_dict(m.Cluster(**c_dict)))
|
98
savanna/tests/unit/db/models/test_templates.py
Normal file
@ -0,0 +1,98 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from savanna.context import ctx
|
||||
|
||||
import savanna.db.models as m
|
||||
from savanna.tests.unit.db.models.base import ModelTestCase
|
||||
|
||||
|
||||
SAMPLE_CONFIGS = {
|
||||
'a': 'av',
|
||||
'b': 123,
|
||||
'c': [1, '2', u"3"]
|
||||
}
|
||||
|
||||
|
||||
class TemplatesModelTest(ModelTestCase):
|
||||
def testCreateNodeGroupTemplate(self):
|
||||
session = ctx().session
|
||||
with session.begin():
|
||||
ngt = m.NodeGroupTemplate('ngt-1', 't-1', 'f-1', 'p-1', 'hv-1',
|
||||
['np-1', 'np-2'], SAMPLE_CONFIGS, "d")
|
||||
session.add(ngt)
|
||||
|
||||
res = session.query(m.NodeGroupTemplate).filter_by().first()
|
||||
|
||||
self.assertIsValidModelObject(res)
|
||||
self.assertEquals(['np-1', 'np-2'], res.node_processes)
|
||||
self.assertEquals(SAMPLE_CONFIGS, res.node_configs)
|
||||
|
||||
res_dict = self.get_clean_dict(res)
|
||||
|
||||
self.assertEqual(res_dict, {
|
||||
'description': 'd',
|
||||
'flavor_id': 'f-1',
|
||||
'hadoop_version': 'hv-1',
|
||||
'name': 'ngt-1',
|
||||
'node_configs': SAMPLE_CONFIGS,
|
||||
'node_processes': ['np-1', 'np-2'],
|
||||
'plugin_name': 'p-1'
|
||||
})
|
||||
|
||||
def testCreateClusterTemplate(self):
|
||||
session = ctx().session
|
||||
with session.begin():
|
||||
c = m.ClusterTemplate('c-1', 't-1', 'p-1', 'hv-1', SAMPLE_CONFIGS,
|
||||
"d")
|
||||
session.add(c)
|
||||
|
||||
res = session.query(m.ClusterTemplate).filter_by().first()
|
||||
self.assertIsValidModelObject(res)
|
||||
self.assertEqual(SAMPLE_CONFIGS, res.cluster_configs)
|
||||
|
||||
res_dict = self.get_clean_dict(res)
|
||||
|
||||
self.assertEqual(res_dict, {
|
||||
'cluster_configs': SAMPLE_CONFIGS,
|
||||
'description': 'd',
|
||||
'hadoop_version': 'hv-1',
|
||||
'name': 'c-1',
|
||||
'plugin_name': 'p-1',
|
||||
'node_group_templates': []
|
||||
})
|
||||
|
||||
def testCreateClusterTemplateWithNodeGroupTemplates(self):
|
||||
session = ctx().session
|
||||
with session.begin():
|
||||
ct = m.ClusterTemplate('ct', 't-1', 'p-1', 'hv-1')
|
||||
session.add(ct)
|
||||
|
||||
ngts = []
|
||||
for i in xrange(0, 3):
|
||||
ngt = m.NodeGroupTemplate('ngt-%s' % i, 't-1', 'f-1', 'p-1',
|
||||
'hv-1', ['np-1', 'np-2'])
|
||||
session.add(ngt)
|
||||
session.flush()
|
||||
rel = ct.add_node_group_template(ngt.id, 'group-%s' % i, 5 + i)
|
||||
session.add(rel)
|
||||
ngts.append(ngt)
|
||||
|
||||
with session.begin():
|
||||
res = session.query(m.ClusterTemplate).filter_by().first()
|
||||
self.assertIsValidModelObject(res)
|
||||
|
||||
self.assertEqual(len(res.node_group_templates), 3)
|
||||
self.assertEqual(set(t.name for t in res.node_group_templates),
|
||||
set('ngt-%s' % i for i in xrange(0, 3)))
|
@ -1,332 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import eventlet
|
||||
import json
|
||||
import unittest
|
||||
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.tests.unit.base import SavannaTestCase
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TestApiV02(SavannaTestCase):
|
||||
|
||||
def test_list_node_templates(self):
|
||||
rv = self.app.get('/v0.2/some-tenant-id/node-templates.json')
|
||||
self.assertEquals(rv.status_code, 200)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
# clean all ids
|
||||
for idx in xrange(0, len(data.get(u'node_templates'))):
|
||||
del data.get(u'node_templates')[idx][u'id']
|
||||
|
||||
self.assertEquals(data, _get_templates_stub_data())
|
||||
|
||||
def test_create_node_template(self):
|
||||
rv = self.app.post('/v0.2/some-tenant-id/node-templates.json',
|
||||
data=json.dumps(dict(
|
||||
node_template=dict(
|
||||
name='test-template',
|
||||
node_type='JT+NN',
|
||||
flavor_id='test_flavor',
|
||||
job_tracker={
|
||||
'heap_size': '1234'
|
||||
},
|
||||
name_node={
|
||||
'heap_size': '2345'
|
||||
}
|
||||
))))
|
||||
self.assertEquals(rv.status_code, 202)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
data = data['node_template']
|
||||
|
||||
# clean all ids
|
||||
del data[u'id']
|
||||
|
||||
self.assertEquals(data, {
|
||||
u'job_tracker': {
|
||||
u'heap_size': u'1234'
|
||||
}, u'name': u'test-template',
|
||||
u'node_type': {
|
||||
u'processes': [
|
||||
u'job_tracker', u'name_node'
|
||||
],
|
||||
u'name': u'JT+NN'
|
||||
},
|
||||
u'flavor_id': u'test_flavor',
|
||||
u'name_node': {
|
||||
u'heap_size': u'2345'
|
||||
}
|
||||
})
|
||||
|
||||
def test_list_clusters(self):
|
||||
rv = self.app.get('/v0.2/some-tenant-id/clusters.json')
|
||||
self.assertEquals(rv.status_code, 200)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
self.assertEquals(data, {
|
||||
u'clusters': []
|
||||
})
|
||||
|
||||
def test_create_clusters(self):
|
||||
rv = self.app.post('/v0.2/some-tenant-id/clusters.json',
|
||||
data=json.dumps(dict(
|
||||
cluster=dict(
|
||||
name='test-cluster',
|
||||
base_image_id='base-image-id',
|
||||
node_templates={
|
||||
'jt_nn.medium': 1,
|
||||
'tt_dn.small': 5
|
||||
}
|
||||
))))
|
||||
self.assertEquals(rv.status_code, 202)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
data = data['cluster']
|
||||
|
||||
cluster_id = data.pop(u'id')
|
||||
|
||||
self.assertEquals(data, {
|
||||
u'status': u'Starting',
|
||||
u'service_urls': {},
|
||||
u'name': u'test-cluster',
|
||||
u'base_image_id': u'base-image-id',
|
||||
u'node_templates': {
|
||||
u'jt_nn.medium': 1,
|
||||
u'tt_dn.small': 5
|
||||
},
|
||||
u'nodes': []
|
||||
})
|
||||
|
||||
eventlet.sleep(4)
|
||||
|
||||
rv = self.app.get('/v0.2/some-tenant-id/clusters/%s.json' % cluster_id)
|
||||
self.assertEquals(rv.status_code, 200)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
data = data['cluster']
|
||||
|
||||
self.assertEquals(data.pop(u'id'), cluster_id)
|
||||
|
||||
# clean all ids
|
||||
for idx in xrange(0, len(data.get(u'nodes'))):
|
||||
del data.get(u'nodes')[idx][u'vm_id']
|
||||
del data.get(u'nodes')[idx][u'node_template'][u'id']
|
||||
|
||||
nodes = data.pop(u'nodes')
|
||||
|
||||
self.assertEquals(data, {
|
||||
u'status': u'Active',
|
||||
u'service_urls': {},
|
||||
u'name': u'test-cluster',
|
||||
u'base_image_id': u'base-image-id',
|
||||
u'node_templates': {
|
||||
u'jt_nn.medium': 1,
|
||||
u'tt_dn.small': 5
|
||||
}
|
||||
})
|
||||
|
||||
self.assertEquals(_sorted_nodes(nodes), _sorted_nodes([
|
||||
{u'node_template': {u'name': u'tt_dn.small'}},
|
||||
{u'node_template': {u'name': u'tt_dn.small'}},
|
||||
{u'node_template': {u'name': u'tt_dn.small'}},
|
||||
{u'node_template': {u'name': u'tt_dn.small'}},
|
||||
{u'node_template': {u'name': u'tt_dn.small'}},
|
||||
{u'node_template': {u'name': u'jt_nn.medium'}}
|
||||
]))
|
||||
|
||||
def test_delete_node_template(self):
|
||||
rv = self.app.post('/v0.2/some-tenant-id/node-templates.json',
|
||||
data=json.dumps(dict(
|
||||
node_template=dict(
|
||||
name='test-template-2',
|
||||
node_type='JT+NN',
|
||||
flavor_id='test_flavor_2',
|
||||
job_tracker={
|
||||
'heap_size': '1234'
|
||||
},
|
||||
name_node={
|
||||
'heap_size': '2345'
|
||||
}
|
||||
))))
|
||||
self.assertEquals(rv.status_code, 202)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
data = data['node_template']
|
||||
|
||||
node_template_id = data.pop(u'id')
|
||||
|
||||
rv = self.app.get(
|
||||
'/v0.2/some-tenant-id/node-templates/%s.json' % node_template_id)
|
||||
self.assertEquals(rv.status_code, 200)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
data = data['node_template']
|
||||
|
||||
# clean all ids
|
||||
del data[u'id']
|
||||
|
||||
self.assertEquals(data, {
|
||||
u'job_tracker': {
|
||||
u'heap_size': u'1234'
|
||||
}, u'name': u'test-template-2',
|
||||
u'node_type': {
|
||||
u'processes': [
|
||||
u'job_tracker', u'name_node'
|
||||
],
|
||||
u'name': u'JT+NN'
|
||||
},
|
||||
u'flavor_id': u'test_flavor_2',
|
||||
u'name_node': {
|
||||
u'heap_size': u'2345'
|
||||
}
|
||||
})
|
||||
|
||||
rv = self.app.delete(
|
||||
'/v0.2/some-tenant-id/node-templates/%s.json' % node_template_id)
|
||||
self.assertEquals(rv.status_code, 204)
|
||||
|
||||
rv = self.app.get(
|
||||
'/v0.2/some-tenant-id/node-templates/%s.json' % node_template_id)
|
||||
self.assertEquals(rv.status_code, 404)
|
||||
|
||||
def test_delete_cluster(self):
|
||||
rv = self.app.post('/v0.2/some-tenant-id/clusters.json',
|
||||
data=json.dumps(dict(
|
||||
cluster=dict(
|
||||
name='test-cluster-2',
|
||||
base_image_id='base-image-id_2',
|
||||
node_templates={
|
||||
'jt_nn.medium': 1,
|
||||
'tt_dn.small': 5
|
||||
}
|
||||
))))
|
||||
self.assertEquals(rv.status_code, 202)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
data = data['cluster']
|
||||
|
||||
cluster_id = data.pop(u'id')
|
||||
|
||||
rv = self.app.get('/v0.2/some-tenant-id/clusters/%s.json' % cluster_id)
|
||||
self.assertEquals(rv.status_code, 200)
|
||||
data = json.loads(rv.data)
|
||||
|
||||
data = data['cluster']
|
||||
|
||||
# delete all ids
|
||||
del data[u'id']
|
||||
|
||||
self.assertEquals(data, {
|
||||
u'status': u'Starting',
|
||||
u'service_urls': {},
|
||||
u'name': u'test-cluster-2',
|
||||
u'base_image_id': u'base-image-id_2',
|
||||
u'node_templates': {
|
||||
u'jt_nn.medium': 1,
|
||||
u'tt_dn.small': 5
|
||||
},
|
||||
u'nodes': []
|
||||
})
|
||||
|
||||
rv = self.app.delete(
|
||||
'/v0.2/some-tenant-id/clusters/%s.json' % cluster_id)
|
||||
self.assertEquals(rv.status_code, 204)
|
||||
|
||||
eventlet.sleep(1)
|
||||
|
||||
rv = self.app.get('/v0.2/some-tenant-id/clusters/%s.json' % cluster_id)
|
||||
self.assertEquals(rv.status_code, 404)
|
||||
|
||||
|
||||
def _sorted_nodes(nodes):
|
||||
return sorted(nodes, key=lambda elem: elem[u'node_template'][u'name'])
|
||||
|
||||
|
||||
def _get_templates_stub_data():
|
||||
return {
|
||||
u'node_templates': [
|
||||
{
|
||||
u'job_tracker': {
|
||||
u'heap_size': u'896'
|
||||
},
|
||||
u'name': u'jt_nn.small',
|
||||
u'node_type': {
|
||||
u'processes': [
|
||||
u'job_tracker', u'name_node'
|
||||
],
|
||||
u'name': u'JT+NN'
|
||||
},
|
||||
u'flavor_id': u'm1.small',
|
||||
u'name_node': {
|
||||
u'heap_size': u'896'
|
||||
}
|
||||
},
|
||||
{
|
||||
u'job_tracker': {
|
||||
u'heap_size': u'1792'
|
||||
},
|
||||
u'name': u'jt_nn.medium',
|
||||
u'node_type': {
|
||||
u'processes': [
|
||||
u'job_tracker', u'name_node'
|
||||
], u'name': u'JT+NN'
|
||||
},
|
||||
u'flavor_id': u'm1.medium',
|
||||
u'name_node': {
|
||||
u'heap_size': u'1792'
|
||||
}
|
||||
},
|
||||
{
|
||||
u'name': u'tt_dn.small',
|
||||
u'task_tracker': {
|
||||
u'heap_size': u'896'
|
||||
},
|
||||
u'data_node': {
|
||||
u'heap_size': u'896'
|
||||
},
|
||||
u'node_type': {
|
||||
u'processes': [
|
||||
u'task_tracker', u'data_node'
|
||||
],
|
||||
u'name': u'TT+DN'
|
||||
},
|
||||
u'flavor_id': u'm1.small'
|
||||
},
|
||||
{
|
||||
u'name': u'tt_dn.medium',
|
||||
u'task_tracker': {
|
||||
u'heap_size': u'1792',
|
||||
},
|
||||
u'data_node': {
|
||||
u'heap_size': u'1792'
|
||||
},
|
||||
u'node_type': {
|
||||
u'processes': [
|
||||
u'task_tracker', u'data_node'
|
||||
],
|
||||
u'name': u'TT+DN'
|
||||
},
|
||||
u'flavor_id': u'm1.medium'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
@ -1,54 +0,0 @@  (entire file removed)
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from savanna.service.cluster_ops import _create_xml
import unittest


class ConfigGeneratorTest(unittest.TestCase):
    def test_xml_generator(self):
        config = {
            'key-1': 'value-1',
            'key-2': 'value-2',
            'key-3': 'value-3',
            'key-4': 'value-4',
            'key-5': 'value-5',
        }
        xml = _create_xml(config, config.keys())
        self.assertEqual(xml, """<?xml version="1.0" ?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>key-3</name>
<value>value-3</value>
</property>
<property>
<name>key-2</name>
<value>value-2</value>
</property>
<property>
<name>key-1</name>
<value>value-1</value>
</property>
<property>
<name>key-5</name>
<value>value-5</value>
</property>
<property>
<name>key-4</name>
<value>value-4</value>
</property>
</configuration>
""")
@ -1,244 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from mock import patch
|
||||
import unittest
|
||||
|
||||
import savanna.service.api as api
|
||||
|
||||
|
||||
class TestServiceLayer(unittest.TestCase):
|
||||
## Node Template ops:
|
||||
|
||||
@patch('savanna.storage.storage.get_node_template')
|
||||
def test_get_node_template(self, m):
|
||||
m.return_value = api.Resource("node_template", {
|
||||
"id": "template-id",
|
||||
"name": "jt_nn.small",
|
||||
"node_type": api.Resource("node_type", {
|
||||
"name": "JT+NN",
|
||||
"processes": [
|
||||
api.Resource("process", {"name": "job_tracker"}),
|
||||
api.Resource("process", {"name": "name_node"})
|
||||
]
|
||||
}),
|
||||
"flavor_id": "flavor-id",
|
||||
"node_template_configs": [
|
||||
api.Resource("conf", {
|
||||
"node_process_property": api.Resource("prop", {
|
||||
"name": "heap_size",
|
||||
"node_process": api.Resource("process", {
|
||||
"name": "job_tracker"
|
||||
})
|
||||
}),
|
||||
"value": "1234"
|
||||
}),
|
||||
api.Resource("conf", {
|
||||
"node_process_property": api.Resource("prop", {
|
||||
"name": "heap_size",
|
||||
"node_process": api.Resource("process", {
|
||||
"name": "name_node"
|
||||
})
|
||||
}),
|
||||
"value": "5678"
|
||||
})
|
||||
]
|
||||
})
|
||||
|
||||
nt = api.get_node_template(id='template-id')
|
||||
self.assertEqual(nt, api.Resource("node_template", {
|
||||
'id': 'template-id',
|
||||
'name': 'jt_nn.small',
|
||||
'node_type': {
|
||||
'processes': ['job_tracker', 'name_node'],
|
||||
'name': 'JT+NN'
|
||||
},
|
||||
'flavor_id': 'flavor-id',
|
||||
'job_tracker': {'heap_size': '1234'},
|
||||
'name_node': {'heap_size': '5678'}
|
||||
}))
|
||||
m.assert_called_once_with(id='template-id')
|
||||
|
||||
@patch('savanna.storage.storage.get_node_templates')
|
||||
def test_get_node_templates(self, m):
|
||||
# '_node_template' tested in 'test_get_node_template'
|
||||
api.get_node_templates(node_type='JT+NN')
|
||||
m.assert_called_once_with(node_type='JT+NN')
|
||||
|
||||
@patch('savanna.service.api.get_node_template')
|
||||
@patch('savanna.storage.storage.create_node_template')
|
||||
@patch('savanna.storage.storage.get_node_type')
|
||||
def test_create_node_template(self, get_n_type, create_tmpl, get_tmpl):
|
||||
get_n_type.return_value = api.Resource(
|
||||
"node_type", {"id": "node-type-1"})
|
||||
create_tmpl.return_value = api.Resource(
|
||||
"node-template", {"id": "tmpl-1"})
|
||||
|
||||
api.create_node_template(
|
||||
{
|
||||
"node_template": {
|
||||
"name": "nt-1",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1"
|
||||
}
|
||||
}, {"X-Tenant-Id": "tenant-01"})
|
||||
|
||||
get_n_type.assert_called_once_with(name="JT+NN")
|
||||
create_tmpl.assert_called_once_with("nt-1", "node-type-1",
|
||||
"flavor-1", {})
|
||||
get_tmpl.assert_called_once_with(id="tmpl-1")
|
||||
|
||||
@patch('savanna.storage.storage.terminate_node_template')
|
||||
def test_terminate_node_template(self, m):
|
||||
api.terminate_node_template(node_type='JT+NN')
|
||||
m.assert_called_once_with(node_type='JT+NN')
|
||||
|
||||
## Cluster ops:
|
||||
|
||||
@patch('savanna.storage.storage.get_cluster')
|
||||
def test_get_cluster(self, m):
|
||||
m.return_value = api.Resource("cluster", {
|
||||
"id": "cluster-id",
|
||||
"name": "cluster-name",
|
||||
"base_image_id": "image-id",
|
||||
"status": "Active",
|
||||
"nodes": [
|
||||
api.Resource("node", {
|
||||
"vm_id": "vm-1",
|
||||
"node_template": api.Resource("node_template", {
|
||||
"id": "jt_nn.small-id",
|
||||
"name": "jt_nn.small"
|
||||
})
|
||||
}),
|
||||
api.Resource("node", {
|
||||
"vm_id": "vm-2",
|
||||
"node_template": api.Resource("node_template", {
|
||||
"id": "tt_dn.small-id",
|
||||
"name": "tt_dn.small"
|
||||
})
|
||||
}),
|
||||
api.Resource("node", {
|
||||
"vm_id": "vm-3",
|
||||
"node_template": api.Resource("node_template", {
|
||||
"id": "tt_dn.small-id",
|
||||
"name": "tt_dn.small"
|
||||
})
|
||||
})
|
||||
],
|
||||
"node_counts": [
|
||||
api.Resource("node_count", {
|
||||
"node_template": api.Resource("node_template", {
|
||||
"name": "jt_nn.small"
|
||||
}),
|
||||
"count": "1"
|
||||
}),
|
||||
api.Resource("node_count", {
|
||||
"node_template": api.Resource("node_template", {
|
||||
"name": "tt_dn.small"
|
||||
}),
|
||||
"count": "2"
|
||||
})
|
||||
],
|
||||
"service_urls": [
|
||||
api.Resource("service_url", {
|
||||
"name": "job_tracker",
|
||||
"url": "some-url"
|
||||
}),
|
||||
api.Resource("service_url", {
|
||||
"name": "name_node",
|
||||
"url": "some-url-2"
|
||||
})
|
||||
]
|
||||
})
|
||||
|
||||
cluster = api.get_cluster(id="cluster-id")
|
||||
self.assertEqual(cluster, api.Resource("cluster", {
|
||||
'id': 'cluster-id',
|
||||
'name': 'cluster-name',
|
||||
'base_image_id': "image-id",
|
||||
'status': 'Active',
|
||||
'node_templates': {'jt_nn.small': '1', 'tt_dn.small': '2'},
|
||||
'nodes': [
|
||||
{
|
||||
'node_template': {
|
||||
'id': 'jt_nn.small-id', 'name': 'jt_nn.small'
|
||||
}, 'vm_id': 'vm-1'
|
||||
},
|
||||
{
|
||||
'node_template': {
|
||||
'id': 'tt_dn.small-id', 'name': 'tt_dn.small'
|
||||
}, 'vm_id': 'vm-2'
|
||||
},
|
||||
{
|
||||
'node_template': {
|
||||
'id': 'tt_dn.small-id', 'name': 'tt_dn.small'
|
||||
}, 'vm_id': 'vm-3'
|
||||
}
|
||||
],
|
||||
'service_urls': {
|
||||
'name_node': 'some-url-2',
|
||||
'job_tracker': 'some-url'
|
||||
}
|
||||
}))
|
||||
m.assert_called_once_with(id="cluster-id")
|
||||
|
||||
@patch('savanna.storage.storage.get_clusters')
|
||||
def test_get_clusters(self, m):
|
||||
# '_clusters' tested in 'test_get_clusters'
|
||||
api.get_clusters(id="cluster-id")
|
||||
m.assert_called_once_with(id="cluster-id")
|
||||
|
||||
@patch('eventlet.spawn')
|
||||
@patch('savanna.service.api.get_cluster')
|
||||
@patch('savanna.storage.storage.create_cluster')
|
||||
def test_create_cluster(self, create_c, get_c, spawn):
|
||||
create_c.return_value = api.Resource("cluster", {
|
||||
"id": "cluster-1"
|
||||
})
|
||||
|
||||
api.create_cluster(
|
||||
{
|
||||
"cluster": {
|
||||
"name": "cluster-1",
|
||||
"base_image_id": "image-1",
|
||||
"node_templates": {
|
||||
"jt_nn.small": "1",
|
||||
"tt_dn.small": "10"
|
||||
}
|
||||
}
|
||||
}, {"X-Tenant-Id": "tenant-01"})
|
||||
|
||||
create_c.assert_called_once_with("cluster-1", "image-1", "tenant-01", {
|
||||
"jt_nn.small": "1",
|
||||
"tt_dn.small": "10"
|
||||
})
|
||||
get_c.assert_called_once_with(id="cluster-1")
|
||||
spawn.assert_called_once_with(api._cluster_creation_job,
|
||||
{"X-Tenant-Id": "tenant-01"},
|
||||
"cluster-1")
|
||||
|
||||
@patch('eventlet.spawn')
|
||||
@patch('savanna.storage.storage.update_cluster_status')
|
||||
def test_terminate_cluster(self, update_status, spawn):
|
||||
update_status.return_value = api.Resource("cluster", {
|
||||
"id": "cluster-id"
|
||||
})
|
||||
|
||||
api.terminate_cluster({"X-Tenant-Id": "tenant-01"}, id="cluster-id")
|
||||
|
||||
update_status.assert_called_once_with('Stopping', id="cluster-id")
|
||||
spawn.assert_called_once_with(api._cluster_termination_job,
|
||||
{"X-Tenant-Id": "tenant-01"},
|
||||
"cluster-id")
|
@ -1,544 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from mock import patch, Mock
|
||||
from oslo.config import cfg
|
||||
import unittest
|
||||
|
||||
from savanna.exceptions import NotFoundException, SavannaException
|
||||
import savanna.openstack.common.exception as os_ex
|
||||
from savanna.service.api import Resource
|
||||
import savanna.service.validation as v
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('allow_cluster_ops', 'savanna.config')
|
||||
|
||||
|
||||
def _raise(ex):
|
||||
def function(*args, **kwargs):
|
||||
raise ex
|
||||
|
||||
return function
|
||||
|
||||
|
||||
def _cluster(base, **kwargs):
|
||||
base['cluster'].update(**kwargs)
|
||||
return base
|
||||
|
||||
|
||||
def _template(base, **kwargs):
|
||||
base['node_template'].update(**kwargs)
|
||||
return base
|
||||
|
||||
|
||||
class TestValidation(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self._create_object_fun = None
|
||||
CONF.set_override('allow_cluster_ops', False)
|
||||
|
||||
def tearDown(self):
|
||||
self._create_object_fun = None
|
||||
CONF.clear_override('allow_cluster_ops')
|
||||
|
||||
@patch("savanna.utils.api.bad_request")
|
||||
@patch("savanna.utils.api.request_data")
|
||||
def test_malformed_request_body(self, request_data, bad_request):
|
||||
ex = os_ex.MalformedRequestBody()
|
||||
request_data.side_effect = _raise(ex)
|
||||
m_func = Mock()
|
||||
m_func.__name__ = "m_func"
|
||||
|
||||
v.validate(m_func)(m_func)()
|
||||
|
||||
self._assert_calls(bad_request,
|
||||
(1, 'MALFORMED_REQUEST_BODY',
|
||||
'Malformed message body: %(reason)s'))
|
||||
|
||||
def _assert_exists_by_id(self, side_effect, assert_func=True):
|
||||
m_checker = Mock()
|
||||
m_checker.side_effect = side_effect
|
||||
m_func = Mock()
|
||||
m_func.__name__ = "m_func"
|
||||
|
||||
v.exists_by_id(m_checker, "template_id")(m_func)(template_id="asd")
|
||||
|
||||
m_checker.assert_called_once_with(id="asd")
|
||||
|
||||
if assert_func:
|
||||
m_func.assert_called_once_with(template_id="asd")
|
||||
|
||||
@patch("savanna.utils.api.internal_error")
|
||||
@patch("savanna.utils.api.not_found")
|
||||
def test_exists_by_id_passed(self, not_found, internal_error):
|
||||
self._assert_exists_by_id(None)
|
||||
|
||||
self.assertEqual(not_found.call_count, 0)
|
||||
self.assertEqual(internal_error.call_count, 0)
|
||||
|
||||
@patch("savanna.utils.api.internal_error")
|
||||
@patch("savanna.utils.api.not_found")
|
||||
def test_exists_by_id_failed(self, not_found, internal_error):
|
||||
self._assert_exists_by_id(_raise(NotFoundException("")), False)
|
||||
self.assertEqual(not_found.call_count, 1)
|
||||
self.assertEqual(internal_error.call_count, 0)
|
||||
|
||||
self._assert_exists_by_id(_raise(SavannaException()), False)
|
||||
self.assertEqual(not_found.call_count, 1)
|
||||
self.assertEqual(internal_error.call_count, 1)
|
||||
|
||||
self._assert_exists_by_id(_raise(AttributeError()), False)
|
||||
self.assertEqual(not_found.call_count, 1)
|
||||
self.assertEqual(internal_error.call_count, 2)
|
||||
|
||||
def _assert_calls(self, mock, call_info):
|
||||
print "_assert_calls for %s, \n\t actual: %s , \n\t expected: %s" \
|
||||
% (mock, mock.call_args, call_info)
|
||||
if not call_info:
|
||||
self.assertEqual(mock.call_count, 0)
|
||||
else:
|
||||
self.assertEqual(mock.call_count, call_info[0])
|
||||
self.assertEqual(mock.call_args[0][0].code, call_info[1])
|
||||
self.assertEqual(mock.call_args[0][0].message, call_info[2])
|
||||
|
||||
def _assert_create_object_validation(
|
||||
self, data, bad_req_i=None, not_found_i=None, int_err_i=None):
|
||||
|
||||
request_data_p = patch("savanna.utils.api.request_data")
|
||||
bad_req_p = patch("savanna.utils.api.bad_request")
|
||||
not_found_p = patch("savanna.utils.api.not_found")
|
||||
int_err_p = patch("savanna.utils.api.internal_error")
|
||||
get_clusters_p = patch("savanna.service.api.get_clusters")
|
||||
get_templates_p = patch("savanna.service.api.get_node_templates")
|
||||
get_template_p = patch("savanna.service.api.get_node_template")
|
||||
get_types_p = patch("savanna.service.api.get_node_types")
|
||||
get_node_type_required_params_p = \
|
||||
patch("savanna.service.api.get_node_type_required_params")
|
||||
get_node_type_all_params_p = \
|
||||
patch("savanna.service.api.get_node_type_all_params")
|
||||
patchers = (request_data_p, bad_req_p, not_found_p, int_err_p,
|
||||
get_clusters_p, get_templates_p, get_template_p,
|
||||
get_types_p, get_node_type_required_params_p,
|
||||
get_node_type_all_params_p)
|
||||
|
||||
request_data = request_data_p.start()
|
||||
bad_req = bad_req_p.start()
|
||||
not_found = not_found_p.start()
|
||||
int_err = int_err_p.start()
|
||||
get_clusters = get_clusters_p.start()
|
||||
get_templates = get_templates_p.start()
|
||||
get_template = get_template_p.start()
|
||||
get_types = get_types_p.start()
|
||||
get_node_type_required_params = get_node_type_required_params_p.start()
|
||||
get_node_type_all_params = get_node_type_all_params_p.start()
|
||||
|
||||
# stub clusters list
|
||||
get_clusters.return_value = getattr(self, "_clusters_data", [
|
||||
Resource("cluster", {
|
||||
"name": "some-cluster-1"
|
||||
})
|
||||
])
|
||||
|
||||
# stub node templates
|
||||
get_templates.return_value = getattr(self, "_templates_data", [
|
||||
Resource("node_template", {
|
||||
"name": "jt_nn.small",
|
||||
"node_type": {
|
||||
"name": "JT+NN",
|
||||
"processes": ["job_tracker", "name_node"]
|
||||
}
|
||||
}),
|
||||
Resource("node_template", {
|
||||
"name": "nn.small",
|
||||
"node_type": {
|
||||
"name": "NN",
|
||||
"processes": ["name_node"]
|
||||
}
|
||||
})
|
||||
])
|
||||
|
||||
def _get_template(name):
|
||||
for template in get_templates():
|
||||
if template.name == name:
|
||||
return template
|
||||
return None
|
||||
|
||||
get_template.side_effect = _get_template
|
||||
|
||||
get_types.return_value = getattr(self, "_types_data", [
|
||||
Resource("node_type", {
|
||||
"name": "JT+NN",
|
||||
"processes": ["job_tracker", "name_node"]
|
||||
})
|
||||
])
|
||||
|
||||
def _get_r_params(name):
|
||||
if name == "JT+NN":
|
||||
return {"job_tracker": ["jt_param"]}
|
||||
return dict()
|
||||
|
||||
get_node_type_required_params.side_effect = _get_r_params
|
||||
|
||||
def _get_all_params(name):
|
||||
if name == "JT+NN":
|
||||
return {"job_tracker": ["jt_param"]}
|
||||
return dict()
|
||||
|
||||
get_node_type_all_params.side_effect = _get_all_params
|
||||
|
||||
# mock function that should be validated
|
||||
m_func = Mock()
|
||||
m_func.__name__ = "m_func"
|
||||
|
||||
# request data to validate
|
||||
request_data.return_value = data
|
||||
|
||||
v.validate(self._create_object_fun)(m_func)()
|
||||
|
||||
self.assertEqual(request_data.call_count, 1)
|
||||
|
||||
self._assert_calls(bad_req, bad_req_i)
|
||||
self._assert_calls(not_found, not_found_i)
|
||||
self._assert_calls(int_err, int_err_i)
|
||||
|
||||
for patcher in patchers:
|
||||
patcher.stop()
|
||||
|
||||
def test_cluster_create_v_required(self):
|
||||
self._create_object_fun = v.validate_cluster_create
|
||||
|
||||
self._assert_create_object_validation(
|
||||
{},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'cluster' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"cluster": {}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'name' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"cluster": {
|
||||
"name": "some-name"
|
||||
}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'base_image_id' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"cluster": {
|
||||
"name": "some-name",
|
||||
"base_image_id": "some-image-id"
|
||||
}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'node_templates' is a required property")
|
||||
)
|
||||
|
||||
def test_cluster_create_v_name_base(self):
|
||||
self._create_object_fun = v.validate_cluster_create
|
||||
|
||||
cluster = {
|
||||
"cluster": {
|
||||
"base_image_id": "some-image-id",
|
||||
"node_templates": {}
|
||||
}
|
||||
}
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, name=None),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"None is not of type 'string'")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, name=""),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'' is too short")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, name="a" * 51),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'%s' is too long" % ('a' * 51))
|
||||
)
|
||||
|
||||
def test_cluster_create_v_name_pattern(self):
|
||||
self._create_object_fun = v.validate_cluster_create
|
||||
|
||||
cluster = {
|
||||
"cluster": {
|
||||
"base_image_id": "some-image-id",
|
||||
"node_templates": {}
|
||||
}
|
||||
}
|
||||
|
||||
def _assert_cluster_name_pattern(self, name):
|
||||
cluster_schema = v.CLUSTER_CREATE_SCHEMA['properties']['cluster']
|
||||
name_p = cluster_schema['properties']['name']['pattern']
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, name=name),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
(u"'%s' does not match '%s'" % (name, name_p))
|
||||
.replace('\\', "\\\\"))
|
||||
)
|
||||
|
||||
_assert_cluster_name_pattern(self, "asd_123")
|
||||
_assert_cluster_name_pattern(self, "123")
|
||||
_assert_cluster_name_pattern(self, "asd?")
|
||||
|
||||
def test_cluster_create_v_name_exists(self):
|
||||
self._create_object_fun = v.validate_cluster_create
|
||||
|
||||
cluster = {
|
||||
"cluster": {
|
||||
"base_image_id": "some-image-id",
|
||||
"node_templates": {}
|
||||
}
|
||||
}
|
||||
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, name="some-cluster-1"),
|
||||
bad_req_i=(1, "CLUSTER_NAME_ALREADY_EXISTS",
|
||||
u"Cluster with name 'some-cluster-1' already exists")
|
||||
)
|
||||
|
||||
def test_cluster_create_v_templates(self):
|
||||
self._create_object_fun = v.validate_cluster_create
|
||||
|
||||
cluster = {
|
||||
"cluster": {
|
||||
"name": "some-cluster",
|
||||
"base_image_id": "some-image-id"
|
||||
}
|
||||
}
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, node_templates={}),
|
||||
bad_req_i=(1, "NOT_SINGLE_NAME_NODE",
|
||||
u"Hadoop cluster should contain only 1 NameNode. "
|
||||
u"Actual NN count is 0")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, node_templates={
|
||||
"nn.small": 1
|
||||
}),
|
||||
bad_req_i=(1, "NOT_SINGLE_JOB_TRACKER",
|
||||
u"Hadoop cluster should contain only 1 JobTracker. "
|
||||
u"Actual JT count is 0")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, node_templates={
|
||||
"incorrect_template": 10
|
||||
}),
|
||||
bad_req_i=(1, "NODE_TEMPLATE_NOT_FOUND",
|
||||
u"NodeTemplate 'incorrect_template' not found")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
_cluster(cluster, node_templates={
|
||||
"jt_nn.small": 1
|
||||
})
|
||||
)
|
||||
|
||||
def test_node_template_create_v_required(self):
|
||||
self._create_object_fun = v.validate_node_template_create
|
||||
|
||||
self._assert_create_object_validation(
|
||||
{},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'node_template' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'name' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name"
|
||||
}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'node_type' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "some-node-type"
|
||||
}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'flavor_id' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1"
|
||||
}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'name_node' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {}
|
||||
}},
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'job_tracker' is a required property")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {}
|
||||
}},
|
||||
bad_req_i=(1, "REQUIRED_PARAM_MISSED",
|
||||
u"Required parameter 'jt_param' of process "
|
||||
u"'job_tracker' should be specified")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {"jt_param": ""}
|
||||
}},
|
||||
bad_req_i=(1, "REQUIRED_PARAM_MISSED",
|
||||
u"Required parameter 'jt_param' of process "
|
||||
u"'job_tracker' should be specified")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {"jt_param": "some value", "bad.parameter": "1"}
|
||||
}},
|
||||
bad_req_i=(1, "PARAM_IS_NOT_ALLOWED",
|
||||
u"Parameter 'bad.parameter' "
|
||||
u"of process 'job_tracker' is not allowed to change")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {"jt_param": "some value"}
|
||||
}},
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
{"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {},
|
||||
"task_tracker": {}
|
||||
}},
|
||||
bad_req_i=(1, "NODE_PROCESS_DISCREPANCY",
|
||||
u"Discrepancies in Node Processes. "
|
||||
u"Required: ['name_node', 'job_tracker']")
|
||||
)
|
||||
|
||||
def test_node_template_create_v_name_base(self):
|
||||
self._create_object_fun = v.validate_node_template_create
|
||||
|
||||
template = {
|
||||
"node_template": {
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {}
|
||||
}
|
||||
}
|
||||
self._assert_create_object_validation(
|
||||
_template(template, name=None),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"None is not of type 'string'")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
_template(template, name=""),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'' is too short")
|
||||
)
|
||||
self._assert_create_object_validation(
|
||||
_template(template, name="a" * 241),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
u"'%s' is too long" % ('a' * 241))
|
||||
)
|
||||
|
||||
def test_node_template_create_v_name_pattern(self):
|
||||
self._create_object_fun = v.validate_node_template_create
|
||||
|
||||
template = {
|
||||
"node_template": {
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {}
|
||||
}
|
||||
}
|
||||
|
||||
def _assert_template_name_pattern(self, name):
|
||||
schema_props = v.TEMPLATE_CREATE_SCHEMA['properties']
|
||||
template_schema = schema_props['node_template']
|
||||
name_p = template_schema['properties']['name']['pattern']
|
||||
self._assert_create_object_validation(
|
||||
_template(template, name=name),
|
||||
bad_req_i=(1, "VALIDATION_ERROR",
|
||||
(u"'%s' does not match '%s'" % (name, name_p))
|
||||
.replace('\\', "\\\\"))
|
||||
)
|
||||
|
||||
_assert_template_name_pattern(self, "asd;123")
|
||||
_assert_template_name_pattern(self, "123")
|
||||
_assert_template_name_pattern(self, "asd?")
|
||||
|
||||
def test_node_template_create_v_name_exists(self):
|
||||
self._create_object_fun = v.validate_node_template_create
|
||||
|
||||
template = {
|
||||
"node_template": {
|
||||
"node_type": "JT+NN",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {}
|
||||
}
|
||||
}
|
||||
|
||||
self._assert_create_object_validation(
|
||||
_template(template, name="jt_nn.small"),
|
||||
bad_req_i=(1, "NODE_TEMPLATE_ALREADY_EXISTS",
|
||||
u"NodeTemplate with name 'jt_nn.small' already exists")
|
||||
)
|
||||
|
||||
def test_node_template_create_v_types(self):
|
||||
self._create_object_fun = v.validate_node_template_create
|
||||
|
||||
self._assert_create_object_validation(
|
||||
{
|
||||
"node_template": {
|
||||
"name": "some-name",
|
||||
"node_type": "JJ",
|
||||
"flavor_id": "flavor-1",
|
||||
"name_node": {},
|
||||
"job_tracker": {}
|
||||
}
|
||||
},
|
||||
bad_req_i=(1, "NODE_TYPE_NOT_FOUND",
|
||||
u"NodeType 'JJ' not found")
|
||||
)
|
||||
|
||||
# TODO(slukjanov): add tests for allow_cluster_ops = True
|
@ -13,20 +13,21 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import flask as f
|
||||
import inspect
|
||||
import mimetypes
|
||||
import traceback
|
||||
|
||||
from flask import abort, request, Blueprint, Response
|
||||
from werkzeug.datastructures import MIMEAccept
|
||||
|
||||
from savanna.openstack.common.wsgi import JSONDictSerializer, \
|
||||
XMLDictSerializer, JSONDeserializer
|
||||
from savanna.context import Context
|
||||
from savanna.context import set_ctx
|
||||
from savanna.openstack.common import log as logging
|
||||
from savanna.openstack.common import wsgi
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Rest(Blueprint):
|
||||
class Rest(f.Blueprint):
|
||||
def get(self, rule, status_code=200):
|
||||
return self._mroute('GET', rule, status_code)
|
||||
|
||||
@ -52,24 +53,35 @@ class Rest(Blueprint):
|
||||
|
||||
def handler(**kwargs):
|
||||
# extract response content type
|
||||
resp_type = request.accept_mimetypes
|
||||
resp_type = f.request.accept_mimetypes
|
||||
type_suffix = kwargs.pop('resp_type', None)
|
||||
if type_suffix:
|
||||
suffix_mime = mimetypes.guess_type("res." + type_suffix)[0]
|
||||
if suffix_mime:
|
||||
resp_type = MIMEAccept([(suffix_mime, 1)])
|
||||
request.resp_type = resp_type
|
||||
|
||||
# extract fields (column selection)
|
||||
fields = list(set(request.args.getlist('fields')))
|
||||
fields.sort()
|
||||
request.fields_selector = fields
|
||||
f.request.resp_type = resp_type
|
||||
|
||||
# update status code
|
||||
if status:
|
||||
request.status_code = status
|
||||
f.request.status_code = status
|
||||
|
||||
kwargs.pop("tenant_id")
|
||||
|
||||
context = Context(f.request.headers['X-User-Id'],
|
||||
f.request.headers['X-Tenant-Id'],
|
||||
f.request.headers['X-Auth-Token'],
|
||||
f.request.headers)
|
||||
set_ctx(context)
|
||||
|
||||
# set func implicit args
|
||||
args = inspect.getargspec(func).args
|
||||
|
||||
if 'ctx' in args:
|
||||
kwargs['ctx'] = context
|
||||
|
||||
if f.request.method in ['POST', 'PUT'] and 'data' in args:
|
||||
kwargs['data'] = request_data()
|
||||
|
||||
return func(**kwargs)
|
||||
|
||||
f_rule = "/<tenant_id>" + rule
|
||||
@ -77,7 +89,10 @@ class Rest(Blueprint):
|
||||
ext_rule = f_rule + '.<resp_type>'
|
||||
self.add_url_rule(ext_rule, endpoint, handler, **options)
|
||||
|
||||
return func
|
||||
try:
|
||||
return func
|
||||
except Exception, e:
|
||||
return internal_error(500, 'Exception in API call', e)
|
||||
|
||||
return decorator
|
||||
|
||||
@ -86,6 +101,30 @@ RT_JSON = MIMEAccept([("application/json", 1)])
|
||||
RT_XML = MIMEAccept([("application/xml", 1)])
|
||||
|
||||
|
||||
def _clean_nones(obj):
|
||||
if not isinstance(obj, dict) and not isinstance(obj, list):
|
||||
return obj
|
||||
|
||||
if isinstance(obj, dict):
|
||||
remove = []
|
||||
for key, value in obj.iteritems():
|
||||
if value is None:
|
||||
remove.append(key)
|
||||
for key in remove:
|
||||
obj.pop(key)
|
||||
for value in obj.values():
|
||||
_clean_nones(value)
|
||||
elif isinstance(obj, list):
|
||||
new_list = []
|
||||
for elem in obj:
|
||||
elem = _clean_nones(elem)
|
||||
if elem is not None:
|
||||
new_list.append(elem)
|
||||
return new_list
|
||||
|
||||
return obj
|
||||
|
||||
|
||||
def render(res=None, resp_type=None, status=None, **kwargs):
|
||||
if not res:
|
||||
res = {}
|
||||
@ -95,14 +134,16 @@ def render(res=None, resp_type=None, status=None, **kwargs):
|
||||
# can't merge kwargs into the non-dict res
|
||||
abort_and_log(500, "Non-dict and non-empty kwargs passed to render")
|
||||
|
||||
status_code = getattr(request, 'status_code', None)
|
||||
res = _clean_nones(res)
|
||||
|
||||
status_code = getattr(f.request, 'status_code', None)
|
||||
if status:
|
||||
status_code = status
|
||||
if not status_code:
|
||||
status_code = 200
|
||||
|
||||
if not resp_type:
|
||||
resp_type = getattr(request, 'resp_type', RT_JSON)
|
||||
resp_type = getattr(f.request, 'resp_type', RT_JSON)
|
||||
|
||||
if not resp_type:
|
||||
resp_type = RT_JSON
|
||||
@ -110,31 +151,31 @@ def render(res=None, resp_type=None, status=None, **kwargs):
|
||||
serializer = None
|
||||
if "application/json" in resp_type:
|
||||
resp_type = RT_JSON
|
||||
serializer = JSONDictSerializer()
|
||||
serializer = wsgi.JSONDictSerializer()
|
||||
elif "application/xml" in resp_type:
|
||||
resp_type = RT_XML
|
||||
serializer = XMLDictSerializer()
|
||||
serializer = wsgi.XMLDictSerializer()
|
||||
else:
|
||||
abort_and_log(400, "Content type '%s' isn't supported" % resp_type)
|
||||
|
||||
body = serializer.serialize(res)
|
||||
resp_type = str(resp_type)
|
||||
|
||||
return Response(response=body, status=status_code, mimetype=resp_type)
|
||||
return f.Response(response=body, status=status_code, mimetype=resp_type)
|
||||
|
||||
|
||||
def request_data():
|
||||
if hasattr(request, 'parsed_data'):
|
||||
return request.parsed_data
|
||||
if hasattr(f.request, 'parsed_data'):
|
||||
return f.request.parsed_data
|
||||
|
||||
if not request.content_length > 0:
|
||||
if not f.request.content_length > 0:
|
||||
LOG.debug("Empty body provided in request")
|
||||
return dict()
|
||||
|
||||
deserializer = None
|
||||
content_type = request.mimetype
|
||||
content_type = f.request.mimetype
|
||||
if not content_type or content_type in RT_JSON:
|
||||
deserializer = JSONDeserializer()
|
||||
deserializer = wsgi.JSONDeserializer()
|
||||
elif content_type in RT_XML:
|
||||
abort_and_log(400, "XML requests are not supported yet")
|
||||
# deserializer = XMLDeserializer()
|
||||
@ -142,9 +183,9 @@ def request_data():
|
||||
abort_and_log(400, "Content type '%s' isn't supported" % content_type)
|
||||
|
||||
# parsed request data to avoid unwanted re-parsings
|
||||
request.parsed_data = deserializer.deserialize(request.data)['body']
|
||||
f.request.parsed_data = deserializer.deserialize(f.request.data)['body']
|
||||
|
||||
return request.parsed_data
|
||||
return f.request.parsed_data
|
||||
|
||||
|
||||
def abort_and_log(status_code, descr, exc=None):
|
||||
@ -154,7 +195,7 @@ def abort_and_log(status_code, descr, exc=None):
|
||||
if exc is not None:
|
||||
LOG.error(traceback.format_exc())
|
||||
|
||||
abort(status_code, description=descr)
|
||||
f.abort(status_code, description=descr)
|
||||
|
||||
|
||||
def render_error_message(error_code, error_message, error_name):
|
||||
|
101  savanna/utils/openstack/images.py  Normal file
@ -0,0 +1,101 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from novaclient.v1_1.images import Image
from novaclient.v1_1.images import ImageManager


PROP_DESCR = '_savanna_description'
PROP_USERNAME = '_savanna_username'
PROP_TAG = '_savanna_tag_'


def _iter_tags(meta):
    for key in meta:
        if key.startswith(PROP_TAG) and meta[key]:
            yield key[len(PROP_TAG):]


def _ensure_tags(tags):
    return [tags] if type(tags) in [str, unicode] else tags


class SavannaImage(Image):
    def __init__(self, manager, info, loaded=False):
        info['description'] = info.get('metadata', {}).get(PROP_DESCR)
        info['username'] = info.get('metadata', {}).get(PROP_USERNAME)
        info['tags'] = [tag for tag in _iter_tags(info.get('metadata', {}))]
        super(SavannaImage, self).__init__(manager, info, loaded)

    def tag(self, tags):
        self.manager.tag(self, tags)

    def untag(self, tags):
        self.manager.untag(self, tags)

    def set_description(self, description=None, username=None):
        self.manager.set_description(self, description, username)

    @property
    def dict(self):
        return self.to_dict()

    @property
    def wrapped_dict(self):
        return {'image': self.dict}

    def to_dict(self):
        return self._info.copy()


class SavannaImageManager(ImageManager):
    """Manage :class:`SavannaImage` resources.

    This is an extended version of nova client's ImageManager with support of
    additional description and image tags stored in images' meta.
    """
    resource_class = SavannaImage

    def set_description(self, image, description, username):
        """Sets human-readable information for image.

        For example:

            Ubuntu 13.04 x64 with Java 1.7u21 and Apache Hadoop 1.1.1, ubuntu
        """
        self.set_meta(image, {
            PROP_DESCR: description,
            PROP_USERNAME: username,
        })

    def tag(self, image, tags):
        """Adds tags to the specified image."""
        tags = _ensure_tags(tags)

        self.set_meta(image, dict((PROP_TAG + tag, True) for tag in tags))

    def untag(self, image, tags):
        """Removes tags from the specified image."""
        tags = _ensure_tags(tags)

        self.delete_meta(image, [PROP_TAG + tag for tag in tags])

    def list_by_tags(self, tags):
        """Returns images having all of the specified tags."""
        tags = _ensure_tags(tags)
        return [i for i in self.list() if set(tags).issubset(i.tags)]

    def list_registered(self):
        return [i for i in self.list() if i.description and i.username]
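For orientation, a minimal usage sketch of the image registry extension above. It is illustrative only and not part of the commit: the credentials, image id, description and tags below are invented placeholders.

# Illustrative sketch only: credentials, image id, description and tags are
# made-up placeholders, not values from this change.
from novaclient.v1_1 import client as nova_client
from savanna.utils.openstack.images import SavannaImageManager

nova = nova_client.Client('user', 'apikey', 'tenant',
                          'http://keystone.example:5000/v2.0/')
nova.images = SavannaImageManager(nova)

image = nova.images.get('some-image-id')
image.set_description('Ubuntu 13.04 with Apache Hadoop 1.1.1',
                      username='ubuntu')
image.tag(['hadoop', '1.1.1'])

hadoop_images = nova.images.list_by_tags(['hadoop'])
registered = nova.images.list_registered()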
@ -16,11 +16,14 @@
 import logging
 
 from novaclient.v1_1 import client as nova_client
+from savanna.context import ctx
 
 import savanna.utils.openstack.base as base
+from savanna.utils.openstack.images import SavannaImageManager
 
 
-def novaclient(headers):
+def novaclient():
+    headers = ctx().headers
     username = headers['X-User-Name']
     token = headers['X-Auth-Token']
     tenant = headers['X-Tenant-Id']
@ -35,26 +38,31 @@
 
     nova.client.auth_token = token
     nova.client.management_url = compute_url
+    nova.images = SavannaImageManager(nova)
 
     return nova
 
 
-def get_flavors(headers):
+def get_flavors():
+    headers = ctx().headers
     flavors = [flavor.name for flavor
                in novaclient(headers).flavors.list()]
     return flavors
 
 
-def get_flavor(headers, **kwargs):
+def get_flavor(**kwargs):
+    headers = ctx().headers
     return novaclient(headers).flavors.find(**kwargs)
 
 
-def get_images(headers):
+def get_images():
+    headers = ctx().headers
     images = [image.id for image
               in novaclient(headers).images.list()]
     return images
 
 
-def get_limits(headers):
+def get_limits():
+    headers = ctx().headers
     limits = novaclient(headers).limits.get().absolute
     return dict((l.name, l.value) for l in limits)
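A hedged illustration of the effect of the hunk above: the nova helpers now read the keystone headers from the thread-local request context instead of taking them as an argument, so a caller only needs a context to be set. The ids, token and header values below are invented for the example.

# Illustrative sketch only: ids, token and header values are made up; the
# helpers called are the ones defined in the hunk above.
from savanna.context import Context
from savanna.context import set_ctx

set_ctx(Context('user-id', 'tenant-id', 'auth-token', {
    'X-User-Name': 'demo',
    'X-User-Id': 'user-id',
    'X-Tenant-Id': 'tenant-id',
    'X-Auth-Token': 'auth-token',
}))

flavors = get_flavors()    # headers are taken from ctx().headers internally
images = get_images()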
@ -29,7 +29,7 @@ def patch_minidom_writexml():
     if sys.version_info >= (2, 7, 3):
         return
 
-    from xml.dom.minidom import Element, Node, Text, _write_data
+    import xml.dom.minidom as md
 
     def writexml(self, writer, indent="", addindent="", newl=""):
         # indent = current indentation
@ -43,12 +43,12 @@ def patch_minidom_writexml():
 
         for a_name in a_names:
             writer.write(" %s=\"" % a_name)
-            _write_data(writer, attrs[a_name].value)
+            md._write_data(writer, attrs[a_name].value)
             writer.write("\"")
         if self.childNodes:
             writer.write(">")
             if (len(self.childNodes) == 1
-                    and self.childNodes[0].nodeType == Node.TEXT_NODE):
+                    and self.childNodes[0].nodeType == md.Node.TEXT_NODE):
                 self.childNodes[0].writexml(writer, '', '', '')
             else:
                 writer.write(newl)
@ -59,9 +59,9 @@ def patch_minidom_writexml():
         else:
             writer.write("/>%s" % (newl))
 
-    Element.writexml = writexml
+    md.Element.writexml = writexml
 
     def writexml(self, writer, indent="", addindent="", newl=""):
-        _write_data(writer, "%s%s%s" % (indent, self.data, newl))
+        md._write_data(writer, "%s%s%s" % (indent, self.data, newl))
 
-    Text.writexml = writexml
+    md.Text.writexml = writexml
70  savanna/utils/resources.py  Normal file
@ -0,0 +1,70 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect


class BaseResource(object):
    __resource_name__ = 'base'
    __filter_cols__ = []

    @property
    def dict(self):
        return self.to_dict()

    @property
    def wrapped_dict(self):
        return {self.__resource_name__: self.dict}

    @property
    def __all_filter_cols__(self):
        cls = self.__class__
        if not hasattr(cls, '__mro_filter_cols__'):
            filter_cols = []
            for base_cls in inspect.getmro(cls):
                filter_cols += getattr(base_cls, '__filter_cols__', [])
            cls.__mro_filter_cols__ = set(filter_cols)
        return cls.__mro_filter_cols__

    def _filter_field(self, k):
        return k == '_sa_instance_state' or k in self.__all_filter_cols__

    def to_dict(self):
        dictionary = self.__dict__.copy()
        return dict([(k, v) for k, v in dictionary.iteritems()
                     if not self._filter_field(k)])

    def as_resource(self):
        return Resource(self.__resource_name__, self.to_dict())


class Resource(BaseResource):
    def __init__(self, _name, _info):
        self._name = _name
        self._info = _info

    def __getattr__(self, k):
        if k not in self.__dict__:
            return self._info.get(k)
        return self.__dict__[k]

    def __repr__(self):
        return '<%s %s>' % (self._name, self._info)

    def __eq__(self, other):
        return self._name == other._name and self._info == other._info

    def to_dict(self):
        return self._info.copy()
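As a quick illustration of the Resource wrapper defined above (not part of the commit; the resource name and fields are invented for the example):

# Illustrative sketch only: 'cluster', 'name' and 'status' are made-up values.
from savanna.utils.resources import Resource

r = Resource('cluster', {'name': 'c-1', 'status': 'Active'})
print r.name            # attribute access falls through to the wrapped dict
print r.wrapped_dict    # {'cluster': {'name': 'c-1', 'status': 'Active'}}
assert r == Resource('cluster', {'name': 'c-1', 'status': 'Active'})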
110  savanna/utils/sqlatypes.py  Normal file
@@ -0,0 +1,110 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import TypeDecorator, VARCHAR

from savanna.openstack.common import jsonutils


class JSONEncoded(TypeDecorator):
    """Represents an immutable structure as a json-encoded string."""

    impl = VARCHAR

    def process_bind_param(self, value, dialect):
        if value is not None:
            value = jsonutils.dumps(value)
        return value

    def process_result_value(self, value, dialect):
        if value is not None:
            value = jsonutils.loads(value)
        return value


# todo verify this implementation
class MutableDict(Mutable, dict):
    @classmethod
    def coerce(cls, key, value):
        """Convert plain dictionaries to MutableDict."""
        if not isinstance(value, MutableDict):
            if isinstance(value, dict):
                return MutableDict(value)

            # this call will raise ValueError
            return Mutable.coerce(key, value)
        else:
            return value

    def update(self, e=None, **f):
        """Detect dictionary update events and emit change events."""
        dict.update(self, e, **f)
        self.changed()

    def __setitem__(self, key, value):
        """Detect dictionary set events and emit change events."""
        dict.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, key):
        """Detect dictionary del events and emit change events."""
        dict.__delitem__(self, key)
        self.changed()


# todo verify this implementation
class MutableList(Mutable, list):
    @classmethod
    def coerce(cls, key, value):
        """Convert plain lists to MutableList."""
        if not isinstance(value, MutableList):
            if isinstance(value, list):
                return MutableList(value)

            # this call will raise ValueError
            return Mutable.coerce(key, value)
        else:
            return value

    def __add__(self, value):
        """Detect list add events and emit change events."""
        list.__add__(self, value)
        self.changed()

    def append(self, value):
        """Detect list add events and emit change events."""
        list.append(self, value)
        self.changed()

    def __setitem__(self, key, value):
        """Detect list set events and emit change events."""
        list.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, i):
        """Detect list del events and emit change events."""
        list.__delitem__(self, i)
        self.changed()


def JsonDictType():
    """Returns an SQLAlchemy Column Type suitable to store a Json dict."""
    return MutableDict.as_mutable(JSONEncoded)


def JsonListType():
    """Returns an SQLAlchemy Column Type suitable to store a Json array."""
    return MutableList.as_mutable(JSONEncoded)
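Illustrative note (not part of the change): a minimal sketch of how these column types would be declared on a SQLAlchemy model. The model name and its columns below are assumptions for illustration; only JsonDictType/JsonListType come from the new module.

# Hypothetical usage sketch -- model and column names are made up.
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

from savanna.utils.sqlatypes import JsonDictType, JsonListType

Base = declarative_base()


class NodeTemplate(Base):
    __tablename__ = 'NodeTemplate'

    id = sa.Column(sa.String(36), primary_key=True)
    # Stored as a JSON-encoded VARCHAR; in-place mutations such as
    # template.configs['key'] = 'value' are tracked by MutableDict.
    configs = sa.Column(JsonDictType())
    # Stored as a JSON array; append()/__setitem__ mark the row dirty.
    tags = sa.Column(JsonListType())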
4  setup.py
@@ -8,7 +8,7 @@ project = 'savanna'

setuptools.setup(
    name=project,
    version=common_setup.get_version(project, '0.1.2'),
    version=common_setup.get_version(project, '0.2'),
    description='Savanna project',
    author='Mirantis Inc.',
    author_email='savanna-team@mirantis.com',
@@ -37,7 +37,7 @@ setuptools.setup(
    test_suite='nose.collector',
    scripts=[
        'bin/savanna-api',
        'bin/savanna-manage',
        'bin/savanna-db-manage',
    ],
    py_modules=[],
    data_files=[
@@ -1,4 +1,5 @@
# This file is managed by openstack-depends
alembic>=0.4.1
eventlet>=0.9.12
flask==0.9
jsonschema>=1.0.0
@@ -6,8 +7,6 @@ oslo.config>=1.1.0
paramiko>=1.8.0
python-keystoneclient>=0.2,<0.3
python-novaclient>=2.12.0,<3
six
sqlalchemy>=0.7,<=0.7.99
webob>=1.0.8

# Additional depends
flask-sqlalchemy
@@ -1,3 +0,0 @@
#!/bin/sh

echo "Please, use tools/run_pep8 instead of tools/run_pyflakes"
9  tox.ini
@@ -1,5 +1,5 @@
[tox]
envlist = py26,py27,pep8,pyflakes
envlist = py26,py27,pep8

[testenv]
setenv =
@@ -31,10 +31,6 @@ deps =
    hacking
commands = flake8

[testenv:pyflakes]
deps =
commands =

[testenv:venv]
commands = {posargs}

@@ -49,9 +45,8 @@ commands =
    pylint --output-format=parseable --rcfile=.pylintrc bin/savanna-api bin/savanna-manage savanna | tee pylint-report.txt

[flake8]
# H301 one import per line
# H302 import only modules
ignore = H301,H302
ignore = H302
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools
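Illustrative note (not part of the change): with H301 dropped from the ignore list, the hacking check for one import per line is now enforced by flake8. A quick sketch of what that means for new code:

# H301: one import per line is now enforced.
import os
import sys        # compliant

# import os, sys  # would now be flagged as H301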