Merge "root enable for pxc clustered datastore"
Commit 1035f4ce7d
@@ -691,7 +691,7 @@ pxc_opts = [
                help='Class that implements datastore-specific Guest Agent API '
                     'logic.'),
     cfg.StrOpt('root_controller',
-               default='trove.extensions.common.service.DefaultRootController',
+               default='trove.extensions.pxc.service.PxcRootController',
                help='Root controller implementation for pxc.'),
     cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query',
                help='List of Guest Logs to expose for publishing.'),
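Aside (not part of the commit): the option above is an ordinary oslo.config StrOpt, so a deployment could also point it at a different controller from trove.conf. A minimal, hedged sketch, assuming the pxc datastore options are exposed under a [pxc] section (the section name is an assumption, not shown in this diff):

    [pxc]
    # illustrative override; the new in-tree default is already PxcRootController
    root_controller = trove.extensions.pxc.service.PxcRootController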
@@ -22,6 +22,7 @@ from trove.common.remote import create_nova_client
 from trove.common.strategies.cluster import base
 from trove.common.template import ClusterConfigTemplate
 from trove.common import utils
+from trove.extensions.common import models as ext_models
 from trove.instance.models import DBInstance
 from trove.instance.models import Instance
 from trove.instance import tasks as inst_tasks
@@ -151,6 +152,16 @@ class PXCClusterTasks(task_models.ClusterTasks):
 
         LOG.debug("End create_cluster for id: %s." % cluster_id)
 
+    def _check_cluster_for_root(self, context, existing_instances,
+                                new_instances):
+        """Check for existing instances root enabled"""
+        for instance in existing_instances:
+            if ext_models.Root.load(context, instance.id):
+                for new_instance in new_instances:
+                    ext_models.RootHistory.create(context, new_instance.id,
+                                                  context.user)
+                return
+
     def grow_cluster(self, context, cluster_id, new_instance_ids):
         LOG.debug("Begin pxc grow_cluster for id: %s." % cluster_id)
@@ -203,6 +214,10 @@ class PXCClusterTasks(task_models.ClusterTasks):
                                         cluster_configuration,
                                         bootstrap)
 
+            self._check_cluster_for_root(context,
+                                         existing_instances,
+                                         new_instances)
+
             # apply the new config to all instances
             for instance in existing_instances + new_instances:
                 guest = self.get_guest(instance)
@@ -82,6 +82,22 @@ class Root(object):
         create_guest_client(context, instance_id).disable_root()
 
 
+class ClusterRoot(Root):
+
+    @classmethod
+    def create(cls, context, instance_id, user, root_password,
+               cluster_instances_list=None):
+        root_user = super(ClusterRoot, cls).create(context, instance_id,
+                                                   user, root_password,
+                                                   cluster_instances_list=None)
+
+        if cluster_instances_list:
+            for instance in cluster_instances_list:
+                RootHistory.create(context, instance, user)
+
+        return root_user
+
+
 class RootHistory(object):
 
     _auto_generated_attrs = ['id']
@@ -53,6 +53,12 @@ class BaseDatastoreRootController(wsgi.Controller):
     def root_delete(self, req, tenant_id, instance_id, is_cluster):
         pass
 
+    @staticmethod
+    def _get_password_from_body(body=None):
+        if body:
+            return body['password'] if 'password' in body else None
+        return None
+
 
 class DefaultRootController(BaseDatastoreRootController):
@@ -75,10 +81,7 @@ class DefaultRootController(BaseDatastoreRootController):
         LOG.info(_LI("req : '%s'\n\n") % req)
         context = req.environ[wsgi.CONTEXT_KEY]
         user_name = context.user
-        if body:
-            password = body['password'] if 'password' in body else None
-        else:
-            password = None
+        password = DefaultRootController._get_password_from_body(body)
         root = models.Root.create(context, instance_id,
                                   user_name, password)
         return wsgi.Result(views.RootCreatedView(root).data(), 200)
@@ -100,6 +103,67 @@ class DefaultRootController(BaseDatastoreRootController):
         return wsgi.Result(None, 200)
 
 
+class ClusterRootController(DefaultRootController):
+
+    def root_index(self, req, tenant_id, instance_id, is_cluster):
+        """Returns True if root is enabled; False otherwise."""
+        if is_cluster:
+            return self.cluster_root_index(req, tenant_id, instance_id)
+        else:
+            return self.instance_root_index(req, tenant_id, instance_id)
+
+    def instance_root_index(self, req, tenant_id, instance_id):
+        LOG.info(_LI("Getting root enabled for instance '%s'.") % instance_id)
+        LOG.info(_LI("req : '%s'\n\n") % req)
+        context = req.environ[wsgi.CONTEXT_KEY]
+        try:
+            is_root_enabled = models.ClusterRoot.load(context, instance_id)
+        except exception.UnprocessableEntity:
+            raise exception.UnprocessableEntity(
+                "Cluster %s is not ready." % instance_id)
+        return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
+
+    def cluster_root_index(self, req, tenant_id, cluster_id):
+        LOG.info(_LI("Getting root enabled for cluster '%s'.") % cluster_id)
+        single_instance_id, cluster_instances = self._get_cluster_instance_id(
+            tenant_id, cluster_id)
+        return self.instance_root_index(req, tenant_id, single_instance_id)
+
+    def root_create(self, req, body, tenant_id, instance_id, is_cluster):
+        if is_cluster:
+            return self.cluster_root_create(req, body, tenant_id, instance_id)
+        else:
+            return self.instance_root_create(req, body, instance_id)
+
+    def instance_root_create(self, req, body, instance_id,
+                             cluster_instances=None):
+        LOG.info(_LI("Enabling root for instance '%s'.") % instance_id)
+        LOG.info(_LI("req : '%s'\n\n") % req)
+        context = req.environ[wsgi.CONTEXT_KEY]
+        user_name = context.user
+        password = ClusterRootController._get_password_from_body(body)
+        root = models.ClusterRoot.create(context, instance_id, user_name,
+                                         password, cluster_instances)
+        return wsgi.Result(views.RootCreatedView(root).data(), 200)
+
+    def cluster_root_create(self, req, body, tenant_id, cluster_id):
+        LOG.info(_LI("Enabling root for cluster '%s'.") % cluster_id)
+        single_instance_id, cluster_instances = self._get_cluster_instance_id(
+            tenant_id, cluster_id)
+        return self.instance_root_create(req, body, single_instance_id,
+                                         cluster_instances)
+
+    def _find_cluster_node_ids(self, tenant_id, cluster_id):
+        args = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
+        cluster_instances = DBInstance.find_all(**args).all()
+        return [db_instance.id for db_instance in cluster_instances]
+
+    def _get_cluster_instance_id(self, tenant_id, cluster_id):
+        instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id)
+        single_instance_id = instance_ids[0]
+        return single_instance_id, instance_ids
+
+
 class RootController(wsgi.Controller):
     """Controller for instance functionality."""
@@ -120,7 +184,7 @@ class RootController(wsgi.Controller):
             return root_controller.root_create(req, body, tenant_id,
                                                instance_id, is_cluster)
         else:
-            raise NoSuchOptError
+            raise NoSuchOptError('root_controller', group='datastore_manager')
 
     def delete(self, req, tenant_id, instance_id):
         datastore_manager, is_cluster = self._get_datastore(tenant_id,
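Aside (not part of the commit): to see the call path the new ClusterRootController serves from the client side, the scenario runner later in this change drives it through the trove client's root extension. A minimal sketch, assuming `client` is an already-authenticated python-troveclient handle (setup omitted) and mirroring ClusterActionsRunner.run_cluster_root_enable() / run_verify_cluster_root_enable() below:

    # returns the root credentials, indexable as (name, password)
    creds = client.root.create_cluster_root(cluster_id, 'rootpass')
    # every member of the cluster should now report root as enabled
    for instance in client.clusters.get(cluster_id).instances:
        assert client.root.is_instance_root_enabled(instance['id']).rootEnabled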
new file: trove/extensions/pxc/__init__.py (empty)
new file: trove/extensions/pxc/service.py (31 lines)
@@ -0,0 +1,31 @@
# Copyright [2016] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.extensions.common.service import ClusterRootController

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'pxc'


class PxcRootController(ClusterRootController):

    def root_delete(self, req, tenant_id, instance_id, is_cluster):
        raise exception.DatastoreOperationNotSupported(
            operation='disable_root', datastore=MANAGER)
@@ -1,37 +0,0 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from trove.extensions.common.models import Root
from trove.extensions.common.models import RootHistory

LOG = logging.getLogger(__name__)


class VerticaRoot(Root):

    @classmethod
    def create(cls, context, instance_id, user, root_password,
               cluster_instances_list=None):
        root_user = super(VerticaRoot, cls).create(context, instance_id,
                                                   user, root_password,
                                                   cluster_instances_list=None)

        if cluster_instances_list:
            for instance in cluster_instances_list:
                RootHistory.create(context, instance, user)

        return root_user
@@ -17,75 +17,22 @@ from oslo_log import log as logging
 
 from trove.common import cfg
 from trove.common import exception
-from trove.common.i18n import _LI
-from trove.common import wsgi
-from trove.extensions.common.service import BaseDatastoreRootController
-from trove.extensions.common import views
-from trove.extensions.vertica import models
+from trove.extensions.common.service import ClusterRootController
 from trove.instance.models import DBInstance
 
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
-MANAGER = CONF.datastore_manager
+MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'vertica'
 
 
-class VerticaRootController(BaseDatastoreRootController):
-
-    def root_index(self, req, tenant_id, instance_id, is_cluster):
-        """Returns True if root is enabled; False otherwise."""
-        if is_cluster:
-            return self.cluster_root_index(req, tenant_id, instance_id)
-        else:
-            return self.instance_root_index(req, tenant_id, instance_id)
-
-    def instance_root_index(self, req, tenant_id, instance_id):
-        LOG.info(_LI("Getting root enabled for instance '%s'.") % instance_id)
-        LOG.info(_LI("req : '%s'\n\n") % req)
-        context = req.environ[wsgi.CONTEXT_KEY]
-        is_root_enabled = models.VerticaRoot.load(context, instance_id)
-        return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
-
-    def cluster_root_index(self, req, tenant_id, cluster_id):
-        LOG.info(_LI("Getting root enabled for cluster '%s'.") % cluster_id)
-        master_instance_id, cluster_instances = self._get_cluster_instance_id(
-            tenant_id, cluster_id)
-        return self.instance_root_index(req, tenant_id, master_instance_id)
-
-    def root_create(self, req, body, tenant_id, instance_id, is_cluster):
-        if is_cluster:
-            return self.cluster_root_create(req, body, tenant_id, instance_id)
-        else:
-            return self.instance_root_create(req, body, instance_id)
-
-    def instance_root_create(self, req, body, instance_id,
-                             cluster_instances=None):
-        LOG.info(_LI("Enabling root for instance '%s'.") % instance_id)
-        LOG.info(_LI("req : '%s'\n\n") % req)
-        context = req.environ[wsgi.CONTEXT_KEY]
-        user_name = context.user
-        if body:
-            password = body['password'] if 'password' in body else None
-        else:
-            password = None
-        root = models.VerticaRoot.create(context, instance_id, user_name,
-                                         password, cluster_instances)
-        return wsgi.Result(views.RootCreatedView(root).data(), 200)
-
-    def cluster_root_create(self, req, body, tenant_id, cluster_id):
-        LOG.info(_LI("Enabling root for cluster '%s'.") % cluster_id)
-        master_instance_id, cluster_instances = self._get_cluster_instance_id(
-            tenant_id, cluster_id)
-        return self.instance_root_create(req, body, master_instance_id,
-                                         cluster_instances)
+class VerticaRootController(ClusterRootController):
 
     def delete(self, req, tenant_id, instance_id):
         raise exception.DatastoreOperationNotSupported(
             operation='disable_root', datastore=MANAGER)
 
     def _get_cluster_instance_id(self, tenant_id, cluster_id):
-        args = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
-        cluster_instances = DBInstance.find_all(**args).all()
-        instance_ids = [db_instance.id for db_instance in cluster_instances]
+        instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id)
         args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'type':
                 'master'}
         master_instance = DBInstance.find_by(**args)
@@ -80,3 +80,6 @@ class Manager(manager.MySqlManager):
         LOG.debug("Apply the updated cluster configuration.")
         app = self.mysql_app(self.mysql_app_status.get())
         app.write_cluster_configuration_overrides(cluster_configuration)
+
+    def enable_root_with_password(self, context, root_password=None):
+        return self.mysql_admin().enable_root(root_password)
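Aside (an assumption, not shown in this diff): the API-side Root model is expected to reach this new guest method over the guest-agent client, roughly as sketched here; create_guest_client() does appear elsewhere in this change, but the exact chain below is illustrative only:

    # hypothetical caller, for orientation
    guest = create_guest_client(context, instance_id)
    # assumed to dispatch over RPC to Manager.enable_root_with_password()
    # added above, which calls self.mysql_admin().enable_root(root_password)
    guest.enable_root_with_password(root_password)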
@@ -43,12 +43,27 @@ class ClusterActionsGroup(TestGroup):
         """Verify the initial data exists on cluster."""
         self.test_runner.run_verify_initial_cluster_data()
 
+    @test(depends_on=[cluster_create])
+    def cluster_root_enable(self):
+        """Root Enable."""
+        self.test_runner.run_cluster_root_enable()
+
+    @test(depends_on=[cluster_root_enable])
+    def verify_cluster_root_enable(self):
+        """Verify Root Enable."""
+        self.test_runner.run_verify_cluster_root_enable()
+
     @test(depends_on=[cluster_create],
-          runs_after=[verify_initial_cluster_data])
+          runs_after=[verify_initial_cluster_data, verify_cluster_root_enable])
     def cluster_grow(self):
         """Grow cluster."""
         self.test_runner.run_cluster_grow()
 
+    @test(depends_on=[cluster_grow])
+    def verify_cluster_root_enable_after_grow(self):
+        """Verify Root Enabled after grow."""
+        self.test_runner.run_verify_cluster_root_enable()
+
     @test(depends_on=[cluster_grow, add_initial_cluster_data])
     def verify_initial_cluster_data_after_grow(self):
         """Verify the initial data still exists after cluster grow."""
@@ -72,11 +87,17 @@ class ClusterActionsGroup(TestGroup):
         self.test_runner.run_remove_extra_cluster_data()
 
     @test(depends_on=[cluster_create],
-          runs_after=[remove_extra_cluster_data_after_grow])
+          runs_after=[remove_extra_cluster_data_after_grow,
+                      verify_cluster_root_enable_after_grow])
     def cluster_shrink(self):
         """Shrink cluster."""
         self.test_runner.run_cluster_shrink()
 
+    @test(depends_on=[cluster_shrink])
+    def verify_cluster_root_enable_after_shrink(self):
+        """Verify Root Enable after shrink."""
+        self.test_runner.run_verify_cluster_root_enable()
+
     @test(depends_on=[cluster_shrink, add_initial_cluster_data])
     def verify_initial_cluster_data_after_shrink(self):
         """Verify the initial data still exists after cluster shrink."""
@@ -106,7 +127,8 @@ class ClusterActionsGroup(TestGroup):
         self.test_runner.run_remove_initial_cluster_data()
 
     @test(depends_on=[cluster_create],
-          runs_after=[remove_initial_cluster_data])
+          runs_after=[remove_initial_cluster_data,
+                      verify_cluster_root_enable_after_shrink])
     def cluster_delete(self):
         """Delete an existing cluster."""
         self.test_runner.run_cluster_delete()
@@ -101,3 +101,9 @@ class RootActionsGroup(TestGroup):
         """Check the root is also enabled on the restored instance."""
         instance_id = self.backup_runner.restore_instance_id
         self.test_runner.run_check_root_enabled_after_restore(instance_id)
+
+    @test(depends_on=[wait_for_restored_instance],
+          runs_after=[check_root_enabled_after_restore])
+    def delete_restored_instance(self):
+        """Delete root restored instances."""
+        self.backup_runner.run_delete_restored_instance()
@@ -24,6 +24,9 @@ class MysqlHelper(SqlHelper):
     def get_helper_credentials(self):
         return {'name': 'lite', 'password': 'litepass', 'database': 'firstdb'}
 
+    def get_helper_credentials_root(self):
+        return {'name': 'root', 'password': 'rootpass'}
+
     def get_valid_database_definitions(self):
         return [{'name': 'db1', 'character_set': 'latin2',
                  'collate': 'latin2_general_ci'},
@@ -33,23 +33,31 @@ class SqlHelper(TestHelper):
         self.protocol = protocol
         self.port = port
         self.credentials = self.get_helper_credentials()
+        self.credentials_root = self.get_helper_credentials_root()
         self.test_schema = self.credentials['database']
 
         self._schema_metadata = MetaData()
         self._data_cache = dict()
 
     def create_client(self, host, *args, **kwargs):
-        return sqlalchemy.create_engine(self._get_connection_string(host))
+        username = kwargs.get("username")
+        password = kwargs.get("password")
+        if username and password:
+            creds = {"name": username, "password": password}
+            return sqlalchemy.create_engine(
+                self._build_connection_string(host, creds))
+        return sqlalchemy.create_engine(
+            self._build_connection_string(host, self.credentials))
 
-    def _get_connection_string(self, host):
+    def _build_connection_string(self, host, creds):
         if self.port:
             host = "%s:%d" % (host, self.port)
 
         credentials = {'protocol': self.protocol,
                        'host': host,
-                       'user': self.credentials.get('name', ''),
-                       'password': self.credentials.get('password', ''),
-                       'database': self.credentials.get('database', '')}
+                       'user': creds.get('name', ''),
+                       'password': creds.get('password', ''),
+                       'database': creds.get('database', '')}
         return ('%(protocol)s://%(user)s:%(password)s@%(host)s/%(database)s'
                 % credentials)
@@ -123,3 +131,7 @@ class SqlHelper(TestHelper):
     def _select_data_rows(self, client, schema_name, table_name):
         data_table = self._get_schema_table(schema_name, table_name)
         return client.execute(data_table.select()).fetchall()
+
+    def ping(self, host, *args, **kwargs):
+        root_client = self.get_client(host, *args, **kwargs)
+        root_client.execute("SELECT 1;")
@@ -158,6 +158,12 @@ class TestHelper(object):
         """
         return {'name': None, 'password': None, 'database': None}
 
+    def get_helper_credentials_root(self):
+        """Return the credentials that the client will be using to
+        access the database as root.
+        """
+        return {'name': None, 'password': None}
+
     ##############
     # Data related
     ##############
@@ -37,6 +37,7 @@ class ClusterActionsRunner(TestRunner):
         super(ClusterActionsRunner, self).__init__()
 
         self.cluster_id = 0
+        self.current_root_creds = None
 
     @property
     def is_using_existing_cluster(self):
@@ -101,6 +102,32 @@ class ClusterActionsRunner(TestRunner):
 
         return None
 
+    def run_cluster_root_enable(self, expected_task_name=None,
+                                expected_http_code=200):
+        root_credentials = self.test_helper.get_helper_credentials_root()
+        self.current_root_creds = self.auth_client.root.create_cluster_root(
+            self.cluster_id, root_credentials['password'])
+        self.assert_equal(root_credentials['name'],
+                          self.current_root_creds[0])
+        self.assert_equal(root_credentials['password'],
+                          self.current_root_creds[1])
+        self._assert_cluster_action(self.cluster_id, expected_task_name,
+                                    expected_http_code)
+
+    def run_verify_cluster_root_enable(self):
+        if not self.current_root_creds:
+            raise SkipTest("Root not enabled.")
+        cluster = self.auth_client.clusters.get(self.cluster_id)
+        for instance in cluster.instances:
+            root_enabled_test = self.auth_client.root.is_instance_root_enabled(
+                instance['id'])
+            self.assert_true(root_enabled_test.rootEnabled)
+        self.test_helper.ping(
+            cluster.ip[0],
+            username=self.current_root_creds[0],
+            password=self.current_root_creds[1]
+        )
+
     def run_add_initial_cluster_data(self, data_type=DataType.tiny):
         self.assert_add_cluster_data(data_type, self.cluster_id)
@@ -140,7 +140,7 @@ class InstanceCreateRunner(TestRunner):
 
         # Here we add helper user/database if any.
         if create_helper_user:
-            helper_db_def, helper_user_def = self.build_helper_defs()
+            helper_db_def, helper_user_def, root_def = self.build_helper_defs()
             if helper_db_def:
                 self.report.log(
                     "Appending a helper database '%s' to the instance "
@@ -166,3 +166,15 @@ class CouchbaseRootActionsRunner(RootActionsRunner):
 
     def run_disable_root(self):
         raise SkipTest("Operation is currently not supported.")
+
+
+class PxcRootActionsRunner(RootActionsRunner):
+
+    def run_disable_root_before_enabled(self):
+        raise SkipTest("Operation is currently not supported.")
+
+    def run_disable_root(self):
+        raise SkipTest("Operation is currently not supported.")
+
+    def check_root_still_enabled_after_disable(self):
+        raise SkipTest("Operation is currently not supported.")
@@ -399,7 +399,7 @@ class TestRunner(object):
         These are for internal use by the test framework and should
         not be changed by individual test-cases.
         """
-        database_def, user_def = self.build_helper_defs()
+        database_def, user_def, root_def = self.build_helper_defs()
         if database_def:
             self.report.log(
                 "Creating a helper database '%s' on instance: %s"
@@ -412,22 +412,33 @@ class TestRunner(object):
                 % (user_def['name'], user_def['password'], instance_id))
             self.auth_client.users.create(instance_id, [user_def])
 
+        if root_def:
+            # Not enabling root on a single instance of the cluster here
+            # because we want to test the cluster root enable instead.
+            pass
+
     def build_helper_defs(self):
         """Build helper database and user JSON definitions if credentials
        are defined by the helper.
        """
         database_def = None
         user_def = None
 
+        def _get_credentials(creds):
+            if creds:
+                username = creds.get('name')
+                if username:
+                    password = creds.get('password', '')
+                    return {'name': username, 'password': password,
+                            'databases': [{'name': database}]}
+            return None
+
         credentials = self.test_helper.get_helper_credentials()
         if credentials:
             database = credentials.get('database')
             if database:
                 database_def = {'name': database}
+        credentials_root = self.test_helper.get_helper_credentials_root()
 
-            username = credentials.get('name')
-            if username:
-                password = credentials.get('password', '')
-                user_def = {'name': username, 'password': password,
-                            'databases': [{'name': database}]}
-
-        return database_def, user_def
+        return (database_def,
+                _get_credentials(credentials),
+                _get_credentials(credentials_root))
new file: trove/tests/unittests/common/test_common_extensions.py (310 lines)
@@ -0,0 +1,310 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from mock import Mock
from mock import patch
from oslo_config.cfg import NoSuchOptError

from trove.common import exception
from trove.common import utils
from trove.extensions.common import models
from trove.extensions.common.service import ClusterRootController
from trove.extensions.common.service import DefaultRootController
from trove.extensions.common.service import RootController
from trove.instance.models import DBInstance
from trove.tests.unittests import trove_testtools


class TestDefaultRootController(trove_testtools.TestCase):

    def setUp(self):
        super(TestDefaultRootController, self).setUp()
        self.controller = DefaultRootController()

    @patch.object(models.Root, "load")
    def test_root_index(self, root_load):
        context = Mock()
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = False
        self.controller.root_index(req, tenant_id, uuid, is_cluster)
        root_load.assert_called_with(context, uuid)

    def test_root_index_with_cluster(self):
        req = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = True
        self.assertRaises(
            exception.ClusterOperationNotSupported,
            self.controller.root_index,
            req, tenant_id, uuid, is_cluster)

    @patch.object(models.Root, "create")
    def test_root_create(self, root_create):
        user = Mock()
        context = Mock()
        context.user = Mock()
        context.user.__getitem__ = Mock(return_value=user)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = False
        password = Mock()
        body = {'password': password}
        self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
        root_create.assert_called_with(context, uuid, context.user, password)

    def test_root_create_with_cluster(self):
        req = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = True
        password = Mock()
        body = {'password': password}
        self.assertRaises(
            exception.ClusterOperationNotSupported,
            self.controller.root_create,
            req, body, tenant_id, uuid, is_cluster)


class TestRootController(trove_testtools.TestCase):

    def setUp(self):
        super(TestRootController, self).setUp()
        self.controller = RootController()

    @patch.object(RootController, "load_root_controller")
    @patch.object(RootController, "_get_datastore")
    def test_index(self, service_get_datastore, service_load_root_controller):
        req = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        ds_manager = Mock()
        is_cluster = Mock()
        service_get_datastore.return_value = (ds_manager, is_cluster)
        root_controller = Mock()
        ret = Mock()
        root_controller.root_index = Mock(return_value=ret)
        service_load_root_controller.return_value = root_controller

        self.assertTrue(ret, self.controller.index(req, tenant_id, uuid))
        service_get_datastore.assert_called_with(tenant_id, uuid)
        service_load_root_controller.assert_called_with(ds_manager)
        root_controller.root_index.assert_called_with(
            req, tenant_id, uuid, is_cluster)

    @patch.object(RootController, "load_root_controller")
    @patch.object(RootController, "_get_datastore")
    def test_create(self, service_get_datastore, service_load_root_controller):
        req = Mock()
        body = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        ds_manager = Mock()
        is_cluster = Mock()
        service_get_datastore.return_value = (ds_manager, is_cluster)
        root_controller = Mock()
        ret = Mock()
        root_controller.root_create = Mock(return_value=ret)
        service_load_root_controller.return_value = root_controller

        self.assertTrue(
            ret, self.controller.create(req, tenant_id, uuid, body=body))
        service_get_datastore.assert_called_with(tenant_id, uuid)
        service_load_root_controller.assert_called_with(ds_manager)
        root_controller.root_create.assert_called_with(
            req, body, tenant_id, uuid, is_cluster)

    @patch.object(RootController, "load_root_controller")
    @patch.object(RootController, "_get_datastore")
    def test_create_with_no_root_controller(self,
                                            service_get_datastore,
                                            service_load_root_controller):
        req = Mock()
        body = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        ds_manager = Mock()
        is_cluster = Mock()
        service_get_datastore.return_value = (ds_manager, is_cluster)
        service_load_root_controller.return_value = None

        self.assertRaises(
            NoSuchOptError,
            self.controller.create,
            req, tenant_id, uuid, body=body)
        service_get_datastore.assert_called_with(tenant_id, uuid)
        service_load_root_controller.assert_called_with(ds_manager)


class TestClusterRootController(trove_testtools.TestCase):

    def setUp(self):
        super(TestClusterRootController, self).setUp()
        self.controller = ClusterRootController()

    @patch.object(ClusterRootController, "cluster_root_index")
    def test_root_index_cluster(self, mock_cluster_root_index):
        req = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = True
        self.controller.root_index(req, tenant_id, uuid, is_cluster)
        mock_cluster_root_index.assert_called_with(req, tenant_id, uuid)

    @patch.object(ClusterRootController, "instance_root_index")
    def test_root_index_instance(self, mock_instance_root_index):
        req = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = False
        self.controller.root_index(req, tenant_id, uuid, is_cluster)
        mock_instance_root_index.assert_called_with(req, tenant_id, uuid)

    @patch.object(ClusterRootController, "cluster_root_create")
    def test_root_create_cluster(self, mock_cluster_root_create):
        req = Mock()
        body = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = True
        self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
        mock_cluster_root_create.assert_called_with(req, body, tenant_id, uuid)

    @patch.object(ClusterRootController, "instance_root_create")
    def test_root_create_instance(self, mock_instance_root_create):
        req = Mock()
        body = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        is_cluster = False
        self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
        mock_instance_root_create.assert_called_with(req, body, uuid)

    @patch.object(models.ClusterRoot, "load")
    def test_instance_root_index(self, mock_cluster_root_load):
        context = Mock()
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        tenant_id = Mock()
        instance_id = utils.generate_uuid()
        self.controller.instance_root_index(req, tenant_id, instance_id)
        mock_cluster_root_load.assert_called_with(context, instance_id)

    @patch.object(models.ClusterRoot, "load",
                  side_effect=exception.UnprocessableEntity())
    def test_instance_root_index_exception(self, mock_cluster_root_load):
        context = Mock()
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        tenant_id = Mock()
        instance_id = utils.generate_uuid()
        self.assertRaises(
            exception.UnprocessableEntity,
            self.controller.instance_root_index,
            req, tenant_id, instance_id
        )
        mock_cluster_root_load.assert_called_with(context, instance_id)

    @patch.object(ClusterRootController, "instance_root_index")
    @patch.object(ClusterRootController, "_get_cluster_instance_id")
    def test_cluster_root_index(self, mock_get_cluster_instance,
                                mock_instance_root_index):
        req = Mock()
        tenant_id = Mock()
        cluster_id = utils.generate_uuid()
        single_instance_id = Mock()
        mock_get_cluster_instance.return_value = (single_instance_id, Mock())
        self.controller.cluster_root_index(req, tenant_id, cluster_id)
        mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id)
        mock_instance_root_index.assert_called_with(req, tenant_id,
                                                    single_instance_id)

    @patch.object(ClusterRootController, "instance_root_create")
    @patch.object(ClusterRootController, "_get_cluster_instance_id")
    def test_cluster_root_create(self, mock_get_cluster_instance,
                                 mock_instance_root_create):
        req = Mock()
        body = Mock()
        tenant_id = Mock()
        cluster_id = utils.generate_uuid()
        single_instance_id = Mock()
        cluster_instances = Mock()
        mock_get_cluster_instance.return_value = (single_instance_id,
                                                  cluster_instances)
        self.controller.cluster_root_create(req, body, tenant_id, cluster_id)
        mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id)
        mock_instance_root_create.assert_called_with(req, body,
                                                     single_instance_id,
                                                     cluster_instances)

    @patch.object(DBInstance, "find_all")
    def test_get_cluster_instance_id(self, mock_find_all):
        tenant_id = Mock()
        cluster_id = Mock()
        db_inst_1 = Mock()
        db_inst_1.id.return_value = utils.generate_uuid()
        db_inst_2 = Mock()
        db_inst_2.id.return_value = utils.generate_uuid()
        cluster_instances = [db_inst_1, db_inst_2]
        mock_find_all.return_value.all.return_value = cluster_instances
        ret = self.controller._get_cluster_instance_id(tenant_id, cluster_id)
        self.assertTrue(db_inst_1.id, ret[0])
        self.assertTrue(cluster_instances, ret[1])

    @patch.object(models.ClusterRoot, "create")
    def test_instance_root_create(self, mock_cluster_root_create):
        user = Mock()
        context = Mock()
        context.user = Mock()
        context.user.__getitem__ = Mock(return_value=user)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        password = Mock()
        body = {'password': password}
        instance_id = utils.generate_uuid()
        cluster_instances = Mock()
        self.controller.instance_root_create(
            req, body, instance_id, cluster_instances)
        mock_cluster_root_create.assert_called_with(
            context, instance_id, context.user, password, cluster_instances)

    @patch.object(models.ClusterRoot, "create")
    def test_instance_root_create_no_body(self, mock_cluster_root_create):
        user = Mock()
        context = Mock()
        context.user = Mock()
        context.user.__getitem__ = Mock(return_value=user)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        password = None
        body = None
        instance_id = utils.generate_uuid()
        cluster_instances = Mock()
        self.controller.instance_root_create(
            req, body, instance_id, cluster_instances)
        mock_cluster_root_create.assert_called_with(
            context, instance_id, context.user, password, cluster_instances)
@@ -12,12 +12,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
 from mock import MagicMock
 from mock import Mock
 from mock import patch
 
 from trove.common.context import TroveContext
 from trove.guestagent.datastore.experimental.pxc.manager import Manager
 import trove.guestagent.datastore.experimental.pxc.service as dbaas
 import trove.guestagent.datastore.mysql_common.service as mysql_common
 from trove.tests.unittests import trove_testtools
 
@@ -32,7 +33,7 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
         self.mock_rs_class = self.patcher_rs.start()
 
         status_patcher = patch.object(dbaas.PXCAppStatus, 'get',
-                                      return_value=MagicMock())
+                                      return_value=Mock())
         self.addCleanup(status_patcher.stop)
         self.status_get_mock = status_patcher.start()
 
@@ -72,3 +73,9 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
             self.context, cluster_configuration)
         self.status_get_mock.assert_any_call()
         conf_overries.assert_called_with(cluster_configuration)
+
+    @patch.object(mysql_common.BaseMySqlAdmin, 'enable_root')
+    def test_enable_root_with_password(self, reset_admin_pwd):
+        admin_password = "password"
+        self.manager.enable_root_with_password(self.context, admin_password)
+        reset_admin_pwd.assert_called_with(admin_password)
@@ -18,7 +18,7 @@ import uuid
 
 from cinderclient import exceptions as cinder_exceptions
 import cinderclient.v2.client as cinderclient
-from mock import Mock, MagicMock, patch, PropertyMock
+from mock import Mock, MagicMock, patch, PropertyMock, call
 from novaclient import exceptions as nova_exceptions
 import novaclient.v2.flavors
 import novaclient.v2.servers
@@ -40,6 +40,7 @@ import trove.common.template as template
 from trove.common import utils
 from trove.datastore import models as datastore_models
 import trove.db.models
+from trove.extensions.common import models as common_models
 from trove.extensions.mysql import models as mysql_models
 import trove.guestagent.api
 from trove.instance.models import BaseInstance
@@ -1028,3 +1029,25 @@ class RootReportTest(trove_testtools.TestCase):
         self.assertTrue(mysql_models.RootHistory.load.called)
         self.assertEqual(history.user, report.user)
         self.assertEqual(history.id, report.id)
+
+
+class ClusterRootTest(trove_testtools.TestCase):
+
+    @patch.object(common_models.RootHistory, "create")
+    @patch.object(common_models.Root, "create")
+    def test_cluster_root_create(self, root_create, root_history_create):
+        context = Mock()
+        uuid = utils.generate_uuid()
+        user = "root"
+        password = "rootpassword"
+        cluster_instances = [utils.generate_uuid(), utils.generate_uuid()]
+        common_models.ClusterRoot.create(context, uuid, user, password,
+                                         cluster_instances)
+        root_create.assert_called_with(context, uuid, user, password,
+                                       cluster_instances_list=None)
+        self.assertEqual(2, root_history_create.call_count)
+        calls = [
+            call(context, cluster_instances[0], user),
+            call(context, cluster_instances[1], user)
+        ]
+        root_history_create.assert_has_calls(calls)
@@ -169,6 +169,7 @@ class PXCClusterTasksTest(trove_testtools.TestCase):
                                      '1234',
                                      status=InstanceTasks.GROWING_ERROR)
 
+    @patch.object(ClusterTasks, '_check_cluster_for_root')
     @patch.object(ClusterTasks, 'reset_task')
     @patch.object(ClusterTasks, '_render_cluster_config')
     @patch.object(ClusterTasks, 'get_ip')
@@ -180,7 +181,8 @@ class PXCClusterTasksTest(trove_testtools.TestCase):
     @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
     def test_grow_cluster_successs(self, mock_dv, mock_ds, mock_find_all,
                                    mock_load, mock_ready, mock_guest, mock_ip,
-                                   mock_render, mock_reset_task):
+                                   mock_render, mock_reset_task,
+                                   mock_check_root):
        mock_find_all.return_value.all.return_value = [self.dbinst1]
 
        mock_ip.return_value = "10.0.0.2"