Percona XtraDB Cluster implementation

Created the clustering implementation for Percona XtraDB Cluster
Server, using the existing framework to add a new cluster strategy
for Percona. Created the guestagent, API, and taskmanager calls to
set up the cluster.

Ran into an odd issue where the admin user creation was not being
persisted even though no error was logged. I believe this happens
because mysql is technically up according to 'mysqladmin ping' but is
not yet processing the GRANT statement. The reliable way to be sure is
to run a 'select 1;' query to verify that mysql is really ready after
starting it, so I added a method that does exactly that.
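
A minimal standalone sketch of that readiness probe, mirroring the
_test_mysql/_wait_for_mysql_to_be_really_alive methods added to the PXC
service below (the root-over-localhost connection string matches the one
used there; the LocalSqlClient wrapper and logging are omitted for brevity):

import sqlalchemy
from sqlalchemy.sql.expression import text

from trove.common import utils


def _test_mysql():
    # 'select 1;' only succeeds once mysqld is actually executing
    # statements, whereas 'mysqladmin ping' can report success while the
    # server is still initializing and not yet processing GRANTs.
    engine = sqlalchemy.create_engine("mysql://root:@localhost:3306")
    try:
        engine.execute(text("select 1;"))
        return True
    except Exception:
        return False


def _wait_for_mysql_to_be_really_alive(max_time):
    # Poll every 3 seconds until the probe passes or max_time seconds elapse.
    utils.poll_until(_test_mysql, sleep_time=3, time_out=max_time)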

PXC needs a few additional ports opened for group communication, state
transfer (SST), and incremental state transfer (IST). Added the percona
cluster.cnf template file with the settings PXC needs. When an instance
first joins the cluster it syncs all of its data via xtrabackup, which
causes the joining instance to lose its os_admin admin user, so we
reset the admin password to a new value across the board.
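
The extra ports end up in the instance security group (when
trove_security_groups_support is enabled); a trove.conf override would look
roughly like this, using the defaults added to the new [pxc] group below:

[pxc]
# 3306 = MySQL client, 4567 = Galera group communication,
# 4444 = SST (full state transfer via xtrabackup), 4568 = IST
tcp_ports = 3306,4444,4567,4568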

Made Percona XtraDB Cluster a separate datastore because there are
multiple versions of PXC. This has been tested with PXC 5.5 from the
Ubuntu repository.
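
For reference, a deployment would register the new datastore through the
usual trove-manage commands, roughly as below; the glance image id and
package name are placeholders, and 'percona-xtradb-cluster' is only an
example datastore name (the manager name 'pxc' matches the new option group):

trove-manage datastore_update percona-xtradb-cluster ""
trove-manage datastore_version_update percona-xtradb-cluster 5.5 pxc \
    <glance-image-id> "percona-xtradb-cluster-server-5.5" 1
trove-manage datastore_update percona-xtradb-cluster 5.5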

Added coverage XML output to tox.ini.

DocImpact
Adds a new datastore, percona-xtradb-cluster (pxc), which should not
require any special cases for cluster create and delete.
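
As a sketch, a cluster-create request for the new datastore carries three
instances (matching the default cluster_member_count) and looks like the
payload exercised by the unit tests below; flavor and volume values are
illustrative:

{
    "cluster": {
        "name": "products",
        "datastore": {"type": "pxc", "version": "5.5"},
        "instances": [
            {"flavorRef": "7", "volume": {"size": 1}},
            {"flavorRef": "7", "volume": {"size": 1}},
            {"flavorRef": "7", "volume": {"size": 1}}
        ]
    }
}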

Implements: blueprint support-pxc-56
Change-Id: I239433f0491cea15b214c41ceecc3a5e72b5bbeb
Craig Vyvial 2015-07-10 16:43:17 -05:00
parent 4987f4af78
commit a94987f8ba
32 changed files with 2583 additions and 38 deletions

View File

@ -33,6 +33,7 @@ commands =
python setup.py testr --coverage
coverage run -a run_tests.py
coverage html
coverage xml
coverage report

View File

@ -314,6 +314,7 @@ common_opts = [
cfg.DictOpt('notification_service_id',
default={'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b',
'percona': 'fd1723f5-68d2-409c-994f-a4a197892a17',
'pxc': '75a628c3-f81b-4ffb-b10a-4087c26bc854',
'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0',
'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed',
'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415',
@ -379,15 +380,18 @@ common_opts = [
help="Describes the actual network manager used for "
"the management of network attributes "
"(security groups, floating IPs, etc.)."),
cfg.IntOpt('usage_timeout', default=600,
cfg.IntOpt('usage_timeout', default=900,
help='Maximum time (in seconds) to wait for a Guest to become '
'active.'),
cfg.IntOpt('restore_usage_timeout', default=36000,
help='Maximum time (in seconds) to wait for a Guest instance '
'restored from a backup to become active.'),
cfg.IntOpt('cluster_usage_timeout', default=675,
cfg.IntOpt('cluster_usage_timeout', default=36000,
help='Maximum time (in seconds) to wait for a cluster to '
'become active.'),
cfg.IntOpt('timeout_wait_for_service', default=120,
help='Maximum time (in seconds) to wait for a service to '
'become alive.'),
]
# Profiling specific option groups
@ -541,6 +545,77 @@ percona_opts = [
deprecated_group='DEFAULT'),
]
# Percona XtraDB Cluster
pxc_group = cfg.OptGroup(
'pxc', title='Percona XtraDB Cluster options',
help="Oslo option group designed for Percona XtraDB Cluster datastore")
pxc_opts = [
cfg.ListOpt('tcp_ports', default=["3306", "4444", "4567", "4568"],
help='List of TCP ports and/or port ranges to open '
'in the security group (only applicable '
'if trove_security_groups_support is True).'),
cfg.ListOpt('udp_ports', default=[],
help='List of UDP ports and/or port ranges to open '
'in the security group (only applicable '
'if trove_security_groups_support is True).'),
cfg.StrOpt('backup_strategy', default='InnoBackupEx',
help='Default strategy to perform backups.'),
cfg.StrOpt('replication_strategy', default='MysqlGTIDReplication',
help='Default strategy for replication.'),
cfg.StrOpt('replication_namespace',
default='trove.guestagent.strategies.replication.mysql_gtid',
help='Namespace to load replication strategies from.'),
cfg.StrOpt('replication_user', default='slave_user',
help='Userid for replication slave.'),
cfg.StrOpt('mount_point', default='/var/lib/mysql',
help="Filesystem path for mounting "
"volumes if volume support is enabled."),
cfg.BoolOpt('root_on_create', default=False,
help='Enable the automatic creation of the root user for the '
'service during instance-create. The generated password for '
'the root user is immediately returned in the response of '
"instance-create as the 'password' field."),
cfg.IntOpt('usage_timeout', default=450,
help='Maximum time (in seconds) to wait for a Guest to become '
'active.'),
cfg.StrOpt('backup_namespace',
default='trove.guestagent.strategies.backup.mysql_impl',
help='Namespace to load backup strategies from.'),
cfg.StrOpt('restore_namespace',
default='trove.guestagent.strategies.restore.mysql_impl',
help='Namespace to load restore strategies from.'),
cfg.BoolOpt('volume_support', default=True,
help='Whether to provision a Cinder volume for datadir.'),
cfg.StrOpt('device_path', default='/dev/vdb',
help='Device path for volume if volume support is enabled.'),
cfg.DictOpt('backup_incremental_strategy',
default={'InnoBackupEx': 'InnoBackupExIncremental'},
help='Incremental Backup Runner based on the default '
'strategy. For strategies that do not implement an '
'incremental backup, the runner will use the default full '
'backup.'),
cfg.ListOpt('ignore_users', default=['os_admin', 'root', 'clusterrepuser'],
help='Users to exclude when listing users.'),
cfg.BoolOpt('cluster_support', default=True,
help='Enable clusters to be created and managed.'),
cfg.IntOpt('cluster_member_count', default=3,
help='Number of members in PXC cluster.'),
cfg.StrOpt('api_strategy',
default='trove.common.strategies.cluster.experimental.'
'pxc.api.PXCAPIStrategy',
help='Class that implements datastore-specific API logic.'),
cfg.StrOpt('taskmanager_strategy',
default='trove.common.strategies.cluster.experimental.pxc.'
'taskmanager.PXCTaskManagerStrategy',
help='Class that implements datastore-specific task manager '
'logic.'),
cfg.StrOpt('guestagent_strategy',
default='trove.common.strategies.cluster.experimental.'
'pxc.guestagent.PXCGuestAgentStrategy',
help='Class that implements datastore-specific Guest Agent API '
'logic.'),
]
# Redis
redis_group = cfg.OptGroup(
'redis', title='Redis options',
@ -1043,6 +1118,7 @@ CONF.register_opts(database_opts, 'database')
CONF.register_group(mysql_group)
CONF.register_group(percona_group)
CONF.register_group(pxc_group)
CONF.register_group(redis_group)
CONF.register_group(cassandra_group)
CONF.register_group(couchbase_group)
@ -1055,6 +1131,7 @@ CONF.register_group(mariadb_group)
CONF.register_opts(mysql_opts, mysql_group)
CONF.register_opts(percona_opts, percona_group)
CONF.register_opts(pxc_opts, pxc_group)
CONF.register_opts(redis_opts, redis_group)
CONF.register_opts(cassandra_opts, cassandra_group)
CONF.register_opts(couchbase_opts, couchbase_group)

View File

@ -0,0 +1,144 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.cluster import models
from trove.cluster.tasks import ClusterTasks
from trove.cluster.views import ClusterView
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster import base
from trove.extensions.mgmt.clusters.views import MgmtClusterView
from trove.instance import models as inst_models
from trove.quota.quota import check_quotas
from trove.taskmanager import api as task_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PXCAPIStrategy(base.BaseAPIStrategy):
@property
def cluster_class(self):
return PXCCluster
@property
def cluster_controller_actions(self):
return {}
@property
def cluster_view_class(self):
return PXCClusterView
@property
def mgmt_cluster_view_class(self):
return PXCMgmtClusterView
class PXCCluster(models.Cluster):
@classmethod
def create(cls, context, name, datastore, datastore_version, instances):
LOG.debug("Initiating PXC cluster creation.")
pxc_conf = CONF.get(datastore_version.manager)
num_instances = len(instances)
# Matching number of instances with configured cluster_member_count
if num_instances != pxc_conf.cluster_member_count:
raise exception.ClusterNumInstancesNotSupported(
num_instances=pxc_conf.cluster_member_count)
# Checking flavors
flavor_ids = [instance['flavor_id'] for instance in instances]
if len(set(flavor_ids)) != 1:
raise exception.ClusterFlavorsNotEqual()
flavor_id = flavor_ids[0]
nova_client = remote.create_nova_client(context)
try:
flavor = nova_client.flavors.get(flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=flavor_id)
deltas = {'instances': num_instances}
# Checking volumes
volume_sizes = [instance['volume_size'] for instance in instances
if instance.get('volume_size', None)]
volume_size = None
if pxc_conf.volume_support:
if len(volume_sizes) != num_instances:
raise exception.ClusterVolumeSizeRequired()
if len(set(volume_sizes)) != 1:
raise exception.ClusterVolumeSizesNotEqual()
volume_size = volume_sizes[0]
models.validate_volume_size(volume_size)
deltas['volumes'] = volume_size * num_instances
else:
if len(volume_sizes) > 0:
raise exception.VolumeNotSupported()
ephemeral_support = pxc_conf.device_path
if ephemeral_support and flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
check_quotas(context.tenant, deltas)
nics = [instance.get('nics', None) for instance in instances]
azs = [instance.get('availability_zone', None)
for instance in instances]
# Updating Cluster Task
db_info = models.DBCluster.create(
name=name, tenant_id=context.tenant,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL)
member_config = {"id": db_info.id,
"instance_type": "member"}
# Creating member instances
for i in range(0, num_instances):
instance_name = "%s-member-%s" % (name, str(i + 1))
inst_models.Instance.create(context, instance_name,
flavor_id,
datastore_version.image_id,
[], [], datastore,
datastore_version,
volume_size, None,
nics=nics[i],
availability_zone=azs[i],
configuration_id=None,
cluster_config=member_config)
# Calling taskmanager to further proceed for cluster-configuration
task_api.load(context, datastore_version.manager).create_cluster(
db_info.id)
return PXCCluster(context, db_info, datastore, datastore_version)
class PXCClusterView(ClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
class PXCMgmtClusterView(MgmtClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
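
A rough sketch of how these classes get resolved at runtime: the cluster API
looks up the strategy for the datastore manager name ('pxc', as registered in
cfg.py) through the existing cluster strategy loader, roughly as follows
(illustrative only):

from trove.common.strategies.cluster import strategy

# Resolves the 'api_strategy' option of the [pxc] group to the
# PXCAPIStrategy defined above.
api_strategy = strategy.load_api_strategy('pxc')
cluster_class = api_strategy.cluster_class        # PXCCluster
view_class = api_strategy.cluster_view_class      # PXCClusterView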

View File

@ -0,0 +1,53 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common.strategies.cluster import base
from trove.guestagent import api as guest_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PXCGuestAgentStrategy(base.BaseGuestAgentStrategy):
@property
def guest_client_class(self):
return PXCGuestAgentAPI
class PXCGuestAgentAPI(guest_api.API):
def install_cluster(self, replication_user, cluster_configuration,
bootstrap):
LOG.debug("Installing PXC cluster.")
self._call("install_cluster", CONF.cluster_usage_timeout,
self.version_cap,
replication_user=replication_user,
cluster_configuration=cluster_configuration,
bootstrap=bootstrap)
def reset_admin_password(self, admin_password):
"""Store this password on the instance as the admin password"""
self._call("reset_admin_password", CONF.cluster_usage_timeout,
self.version_cap,
admin_password=admin_password)
def cluster_complete(self):
"""Set the status that the cluster is build is complete"""
LOG.debug("Notifying cluster install completion.")
return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
self.version_cap)

View File

@ -0,0 +1,145 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet.timeout import Timeout
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common.remote import create_nova_client
from trove.common.strategies.cluster import base
from trove.common.template import ClusterConfigTemplate
from trove.common import utils
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.taskmanager import api as task_api
import trove.taskmanager.models as task_models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds.
class PXCTaskManagerStrategy(base.BaseTaskManagerStrategy):
@property
def task_manager_api_class(self):
return task_api.API
@property
def task_manager_cluster_tasks_class(self):
return PXCClusterTasks
class PXCClusterTasks(task_models.ClusterTasks):
CLUSTER_REPLICATION_USER = "clusterrepuser"
def _render_cluster_config(self, context, instance, cluster_ips,
cluster_name, replication_user):
client = create_nova_client(context)
flavor = client.flavors.get(instance.flavor_id)
instance_ip = self.get_ip(instance)
config = ClusterConfigTemplate(
self.datastore_version, flavor, instance.id)
replication_user_pass = "%(name)s:%(password)s" % replication_user
config_rendered = config.render(
replication_user_pass=replication_user_pass,
cluster_ips=cluster_ips,
cluster_name=cluster_name,
instance_ip=instance_ip,
instance_name=instance.name,
)
return config_rendered
def create_cluster(self, context, cluster_id):
LOG.debug("Begin create_cluster for id: %s." % cluster_id)
def _create_cluster():
# Fetch instances by cluster_id against instances table.
db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
instance_ids = [db_instance.id for db_instance in db_instances]
LOG.debug("Waiting for instances to get to cluster-ready status.")
# Wait for cluster members to get to cluster-ready status.
if not self._all_instances_ready(instance_ids, cluster_id):
return
LOG.debug("All members ready, proceeding for cluster setup.")
instances = [Instance.load(context, instance_id) for instance_id
in instance_ids]
cluster_ips = [self.get_ip(instance) for instance in instances]
instance_guests = [self.get_guest(instance)
for instance in instances]
# Create replication user and password for synchronizing the
# PXC cluster
replication_user = {
"name": self.CLUSTER_REPLICATION_USER,
"password": utils.generate_random_password(),
}
# PXC cluster name must be unique and be shorter than a full
# uuid string so we remove the hyphens and chop it off. It was
# recommended to be 16 chars or less.
# (this is not currently documented on PXC docs)
cluster_name = utils.generate_uuid().replace("-", "")[:16]
LOG.debug("Configuring cluster configuration.")
try:
# Set the admin password for all the instances because the
# password in the my.cnf will be wrong after the joiner
# instances syncs with the donor instance.
admin_password = str(utils.generate_random_password())
for guest in instance_guests:
guest.reset_admin_password(admin_password)
bootstrap = True
for instance in instances:
guest = self.get_guest(instance)
# render the conf.d/cluster.cnf configuration
cluster_configuration = self._render_cluster_config(
context,
instance, ",".join(cluster_ips),
cluster_name,
replication_user)
# push the cluster config and bootstrap the first instance
guest.install_cluster(replication_user,
cluster_configuration,
bootstrap)
bootstrap = False
LOG.debug("Finalizing cluster configuration.")
for guest in instance_guests:
guest.cluster_complete()
except Exception:
LOG.exception(_("Error creating cluster."))
self.update_statuses_on_failure(cluster_id)
timeout = Timeout(CONF.cluster_usage_timeout)
try:
_create_cluster()
self.reset_task()
except Timeout as t:
if t is not timeout:
raise # not my timeout
LOG.exception(_("Timeout for building cluster."))
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
LOG.debug("End create_cluster for id: %s." % cluster_id)

View File

@ -145,3 +145,7 @@ class ReplicaSourceConfigTemplate(SingleInstanceConfigTemplate):
class ReplicaConfigTemplate(SingleInstanceConfigTemplate):
template_name = "replica.config.template"
class ClusterConfigTemplate(SingleInstanceConfigTemplate):
template_name = "cluster.config.template"

View File

@ -278,8 +278,10 @@ def service_discovery(service_candidates):
"""
result = {}
for service in service_candidates:
result['service'] = service
# check upstart
if os.path.isfile("/etc/init/%s.conf" % service):
result['type'] = 'upstart'
# upstart returns error code when service already started/stopped
result['cmd_start'] = "sudo start %s || true" % service
result['cmd_stop'] = "sudo stop %s || true" % service
@ -290,6 +292,7 @@ def service_discovery(service_candidates):
break
# check sysvinit
if os.path.isfile("/etc/init.d/%s" % service):
result['type'] = 'sysvinit'
result['cmd_start'] = "sudo service %s start" % service
result['cmd_stop'] = "sudo service %s stop" % service
if os.path.isfile("/usr/sbin/update-rc.d"):
@ -306,6 +309,7 @@ def service_discovery(service_candidates):
# check systemd
service_path = "/lib/systemd/system/%s.service" % service
if os.path.isfile(service_path):
result['type'] = 'systemd'
result['cmd_start'] = "sudo systemctl start %s" % service
result['cmd_stop'] = "sudo systemctl stop %s" % service

View File

@ -0,0 +1,148 @@
# Copyright 2015 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from oslo_log import log as logging
from oslo_utils import importutils
from trove.common import cfg
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.mysql import manager_base
from trove.guestagent.datastore.mysql import service_base
from trove.guestagent.strategies.replication import get_replication_strategy
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
REPLICATION_STRATEGY = CONF.get(MANAGER).replication_strategy
REPLICATION_NAMESPACE = CONF.get(MANAGER).replication_namespace
REPLICATION_STRATEGY_CLASS = get_replication_strategy(REPLICATION_STRATEGY,
REPLICATION_NAMESPACE)
MYSQL_APP = "trove.guestagent.datastore.experimental.pxc." \
"service.PXCApp"
MYSQL_APP_STATUS = "trove.guestagent.datastore.experimental.pxc." \
"service.PXCAppStatus"
MYSQL_ADMIN = "trove.guestagent.datastore.experimental.pxc." \
"service.PXCAdmin"
class Manager(manager_base.BaseMySqlManager):
def __init__(self):
mysql_app = importutils.import_class(MYSQL_APP)
mysql_app_status = importutils.import_class(MYSQL_APP_STATUS)
mysql_admin = importutils.import_class(MYSQL_ADMIN)
super(Manager, self).__init__(mysql_app, mysql_app_status,
mysql_admin, REPLICATION_STRATEGY,
REPLICATION_NAMESPACE,
REPLICATION_STRATEGY_CLASS, MANAGER)
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""Makes ready DBAAS on a Guest container."""
self.mysql_app_status.get().begin_install()
# status end_mysql_install set with secure()
app = self.mysql_app(self.mysql_app_status.get())
app.install_if_needed(packages)
if device_path:
# stop and do not update database
app.stop_db(do_not_start_on_reboot=True)
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
if os.path.exists(mount_point):
# rsync existing data to a "data" sub-directory
# on the new volume
device.migrate_data(mount_point, target_subdir="data")
# mount the volume
device.mount(mount_point)
operating_system.chown(
mount_point, service_base.MYSQL_OWNER,
service_base.MYSQL_OWNER,
recursive=False, as_root=True)
LOG.debug("Mounted the volume at %s." % mount_point)
# We need to temporarily update the default my.cnf so that
# mysql will start after the volume is mounted. Later on it
# will be changed based on the config template
# (see MySqlApp.secure()) and restart.
app.set_data_dir(mount_point + '/data')
app.start_mysql()
if backup_info:
self._perform_restore(backup_info, context,
mount_point + "/data", app)
LOG.debug("Securing MySQL now.")
app.secure(config_contents, overrides)
enable_root_on_restore = (backup_info and
MySqlAdmin().is_root_enabled())
if root_password and not backup_info:
app.secure_root(secure_remote_root=True)
MySqlAdmin().enable_root(root_password)
elif enable_root_on_restore:
app.secure_root(secure_remote_root=False)
app.get().report_root(context, 'root')
else:
app.secure_root(secure_remote_root=True)
if cluster_config is None:
app.complete_install_or_restart()
else:
app.status.set_status(
rd_instance.ServiceStatuses.BUILD_PENDING)
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)
if snapshot:
self.attach_replica(context, snapshot, snapshot['config'])
LOG.info(_('Completed setup of MySQL database instance.'))
def install_cluster(self, context, replication_user, cluster_configuration,
bootstrap):
app = self.mysql_app(self.mysql_app_status.get())
try:
app.install_cluster(
replication_user, cluster_configuration, bootstrap)
LOG.debug("install_cluster call has finished.")
except Exception:
LOG.exception(_('Cluster installation failed.'))
app.status.set_status(
rd_instance.ServiceStatuses.FAILED)
raise
def reset_admin_password(self, context, admin_password):
LOG.debug("Storing the admin password on the instance.")
app = self.mysql_app(self.mysql_app_status.get())
app.reset_admin_password(admin_password)
def cluster_complete(self, context):
LOG.debug("Cluster creation complete, starting status checks.")
app = self.mysql_app(self.mysql_app_status.get())
status = app.status._get_actual_db_status()
app.status.set_status(status)

View File

@ -0,0 +1,149 @@
# Copyright 2015 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
import sqlalchemy
from sqlalchemy.sql.expression import text
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import sql_query
from trove.guestagent.datastore.experimental.pxc import system
from trove.guestagent.datastore.mysql import service_base
LOG = logging.getLogger(__name__)
CONF = service_base.CONF
CNF_CLUSTER = "cluster"
class KeepAliveConnection(service_base.BaseKeepAliveConnection):
pass
class PXCAppStatus(service_base.BaseMySqlAppStatus):
pass
class LocalSqlClient(service_base.BaseLocalSqlClient):
pass
class PXCApp(service_base.BaseMySqlApp):
def __init__(self, status):
super(PXCApp, self).__init__(status, LocalSqlClient,
KeepAliveConnection)
def _test_mysql(self):
engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
echo=True)
try:
with LocalSqlClient(engine) as client:
out = client.execute(text("select 1;"))
for line in out:
LOG.debug("line: %s" % line)
return True
except Exception:
return False
def _wait_for_mysql_to_be_really_alive(self, max_time):
utils.poll_until(self._test_mysql, sleep_time=3, time_out=max_time)
def secure(self, config_contents, overrides):
LOG.info(_("Generating admin password."))
admin_password = utils.generate_random_password()
service_base.clear_expired_password()
engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
echo=True)
with LocalSqlClient(engine) as client:
self._remove_anonymous_user(client)
self._create_admin_user(client, admin_password)
self.stop_db()
self._reset_configuration(config_contents, admin_password)
self._apply_user_overrides(overrides)
self.start_mysql()
# TODO(cp16net) figure out reason for PXC not updating the password
try:
with LocalSqlClient(engine) as client:
query = text("select Host, User from mysql.user;")
client.execute(query)
except Exception:
LOG.debug('failed to query mysql')
# Create the admin user after the config files are written because
# Percona PXC was not committing the grant for the admin user after
# removing the anonymous users.
self._wait_for_mysql_to_be_really_alive(
CONF.timeout_wait_for_service)
with LocalSqlClient(engine) as client:
self._create_admin_user(client, admin_password)
self.stop_db()
self._reset_configuration(config_contents, admin_password)
self._apply_user_overrides(overrides)
self.start_mysql()
self._wait_for_mysql_to_be_really_alive(
CONF.timeout_wait_for_service)
LOG.debug("MySQL secure complete.")
def _grant_cluster_replication_privilege(self, replication_user):
LOG.info(_("Granting Replication Slave privilege."))
with self.local_sql_client(self.get_engine()) as client:
perms = ['REPLICATION CLIENT', 'RELOAD', 'LOCK TABLES']
g = sql_query.Grant(permissions=perms,
user=replication_user['name'],
clear=replication_user['password'])
t = text(str(g))
client.execute(t)
def _bootstrap_cluster(self, timeout=120):
LOG.info(_("Bootstraping cluster."))
try:
mysql_service = system.service_discovery(
service_base.MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(
mysql_service['cmd_bootstrap_pxc_cluster'],
shell=True, timeout=timeout)
except KeyError:
LOG.exception(_("Error bootstrapping cluster."))
raise RuntimeError(_("Service is not discovered."))
def install_cluster(self, replication_user, cluster_configuration,
bootstrap=False):
LOG.info(_("Installing cluster configuration."))
self._grant_cluster_replication_privilege(replication_user)
self.stop_db()
self.configuration_manager.apply_system_override(cluster_configuration,
CNF_CLUSTER)
self.wipe_ib_logfiles()
LOG.debug("bootstrap the instance? : %s" % bootstrap)
# Have to wait to sync up the joiner instances with the donor instance.
if bootstrap:
self._bootstrap_cluster(timeout=CONF.restore_usage_timeout)
else:
self.start_mysql(timeout=CONF.restore_usage_timeout)
class PXCRootAccess(service_base.BaseMySqlRootAccess):
def __init__(self):
super(PXCRootAccess, self).__init__(LocalSqlClient,
PXCApp(PXCAppStatus.get()))
class PXCAdmin(service_base.BaseMySqlAdmin):
def __init__(self):
super(PXCAdmin, self).__init__(LocalSqlClient, PXCRootAccess(),
PXCApp)

View File

@ -0,0 +1,27 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trove.guestagent.common import operating_system
def service_discovery(service_candidates):
result = operating_system.service_discovery(service_candidates)
if result['type'] == 'sysvinit':
result['cmd_bootstrap_pxc_cluster'] = ("sudo service %s bootstrap-pxc"
% result['service'])
elif result['type'] == 'systemd':
result['cmd_bootstrap_pxc_cluster'] = ("systemctl start "
"%s@bootstrap.service"
% result['service'])
return result
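
A usage sketch of the helper above (the 'mysql' service name is illustrative):

from trove.guestagent.datastore.experimental.pxc import system

mysql_service = system.service_discovery(['mysql'])
# On a sysvinit guest this resolves to:
#   mysql_service['cmd_bootstrap_pxc_cluster'] == 'sudo service mysql bootstrap-pxc'
# and on a systemd guest to 'systemctl start mysql@bootstrap.service'.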

View File

@ -923,18 +923,22 @@ class BaseMySqlApp(object):
_("Replication is not %(status)s after %(max)d seconds.") % {
'status': status.lower(), 'max': max_time})
def start_mysql(self, update_db=False):
def start_mysql(self, update_db=False, disable_on_boot=False, timeout=120):
LOG.info(_("Starting MySQL."))
# This is the site of all the trouble in the restart tests.
# Essentially what happens is that mysql start fails, but does not
# die. It is then impossible to kill the original, so
self._enable_mysql_on_boot()
if disable_on_boot:
self._disable_mysql_on_boot()
else:
self._enable_mysql_on_boot()
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_start'], shell=True)
utils.execute_with_timeout(mysql_service['cmd_start'], shell=True,
timeout=timeout)
except KeyError:
raise RuntimeError("Service is not discovered.")
except exception.ProcessExecutionError:
@ -1023,6 +1027,16 @@ class BaseMySqlApp(object):
client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
% txn)
def reset_admin_password(self, admin_password):
"""Replace the password in the my.cnf file."""
# grant the new admin password
with self.local_sql_client(self.get_engine()) as client:
self._create_admin_user(client, admin_password)
# reset the ENGINE because the password could have changed
global ENGINE
ENGINE = None
self._save_authentication_properties(admin_password)
class BaseMySqlRootAccess(object):

View File

@ -38,6 +38,8 @@ defaults = {
'trove.guestagent.datastore.mysql.manager.Manager',
'percona':
'trove.guestagent.datastore.experimental.percona.manager.Manager',
'pxc':
'trove.guestagent.datastore.experimental.pxc.manager.Manager',
'redis':
'trove.guestagent.datastore.experimental.redis.manager.Manager',
'cassandra':

View File

@ -0,0 +1,4 @@
[mysqld]
log_bin = /var/lib/mysql/data/mysql-bin.log
relay_log = /var/lib/mysql/data/mysql-relay-bin.log
read_only = true

View File

@ -0,0 +1,2 @@
[mysqld]
log_bin = /var/lib/mysql/data/mysql-bin.log

View File

@ -0,0 +1,19 @@
[mysqld]
binlog_format=ROW
bind-address=0.0.0.0
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=0
wsrep_slave_threads=8
wsrep_provider=/usr/lib/libgalera_smm.so
wsrep_provider_options="gcache.size={{ (128 * flavor['ram']/512)|int }}M; gcache.page_size=1G"
wsrep_sst_method=xtrabackup
wsrep_sst_auth="{{ replication_user_pass }}"
wsrep_cluster_address="gcomm://{{ cluster_ips }}"
wsrep_cluster_name={{ cluster_name }}
wsrep_node_name={{ instance_name }}
wsrep_node_address={{ instance_ip }}
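
For illustration, the rendered conf.d/cluster.cnf for the first member of a
three-node cluster on a 512 MB flavor (so gcache.size renders to 128M) would
come out roughly as follows; the IPs, generated cluster name, and replication
password are placeholders:

[mysqld]
binlog_format=ROW
bind-address=0.0.0.0
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=0
wsrep_slave_threads=8
wsrep_provider=/usr/lib/libgalera_smm.so
wsrep_provider_options="gcache.size=128M; gcache.page_size=1G"
wsrep_sst_method=xtrabackup
wsrep_sst_auth="clusterrepuser:<generated-password>"
wsrep_cluster_address="gcomm://10.0.0.2,10.0.0.3,10.0.0.4"
wsrep_cluster_name=3a6e1a3bd2954b78
wsrep_node_name=mycluster-member-1
wsrep_node_address=10.0.0.2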

View File

@ -0,0 +1,55 @@
[client]
port = 3306
[mysqld_safe]
nice = 0
[mysqld]
user = mysql
port = 3306
basedir = /usr
datadir = /var/lib/mysql/data
####tmpdir = /tmp
tmpdir = /var/tmp
pid_file = /var/run/mysqld/mysqld.pid
skip-external-locking = 1
key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M
max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K
thread_stack = 192K
thread_cache_size = {{ (4 * flavor['ram']/512)|int }}
myisam-recover = BACKUP
query_cache_type = 1
query_cache_limit = 1M
query_cache_size = {{ (8 * flavor['ram']/512)|int }}M
innodb_data_file_path = ibdata1:10M:autoextend
innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M
innodb_file_per_table = 1
innodb_log_files_in_group = 2
innodb_log_file_size=50M
innodb_log_buffer_size=25M
connect_timeout = 15
wait_timeout = 120
join_buffer_size = 1M
read_buffer_size = 512K
read_rnd_buffer_size = 512K
sort_buffer_size = 1M
tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M
max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M
table_open_cache = {{ (256 * flavor['ram']/512)|int }}
table_definition_cache = {{ (256 * flavor['ram']/512)|int }}
open_files_limit = {{ (512 * flavor['ram']/512)|int }}
max_user_connections = {{ (100 * flavor['ram']/512)|int }}
max_connections = {{ (100 * flavor['ram']/512)|int }}
default_storage_engine = innodb
local-infile = 0
server_id = {{server_id}}
[mysqldump]
quick = 1
quote-names = 1
max_allowed_packet = 16M
[isamchk]
key_buffer = 16M
!includedir /etc/mysql/conf.d/

View File

@ -0,0 +1,12 @@
[mysqld]
{% for key, value in overrides.iteritems() -%}
{%- if value == True -%}
{{key}} = 1
{%- elif value == False -%}
{{key}} = 0
{%- elif value == "" -%}
{{key}}
{%- else -%}
{{key}}={{value}}
{%- endif %}
{% endfor %}

View File

@ -0,0 +1,10 @@
[mysqld]
log_bin = /var/lib/mysql/data/mysql-bin.log
relay_log = /var/lib/mysql/data/mysql-relay-bin.log
relay_log_info_repository = TABLE
relay_log_recovery = 1
relay_log_purge = 1
enforce_gtid_consistency = ON
gtid_mode = ON
log_slave_updates = ON
read_only = true

View File

@ -0,0 +1,7 @@
[mysqld]
log_bin = /var/lib/mysql/data/mysql-bin.log
binlog_format = MIXED
enforce_gtid_consistency = ON
gtid_mode = ON
log_slave_updates = ON
enforce_storage_engine = InnoDB

View File

@ -0,0 +1,224 @@
{
"configuration-parameters": [
{
"name": "innodb_file_per_table",
"restart_required": true,
"max": 1,
"min": 0,
"type": "integer"
},
{
"name": "autocommit",
"restart_required": false,
"max": 1,
"min": 0,
"type": "integer"
},
{
"name": "local_infile",
"restart_required": false,
"max": 1,
"min": 0,
"type": "integer"
},
{
"name": "key_buffer_size",
"restart_required": false,
"max": 4294967296,
"min": 0,
"type": "integer"
},
{
"name": "connect_timeout",
"restart_required": false,
"max": 65535,
"min": 1,
"type": "integer"
},
{
"name": "join_buffer_size",
"restart_required": false,
"max": 4294967296,
"min": 0,
"type": "integer"
},
{
"name": "sort_buffer_size",
"restart_required": false,
"max": 18446744073709547520,
"min": 32768,
"type": "integer"
},
{
"name": "innodb_buffer_pool_size",
"restart_required": true,
"max": 68719476736,
"min": 0,
"type": "integer"
},
{
"name": "innodb_flush_log_at_trx_commit",
"restart_required": false,
"max": 2,
"min": 0,
"type": "integer"
},
{
"name": "innodb_log_buffer_size",
"restart_required": true,
"max": 4294967296,
"min": 1048576,
"type": "integer"
},
{
"name": "innodb_open_files",
"restart_required": true,
"max": 4294967296,
"min": 10,
"type": "integer"
},
{
"name": "innodb_thread_concurrency",
"restart_required": false,
"max": 1000,
"min": 0,
"type": "integer"
},
{
"name": "sync_binlog",
"restart_required": false,
"max": 18446744073709547520,
"min": 0,
"type": "integer"
},
{
"name": "auto_increment_increment",
"restart_required": false,
"max": 65535,
"min": 1,
"type": "integer"
},
{
"name": "auto_increment_offset",
"restart_required": false,
"max": 65535,
"min": 1,
"type": "integer"
},
{
"name": "bulk_insert_buffer_size",
"restart_required": false,
"max": 18446744073709547520,
"min": 0,
"type": "integer"
},
{
"name": "expire_logs_days",
"restart_required": false,
"max": 65535,
"min": 1,
"type": "integer"
},
{
"name": "interactive_timeout",
"restart_required": false,
"max": 65535,
"min": 1,
"type": "integer"
},
{
"name": "max_allowed_packet",
"restart_required": false,
"max": 1073741824,
"min": 1024,
"type": "integer"
},
{
"name": "max_connect_errors",
"restart_required": false,
"max": 18446744073709547520,
"min": 1,
"type": "integer"
},
{
"name": "max_connections",
"restart_required": false,
"max": 65535,
"min": 1,
"type": "integer"
},
{
"name": "myisam_sort_buffer_size",
"restart_required": false,
"max": 18446744073709547520,
"min": 4,
"type": "integer"
},
{
"name": "max_user_connections",
"restart_required": false,
"max": 100000,
"min": 1,
"type": "integer"
},
{
"name": "server_id",
"restart_required": true,
"max": 100000,
"min": 1,
"type": "integer"
},
{
"name": "wait_timeout",
"restart_required": false,
"max": 31536000,
"min": 1,
"type": "integer"
},
{
"name": "character_set_client",
"restart_required": false,
"type": "string"
},
{
"name": "character_set_connection",
"restart_required": false,
"type": "string"
},
{
"name": "character_set_database",
"restart_required": false,
"type": "string"
},
{
"name": "character_set_filesystem",
"restart_required": false,
"type": "string"
},
{
"name": "character_set_results",
"restart_required": false,
"type": "string"
},
{
"name": "character_set_server",
"restart_required": false,
"type": "string"
},
{
"name": "collation_connection",
"restart_required": false,
"type": "string"
},
{
"name": "collation_database",
"restart_required": false,
"type": "string"
},
{
"name": "collation_server",
"restart_required": false,
"type": "string"
}
]
}

trove/tests/api/pxc.py (new file, 325 lines)
View File

@ -0,0 +1,325 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration tests for PXC datastore.
APIs tested for PXC are:
1. create
2. restart
3. resize-volume
4. resize-instance
5. delete
6. cluster-create
7. cluster-delete
"""
from proboscis import asserts
from proboscis.decorators import before_class
from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.config import CONFIG
from trove.tests.util.check import TypeCheck
from trove.tests.util import create_dbaas_client
PXC_GROUP = "dbaas.api.pxc"
TIMEOUT = 2300
SLEEP_TIME = 60
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[PXC_GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class PXCTest(object):
"""Tests PXC Datastore Features."""
@before_class
def setUp(self):
self.instance = instance_info
self.rd_client = create_dbaas_client(self.instance.user)
self.report = CONFIG.get_report()
def _find_status(self, rd_client, instance_id, expected_status):
"""Tracks instance status, until it gets to expected_status."""
instance = rd_client.instances.get(instance_id)
self.report.log("Instance info %s." % instance._info)
if instance.status == expected_status:
self.report.log("Instance: %s is ready." % instance_id)
return True
else:
return False
@test
def test_instance_restart(self):
"""Tests the restart API."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.restart(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("REBOOT", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Restarted Instance: %s." % self.instance.id)
@test(depends_on=[test_instance_restart])
def test_instance_resize_volume(self):
"""Tests the resize volume API."""
old_volume_size = int(instance_info.volume['size'])
new_volume_size = old_volume_size + 1
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_volume(self.instance.id,
new_volume_size)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(instance.volume['size'], new_volume_size)
self.report.log("Resized Volume for Instance ID: %s to %s." % (
self.instance.id, new_volume_size))
@test(depends_on=[test_instance_resize_volume])
def test_instance_resize_flavor(self):
"""Tests the resize instance/flavor API."""
flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
'm1.medium')
flavors = self.instance.dbaas.find_flavors_by_name(flavor_name)
new_flavor = flavors[0]
asserts.assert_true(new_flavor is not None,
"Flavor '%s' not found!" % flavor_name)
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_instance(self.instance.id,
new_flavor.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(int(test_instance.flavor['id']), new_flavor.id)
self.report.log("Resized Flavor for Instance ID: %s to %s." % (
self.instance.id, new_flavor.id))
@test(depends_on=[test_instance_resize_flavor])
def test_instance_delete(self):
"""Tests the instance delete."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.delete(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("SHUTDOWN", test_instance.status)
def _poll():
try:
instance = self.rd_client.instances.get(self.instance.id)
self.report.log("Instance info %s" % instance._info)
asserts.assert_equal("SHUTDOWN", instance.status)
return False
except exceptions.NotFound:
self.report.log("Instance has gone.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted Instance ID: %s " % self.instance.id)
@test(depends_on=[test_instance_delete])
def test_create_cluster_with_fewer_instances(self):
invalid_request_body_with_few_instances = [
{"flavorRef": 2, "volume": {"size": 1}}]
self.rd_client = create_dbaas_client(self.instance.user)
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_few_instances)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_fewer_instances])
def test_create_cluster_with_different_flavors(self):
invalid_request_body_with_different_flavors = [
{"flavorRef": 3, "volume": {"size": 1}},
{"flavorRef": 4, "volume": {"size": 1}}]
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_different_flavors)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_different_flavors])
def test_create_cluster_with_different_volumes(self):
invalid_request_body_with_different_volumes = [
{"flavorRef": 2, "volume": {"size": 2}},
{"flavorRef": 2, "volume": {"size": 3}}]
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_different_volumes)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_different_volumes])
def test_create_cluster_successfuly(self):
valid_request_body = [
{"flavorRef": self.instance.dbaas_flavor_href,
"volume": self.instance.volume},
{"flavorRef": self.instance.dbaas_flavor_href,
"volume": self.instance.volume}]
self.cluster = self.rd_client.clusters.create(
"test_cluster", self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=valid_request_body)
with TypeCheck('Cluster', self.cluster) as check:
check.has_field("id", basestring)
check.has_field("name", basestring)
check.has_field("datastore", dict)
check.has_field("instances", list)
check.has_field("links", list)
check.has_field("created", unicode)
check.has_field("updated", unicode)
for instance in self.cluster.instances:
isinstance(instance, dict)
asserts.assert_is_not_none(instance['id'])
asserts.assert_is_not_none(instance['links'])
asserts.assert_is_not_none(instance['name'])
asserts.assert_equal(200, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_successfuly])
def test_wait_until_cluster_is_active(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
def result_is_active():
cluster = self.rd_client.clusters.get(self.cluster.id)
cluster_instances = [
self.rd_client.instances.get(instance['id'])
for instance in cluster.instances]
self.report.log("Cluster info %s." % cluster._info)
self.report.log("Cluster instances info %s." % cluster_instances)
if cluster.task['name'] == "NONE":
if ["ERROR"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster provisioning failed.")
asserts.fail("Cluster provisioning failed.")
if ["ACTIVE"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster is ready.")
return True
else:
asserts.assert_not_equal(
["ERROR"] * len(cluster_instances),
[instance.status
for instance in cluster_instances])
self.report.log("Continue polling, cluster is not ready yet.")
poll_until(result_is_active, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Created cluster, ID = %s." % self.cluster.id)
@test(depends_on=[test_wait_until_cluster_is_active])
def test_cluster_communication(self):
databases = []
databases.append({"name": 'somenewdb'})
cluster = self.rd_client.clusters.get(self.cluster.id)
cluster_instances = [
self.rd_client.instances.get(instance['id'])
for instance in cluster.instances]
databases_before = self.rd_client.databases.list(
cluster_instances[0].id)
self.rd_client.databases.create(cluster_instances[0].id,
databases)
for instance in cluster_instances:
databases_after = self.rd_client.databases.list(
instance.id)
asserts.assert_true(len(databases_before) < len(databases_after))
@test(depends_on=[test_wait_until_cluster_is_active],
runs_after=[test_cluster_communication])
def test_cluster_delete(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
self.rd_client.clusters.delete(self.cluster.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
def _poll():
try:
cluster = self.rd_client.clusters.get(
self.cluster.id)
self.report.log("Cluster info %s" % cluster._info)
asserts.assert_equal("DELETING", cluster.task['name'])
return False
except exceptions.NotFound:
self.report.log("Cluster is not available.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted cluster: %s." % self.cluster.id)

View File

@ -27,6 +27,7 @@ from trove.tests.api.mgmt import datastore_versions
from trove.tests.api.mgmt import hosts
from trove.tests.api.mgmt import instances as mgmt_instances
from trove.tests.api.mgmt import storage
from trove.tests.api import pxc
from trove.tests.api import redis
from trove.tests.api import replication
from trove.tests.api import root
@ -154,6 +155,7 @@ register(["mysql_group"], backup_groups, instance_actions_groups,
register(["redis_group"], backup_groups, instance_actions_groups,
replication_groups)
register(["vertica_group"], cluster_actions_groups, instance_actions_groups)
register(["pxc_group"], instance_actions_groups, cluster_actions_groups)
# Redis int-tests
redis_group = [
@ -166,3 +168,10 @@ redis_group = [
]
proboscis.register(groups=["redis"],
depends_on_groups=redis_group)
# PXC int-tests
pxc_group = [
pxc.PXC_GROUP,
]
proboscis.register(groups=["pxc"],
depends_on_groups=pxc_group)

View File

@ -36,5 +36,9 @@ class ClusterActionsGroup(TestGroup):
self.test_runner.run_cluster_create()
@test(depends_on=[cluster_create])
def test_cluster_communication(self):
self.test_runner.run_cluster_communication()
@test(depends_on=[cluster_create], runs_after=[test_cluster_communication])
def cluster_delete(self):
self.test_runner.run_cluster_delete()

View File

@ -59,6 +59,22 @@ class ClusterActionsRunner(TestRunner):
return cluster_id
def run_cluster_communication(self):
databases = []
databases.append({"name": 'somenewdb'})
cluster = self.auth_client.clusters.get(self.cluster_id)
cluster_instances = [
self.auth_client.instances.get(instance['id'])
for instance in cluster.instances]
databases_before = self.auth_client.databases.list(
cluster_instances[0].id)
self.auth_client.databases.create(cluster_instances[0].id,
databases)
for instance in cluster_instances:
databases_after = self.auth_client.databases.list(
instance.id)
asserts.assert_true(len(databases_before) < len(databases_after))
def run_cluster_delete(
self, expected_last_instance_state='SHUTDOWN',
expected_http_code=202):

View File

@ -0,0 +1,346 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools.matchers import Is, Equals
from trove.cluster import models
from trove.cluster.models import Cluster
from trove.cluster.service import ClusterController
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools
class TestClusterController(trove_testtools.TestCase):
def setUp(self):
super(TestClusterController, self).setUp()
self.controller = ClusterController()
instances = [
{
"flavorRef": "7",
"volume": {
"size": 1
},
"availability_zone": "az",
"nics": [
{"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
]
}
] * 3
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "pxc",
"version": "5.5"
},
"instances": instances
}
}
def test_get_schema_create(self):
schema = self.controller.get_schema('create', self.cluster)
self.assertIsNotNone(schema)
self.assertTrue('cluster' in schema['properties'])
self.assertTrue('cluster')
def test_validate_create(self):
body = self.cluster
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_create_blankname(self):
body = self.cluster
body['cluster']['name'] = " "
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(len(errors), Is(1))
self.assertThat(errors[0].message,
Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))
def test_validate_create_blank_datastore(self):
body = self.cluster
body['cluster']['datastore']['type'] = ""
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
error_messages = [error.message for error in errors]
error_paths = [error.path.pop() for error in errors]
self.assertThat(len(errors), Is(2))
self.assertIn("'' is too short", error_messages)
self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
self.assertIn("type", error_paths)
@patch.object(Cluster, 'create')
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters_disabled(self,
mock_get_datastore_version,
mock_cluster_create):
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mysql'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.ClusterDatastoreNotSupported,
self.controller.create,
req,
body,
tenant_id)
@patch.object(Cluster, 'create')
@patch.object(utils, 'get_id_from_href')
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters(self,
mock_get_datastore_version,
mock_id_from_href,
mock_cluster_create):
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'pxc'
datastore = Mock()
mock_get_datastore_version.return_value = (datastore,
datastore_version)
instances = [
{
'volume_size': 1,
'flavor_id': '1234',
'availability_zone': 'az',
'nics': [
{'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
]
}
] * 3
mock_id_from_href.return_value = '1234'
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'pxc'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
mock_cluster_create.assert_called_with(context, 'products',
datastore, datastore_version,
instances)
@patch.object(Cluster, 'load')
def test_show_cluster(self,
mock_cluster_load):
tenant_id = Mock()
id = Mock()
context = Mock()
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'pxc'
mock_cluster_load.return_value = mock_cluster
self.controller.show(req, tenant_id, id)
mock_cluster_load.assert_called_with(context, id)
@patch.object(Cluster, 'load')
@patch.object(Cluster, 'load_instance')
def test_show_cluster_instance(self,
mock_cluster_load_instance,
mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
instance_id = Mock()
context = Mock()
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
cluster = Mock()
mock_cluster_load.return_value = cluster
cluster.id = cluster_id
self.controller.show_instance(req, tenant_id, cluster_id, instance_id)
mock_cluster_load_instance.assert_called_with(context, cluster.id,
instance_id)
@patch.object(Cluster, 'load')
def test_delete_cluster(self, mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
req = MagicMock()
cluster = Mock()
mock_cluster_load.return_value = cluster
self.controller.delete(req, tenant_id, cluster_id)
cluster.delete.assert_called_with()
class TestClusterControllerWithStrategy(trove_testtools.TestCase):
def setUp(self):
super(TestClusterControllerWithStrategy, self).setUp()
self.controller = ClusterController()
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "pxc",
"version": "5.5"
},
"instances": [
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
]
}
}
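    # Tests in this class override the pxc cluster options, so tearDown clears
    # those overrides to keep the tests isolated from each other.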
def tearDown(self):
super(TestClusterControllerWithStrategy, self).tearDown()
cfg.CONF.clear_override('cluster_support', group='pxc')
cfg.CONF.clear_override('api_strategy', group='pxc')
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_disabled(self,
mock_cluster_create,
mock_get_datastore_version):
cfg.CONF.set_override('cluster_support', False, group='pxc')
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'pxc'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.TroveError, self.controller.create, req,
body, tenant_id)
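    # With cluster_support switched on for pxc the same request goes through;
    # the cluster view's data() is stubbed out so no real serialization runs.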
@patch.object(views.ClusterView, 'data', return_value={})
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_enabled(self,
mock_cluster_create,
mock_get_datastore_version,
mock_cluster_view_data):
cfg.CONF.set_override('cluster_support', True, group='pxc')
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'pxc'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
mock_cluster = Mock()
mock_cluster.datastore_version.manager = 'pxc'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
@patch.object(models.Cluster, 'load')
def test_controller_action_no_strategy(self,
mock_cluster_load):
body = {'do_stuff2': {}}
tenant_id = Mock()
context = Mock()
id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.datastore_version.manager = 'pxc'
mock_cluster_load.return_value = cluster
self.assertRaises(exception.TroveError, self.controller.action, req,
body, tenant_id, id)
@patch.object(strategy, 'load_api_strategy')
@patch.object(models.Cluster, 'load')
def test_controller_action_found(self,
mock_cluster_load,
mock_cluster_api_strategy):
body = {'do_stuff': {}}
tenant_id = Mock()
context = Mock()
id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.datastore_version.manager = 'pxc'
mock_cluster_load.return_value = cluster
strat = Mock()
do_stuff_func = Mock()
strat.cluster_controller_actions = \
{'do_stuff': do_stuff_func}
mock_cluster_api_strategy.return_value = strat
self.controller.action(req, body, tenant_id, id)
self.assertEqual(1, do_stuff_func.call_count)

View File

@ -0,0 +1,279 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from mock import Mock
from mock import patch
from novaclient import exceptions as nova_exceptions
from trove.cluster.models import Cluster
from trove.cluster.models import ClusterTasks
from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster.experimental.pxc import (
api as pxc_api)
from trove.instance import models as inst_models
from trove.quota.quota import QUOTAS
from trove.taskmanager import api as task_api
from trove.tests.unittests import trove_testtools
CONF = cfg.CONF
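# Minimal stand-in for the pxc datastore option group, letting individual
# tests vary cluster_member_count, volume_support and device_path without
# touching the real CONF object.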
class FakeOptGroup(object):
def __init__(self, cluster_member_count=3,
volume_support=True, device_path='/dev/vdb'):
self.cluster_member_count = cluster_member_count
self.volume_support = volume_support
self.device_path = device_path
class ClusterTest(trove_testtools.TestCase):
def setUp(self):
super(ClusterTest, self).setUp()
self.get_client_patch = patch.object(task_api.API, 'get_client')
self.get_client_mock = self.get_client_patch.start()
self.addCleanup(self.get_client_patch.stop)
self.cluster_id = str(uuid.uuid4())
self.cluster_name = "Cluster" + self.cluster_id
self.tenant_id = "23423432"
self.dv_id = "1"
self.db_info = DBCluster(ClusterTasks.NONE,
id=self.cluster_id,
name=self.cluster_name,
tenant_id=self.tenant_id,
datastore_version_id=self.dv_id,
task_id=ClusterTasks.NONE._code)
self.context = Mock()
self.datastore = Mock()
self.dv = Mock()
self.dv.manager = "pxc"
self.datastore_version = self.dv
self.cluster = pxc_api.PXCCluster(self.context, self.db_info,
self.datastore,
self.datastore_version)
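        # Default request body: three members with identical flavor and volume
        # definitions, which is the shape most of the create tests expect.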
self.instances = [{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'}]
def tearDown(self):
super(ClusterTest, self).tearDown()
def test_create_empty_instances(self):
self.assertRaises(exception.ClusterNumInstancesNotSupported,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
[]
)
def test_create_flavor_not_specified(self):
instances = self.instances
instances[0]['flavor_id'] = None
self.assertRaises(exception.ClusterFlavorsNotEqual,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances
)
@patch.object(remote, 'create_nova_client')
def test_create_invalid_flavor_specified(self,
mock_client):
instances = [{'flavor_id': '1234'},
{'flavor_id': '1234'},
{'flavor_id': '1234'}]
(mock_client.return_value.flavors.get) = Mock(
side_effect=nova_exceptions.NotFound(
404, "Flavor id not found %s" % id))
self.assertRaises(exception.FlavorNotFound,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances
)
@patch.object(remote, 'create_nova_client')
def test_create_volume_no_specified(self,
mock_client):
instances = self.instances
instances[0]['volume_size'] = None
flavors = Mock()
mock_client.return_value.flavors = flavors
self.assertRaises(exception.ClusterVolumeSizeRequired,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances
)
@patch.object(remote, 'create_nova_client')
@patch.object(pxc_api, 'CONF')
def test_create_storage_specified_with_no_volume_support(self,
mock_conf,
mock_client):
mock_conf.get = Mock(
return_value=FakeOptGroup(volume_support=False))
instances = self.instances
instances[0]['volume_size'] = None
flavors = Mock()
mock_client.return_value.flavors = flavors
self.assertRaises(exception.VolumeNotSupported,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances
)
@patch.object(remote, 'create_nova_client')
@patch.object(pxc_api, 'CONF')
def test_create_storage_not_specified_and_no_ephemeral_flavor(self,
mock_conf,
mock_client):
class FakeFlavor:
def __init__(self, flavor_id):
self.flavor_id = flavor_id
@property
def id(self):
                return self.flavor_id
@property
def ephemeral(self):
return 0
instances = [{'flavor_id': '1234'},
{'flavor_id': '1234'},
{'flavor_id': '1234'}]
mock_conf.get = Mock(
return_value=FakeOptGroup(volume_support=False))
(mock_client.return_value.
flavors.get.return_value) = FakeFlavor('1234')
self.assertRaises(exception.LocalStorageNotSpecified,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances
)
@patch.object(remote, 'create_nova_client')
def test_create_volume_not_equal(self, mock_client):
instances = self.instances
instances[0]['volume_size'] = 2
flavors = Mock()
mock_client.return_value.flavors = flavors
self.assertRaises(exception.ClusterVolumeSizesNotEqual,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances
)
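    # Happy path: three valid instance definitions should yield three
    # Instance.create calls and a single create_cluster task.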
@patch.object(inst_models.Instance, 'create')
@patch.object(DBCluster, 'create')
@patch.object(task_api, 'load')
@patch.object(QUOTAS, 'check_quotas')
@patch.object(remote, 'create_nova_client')
def test_create(self, mock_client, mock_check_quotas, mock_task_api,
mock_db_create, mock_ins_create):
instances = self.instances
flavors = Mock()
mock_client.return_value.flavors = flavors
self.cluster.create(Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances)
mock_task_api.return_value.create_cluster.assert_called_with(
mock_db_create.return_value.id)
self.assertEqual(3, mock_ins_create.call_count)
@patch.object(pxc_api, 'CONF')
@patch.object(inst_models.Instance, 'create')
@patch.object(DBCluster, 'create')
@patch.object(task_api, 'load')
@patch.object(QUOTAS, 'check_quotas')
@patch.object(remote, 'create_nova_client')
def test_create_with_ephemeral_flavor(self, mock_client, mock_check_quotas,
mock_task_api, mock_db_create,
mock_ins_create, mock_conf):
class FakeFlavor:
def __init__(self, flavor_id):
self.flavor_id = flavor_id
@property
def id(self):
                return self.flavor_id
@property
def ephemeral(self):
return 1
instances = [{'flavor_id': '1234'},
{'flavor_id': '1234'},
{'flavor_id': '1234'}]
mock_conf.get = Mock(
return_value=FakeOptGroup(volume_support=False))
(mock_client.return_value.
flavors.get.return_value) = FakeFlavor('1234')
self.cluster.create(Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances)
mock_task_api.return_value.create_cluster.assert_called_with(
mock_db_create.return_value.id)
self.assertEqual(3, mock_ins_create.call_count)
def test_delete_bad_task_status(self):
self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL
self.assertRaises(exception.UnprocessableEntity,
self.cluster.delete)
@patch.object(task_api.API, 'delete_cluster')
@patch.object(Cluster, 'update_db')
@patch.object(inst_models.DBInstance, 'find_all')
def test_delete_task_status_none(self,
mock_find_all,
mock_update_db,
mock_delete_cluster):
self.cluster.db_info.task_status = ClusterTasks.NONE
self.cluster.delete()
mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
@patch.object(task_api.API, 'delete_cluster')
@patch.object(Cluster, 'update_db')
@patch.object(inst_models.DBInstance, 'find_all')
def test_delete_task_status_deleting(self,
mock_find_all,
mock_update_db,
mock_delete_cluster):
self.cluster.db_info.task_status = ClusterTasks.DELETING
self.cluster.delete()
mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)

View File

@ -20,6 +20,7 @@ import time
from uuid import uuid4
from mock import ANY
from mock import call
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
@ -57,6 +58,10 @@ from trove.guestagent.datastore.experimental.mongodb import (
service as mongo_service)
from trove.guestagent.datastore.experimental.mongodb import (
system as mongo_system)
from trove.guestagent.datastore.experimental.pxc import (
service as pxc_service)
from trove.guestagent.datastore.experimental.pxc import (
system as pxc_system)
from trove.guestagent.datastore.experimental.redis import service as rservice
from trove.guestagent.datastore.experimental.redis.service import RedisApp
from trove.guestagent.datastore.experimental.redis import system as RedisSystem
@ -760,7 +765,6 @@ class MySqlAppTest(testtools.TestCase):
self.orig_unlink = os.unlink
self.orig_get_auth_password = MySqlApp.get_auth_password
self.orig_service_discovery = operating_system.service_discovery
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
@ -771,9 +775,12 @@ class MySqlAppTest(testtools.TestCase):
'cmd_stop': Mock(),
'cmd_enable': Mock(),
'cmd_disable': Mock(),
'cmd_bootstrap_pxc_cluster': Mock(),
'bin': Mock()}
operating_system.service_discovery = Mock(
return_value=mysql_service)
pxc_system.service_discovery = Mock(
return_value=mysql_service)
time.sleep = Mock()
os.unlink = Mock()
MySqlApp.get_auth_password = Mock()
@ -784,6 +791,7 @@ class MySqlAppTest(testtools.TestCase):
self.mock_client.__enter__.return_value.execute = self.mock_execute
dbaas.orig_configuration_manager = dbaas.MySqlApp.configuration_manager
dbaas.MySqlApp.configuration_manager = Mock()
self.orig_create_engine = sqlalchemy.create_engine
def tearDown(self):
super(MySqlAppTest, self).tearDown()
@ -796,6 +804,7 @@ class MySqlAppTest(testtools.TestCase):
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.MySqlApp.configuration_manager = \
dbaas.orig_configuration_manager
sqlalchemy.create_engine = self.orig_create_engine
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
@ -863,7 +872,6 @@ class MySqlAppTest(testtools.TestCase):
self.assertEqual(2, mock_execute.call_count)
def test_stop_mysql_error(self):
dbaas_base.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.state_change_wait_time = 1
@ -957,9 +965,7 @@ class MySqlAppTest(testtools.TestCase):
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
def test_start_db_with_conf_changes(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_starts_successfully()
self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
@ -973,9 +979,7 @@ class MySqlAppTest(testtools.TestCase):
self.appStatus._get_actual_db_status())
def test_start_db_with_conf_changes_mysql_is_running(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
@ -1275,26 +1279,6 @@ class MySqlAppTest(testtools.TestCase):
self.assertEqual(expected, args[0],
"Sql statements are not the same")
class MySqlAppInstallTest(MySqlAppTest):
def setUp(self):
super(MySqlAppInstallTest, self).setUp()
self.orig_create_engine = sqlalchemy.create_engine
self.orig_pkg_version = dbaas_base.packager.pkg_version
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
def tearDown(self):
super(MySqlAppInstallTest, self).tearDown()
sqlalchemy.create_engine = self.orig_create_engine
dbaas_base.packager.pkg_version = self.orig_pkg_version
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_install(self):
self.mySqlApp._install_mysql = Mock()
@ -1324,10 +1308,13 @@ class MySqlAppInstallTest(MySqlAppTest):
self.mySqlApp.secure('contents', 'overrides')
self.assertTrue(self.mySqlApp.stop_db.called)
self.mySqlApp._reset_configuration.assert_called_once_with(
'contents', auth_pwd_mock.return_value)
self.mySqlApp._apply_user_overrides.assert_called_once_with(
'overrides')
        reset_config_calls = [call('contents', auth_pwd_mock.return_value),
                              call('contents', auth_pwd_mock.return_value)]
        self.mySqlApp._reset_configuration.assert_has_calls(
            reset_config_calls)
        apply_overrides_calls = [call('overrides'),
                                 call('overrides')]
        self.mySqlApp._apply_user_overrides.assert_has_calls(
            apply_overrides_calls)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
@ -1426,6 +1413,19 @@ class MySqlAppInstallTest(MySqlAppTest):
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
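    # reset_admin_password should write the new credentials through the
    # configuration manager and recreate the admin user exactly once.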
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_reset_admin_password(self, mock_engine):
with patch.object(dbaas.MySqlApp, 'local_sql_client',
return_value=self.mock_client):
config_manager = self.mySqlApp.configuration_manager
config_manager.apply_system_override = Mock()
self.mySqlApp._create_admin_user = Mock()
self.mySqlApp.reset_admin_password("newpassword")
self.assertEqual(1,
config_manager.apply_system_override.call_count)
self.assertEqual(1, self.mySqlApp._create_admin_user.call_count)
class TextClauseMatcher(object):
@ -1477,12 +1477,13 @@ class MySqlAppMockTest(testtools.TestCase):
MagicMock(return_value=None)
app = MySqlApp(mock_status)
app._reset_configuration = MagicMock()
app._write_mycnf = MagicMock(return_value=True)
app.start_mysql = MagicMock(return_value=None)
app._wait_for_mysql_to_be_really_alive = MagicMock(
return_value=True)
app.stop_db = MagicMock(return_value=None)
app.secure('foo', None)
app._reset_configuration.assert_called_once_with(
'foo', auth_pwd_mock.return_value)
reset_config_calls = [call('foo', auth_pwd_mock.return_value)]
app._reset_configuration.assert_has_calls(reset_config_calls)
self.assertTrue(mock_conn.execute.called)
@patch('trove.guestagent.datastore.mysql.service.MySqlApp'
@ -3394,3 +3395,121 @@ class DB2AdminTest(testtools.TestCase):
"from sysibm.sysdbauth; db2 connect reset"
self.assertEqual(args[0], expected,
"Delete database queries are not the same")
class PXCAppTest(testtools.TestCase):
def setUp(self):
super(PXCAppTest, self).setUp()
self.orig_utils_execute_with_timeout = \
dbaas_base.utils.execute_with_timeout
self.orig_time_sleep = time.sleep
self.orig_unlink = os.unlink
self.orig_get_auth_password = pxc_service.PXCApp.get_auth_password
self.orig_service_discovery = operating_system.service_discovery
self.orig_pxc_system_service_discovery = pxc_system.service_discovery
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.PXCApp = pxc_service.PXCApp(self.appStatus)
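        # Fake service_discovery result; PXC adds a dedicated bootstrap
        # command alongside the usual start/stop/enable/disable entries.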
mysql_service = {'cmd_start': Mock(),
'cmd_stop': Mock(),
'cmd_enable': Mock(),
'cmd_disable': Mock(),
'cmd_bootstrap_pxc_cluster': Mock(),
'bin': Mock()}
pxc_system.service_discovery = Mock(
return_value=mysql_service)
time.sleep = Mock()
os.unlink = Mock()
pxc_service.PXCApp.get_auth_password = Mock()
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
pxc_service.orig_configuration_manager = (
pxc_service.PXCApp.configuration_manager)
pxc_service.PXCApp.configuration_manager = Mock()
self.orig_create_engine = sqlalchemy.create_engine
def tearDown(self):
super(PXCAppTest, self).tearDown()
self.PXCApp = None
dbaas_base.utils.execute_with_timeout = \
self.orig_utils_execute_with_timeout
time.sleep = self.orig_time_sleep
os.unlink = self.orig_unlink
operating_system.service_discovery = self.orig_service_discovery
pxc_system.service_discovery = self.orig_pxc_system_service_discovery
pxc_service.PXCApp.get_auth_password = self.orig_get_auth_password
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
pxc_service.PXCApp.configuration_manager = \
pxc_service.orig_configuration_manager
sqlalchemy.create_engine = self.orig_create_engine
@patch.object(pxc_service.PXCApp, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test__grant_cluster_replication_privilege(self, mock_engine):
repl_user = {
'name': 'test-user',
'password': 'test-user-password',
}
with patch.object(pxc_service.PXCApp, 'local_sql_client',
return_value=self.mock_client):
self.PXCApp._grant_cluster_replication_privilege(repl_user)
args, _ = self.mock_execute.call_args_list[0]
expected = ("GRANT LOCK TABLES, RELOAD, REPLICATION CLIENT ON *.* "
"TO `test-user`@`%` IDENTIFIED BY 'test-user-password';")
self.assertEqual(expected, args[0].text,
"Sql statements are not the same")
@patch.object(utils, 'execute_with_timeout')
def test__bootstrap_cluster(self, mock_execute):
pxc_service_cmds = pxc_system.service_discovery(['mysql'])
self.PXCApp._bootstrap_cluster(timeout=20)
self.assertEqual(1, mock_execute.call_count)
mock_execute.assert_called_with(
pxc_service_cmds['cmd_bootstrap_pxc_cluster'],
shell=True,
timeout=20)
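    # Non-bootstrap install: stop the database, grant the replication
    # privilege, apply the cluster configuration override, wipe the InnoDB
    # log files and start mysql again.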
def test_install_cluster(self):
repl_user = {
'name': 'test-user',
'password': 'test-user-password',
}
apply_mock = Mock()
self.PXCApp.configuration_manager.apply_system_override = apply_mock
self.PXCApp.stop_db = Mock()
self.PXCApp._grant_cluster_replication_privilege = Mock()
self.PXCApp.wipe_ib_logfiles = Mock()
self.PXCApp.start_mysql = Mock()
self.PXCApp.install_cluster(repl_user, "something")
self.assertEqual(1, self.PXCApp.stop_db.call_count)
self.assertEqual(
1, self.PXCApp._grant_cluster_replication_privilege.call_count)
self.assertEqual(1, apply_mock.call_count)
self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count)
self.assertEqual(1, self.PXCApp.start_mysql.call_count)
def test_install_cluster_with_bootstrap(self):
repl_user = {
'name': 'test-user',
'password': 'test-user-password',
}
apply_mock = Mock()
self.PXCApp.configuration_manager.apply_system_override = apply_mock
self.PXCApp.stop_db = Mock()
self.PXCApp._grant_cluster_replication_privilege = Mock()
self.PXCApp.wipe_ib_logfiles = Mock()
self.PXCApp._bootstrap_cluster = Mock()
self.PXCApp.install_cluster(repl_user, "something", bootstrap=True)
self.assertEqual(1, self.PXCApp.stop_db.call_count)
self.assertEqual(
1, self.PXCApp._grant_cluster_replication_privilege.call_count)
self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count)
self.assertEqual(1, apply_mock.call_count)
self.assertEqual(1, self.PXCApp._bootstrap_cluster.call_count)

View File

@ -0,0 +1,122 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import Timeout
import mock
import trove.common.context as context
from trove.common import exception
from trove.common.rpc.version import RPC_API_VERSION
from trove.common.strategies.cluster.experimental.pxc.guestagent import (
PXCGuestAgentAPI)
from trove import rpc
from trove.tests.unittests import trove_testtools
def _mock_call(cmd, timeout, version=None, user=None,
public_keys=None, members=None):
    # Only the RPC commands exercised by these tests are expected; any other
    # command indicates a test failure.
if cmd in ('get_public_keys', 'authorize_public_keys',
'install_cluster', 'cluster_complete'):
return True
else:
raise BaseException("Test Failed")
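# The tests below drive the PXC guestagent RPC API through a mocked messaging
# client, so no real casts or calls ever leave the process.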
class ApiTest(trove_testtools.TestCase):
@mock.patch.object(rpc, 'get_client')
def setUp(self, *args):
super(ApiTest, self).setUp()
self.context = context.TroveContext()
self.guest = PXCGuestAgentAPI(self.context, 0)
self.guest._call = _mock_call
self.api = PXCGuestAgentAPI(self.context, "instance-id-x23d2d")
self._mock_rpc_client()
def test_get_routing_key(self):
self.assertEqual('guestagent.instance-id-x23d2d',
self.api._get_routing_key())
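    # Transport failures are translated into Trove's GuestError/GuestTimeout
    # exceptions rather than leaking IOError or Timeout to the caller.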
def test_api_cast_exception(self):
self.call_context.cast.side_effect = IOError('host down')
self.assertRaises(exception.GuestError, self.api.create_user,
'test_user')
def test_api_call_exception(self):
self.call_context.call.side_effect = IOError('host_down')
self.assertRaises(exception.GuestError, self.api.list_users)
def test_api_call_timeout(self):
self.call_context.call.side_effect = Timeout()
self.assertRaises(exception.GuestTimeout, self.api.restart)
def _verify_rpc_prepare_before_call(self):
self.api.client.prepare.assert_called_once_with(
version=RPC_API_VERSION, timeout=mock.ANY)
def _verify_rpc_prepare_before_cast(self):
self.api.client.prepare.assert_called_once_with(
version=RPC_API_VERSION)
def _verify_cast(self, *args, **kwargs):
self.call_context.cast.assert_called_once_with(self.context, *args,
**kwargs)
def _verify_call(self, *args, **kwargs):
self.call_context.call.assert_called_once_with(self.context, *args,
**kwargs)
def _mock_rpc_client(self):
self.call_context = mock.Mock()
self.api.client.prepare = mock.Mock(return_value=self.call_context)
self.call_context.call = mock.Mock()
self.call_context.cast = mock.Mock()
def test_install_cluster(self):
exp_resp = None
self.call_context.call.return_value = exp_resp
resp = self.api.install_cluster(
replication_user="repuser",
cluster_configuration="cluster-configuration",
bootstrap=False)
self._verify_rpc_prepare_before_call()
self._verify_call('install_cluster', replication_user="repuser",
cluster_configuration="cluster-configuration",
bootstrap=False)
self.assertEqual(exp_resp, resp)
def test_reset_admin_password(self):
exp_resp = None
self.call_context.call.return_value = exp_resp
resp = self.api.reset_admin_password(
admin_password="admin_password")
self._verify_rpc_prepare_before_call()
self._verify_call('reset_admin_password',
admin_password="admin_password")
self.assertEqual(exp_resp, resp)
def test_cluster_complete(self):
exp_resp = None
self.call_context.call.return_value = exp_resp
resp = self.api.cluster_complete()
self._verify_rpc_prepare_before_call()
self._verify_call('cluster_complete')
self.assertEqual(exp_resp, resp)

View File

@ -0,0 +1,63 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import MagicMock
from mock import patch
import testtools
from trove.common.context import TroveContext
from trove.guestagent.datastore.experimental.pxc.manager import Manager
import trove.guestagent.datastore.experimental.pxc.service as dbaas
class GuestAgentManagerTest(testtools.TestCase):
def setUp(self):
super(GuestAgentManagerTest, self).setUp()
self.manager = Manager()
self.context = TroveContext()
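        # The manager resolves its replication strategy from
        # replication_namespace and replication_strategy; patch that module
        # path so the tests do not depend on the real strategy module.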
self.patcher_rs = patch(self.manager.replication_namespace + "." +
self.manager.replication_strategy)
self.mock_rs_class = self.patcher_rs.start()
def tearDown(self):
super(GuestAgentManagerTest, self).tearDown()
self.patcher_rs.stop()
def test_install_cluster(self):
mock_status = MagicMock()
dbaas.PXCAppStatus.get = MagicMock(return_value=mock_status)
dbaas.PXCApp.install_cluster = MagicMock(return_value=None)
replication_user = "repuser"
configuration = "configuration"
bootstrap = True
self.manager.install_cluster(self.context, replication_user,
configuration, bootstrap)
dbaas.PXCAppStatus.get.assert_any_call()
dbaas.PXCApp.install_cluster.assert_called_with(
replication_user, configuration, bootstrap)
def test_reset_admin_password(self):
mock_status = MagicMock()
dbaas.PXCAppStatus.get = MagicMock(return_value=mock_status)
dbaas.PXCApp.reset_admin_password = MagicMock(return_value=None)
admin_password = "password"
self.manager.reset_admin_password(self.context, admin_password)
dbaas.PXCAppStatus.get.assert_any_call()
dbaas.PXCApp.reset_admin_password.assert_called_with(
admin_password)

View File

@ -0,0 +1,161 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from mock import Mock
from mock import patch
from trove.cluster.models import ClusterTasks as ClusterTaskStatus
from trove.cluster.models import DBCluster
from trove.common.exception import GuestError
from trove.common.strategies.cluster.experimental.pxc.taskmanager import (
PXCClusterTasks as ClusterTasks)
from trove.common.strategies.cluster.experimental.pxc.taskmanager import (
PXCTaskManagerStrategy as task_strategy)
from trove.datastore import models as datastore_models
from trove.instance.models import BaseInstance
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import InstanceTasks
from trove.taskmanager.models import ServiceStatuses
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
class PXCClusterTasksTest(trove_testtools.TestCase):
def setUp(self):
super(PXCClusterTasksTest, self).setUp()
util.init_db()
self.cluster_id = "1232"
self.cluster_name = "Cluster-1234"
self.tenant_id = "6789"
self.db_cluster = DBCluster(ClusterTaskStatus.NONE,
id=self.cluster_id,
created=str(datetime.date),
updated=str(datetime.date),
name=self.cluster_name,
task_id=ClusterTaskStatus.NONE._code,
tenant_id=self.tenant_id,
datastore_version_id="1",
deleted=False)
self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1",
compute_instance_id="compute-1",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-1",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2",
compute_instance_id="compute-2",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-2",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3",
compute_instance_id="compute-3",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-3",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
mock_ds1 = Mock()
mock_ds1.name = 'pxc'
mock_dv1 = Mock()
mock_dv1.name = '7.1'
self.clustertasks = ClusterTasks(Mock(),
self.db_cluster,
datastore=mock_ds1,
datastore_version=mock_dv1)
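    # A FAILED member service status should mark the cluster as failed and
    # make _all_instances_ready return False.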
@patch.object(ClusterTasks, 'update_statuses_on_failure')
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready_bad_status(self,
mock_find, mock_update):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.FAILED
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
mock_update.assert_called_with(self.cluster_id, None)
self.assertFalse(ret_val)
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready(self, mock_find):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.BUILD_PENDING
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
self.assertTrue(ret_val)
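    # If the members never become ready, create_cluster should give up and
    # reset the cluster task instead of proceeding.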
@patch.object(ClusterTasks, 'reset_task')
@patch.object(ClusterTasks, '_all_instances_ready', return_value=False)
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_create_cluster_instance_not_ready(self, mock_dv, mock_ds,
mock_find_all, mock_load,
mock_ready, mock_reset_task):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_reset_task.assert_called_with()
@patch.object(ClusterTasks, 'update_statuses_on_failure')
@patch.object(ClusterTasks, 'reset_task')
@patch.object(ClusterTasks, 'get_ip')
@patch.object(ClusterTasks, '_all_instances_ready')
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_create_cluster_fail(self, mock_dv, mock_ds, mock_find_all,
mock_load, mock_ready, mock_ip,
mock_reset_task, mock_update_status):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
mock_ip.return_value = "10.0.0.2"
guest_client = Mock()
guest_client.install_cluster = Mock(side_effect=GuestError("Error"))
with patch.object(ClusterTasks, 'get_guest',
return_value=guest_client):
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_update_status.assert_called_with('1232')
mock_reset_task.assert_called_with()
class PXCTaskManagerStrategyTest(trove_testtools.TestCase):
def test_task_manager_cluster_tasks_class(self):
percona_strategy = task_strategy()
self.assertFalse(
hasattr(percona_strategy.task_manager_cluster_tasks_class,
'rebuild_cluster'))
self.assertTrue(callable(
percona_strategy.task_manager_cluster_tasks_class.create_cluster))
def test_task_manager_api_class(self):
percona_strategy = task_strategy()
self.assertFalse(hasattr(percona_strategy.task_manager_api_class,
'add_new_node'))