Backup and Restore for reddwarf instances.

Implemented backup and restore for reddwarf instances.

Implements spec/consistent-snapshots

Change-Id: Ic1e78545f9e90ab986665a58f524081dcde6894b
This commit is contained in:
Nikhil Manchanda 2013-03-21 15:43:33 -07:00
parent 51523dbf94
commit 595125ac84
40 changed files with 3002 additions and 1247 deletions

View File

@ -44,6 +44,7 @@ reddwarf_proxy_admin_user = admin
reddwarf_proxy_admin_pass = 3de4922d8b6ac5a1aad9
reddwarf_proxy_admin_tenant_name = admin
reddwarf_auth_url = http://0.0.0.0:5000/v2.0
swift_url = http://10.0.0.1:8080/v1/AUTH_
# Manager impl for the taskmanager
guestagent_manager=reddwarf.guestagent.manager.Manager
@ -62,3 +63,15 @@ rabbit_host=10.0.0.1
# ============ Logging information =============================
log_dir = /tmp/
log_file = logfile.txt
# Strategy information for backups
backup_strategy = InnoBackupEx
backup_namespace = reddwarf.guestagent.strategies.backup.impl
restore_namespace = reddwarf.guestagent.strategies.restore.impl
storage_strategy = SwiftStorage
storage_namespace = reddwarf.guestagent.strategies.storage.swift
backup_swift_container = database_backups
backup_use_gzip_compression = True
backup_use_snet = False
backup_chunk_size = 65536
backup_segment_max_size = 2147483648

View File

@ -203,3 +203,13 @@ class DBBackup(DatabaseModelBase):
@property
def is_done(self):
    """True when this backup has reached a terminal state.

    BackupState.END_STATES enumerates the terminal states — presumably
    completed/failed; confirm against the BackupState definition.
    """
    return self.state in BackupState.END_STATES
@property
def filename(self):
    """Return the file name component of ``location``.

    :returns: the substring after the final ``/`` of ``location``, or
        ``None`` when no location has been recorded yet.
    :raises ValueError: if ``location`` contains no ``/`` separator.
    """
    if self.location:
        last_slash = self.location.rfind("/")
        if last_slash < 0:
            raise ValueError("Bad location for backup object.")
        # Slice (not index): return the whole trailing path component,
        # not just the single character after the slash.
        return self.location[last_slash + 1:]
    else:
        return None

View File

@ -44,7 +44,6 @@ common_opts = [
cfg.StrOpt('nova_volume_url', default='http://localhost:8776/v2'),
cfg.StrOpt('swift_url', default='http://localhost:8080/v1/AUTH_'),
cfg.StrOpt('reddwarf_auth_url', default='http://0.0.0.0:5000/v2.0'),
cfg.StrOpt('backup_swift_container', default='DBaaS-backup'),
cfg.StrOpt('host', default='0.0.0.0'),
cfg.IntOpt('report_interval', default=10),
cfg.IntOpt('periodic_interval', default=60),
@ -119,7 +118,31 @@ common_opts = [
cfg.IntOpt('usage_timeout', default=300,
help="Timeout to wait for an guest to become active"),
cfg.StrOpt('region', default='LOCAL_DEV',
help="The region this service is located.")
help="The region this service is located."),
cfg.StrOpt('backup_runner',
default='reddwarf.guestagent.backup.backup_types.InnoBackupEx'),
cfg.StrOpt('backup_strategy', default='InnoBackupEx',
help="Default strategy to perform backups"),
cfg.StrOpt('backup_namespace',
default='reddwarf.guestagent.strategies.backup.impl',
help="Namespace to load backup strategies from"),
cfg.StrOpt('restore_namespace',
default='reddwarf.guestagent.strategies.restore.impl',
help="Namespace to load restore strategies from"),
cfg.StrOpt('storage_strategy', default='SwiftStorage',
help="Default strategy to store backups"),
cfg.StrOpt('storage_namespace',
default='reddwarf.guestagent.strategies.storage.swift',
help="Namespace to load the default storage strategy from"),
cfg.StrOpt('backup_swift_container', default='database_backups'),
cfg.BoolOpt('backup_use_gzip_compression', default=True,
help="Compress backups using gzip."),
cfg.BoolOpt('backup_use_snet', default=False,
help="Send backup files over snet."),
cfg.IntOpt('backup_chunk_size', default=2 ** 16,
help="Chunk size to stream to swift container."),
cfg.IntOpt('backup_segment_max_size', default=2 * (1024 ** 3),
help="Maximum size of each segment of the backup file."),
]

View File

@ -78,16 +78,12 @@ class ModelBase(object):
return self.id.__hash__()
class NovaRemoteModelBase(ModelBase):
class RemoteModelBase(ModelBase):
# This should be set by the remote model during init time
# The data() method will be using this
_data_object = None
@classmethod
def get_client(cls, context):
return remote.create_nova_client(context)
def _data_item(self, data_object):
data_fields = self._data_fields + self._auto_generated_attrs
return dict([(field, getattr(data_object, field))
@ -102,3 +98,17 @@ class NovaRemoteModelBase(ModelBase):
return [self._data_item(item) for item in self._data_object]
else:
return self._data_item(self._data_object)
class NovaRemoteModelBase(RemoteModelBase):
@classmethod
def get_client(cls, context):
return remote.create_nova_client(context)
class SwiftRemoteModelBase(RemoteModelBase):
@classmethod
def get_client(cls, context):
return remote.create_swift_client(context)

View File

@ -25,6 +25,7 @@ COMPUTE_URL = CONF.nova_compute_url
PROXY_AUTH_URL = CONF.reddwarf_auth_url
VOLUME_URL = CONF.nova_volume_url
OBJECT_STORE_URL = CONF.swift_url
USE_SNET = CONF.backup_use_snet
def create_dns_client(context):
@ -60,15 +61,17 @@ def create_nova_volume_client(context):
def create_swift_client(context):
client = Connection(preauthurl=OBJECT_STORE_URL + context.tenant,
preauthtoken=context.auth_token,
tenant_name=context.tenant)
tenant_name=context.tenant,
snet=USE_SNET)
return client
# Override the functions above with fakes.
if CONF.remote_implementation == "fake":
from reddwarf.tests.fakes.nova import fake_create_nova_client
from reddwarf.tests.fakes.nova import fake_create_nova_volume_client
from reddwarf.tests.fakes.guestagent import fake_create_guest_client
from reddwarf.tests.fakes.swift import FakeSwiftClient
from reddwarf.tests.fakes.swift import fake_create_swift_client
def create_guest_client(context, id):
return fake_create_guest_client(context, id)
@ -80,4 +83,4 @@ if CONF.remote_implementation == "fake":
return fake_create_nova_volume_client(context)
def create_swift_client(context):
return FakeSwiftClient.Connection(context)
return fake_create_swift_client(context)

View File

@ -24,6 +24,8 @@ import sys
import time
import urlparse
import uuid
import os
import shutil
from eventlet import event
from eventlet import greenthread
@ -82,6 +84,23 @@ def utcnow():
return datetime.datetime.utcnow()
def raise_if_process_errored(process, exception):
    """Raise ``exception`` with the process's stderr contents, if any.

    :param process: a subprocess-like object with a readable ``stderr``
    :param exception: exception class to raise with the stderr text
    """
    try:
        err = process.stderr.read()
    except OSError:
        # Best effort: a stderr stream that cannot be read is treated as
        # "no error output".
        return
    if err:
        # Raise outside the try block so the exception propagates even
        # when ``exception`` is itself an OSError subclass.
        raise exception(err)
def clean_out(folder):
    """Delete every file and subdirectory inside ``folder``.

    The folder itself is left in place, empty.
    """
    for current_root, subdir_names, file_names in os.walk(folder):
        for file_name in file_names:
            os.unlink(os.path.join(current_root, file_name))
        for subdir_name in subdir_names:
            shutil.rmtree(os.path.join(current_root, subdir_name))
class cached_property(object):
"""A decorator that converts a function into a lazy property.

View File

@ -201,8 +201,7 @@ class RootHistory(object):
if history is not None:
return history
history = RootHistory(instance_id, user)
history.save()
return history
return history.save()
def load_via_context(cls, context, instance_id):

View File

@ -14,5 +14,3 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from api import API

View File

@ -25,8 +25,8 @@ from reddwarf.common import cfg
from reddwarf.common import exception
from reddwarf.common import rpc as rd_rpc
from reddwarf.guestagent import models as agent_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.rpc import proxy
from reddwarf.openstack.common.gettextutils import _
@ -75,6 +75,7 @@ class API(proxy.RpcProxy):
raise exception.GuestError(original_message=str(e))
def _cast_with_consumer(self, method_name, **kwargs):
conn = None
try:
conn = rpc.create_connection(new=True)
conn.create_consumer(self._get_routing_key(), None, fanout=False)
@ -154,7 +155,7 @@ class API(proxy.RpcProxy):
def delete_user(self, user):
"""Make an asynchronous call to delete an existing database user"""
LOG.debug(_("Deleting user %s for Instance %s"), user, self.id)
return self._cast("delete_user", user=user)
self._cast("delete_user", user=user)
def create_database(self, databases):
"""Make an asynchronous call to create a new database
@ -203,13 +204,16 @@ class API(proxy.RpcProxy):
return self._call("get_diagnostics", AGENT_LOW_TIMEOUT)
def prepare(self, memory_mb, databases, users,
device_path='/dev/vdb', mount_point='/mnt/volume'):
device_path='/dev/vdb', mount_point='/mnt/volume',
backup_id=None):
"""Make an asynchronous call to prepare the guest
as a database container"""
as a database container optionally includes a backup id for restores
"""
LOG.debug(_("Sending the call to prepare the Guest"))
self._cast_with_consumer(
"prepare", databases=databases, memory_mb=memory_mb,
users=users, device_path=device_path, mount_point=mount_point)
users=users, device_path=device_path, mount_point=mount_point,
backup_id=backup_id)
def restart(self):
"""Restart the MySQL server."""
@ -244,3 +248,8 @@ class API(proxy.RpcProxy):
def update_guest(self):
"""Make a synchronous call to update the guest agent."""
self._call("update_guest", AGENT_HIGH_TIMEOUT)
def create_backup(self, backup_id):
"""Make async call to create a full backup of this instance"""
LOG.debug(_("Create Backup %s for Instance %s"), backup_id, self.id)
self._cast("create_backup", backup_id=backup_id)

View File

@ -0,0 +1,27 @@
from reddwarf.guestagent.backup.backupagent import BackupAgent
AGENT = BackupAgent()
def backup(context, backup_id):
    """
    Main entry point for starting a backup based on the given backup id.
    This will create a backup for this DB instance and will then store the
    backup in a configured repository (e.g. Swift).

    :param context: the context token which contains the user's details
    :param backup_id: the id of the persisted backup object
    :returns: whatever BackupAgent.execute_backup returns
    """
    return AGENT.execute_backup(context, backup_id)
def restore(context, backup_id, restore_location):
    """
    Main entry point for restoring a backup based on the given backup id.
    This will transfer backup data to this instance and will carry out the
    appropriate restore procedure (e.g. mysqldump).

    :param context: the context token which contains the user's details
    :param backup_id: the id of the persisted backup object
    :param restore_location: filesystem path to restore the data into
    :returns: whatever BackupAgent.execute_restore returns
    """
    return AGENT.execute_restore(context, backup_id, restore_location)

View File

@ -0,0 +1,139 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from reddwarf.backup.models import DBBackup
from reddwarf.backup.models import BackupState
from reddwarf.common import cfg, utils
from reddwarf.guestagent.manager.mysql_service import ADMIN_USER_NAME
from reddwarf.guestagent.manager.mysql_service import get_auth_password
from reddwarf.guestagent.strategies.backup.base import BackupError
from reddwarf.guestagent.strategies.backup.base import UnknownBackupType
from reddwarf.guestagent.strategies.storage import get_storage_strategy
from reddwarf.guestagent.strategies.backup import get_backup_strategy
from reddwarf.guestagent.strategies.restore import get_restore_strategy
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
RUNNER = get_backup_strategy(CONF.backup_strategy,
CONF.backup_namespace)
BACKUP_CONTAINER = CONF.backup_swift_container
class BackupAgent(object):
def _get_restore_runner(self, backup_type):
    """Look up the RestoreRunner registered for ``backup_type``.

    :raises UnknownBackupType: when no restore strategy can be imported
        for the given type.
    """
    try:
        return get_restore_strategy(backup_type, CONF.restore_namespace)
    except ImportError:
        raise UnknownBackupType("Unknown Backup type: %s" % backup_type)
def execute_backup(self, context, backup_id, runner=RUNNER):
    """Run a full backup and stream it to the configured storage.

    Loads the backup record, marks it NEW then BUILDING, streams the
    runner's output to the storage strategy (e.g. Swift), and records
    the outcome on the record: COMPLETED with checksum/location/note,
    or FAILED on any error.

    :param context: request context carrying the user's credentials
    :param backup_id: id of the persisted backup record to execute
    :param runner: backup strategy class; defaults to the configured one
    :raises BackupError: if the storage strategy reports failure
    """
    LOG.debug("Searching for backup instance %s", backup_id)
    backup = DBBackup.find_by(id=backup_id)
    LOG.info("Setting task state to %s for instance %s",
             BackupState.NEW, backup.instance_id)
    backup.state = BackupState.NEW
    backup.save()
    LOG.info("Running backup %s", backup_id)
    user = ADMIN_USER_NAME
    password = get_auth_password()
    storage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)
    backup.state = BackupState.BUILDING
    backup.save()
    try:
        with runner(filename=backup_id, user=user, password=password)\
                as bkup:
            LOG.info("Starting Backup %s", backup_id)
            success, note, checksum, location = storage.save(
                BACKUP_CONTAINER,
                bkup)
            LOG.info("Backup %s completed status: %s", backup_id, success)
            LOG.info("Backup %s file size: %s", backup_id,
                     bkup.content_length)
            LOG.info('Backup %s file checksum: %s', backup_id, checksum)
            LOG.info('Backup %s location: %s', backup_id, location)
            if not success:
                # Raise with the note returned by storage.save(); the
                # model's ``note`` field has not been updated yet here,
                # so ``backup.note`` would report a stale value.
                raise BackupError(note)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error saving %s Backup", backup_id)
        backup.state = BackupState.FAILED
        backup.save()
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup.state = BackupState.COMPLETED
        backup.checksum = checksum
        backup.location = location
        backup.note = note
        backup.backup_type = bkup.backup_type
        backup.save()
def execute_restore(self, context, backup_id, restore_location):
    """Restore a backup's data into ``restore_location`` on this guest.

    Clears the target directory, downloads the backup stream from the
    configured storage strategy, and runs the restore strategy matching
    the backup's recorded type. Any exception is logged and re-raised.

    :param context: request context carrying the user's credentials
    :param backup_id: id of the persisted backup record to restore
    :param restore_location: filesystem path to restore the data into
    """
    try:
        LOG.debug("Cleaning out restore location: %s", restore_location)
        # Open up permissions first so clean_out can delete everything,
        # regardless of the previous owner.
        utils.execute_with_timeout("sudo", "chmod", "-R",
                                   "0777", restore_location)
        utils.clean_out(restore_location)
        LOG.debug("Finding backup %s to restore", backup_id)
        backup = DBBackup.find_by(id=backup_id)
        LOG.debug("Getting Restore Runner of type %s", backup.backup_type)
        restore_runner = self._get_restore_runner(backup.backup_type)
        LOG.debug("Getting Storage Strategy")
        storage_strategy = get_storage_strategy(
            CONF.storage_strategy,
            CONF.storage_namespace)(context)
        LOG.debug("Preparing storage to download stream.")
        download_stream = storage_strategy.load(context,
                                                backup.location,
                                                restore_runner.is_zipped)
        with restore_runner(restore_stream=download_stream,
                            restore_location=restore_location) as runner:
            LOG.debug("Restoring instance from backup %s to %s",
                      backup_id, restore_location)
            content_size = runner.restore()
            LOG.info("Restore from backup %s completed successfully to %s",
                     backup_id, restore_location)
            LOG.info("Restore size: %s", content_size)
            # Hand ownership back to mysql so the restored datadir is
            # usable by the server.
            utils.execute_with_timeout("sudo", "chown", "-R",
                                       "mysql", restore_location)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error restoring backup %s", backup_id)
        raise
    else:
        LOG.info("Restored Backup %s", backup_id)

View File

@ -25,31 +25,12 @@ handles RPC calls relating to Platform specific operations.
"""
import os
import re
import time
import uuid
from datetime import date
from sqlalchemy import create_engine
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from reddwarf import db
from reddwarf.common.exception import ProcessExecutionError
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.guestagent import query
from reddwarf.guestagent.db import models
from reddwarf.guestagent import pkg
from reddwarf.instance import models as rd_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
SERVICE_REGISTRY = {
'mysql': 'reddwarf.guestagent.manager.mysql.Manager', }
@ -66,11 +47,10 @@ class Interrogator(object):
raise RuntimeError("Filesystem not found (%s) : %s"
% (fs_path, err))
stats = out.split()
output = {}
output['block_size'] = int(stats[4])
output['total_blocks'] = int(stats[6])
output['free_blocks'] = int(stats[7])
output['total'] = int(stats[6]) * int(stats[4])
output['free'] = int(stats[7]) * int(stats[4])
output = {'block_size': int(stats[4]),
'total_blocks': int(stats[6]),
'free_blocks': int(stats[7]),
'total': int(stats[6]) * int(stats[4]),
'free': int(stats[7]) * int(stats[4])}
output['used'] = int(output['total']) - int(output['free'])
return output

View File

@ -1,33 +1,18 @@
import os
import re
import time
import uuid
from datetime import date
from sqlalchemy import create_engine
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from reddwarf import db
from reddwarf.common.exception import ProcessExecutionError
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.guestagent import dbaas
from reddwarf.guestagent import query
from reddwarf.guestagent.db import models
from reddwarf.guestagent import pkg
from reddwarf.guestagent import backup
from reddwarf.guestagent import volume
from reddwarf.instance import models as rd_models
from reddwarf.guestagent.manager.mysql_service import MySqlAppStatus
from reddwarf.guestagent.manager.mysql_service import MySqlAdmin
from reddwarf.guestagent.manager.mysql_service import MySqlApp
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import periodic_task
from reddwarf.instance import models as rd_models
LOG = logging.getLogger(__name__)
MYSQL_BASE_DIR = "/var/lib/mysql"
CONF = cfg.CONF
class Manager(periodic_task.PeriodicTasks):
@ -80,8 +65,13 @@ class Manager(periodic_task.PeriodicTasks):
def is_root_enabled(self, context):
return MySqlAdmin().is_root_enabled()
def _perform_restore(self, backup_id, context, restore_location):
    """Restore this instance's database from the given backup.

    :param backup_id: id of the persisted backup record to restore from
    :param context: request context carrying the user's credentials
    :param restore_location: filesystem path to restore the data into
    """
    # Translate the message template first, then interpolate: gettext
    # can only match the catalog entry for the un-interpolated string.
    LOG.info(_("Restoring database from backup %s") % backup_id)
    backup.restore(context, backup_id, restore_location)
    LOG.info(_("Restored database"))
def prepare(self, context, databases, memory_mb, users, device_path=None,
mount_point=None):
mount_point=None, backup_id=None):
"""Makes ready DBAAS on a Guest container."""
MySqlAppStatus.get().begin_mysql_install()
# status end_mysql_install set with secure()
@ -91,12 +81,13 @@ class Manager(periodic_task.PeriodicTasks):
device = volume.VolumeDevice(device_path)
device.format()
#if a /var/lib/mysql folder exists, back it up.
if os.path.exists(MYSQL_BASE_DIR):
if os.path.exists(CONF.mount_point):
#stop and do not update database
app.stop_db()
restart_mysql = True
#rsync exiting data
device.migrate_data(MYSQL_BASE_DIR)
if not backup_id:
restart_mysql = True
device.migrate_data(CONF.mount_point)
#mount the volume
device.mount(mount_point)
LOG.debug(_("Mounted the volume."))
@ -104,10 +95,22 @@ class Manager(periodic_task.PeriodicTasks):
if restart_mysql:
app.start_mysql()
app.install_if_needed()
LOG.info("Securing mysql now.")
if backup_id:
self._perform_restore(backup_id, context, CONF.mount_point)
LOG.info(_("Securing mysql now."))
app.secure(memory_mb)
self.create_database(context, databases)
self.create_user(context, users)
if backup_id and MySqlAdmin().is_root_enabled():
MySqlAdmin().report_root_enabled(context)
else:
app.secure_root()
app.complete_install_or_restart()
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)
LOG.info('"prepare" call has finished.')
def restart(self, context):
@ -126,850 +129,13 @@ class Manager(periodic_task.PeriodicTasks):
""" Gets the filesystem stats for the path given """
return dbaas.Interrogator().get_filesystem_volume_stats(fs_path)
ADMIN_USER_NAME = "os_admin"
FLUSH = text(query.FLUSH)
ENGINE = None
MYSQLD_ARGS = None
PREPARING = False
UUID = False
ORIG_MYCNF = "/etc/mysql/my.cnf"
FINAL_MYCNF = "/var/lib/mysql/my.cnf"
TMP_MYCNF = "/tmp/my.cnf.tmp"
DBAAS_MYCNF = "/etc/dbaas/my.cnf/my.cnf.%dM"
MYSQL_BASE_DIR = "/var/lib/mysql"
CONF = cfg.CONF
INCLUDE_MARKER_OPERATORS = {
True: ">=",
False: ">"
}
def generate_random_password():
    """Generate an unpredictable password from a random UUID4."""
    new_password = uuid.uuid4()
    return str(new_password)
def get_auth_password():
    """Read the admin password out of /etc/mysql/my.cnf via ``sudo awk``.

    :returns: the password value, stripped of surrounding whitespace
    :raises RuntimeError: if the awk invocation reported anything on stderr
    """
    # awk prints the third field of the first "password<TAB>=" line and exits.
    pwd, err = utils.execute_with_timeout(
        "sudo",
        "awk",
        "/password\\t=/{print $3; exit}",
        "/etc/mysql/my.cnf")
    if err:
        LOG.error(err)
        raise RuntimeError("Problem reading my.cnf! : %s" % err)
    return pwd.strip()
def get_engine():
    """Create (or return the cached) engine for the admin MySQL user.

    The engine is built once and memoized in the module-level ENGINE
    global; subsequent calls return the same instance.
    """
    #TODO(rnirmal):Based on permissions issues being resolved we may revert
    #url = URL(drivername='mysql', host='localhost',
    #          query={'read_default_file': '/etc/mysql/my.cnf'})
    global ENGINE
    if ENGINE:
        return ENGINE
    #ENGINE = create_engine(name_or_url=url)
    pwd = get_auth_password()
    # pool_recycle avoids MySQL's idle-connection timeout killing pooled
    # connections; KeepAliveConnection handles reconnects.
    ENGINE = create_engine("mysql://%s:%s@localhost:3306" %
                           (ADMIN_USER_NAME, pwd.strip()),
                           pool_recycle=7200, echo=True,
                           listeners=[KeepAliveConnection()])
    return ENGINE
def load_mysqld_options():
    """Parse ``mysqld --print-defaults`` into an options dict.

    :returns: a mapping of option name (leading dashes stripped) to its
        value, or None for valueless flags; None if mysqld cannot be run.
    """
    try:
        out, err = utils.execute("/usr/sbin/mysqld", "--print-defaults",
                                 run_as_root=True, root_helper="sudo")
    except ProcessExecutionError:
        return None
    # The second output line holds the space-separated option list.
    options = {}
    for token in re.split("\n", out)[1].split():
        if "=" in token:
            key, value = token.split("=")
            options[key.lstrip("--")] = value
        else:
            options[token.lstrip("--")] = None
    return options
class MySqlAppStatus(object):
"""
Answers the question "what is the status of the MySQL application on
this box?" The answer can be that the application is not installed, or
the state of the application is determined by calling a series of
commands.
This class also handles saving and load the status of the MySQL application
in the database.
The status is updated whenever the update() method is called, except
if the state is changed to building or restart mode using the
"begin_mysql_install" and "begin_mysql_restart" methods.
The building mode persists in the database while restarting mode does
not (so if there is a Python Pete crash update() will set the status to
show a failure).
These modes are exited and functionality to update() returns when
end_install_or_restart() is called, at which point the status again
reflects the actual status of the MySQL app.
"""
_instance = None
def __init__(self):
if self._instance is not None:
raise RuntimeError("Cannot instantiate twice.")
self.status = self._load_status()
self.restart_mode = False
def begin_mysql_install(self):
"""Called right before MySQL is prepared."""
self.set_status(rd_models.ServiceStatuses.BUILDING)
def begin_mysql_restart(self):
"""Called before restarting MySQL."""
self.restart_mode = True
def end_install_or_restart(self):
"""Called after MySQL is installed or restarted.
Updates the database with the actual MySQL status.
def create_backup(self, context, backup_id):
"""
LOG.info("Ending install_if_needed or restart.")
self.restart_mode = False
real_status = self._get_actual_db_status()
LOG.info("Updating status to %s" % real_status)
self.set_status(real_status)
Entry point for initiating a backup for this guest agents db instance.
The call currently blocks until the backup is complete or errors. If
device_path is specified, it will be mounted based to a point specified
in configuration.
@classmethod
def get(cls):
if not cls._instance:
cls._instance = MySqlAppStatus()
return cls._instance
def _get_actual_db_status(self):
global MYSQLD_ARGS
try:
out, err = utils.execute_with_timeout(
"/usr/bin/mysqladmin",
"ping", run_as_root=True, root_helper="sudo")
LOG.info("Service Status is RUNNING.")
return rd_models.ServiceStatuses.RUNNING
except ProcessExecutionError as e:
LOG.error("Process execution ")
try:
out, err = utils.execute_with_timeout("/bin/ps", "-C",
"mysqld", "h")
pid = out.split()[0]
# TODO(rnirmal): Need to create new statuses for instances
# where the mysql service is up, but unresponsive
LOG.info("Service Status is BLOCKED.")
return rd_models.ServiceStatuses.BLOCKED
except ProcessExecutionError as e:
if not MYSQLD_ARGS:
MYSQLD_ARGS = load_mysqld_options()
pid_file = MYSQLD_ARGS.get('pid_file',
'/var/run/mysqld/mysqld.pid')
if os.path.exists(pid_file):
LOG.info("Service Status is CRASHED.")
return rd_models.ServiceStatuses.CRASHED
else:
LOG.info("Service Status is SHUTDOWN.")
return rd_models.ServiceStatuses.SHUTDOWN
@property
def is_mysql_installed(self):
:param backup_id: the db instance id of the backup task
"""
True if MySQL app should be installed and attempts to ascertain
its status won't result in nonsense.
"""
return (self.status is not None and
self.status != rd_models.ServiceStatuses.BUILDING and
self.status != rd_models.ServiceStatuses.FAILED)
@property
def _is_mysql_restarting(self):
return self.restart_mode
@property
def is_mysql_running(self):
"""True if MySQL is running."""
return (self.status is not None and
self.status == rd_models.ServiceStatuses.RUNNING)
@staticmethod
def _load_status():
"""Loads the status from the database."""
id = CONF.guest_id
return rd_models.InstanceServiceStatus.find_by(instance_id=id)
def set_status(self, status):
"""Changes the status of the MySQL app in the database."""
db_status = self._load_status()
db_status.set_status(status)
db_status.save()
self.status = status
def update(self):
"""Find and report status of MySQL on this machine.
The database is update and the status is also returned.
"""
if self.is_mysql_installed and not self._is_mysql_restarting:
LOG.info("Determining status of MySQL app...")
status = self._get_actual_db_status()
self.set_status(status)
else:
LOG.info("MySQL is not installed or is in restart mode, so for "
"now we'll skip determining the status of MySQL on this "
"box.")
def wait_for_real_status_to_change_to(self, status, max_time,
update_db=False):
"""
Waits the given time for the real status to change to the one
specified. Does not update the publicly viewable status Unless
"update_db" is True.
"""
WAIT_TIME = 3
waited_time = 0
while(waited_time < max_time):
time.sleep(WAIT_TIME)
waited_time += WAIT_TIME
LOG.info("Waiting for MySQL status to change to %s..." % status)
actual_status = self._get_actual_db_status()
LOG.info("MySQL status was %s after %d seconds."
% (actual_status, waited_time))
if actual_status == status:
if update_db:
self.set_status(actual_status)
return True
LOG.error("Time out while waiting for MySQL app status to change!")
return False
class LocalSqlClient(object):
    """A sqlalchemy wrapper to manage transactions.

    Use as a context manager: entering opens a connection and begins a
    transaction; exiting commits (optionally flushing privileges first)
    or rolls back if an exception occurred.
    """

    def __init__(self, engine, use_flush=True):
        # use_flush: execute FLUSH PRIVILEGES before committing, so MySQL
        # picks up grant-table changes.
        self.engine = engine
        self.use_flush = use_flush

    def __enter__(self):
        self.conn = self.engine.connect()
        self.trans = self.conn.begin()
        return self.conn

    def __exit__(self, type, value, traceback):
        if self.trans:
            if type is not None:  # An error occurred
                self.trans.rollback()
            else:
                if self.use_flush:
                    self.conn.execute(FLUSH)
                self.trans.commit()
        self.conn.close()

    def execute(self, t, **kwargs):
        """Execute a statement, rolling back the transaction on failure."""
        try:
            return self.conn.execute(t, kwargs)
        except Exception:
            # Narrowed from a bare ``except:``; the error is re-raised
            # after the rollback either way, and __exit__ still rolls
            # back for non-Exception BaseExceptions.
            self.trans.rollback()
            self.trans = None
            raise
class MySqlAdmin(object):
"""Handles administrative tasks on the MySQL database."""
def _associate_dbs(self, user):
"""Internal. Given a MySQLUser, populate its databases attribute."""
LOG.debug("Associating dbs to user %s at %s" % (user.name, user.host))
with LocalSqlClient(get_engine()) as client:
q = query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee", "table_schema"]
q.where = ["privilege_type != 'USAGE'"]
t = text(str(q))
db_result = client.execute(t)
for db in db_result:
LOG.debug("\t db: %s" % db)
if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
mysql_db = models.MySQLDatabase()
mysql_db.name = db['table_schema']
user.databases.append(mysql_db.serialize())
def change_passwords(self, users):
"""Change the passwords of one or more existing users."""
LOG.debug("Changing the password of some users.""")
LOG.debug("Users is %s" % users)
with LocalSqlClient(get_engine()) as client:
for item in users:
LOG.debug("\tUser: %s" % item)
user_dict = {'_name': item['name'],
'_host': item['host'],
'_password': item['password'],
}
user = models.MySQLUser()
user.deserialize(user_dict)
LOG.debug("\tDeserialized: %s" % user.__dict__)
uu = query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
def create_database(self, databases):
"""Create the list of specified databases"""
with LocalSqlClient(get_engine()) as client:
for item in databases:
mydb = models.MySQLDatabase()
mydb.deserialize(item)
cd = query.CreateDatabase(mydb.name,
mydb.character_set,
mydb.collate)
t = text(str(cd))
client.execute(t)
def create_user(self, users):
"""Create users and grant them privileges for the
specified databases"""
with LocalSqlClient(get_engine()) as client:
for item in users:
user = models.MySQLUser()
user.deserialize(item)
# TODO(cp16net):Should users be allowed to create users
# 'os_admin' or 'debian-sys-maint'
g = query.Grant(user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
for database in user.databases:
mydb = models.MySQLDatabase()
mydb.deserialize(database)
g = query.Grant(permissions='ALL', database=mydb.name,
user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
def delete_database(self, database):
    """Drop the database described by the serialized ``database`` dict."""
    with LocalSqlClient(get_engine()) as client:
        mydb = models.MySQLDatabase()
        mydb.deserialize(database)
        client.execute(text(str(query.DropDatabase(mydb.name))))
def delete_user(self, user):
    """Drop the MySQL user described by the serialized ``user`` dict."""
    with LocalSqlClient(get_engine()) as client:
        mysql_user = models.MySQLUser()
        mysql_user.deserialize(user)
        drop_stmt = query.DropUser(mysql_user.name, host=mysql_user.host)
        client.execute(text(str(drop_stmt)))
def enable_root(self):
"""Enable the root user global access and/or reset the root password"""
user = models.MySQLUser()
user.name = "root"
user.host = "%"
user.password = generate_random_password()
with LocalSqlClient(get_engine()) as client:
try:
cu = query.CreateUser(user.name, host=user.host)
t = text(str(cu))
client.execute(t, **cu.keyArgs)
except exc.OperationalError as err:
# Ignore, user is already created, just reset the password
# TODO(rnirmal): More fine grained error checking later on
LOG.debug(err)
with LocalSqlClient(get_engine()) as client:
uu = query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s" %
(CONF.root_grant, CONF.root_grant_option))
g = query.Grant(permissions=CONF.root_grant,
user=user.name,
host=user.host,
grant_option=CONF.root_grant_option,
clear=user.password)
t = text(str(g))
client.execute(t)
return user.serialize()
def get_user(self, username, hostname):
    """Return the serialized user matching (username, hostname), or None."""
    found = self._get_user(username, hostname)
    if found:
        return found.serialize()
    return None
def _get_user(self, username, hostname):
    """Return the single MySQLUser matching the criteria, or None.

    :param username: MySQL user name to look up
    :param hostname: exact Host value to match
    :raises exception.BadRequest: if ``username`` fails model validation
    """
    user = models.MySQLUser()
    try:
        user.name = username  # Could possibly throw a BadRequest here.
    except ValueError as ve:
        # Was ``except Exception.ValueError``, which is not a valid
        # exception reference and could never catch the validator's
        # ValueError.
        raise exception.BadRequest("Username %s is not valid: %s"
                                   % (username, ve.message))
    with LocalSqlClient(get_engine()) as client:
        q = query.Query()
        q.columns = ['User', 'Host', 'Password']
        q.tables = ['mysql.user']
        q.where = ["Host != 'localhost'",
                   "User = '%s'" % username,
                   "Host = '%s'" % hostname,
                   ]
        q.order = ['User', 'Host']
        t = text(str(q))
        result = client.execute(t).fetchall()
        LOG.debug("Result: %s" % result)
        if len(result) != 1:
            # Either no such user or an ambiguous match.
            return None
        found_user = result[0]
        user.password = found_user['Password']
        user.host = found_user['Host']
        self._associate_dbs(user)
        return user
def grant_access(self, username, hostname, databases):
"""Give a user permission to use a given database."""
user = self._get_user(username, hostname)
with LocalSqlClient(get_engine()) as client:
for database in databases:
g = query.Grant(permissions='ALL', database=database,
user=user.name, host=user.host,
hashed=user.password)
t = text(str(g))
client.execute(t)
def is_root_enabled(self):
    """Return True if root access is enabled; False otherwise."""
    with LocalSqlClient(get_engine()) as client:
        result = client.execute(text(query.ROOT_ENABLED))
        LOG.debug("result = " + str(result))
        return result.rowcount != 0
def list_databases(self, limit=None, marker=None, include_marker=False):
"""List databases the user created on this mysql instance"""
LOG.debug(_("---Listing Databases---"))
databases = []
with LocalSqlClient(get_engine()) as client:
# If you have an external volume mounted at /var/lib/mysql
# the lost+found directory will show up in mysql as a database
# which will create errors if you try to do any database ops
# on it. So we remove it here if it exists.
q = query.Query()
q.columns = [
'schema_name as name',
'default_character_set_name as charset',
'default_collation_name as collation',
]
q.tables = ['information_schema.schemata']
q.where = ["schema_name NOT IN ("
"'mysql', 'information_schema', "
"'lost+found', '#mysql50#lost+found'"
")"]
q.order = ['schema_name ASC']
if limit:
q.limit = limit + 1
if marker:
q.where.append("schema_name %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
t = text(str(q))
database_names = client.execute(t)
next_marker = None
LOG.debug(_("database_names = %r") % database_names)
for count, database in enumerate(database_names):
if count >= limit:
break
LOG.debug(_("database = %s ") % str(database))
mysql_db = models.MySQLDatabase()
mysql_db.name = database[0]
next_marker = mysql_db.name
mysql_db.character_set = database[1]
mysql_db.collate = database[2]
databases.append(mysql_db.serialize())
LOG.debug(_("databases = ") + str(databases))
if database_names.rowcount <= limit:
next_marker = None
return databases, next_marker
def list_users(self, limit=None, marker=None, include_marker=False):
"""List users that have access to the database"""
'''
SELECT
User,
Host,
Marker
FROM
(SELECT
User,
Host,
CONCAT(User, '@', Host) as Marker
FROM mysql.user
ORDER BY 1, 2) as innerquery
WHERE
Marker > :marker
ORDER BY
Marker
LIMIT :limit;
'''
LOG.debug(_("---Listing Users---"))
users = []
with LocalSqlClient(get_engine()) as client:
mysql_user = models.MySQLUser()
iq = query.Query() # Inner query.
iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"]
iq.tables = ['mysql.user']
iq.order = ['User', 'Host']
innerquery = str(iq).rstrip(';')
oq = query.Query() # Outer query.
oq.columns = ['User', 'Host', 'Marker']
oq.tables = ['(%s) as innerquery' % innerquery]
oq.where = ["Host != 'localhost'"]
oq.order = ['Marker']
if marker:
oq.where.append("Marker %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
if limit:
oq.limit = limit + 1
t = text(str(oq))
result = client.execute(t)
next_marker = None
LOG.debug("result = " + str(result))
for count, row in enumerate(result):
if count >= limit:
break
LOG.debug("user = " + str(row))
mysql_user = models.MySQLUser()
mysql_user.name = row['User']
mysql_user.host = row['Host']
self._associate_dbs(mysql_user)
next_marker = row['Marker']
users.append(mysql_user.serialize())
if result.rowcount <= limit:
next_marker = None
LOG.debug("users = " + str(users))
return users, next_marker
def revoke_access(self, username, hostname, database):
"""Give a user permission to use a given database."""
user = self._get_user(username, hostname)
with LocalSqlClient(get_engine()) as client:
r = query.Revoke(database=database, user=user.name, host=user.host,
hashed=user.password)
t = text(str(r))
client.execute(t)
def list_access(self, username, hostname):
"""Show all the databases to which the user has more than
USAGE granted."""
user = self._get_user(username, hostname)
return user.databases
class KeepAliveConnection(interfaces.PoolListener):
    """
    A connection pool listener that ensures live connections are returned
    from the connection pool at checkout. This alleviates the problem of
    MySQL connections timing out.
    """

    def checkout(self, dbapi_con, con_record, con_proxy):
        """Event triggered when a connection is checked out from the pool"""
        try:
            try:
                dbapi_con.ping(False)
            except TypeError:
                # Older MySQLdb versions take no argument to ping().
                dbapi_con.ping()
        except dbapi_con.OperationalError as ex:
            # Use "as" (not the legacy comma) for consistency with the
            # rest of the file. Error codes below mean the connection is
            # gone; tell SQLAlchemy to discard it and retry.
            if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
                raise exc.DisconnectionError()
            else:
                raise
class MySqlApp(object):
    """Prepares DBaaS on a Guest container."""

    TIME_OUT = 1000
    MYSQL_PACKAGE_VERSION = CONF.mysql_pkg

    def __init__(self, status):
        """ By default login with root no password for initial setup. """
        # status: MySqlAppStatus used to publish service state changes.
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status

    def _create_admin_user(self, client, password):
        """
        Create a os_admin user with a random password
        with all privileges similar to the root user
        """
        localhost = "localhost"
        cu = query.CreateUser(ADMIN_USER_NAME, host=localhost)
        t = text(str(cu))
        client.execute(t, **cu.keyArgs)
        uu = query.UpdateUser(ADMIN_USER_NAME, host=localhost, clear=password)
        t = text(str(uu))
        client.execute(t)
        g = query.Grant(permissions='ALL', user=ADMIN_USER_NAME,
                        host=localhost, grant_option=True, clear=password)
        t = text(str(g))
        client.execute(t)

    @staticmethod
    def _generate_root_password(client):
        """ Generate and set a random root password and forget about it. """
        localhost = "localhost"
        uu = query.UpdateUser("root", host=localhost,
                              clear=generate_random_password())
        t = text(str(uu))
        client.execute(t)

    def install_if_needed(self):
        """Prepare the guest machine with a secure mysql server installation"""
        LOG.info(_("Preparing Guest as MySQL Server"))
        if not self.is_installed():
            self._install_mysql()
        LOG.info(_("Dbaas install_if_needed complete"))

    def secure(self, memory_mb):
        """Secure the install: randomize root, drop anonymous/remote root,
        create os_admin, then restart with a my.cnf sized for memory_mb."""
        LOG.info(_("Generating root password..."))
        admin_password = generate_random_password()
        # A fresh install allows passwordless root from localhost.
        engine = create_engine("mysql://root:@localhost:3306", echo=True)
        with LocalSqlClient(engine) as client:
            self._generate_root_password(client)
            self._remove_anonymous_user(client)
            self._remove_remote_root_access(client)
            self._create_admin_user(client, admin_password)
        self.stop_db()
        self._write_mycnf(memory_mb, admin_password)
        self.start_mysql()
        self.status.end_install_or_restart()
        LOG.info(_("Dbaas secure complete."))

    def _install_mysql(self):
        """Install mysql server. The current version is 5.5"""
        LOG.debug(_("Installing mysql server"))
        pkg.pkg_install(self.MYSQL_PACKAGE_VERSION, self.TIME_OUT)
        LOG.debug(_("Finished installing mysql server"))
        #TODO(rnirmal): Add checks to make sure the package got installed

    def _enable_mysql_on_boot(self):
        """
        There is a difference between the init.d mechanism and the upstart
        The stock mysql uses the upstart mechanism, therefore, there is a
        mysql.conf file responsible for the job. to toggle enable/disable
        on boot one needs to modify this file. Percona uses the init.d
        mechanism and there is no mysql.conf file. Instead, the update-rc.d
        command needs to be used to modify the /etc/rc#.d/[S/K]##mysql links
        """
        LOG.info("Enabling mysql on boot.")
        conf = "/etc/init/mysql.conf"
        if os.path.isfile(conf):
            # Upstart: remove the "manual" stanza so the job auto-starts.
            command = "sudo sed -i '/^manual$/d' %(conf)s"
            command = command % locals()
        else:
            command = "sudo update-rc.d mysql enable"
        utils.execute_with_timeout(command, shell=True)

    def _disable_mysql_on_boot(self):
        """
        There is a difference between the init.d mechanism and the upstart
        The stock mysql uses the upstart mechanism, therefore, there is a
        mysql.conf file responsible for the job. to toggle enable/disable
        on boot one needs to modify this file. Percona uses the init.d
        mechanism and there is no mysql.conf file. Instead, the update-rc.d
        command needs to be used to modify the /etc/rc#.d/[S/K]##mysql links
        """
        LOG.info("Disabling mysql on boot.")
        conf = "/etc/init/mysql.conf"
        if os.path.isfile(conf):
            # Upstart: appending "manual" stops the job from auto-starting.
            command = '''sudo sh -c "echo manual >> %(conf)s"'''
            command = command % locals()
        else:
            command = "sudo update-rc.d mysql disable"
        utils.execute_with_timeout(command, shell=True)

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        """Stop MySQL and wait for it to reach SHUTDOWN.

        :param update_db: persist the new status to the reddwarf database.
        :param do_not_start_on_reboot: also disable start-on-boot.
        :raises RuntimeError: if MySQL does not reach SHUTDOWN in time.
        """
        LOG.info(_("Stopping mysql..."))
        if do_not_start_on_reboot:
            self._disable_mysql_on_boot()
        utils.execute_with_timeout("sudo", "/etc/init.d/mysql", "stop")
        if not self.status.wait_for_real_status_to_change_to(
                rd_models.ServiceStatuses.SHUTDOWN,
                self.state_change_wait_time, update_db):
            LOG.error(_("Could not stop MySQL!"))
            self.status.end_install_or_restart()
            raise RuntimeError("Could not stop MySQL!")

    def _remove_anonymous_user(self, client):
        # Drop the anonymous accounts created by a default install.
        t = text(query.REMOVE_ANON)
        client.execute(t)

    def _remove_remote_root_access(self, client):
        # Drop root accounts reachable from anywhere but localhost.
        t = text(query.REMOVE_ROOT)
        client.execute(t)

    def restart(self):
        """Restart MySQL, suppressing status updates while restarting."""
        try:
            self.status.begin_mysql_restart()
            self.stop_db()
            self.start_mysql()
        finally:
            # Always leave restart mode, even if stop/start failed.
            self.status.end_install_or_restart()

    def _replace_mycnf_with_template(self, template_path, original_path):
        """Back up original_path (datestamped) and copy the template over it.

        No-op when the template does not exist.
        """
        LOG.debug("replacing the mycnf with template")
        LOG.debug("template_path(%s) original_path(%s)"
                  % (template_path, original_path))
        if os.path.isfile(template_path):
            if os.path.isfile(original_path):
                utils.execute_with_timeout(
                    "sudo", "mv", original_path,
                    "%(name)s.%(date)s" %
                    {'name': original_path, 'date':
                        date.today().isoformat()})
            utils.execute_with_timeout("sudo", "cp", template_path,
                                       original_path)

    def _write_temp_mycnf_with_admin_account(self, original_file_path,
                                             temp_file_path, password):
        """Copy my.cnf to a temp file, injecting os_admin credentials into
        the [client] section so local tools can log in without prompting."""
        utils.execute_with_timeout("sudo", "chmod", "0711", MYSQL_BASE_DIR)
        # Bug fix: use context managers so the file handles are closed
        # even if an error occurs mid-copy (previously leaked on error).
        with open(original_file_path, 'r') as mycnf_file:
            with open(temp_file_path, 'w') as tmp_file:
                for line in mycnf_file:
                    tmp_file.write(line)
                    if "[client]" in line:
                        tmp_file.write("user\t\t= %s\n" % ADMIN_USER_NAME)
                        tmp_file.write("password\t= %s\n" % password)

    def wipe_ib_logfiles(self):
        """Destroys the iblogfiles.
        If for some reason the selected log size in the conf changes from the
        current size of the files MySQL will fail to start, so we delete the
        files to be safe.
        """
        LOG.info(_("Wiping ib_logfiles..."))
        for index in range(2):
            try:
                utils.execute_with_timeout("sudo", "rm", "%s/ib_logfile%d"
                                           % (MYSQL_BASE_DIR, index))
            except ProcessExecutionError as pe:
                # On restarts, sometimes these are wiped. So it can be a race
                # to have MySQL start up before it's restarted and these have
                # to be deleted. That's why its ok if they aren't found.
                LOG.error("Could not delete logfile!")
                LOG.error(pe)
                if "No such file or directory" not in str(pe):
                    raise

    def _write_mycnf(self, update_memory_mb, admin_password):
        """
        Install the set of mysql my.cnf templates from dbaas-mycnf package.
        The package generates a template suited for the current
        container flavor. Update the os_admin user and password
        to the my.cnf file for direct login from localhost
        """
        LOG.info(_("Writing my.cnf templates."))
        if admin_password is None:
            admin_password = get_auth_password()
        # As of right here, the admin_password contains the password to be
        # applied to the my.cnf file, whether it was there before (and we
        # passed it in) or we generated a new one just now (because we didn't
        # find it).
        LOG.debug(_("Installing my.cnf templates"))
        pkg.pkg_install("dbaas-mycnf", self.TIME_OUT)
        LOG.info(_("Replacing my.cnf with template."))
        template_path = DBAAS_MYCNF % update_memory_mb
        # replace my.cnf with template.
        self._replace_mycnf_with_template(template_path, ORIG_MYCNF)
        LOG.info(_("Writing new temp my.cnf."))
        self._write_temp_mycnf_with_admin_account(ORIG_MYCNF, TMP_MYCNF,
                                                  admin_password)
        # permissions work-around
        LOG.info(_("Moving tmp into final."))
        utils.execute_with_timeout("sudo", "mv", TMP_MYCNF, FINAL_MYCNF)
        LOG.info(_("Removing original my.cnf."))
        utils.execute_with_timeout("sudo", "rm", ORIG_MYCNF)
        LOG.info(_("Symlinking final my.cnf."))
        utils.execute_with_timeout("sudo", "ln", "-s", FINAL_MYCNF, ORIG_MYCNF)
        self.wipe_ib_logfiles()

    def start_mysql(self, update_db=False):
        LOG.info(_("Starting mysql..."))
        # This is the site of all the trouble in the restart tests.
        # Essentially what happens is that mysql start fails, but does not
        # die. It is then impossible to kill the original, so
        self._enable_mysql_on_boot()
        try:
            utils.execute_with_timeout("sudo", "/etc/init.d/mysql", "start")
        except ProcessExecutionError:
            # it seems mysql (percona, at least) might come back with [Fail]
            # but actually come up ok. we're looking into the timing issue on
            # parallel, but for now, we'd like to give it one more chance to
            # come up. so regardless of the execute_with_timeout() response,
            # we'll assume mysql comes up and check it's status for a while.
            pass
        if not self.status.wait_for_real_status_to_change_to(
                rd_models.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start up of MySQL failed!"))
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
            except ProcessExecutionError as p:
                # Fix: use "except ... as" instead of the legacy comma form.
                LOG.error("Error killing stalled mysql start command.")
                LOG.error(p)
            # There's nothing more we can do...
            self.status.end_install_or_restart()
            raise RuntimeError("Could not start MySQL!")

    def start_db_with_conf_changes(self, updated_memory_mb):
        """Rewrite my.cnf for the new memory size and start MySQL.

        :raises RuntimeError: if MySQL is already running.
        """
        LOG.info(_("Starting mysql with conf changes to memory(%s)...")
                 % updated_memory_mb)
        LOG.info(_("inside the guest - self.status.is_mysql_running(%s)...")
                 % self.status.is_mysql_running)
        if self.status.is_mysql_running:
            LOG.error(_("Cannot execute start_db_with_conf_changes because "
                        "MySQL state == %s!") % self.status)
            raise RuntimeError("MySQL not stopped.")
        LOG.info(_("Initiating config."))
        self._write_mycnf(updated_memory_mb, None)
        self.start_mysql(True)

    def is_installed(self):
        """Return True if the MySQL package is installed on this guest."""
        #(cp16net) could raise an exception, does it need to be handled here?
        version = pkg.pkg_version(self.MYSQL_PACKAGE_VERSION)
        return version is not None
backup.backup(context, backup_id)

View File

@ -0,0 +1,894 @@
import os
import re
import time
import uuid
import sqlalchemy
from datetime import date
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from reddwarf.common import cfg
from reddwarf.common import utils as utils
from reddwarf.common import exception
from reddwarf.guestagent import query
from reddwarf.guestagent.db import models
from reddwarf.guestagent import pkg
from reddwarf.instance import models as rd_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
from reddwarf.extensions.mysql.models import RootHistory
ADMIN_USER_NAME = "os_admin"
LOG = logging.getLogger(__name__)
FLUSH = text(query.FLUSH)
ENGINE = None
MYSQLD_ARGS = None
PREPARING = False
UUID = False
ORIG_MYCNF = "/etc/mysql/my.cnf"
FINAL_MYCNF = "/var/lib/mysql/my.cnf"
TMP_MYCNF = "/tmp/my.cnf.tmp"
DBAAS_MYCNF = "/etc/dbaas/my.cnf/my.cnf.%dM"
MYSQL_BASE_DIR = "/var/lib/mysql"
CONF = cfg.CONF
INCLUDE_MARKER_OPERATORS = {
True: ">=",
False: ">"
}
def generate_random_password():
    """Return a freshly generated random password (a UUID4 string)."""
    return "%s" % uuid.uuid4()
def get_auth_password():
    """Read the os_admin password out of /etc/mysql/my.cnf via sudo awk."""
    pwd, err = utils.execute_with_timeout(
        "sudo", "awk", "/password\\t=/{print $3; exit}",
        "/etc/mysql/my.cnf")
    if err:
        LOG.error(err)
        raise RuntimeError("Problem reading my.cnf! : %s" % err)
    return pwd.strip()
def get_engine():
    """Return the module-wide SQLAlchemy engine, creating it on first use.

    Connects as the os_admin user with the password read from my.cnf.
    """
    #TODO(rnirmal):Based on permissions issues being resolved we may revert
    #url = URL(drivername='mysql', host='localhost',
    #          query={'read_default_file': '/etc/mysql/my.cnf'})
    global ENGINE
    if not ENGINE:
        #ENGINE = create_engine(name_or_url=url)
        pwd = get_auth_password()
        ENGINE = sqlalchemy.create_engine(
            "mysql://%s:%s@localhost:3306" % (ADMIN_USER_NAME, pwd.strip()),
            pool_recycle=7200, echo=True,
            listeners=[KeepAliveConnection()])
    return ENGINE
def load_mysqld_options():
    """Return mysqld's effective defaults as an {option: value} dict.

    Runs "mysqld --print-defaults" and parses the second line of output.
    Value-less flags map to None. Returns None when the command fails.
    """
    try:
        out, err = utils.execute("/usr/sbin/mysqld", "--print-defaults",
                                 run_as_root=True, root_helper="sudo")
        arglist = re.split("\n", out)[1].split()
        args = {}
        for item in arglist:
            if "=" in item:
                # Split on the first '=' only, so option values that
                # themselves contain '=' are preserved intact (previously
                # this raised ValueError on such items).
                key, value = item.split("=", 1)
                args[key.lstrip("--")] = value
            else:
                args[item.lstrip("--")] = None
        return args
    except exception.ProcessExecutionError:
        return None
class MySqlAppStatus(object):
    """
    Answers the question "what is the status of the MySQL application on
    this box?" The answer can be that the application is not installed, or
    the state of the application is determined by calling a series of
    commands.
    This class also handles saving and load the status of the MySQL application
    in the database.
    The status is updated whenever the update() method is called, except
    if the state is changed to building or restart mode using the
    "begin_mysql_install" and "begin_mysql_restart" methods.
    The building mode persists in the database while restarting mode does
    not (so if there is a Python Pete crash update() will set the status to
    show a failure).
    These modes are exited and functionality to update() returns when
    end_install_or_restart() is called, at which point the status again
    reflects the actual status of the MySQL app.
    """
    # Process-wide singleton instance; obtain via MySqlAppStatus.get().
    _instance = None
    def __init__(self):
        # Guard against direct double-instantiation; use get() instead.
        if self._instance is not None:
            raise RuntimeError("Cannot instantiate twice.")
        self.status = self._load_status()
        self.restart_mode = False
    def begin_mysql_install(self):
        """Called right before MySQL is prepared."""
        # BUILDING is persisted to the database, unlike restart mode.
        self.set_status(rd_models.ServiceStatuses.BUILDING)
    def begin_mysql_restart(self):
        """Called before restarting MySQL."""
        # While True, update() skips overwriting the published status.
        self.restart_mode = True
    def end_install_or_restart(self):
        """Called after MySQL is installed or restarted.
        Updates the database with the actual MySQL status.
        """
        LOG.info("Ending install_if_needed or restart.")
        self.restart_mode = False
        real_status = self._get_actual_db_status()
        LOG.info("Updating status to %s" % real_status)
        self.set_status(real_status)
    @classmethod
    def get(cls):
        # Lazily create and cache the singleton.
        if not cls._instance:
            cls._instance = MySqlAppStatus()
        return cls._instance
    def _get_actual_db_status(self):
        # Probe the box: RUNNING if mysqladmin ping answers; BLOCKED if a
        # mysqld process exists but does not answer; CRASHED if only a
        # stale pid file remains; SHUTDOWN otherwise.
        global MYSQLD_ARGS
        try:
            out, err = utils.execute_with_timeout(
                "/usr/bin/mysqladmin",
                "ping", run_as_root=True, root_helper="sudo")
            LOG.info("Service Status is RUNNING.")
            return rd_models.ServiceStatuses.RUNNING
        except exception.ProcessExecutionError as e:
            LOG.error("Process execution ")
            try:
                out, err = utils.execute_with_timeout("/bin/ps", "-C",
                                                      "mysqld", "h")
                pid = out.split()[0]
                # TODO(rnirmal): Need to create new statuses for instances
                # where the mysql service is up, but unresponsive
                LOG.info("Service Status is BLOCKED.")
                return rd_models.ServiceStatuses.BLOCKED
            except exception.ProcessExecutionError as e:
                # NOTE(review): load_mysqld_options() can return None on
                # failure; the .get() below would then raise — confirm.
                if not MYSQLD_ARGS:
                    MYSQLD_ARGS = load_mysqld_options()
                pid_file = MYSQLD_ARGS.get('pid_file',
                                           '/var/run/mysqld/mysqld.pid')
                if os.path.exists(pid_file):
                    LOG.info("Service Status is CRASHED.")
                    return rd_models.ServiceStatuses.CRASHED
                else:
                    LOG.info("Service Status is SHUTDOWN.")
                    return rd_models.ServiceStatuses.SHUTDOWN
    @property
    def is_mysql_installed(self):
        """
        True if MySQL app should be installed and attempts to ascertain
        its status won't result in nonsense.
        """
        return (self.status is not None and
                self.status != rd_models.ServiceStatuses.BUILDING and
                self.status != rd_models.ServiceStatuses.FAILED)
    @property
    def _is_mysql_restarting(self):
        # True while between begin_mysql_restart() and
        # end_install_or_restart().
        return self.restart_mode
    @property
    def is_mysql_running(self):
        """True if MySQL is running."""
        return (self.status is not None and
                self.status == rd_models.ServiceStatuses.RUNNING)
    @staticmethod
    def _load_status():
        """Loads the status from the database."""
        inst_id = CONF.guest_id
        return rd_models.InstanceServiceStatus.find_by(instance_id=inst_id)
    def set_status(self, status):
        """Changes the status of the MySQL app in the database."""
        db_status = self._load_status()
        db_status.set_status(status)
        db_status.save()
        self.status = status
    def update(self):
        """Find and report status of MySQL on this machine.
        The database is update and the status is also returned.
        """
        if self.is_mysql_installed and not self._is_mysql_restarting:
            LOG.info("Determining status of MySQL app...")
            status = self._get_actual_db_status()
            self.set_status(status)
        else:
            LOG.info("MySQL is not installed or is in restart mode, so for "
                     "now we'll skip determining the status of MySQL on this "
                     "box.")
    def wait_for_real_status_to_change_to(self, status, max_time,
                                          update_db=False):
        """
        Waits the given time for the real status to change to the one
        specified. Does not update the publicly viewable status Unless
        "update_db" is True.
        """
        # Poll every WAIT_TIME seconds until max_time is exhausted.
        WAIT_TIME = 3
        waited_time = 0
        while waited_time < max_time:
            time.sleep(WAIT_TIME)
            waited_time += WAIT_TIME
            LOG.info("Waiting for MySQL status to change to %s..." % status)
            actual_status = self._get_actual_db_status()
            LOG.info("MySQL status was %s after %d seconds."
                     % (actual_status, waited_time))
            if actual_status == status:
                if update_db:
                    self.set_status(actual_status)
                return True
        LOG.error("Time out while waiting for MySQL app status to change!")
        return False
class LocalSqlClient(object):
    """A sqlalchemy wrapper to manage transactions.

    Usage::

        with LocalSqlClient(get_engine()) as conn:
            conn.execute(...)

    On clean exit the transaction is committed (preceded by an optional
    FLUSH PRIVILEGES); on exception it is rolled back. The connection is
    always closed.
    """

    def __init__(self, engine, use_flush=True):
        self.engine = engine
        self.use_flush = use_flush

    def __enter__(self):
        self.conn = self.engine.connect()
        self.trans = self.conn.begin()
        return self.conn

    def __exit__(self, type, value, traceback):
        if self.trans:
            if type is not None:  # An error occurred
                self.trans.rollback()
            else:
                if self.use_flush:
                    # Make privilege changes visible immediately.
                    self.conn.execute(FLUSH)
                self.trans.commit()
        self.conn.close()

    def execute(self, t, **kwargs):
        try:
            return self.conn.execute(t, kwargs)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit are not swallowed into transaction handling.
            # Roll back and disarm __exit__ before re-raising so the
            # failed transaction is never committed.
            self.trans.rollback()
            self.trans = None
            raise
class MySqlAdmin(object):
    """Handles administrative tasks on the MySQL database."""

    def _associate_dbs(self, user):
        """Internal. Given a MySQLUser, populate its databases attribute."""
        LOG.debug("Associating dbs to user %s at %s" % (user.name, user.host))
        with LocalSqlClient(get_engine()) as client:
            q = query.Query()
            q.columns = ["grantee", "table_schema"]
            q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
            q.group = ["grantee", "table_schema"]
            q.where = ["privilege_type != 'USAGE'"]
            t = text(str(q))
            db_result = client.execute(t)
            for db in db_result:
                LOG.debug("\t db: %s" % db)
                if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
                    mysql_db = models.MySQLDatabase()
                    mysql_db.name = db['table_schema']
                    user.databases.append(mysql_db.serialize())

    def change_passwords(self, users):
        """Change the passwords of one or more existing users."""
        LOG.debug("Changing the password of some users.")
        LOG.debug("Users is %s" % users)
        with LocalSqlClient(get_engine()) as client:
            for item in users:
                LOG.debug("\tUser: %s" % item)
                user_dict = {'_name': item['name'],
                             '_host': item['host'],
                             '_password': item['password'],
                             }
                user = models.MySQLUser()
                user.deserialize(user_dict)
                LOG.debug("\tDeserialized: %s" % user.__dict__)
                uu = query.UpdateUser(user.name, host=user.host,
                                      clear=user.password)
                t = text(str(uu))
                client.execute(t)

    def create_database(self, databases):
        """Create the list of specified databases"""
        with LocalSqlClient(get_engine()) as client:
            for item in databases:
                mydb = models.MySQLDatabase()
                mydb.deserialize(item)
                cd = query.CreateDatabase(mydb.name,
                                         mydb.character_set,
                                         mydb.collate)
                t = text(str(cd))
                client.execute(t)

    def create_user(self, users):
        """Create users and grant them privileges for the
        specified databases"""
        with LocalSqlClient(get_engine()) as client:
            for item in users:
                user = models.MySQLUser()
                user.deserialize(item)
                # TODO(cp16net):Should users be allowed to create users
                # 'os_admin' or 'debian-sys-maint'
                g = query.Grant(user=user.name, host=user.host,
                                clear=user.password)
                t = text(str(g))
                client.execute(t)
                for database in user.databases:
                    mydb = models.MySQLDatabase()
                    mydb.deserialize(database)
                    g = query.Grant(permissions='ALL', database=mydb.name,
                                    user=user.name, host=user.host,
                                    clear=user.password)
                    t = text(str(g))
                    client.execute(t)

    def delete_database(self, database):
        """Delete the specified database"""
        with LocalSqlClient(get_engine()) as client:
            mydb = models.MySQLDatabase()
            mydb.deserialize(database)
            dd = query.DropDatabase(mydb.name)
            t = text(str(dd))
            client.execute(t)

    def delete_user(self, user):
        """Delete the specified users"""
        with LocalSqlClient(get_engine()) as client:
            mysql_user = models.MySQLUser()
            mysql_user.deserialize(user)
            du = query.DropUser(mysql_user.name, host=mysql_user.host)
            t = text(str(du))
            client.execute(t)

    def get_user(self, username, hostname):
        """Return the serialized user matching username@hostname, or None."""
        user = self._get_user(username, hostname)
        if not user:
            return None
        return user.serialize()

    def _get_user(self, username, hostname):
        """Return a single user matching the criteria"""
        user = models.MySQLUser()
        try:
            user.name = username  # Could possibly throw a BadRequest here.
        except ValueError as ve:
            # Bug fix: was "except exceptions.ValueError", but no
            # "exceptions" name is imported in this module, so the handler
            # itself raised NameError instead of producing a BadRequest.
            raise exception.BadRequest("Username %s is not valid: %s"
                                       % (username, ve.message))
        with LocalSqlClient(get_engine()) as client:
            q = query.Query()
            q.columns = ['User', 'Host', 'Password']
            q.tables = ['mysql.user']
            # NOTE(review): username/hostname are interpolated straight
            # into SQL; they are assumed validated upstream — confirm.
            q.where = ["Host != 'localhost'",
                       "User = '%s'" % username,
                       "Host = '%s'" % hostname,
                       ]
            q.order = ['User', 'Host']
            t = text(str(q))
            result = client.execute(t).fetchall()
            LOG.debug("Result: %s" % result)
            if len(result) != 1:
                return None
            found_user = result[0]
            user.password = found_user['Password']
            user.host = found_user['Host']
            self._associate_dbs(user)
            return user

    def grant_access(self, username, hostname, databases):
        """Give a user permission to use a given database."""
        user = self._get_user(username, hostname)
        with LocalSqlClient(get_engine()) as client:
            for database in databases:
                g = query.Grant(permissions='ALL', database=database,
                                user=user.name, host=user.host,
                                hashed=user.password)
                t = text(str(g))
                client.execute(t)

    def is_root_enabled(self):
        """Return True if root access is enabled; False otherwise."""
        return MySqlRootAccess.is_root_enabled()

    def enable_root(self):
        """Enable the root user global access and/or reset the root password"""
        return MySqlRootAccess.enable_root()

    def report_root_enabled(self, context=None):
        """Records in the Root History that the root is enabled"""
        return MySqlRootAccess.report_root_enabled(context)

    def list_databases(self, limit=None, marker=None, include_marker=False):
        """List databases the user created on this mysql instance.

        Returns (databases, next_marker); next_marker is None when no
        further pages exist.
        """
        LOG.debug(_("---Listing Databases---"))
        databases = []
        with LocalSqlClient(get_engine()) as client:
            # If you have an external volume mounted at /var/lib/mysql
            # the lost+found directory will show up in mysql as a database
            # which will create errors if you try to do any database ops
            # on it. So we remove it here if it exists.
            q = query.Query()
            q.columns = [
                'schema_name as name',
                'default_character_set_name as charset',
                'default_collation_name as collation',
            ]
            q.tables = ['information_schema.schemata']
            q.where = ["schema_name NOT IN ("
                       "'mysql', 'information_schema', "
                       "'lost+found', '#mysql50#lost+found'"
                       ")"]
            q.order = ['schema_name ASC']
            if limit:
                # Fetch one extra row to detect whether more pages exist.
                q.limit = limit + 1
            if marker:
                q.where.append("schema_name %s '%s'" %
                               (INCLUDE_MARKER_OPERATORS[include_marker],
                                marker))
            t = text(str(q))
            database_names = client.execute(t)
            next_marker = None
            LOG.debug(_("database_names = %r") % database_names)
            for count, database in enumerate(database_names):
                # Bug fix: guard limit=None. In Python 2 "count >= None" is
                # always True, so an unlimited listing returned nothing.
                if limit is not None and count >= limit:
                    break
                LOG.debug(_("database = %s ") % str(database))
                mysql_db = models.MySQLDatabase()
                mysql_db.name = database[0]
                next_marker = mysql_db.name
                mysql_db.character_set = database[1]
                mysql_db.collate = database[2]
                databases.append(mysql_db.serialize())
        LOG.debug(_("databases = ") + str(databases))
        if limit is None or database_names.rowcount <= limit:
            next_marker = None
        return databases, next_marker

    def list_users(self, limit=None, marker=None, include_marker=False):
        """List users that have access to the database.

        Pagination is implemented with the query::

            SELECT User, Host, Marker
            FROM (SELECT User, Host, CONCAT(User, '@', Host) as Marker
                  FROM mysql.user ORDER BY 1, 2) as innerquery
            WHERE Marker > :marker
            ORDER BY Marker LIMIT :limit;

        Returns (users, next_marker); next_marker is None when no further
        pages exist.
        """
        LOG.debug(_("---Listing Users---"))
        users = []
        with LocalSqlClient(get_engine()) as client:
            iq = query.Query()  # Inner query.
            iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"]
            iq.tables = ['mysql.user']
            iq.order = ['User', 'Host']
            innerquery = str(iq).rstrip(';')
            oq = query.Query()  # Outer query.
            oq.columns = ['User', 'Host', 'Marker']
            oq.tables = ['(%s) as innerquery' % innerquery]
            oq.where = ["Host != 'localhost'"]
            oq.order = ['Marker']
            if marker:
                oq.where.append("Marker %s '%s'" %
                                (INCLUDE_MARKER_OPERATORS[include_marker],
                                 marker))
            if limit:
                # Fetch one extra row to detect whether more pages exist.
                oq.limit = limit + 1
            t = text(str(oq))
            result = client.execute(t)
            next_marker = None
            LOG.debug("result = " + str(result))
            for count, row in enumerate(result):
                # Bug fix: guard limit=None (see list_databases).
                if limit is not None and count >= limit:
                    break
                LOG.debug("user = " + str(row))
                mysql_user = models.MySQLUser()
                mysql_user.name = row['User']
                mysql_user.host = row['Host']
                self._associate_dbs(mysql_user)
                next_marker = row['Marker']
                users.append(mysql_user.serialize())
        if limit is None or result.rowcount <= limit:
            next_marker = None
        LOG.debug("users = " + str(users))
        return users, next_marker

    def revoke_access(self, username, hostname, database):
        """Revoke a user's permission to use a given database.

        (Docstring fix: previous text was copy-pasted from grant_access.)
        """
        user = self._get_user(username, hostname)
        with LocalSqlClient(get_engine()) as client:
            r = query.Revoke(database=database, user=user.name, host=user.host,
                             hashed=user.password)
            t = text(str(r))
            client.execute(t)

    def list_access(self, username, hostname):
        """Show all the databases to which the user has more than
        USAGE granted."""
        user = self._get_user(username, hostname)
        return user.databases
class KeepAliveConnection(interfaces.PoolListener):
    """
    A connection pool listener that ensures live connections are returned
    from the connection pool at checkout. This alleviates the problem of
    MySQL connections timing out.
    """

    def checkout(self, dbapi_con, con_record, con_proxy):
        """Event triggered when a connection is checked out from the pool"""
        try:
            try:
                dbapi_con.ping(False)
            except TypeError:
                # Older MySQLdb versions take no argument to ping().
                dbapi_con.ping()
        except dbapi_con.OperationalError as ex:
            # Use "as" (not the legacy comma) for consistency with the
            # rest of the file. Error codes below mean the connection is
            # gone; tell SQLAlchemy to discard it and retry.
            if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
                raise exc.DisconnectionError()
            else:
                raise
class MySqlApp(object):
"""Prepares DBaaS on a Guest container."""
TIME_OUT = 1000
MYSQL_PACKAGE_VERSION = CONF.mysql_pkg
    def __init__(self, status):
        """ By default login with root no password for initial setup. """
        # status: MySqlAppStatus tracker used to publish service state.
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status
    def _create_admin_user(self, client, password):
        """
        Create a os_admin user with a random password
        with all privileges similar to the root user
        """
        localhost = "localhost"
        # GRANT ALL ... WITH GRANT OPTION using the clear-text password;
        # presumably this also creates the account via IDENTIFIED BY on
        # MySQL 5.x — confirm against query.Grant's generated SQL.
        g = query.Grant(permissions='ALL', user=ADMIN_USER_NAME,
                        host=localhost, grant_option=True, clear=password)
        t = text(str(g))
        client.execute(t)
@staticmethod
def _generate_root_password(client):
""" Generate and set a random root password and forget about it. """
localhost = "localhost"
uu = query.UpdateUser("root", host=localhost,
clear=generate_random_password())
t = text(str(uu))
client.execute(t)
def install_if_needed(self):
"""Prepare the guest machine with a secure mysql server installation"""
LOG.info(_("Preparing Guest as MySQL Server"))
if not self.is_installed():
self._install_mysql()
LOG.info(_("Dbaas install_if_needed complete"))
    def complete_install_or_restart(self):
        # Publish the actual MySQL status now that install/restart is done.
        self.status.end_install_or_restart()
    def secure(self, memory_mb):
        """Secure a fresh install: drop anonymous users, create os_admin,
        then restart MySQL with a my.cnf sized for memory_mb."""
        LOG.info(_("Generating admin password..."))
        admin_password = generate_random_password()
        # A fresh install allows passwordless root from localhost.
        engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
                                          echo=True)
        with LocalSqlClient(engine) as client:
            self._remove_anonymous_user(client)
            self._create_admin_user(client, admin_password)
        self.stop_db()
        self._write_mycnf(memory_mb, admin_password)
        self.start_mysql()
        LOG.info(_("Dbaas secure complete."))
    def secure_root(self):
        """Re-secure the root account (used after restoring a backup):
        randomize root's password and drop remote root access."""
        engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
                                          echo=True)
        with LocalSqlClient(engine) as client:
            LOG.info(_("Preserving root access from restore"))
            self._generate_root_password(client)
            self._remove_remote_root_access(client)
    def _install_mysql(self):
        """Install mysql server. The current version is 5.5"""
        LOG.debug(_("Installing mysql server"))
        # Blocks up to TIME_OUT seconds while the package manager runs.
        pkg.pkg_install(self.MYSQL_PACKAGE_VERSION, self.TIME_OUT)
        LOG.debug(_("Finished installing mysql server"))
        #TODO(rnirmal): Add checks to make sure the package got installed
def _enable_mysql_on_boot(self):
"""
There is a difference between the init.d mechanism and the upstart
The stock mysql uses the upstart mechanism, therefore, there is a
mysql.conf file responsible for the job. to toggle enable/disable
on boot one needs to modify this file. Percona uses the init.d
mechanism and there is no mysql.conf file. Instead, the update-rc.d
command needs to be used to modify the /etc/rc#.d/[S/K]##mysql links
"""
LOG.info("Enabling mysql on boot.")
conf = "/etc/init/mysql.conf"
if os.path.isfile(conf):
command = "sudo sed -i '/^manual$/d' %(conf)s"
command = command % locals()
else:
command = "sudo update-rc.d mysql enable"
utils.execute_with_timeout(command, shell=True)
def _disable_mysql_on_boot(self):
    """Prevent mysqld from starting automatically at boot.

    Mirror image of _enable_mysql_on_boot: appending ``manual`` to the
    upstart conf disables the stock mysql job; for Percona (init.d)
    ``update-rc.d mysql disable`` flips the rc links.
    """
    LOG.info("Disabling mysql on boot.")
    conf = "/etc/init/mysql.conf"
    if os.path.isfile(conf):
        # Explicit mapping instead of the fragile locals() substitution.
        command = '''sudo sh -c "echo manual >> %(conf)s"''' % {'conf': conf}
    else:
        command = "sudo update-rc.d mysql disable"
    utils.execute_with_timeout(command, shell=True)
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
    """Stop mysqld and wait until it reports SHUTDOWN.

    :param update_db: if True, propagate the new status to the
        reddwarf database.
    :param do_not_start_on_reboot: also disable boot-time autostart
        (used e.g. before a restore so mysqld stays down).
    :raises RuntimeError: if mysql does not reach SHUTDOWN in time.
    """
    LOG.info(_("Stopping mysql..."))
    if do_not_start_on_reboot:
        self._disable_mysql_on_boot()
    utils.execute_with_timeout("sudo", "/etc/init.d/mysql", "stop")
    if not self.status.wait_for_real_status_to_change_to(
            rd_models.ServiceStatuses.SHUTDOWN,
            self.state_change_wait_time, update_db):
        LOG.error(_("Could not stop MySQL!"))
        # Close out the install/restart state so the instance is not
        # stuck in a transitional status.
        self.status.end_install_or_restart()
        raise RuntimeError("Could not stop MySQL!")
def _remove_anonymous_user(self, client):
    """Drop MySQL's built-in anonymous user account."""
    client.execute(text(query.REMOVE_ANON))

def _remove_remote_root_access(self, client):
    """Revoke root logins originating from remote hosts."""
    client.execute(text(query.REMOVE_ROOT))
def restart(self):
    """Restart mysqld, keeping the status tracker consistent even when
    the stop or start step fails."""
    try:
        self.status.begin_mysql_restart()
        self.stop_db()
        self.start_mysql()
    finally:
        # Always close the restart state so the instance cannot get
        # stuck in a transitional status on failure.
        self.status.end_install_or_restart()
def _replace_mycnf_with_template(self, template_path, original_path):
    """Copy the packaged my.cnf template over the live config, keeping
    a dated backup of the previous file."""
    LOG.debug("replacing the mycnf with template")
    LOG.debug("template_path(%s) original_path(%s)"
              % (template_path, original_path))
    if os.path.isfile(template_path):
        if os.path.isfile(original_path):
            # Preserve the old config as <name>.<ISO date>.
            utils.execute_with_timeout(
                "sudo", "mv", original_path,
                "%(name)s.%(date)s" %
                {'name': original_path, 'date':
                    date.today().isoformat()})
        utils.execute_with_timeout("sudo", "cp", template_path,
                                   original_path)
def _write_temp_mycnf_with_admin_account(self, original_file_path,
                                         temp_file_path, password):
    """Copy my.cnf to a temp file, injecting the os_admin credentials
    right after the [client] section header so local admin logins work.

    Uses context managers so both file handles are closed even if an
    IOError occurs mid-copy (the previous open()/close() pairs leaked
    on error).
    """
    utils.execute_with_timeout("sudo", "chmod", "0711", MYSQL_BASE_DIR)
    with open(original_file_path, 'r') as mycnf_file:
        with open(temp_file_path, 'w') as tmp_file:
            for line in mycnf_file:
                tmp_file.write(line)
                if "[client]" in line:
                    tmp_file.write("user\t\t= %s\n" % ADMIN_USER_NAME)
                    tmp_file.write("password\t= %s\n" % password)
def wipe_ib_logfiles(self):
    """Destroys the iblogfiles.

    If for some reason the selected log size in the conf changes from
    the current size of the files MySQL will fail to start, so we
    delete the files to be safe.
    """
    LOG.info(_("Wiping ib_logfiles..."))
    # InnoDB keeps exactly two redo logs: ib_logfile0 and ib_logfile1.
    for index in range(2):
        try:
            utils.execute_with_timeout("sudo", "rm", "%s/ib_logfile%d"
                                       % (MYSQL_BASE_DIR, index))
        except exception.ProcessExecutionError as pe:
            # On restarts, sometimes these are wiped. So it can be a race
            # to have MySQL start up before it's restarted and these have
            # to be deleted. That's why its ok if they aren't found.
            LOG.error("Could not delete logfile!")
            LOG.error(pe)
            if "No such file or directory" not in str(pe):
                raise
def _write_mycnf(self, update_memory_mb, admin_password):
    """
    Install the set of mysql my.cnf templates from dbaas-mycnf package.

    The package generates a template suited for the current
    container flavor. Update the os_admin user and password
    to the my.cnf file for direct login from localhost.

    :param update_memory_mb: flavor RAM (MB) used to select the template.
    :param admin_password: os_admin password; None means re-read it from
        the existing configuration.
    """
    LOG.info(_("Writing my.cnf templates."))
    if admin_password is None:
        admin_password = get_auth_password()
    # As of right here, the admin_password contains the password to be
    # applied to the my.cnf file, whether it was there before (and we
    # passed it in) or we generated a new one just now (because we didn't
    # find it).
    LOG.debug(_("Installing my.cnf templates"))
    pkg.pkg_install("dbaas-mycnf", self.TIME_OUT)
    LOG.info(_("Replacing my.cnf with template."))
    template_path = DBAAS_MYCNF % update_memory_mb
    # replace my.cnf with template.
    self._replace_mycnf_with_template(template_path, ORIG_MYCNF)
    LOG.info(_("Writing new temp my.cnf."))
    self._write_temp_mycnf_with_admin_account(ORIG_MYCNF, TMP_MYCNF,
                                              admin_password)
    # permissions work-around: stage the credentialed file elsewhere,
    # then symlink the canonical path to it.
    LOG.info(_("Moving tmp into final."))
    utils.execute_with_timeout("sudo", "mv", TMP_MYCNF, FINAL_MYCNF)
    LOG.info(_("Removing original my.cnf."))
    utils.execute_with_timeout("sudo", "rm", ORIG_MYCNF)
    LOG.info(_("Symlinking final my.cnf."))
    utils.execute_with_timeout("sudo", "ln", "-s", FINAL_MYCNF, ORIG_MYCNF)
    self.wipe_ib_logfiles()
def start_mysql(self, update_db=False):
    """Start mysqld and wait until it reports RUNNING.

    If it never comes up, any stalled process is killed by hand and
    RuntimeError is raised, since a half-started mysqld cannot be
    managed.

    :param update_db: if True, propagate the new status to the
        reddwarf database.
    """
    LOG.info(_("Starting mysql..."))
    # This is the site of all the trouble in the restart tests.
    # Essentially what happens is that mysql start fails, but does not
    # die. It is then impossible to kill the original, so
    self._enable_mysql_on_boot()
    try:
        utils.execute_with_timeout("sudo", "/etc/init.d/mysql", "start")
    except exception.ProcessExecutionError:
        # it seems mysql (percona, at least) might come back with [Fail]
        # but actually come up ok. we're looking into the timing issue on
        # parallel, but for now, we'd like to give it one more chance to
        # come up. so regardless of the execute_with_timeout() response,
        # we'll assume mysql comes up and check its status for a while.
        pass
    if not self.status.wait_for_real_status_to_change_to(
            rd_models.ServiceStatuses.RUNNING,
            self.state_change_wait_time, update_db):
        LOG.error(_("Start up of MySQL failed!"))
        # If it won't start, but won't die either, kill it by hand so we
        # don't let a rogue process wander around.
        try:
            utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
        # "except X as e" (not the Python-2-only "except X, e") to match
        # the exception style used elsewhere in this module.
        except exception.ProcessExecutionError as p:
            LOG.error("Error killing stalled mysql start command.")
            LOG.error(p)
        # There's nothing more we can do...
        self.status.end_install_or_restart()
        raise RuntimeError("Could not start MySQL!")
def start_db_with_conf_changes(self, updated_memory_mb):
    """Rewrite my.cnf for a new memory size and then start mysql.

    Only valid while mysql is stopped (e.g. during a flavor resize).

    :raises RuntimeError: if mysql is already running.
    """
    LOG.info(_("Starting mysql with conf changes to memory(%s)...")
             % updated_memory_mb)
    LOG.info(_("inside the guest - self.status.is_mysql_running(%s)...")
             % self.status.is_mysql_running)
    if self.status.is_mysql_running:
        LOG.error(_("Cannot execute start_db_with_conf_changes because "
                    "MySQL state == %s!") % self.status)
        raise RuntimeError("MySQL not stopped.")
    LOG.info(_("Initiating config."))
    # None => the admin password is re-read from the existing config.
    self._write_mycnf(updated_memory_mb, None)
    self.start_mysql(True)
def is_installed(self):
    """Return True if the MySQL server package is installed."""
    #(cp16net) could raise an exception, does it need to be handled here?
    version = pkg.pkg_version(self.MYSQL_PACKAGE_VERSION)
    # "is not None" is the idiomatic spelling of "not ... is None".
    return version is not None
class MySqlRootAccess(object):
    """Operations for querying, enabling and recording MySQL root access."""

    @classmethod
    def is_root_enabled(cls):
        """Return True if root access is enabled; False otherwise."""
        with LocalSqlClient(get_engine()) as client:
            t = text(query.ROOT_ENABLED)
            result = client.execute(t)
            LOG.debug("Found %s with remote root access" % result.rowcount)
            return result.rowcount != 0

    @classmethod
    def enable_root(cls):
        """Enable the root user global access and/or reset the root
        password.

        Returns the serialized root user, including the new password.
        (Stray debug ``print client`` statements were removed here.)
        """
        user = models.MySQLUser()
        user.name = "root"
        user.host = "%"
        user.password = generate_random_password()
        with LocalSqlClient(get_engine()) as client:
            try:
                cu = query.CreateUser(user.name, host=user.host)
                t = text(str(cu))
                client.execute(t, **cu.keyArgs)
            except exc.OperationalError as err:
                # Ignore, user is already created, just reset the password
                # TODO(rnirmal): More fine grained error checking later on
                LOG.debug(err)
        with LocalSqlClient(get_engine()) as client:
            uu = query.UpdateUser(user.name, host=user.host,
                                  clear=user.password)
            t = text(str(uu))
            client.execute(t)
            LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s" %
                      (CONF.root_grant, CONF.root_grant_option))
            g = query.Grant(permissions=CONF.root_grant,
                            user=user.name,
                            host=user.host,
                            grant_option=CONF.root_grant_option,
                            clear=user.password)
            t = text(str(g))
            client.execute(t)
            return user.serialize()

    @classmethod
    def report_root_enabled(cls, context):
        """Record in the root history that root was enabled."""
        return RootHistory.create(context, CONF.guest_id, 'root')

View File

@ -23,7 +23,6 @@ from reddwarf.db import models as dbmodels
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

View File

@ -0,0 +1,13 @@
#Copyright 2013 Hewlett-Packard Development Company, L.P.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

View File

@ -0,0 +1,25 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from reddwarf.guestagent.strategy import Strategy
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_backup_strategy(backup_driver, ns=__name__):
    """Resolve and return the backup strategy class named by
    ``backup_driver`` within the namespace ``ns``."""
    LOG.debug("Getting backup strategy: %s" % backup_driver)
    strategy_cls = Strategy.get_strategy(backup_driver, ns)
    return strategy_cls

View File

@ -0,0 +1,144 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import hashlib
from reddwarf.guestagent.strategy import Strategy
from reddwarf.openstack.common import log as logging
from reddwarf.common import cfg, utils
from eventlet.green import subprocess
CONF = cfg.CONF
# Read in multiples of 128 bytes, since this is the size of an md5 digest block
# this allows us to update that while streaming the file.
#http://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python
CHUNK_SIZE = CONF.backup_chunk_size
MAX_FILE_SIZE = CONF.backup_segment_max_size
BACKUP_CONTAINER = CONF.backup_swift_container
BACKUP_USE_GZIP = CONF.backup_use_gzip_compression
LOG = logging.getLogger(__name__)
class BackupError(Exception):
    """Raised when the backup subprocess exits with an error."""


class UnknownBackupType(Exception):
    """Raised when a backup's type does not match a known strategy."""
class BackupRunner(Strategy):
    """Base class for Backup Strategy implementations.

    Runs the backup command in a subprocess and exposes a file-like
    ``read()`` so the output can be streamed in bounded segments to a
    storage backend (each segment gets its own checksum).
    """
    __strategy_type__ = 'backup_runner'
    __strategy_ns__ = 'reddwarf.guestagent.strategies.backup'

    # The actual system call to run the backup
    cmd = None
    is_zipped = BACKUP_USE_GZIP

    def __init__(self, filename, **kwargs):
        self.filename = filename
        self.container = BACKUP_CONTAINER
        # how much we have written
        self.content_length = 0
        self.segment_length = 0
        self.process = None
        self.pid = None
        self.writer = None
        self.file_number = 0
        self.written = -1
        self.end_of_file = False
        self.end_of_segment = False
        # checksum covers the whole stream; schecksum only the current
        # segment (reset at each segment boundary).
        self.checksum = hashlib.md5()
        self.schecksum = hashlib.md5()
        self.command = self.cmd % kwargs
        super(BackupRunner, self).__init__()

    @property
    def backup_type(self):
        return type(self).__name__

    def run(self):
        # NOTE(review): shell=True with interpolated kwargs; callers
        # must not pass untrusted values into the command template.
        self.process = subprocess.Popen(self.command, shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        self.pid = self.process.pid

    def __enter__(self):
        """Start up the process"""
        self.run()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Clean up everything."""
        if exc_type is not None:
            return False
        # Test the value, not hasattr(): __init__ always assigns
        # self.process (initially None), so hasattr() was always True.
        if self.process:
            try:
                self.process.terminate()
            except OSError:
                # Already stopped
                pass
            utils.raise_if_process_errored(self.process, BackupError)
        return True

    @property
    def segment(self):
        return '%s_%08d' % (self.filename, self.file_number)

    @property
    def manifest(self):
        """Subclasses may overwrite this to declare a format (.gz, .tar)"""
        return self.filename

    @property
    def prefix(self):
        return '%s/%s_' % (self.container, self.filename)

    @property
    def zip_cmd(self):
        return ' | gzip' if self.is_zipped else ''

    @property
    def zip_manifest(self):
        return '.gz' if self.is_zipped else ''

    def read(self, chunk_size):
        """Wrap self.process.stdout.read to allow for segmentation.

        Returns '' both at a segment boundary (bumping file_number and
        flagging end_of_segment) and at end of stream (flagging
        end_of_file).
        """
        if self.end_of_segment:
            self.segment_length = 0
            self.schecksum = hashlib.md5()
            self.end_of_segment = False
        # Start a new segment file once another chunk could push us past
        # the maximum object size.
        if self.segment_length > (MAX_FILE_SIZE - chunk_size):
            self.file_number += 1
            self.end_of_segment = True
            return ''
        # Honor the caller's requested chunk size; previously the
        # chunk_size argument was ignored in favor of CHUNK_SIZE.
        chunk = self.process.stdout.read(chunk_size)
        if not chunk:
            self.end_of_file = True
            return ''
        self.checksum.update(chunk)
        self.schecksum.update(chunk)
        self.content_length += len(chunk)
        self.segment_length += len(chunk)
        return chunk

View File

@ -0,0 +1,57 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from reddwarf.guestagent.strategies.backup import base
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class MySQLDump(base.BackupRunner):
    """Backup strategy that streams a full mysqldump of all databases."""
    __strategy_name__ = 'mysqldump'

    @property
    def cmd(self):
        # Assemble the dump command; credentials are filled in from the
        # kwargs passed to the runner.
        dump_cmd = ('/usr/bin/mysqldump'
                    ' --all-databases'
                    ' --opt'
                    ' --password=%(password)s'
                    ' -u %(user)s')
        return dump_cmd + self.zip_cmd

    @property
    def manifest(self):
        """Manifest name: the plain filename plus the gzip suffix, if any."""
        pattern = '%s' + self.zip_manifest
        return pattern % self.filename
class InnoBackupEx(base.BackupRunner):
    """Backup strategy that streams an innobackupex xbstream archive."""
    __strategy_name__ = 'innobackupex'

    @property
    def cmd(self):
        # innobackupex log output goes to a scratch file; the archive
        # itself is streamed on stdout.
        stream_cmd = ('sudo innobackupex'
                      ' --stream=xbstream'
                      ' /var/lib/mysql 2>/tmp/innobackupex.log')
        return stream_cmd + self.zip_cmd

    @property
    def manifest(self):
        """Manifest name: filename.xbstream plus the gzip suffix, if any."""
        pattern = '%s.xbstream' + self.zip_manifest
        return pattern % self.filename

View File

@ -0,0 +1,23 @@
#Copyright 2013 Hewlett-Packard Development Company, L.P.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from reddwarf.guestagent.strategy import Strategy
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_restore_strategy(restore_driver, ns=__name__):
    """Resolve and return the restore strategy class named by
    ``restore_driver`` within the namespace ``ns``."""
    LOG.debug("Getting restore strategy: %s" % restore_driver)
    strategy_cls = Strategy.get_strategy(restore_driver, ns)
    return strategy_cls

View File

@ -0,0 +1,139 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from reddwarf.guestagent.strategy import Strategy
from reddwarf.common import cfg, utils
from reddwarf.openstack.common import log as logging
from eventlet.green import subprocess
import tempfile
import pexpect
import os
import glob
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CHUNK_SIZE = CONF.backup_chunk_size
RESET_ROOT_MYSQL_COMMAND = """
UPDATE mysql.user SET Password=PASSWORD('') WHERE User='root';
FLUSH PRIVILEGES;
"""
class RestoreError(Exception):
    """Error running the Restore Command."""
class RestoreRunner(Strategy):
    """Base class for Restore Strategy implementations.

    Restores a database from a previous backup by piping the backup
    stream into ``restore_cmd`` and then (optionally) running
    ``prepare_cmd``.
    """
    __strategy_type__ = 'restore_runner'
    __strategy_ns__ = 'reddwarf.guestagent.strategies.restore'

    # The actual system calls to run the restore and prepare
    restore_cmd = None
    prepare_cmd = None

    # The backup format type
    restore_type = None

    def __init__(self, restore_stream, **kwargs):
        self.restore_stream = restore_stream
        self.restore_location = kwargs.get('restore_location',
                                           '/var/lib/mysql')
        self.restore_cmd = self.restore_cmd % kwargs
        # Test the value, not hasattr(): the class attribute is always
        # present (None), so hasattr() was always True and "None % kwargs"
        # raised TypeError for strategies without a prepare step.
        self.prepare_cmd = self.prepare_cmd % kwargs \
            if self.prepare_cmd else None
        super(RestoreRunner, self).__init__()

    def __enter__(self):
        """Return the runner"""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Clean up everything."""
        if exc_type is not None:
            return False
        if hasattr(self, 'process'):
            try:
                self.process.terminate()
            except OSError:
                # Already stopped
                pass
            utils.raise_if_process_errored(self.process, RestoreError)
        return True

    def restore(self):
        """Run the full restore: pre hook, stream restore, prepare step,
        post hook. Returns the number of bytes restored."""
        self._pre_restore()
        content_length = self._run_restore()
        self._run_prepare()
        self._post_restore()
        return content_length

    def _run_restore(self):
        """Pipe the backup stream, chunk by chunk, into restore_cmd."""
        with self.restore_stream as stream:
            self.process = subprocess.Popen(self.restore_cmd, shell=True,
                                            stdin=subprocess.PIPE,
                                            stderr=subprocess.PIPE)
            self.pid = self.process.pid
            content_length = 0
            chunk = stream.read(CHUNK_SIZE)
            while chunk:
                self.process.stdin.write(chunk)
                content_length += len(chunk)
                chunk = stream.read(CHUNK_SIZE)
            self.process.stdin.close()
            LOG.info("Restored %s bytes from swift via xbstream."
                     % content_length)
        return content_length

    def _run_prepare(self):
        # Only strategies that define a prepare step (e.g. innobackupex
        # --apply-log) have a command here; see the __init__ note on why
        # this must not be a hasattr() check.
        if self.prepare_cmd:
            LOG.info("Running innobackupex prepare...")
            self.prep_retcode = utils.execute(self.prepare_cmd,
                                              shell=True)
            LOG.info("Innobackupex prepare finished successfully")

    def _spawn_with_init_file(self, temp_file):
        """Start a temporary mysqld with --init-file to reset the root
        password, then shut it down again."""
        child = pexpect.spawn("sudo mysqld_safe --init-file=%s" %
                              temp_file.name)
        try:
            i = child.expect(['Starting mysqld daemon'])
            if i == 0:
                LOG.info("Root password reset successfully!")
        except pexpect.TIMEOUT as e:
            LOG.error("wait_and_close_proc failed: %s" % e)
        finally:
            LOG.info("Cleaning up the temp mysqld process...")
            child.delayafterclose = 1
            child.delayafterterminate = 1
            child.close(force=True)
            utils.execute_with_timeout("sudo", "killall", "mysqld")

    def _reset_root_password(self):
        # Create temp file with reset root password
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(RESET_ROOT_MYSQL_COMMAND)
            fp.flush()
            utils.execute_with_timeout("sudo", "chmod", "a+r", fp.name)
            self._spawn_with_init_file(fp)

    def _delete_old_binlogs(self):
        # Stale ib_logfile* redo logs restored from the backup can keep
        # mysqld from starting if the configured log size differs.
        filelist = glob.glob(self.restore_location + "/ib_logfile*")
        for f in filelist:
            os.unlink(f)

View File

@ -0,0 +1,58 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from reddwarf.guestagent.strategies.restore import base
from reddwarf.openstack.common import log as logging
from reddwarf.common import utils
import reddwarf.guestagent.manager.mysql_service as dbaas
LOG = logging.getLogger(__name__)
class MySQLDump(base.RestoreRunner):
    """ Implementation of Restore Strategy for MySQLDump """
    __strategy_name__ = 'mysqldump'
    # Dumps are stored gzipped; tells the storage layer to decompress.
    is_zipped = True
    restore_cmd = ('mysql '
                   '--password=%(password)s '
                   '-u %(user)s')

    def _pre_restore(self):
        """No setup is needed before replaying a SQL dump."""
        pass

    def _post_restore(self):
        """No cleanup is needed after replaying a SQL dump."""
        pass
class InnoBackupEx(base.RestoreRunner):
    """ Implementation of Restore Strategy for InnoBackupEx """
    __strategy_name__ = 'innobackupex'
    # Archives are stored gzipped; tells the storage layer to decompress.
    is_zipped = True
    restore_cmd = 'sudo xbstream -x -C %(restore_location)s'
    prepare_cmd = ('sudo innobackupex --apply-log %(restore_location)s '
                   '--ibbackup xtrabackup 2>/tmp/innoprepare.log')

    def _pre_restore(self):
        """Stop MySQL before unpacking files into its data directory."""
        app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
        app.stop_db()

    def _post_restore(self):
        """Fix ownership, drop stale redo logs, reset root, restart."""
        utils.execute_with_timeout("sudo", "chown", "-R", "-f",
                                   "mysql", self.restore_location)
        self._delete_old_binlogs()
        self._reset_root_password()
        app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
        app.start_mysql()

View File

@ -0,0 +1,25 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from reddwarf.guestagent.strategy import Strategy
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_storage_strategy(storage_driver, ns=__name__):
    """Resolve and return the storage strategy class named by
    ``storage_driver`` within the namespace ``ns``."""
    LOG.debug("Getting storage strategy: %s" % storage_driver)
    strategy_cls = Strategy.get_strategy(storage_driver, ns)
    return strategy_cls

View File

@ -0,0 +1,39 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from reddwarf.guestagent.strategy import Strategy
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Storage(Strategy):
    """ Base class for Storage Strategy implementation """
    __strategy_type__ = 'storage'
    __strategy_ns__ = 'reddwarf.guestagent.strategies.storage'

    def __init__(self):
        super(Storage, self).__init__()

    @abc.abstractmethod
    def save(self, save_location, stream):
        """ Persist information from the stream to ``save_location``. """

    @abc.abstractmethod
    def load(self, context, location, is_zipped):
        """ Load a readable stream from a persisted storage location. """

View File

@ -0,0 +1,139 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from reddwarf.guestagent.strategies.storage import base
from reddwarf.openstack.common import log as logging
from reddwarf.common.remote import create_swift_client
from reddwarf.common import utils
from eventlet.green import subprocess
import zlib
UNZIPPER = zlib.decompressobj(16 + zlib.MAX_WBITS)
LOG = logging.getLogger(__name__)
class DownloadError(Exception):
    """Raised when the swift download subprocess fails."""
class SwiftStorage(base.Storage):
    """Storage strategy that saves and loads backup streams via Swift."""
    __strategy_name__ = 'swift'

    def __init__(self, context):
        super(SwiftStorage, self).__init__()
        self.connection = create_swift_client(context)

    def set_container(self):
        """Set the container to store to, creating it if it doesn't
        exist.

        Currently a no-op placeholder: save() creates the containers
        itself.
        """

    def save(self, save_location, stream):
        """Stream a backup into Swift as segment objects plus a manifest.

        Returns a (success, note, checksum, location) tuple; checksum
        and location are None on failure.
        """
        # Create the containers (save_location and the per-backup
        # segments container) if they don't already exist.
        self.container_name = save_location
        self.segments_container_name = stream.manifest + "_segments"
        self.connection.put_container(self.container_name)
        self.connection.put_container(self.segments_container_name)
        # Read from the stream and write each segment to swift.
        while not stream.end_of_file:
            segment = stream.segment
            etag = self.connection.put_object(self.segments_container_name,
                                              segment,
                                              stream)
            # Check each segment MD5 hash against the swift etag; a
            # mismatch means the upload was corrupted in transit.
            if etag != stream.schecksum.hexdigest():
                # A stray py2 debug "print" was replaced with a log line.
                LOG.error("Etag (%s) did not match segment checksum (%s)"
                          % (etag, stream.schecksum.hexdigest()))
                return (False, "Error saving data to Swift!", None, None)
        checksum = stream.checksum.hexdigest()
        url = self.connection.url
        location = "%s/%s/%s" % (url, self.container_name, stream.manifest)
        # Create the manifest file that stitches the segments together.
        headers = {
            'X-Object-Manifest':
            self.segments_container_name + "/" + stream.filename}
        self.connection.put_object(self.container_name,
                                   stream.manifest,
                                   contents='',
                                   headers=headers)
        return (True, "Successfully saved data to Swift!",
                checksum, location)

    def _explodeLocation(self, location):
        """Split a swift object URL into (storage_url, container,
        filename)."""
        parts = location.split('/')
        storage_url = "/".join(parts[:-2])
        container = parts[-2]
        filename = parts[-1]
        return storage_url, container, filename

    def load(self, context, location, is_zipped):
        """ Restore a backup from the input stream to the restore_location """
        storage_url, container, filename = self._explodeLocation(location)
        return SwiftDownloadStream(auth_token=context.auth_token,
                                   storage_url=storage_url,
                                   container=container,
                                   filename=filename,
                                   is_zipped=is_zipped)
class SwiftDownloadStream(object):
    """Readable stream that downloads a swift object via the swift CLI."""

    cmd = ("swift --os-auth-token=%(auth_token)s "
           "--os-storage-url=%(storage_url)s "
           "download %(container)s %(filename)s -o -")

    def __init__(self, **kwargs):
        self.process = None
        self.pid = None
        self.is_zipped = kwargs.get('is_zipped', False)
        # A zlib decompressobj is stateful: the previous module-level
        # UNZIPPER was shared across instances, so any download after
        # the first would decompress against stale state. Keep one per
        # instance instead. 16 + MAX_WBITS => expect a gzip header.
        self.unzipper = (zlib.decompressobj(16 + zlib.MAX_WBITS)
                         if self.is_zipped else None)
        self.cmd = self.cmd % kwargs

    def __enter__(self):
        """Start up the process"""
        self.run()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Clean up everything."""
        if exc_type is None:
            utils.raise_if_process_errored(self.process, DownloadError)
        # Make sure to terminate the process
        try:
            self.process.terminate()
        except OSError:
            # Already stopped
            pass

    def read(self, *args, **kwargs):
        """Read from the subprocess, transparently gunzipping if needed."""
        if not self.is_zipped:
            return self.process.stdout.read(*args, **kwargs)
        return self.unzipper.decompress(
            self.process.stdout.read(*args, **kwargs))

    def run(self):
        self.process = subprocess.Popen(self.cmd, shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        self.pid = self.process.pid

View File

@ -0,0 +1,74 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from reddwarf.common import utils
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Strategy(object):
    """Abstract base for pluggable strategies resolved by dotted name."""

    __metaclass__ = abc.ABCMeta

    __strategy_ns__ = None
    __strategy_name__ = None
    __strategy_type__ = None

    def __init__(self):
        self.name = self.get_canonical_name()
        LOG.debug("Loaded strategy %s", self.name)

    def is_enabled(self):
        """
        Is this Strategy enabled?

        :retval: Boolean
        """
        return True

    @classmethod
    def get_strategy(cls, name, ns=None):
        """
        Load a strategy from namespace
        """
        namespace = ns or cls.__strategy_ns__
        if namespace is None:
            raise RuntimeError(
                'No namespace provided or __strategy_ns__ unset')
        LOG.debug('Looking for strategy %s in %s', name, namespace)
        return utils.import_class(namespace + "." + name)

    @classmethod
    def get_canonical_name(cls):
        """
        Return the strategy name as "<type>:<name>".
        """
        return "%s:%s" % (cls.get_strategy_type(), cls.get_strategy_name())

    @classmethod
    def get_strategy_name(cls):
        return cls.__strategy_name__

    @classmethod
    def get_strategy_type(cls):
        return cls.__strategy_type__

View File

@ -786,6 +786,9 @@ class ServiceStatus(object):
def __str__(self):
return self._description
def __repr__(self):
return self._api_status
class ServiceStatuses(object):
RUNNING = ServiceStatus(0x01, 'running', 'ACTIVE')

View File

@ -60,7 +60,7 @@ class Manager(periodic_task.PeriodicTasks):
instance_tasks.delete_async()
def delete_backup(self, context, backup_id):
models.BackupTasks.delete_backup(backup_id)
models.BackupTasks.delete_backup(context, backup_id)
def create_backup(self, context, backup_id, instance_id):
instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
@ -72,4 +72,5 @@ class Manager(periodic_task.PeriodicTasks):
instance_tasks = FreshInstanceTasks.load(context, instance_id)
instance_tasks.create_instance(flavor_id, flavor_ram, image_id,
databases, users, service_type,
volume_size, security_groups)
volume_size, security_groups,
backup_id)

View File

@ -12,40 +12,34 @@
# License for the specific language governing permissions and limitations
# under the License.
import time
from eventlet import greenthread
from datetime import datetime
import traceback
from eventlet import greenthread
from novaclient import exceptions as nova_exceptions
from reddwarf.common import cfg
from reddwarf.common import remote
from reddwarf.common import utils
from reddwarf.common.exception import GuestError
from reddwarf.common.exception import PollTimeOut
from reddwarf.common.exception import VolumeCreationFailure
from reddwarf.common.exception import NotFound
from reddwarf.common.exception import ReddwarfError
from reddwarf.common.remote import create_dns_client
from reddwarf.common.remote import create_nova_client
from reddwarf.common.remote import create_nova_volume_client
from reddwarf.common.remote import create_guest_client
from swiftclient.client import ClientException
from reddwarf.common.utils import poll_until
from reddwarf.extensions.mysql.common import populate_databases
from reddwarf.extensions.mysql.common import populate_users
from reddwarf.instance import models as inst_models
from reddwarf.instance.models import DBInstance
from reddwarf.instance.models import BuiltInstance
from reddwarf.instance.models import FreshInstance
from reddwarf.instance.models import InstanceStatus
from reddwarf.instance.models import InstanceServiceStatus
from reddwarf.instance.models import ServiceStatuses
from reddwarf.instance.tasks import InstanceTasks
from reddwarf.instance.views import get_ip_address
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common.notifier import api as notifier
from reddwarf.openstack.common import timeutils
import reddwarf.common.remote as remote
import reddwarf.backup.models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -114,7 +108,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin):
def create_instance(self, flavor_id, flavor_ram, image_id,
databases, users, service_type, volume_size,
security_groups):
security_groups, backup_id):
if use_nova_server_volume:
server, volume_info = self._create_server_volume(
flavor_id,
@ -138,7 +132,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin):
if server:
self._guest_prepare(server, flavor_ram, volume_info,
databases, users)
databases, users, backup_id)
if not self.db_info.task_status.is_error:
self.update_db(task_status=inst_models.InstanceTasks.NONE)
@ -329,12 +323,13 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin):
return server
def _guest_prepare(self, server, flavor_ram, volume_info,
databases, users):
databases, users, backup_id=None):
LOG.info("Entering guest_prepare.")
# Now wait for the response from the create to do additional work
self.guest.prepare(flavor_ram, databases, users,
device_path=volume_info['device_path'],
mount_point=volume_info['mount_point'])
mount_point=volume_info['mount_point'],
backup_id=backup_id)
def _create_dns_entry(self):
LOG.debug("%s: Creating dns entry for instance: %s" %
@ -468,16 +463,8 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin):
action.execute()
def create_backup(self, backup_id):
# TODO
# create a temp volume
# nova list
# nova show
# check in progress - make sure no other snapshot creation in progress
# volume create
# volume attach
# call GA.create_backup()
LOG.debug("Calling create_backup %s " % self.id)
self.guest.create_backup(backup_id)
LOG.debug("Called create_backup %s " % self.id)
def reboot(self):
try:
@ -534,6 +521,42 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin):
status.save()
class BackupTasks(object):
    """Task-manager side helpers for backup maintenance (swift cleanup)."""

    @classmethod
    def delete_files_from_swift(cls, context, filename):
        """Remove a backup's manifest, its data segments, and the
        segments container from swift.

        :param context: request context used to build the swift client.
        :param filename: base object name of the backup in swift.
        """
        client = remote.create_swift_client(context)
        # Delete the manifest first so the backup can no longer be
        # referenced, then clean up the segment objects behind it.
        if client.head_object(CONF.backup_swift_container, filename):
            client.delete_object(CONF.backup_swift_container, filename)
        # Delete the segments
        if client.head_container(filename + "_segments"):
            for obj in client.get_container(filename + "_segments")[1]:
                client.delete_object(filename + "_segments", obj['name'])
            # Delete the segments container
            client.delete_container(filename + "_segments")

    @classmethod
    def delete_backup(cls, context, backup_id):
        """Delete a backup's swift objects, then its database record.

        If swift cleanup fails, the record is kept and marked FAILED so
        the orphaned objects can be found and reaped later.
        """
        #delete backup from swift
        backup = reddwarf.backup.models.Backup.get_by_id(backup_id)
        try:
            filename = backup.filename
            if filename:
                BackupTasks.delete_files_from_swift(context, filename)
        except (ClientException, ValueError) as e:
            LOG.exception("Exception deleting from swift. Details: %s" % e)
            LOG.error("Failed to delete swift objects")
            backup.state = reddwarf.backup.models.BackupState.FAILED
            # BUG FIX: persist the FAILED state -- without save() the
            # assignment above was lost and the failure was invisible.
            backup.save()
        else:
            backup.delete()
class ResizeActionBase(object):
"""Base class for executing a resize action."""

View File

@ -16,12 +16,14 @@ from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis import test
from proboscis import SkipTest
from proboscis.decorators import time_out
from reddwarf.tests.util import poll_until
from reddwarf.tests.util import test_config
from reddwarfclient import exceptions
from reddwarf.tests.api.instances import WaitForGuestInstallationToFinish
from reddwarf.tests.api.instances import instance_info, assert_unprocessable
from reddwarf.tests.api.instances import instance_info
from reddwarf.tests.api.instances import assert_unprocessable
GROUP = "dbaas.api.backups"
BACKUP_NAME = 'backup_test'
@ -29,6 +31,7 @@ BACKUP_DESC = 'test description'
backup_info = None
restore_instance_id = None
@test(depends_on_classes=[WaitForGuestInstallationToFinish],
@ -81,15 +84,6 @@ class AfterBackupCreation(object):
backup = result[0]
assert_unprocessable(instance_info.dbaas.backups.delete, backup.id)
@test
def test_backup_create_quota_exceeded(self):
"""test quota exceeded when creating a backup"""
instance_info.dbaas_admin.quota.update(instance_info.user.tenant_id,
{'backups': 1})
assert_raises(exceptions.OverLimit,
instance_info.dbaas.backups.create,
'Too_many_backups', instance_info.id, BACKUP_DESC)
@test(runs_after=[AfterBackupCreation],
groups=[GROUP])
@ -152,6 +146,57 @@ class ListBackups(object):
@test(runs_after=[ListBackups],
      groups=[GROUP])
class RestoreUsingBackup(object):
    """Create a new instance seeded from the backup taken earlier."""

    @test
    def test_restore(self):
        """test restore"""
        if test_config.auth_strategy == "fake":
            raise SkipTest("Skipping restore tests for fake mode.")
        # Pass the earlier backup's id as the restore point for the new
        # instance; the API should accept it and start a BUILD.
        restorePoint = {"backupRef": backup_info.id}
        result = instance_info.dbaas.instances.create(
            instance_info.name + "_restore",
            instance_info.dbaas_flavor_href,
            instance_info.volume,
            restorePoint=restorePoint)
        assert_equal(200, instance_info.dbaas.last_http_code)
        assert_equal("BUILD", result.status)
        # Remember the new instance id so later test groups can poll it.
        global restore_instance_id
        restore_instance_id = result.id
@test(depends_on_classes=[RestoreUsingBackup],
      runs_after=[RestoreUsingBackup],
      groups=[GROUP])
class WaitForRestoreToFinish(object):
    """
    Wait until the instance is finished restoring.
    """

    @test
    @time_out(60 * 32)
    def test_instance_restored(self):
        if test_config.auth_strategy == "fake":
            raise SkipTest("Skipping restore tests for fake mode.")
        # This version just checks the REST API status.

        def result_is_active():
            instance = instance_info.dbaas.instances.get(restore_instance_id)
            if instance.status == "ACTIVE":
                return True
            else:
                # If its not ACTIVE, anything but BUILD must be
                # an error.
                assert_equal("BUILD", instance.status)
                if instance_info.volume is not None:
                    assert_equal(instance.volume.get('used', None), None)
                return False

        poll_until(result_is_active)
@test(runs_after=[WaitForRestoreToFinish],
groups=[GROUP])
class DeleteBackups(object):
@test
@ -159,3 +204,20 @@ class DeleteBackups(object):
"""test delete unknown backup"""
assert_raises(exceptions.NotFound, instance_info.dbaas.backups.delete,
'nonexistent_backup')
@test
@time_out(60 * 2)
def test_backup_delete(self):
    """test delete"""
    # Delete is asynchronous: expect a 202 now, then poll until the
    # instance's backup list drains and a direct GET raises NotFound.
    instance_info.dbaas.backups.delete(backup_info.id)
    assert_equal(202, instance_info.dbaas.last_http_code)

    def backup_is_gone():
        result = instance_info.dbaas.instances.backups(instance_info.id)
        if len(result) == 0:
            return True
        else:
            return False
    poll_until(backup_is_gone)
    assert_raises(exceptions.NotFound, instance_info.dbaas.backups.get,
                  backup_info.id)

View File

@ -181,7 +181,7 @@ class FakeGuest(object):
return self.users.get((username, hostname), None)
def prepare(self, memory_mb, databases, users, device_path=None,
mount_point=None):
mount_point=None, backup_id=None):
from reddwarf.instance.models import DBInstance
from reddwarf.instance.models import InstanceServiceStatus
from reddwarf.instance.models import ServiceStatuses

View File

@ -396,3 +396,7 @@ class SwiftClientStub(object):
http_status=404))
self._remove_object(name, self._objects[container])
return self
def fake_create_swift_client(*args):
    """Return a fake swift Connection -- presumably a test stand-in for
    remote.create_swift_client; verify against the monkey-patch site."""
    return FakeSwiftClient.Connection(*args)

View File

@ -0,0 +1,237 @@
#Copyright 2013 Hewlett-Packard Development Company, L.P.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import hashlib
from reddwarf.common import utils
from reddwarf.common.context import ReddwarfContext
from reddwarf.guestagent.strategies.restore.base import RestoreRunner
import testtools
from testtools.matchers import Equals, Is
from webob.exc import HTTPNotFound
from mockito import when, verify, unstub, mock, any, contains
from reddwarf.backup.models import DBBackup
from reddwarf.backup.models import BackupState
from reddwarf.common.exception import ModelNotFoundError
from reddwarf.db.models import DatabaseModelBase
from reddwarf.guestagent.backup import backupagent
from reddwarf.guestagent.strategies.backup.base import BackupRunner
from reddwarf.guestagent.strategies.backup.base import UnknownBackupType
from reddwarf.guestagent.strategies.storage.base import Storage
def create_fake_data():
    """Return 1024 random ASCII letters to serve as fake backup data."""
    from random import choice
    from string import ascii_letters
    # range (not the Python-2-only xrange) behaves identically here and
    # keeps the helper usable on Python 3 as well.
    return ''.join([choice(ascii_letters) for _ in range(1024)])
class MockBackup(BackupRunner):
    """Backup runner double whose subprocess simply echoes random data."""
    backup_type = 'mock_backup'

    def __init__(self, *args, **kwargs):
        # Build the payload before delegating to the base runner so the
        # command is ready when the runner starts its subprocess.
        self.data = create_fake_data()
        self.cmd = 'echo ' + self.data
        super(MockBackup, self).__init__(*args, **kwargs)
class MockLossyBackup(MockBackup):
    """Simulates incomplete writes to swift by dropping leading bytes."""

    def read(self, *args):
        chunk = super(MockLossyBackup, self).read(*args)
        # Strip a few characters from each chunk so checksums mismatch.
        return chunk[20:] if chunk else None
class MockSwift(object):
    """In-memory stand-in for a swift client: object data lands in a string."""

    def __init__(self, *args, **kwargs):
        self.store = ''
        self.containers = []
        self.url = 'http://mockswift/v1'
        self.etag = hashlib.md5()

    def put_container(self, container):
        """Register a container name; idempotent, like swift's PUT."""
        if container not in self.containers:
            self.containers.append(container)
        return None

    def put_object(self, container, obj, contents, **kwargs):
        """Drain a stream-like *contents* into the store, 64 KiB at a time."""
        if container not in self.containers:
            raise HTTPNotFound
        # Non-stream payloads (no read()) are ignored entirely.
        while hasattr(contents, 'read'):
            chunk = contents.read(2 ** 16)
            if not chunk:
                break
            self.store += chunk
        # NOTE: the running etag is fed the whole store, not just the
        # new bytes -- quirky, but preserved from the original fake.
        self.etag.update(self.store)
        return self.etag.hexdigest()

    def save(self, save_location, stream):
        """Pretend to upload; report success with canned checksum/location."""
        location = '/'.join([self.url, save_location, stream.manifest])
        return True, 'w00t', 'fake-checksum', location

    def load(self, context, storage_url, container, filename):
        """Downloads are a no-op for the mock."""
        pass
class MockStorage(Storage):
    """No-op storage strategy used to stub out swift in restore tests."""

    def __init__(self, context):
        super(MockStorage, self).__init__()
        pass

    def __call__(self, *args, **kwargs):
        # The agent instantiates the looked-up strategy; returning self
        # lets this single instance act as its own factory.
        return self

    def load(self, context, location, is_zipped):
        pass

    def save(self, save_location, stream):
        pass

    def is_enabled(self):
        return True
class MockRestoreRunner(RestoreRunner):
    """Restore runner double: honors the context-manager protocol but
    performs no actual restore work."""

    def __init__(self, restore_stream, restore_location):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def restore(self):
        pass

    def is_zipped(self):
        return False
BACKUP_NS = 'reddwarf.guestagent.strategies.backup'
class BackupAgentTest(testtools.TestCase):
    """Unit tests for the guestagent BackupAgent.

    Swift, the backup model lookups, and the strategy resolution are all
    replaced with mockito stubs, so no real storage or database is hit.
    """

    def setUp(self):
        super(BackupAgentTest, self).setUp()
        # Every test gets a canned auth password and the in-memory
        # MockSwift as its storage strategy.
        when(backupagent).get_auth_password().thenReturn('secret')
        when(backupagent).get_storage_strategy(any(), any()).thenReturn(
            MockSwift)

    def tearDown(self):
        super(BackupAgentTest, self).tearDown()
        unstub()

    def test_execute_backup(self):
        """This test should ensure backup agent
                ensures that backup and storage is not running
                resolves backup instance
                starts backup
                starts storage
                reports status
        """
        backup = mock(DBBackup)
        when(DatabaseModelBase).find_by(id='123').thenReturn(backup)
        when(backup).save().thenReturn(backup)

        agent = backupagent.BackupAgent()
        agent.execute_backup(context=None, backup_id='123', runner=MockBackup)

        verify(DatabaseModelBase).find_by(id='123')
        self.assertThat(backup.state, Is(BackupState.COMPLETED))
        self.assertThat(backup.location,
                        Equals('http://mockswift/v1/database_backups/123'))
        # Saved three times -- presumably once per state transition;
        # confirm against the agent implementation.
        verify(backup, times=3).save()

    def test_execute_lossy_backup(self):
        """This test verifies that incomplete writes to swift will fail."""
        backup = mock(DBBackup)
        when(backupagent).get_auth_password().thenReturn('secret')
        when(DatabaseModelBase).find_by(id='123').thenReturn(backup)
        when(backup).save().thenReturn(backup)
        # Force the storage layer to report a failed/partial upload.
        when(MockSwift).save(any(), any()).thenReturn((False, 'Error', 'y',
                                                       'z'))

        agent = backupagent.BackupAgent()
        self.assertRaises(backupagent.BackupError, agent.execute_backup,
                          context=None, backup_id='123',
                          runner=MockLossyBackup)

        self.assertThat(backup.state, Is(BackupState.FAILED))
        verify(backup, times=3).save()

    def test_execute_backup_model_exception(self):
        """This test should ensure backup agent
                properly handles condition where backup model is not found
        """
        when(DatabaseModelBase).find_by(id='123').thenRaise(ModelNotFoundError)

        agent = backupagent.BackupAgent()
        # probably should catch this exception and return a backup exception
        # also note that since the model is not found there is no way to report
        # this error
        self.assertRaises(ModelNotFoundError, agent.execute_backup,
                          context=None, backup_id='123')

    def test_execute_restore(self):
        """This test should ensure backup agent
                resolves backup instance
                determines backup/restore type
                transfers/downloads data and invokes the restore module
                reports status
        """
        backup = mock(DBBackup)
        backup.location = "/backup/location/123"
        backup.backup_type = 'InnoBackupEx'

        # Stub out filesystem cleanup and both strategy lookups so the
        # restore path runs end-to-end against mocks only.
        when(utils).execute(contains('sudo rm -rf')).thenReturn(None)
        when(utils).clean_out(any()).thenReturn(None)
        when(backupagent).get_storage_strategy(any(), any()).thenReturn(
            MockStorage)
        when(backupagent).get_restore_strategy(
            'InnoBackupEx', any()).thenReturn(MockRestoreRunner)
        when(DatabaseModelBase).find_by(id='123').thenReturn(backup)
        when(backup).save().thenReturn(backup)

        agent = backupagent.BackupAgent()
        agent.execute_restore(ReddwarfContext(), '123', '/var/lib/mysql')

    def test_restore_unknown(self):
        backup = mock(DBBackup)
        backup.location = "/backup/location/123"
        backup.backup_type = 'foo'

        when(utils).execute(contains('sudo rm -rf')).thenReturn(None)
        when(utils).clean_out(any()).thenReturn(None)
        when(DatabaseModelBase).find_by(id='123').thenReturn(backup)
        # An unregistered backup type makes the strategy import blow up;
        # the agent should surface that as UnknownBackupType.
        when(backupagent).get_restore_strategy(
            'foo', any()).thenRaise(ImportError)

        agent = backupagent.BackupAgent()
        self.assertRaises(UnknownBackupType, agent.execute_restore,
                          context=None, backup_id='123',
                          restore_location='/var/lib/mysql')

View File

@ -11,183 +11,311 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from mockito import when
from mockito import any
from mockito import verify
from mockito import unstub
from mockito import mock
from mockito import verifyZeroInteractions
from mockito import never
import mockito.matchers
import testtools
from mock import Mock, MagicMock
from reddwarf.openstack.common import rpc
from reddwarf.openstack.common.rpc import proxy
from reddwarf.openstack.common.rpc import impl_kombu as kombu
from testtools.matchers import KeysEqual, Is
from reddwarf.guestagent import models as agent_models
import reddwarf.db.models as db_models
from reddwarf.common import exception
from reddwarf.guestagent import api
import reddwarf.openstack.common.rpc as rpc
class ApiTest(testtools.TestCase):
def setUp(self):
super(ApiTest, self).setUp()
self.api = api.API(Mock, Mock)
self.origin_rpc_call = proxy.RpcProxy.call
proxy.RpcProxy.call = Mock()
self.rpc_call = proxy.RpcProxy.call
self.origin_rpc_cast = proxy.RpcProxy.cast
proxy.RpcProxy.cast = Mock()
self.rpc_cast = proxy.RpcProxy.cast
self.origin_object = agent_models.AgentHeartBeat.find_by
agent_models.AgentHeartBeat.find_by = Mock()
self.origin_is_active = agent_models.AgentHeartBeat.is_active
self.origin_api_id = self.api.id
self.FAKE_ID = 'instance-id-x23d2d'
self.api = api.API(mock(), self.FAKE_ID)
when(rpc).call(any(), any(), any(), any(int)).thenRaise(
ValueError('Unexpected Rpc Invocation'))
when(rpc).cast(any(), any(), any()).thenRaise(
ValueError('Unexpected Rpc Invocation'))
def tearDown(self):
super(ApiTest, self).tearDown()
proxy.RpcProxy.call = self.origin_rpc_call
proxy.RpcProxy.cast = self.origin_rpc_cast
agent_models.AgentHeartBeat.is_active = self.origin_is_active
agent_models.AgentHeartBeat.find_by = self.origin_object
self.api.id = self.origin_api_id
def test__call(self):
self.api._call(Mock, Mock)
self.assertEqual(1, self.rpc_call.call_count)
def test__cast(self):
self.api._cast(Mock)
self.assertEqual(1, self.rpc_cast.call_count)
unstub()
def test_delete_queue(self):
self.skipTest("find out if this delete_queue function is needed "
"anymore, Bug#1097482")
def test_get_routing_key(self):
FAKE_ID = '123456'
self.api.id = FAKE_ID
self.assertEqual('guestagent.' + FAKE_ID,
self.assertEqual('guestagent.' + self.FAKE_ID,
self.api._get_routing_key())
def test_check_for_heartbeat_positive(self):
agent_models.AgentHeartBeat.is_active = MagicMock(return_value=True)
when(db_models.DatabaseModelBase).find_by(
instance_id=any()).thenReturn('agent')
when(agent_models.AgentHeartBeat).is_active('agent').thenReturn(True)
self.assertTrue(self.api._check_for_hearbeat())
def test_check_for_heartbeat_exception(self):
# TODO (juice) maybe it would be ok to extend the test to validate
# the is_active method on the heartbeat
when(db_models.DatabaseModelBase).find_by(instance_id=any()).thenRaise(
exception.ModelNotFoundError)
when(agent_models.AgentHeartBeat).is_active(any()).thenReturn(None)
self.assertRaises(exception.GuestTimeout, self.api._check_for_hearbeat)
verify(agent_models.AgentHeartBeat, times=0).is_active(any())
def test_check_for_heartbeat_negative(self):
agent_models.AgentHeartBeat.is_active = MagicMock(return_value=False)
# TODO (juice) maybe it would be ok to extend the test to validate
# the is_active method on the heartbeat
when(db_models.DatabaseModelBase).find_by(
instance_id=any()).thenReturn('agent')
when(agent_models.AgentHeartBeat).is_active(any()).thenReturn(False)
self.assertRaises(exception.GuestTimeout, self.api._check_for_hearbeat)
def test_create_user(self):
self.api.create_user(Mock)
self.assertEqual(1, self.rpc_cast.call_count)
exp_msg = RpcMsgMatcher('create_user', 'users')
self._mock_rpc_cast(exp_msg)
self.api.create_user('test_user')
self._verify_rpc_cast(exp_msg)
def test_rpc_cast_exception(self):
exp_msg = RpcMsgMatcher('create_user', 'users')
when(rpc).cast(any(), any(), exp_msg).thenRaise(IOError('host down'))
with testtools.ExpectedException(exception.GuestError, '.* host down'):
self.api.create_user('test_user')
self._verify_rpc_cast(exp_msg)
def test_list_users(self):
self.api.list_users()
self.assertEqual(1, self.rpc_call.call_count)
exp_msg = RpcMsgMatcher('list_users', 'limit', 'marker',
'include_marker')
exp_resp = ['user1', 'user2', 'user3']
self._mock_rpc_call(exp_msg, exp_resp)
act_resp = self.api.list_users()
self.assertThat(act_resp, Is(exp_resp))
self._verify_rpc_call(exp_msg)
def test_rpc_call_exception(self):
exp_msg = RpcMsgMatcher('list_users', 'limit', 'marker',
'include_marker')
when(rpc).call(any(), any(), exp_msg, any(int)).thenRaise(
IOError('host down'))
with testtools.ExpectedException(exception.GuestError,
'An error occurred.*'):
self.api.list_users()
self._verify_rpc_call(exp_msg)
def test_delete_user(self):
self.api.delete_user(Mock)
self.assertEqual(1, self.rpc_cast.call_count)
exp_msg = RpcMsgMatcher('delete_user', 'user')
self._mock_rpc_cast(exp_msg)
self.api.delete_user('test_user')
self._mock_rpc_cast(exp_msg)
def test_create_database(self):
self.api.create_database(Mock)
self.assertEqual(1, self.rpc_cast.call_count)
exp_msg = RpcMsgMatcher('create_database', 'databases')
self._mock_rpc_cast(exp_msg)
self.api.create_database(['db1', 'db2', 'db3'])
self._verify_rpc_cast(exp_msg)
def test_list_databases(self):
self.api.list_databases()
self.assertEqual(1, self.rpc_call.call_count)
exp_msg = RpcMsgMatcher('list_databases', 'limit', 'marker',
'include_marker')
exp_resp = ['db1', 'db2', 'db3']
self._mock_rpc_call(exp_msg, exp_resp)
resp = self.api.list_databases(limit=1, marker=2,
include_marker=False)
self.assertThat(resp, Is(exp_resp))
self._verify_rpc_call(exp_msg)
def test_delete_database(self):
self.api.delete_database(Mock)
self.assertEqual(1, self.rpc_cast.call_count)
exp_msg = RpcMsgMatcher('delete_database', 'database')
self._mock_rpc_cast(exp_msg)
self.api.delete_database('test_database_name')
self._verify_rpc_cast(exp_msg)
def test_enable_root(self):
self.api.enable_root()
self.assertEqual(1, self.rpc_call.call_count)
exp_msg = RpcMsgMatcher('enable_root')
self._mock_rpc_call(exp_msg, True)
self.assertThat(self.api.enable_root(), Is(True))
self._verify_rpc_call(exp_msg)
def test_disable_root(self):
self.api.disable_root()
self.assertEqual(1, self.rpc_call.call_count)
exp_msg = RpcMsgMatcher('disable_root')
self._mock_rpc_call(exp_msg, True)
self.assertThat(self.api.disable_root(), Is(True))
self._verify_rpc_call(exp_msg)
def test_is_root_enabled(self):
self.api.is_root_enabled()
self.assertEqual(1, self.rpc_call.call_count)
exp_msg = RpcMsgMatcher('is_root_enabled')
self._mock_rpc_call(exp_msg, False)
self.assertThat(self.api.is_root_enabled(), Is(False))
self._verify_rpc_call(exp_msg)
def test_get_hwinfo(self):
exp_msg = RpcMsgMatcher('get_hwinfo')
self._mock_rpc_call(exp_msg)
self.api.get_hwinfo()
self.assertEqual(1, self.rpc_call.call_count)
self._verify_rpc_call(exp_msg)
def test_get_diagnostics(self):
exp_msg = RpcMsgMatcher('get_diagnostics')
self._mock_rpc_call(exp_msg)
self.api.get_diagnostics()
self.assertEqual(1, self.rpc_call.call_count)
self._verify_rpc_call(exp_msg)
def test_restart(self):
exp_msg = RpcMsgMatcher('restart')
self._mock_rpc_call(exp_msg)
self.api.restart()
self.assertEqual(1, self.rpc_call.call_count)
self._verify_rpc_call(exp_msg)
def test_start_db_with_conf_changes(self):
self.api.start_db_with_conf_changes(Mock)
self.assertEqual(1, self.rpc_call.call_count)
exp_msg = RpcMsgMatcher('start_db_with_conf_changes',
'updated_memory_size')
self._mock_rpc_call(exp_msg)
self.api.start_db_with_conf_changes('512')
self._verify_rpc_call(exp_msg)
def test_stop_mysql(self):
self.api.stop_db()
self.assertEqual(1, self.rpc_call.call_count)
def test_stop_db(self):
exp_msg = RpcMsgMatcher('stop_db', 'do_not_start_on_reboot')
self._mock_rpc_call(exp_msg)
self.api.stop_db(do_not_start_on_reboot=False)
self._verify_rpc_call(exp_msg)
def test_get_volume_info(self):
self.api.get_volume_info()
self.assertEqual(1, self.rpc_call.call_count)
fake_resp = {'fake': 'resp'}
exp_msg = RpcMsgMatcher('get_filesystem_stats', 'fs_path')
self._mock_rpc_call(exp_msg, fake_resp)
self.assertThat(self.api.get_volume_info(), Is(fake_resp))
self._verify_rpc_call(exp_msg)
def test_update_guest(self):
exp_msg = RpcMsgMatcher('update_guest')
self._mock_rpc_call(exp_msg)
self.api.update_guest()
self.assertEqual(1, self.rpc_call.call_count)
self._verify_rpc_call(exp_msg)
def test_create_backup(self):
exp_msg = RpcMsgMatcher('create_backup', 'backup_id')
self._mock_rpc_cast(exp_msg)
self.api.create_backup('123')
self._verify_rpc_cast(exp_msg)
def _verify_rpc_connection_and_cast(self, rpc, mock_conn, exp_msg):
verify(rpc).create_connection(new=True)
verify(mock_conn).create_consumer(self.api._get_routing_key(), None,
fanout=False)
verify(rpc).cast(any(), any(), exp_msg)
def test_prepare(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'databases', 'users',
'device_path', 'mount_point', 'backup_id')
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
self.api.prepare('2048', 'db1', 'user1', '/dev/vdt', '/mnt/opt',
'bkup-1232')
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
def test_prepare_with_backup(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'databases', 'users',
'device_path', 'mount_point', 'backup_id')
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
self.api.prepare('2048', 'db1', 'user1', '/dev/vdt', '/mnt/opt',
'backup_id_123')
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
def test_upgrade(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
exp_msg = RpcMsgMatcher('upgrade')
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
self.api.upgrade()
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
def test_rpc_cast_with_consumer_exception(self):
mock_conn = mock()
when(rpc).create_connection(new=True).thenRaise(IOError('host down'))
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'databases', 'users',
'device_path', 'mount_point')
with testtools.ExpectedException(exception.GuestError, '.* host down'):
self.api.prepare('2048', 'db1', 'user1', '/dev/vdt', '/mnt/opt')
verify(rpc).create_connection(new=True)
verifyZeroInteractions(mock_conn)
verify(rpc, never).cast(any(), any(), exp_msg)
def _mock_rpc_call(self, exp_msg, resp=None):
rpc.common = mock()
when(rpc).call(any(), any(), exp_msg, any(int)).thenReturn(resp)
def _verify_rpc_call(self, exp_msg):
verify(rpc).call(any(), any(), exp_msg, any(int))
def _mock_rpc_cast(self, exp_msg):
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
def _verify_rpc_cast(self, exp_msg):
verify(rpc).cast(any(), any(), exp_msg)
class CastWithConsumerTest(testtools.TestCase):
def setUp(self):
super(CastWithConsumerTest, self).setUp()
self.api = api.API(Mock, Mock)
self.origin_get_routing_key = self.api._get_routing_key
self.origin_create_consumer = kombu.Connection.create_consumer
self.origin_close = kombu.Connection.close
self.origin_create_connection = rpc.create_connection
self.api = api.API(mock(), 'instance-id-x23d2d')
def tearDown(self):
super(CastWithConsumerTest, self).tearDown()
self.api._get_routing_key = self.origin_get_routing_key
kombu.Connection.create_consumer = self.origin_create_consumer
kombu.Connection.close = self.origin_close
rpc.create_connection = self.origin_create_connection
unstub()
def test__cast_with_consumer(self):
self.api._get_routing_key = Mock()
self.api._cast = Mock()
kombu.Connection.create_consumer = Mock()
kombu.Connection.close = Mock()
rpc.create_connection = MagicMock(return_value=kombu.Connection)
mock_conn = mock()
when(rpc).create_connection(new=True).thenReturn(mock_conn)
when(mock_conn).create_consumer(any(), any(), any()).thenReturn(None)
when(rpc).cast(any(), any(), any()).thenReturn(None)
self.api._cast_with_consumer(Mock)
self.api._cast_with_consumer('fake_method_name', fake_param=1)
self.assertEqual(1, kombu.Connection.create_consumer.call_count)
self.assertEqual(1, kombu.Connection.close.call_count)
self.assertEqual(1, self.api._get_routing_key.call_count)
self.assertEqual(1, rpc.create_connection.call_count)
verify(rpc).create_connection(new=True)
verify(mock_conn).create_consumer(any(), None, fanout=False)
verify(rpc).cast(any(), any(), any())
class OtherTests(testtools.TestCase):
def setUp(self):
super(OtherTests, self).setUp()
self.api = api.API(Mock, Mock)
self.origin_cast_with_consumer = self.api._cast_with_consumer
class RpcMsgMatcher(mockito.matchers.Matcher):
def __init__(self, method, *args_dict):
self.wanted_method = method
self.wanted_dict = KeysEqual('version', 'method', 'args', 'namespace')
self.args_dict = KeysEqual(*args_dict)
def tearDown(self):
super(OtherTests, self).tearDown()
self.api._cast_with_consumer = self.origin_cast_with_consumer
def matches(self, arg):
if self.wanted_method != arg['method']:
raise Exception("Method does not match: %s != %s" %
(self.wanted_method, arg['method']))
#return False
if self.wanted_dict.match(arg) or self.args_dict.match(arg['args']):
raise Exception("Args do not match: %s != %s" %
(self.args_dict, arg['args']))
#return False
return True
def test_prepare(self):
self.api._cast_with_consumer = Mock()
self.api.prepare(Mock, Mock, Mock)
self.assertEqual(1, self.api._cast_with_consumer.call_count)
def test_upgrade(self):
self.api._cast_with_consumer = Mock()
self.api.upgrade()
self.assertEqual(1, self.api._cast_with_consumer.call_count)
def __repr__(self):
return "<Dict: %s>" % self.wanted_dict

View File

@ -12,21 +12,45 @@
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock, MagicMock
import testtools
import os
import __builtin__
from random import randint
import time
import reddwarf.guestagent.manager.mysql as dbaas
from reddwarf.guestagent.db import models
from reddwarf.guestagent.manager.mysql import MySqlAdmin
from reddwarf.guestagent.manager.mysql import MySqlApp
from reddwarf.guestagent.manager.mysql import MySqlAppStatus
from mock import Mock
from mock import MagicMock
from mockito import mock
from mockito import when
from mockito import any
from mockito import unstub
from mockito import verify
from mockito import contains
from mockito import never
from mockito import matchers
from mockito import inorder, verifyNoMoreInteractions
from reddwarf.extensions.mysql.models import RootHistory
import sqlalchemy
import testtools
from testtools.matchers import Is
from testtools.matchers import Equals
from testtools.matchers import Not
import reddwarf
from reddwarf.common.context import ReddwarfContext
from reddwarf.guestagent import pkg
from reddwarf.common import utils
import reddwarf.guestagent.manager.mysql_service as dbaas
from reddwarf.guestagent.manager.mysql_service import MySqlAdmin
from reddwarf.guestagent.manager.mysql_service import MySqlRootAccess
from reddwarf.guestagent.manager.mysql_service import MySqlApp
from reddwarf.guestagent.manager.mysql_service import MySqlAppStatus
from reddwarf.guestagent.manager.mysql_service import KeepAliveConnection
from reddwarf.guestagent.dbaas import Interrogator
from reddwarf.guestagent.manager.mysql import KeepAliveConnection
from reddwarf.guestagent.db import models
from reddwarf.instance.models import ServiceStatuses
from reddwarf.instance.models import InstanceServiceStatus
from reddwarf.tests.unittests.util import util
"""
Unit tests for the classes and functions in dbaas.py.
"""
@ -107,6 +131,43 @@ class DbaasTest(testtools.TestCase):
self.assertFalse(dbaas.load_mysqld_options())
class ResultSetStub(object):
    """Stands in for a SQL result set, wrapping a canned list of rows."""

    def __init__(self, rows):
        self._rows = rows

    def __iter__(self):
        # Delegate straight to the underlying list's iterator.
        return iter(self._rows)

    @property
    def rowcount(self):
        return len(self._rows)

    def __repr__(self):
        return repr(self._rows)
class MySqlAdminMockTest(testtools.TestCase):
    """MySqlAdmin tests driven by a fully mocked SQL connection."""

    def tearDown(self):
        super(MySqlAdminMockTest, self).tearDown()
        unstub()

    def test_list_databases(self):
        mock_conn = mock_admin_sql_connection()

        # Canned rows of (name, charset, collation) returned for the
        # schema listing query.
        when(mock_conn).execute(
            TextClauseMatcher('schema_name as name')).thenReturn(
                ResultSetStub([('db1', 'utf8', 'utf8_bin'),
                               ('db2', 'utf8', 'utf8_bin'),
                               ('db3', 'utf8', 'utf8_bin')]))

        databases, next_marker = MySqlAdmin().list_databases(limit=10)

        # All rows fit under the limit, so no pagination marker.
        self.assertThat(next_marker, Is(None))
        self.assertThat(len(databases), Is(3))
class MySqlAdminTest(testtools.TestCase):
def setUp(self):
@ -234,49 +295,6 @@ class MySqlAdminTest(testtools.TestCase):
"Create user queries are not the same")
self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count)
class EnableRootTest(MySqlAdminTest):
    """Tests for MySqlAdmin.enable_root / is_root_enabled.

    Patches MySQLUser._is_valid_user_name per test and restores the
    original in tearDown so other tests are unaffected.
    """

    def setUp(self):
        super(EnableRootTest, self).setUp()
        # Save the real validator so tearDown can restore it.
        self.origin_is_valid_user_name = models.MySQLUser._is_valid_user_name
        self.mySqlAdmin = MySqlAdmin()

    def tearDown(self):
        super(EnableRootTest, self).tearDown()
        models.MySQLUser._is_valid_user_name = self.origin_is_valid_user_name

    def test_enable_root(self):
        models.MySQLUser._is_valid_user_name =\
            MagicMock(return_value=True)
        self.mySqlAdmin.enable_root()
        # Inspect each SQL statement issued through LocalSqlClient, in order:
        # 1) CREATE USER, 2) UPDATE mysql.user (password), 3) GRANT.
        args_list = dbaas.LocalSqlClient.execute.call_args_list
        args, keyArgs = args_list[0]
        self.assertEquals(args[0].text.strip(), "CREATE USER :user@:host;",
                          "Create user queries are not the same")
        self.assertEquals(keyArgs['user'], 'root')
        self.assertEquals(keyArgs['host'], '%')
        args, keyArgs = args_list[1]
        self.assertTrue("UPDATE mysql.user" in args[0].text)
        args, keyArgs = args_list[2]
        self.assertTrue("GRANT ALL PRIVILEGES ON *.*" in args[0].text)
        self.assertEqual(3, dbaas.LocalSqlClient.execute.call_count)

    def test_enable_root_failed(self):
        # An invalid user name must abort before any SQL is issued.
        models.MySQLUser._is_valid_user_name =\
            MagicMock(return_value=False)
        self.assertRaises(ValueError, self.mySqlAdmin.enable_root)

    def test_is_root_enable(self):
        self.mySqlAdmin.is_root_enabled()
        # Verify the root-detection query excludes the localhost account.
        args, _ = dbaas.LocalSqlClient.execute.call_args
        expected = ("""SELECT User FROM mysql.user WHERE User = 'root' """
                    """AND Host != 'localhost';""")
        self.assertTrue(expected in args[0].text,
                        "%s not in query." % expected)
def test_list_databases(self):
self.mySqlAdmin.list_databases()
args, _ = dbaas.LocalSqlClient.execute.call_args
@ -598,12 +616,12 @@ class MySqlAppInstallTest(MySqlAppTest):
def setUp(self):
super(MySqlAppInstallTest, self).setUp()
self.orig_create_engine = dbaas.create_engine
self.orig_create_engine = sqlalchemy.create_engine
self.orig_pkg_version = dbaas.pkg.pkg_version
def tearDown(self):
super(MySqlAppInstallTest, self).tearDown()
dbaas.create_engine = self.orig_create_engine
sqlalchemy.create_engine = self.orig_create_engine
dbaas.pkg.pkg_version = self.orig_pkg_version
def test_install(self):
@ -621,14 +639,14 @@ class MySqlAppInstallTest(MySqlAppTest):
self.mySqlApp._write_mycnf = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
dbaas.create_engine = Mock()
sqlalchemy.create_engine = Mock()
self.mySqlApp.secure(100)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assert_reported_status(ServiceStatuses.RUNNING)
self.assert_reported_status(ServiceStatuses.NEW)
def test_install_install_error(self):
@ -653,13 +671,14 @@ class MySqlAppInstallTest(MySqlAppTest):
Mock(side_effect=pkg.PkgPackageStateError("Install error"))
self.mysql_stops_successfully()
self.mysql_starts_successfully()
dbaas.create_engine = Mock()
sqlalchemy.create_engine = Mock()
self.assertRaises(pkg.PkgPackageStateError,
self.mySqlApp.secure, 100)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(ServiceStatuses.NEW)
def test_is_installed(self):
@ -675,6 +694,170 @@ class MySqlAppInstallTest(MySqlAppTest):
self.assertFalse(self.mySqlApp.is_installed())
class TextClauseMatcher(matchers.Matcher):
    """Mockito argument matcher for SQLAlchemy TextClause objects.

    Matches when the clause's ``.text`` contains the given substring,
    delegating the substring check to mockito's ``contains()`` matcher.
    """

    def __init__(self, text):
        self.contains = contains(text)

    def __repr__(self):
        return "TextClause(%s)" % self.contains.sub

    def matches(self, arg):
        # Debug aid: show the SQL text of each candidate argument.
        print "Matching", arg.text
        return self.contains.matches(arg.text)
def mock_sql_connection():
    """Stub the plain (non-admin) SQL engine and client.

    Returns the fake connection that ``LocalSqlClient`` yields as its
    context-manager value, so callers can stub ``execute()`` on it.
    """
    engine_stub = mock()
    when(sqlalchemy).create_engine("mysql://root:@localhost:3306",
                                   echo=True).thenReturn(engine_stub)
    conn_stub = mock()
    when(dbaas.LocalSqlClient).__enter__().thenReturn(conn_stub)
    when(dbaas.LocalSqlClient).__exit__(any(), any(), any()).thenReturn(None)
    return conn_stub
def mock_admin_sql_connection():
    """Stub the admin SQL engine, client, and password lookup.

    Stubs the sudo/awk call that reads the admin password, the admin
    SQLAlchemy engine, and ``LocalSqlClient``'s context manager.
    Returns the fake connection for per-test ``execute()`` stubbing.
    """
    when(utils).execute_with_timeout("sudo", "awk", any(), any()).thenReturn(
        ['fake_password', None])
    engine_stub = mock()
    when(sqlalchemy).create_engine("mysql://root:@localhost:3306",
                                   pool_recycle=any(), echo=True,
                                   listeners=[any()]).thenReturn(engine_stub)
    conn_stub = mock()
    when(dbaas.LocalSqlClient).__enter__().thenReturn(conn_stub)
    when(dbaas.LocalSqlClient).__exit__(any(), any(), any()).thenReturn(None)
    return conn_stub
class MySqlAppMockTest(testtools.TestCase):
    """Tests for MySqlApp.secure() with the filesystem and OS stubbed out."""

    @classmethod
    def stub_file(cls, filename):
        # Intercept open() for the given path and hand back a no-op file.
        return MySqlAppMockTest.StubFile(filename)

    class StubFile(object):
        """A do-nothing file object substituted for real config files."""

        def __init__(self, filename):
            # Registering in __init__ means constructing the stub is
            # enough to intercept subsequent open(filename, mode) calls.
            when(__builtin__).open(filename, any()).thenReturn(self)

        def next(self):
            # Iterating the stubbed file yields no lines.
            raise StopIteration

        def __iter__(self):
            return self

        def write(self, data):
            pass

        def close(self):
            pass

    def tearDown(self):
        super(MySqlAppMockTest, self).tearDown()
        unstub()

    def test_secure_with_mycnf_error(self):
        # A failed dbaas-mycnf install must propagate as
        # PkgPackageStateError after MySQL has been stopped.
        mock_conn = mock_sql_connection()
        when(mock_conn).execute(any()).thenReturn(None)
        when(utils).execute_with_timeout("sudo", any(str), "stop").thenReturn(
            None)
        when(pkg).pkg_install("dbaas-mycnf", any()).thenRaise(
            pkg.PkgPackageStateError("Install error"))
        # skip writing the file for now
        when(os.path).isfile(any()).thenReturn(False)
        mock_status = mock(MySqlAppStatus)
        when(mock_status).wait_for_real_status_to_change_to(
            any(), any(), any()).thenReturn(True)
        app = MySqlApp(mock_status)

        self.assertRaises(pkg.PkgPackageStateError, app.secure, 2048)

        # secure() ran some SQL, waited for SHUTDOWN, and nothing more.
        verify(mock_conn, atleast=2).execute(any())
        inorder.verify(mock_status).wait_for_real_status_to_change_to(
            ServiceStatuses.SHUTDOWN, any(), any())
        verifyNoMoreInteractions(mock_status)

    def test_secure_keep_root(self):
        # secure() must never touch the root account.
        mock_conn = mock_sql_connection()
        when(mock_conn).execute(any()).thenReturn(None)
        when(utils).execute_with_timeout("sudo", any(str), "stop").thenReturn(
            None)
        when(pkg).pkg_install("dbaas-mycnf", any()).thenReturn(None)
        # skip writing the file for now
        when(os.path).isfile(any()).thenReturn(False)
        when(utils).execute_with_timeout(
            "sudo", "chmod", any(), any()).thenReturn(None)
        # Stub every config file secure() reads or writes.
        MySqlAppMockTest.stub_file("/etc/mysql/my.cnf")
        MySqlAppMockTest.stub_file("/etc/dbaas/my.cnf/my.cnf.2048M")
        MySqlAppMockTest.stub_file("/tmp/my.cnf.tmp")
        mock_status = mock(MySqlAppStatus)
        when(mock_status).wait_for_real_status_to_change_to(
            any(), any(), any()).thenReturn(True)
        app = MySqlApp(mock_status)
        app.secure(2048)
        verify(mock_conn, never).execute(TextClauseMatcher('root'))
class MySqlRootStatusTest(testtools.TestCase):
    """Tests for MySqlRootAccess: detecting, enabling, and reporting
    MySQL root access.

    Fix: the original mixed instance calls (``MySqlRootAccess().x()``)
    with class-level calls (``MySqlRootAccess.x()``); the latter breaks
    for plain instance methods in Python 2. All calls now go through an
    instance, which works whether the methods are class or instance
    methods.
    """

    def tearDown(self):
        super(MySqlRootStatusTest, self).tearDown()
        # Drop every mockito stub installed by the tests.
        unstub()

    def test_root_is_enabled(self):
        mock_conn = mock_admin_sql_connection()
        mock_rs = mock()
        # One matching row => a non-localhost 'root' user exists.
        mock_rs.rowcount = 1
        when(mock_conn).execute(
            TextClauseMatcher(
                "User = 'root' AND Host != 'localhost'")).thenReturn(mock_rs)

        self.assertThat(MySqlRootAccess().is_root_enabled(), Is(True))

    def test_root_is_not_enabled(self):
        mock_conn = mock_admin_sql_connection()
        mock_rs = mock()
        # Zero rows => root has not been exposed beyond localhost.
        mock_rs.rowcount = 0
        when(mock_conn).execute(
            TextClauseMatcher(
                "User = 'root' AND Host != 'localhost'")).thenReturn(mock_rs)

        self.assertThat(MySqlRootAccess().is_root_enabled(), Equals(False))

    def test_enable_root(self):
        mock_conn = mock_admin_sql_connection()
        when(mock_conn).execute(any()).thenReturn(None)
        # invocation
        user_ser = MySqlRootAccess().enable_root()
        # verification: a serialized user comes back and the expected
        # CREATE / GRANT / UPDATE statements were issued.
        self.assertThat(user_ser, Not(Is(None)))
        verify(mock_conn).execute(TextClauseMatcher('CREATE USER'),
                                  user='root', host='%')
        verify(mock_conn).execute(TextClauseMatcher(
            'GRANT ALL PRIVILEGES ON *.*'))
        verify(mock_conn).execute(TextClauseMatcher('UPDATE mysql.user'))

    def test_enable_root_failed(self):
        # An invalid user name must abort with ValueError before any SQL.
        when(models.MySQLUser)._is_valid_user_name(any()).thenReturn(False)
        self.assertRaises(ValueError, MySqlAdmin().enable_root)

    def test_report_root_enabled(self):
        mock_db_api = mock()
        when(reddwarf.extensions.mysql.models).get_db_api().thenReturn(
            mock_db_api)
        # No prior history record exists, so a new one gets saved.
        when(mock_db_api).find_by(any(), id=None).thenReturn(None)
        root_history = RootHistory('x', 'root')
        when(mock_db_api).save(any(RootHistory)).thenReturn(root_history)

        # invocation
        history = MySqlRootAccess().report_root_enabled(ReddwarfContext())

        # verification
        self.assertThat(history, Is(root_history))
        verify(mock_db_api).save(any(RootHistory))
class InterrogatorTest(testtools.TestCase):
def setUp(self):

View File

@ -12,20 +12,27 @@
# License for the specific language governing permissions and limitations
# under the License
from reddwarf.guestagent.manager.mysql import Manager
import reddwarf.guestagent.manager.mysql as dbaas
from reddwarf.guestagent import volume
import testtools
from reddwarf.instance import models as rd_models
import os
from mock import Mock, MagicMock
from mockito import verify, when, unstub, any, mock, never
import testtools
from testtools.matchers import Is, Equals, Not
from reddwarf.common.context import ReddwarfContext
from reddwarf.guestagent.manager.mysql import Manager
import reddwarf.guestagent.manager.mysql_service as dbaas
from reddwarf.guestagent import backup
from reddwarf.guestagent.volume import VolumeDevice
class GuestAgentManagerTest(testtools.TestCase):
def setUp(self):
super(GuestAgentManagerTest, self).setUp()
self.context = Mock()
self.context = ReddwarfContext()
self.manager = Manager()
self.origin_MySqlAppStatus = dbaas.MySqlAppStatus
self.origin_os_path_exists = os.path.exists
@ -48,162 +55,137 @@ class GuestAgentManagerTest(testtools.TestCase):
dbaas.MySqlApp.stop_db = self.origin_stop_mysql
dbaas.MySqlApp.start_mysql = self.origin_start_mysql
dbaas.MySqlApp._install_mysql = self.origin_install_mysql
unstub()
def test_update_status(self):
dbaas.MySqlAppStatus.get = MagicMock()
mock_status = mock()
when(dbaas.MySqlAppStatus).get().thenReturn(mock_status)
self.manager.update_status(self.context)
self.assertEqual(1, dbaas.MySqlAppStatus.get.call_count)
def test_update_status_2(self):
self._setUp_MySqlAppStatus_get()
dbaas.MySqlAppStatus.update = MagicMock()
self.manager.update_status(self.context)
self.assertEqual(1, dbaas.MySqlAppStatus.update.call_count)
verify(dbaas.MySqlAppStatus).get()
verify(mock_status).update()
def test_create_database(self):
databases = Mock()
dbaas.MySqlAdmin.create_database = MagicMock()
self.manager.create_database(self.context, databases)
self.assertEqual(1, dbaas.MySqlAdmin.create_database.call_count)
when(dbaas.MySqlAdmin).create_database(['db1']).thenReturn(None)
self.manager.create_database(self.context, ['db1'])
verify(dbaas.MySqlAdmin).create_database(['db1'])
def test_create_user(self):
users = Mock()
dbaas.MySqlAdmin.create_user = MagicMock()
self.manager.create_user(self.context, users)
self.assertEqual(1, dbaas.MySqlAdmin.create_user.call_count)
when(dbaas.MySqlAdmin).create_user(['user1']).thenReturn(None)
self.manager.create_user(self.context, ['user1'])
verify(dbaas.MySqlAdmin).create_user(['user1'])
def test_delete_database(self):
databases = Mock()
dbaas.MySqlAdmin.delete_database = MagicMock()
databases = ['db1']
when(dbaas.MySqlAdmin).delete_database(databases).thenReturn(None)
self.manager.delete_database(self.context, databases)
self.assertEqual(1, dbaas.MySqlAdmin.delete_database.call_count)
verify(dbaas.MySqlAdmin).delete_database(databases)
def test_delete_user(self):
user = Mock()
dbaas.MySqlAdmin.delete_user = MagicMock()
user = ['user1']
when(dbaas.MySqlAdmin).delete_user(user).thenReturn(None)
self.manager.delete_user(self.context, user)
self.assertEqual(1, dbaas.MySqlAdmin.delete_user.call_count)
verify(dbaas.MySqlAdmin).delete_user(user)
def test_list_databases(self):
dbaas.MySqlAdmin.list_databases = MagicMock()
self.manager.list_databases(self.context)
self.assertEqual(1, dbaas.MySqlAdmin.list_databases.call_count)
when(dbaas.MySqlAdmin).list_databases(None, None,
False).thenReturn(['database1'])
databases = self.manager.list_databases(self.context)
self.assertThat(databases, Not(Is(None)))
self.assertThat(databases, Equals(['database1']))
verify(dbaas.MySqlAdmin).list_databases(None, None, False)
def test_list_users(self):
dbaas.MySqlAdmin.list_users = MagicMock()
self.manager.list_users(self.context)
self.assertEqual(1, dbaas.MySqlAdmin.list_users.call_count)
when(dbaas.MySqlAdmin).list_users(None, None,
False).thenReturn(['user1'])
users = self.manager.list_users(self.context)
self.assertThat(users, Equals(['user1']))
verify(dbaas.MySqlAdmin).list_users(None, None, False)
def test_enable_root(self):
dbaas.MySqlAdmin.enable_root = MagicMock()
self.manager.enable_root(self.context)
self.assertEqual(1, dbaas.MySqlAdmin.enable_root.call_count)
when(dbaas.MySqlAdmin).enable_root().thenReturn('user_id_stuff')
user_id = self.manager.enable_root(self.context)
self.assertThat(user_id, Is('user_id_stuff'))
verify(dbaas.MySqlAdmin).enable_root()
def test_is_root_enabled(self):
dbaas.MySqlAdmin.is_root_enabled = MagicMock()
self.manager.is_root_enabled(self.context)
self.assertEqual(1, dbaas.MySqlAdmin.is_root_enabled.call_count)
when(dbaas.MySqlAdmin).is_root_enabled().thenReturn(True)
is_enabled = self.manager.is_root_enabled(self.context)
self.assertThat(is_enabled, Is(True))
verify(dbaas.MySqlAdmin).is_root_enabled()
def test_create_backup(self):
when(backup).backup(self.context, 'backup_id_123').thenReturn(None)
# entry point
Manager().create_backup(self.context, 'backup_id_123')
# assertions
verify(backup).backup(self.context, 'backup_id_123')
def test_prepare_device_path_true(self):
self._prepare_dynamic()
def test_prepare_device_path_false(self):
self._prepare_dynamic(has_device_path=False)
self._prepare_dynamic(device_path=None)
def test_prepare_mysql_not_installed(self):
self._prepare_dynamic(is_mysql_installed=False)
def _prepare_dynamic(self, has_device_path=True, is_mysql_installed=True):
def test_prepare_mysql_from_backup(self):
self._prepare_dynamic(backup_id='backup_id_123abc')
if has_device_path:
COUNT = 1
else:
COUNT = 0
def test_prepare_mysql_from_backup_with_root(self):
self._prepare_dynamic(backup_id='backup_id_123abc',
is_root_enabled=True)
if is_mysql_installed:
SEC_COUNT = 1
else:
SEC_COUNT = 0
def _prepare_dynamic(self, device_path='/dev/vdb', is_mysql_installed=True,
backup_id=None, is_root_enabled=False):
self._setUp_MySqlAppStatus_get()
dbaas.MySqlAppStatus.begin_mysql_install = MagicMock()
volume.VolumeDevice.format = MagicMock()
volume.VolumeDevice.migrate_data = MagicMock()
volume.VolumeDevice.mount = MagicMock()
dbaas.MySqlApp.stop_db = MagicMock()
dbaas.MySqlApp.start_mysql = MagicMock()
dbaas.MySqlApp.install_if_needed = MagicMock()
dbaas.MySqlApp.secure = MagicMock()
self._prepare_mysql_is_installed(is_mysql_installed)
# covering all outcomes is starting to cause trouble here
COUNT = 1 if device_path else 0
SEC_COUNT = 1 if is_mysql_installed else 0
migrate_count = 1 * COUNT if not backup_id else 0
Manager.create_database = MagicMock()
Manager.create_user = MagicMock()
self.manager.prepare(self.context, Mock, Mock, Mock, has_device_path)
# TODO (juice) this should stub an instance of the MySqlAppStatus
mock_status = mock()
when(dbaas.MySqlAppStatus).get().thenReturn(mock_status)
when(mock_status).begin_mysql_install().thenReturn(None)
when(VolumeDevice).format().thenReturn(None)
when(VolumeDevice).migrate_data(any()).thenReturn(None)
when(VolumeDevice).mount().thenReturn(None)
when(dbaas.MySqlApp).stop_db().thenReturn(None)
when(dbaas.MySqlApp).start_mysql().thenReturn(None)
when(dbaas.MySqlApp).install_if_needed().thenReturn(None)
when(backup).restore(self.context, backup_id).thenReturn(None)
when(dbaas.MySqlApp).secure(any()).thenReturn(None)
when(dbaas.MySqlApp).secure_root().thenReturn(None)
when(dbaas.MySqlApp).is_installed().thenReturn(is_mysql_installed)
when(dbaas.MySqlAdmin).is_root_enabled().thenReturn(is_root_enabled)
when(dbaas.MySqlAdmin).create_user().thenReturn(None)
when(dbaas.MySqlAdmin).create_database().thenReturn(None)
when(dbaas.MySqlAdmin).report_root_enabled(self.context).thenReturn(
None)
self.assertEqual(1,
dbaas.MySqlAppStatus.begin_mysql_install.call_count)
when(os.path).exists(any()).thenReturn(is_mysql_installed)
# invocation
self.manager.prepare(context=self.context, databases=None,
memory_mb='2048', users=None,
device_path=device_path,
mount_point='/var/lib/mysql',
backup_id=backup_id)
# verification/assertion
verify(mock_status).begin_mysql_install()
self.assertEqual(COUNT, volume.VolumeDevice.format.call_count)
# now called internally in install_if_needed() which is a mock
#self.assertEqual(1, dbaas.MySqlApp.is_installed.call_count)
self.assertEqual(COUNT * SEC_COUNT,
dbaas.MySqlApp.stop_db.call_count)
self.assertEqual(COUNT * SEC_COUNT,
volume.VolumeDevice.migrate_data.call_count)
self.assertEqual(COUNT * SEC_COUNT,
dbaas.MySqlApp.start_mysql.call_count)
self.assertEqual(1,
dbaas.MySqlApp.install_if_needed.call_count)
self.assertEqual(1, dbaas.MySqlApp.secure.call_count)
self.assertEqual(1, Manager.create_database.call_count)
self.assertEqual(1, Manager.create_user.call_count)
def _prepare_mysql_is_installed(self, is_installed=True):
dbaas.MySqlApp.is_installed = MagicMock(return_value=is_installed)
os.path.exists = MagicMock()
dbaas.MySqlAppStatus._get_actual_db_status = MagicMock()
def path_exists_true(path):
if path == "/var/lib/mysql":
return True
else:
return False
def path_exists_false(path):
if path == "/var/lib/mysql":
return False
else:
return False
if is_installed:
os.path.exists.side_effect = path_exists_true
else:
os.path.exists.side_effect = path_exists_false
def test_restart(self):
self._setUp_MySqlAppStatus_get()
dbaas.MySqlApp.restart = MagicMock()
self.manager.restart(self.context)
self.assertEqual(1, dbaas.MySqlApp.restart.call_count)
def test_start_db_with_conf_changes(self):
updated_mem_size = Mock()
self._setUp_MySqlAppStatus_get()
dbaas.MySqlApp.start_db_with_conf_changes = MagicMock()
self.manager.start_db_with_conf_changes(self.context,
updated_mem_size)
self.assertEqual(1, dbaas.MySqlApp.
start_db_with_conf_changes.call_count)
def test_stop_mysql(self):
self._setUp_MySqlAppStatus_get()
dbaas.MySqlApp.stop_db = MagicMock()
self.manager.stop_db(self.context)
self.assertEqual(1, dbaas.MySqlApp.stop_db.call_count)
def _setUp_MySqlAppStatus_get(self):
dbaas.MySqlAppStatus = Mock()
dbaas.MySqlAppStatus.get = MagicMock(return_value=dbaas.MySqlAppStatus)
verify(VolumeDevice, times=COUNT).format()
verify(dbaas.MySqlApp, times=(COUNT * SEC_COUNT)).stop_db()
verify(VolumeDevice, times=(migrate_count * SEC_COUNT)).migrate_data(
any())
if backup_id:
verify(backup).restore(self.context, backup_id, '/var/lib/mysql')
verify(dbaas.MySqlApp).install_if_needed()
verify(dbaas.MySqlApp).secure('2048')
verify(dbaas.MySqlAdmin, never).create_database()
verify(dbaas.MySqlAdmin, never).create_user()
times_report = 1 if is_root_enabled else 0
times_reset_root = 1 if not backup_id or not is_root_enabled else 0
verify(dbaas.MySqlApp, times=times_reset_root).secure_root()
verify(dbaas.MySqlAdmin, times=times_report).report_root_enabled(
self.context)

View File

@ -0,0 +1,13 @@
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -0,0 +1,98 @@
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
import reddwarf.common.remote as remote
import testtools
import reddwarf.taskmanager.models as taskmanager_models
import reddwarf.backup.models as backup_models
from mockito import mock, when, unstub, any, verify, never
from swiftclient.client import ClientException
class BackupTasksTest(testtools.TestCase):
    """Tests for BackupTasks.delete_backup error handling against Swift.

    Fix: ``self.backup.name`` was assigned ``'backup_test',`` — the stray
    trailing comma made it a one-element tuple instead of a string.
    """

    def setUp(self):
        super(BackupTasksTest, self).setUp()
        # A representative backup record whose artifacts live in Swift.
        self.backup = backup_models.DBBackup()
        self.backup.id = 'backup_id'
        self.backup.name = 'backup_test'
        self.backup.description = 'test desc'
        self.backup.location = 'http://xxx/z_CLOUD/12e48.xbstream.gz'
        self.backup.instance_id = 'instance id'
        self.backup.created = 'yesterday'
        self.backup.updated = 'today'
        self.backup.state = backup_models.BackupState.NEW
        # Swift get_container() returns (headers, [object dicts]).
        self.container_content = (None,
                                  [{'name': 'first'},
                                   {'name': 'second'},
                                   {'name': 'third'}])
        when(backup_models.Backup).delete(any()).thenReturn(None)
        when(backup_models.Backup).get_by_id(
            self.backup.id).thenReturn(self.backup)
        when(self.backup).delete(any()).thenReturn(None)
        # Default Swift stub: head_* raise (object/container "missing"),
        # deletions succeed. Individual tests override as needed.
        self.swift_client = mock()
        when(remote).create_swift_client(
            any()).thenReturn(self.swift_client)
        when(self.swift_client).head_container(
            any()).thenRaise(ClientException("foo"))
        when(self.swift_client).head_object(
            any(), any()).thenRaise(ClientException("foo"))
        when(self.swift_client).get_container(any()).thenReturn(
            self.container_content)
        when(self.swift_client).delete_object(any(), any()).thenReturn(None)
        when(self.swift_client).delete_container(any()).thenReturn(None)

    def tearDown(self):
        super(BackupTasksTest, self).tearDown()
        unstub()

    def test_delete_backup_nolocation(self):
        # With no Swift location there is nothing to clean up remotely;
        # the DB record itself must still be deleted.
        self.backup.location = ''
        taskmanager_models.BackupTasks.delete_backup('dummy context',
                                                     self.backup.id)
        verify(self.backup).delete()

    def test_delete_backup_fail_delete_manifest(self):
        # The manifest object exists but deleting it fails: the record
        # must be kept and flagged FAILED.
        filename = self.backup.location[self.backup.location.rfind("/") + 1:]
        when(self.swift_client).delete_object(
            any(),
            filename).thenRaise(ClientException("foo"))
        when(self.swift_client).head_object(any(), any()).thenReturn(None)
        taskmanager_models.BackupTasks.delete_backup('dummy context',
                                                     self.backup.id)
        verify(backup_models.Backup, never).delete(self.backup.id)
        self.assertEqual(backup_models.BackupState.FAILED, self.backup.state,
                         "backup should be in FAILED status")

    def test_delete_backup_fail_delete_container(self):
        # The container exists but deleting it fails: keep the record,
        # flag FAILED.
        when(self.swift_client).delete_container(
            any()).thenRaise(ClientException("foo"))
        when(self.swift_client).head_container(any()).thenReturn(None)
        taskmanager_models.BackupTasks.delete_backup('dummy context',
                                                     self.backup.id)
        verify(backup_models.Backup, never).delete(self.backup.id)
        self.assertEqual(backup_models.BackupState.FAILED, self.backup.state,
                         "backup should be in FAILED status")

    def test_delete_backup_fail_delete_segment(self):
        # One segment ('second') cannot be deleted: keep the record,
        # flag FAILED.
        when(self.swift_client).delete_object(
            any(),
            'second').thenRaise(ClientException("foo"))
        when(self.swift_client).delete_container(
            any()).thenRaise(ClientException("foo"))
        when(self.swift_client).head_container(any()).thenReturn(None)
        taskmanager_models.BackupTasks.delete_backup('dummy context',
                                                     self.backup.id)
        verify(backup_models.Backup, never).delete(self.backup.id)
        self.assertEqual(backup_models.BackupState.FAILED, self.backup.state,
                         "backup should be in FAILED status")

View File

@ -13,16 +13,12 @@
# under the License.
import gettext
import os
import setuptools
import subprocess
from reddwarf.openstack.common import setup
gettext.install('reddwarf', unicode=1)
from reddwarf import version
from reddwarf.openstack.common import setup
from reddwarf.openstack.common.setup import write_git_changelog
requires = setup.parse_requirements()
depend_links = setup.parse_dependency_links()