Merge Postgresql service modules

Postgresql has been implemented differently from
all other guest agents.
This has been causing difficulties with maintenance
and future development of Trove design.
It has been decided (Newton midcycle) to bring the guest agent
in line with others.

This patch set merges all Postgres service modules into
App/Admin classes.

This also resolves 20 pylint issues related to mixin usage.

Change-Id: I1b12a5296c59e9d3b08bcf34ac196d16189d525e
This commit is contained in:
Petr Malik 2016-07-22 11:28:16 -04:00
parent 8abf48f3f6
commit 7fd8801d8d
18 changed files with 1378 additions and 1528 deletions

View File

@ -1101,126 +1101,6 @@
"No name 'NoHostAvailable' in module 'cassandra.cluster'",
null
],
[
"trove/guestagent/datastore/experimental/postgresql/service/access.py",
"E1101",
"Instance of 'PgSqlAccess' has no '_find_user' member",
"PgSqlAccess.list_access"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/access.py",
"no-member",
"Instance of 'PgSqlAccess' has no '_find_user' member",
"PgSqlAccess.list_access"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.apply_initial_guestagent_configuration"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.disable_backups"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.disable_debugging"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.enable_backups"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.enable_debugging"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.reset_configuration"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.start_db_with_conf_changes"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"E1101",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.update_overrides"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.apply_initial_guestagent_configuration"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.disable_backups"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.disable_debugging"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.enable_backups"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.enable_debugging"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.reset_configuration"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.start_db_with_conf_changes"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/config.py",
"no-member",
"Instance of 'PgSqlConfig' has no 'configuration_manager' member",
"PgSqlConfig.update_overrides"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/process.py",
"E1101",
"Instance of 'PgSqlProcess' has no 'set_guest_log_status' member",
"PgSqlProcess.restart"
],
[
"trove/guestagent/datastore/experimental/postgresql/service/process.py",
"no-member",
"Instance of 'PgSqlProcess' has no 'set_guest_log_status' member",
"PgSqlProcess.restart"
],
[
"trove/guestagent/datastore/experimental/redis/service.py",
"E0701",

View File

@ -18,22 +18,18 @@ import os
from oslo_log import log as logging
from .service.config import PgSqlConfig
from .service.database import PgSqlDatabase
from .service.install import PgSqlInstall
from .service.root import PgSqlRoot
from .service.status import PgSqlAppStatus
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as trove_instance
from trove.common.notification import EndNotification
from trove.common import utils
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore.experimental.postgresql.service import (
PgSqlAdmin)
from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp
from trove.guestagent.datastore import manager
from trove.guestagent.db import models
from trove.guestagent import dbaas
from trove.guestagent import guest_log
from trove.guestagent import volume
@ -42,47 +38,56 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Manager(
PgSqlDatabase,
PgSqlRoot,
PgSqlConfig,
PgSqlInstall,
manager.Manager
):
class Manager(manager.Manager):
PG_BUILTIN_ADMIN = 'postgres'
def __init__(self):
super(Manager, self).__init__('postgresql')
def __init__(self, manager_name='postgresql'):
super(Manager, self).__init__(manager_name)
self._app = None
self._admin = None
@property
def status(self):
return PgSqlAppStatus.get()
return self.app.status
@property
def app(self):
if self._app is None:
self._app = self.build_app()
return self._app
def build_app(self):
return PgSqlApp()
@property
def admin(self):
if self._admin is None:
self._admin = self.app.build_admin()
return self._admin
@property
def configuration_manager(self):
return self._configuration_manager
return self.app.configuration_manager
@property
def datastore_log_defs(self):
datastore_dir = '/var/log/postgresql/'
owner = self.app.pgsql_owner
long_query_time = CONF.get(self.manager).get(
'guest_log_long_query_time')
general_log_file = self.build_log_file_name(
self.GUEST_LOG_DEFS_GENERAL_LABEL, self.PGSQL_OWNER,
datastore_dir=datastore_dir)
self.GUEST_LOG_DEFS_GENERAL_LABEL, owner,
datastore_dir=self.app.pgsql_log_dir)
general_log_dir, general_log_filename = os.path.split(general_log_file)
return {
self.GUEST_LOG_DEFS_GENERAL_LABEL: {
self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
self.GUEST_LOG_USER_LABEL: self.PGSQL_OWNER,
self.GUEST_LOG_USER_LABEL: owner,
self.GUEST_LOG_FILE_LABEL: general_log_file,
self.GUEST_LOG_ENABLE_LABEL: {
'logging_collector': 'on',
'log_destination': self._quote('stderr'),
'log_directory': self._quote(general_log_dir),
'log_filename': self._quote(general_log_filename),
'log_statement': self._quote('all'),
'log_destination': self._quote_str('stderr'),
'log_directory': self._quote_str(general_log_dir),
'log_filename': self._quote_str(general_log_filename),
'log_statement': self._quote_str('all'),
'debug_print_plan': 'on',
'log_min_duration_statement': long_query_time,
},
@ -93,12 +98,125 @@ class Manager(
},
}
def _quote_str(self, value):
return "'%s'" % value
def grant_access(self, context, username, hostname, databases):
self.admin.grant_access(context, username, hostname, databases)
def revoke_access(self, context, username, hostname, database):
self.admin.revoke_access(context, username, hostname, database)
def list_access(self, context, username, hostname):
return self.admin.list_access(context, username, hostname)
def update_overrides(self, context, overrides, remove=False):
self.app.update_overrides(context, overrides, remove)
def apply_overrides(self, context, overrides):
self.app.apply_overrides(context, overrides)
def reset_configuration(self, context, configuration):
self.app.reset_configuration(context, configuration)
def start_db_with_conf_changes(self, context, config_contents):
self.app.start_db_with_conf_changes(context, config_contents)
def create_database(self, context, databases):
with EndNotification(context):
self.admin.create_database(context, databases)
def delete_database(self, context, database):
with EndNotification(context):
self.admin.delete_database(context, database)
def list_databases(
self, context, limit=None, marker=None, include_marker=False):
return self.admin.list_databases(
context, limit=limit, marker=marker, include_marker=include_marker)
def install(self, context, packages):
self.app.install(context, packages)
def stop_db(self, context, do_not_start_on_reboot=False):
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def restart(self, context):
self.app.restart()
self.set_guest_log_status(guest_log.LogStatus.Restart_Completed)
def pre_upgrade(self, context):
LOG.debug('Preparing Postgresql for upgrade.')
self.app.status.begin_restart()
self.app.stop_db()
mount_point = self.app.pgsql_base_data_dir
upgrade_info = self.app.save_files_pre_upgrade(mount_point)
upgrade_info['mount_point'] = mount_point
return upgrade_info
def post_upgrade(self, context, upgrade_info):
LOG.debug('Finalizing Postgresql upgrade.')
self.app.stop_db()
if 'device' in upgrade_info:
self.mount_volume(context, mount_point=upgrade_info['mount_point'],
device_path=upgrade_info['device'])
self.app.restore_files_post_upgrade(upgrade_info)
self.app.start_db()
def is_root_enabled(self, context):
return self.app.is_root_enabled(context)
def enable_root(self, context, root_password=None):
return self.app.enable_root(context, root_password=root_password)
def disable_root(self, context):
self.app.disable_root(context)
def enable_root_with_password(self, context, root_password=None):
return self.app.enable_root_with_password(
context,
root_password=root_password)
def create_user(self, context, users):
with EndNotification(context):
self.admin.create_user(context, users)
def list_users(
self, context, limit=None, marker=None, include_marker=False):
return self.admin.list_users(
context, limit=limit, marker=marker, include_marker=include_marker)
def delete_user(self, context, user):
with EndNotification(context):
self.admin.delete_user(context, user)
def get_user(self, context, username, hostname):
return self.admin.get_user(context, username, hostname)
def change_passwords(self, context, users):
with EndNotification(context):
self.admin.change_passwords(context, users)
def update_attributes(self, context, username, hostname, user_attrs):
with EndNotification(context):
self.admin.update_attributes(
context,
username,
hostname,
user_attrs)
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info, config_contents,
root_password, overrides, cluster_config, snapshot):
pgutil.PG_ADMIN = self.PG_BUILTIN_ADMIN
self.install(context, packages)
self.stop_db(context)
self.app.install(context, packages)
LOG.debug("Waiting for database first boot.")
if (self.app.status.wait_for_real_status_to_change_to(
trove_instance.ServiceStatuses.RUNNING,
CONF.state_change_wait_time,
False)):
LOG.debug("Stopping database prior to initial configuration.")
self.app.stop_db()
if device_path:
device = volume.VolumeDevice(device_path)
device.format()
@ -106,51 +224,46 @@ class Manager(
device.migrate_data(mount_point)
device.mount(mount_point)
self.configuration_manager.save_configuration(config_contents)
self.apply_initial_guestagent_configuration()
self.app.apply_initial_guestagent_configuration()
os_admin = models.PostgreSQLUser(self.app.ADMIN_USER)
if backup_info:
pgutil.PG_ADMIN = self.ADMIN_USER
backup.restore(context, backup_info, '/tmp')
self.app.set_current_admin_user(os_admin)
if snapshot:
LOG.info("Found snapshot info: " + str(snapshot))
self.attach_replica(context, snapshot, snapshot['config'])
self.start_db(context)
self.app.start_db()
if not backup_info:
self._secure(context)
self.app.secure(context)
self._admin = PgSqlAdmin(os_admin)
if not cluster_config and self.is_root_enabled(context):
self.status.report_root(context, 'postgres')
def _secure(self, context):
# Create a new administrative user for Trove and also
# disable the built-in superuser.
os_admin_db = models.PostgreSQLSchema(self.ADMIN_USER)
self._create_database(context, os_admin_db)
self._create_admin_user(context, databases=[os_admin_db])
pgutil.PG_ADMIN = self.ADMIN_USER
postgres = models.PostgreSQLRootUser()
self.alter_user(context, postgres, 'NOSUPERUSER', 'NOLOGIN')
self.status.report_root(context, self.app.default_superuser_name)
def create_backup(self, context, backup_info):
with EndNotification(context):
self.enable_backups()
self.app.enable_backups()
backup.backup(context, backup_info)
def backup_required_for_replication(self, context):
return self.replication.backup_required_for_replication()
def attach_replica(self, context, replica_info, slave_config):
self.replication.enable_as_slave(self, replica_info, None)
self.replication.enable_as_slave(self.app, replica_info, None)
def detach_replica(self, context, for_failover=False):
replica_info = self.replication.detach_slave(self, for_failover)
replica_info = self.replication.detach_slave(self.app, for_failover)
return replica_info
def enable_as_master(self, context, replica_source_config):
self.enable_backups()
self.replication.enable_as_master(self, None)
self.app.enable_backups()
self.replication.enable_as_master(self.app, None)
def make_read_only(self, context, read_only):
"""There seems to be no way to flag this at the database level in
@ -162,29 +275,30 @@ class Manager(
pass
def get_replica_context(self, context):
return self.replication.get_replica_context(None)
LOG.debug("Getting replica context.")
return self.replication.get_replica_context(self.app)
def get_latest_txn_id(self, context):
if self.pg_is_in_recovery():
lsn = self.pg_last_xlog_replay_location()
if self.app.pg_is_in_recovery():
lsn = self.app.pg_last_xlog_replay_location()
else:
lsn = self.pg_current_xlog_location()
LOG.info(_("Last xlog location found: %s") % lsn)
lsn = self.app.pg_current_xlog_location()
LOG.info("Last xlog location found: %s" % lsn)
return lsn
def get_last_txn(self, context):
master_host = self.pg_primary_host()
master_host = self.app.pg_primary_host()
repl_offset = self.get_latest_txn_id(context)
return master_host, repl_offset
def wait_for_txn(self, context, txn):
if not self.pg_is_in_recovery():
if not self.app.pg_is_in_recovery():
raise RuntimeError(_("Attempting to wait for a txn on a server "
"not in recovery mode!"))
def _wait_for_txn():
lsn = self.pg_last_xlog_replay_location()
LOG.info(_("Last xlog location found: %s") % lsn)
lsn = self.app.pg_last_xlog_replay_location()
LOG.info("Last xlog location found: %s" % lsn)
return lsn >= txn
try:
utils.poll_until(_wait_for_txn, time_out=120)
@ -193,32 +307,37 @@ class Manager(
"offset to change to '%s'.") % txn)
def cleanup_source_on_replica_detach(self, context, replica_info):
self.replication.cleanup_source_on_replica_detach()
LOG.debug("Calling cleanup_source_on_replica_detach")
self.replication.cleanup_source_on_replica_detach(self.app,
replica_info)
def demote_replication_master(self, context):
self.replication.demote_master(self)
LOG.debug("Calling demote_replication_master")
self.replication.demote_master(self.app)
def get_replication_snapshot(self, context, snapshot_info,
replica_source_config=None):
LOG.debug("Getting replication snapshot.")
self.enable_backups()
self.replication.enable_as_master(None, None)
self.app.enable_backups()
self.replication.enable_as_master(self.app, None)
snapshot_id, log_position = (
self.replication.snapshot_for_replication(context, None, None,
self.replication.snapshot_for_replication(context, self.app, None,
snapshot_info))
mount_point = CONF.get(self.manager).mount_point
volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
volume_stats = self.get_filesystem_stats(context, mount_point)
replication_snapshot = {
'dataset': {
'datastore_manager': self.manager,
'dataset_size': volume_stats.get('used', 0.0),
'volume_size': volume_stats.get('total', 0.0),
'snapshot_id': snapshot_id
},
'replication_strategy': self.replication_strategy,
'master': self.replication.get_master_ref(None, snapshot_info),
'master': self.replication.get_master_ref(self.app, snapshot_info),
'log_position': log_position
}

View File

@ -1,4 +1,6 @@
# Copyright (c) 2013 OpenStack Foundation
# Copyright (c) 2016 Tesora, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,76 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import psycopg2
from trove.common import exception
from trove.common.i18n import _
PG_ADMIN = 'os_admin'
class PostgresConnection(object):
    """Lightweight wrapper around a psycopg2 connection.

    A fresh connection is opened for every statement executed; connection
    keyword arguments are captured at construction time and passed through
    to psycopg2.connect() unchanged.
    """

    def __init__(self, autocommit=False, **connection_args):
        # Remember the session mode and the raw psycopg2 connect arguments.
        self._autocommit = autocommit
        self._connection_args = connection_args

    def execute(self, statement, identifiers=None, data_values=None):
        """Execute a non-returning statement.
        """
        self._execute_stmt(statement, identifiers, data_values, False)

    def query(self, query, identifiers=None, data_values=None):
        """Execute a query and return the result set.
        """
        return self._execute_stmt(query, identifiers, data_values, True)

    def _execute_stmt(self, statement, identifiers, data_values, fetch):
        # Guard clause: reject empty/None statements up front.
        if not statement:
            raise exception.UnprocessableEntity(_("Invalid SQL statement: %s")
                                                % statement)
        with psycopg2.connect(**self._connection_args) as connection:
            connection.autocommit = self._autocommit
            with connection.cursor() as cursor:
                cursor.execute(
                    self._bind(statement, identifiers), data_values)
                if fetch:
                    return cursor.fetchall()

    def _bind(self, statement, identifiers):
        # Interpolate SQL identifiers (names), leaving data values to the
        # driver's parameter binding.
        return statement.format(*identifiers) if identifiers else statement
class PostgresLocalhostConnection(PostgresConnection):
    """Connection to a Postgres server over the localhost interface.

    Convenience subclass that pins the host to 'localhost' and exposes
    user/password/port as explicit constructor parameters.
    """

    # Host is fixed; remote access goes through a different code path.
    HOST = 'localhost'

    def __init__(self, user, password=None, port=5432, autocommit=False):
        super(PostgresLocalhostConnection, self).__init__(
            autocommit=autocommit, user=user, password=password,
            host=self.HOST, port=port)
# TODO(pmalik): No need to recreate the connection every time.
def psql(statement, timeout=30):
    """Execute a non-returning statement (usually DDL);
    Turn autocommit ON (this is necessary for statements that cannot run
    within an implicit transaction, like CREATE DATABASE).

    NOTE(review): 'timeout' is accepted but never used by the connection —
    confirm whether a statement timeout should actually be enforced here.
    """
    # Connects as the module-level PG_ADMIN account on every call.
    return PostgresLocalhostConnection(
        PG_ADMIN, autocommit=True).execute(statement)
# TODO(pmalik): No need to recreate the connection every time.
def query(query, timeout=30):
    """Execute a query and return the result set.

    NOTE(review): 'timeout' is accepted but never used by the connection —
    confirm whether a statement timeout should actually be enforced here.
    """
    # Autocommit stays OFF: reads run inside an implicit transaction.
    return PostgresLocalhostConnection(
        PG_ADMIN, autocommit=False).query(query)
class DatabaseQuery(object):

File diff suppressed because it is too large Load Diff

View File

@ -1,84 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.guestagent.datastore.experimental.postgresql import pgutil
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PgSqlAccess(object):
    """Mixin implementing the user-access API calls.

    NOTE: 'list_access' relies on a '_find_user' helper supplied by a
    sibin mixin of the composed guest-agent class; this class is not
    usable on its own.
    """

    def grant_access(self, context, username, hostname, databases):
        """Give a user permission to use each of the given databases.

        The username and hostname parameters are strings (hostname is
        unused by Postgres but kept for API symmetry).
        The databases parameter is a list of database-name strings.
        """
        for db_name in databases:
            message = _("{guest_id}: Granting user ({user}) access to database "
                        "({database}).").format(guest_id=CONF.guest_id,
                                                user=username,
                                                database=db_name)
            LOG.info(message)
            grant_stmt = pgutil.AccessQuery.grant(user=username,
                                                  database=db_name)
            pgutil.psql(grant_stmt, timeout=30)

    def revoke_access(self, context, username, hostname, database):
        """Revoke a user's permission to use a given database.

        The username and hostname parameters are strings.
        The database parameter is a database-name string.
        """
        message = _("{guest_id}: Revoking user ({user}) access to database"
                    "({database}).").format(guest_id=CONF.guest_id,
                                            user=username,
                                            database=database)
        LOG.info(message)
        revoke_stmt = pgutil.AccessQuery.revoke(user=username,
                                                database=database)
        pgutil.psql(revoke_stmt, timeout=30)

    def list_access(self, context, username, hostname):
        """List the databases to which the given user has access.

        Return a list of serialized Postgres databases; raise
        UserNotFound when the user does not exist.
        """
        found = self._find_user(context, username)
        if found is None:
            raise exception.UserNotFound(username)
        return found.databases

View File

@ -1,243 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import os
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common.stream_codecs import PropertiesCodec
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.postgresql.service.process import(
PgSqlProcess)
from trove.guestagent.datastore.experimental.postgresql.service.status import(
PgSqlAppStatus)
from trove.guestagent.datastore.experimental.postgresql import pgutil
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
BACKUP_CFG_OVERRIDE = 'PgBaseBackupConfig'
DEBUG_MODE_OVERRIDE = 'DebugLevelOverride'
class PgSqlConfig(PgSqlProcess):
    """Mixin that implements the config API.

    This mixin has a dependency on the PgSqlProcess mixin.
    """

    OS = operating_system.get_os()
    # Distribution-specific root of the PostgreSQL configuration tree.
    CONFIG_BASE = {
        operating_system.DEBIAN: '/etc/postgresql/',
        operating_system.REDHAT: '/var/lib/postgresql/',
        operating_system.SUSE: '/var/lib/pgsql/'}[OS]
    LISTEN_ADDRESSES = ['*']  # Listen on all available IP (v4/v6) interfaces.

    def __init__(self, *args, **kwargs):
        super(PgSqlConfig, self).__init__(*args, **kwargs)

        # Manage postgresql.conf with a one-file override strategy whose
        # revision files live next to the main configuration file.
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(self.pgsql_config),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self._configuration_manager = ConfigurationManager(
            self.pgsql_config, self.PGSQL_OWNER, self.PGSQL_OWNER,
            PropertiesCodec(
                delimiter='=',
                string_mappings={'on': True, 'off': False, "''": None}),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

    @property
    def pgsql_extra_bin_dir(self):
        """Redhat and Ubuntu packages for PgSql do not place 'extra' important
        binaries in /usr/bin, but rather in a directory like /usr/pgsql-9.4/bin
        in the case of PostgreSQL 9.4 for RHEL/CentOS
        """
        version = self.pg_version[1]
        return {operating_system.DEBIAN: '/usr/lib/postgresql/%s/bin',
                operating_system.REDHAT: '/usr/pgsql-%s/bin',
                operating_system.SUSE: '/usr/bin'}[self.OS] % version

    @property
    def pgsql_config(self):
        """Path to the main postgresql.conf file."""
        return self._find_config_file('postgresql.conf')

    @property
    def pgsql_hba_config(self):
        """Path to the host-based-authentication rules file."""
        return self._find_config_file('pg_hba.conf')

    @property
    def pgsql_ident_config(self):
        """Path to the user-name mapping file."""
        return self._find_config_file('pg_ident.conf')

    def _find_config_file(self, name_pattern):
        # Search the version-specific config dir recursively; the shortest
        # matching path (the least nested file) wins.
        version_base = guestagent_utils.build_file_path(self.CONFIG_BASE,
                                                        self.pg_version[1])
        return sorted(operating_system.list_files_in_directory(
            version_base, recursive=True, pattern=name_pattern,
            as_root=True), key=len)[0]

    def update_overrides(self, context, overrides, remove=False):
        """Apply or remove user-level configuration overrides."""
        if remove:
            self.configuration_manager.remove_user_override()
        elif overrides:
            self.configuration_manager.apply_user_override(overrides)

    def apply_overrides(self, context, overrides):
        # Send a signal to the server, causing configuration files to be
        # reloaded by all server processes.
        # Active queries or connections to the database will not be
        # interrupted.
        #
        # NOTE: Do not use the 'SET' command as it only affects the current
        # session.
        pgutil.psql("SELECT pg_reload_conf()")

    def reset_configuration(self, context, configuration):
        """Reset the PgSql configuration to the one given.
        """
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)

    def start_db_with_conf_changes(self, context, config_contents):
        """Starts the PgSql instance with a new configuration."""
        if PgSqlAppStatus.get().is_running:
            raise RuntimeError(_("The service is still running."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db(context)

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """
        LOG.debug("Applying initial guestagent configuration.")
        file_locations = {
            'data_directory': self._quote(self.pgsql_data_dir),
            'hba_file': self._quote(self.pgsql_hba_config),
            'ident_file': self._quote(self.pgsql_ident_config),
            'external_pid_file': self._quote(self.PID_FILE),
            'unix_socket_directories': self._quote(self.UNIX_SOCKET_DIR),
            'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)),
            'port': CONF.postgresql.postgresql_port}
        self.configuration_manager.apply_system_override(file_locations)
        self._apply_access_rules()

    @staticmethod
    def _quote(value):
        # Postgres configuration values must be single-quoted.
        return "'%s'" % value

    def _apply_access_rules(self):
        LOG.debug("Applying database access rules.")

        # Connections to all resources are granted.
        #
        # Local access from administrative users is implicitly trusted.
        #
        # Remote access from the Trove's account is always rejected as
        # it is not needed and could be used by malicious users to hijack the
        # instance.
        #
        # Connections from other accounts always require a double-MD5-hashed
        # password.
        #
        # Make the rules readable only by the Postgres service.
        #
        # NOTE: The order of entries is important.
        # The first failure to authenticate stops the lookup.
        # That is why the 'local' connections validate first.
        # The OrderedDict is necessary to guarantee the iteration order.
        access_rules = OrderedDict(
            [('local', [['all', 'postgres,os_admin', None, 'trust'],
                        ['all', 'all', None, 'md5'],
                        ['replication', 'postgres,os_admin', None, 'trust']]),
             ('host', [['all', 'postgres,os_admin', '127.0.0.1/32', 'trust'],
                       ['all', 'postgres,os_admin', '::1/128', 'trust'],
                       ['all', 'postgres,os_admin', 'localhost', 'trust'],
                       ['all', 'os_admin', '0.0.0.0/0', 'reject'],
                       ['all', 'os_admin', '::/0', 'reject'],
                       ['all', 'all', '0.0.0.0/0', 'md5'],
                       ['all', 'all', '::/0', 'md5']])
             ])
        operating_system.write_file(self.pgsql_hba_config, access_rules,
                                    PropertiesCodec(
                                        string_mappings={'\t': None}),
                                    as_root=True)
        operating_system.chown(self.pgsql_hba_config,
                               self.PGSQL_OWNER, self.PGSQL_OWNER,
                               as_root=True)
        operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO,
                               as_root=True)

    def disable_backups(self):
        """Reverse overrides applied by PgBaseBackup strategy"""
        if not self.configuration_manager.has_system_override(
                BACKUP_CFG_OVERRIDE):
            return
        LOG.info(_("Removing configuration changes for backups"))
        self.configuration_manager.remove_system_override(BACKUP_CFG_OVERRIDE)
        self.remove_wal_archive_dir()
        self.restart(context=None)

    def enable_backups(self):
        """Apply necessary changes to config to enable WAL-based backups
        if we are using the PgBaseBackup strategy
        """
        if not CONF.postgresql.backup_strategy == 'PgBaseBackup':
            return
        if self.configuration_manager.has_system_override(BACKUP_CFG_OVERRIDE):
            return
        LOG.info(_("Applying changes to WAL config for use by base backups"))
        wal_arch_loc = CONF.postgresql.wal_archive_location
        if not os.path.isdir(wal_arch_loc):
            raise RuntimeError(_("Cannot enable backup as WAL dir '%s' does "
                                 "not exist.") % wal_arch_loc)
        arch_cmd = "'test ! -f {wal_arch}/%f && cp %p {wal_arch}/%f'".format(
            wal_arch=wal_arch_loc
        )
        # NOTE(review): the trailing spaces in 'archive_mode ' and
        # 'checkpoint_segments ' look accidental but are preserved here —
        # confirm whether the override codec depends on them.
        opts = {
            'wal_level': 'hot_standby',
            'archive_mode ': 'on',
            'max_wal_senders': 8,
            'checkpoint_segments ': 8,
            'wal_keep_segments': 8,
            'archive_command': arch_cmd
        }
        # BUG FIX: the original wrote "in ('9.3')" — ('9.3') is just the
        # string '9.3', so this was a substring test, not a tuple-membership
        # test. Use a one-element tuple to compare against version 9.3 only
        # (wal_log_hints is unavailable before PostgreSQL 9.4).
        if self.pg_version[1] not in ('9.3',):
            opts['wal_log_hints'] = 'on'
        self.configuration_manager.apply_system_override(
            opts, BACKUP_CFG_OVERRIDE)
        self.restart(None)

    def disable_debugging(self, level=1):
        """Disable debug-level logging in postgres"""
        self.configuration_manager.remove_system_override(DEBUG_MODE_OVERRIDE)

    def enable_debugging(self, level=1):
        """Enable debug-level logging in postgres"""
        opt = {'log_min_messages': 'DEBUG%s' % level}
        self.configuration_manager.apply_system_override(opt,
                                                         DEBUG_MODE_OVERRIDE)

View File

@ -1,112 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common.notification import EndNotification
from trove.guestagent.common import guestagent_utils
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.db import models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PgSqlDatabase(object):
    """Mixin implementing the database (schema) API calls."""

    def __init__(self, *args, **kwargs):
        super(PgSqlDatabase, self).__init__(*args, **kwargs)

    def create_database(self, context, databases):
        """Create the list of specified databases.

        The databases parameter is a list of serialized Postgres databases.
        """
        with EndNotification(context):
            for serialized in databases:
                schema = models.PostgreSQLSchema.deserialize_schema(serialized)
                self._create_database(context, schema)

    def _create_database(self, context, database):
        """Create a single database.

        :param database:          Database to be created.
        :type database:           PostgreSQLSchema
        """
        LOG.info(
            _("{guest_id}: Creating database {name}.").format(
                guest_id=CONF.guest_id,
                name=database.name,
            )
        )
        create_stmt = pgutil.DatabaseQuery.create(
            name=database.name,
            encoding=database.character_set,
            collation=database.collate,
        )
        pgutil.psql(create_stmt, timeout=30)

    def delete_database(self, context, database):
        """Delete the specified database.
        """
        with EndNotification(context):
            schema = models.PostgreSQLSchema.deserialize_schema(database)
            self._drop_database(schema)

    def _drop_database(self, database):
        """Drop a given Postgres database.

        :param database:          Database to be dropped.
        :type database:           PostgreSQLSchema
        """
        LOG.info(
            _("{guest_id}: Dropping database {name}.").format(
                guest_id=CONF.guest_id,
                name=database.name,
            )
        )
        drop_stmt = pgutil.DatabaseQuery.drop(name=database.name)
        pgutil.psql(drop_stmt, timeout=30)

    def list_databases(self, context, limit=None, marker=None,
                       include_marker=False):
        """List all databases on the instance.

        Return a paginated list of serialized Postgres databases.
        """
        return guestagent_utils.serialize_list(
            self._get_databases(),
            limit=limit, marker=marker, include_marker=include_marker)

    def _get_databases(self):
        """Return all non-system Postgres databases on the instance."""
        rows = pgutil.query(
            pgutil.DatabaseQuery.list(ignore=cfg.get_ignored_dbs()),
            timeout=30,
        )
        return [models.PostgreSQLSchema(row[0].strip(),
                                        character_set=row[1],
                                        collate=row[2])
                for row in rows]

View File

@ -1,90 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.guestagent.datastore.experimental.postgresql.service.process import(
PgSqlProcess)
from trove.guestagent import pkg
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PgSqlInstall(PgSqlProcess):
    """Mixin class that provides a PgSql installer.

    This mixin has a dependency on the PgSqlProcess mixin.
    """

    def __init__(self, *args, **kwargs):
        super(PgSqlInstall, self).__init__(*args, **kwargs)

    def install(self, context, packages):
        """Install one or more packages that postgresql needs to run.

        The packages parameter is a string representing the package names
        that should be given to the system's package manager.
        """
        LOG.debug(
            "{guest_id}: Beginning PgSql package installation.".format(
                guest_id=CONF.guest_id
            )
        )
        PgSqlProcess.recreate_wal_archive_dir()

        packager = pkg.Package()
        if packager.pkg_is_installed(packages):
            # Already installed; just make sure the service is running.
            self.start_db(context)
        else:
            self._install_packages(packager, packages)

        LOG.debug(
            "{guest_id}: Completed package installation.".format(
                guest_id=CONF.guest_id,
            )
        )

    def _install_packages(self, packager, packages):
        """Run the package manager, logging and re-raising any failure."""
        try:
            LOG.info(
                _("{guest_id}: Installing ({packages}).").format(
                    guest_id=CONF.guest_id,
                    packages=packages,
                )
            )
            packager.pkg_install(packages, {}, 1000)
        except (pkg.PkgAdminLockError, pkg.PkgPermissionError,
                pkg.PkgPackageStateError, pkg.PkgNotFoundError,
                pkg.PkgTimeout, pkg.PkgScriptletError,
                pkg.PkgDownloadError, pkg.PkgSignError,
                pkg.PkgBrokenError):
            LOG.exception(
                "{guest_id}: There was a package manager error while "
                "trying to install ({packages}).".format(
                    guest_id=CONF.guest_id,
                    packages=packages,
                )
            )
            raise
        except Exception:
            LOG.exception(
                "{guest_id}: The package manager encountered an unknown "
                "error while trying to install ({packages}).".format(
                    guest_id=CONF.guest_id,
                    packages=packages,
                )
            )
            raise

View File

@ -1,125 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from trove.common import cfg
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore.experimental.postgresql.service.status import (
PgSqlAppStatus)
from trove.guestagent import guest_log
CONF = cfg.CONF
class PgSqlProcess(object):
    """Mixin that manages the PgSql process."""

    SERVICE_CANDIDATES = ["postgresql"]
    PGSQL_OWNER = 'postgres'
    DATA_BASE = '/var/lib/postgresql/'
    PID_FILE = '/var/run/postgresql/postgresql.pid'
    UNIX_SOCKET_DIR = '/var/run/postgresql/'

    @property
    def pgsql_data_dir(self):
        """Path of the data directory (parent of the PG_VERSION file)."""
        return os.path.dirname(self.pg_version[0])

    @property
    def pgsql_recovery_config(self):
        """Path of the recovery.conf file inside the data directory."""
        return os.path.join(self.pgsql_data_dir, "recovery.conf")

    @property
    def pg_version(self):
        """Find the database version file stored in the data directory.

        :returns: A tuple with the path to the version file
                  (in the root of the data directory) and the version string.
        """
        version_files = operating_system.list_files_in_directory(
            self.DATA_BASE, recursive=True, pattern='PG_VERSION', as_root=True)
        # The shortest path is the PG_VERSION file closest to the root of
        # the data directory tree.
        version_file = sorted(version_files, key=len)[0]
        version = operating_system.read_file(version_file, as_root=True)
        return version_file, version.strip()

    def restart(self, context):
        """Restart the database service and mark guest logs restarted."""
        PgSqlAppStatus.get().restart_db_service(
            self.SERVICE_CANDIDATES, CONF.state_change_wait_time)
        # NOTE(review): set_guest_log_status is provided by another mixin
        # this class is combined with.
        self.set_guest_log_status(guest_log.LogStatus.Restart_Completed)

    def start_db(self, context, enable_on_boot=True, update_db=False):
        """Start the database service."""
        PgSqlAppStatus.get().start_db_service(
            self.SERVICE_CANDIDATES, CONF.state_change_wait_time,
            enable_on_boot=enable_on_boot, update_db=update_db)

    def stop_db(self, context, do_not_start_on_reboot=False, update_db=False):
        """Stop the database service."""
        PgSqlAppStatus.get().stop_db_service(
            self.SERVICE_CANDIDATES, CONF.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def pg_checkpoint(self):
        """Wrapper for CHECKPOINT call"""
        pgutil.psql("CHECKPOINT")

    def pg_current_xlog_location(self):
        """Wrapper for pg_current_xlog_location()

        Cannot be used against a running slave
        """
        r = pgutil.query("SELECT pg_current_xlog_location()")
        return r[0][0]

    def pg_last_xlog_replay_location(self):
        """Wrapper for pg_last_xlog_replay_location()

        For use on standby servers
        """
        r = pgutil.query("SELECT pg_last_xlog_replay_location()")
        return r[0][0]

    def pg_is_in_recovery(self):
        """Wrapper for pg_is_in_recovery() for detecting a server in
        standby mode
        """
        r = pgutil.query("SELECT pg_is_in_recovery()")
        return r[0][0]

    def pg_primary_host(self):
        """There seems to be no way to programmatically determine this
        on a hot standby, so grab what we have written to the recovery
        file
        """
        r = operating_system.read_file(self.pgsql_recovery_config,
                                       as_root=True)
        # Use a raw string and escape the dots: the previous pattern
        # "host=(\d+.\d+.\d+.\d+) " let '.' match any character (and relied
        # on an invalid escape sequence in a non-raw string).
        regexp = re.compile(r"host=(\d+\.\d+\.\d+\.\d+) ")
        m = regexp.search(r)
        return m.group(1)

    @classmethod
    def recreate_wal_archive_dir(cls):
        """Remove and re-create an empty WAL archive directory."""
        wal_archive_dir = CONF.postgresql.wal_archive_location
        operating_system.remove(wal_archive_dir, force=True, recursive=True,
                                as_root=True)
        operating_system.create_directory(wal_archive_dir,
                                          user=cls.PGSQL_OWNER,
                                          group=cls.PGSQL_OWNER,
                                          force=True, as_root=True)

    @classmethod
    def remove_wal_archive_dir(cls):
        """Remove the WAL archive directory entirely."""
        wal_archive_dir = CONF.postgresql.wal_archive_location
        operating_system.remove(wal_archive_dir, force=True, recursive=True,
                                as_root=True)

View File

@ -1,90 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore.experimental.postgresql.service.users import (
PgSqlUsers)
from trove.guestagent.db import models
class PgSqlRoot(PgSqlUsers):
    """Mixin that provides the root-enable API."""

    def __init__(self, *args, **kwargs):
        super(PgSqlRoot, self).__init__(*args, **kwargs)

    def is_root_enabled(self, context):
        """Return True if there is a superuser account enabled.
        """
        results = pgutil.query(
            pgutil.UserQuery.list_root(),
            timeout=30,
        )

        # There should be only one superuser (Trove's administrative account).
        # NOTE(review): assumes the query always returns at least one row
        # (Trove's admin); results[0] would raise IndexError on an empty
        # result set — verify against UserQuery.list_root().
        return len(results) > 1 or (results[0][0] != self.ADMIN_USER)

    # TODO(pmalik): For future use by 'root-disable'.
    # def disable_root(self, context):
    #     """Generate a new random password for the public superuser account.
    #     Do not disable its access rights. Once enabled the account should
    #     stay that way.
    #     """
    #     self.enable_root(context)

    def enable_root(self, context, root_password=None):
        """Create a superuser user or reset the superuser password.

        The default PostgreSQL administration account is 'postgres'.
        This account always exists and cannot be removed.
        Its attributes and access can however be altered.
        Clients can connect from the localhost or remotely via TCP/IP:
        Local clients (e.g. psql) can connect from a preset *system* account
        called 'postgres'.
        This system account has no password and is *locked* by default,
        so that it can be used by *local* users only.
        It should *never* be enabled (or its password set)!!!
        That would just open up a new attack vector on the system account.
        Remote clients should use a build-in *database* account of the same
        name. It's password can be changed using the "ALTER USER" statement.
        Access to this account is disabled by Trove exposed only once the
        superuser access is requested.

        Trove itself creates its own administrative account.

            {"_name": "postgres", "_password": "<secret>"}
        """
        user = models.PostgreSQLRootUser(password=root_password)
        # Grant the full administrative option set to the 'postgres'
        # database account and (re)set its password.
        query = pgutil.UserQuery.alter_user(
            user.name,
            user.password,
            None,
            *self.ADMIN_OPTIONS
        )
        pgutil.psql(query, timeout=30)
        return user.serialize()

    def disable_root(self, context):
        """Generate a new random password for the public superuser account.

        Do not disable its access rights. Once enabled the account should
        stay that way.
        """
        # Re-enabling with no explicit password makes the model generate a
        # fresh random one.
        self.enable_root(context)

    def enable_root_with_password(self, context, root_password=None):
        """Enable root access using the given (or a generated) password."""
        return self.enable_root(context, root_password)

View File

@ -1,49 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import psycopg2
from trove.common.i18n import _
from trove.common import instance
from trove.common import utils
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore import service
LOG = logging.getLogger(__name__)
class PgSqlAppStatus(service.BaseDbStatus):
    """Singleton reporting the actual status of the Postgres service."""

    @classmethod
    def get(cls):
        """Return the singleton instance, creating it on first use."""
        if not cls._instance:
            cls._instance = PgSqlAppStatus()
        return cls._instance

    def _get_actual_db_status(self):
        """Probe the server with a trivial query and map the outcome.

        :returns: One of the instance.ServiceStatuses values.
        """
        # NOTE: the trailing 'return SHUTDOWN' present here previously was
        # unreachable (every path below returns) and has been removed.
        try:
            # Any query will initiate a new database connection.
            pgutil.psql("SELECT 1")
            return instance.ServiceStatuses.RUNNING
        except psycopg2.OperationalError:
            # Could not establish a connection at all.
            return instance.ServiceStatuses.SHUTDOWN
        except utils.Timeout:
            # Server is up but did not answer in time.
            return instance.ServiceStatuses.BLOCKED
        except Exception:
            LOG.exception(_("Error getting Postgres status."))
            return instance.ServiceStatuses.CRASHED

View File

@ -1,316 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common.notification import EndNotification
from trove.common import utils
from trove.guestagent.common import guestagent_utils
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore.experimental.postgresql.service.access import (
PgSqlAccess)
from trove.guestagent.db import models
from trove.guestagent.db.models import PostgreSQLSchema
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PgSqlUsers(PgSqlAccess):
    """Mixin implementing the user CRUD API.

    This mixin has a dependency on the PgSqlAccess mixin.
    """

    @property
    def ADMIN_USER(self):
        """Trove's administrative user."""
        return 'os_admin'

    @property
    def ADMIN_OPTIONS(self):
        """Default set of options of an administrative account."""
        return [
            'SUPERUSER',
            'CREATEDB',
            'CREATEROLE',
            'INHERIT',
            'REPLICATION',
            'LOGIN']

    def _create_admin_user(self, context, databases=None):
        """Create an administrative user for Trove.

        Force password encryption.

        :param databases: Optional databases to grant the admin access to.
        """
        password = utils.generate_random_password()
        os_admin = models.PostgreSQLUser(self.ADMIN_USER, password)
        if databases:
            os_admin.databases.extend([db.serialize() for db in databases])
        # 'True' forces encrypted password storage for the admin account.
        self._create_user(context, os_admin, True, *self.ADMIN_OPTIONS)

    def create_user(self, context, users):
        """Create users and grant privileges for the specified databases.

        The users parameter is a list of serialized Postgres users.
        """
        with EndNotification(context):
            for user in users:
                self._create_user(
                    context,
                    models.PostgreSQLUser.deserialize_user(user), None)

    def _create_user(self, context, user, encrypt_password=None, *options):
        """Create a user and grant privileges for the specified databases.

        :param user:              User to be created.
        :type user:               PostgreSQLUser

        :param encrypt_password:  Store passwords encrypted if True.
                                  Fallback to configured default
                                  behavior if None.
        :type encrypt_password:   boolean

        :param options:           Other user options.
        :type options:            list
        """
        # Log the WITH clause with a placeholder so the password never
        # reaches the logs.
        LOG.info(
            _("{guest_id}: Creating user {user} {with_clause}.")
            .format(
                guest_id=CONF.guest_id,
                user=user.name,
                with_clause=pgutil.UserQuery._build_with_clause(
                    '<SANITIZED>',
                    encrypt_password,
                    *options
                ),
            )
        )
        pgutil.psql(
            pgutil.UserQuery.create(
                user.name,
                user.password,
                encrypt_password,
                *options
            ),
            timeout=30,
        )
        self._grant_access(
            context, user.name,
            [PostgreSQLSchema.deserialize_schema(db) for db in user.databases])

    def _grant_access(self, context, username, databases):
        # Delegates to PgSqlAccess.grant_access; hostname is not used by
        # Postgres, hence None.
        self.grant_access(
            context,
            username,
            None,
            [db.name for db in databases],
        )

    def list_users(
        self,
        context,
        limit=None,
        marker=None,
        include_marker=False,
    ):
        """List all users on the instance along with their access permissions.

        Return a paginated list of serialized Postgres users.
        """
        return guestagent_utils.serialize_list(
            self._get_users(context),
            limit=limit, marker=marker, include_marker=include_marker)

    def _get_users(self, context):
        """Return all non-system Postgres users on the instance."""
        results = pgutil.query(
            pgutil.UserQuery.list(ignore=cfg.get_ignored_users()),
            timeout=30,
        )
        # The query returns one row per (user, database) ACL pair; collapse
        # to the distinct set of user names first.
        names = set([row[0].strip() for row in results])
        return [self._build_user(context, name, results) for name in names]

    def _build_user(self, context, username, acl=None):
        """Build a model representation of a Postgres user.

        Include all databases it has access to.

        :param acl: Optional rows of (user, db, charset, collation) pairs
                    from which this user's databases are extracted.
        """
        user = models.PostgreSQLUser(username)
        if acl:
            dbs = [models.PostgreSQLSchema(row[1].strip(),
                                           character_set=row[2],
                                           collate=row[3])
                   for row in acl if row[0] == username and row[1] is not None]
            for d in dbs:
                user.databases.append(d.serialize())

        return user

    def delete_user(self, context, user):
        """Delete the specified (serialized) user.
        """
        with EndNotification(context):
            self._drop_user(
                context, models.PostgreSQLUser.deserialize_user(user))

    def _drop_user(self, context, user):
        """Drop a given Postgres user.

        :param user:              User to be dropped.
        :type user:               PostgreSQLUser
        """
        # Postgresql requires that you revoke grants before dropping the user
        dbs = self.list_access(context, user.name, None)
        for d in dbs:
            db = models.PostgreSQLSchema.deserialize_schema(d)
            self.revoke_access(context, user.name, None, db.name)

        LOG.info(
            _("{guest_id}: Dropping user {name}.").format(
                guest_id=CONF.guest_id,
                name=user.name,
            )
        )
        pgutil.psql(
            pgutil.UserQuery.drop(name=user.name),
            timeout=30,
        )

    def get_user(self, context, username, hostname):
        """Return a serialized representation of a user with a given name.

        The hostname parameter is accepted for API compatibility but not
        used by Postgres.
        """
        user = self._find_user(context, username)
        return user.serialize() if user is not None else None

    def _find_user(self, context, username):
        """Lookup a user with a given username.

        Return a new Postgres user instance or None if no match is found.
        """
        results = pgutil.query(
            pgutil.UserQuery.get(name=username),
            timeout=30,
        )

        if results:
            return self._build_user(context, username, results)

        return None

    def user_exists(self, username):
        """Return whether a given user exists on the instance."""
        results = pgutil.query(
            pgutil.UserQuery.get(name=username),
            timeout=30,
        )

        return bool(results)

    def change_passwords(self, context, users):
        """Change the passwords of one or more existing users.

        The users parameter is a list of serialized Postgres users.
        """
        with EndNotification(context):
            for user in users:
                self.alter_user(
                    context,
                    models.PostgreSQLUser.deserialize_user(user), None)

    def alter_user(self, context, user, encrypt_password=None, *options):
        """Change the password and options of an existing users.

        :param user:              User to be altered.
        :type user:               PostgreSQLUser

        :param encrypt_password:  Store passwords encrypted if True.
                                  Fallback to configured default
                                  behavior if None.
        :type encrypt_password:   boolean

        :param options:           Other user options.
        :type options:            list
        """
        # As in _create_user, log the WITH clause with the password redacted.
        LOG.info(
            _("{guest_id}: Altering user {user} {with_clause}.")
            .format(
                guest_id=CONF.guest_id,
                user=user.name,
                with_clause=pgutil.UserQuery._build_with_clause(
                    '<SANITIZED>',
                    encrypt_password,
                    *options
                ),
            )
        )
        pgutil.psql(
            pgutil.UserQuery.alter_user(
                user.name,
                user.password,
                encrypt_password,
                *options),
            timeout=30,
        )

    def update_attributes(self, context, username, hostname, user_attrs):
        """Change the attributes of one existing user.

        The username and hostname parameters are strings.
        The user_attrs parameter is a dictionary in the following form:

            {"password": "", "name": ""}

        Each key/value pair in user_attrs is optional.
        """
        with EndNotification(context):
            user = self._build_user(context, username)
            new_username = user_attrs.get('name')
            new_password = user_attrs.get('password')

            if new_username is not None:
                self._rename_user(context, user, new_username)
                # Make sure we can retrieve the renamed user.
                user = self._find_user(context, new_username)
                if user is None:
                    raise exception.TroveError(_(
                        "Renamed user %s could not be found on the instance.")
                        % new_username)

            if new_password is not None:
                user.password = new_password
                self.alter_user(context, user)

    def _rename_user(self, context, user, new_username):
        """Rename a given Postgres user and transfer all access to the
        new name.

        :param user:              User to be renamed.
        :type user:               PostgreSQLUser
        """
        LOG.info(
            _("{guest_id}: Changing username for {old} to {new}.").format(
                guest_id=CONF.guest_id,
                old=user.name,
                new=new_username,
            )
        )
        # PostgreSQL handles the permission transfer itself.
        pgutil.psql(
            pgutil.UserQuery.update_name(
                old=user.name,
                new=new_username,
            ),
            timeout=30,
        )

View File

@ -25,13 +25,7 @@ from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore.experimental.postgresql.service.config import(
PgSqlConfig)
from trove.guestagent.datastore.experimental.postgresql.service.process import(
PgSqlProcess)
from trove.guestagent.datastore.experimental.postgresql.service.users import(
PgSqlUsers)
from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp
from trove.guestagent.strategies.backup import base
CONF = cfg.CONF
@ -85,18 +79,8 @@ class PgBaseBackupUtil(object):
if walre.search(wal_file) and wal_file >= last_wal]
return wal_files
@staticmethod
def recreate_wal_archive_dir():
operating_system.remove(WAL_ARCHIVE_DIR, force=True, recursive=True,
as_root=True)
operating_system.create_directory(WAL_ARCHIVE_DIR,
user=PgSqlProcess.PGSQL_OWNER,
group=PgSqlProcess.PGSQL_OWNER,
force=True, as_root=True)
class PgBaseBackup(base.BackupRunner, PgSqlConfig, PgBaseBackupUtil,
PgSqlUsers):
class PgBaseBackup(base.BackupRunner, PgBaseBackupUtil):
"""Base backups are taken with the pg_basebackup filesystem-level backup
tool pg_basebackup creates a copy of the binary files in the PostgreSQL
cluster data directory and enough WAL segments to allow the database to
@ -107,6 +91,7 @@ class PgBaseBackup(base.BackupRunner, PgSqlConfig, PgBaseBackupUtil,
__strategy_name__ = 'pg_basebackup'
def __init__(self, *args, **kwargs):
self._app = None
super(PgBaseBackup, self).__init__(*args, **kwargs)
self.label = None
self.stop_segment = None
@ -116,11 +101,21 @@ class PgBaseBackup(base.BackupRunner, PgSqlConfig, PgBaseBackupUtil,
self.checkpoint_location = None
self.mrb = None
@property
def app(self):
if self._app is None:
self._app = self._build_app()
return self._app
def _build_app(self):
return PgSqlApp()
@property
def cmd(self):
cmd = ("pg_basebackup -h %s -U %s --pgdata=-"
" --label=%s --format=tar --xlog " %
(self.UNIX_SOCKET_DIR, self.ADMIN_USER, self.base_filename))
(self.app.pgsql_run_dir, self.app.ADMIN_USER,
self.base_filename))
return cmd + self.zip_cmd + self.encrypt_cmd
@ -208,11 +203,11 @@ class PgBaseBackup(base.BackupRunner, PgSqlConfig, PgBaseBackupUtil,
def _run_post_backup(self):
"""Get rid of WAL data we don't need any longer"""
arch_cleanup_bin = os.path.join(self.pgsql_extra_bin_dir,
arch_cleanup_bin = os.path.join(self.app.pgsql_extra_bin_dir,
"pg_archivecleanup")
bk_file = os.path.basename(self.most_recent_backup_file())
cmd_full = " ".join((arch_cleanup_bin, WAL_ARCHIVE_DIR, bk_file))
utils.execute("sudo", "su", "-", self.PGSQL_OWNER, "-c",
utils.execute("sudo", "su", "-", self.app.pgsql_owner, "-c",
"%s" % cmd_full)
@ -233,16 +228,11 @@ class PgBaseBackupIncremental(PgBaseBackup):
def _run_pre_backup(self):
self.backup_label = self.base_filename
result = pgutil.query("SELECT pg_start_backup('%s', true)" %
self.backup_label)
self.start_segment = result[0][0]
self.start_segment = self.app.pg_start_backup(self.backup_label)
result = pgutil.query("SELECT pg_xlogfile_name('%s')" %
self.start_segment)
self.start_wal_file = result[0][0]
self.start_wal_file = self.app.pg_xlogfile_name(self.start_segment)
result = pgutil.query("SELECT pg_stop_backup()")
self.stop_segment = result[0][0]
self.stop_segment = self.app.pg_stop_backup()
# We have to hack this because self.command is
# initialized in the base class before we get here, which is

View File

@ -26,17 +26,6 @@ from trove.common import utils
from trove.guestagent.backup.backupagent import BackupAgent
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore.experimental.postgresql\
.service.config import PgSqlConfig
from trove.guestagent.datastore.experimental.postgresql\
.service.database import PgSqlDatabase
from trove.guestagent.datastore.experimental.postgresql\
.service.install import PgSqlInstall
from trove.guestagent.datastore.experimental.postgresql \
.service.process import PgSqlProcess
from trove.guestagent.datastore.experimental.postgresql\
.service.root import PgSqlRoot
from trove.guestagent.db import models
from trove.guestagent.strategies import backup
from trove.guestagent.strategies.replication import base
@ -46,13 +35,6 @@ CONF = cfg.CONF
REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.experimental' \
'.postgresql_impl'
REPL_BACKUP_STRATEGY = 'PgBaseBackup'
REPL_BACKUP_INCREMENTAL_STRATEGY = 'PgBaseBackupIncremental'
REPL_BACKUP_RUNNER = backup.get_backup_strategy(
REPL_BACKUP_STRATEGY, REPL_BACKUP_NAMESPACE)
REPL_BACKUP_INCREMENTAL_RUNNER = backup.get_backup_strategy(
REPL_BACKUP_INCREMENTAL_STRATEGY, REPL_BACKUP_NAMESPACE)
REPL_EXTRA_OPTS = CONF.backup_runner_options.get(REPL_BACKUP_STRATEGY, '')
LOG = logging.getLogger(__name__)
@ -61,21 +43,29 @@ REPL_USER = 'replicator'
SLAVE_STANDBY_OVERRIDE = 'SlaveStandbyOverride'
class PostgresqlReplicationStreaming(
base.Replication,
PgSqlConfig,
PgSqlDatabase,
PgSqlRoot,
PgSqlInstall,
):
class PostgresqlReplicationStreaming(base.Replication):
def __init__(self, *args, **kwargs):
super(PostgresqlReplicationStreaming, self).__init__(*args, **kwargs)
@property
def repl_backup_runner(self):
return backup.get_backup_strategy('PgBaseBackup',
REPL_BACKUP_NAMESPACE)
@property
def repl_incr_backup_runner(self):
return backup.get_backup_strategy('PgBaseBackupIncremental',
REPL_BACKUP_NAMESPACE)
@property
def repl_backup_extra_opts(self):
return CONF.backup_runner_options.get('PgBaseBackup', '')
def get_master_ref(self, service, snapshot_info):
master_ref = {
'host': netutils.get_my_ipv4(),
'port': CONF.postgresql.postgresql_port
'port': cfg.get_configuration_property('postgresql_port')
}
return master_ref
@ -92,13 +82,13 @@ class PostgresqlReplicationStreaming(
# Only create a backup if it's the first replica
if replica_number == 1:
AGENT.execute_backup(
context, snapshot_info, runner=REPL_BACKUP_RUNNER,
extra_opts=REPL_EXTRA_OPTS,
incremental_runner=REPL_BACKUP_INCREMENTAL_RUNNER)
context, snapshot_info, runner=self.repl_backup_runner,
extra_opts=self.repl_backup_extra_opts,
incremental_runner=self.repl_incr_backup_runner)
else:
LOG.info(_("Using existing backup created for previous replica."))
repl_user_info = self._get_or_create_replication_user()
repl_user_info = self._get_or_create_replication_user(service)
log_position = {
'replication_user': repl_user_info
@ -106,25 +96,31 @@ class PostgresqlReplicationStreaming(
return snapshot_id, log_position
def _get_or_create_replication_user(self):
# There are three scenarios we need to deal with here:
# - This is a fresh master, with no replicator user created.
# Generate a new u/p
# - We are attaching a new slave and need to give it the login creds
# Send the creds we have stored in PGDATA/.replpass
# - This is a failed-over-to slave, who will have the replicator user
# but not the credentials file. Recreate the repl user in this case
def _get_or_create_replication_user(self, service):
"""There are three scenarios we need to deal with here:
- This is a fresh master, with no replicator user created.
Generate a new u/p
- We are attaching a new slave and need to give it the login creds
Send the creds we have stored in PGDATA/.replpass
- This is a failed-over-to slave, who will have the replicator user
but not the credentials file. Recreate the repl user in this case
"""
pwfile = os.path.join(self.pgsql_data_dir, ".replpass")
if self.user_exists(REPL_USER):
LOG.debug("Checking for replicator user")
pwfile = os.path.join(service.pgsql_data_dir, ".replpass")
admin = service.build_admin()
if admin.user_exists(REPL_USER):
if operating_system.exists(pwfile, as_root=True):
LOG.debug("Found existing .replpass, returning pw")
pw = operating_system.read_file(pwfile, as_root=True)
else:
LOG.debug("Found user but not .replpass, recreate")
u = models.PostgreSQLUser(REPL_USER)
self._drop_user(context=None, user=u)
pw = self._create_replication_user(pwfile)
admin._drop_user(context=None, user=u)
pw = self._create_replication_user(service, admin, pwfile)
else:
pw = self._create_replication_user(pwfile)
LOG.debug("Found no replicator user, create one")
pw = self._create_replication_user(service, admin, pwfile)
repl_user_info = {
'name': REPL_USER,
@ -133,64 +129,69 @@ class PostgresqlReplicationStreaming(
return repl_user_info
def _create_replication_user(self, pwfile):
def _create_replication_user(self, service, admin, pwfile):
"""Create the replication user. Unfortunately, to be able to
run pg_rewind, we need SUPERUSER, not just REPLICATION privilege
"""
pw = utils.generate_random_password()
operating_system.write_file(pwfile, pw, as_root=True)
operating_system.chown(pwfile, user=self.PGSQL_OWNER,
group=self.PGSQL_OWNER, as_root=True)
operating_system.chown(pwfile, user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
operating_system.chmod(pwfile, FileMode.SET_USR_RWX(),
as_root=True)
pgutil.psql("CREATE USER %s SUPERUSER ENCRYPTED "
"password '%s';" % (REPL_USER, pw))
repl_user = models.PostgreSQLUser(name=REPL_USER, password=pw)
admin._create_user(context=None, user=repl_user)
admin.alter_user(None, repl_user, True, 'REPLICATION', 'LOGIN')
return pw
def enable_as_master(self, service, master_config, for_failover=False):
# For a server to be a master in postgres, we need to enable
# replication user in pg_hba and ensure that WAL logging is
# the appropriate level (use the same settings as backups)
self._get_or_create_replication_user()
"""For a server to be a master in postgres, we need to enable
the replication user in pg_hba and ensure that WAL logging is
at the appropriate level (use the same settings as backups)
"""
LOG.debug("Enabling as master, with cfg: %s " % master_config)
self._get_or_create_replication_user(service)
hba_entry = "host replication replicator 0.0.0.0/0 md5 \n"
tmp_hba = '/tmp/pg_hba'
operating_system.copy(self.pgsql_hba_config, tmp_hba,
operating_system.copy(service.pgsql_hba_config, tmp_hba,
force=True, as_root=True)
operating_system.chmod(tmp_hba, FileMode.SET_ALL_RWX(),
as_root=True)
with open(tmp_hba, 'a+') as hba_file:
hba_file.write(hba_entry)
operating_system.copy(tmp_hba, self.pgsql_hba_config,
operating_system.copy(tmp_hba, service.pgsql_hba_config,
force=True, as_root=True)
operating_system.chmod(self.pgsql_hba_config,
operating_system.chmod(service.pgsql_hba_config,
FileMode.SET_USR_RWX(),
as_root=True)
operating_system.remove(tmp_hba, as_root=True)
pgutil.psql("SELECT pg_reload_conf()")
service.reload_configuration()
def enable_as_slave(self, service, snapshot, slave_config):
"""Adds appropriate config options to postgresql.conf, and writes out
the recovery.conf file used to set up replication
"""
self._write_standby_recovery_file(snapshot, sslmode='prefer')
LOG.debug("Got slave_config: %s" % str(slave_config))
self._write_standby_recovery_file(service, snapshot, sslmode='prefer')
self.enable_hot_standby(service)
# Ensure the WAL arch is empty before restoring
PgSqlProcess.recreate_wal_archive_dir()
service.recreate_wal_archive_dir()
def detach_slave(self, service, for_failover):
"""Touch trigger file in to disable recovery mode"""
LOG.info(_("Detaching slave, use trigger to disable recovery mode"))
operating_system.write_file(TRIGGER_FILE, '')
operating_system.chown(TRIGGER_FILE, user=self.PGSQL_OWNER,
group=self.PGSQL_OWNER, as_root=True)
operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
def _wait_for_failover():
# Wait until slave has switched out of recovery mode
return not self.pg_is_in_recovery()
"""Wait until slave has switched out of recovery mode"""
return not service.pg_is_in_recovery()
try:
utils.poll_until(_wait_for_failover, time_out=120)
@ -214,15 +215,15 @@ class PostgresqlReplicationStreaming(
# The recovery.conf file we want should already be there, but pg_rewind
# will delete it, so copy it out first
rec = self.pgsql_recovery_config
rec = service.pgsql_recovery_config
tmprec = "/tmp/recovery.conf.bak"
operating_system.move(rec, tmprec, as_root=True)
cmd_full = " ".join(["pg_rewind", "-D", service.pgsql_data_dir,
'--source-pgdata=' + service.pgsql_data_dir,
'--source-server=' + conninfo])
out, err = utils.execute("sudo", "su", "-", self.PGSQL_OWNER, "-c",
"%s" % cmd_full, check_exit_code=0)
out, err = utils.execute("sudo", "su", "-", service.pgsql_owner,
"-c", "%s" % cmd_full, check_exit_code=0)
LOG.debug("Got stdout %s and stderr %s from pg_rewind" %
(str(out), str(err)))
@ -233,23 +234,26 @@ class PostgresqlReplicationStreaming(
pg_rewind against the new master to enable a proper timeline
switch.
"""
self.pg_checkpoint()
self.stop_db(context=None)
service.stop_db()
self._rewind_against_master(service)
self.start_db(context=None)
service.start_db()
def connect_to_master(self, service, snapshot):
# All that is required in postgresql to connect to a slave is to
# restart with a recovery.conf file in the data dir, which contains
# the connection information for the master.
assert operating_system.exists(self.pgsql_recovery_config,
"""All that is required in postgresql to connect to a slave is to
restart with a recovery.conf file in the data dir, which contains
the connection information for the master.
"""
assert operating_system.exists(service.pgsql_recovery_config,
as_root=True)
self.restart(context=None)
service.restart()
def _remove_recovery_file(self):
operating_system.remove(self.pgsql_recovery_config, as_root=True)
def _remove_recovery_file(self, service):
operating_system.remove(service.pgsql_recovery_config, as_root=True)
def _write_standby_recovery_file(self, service, snapshot,
sslmode='prefer'):
LOG.info("Snapshot data received:" + str(snapshot))
def _write_standby_recovery_file(self, snapshot, sslmode='prefer'):
logging_config = snapshot['log_position']
conninfo_params = \
{'host': snapshot['master']['host'],
@ -270,24 +274,27 @@ class PostgresqlReplicationStreaming(
recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n"
recovery_conf += "recovery_target_timeline='latest'\n"
operating_system.write_file(self.pgsql_recovery_config, recovery_conf,
operating_system.write_file(service.pgsql_recovery_config,
recovery_conf,
codec=stream_codecs.IdentityCodec(),
as_root=True)
operating_system.chown(self.pgsql_recovery_config, user="postgres",
group="postgres", as_root=True)
operating_system.chown(service.pgsql_recovery_config,
user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
def enable_hot_standby(self, service):
opts = {'hot_standby': 'on',
'wal_level': 'hot_standby'}
# wal_log_hints for pg_rewind is only supported in 9.4+
if self.pg_version[1] in ('9.4', '9.5'):
if service.pg_version[1] in ('9.4', '9.5'):
opts['wal_log_hints'] = 'on'
service.configuration_manager.\
apply_system_override(opts, SLAVE_STANDBY_OVERRIDE)
def get_replica_context(self, service):
repl_user_info = self._get_or_create_replication_user()
LOG.debug("Calling get_replica_context")
repl_user_info = self._get_or_create_replication_user(service)
log_position = {
'replication_user': repl_user_info

View File

@ -24,10 +24,7 @@ from trove.common.i18n import _
from trove.common import stream_codecs
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.postgresql.service.config import(
PgSqlConfig)
from trove.guestagent.datastore.experimental.postgresql.service.process import(
PgSqlProcess)
from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp
from trove.guestagent.strategies.restore import base
CONF = cfg.CONF
@ -93,7 +90,7 @@ class PgDump(base.RestoreRunner):
pass
class PgBaseBackup(base.RestoreRunner, PgSqlConfig):
class PgBaseBackup(base.RestoreRunner):
"""Implementation of Restore Strategy for pg_basebackup."""
__strategy_name__ = 'pg_basebackup'
location = ""
@ -104,24 +101,35 @@ class PgBaseBackup(base.RestoreRunner, PgSqlConfig):
]
def __init__(self, *args, **kwargs):
self._app = None
self.base_restore_cmd = 'sudo -u %s tar xCf %s - ' % (
self.PGSQL_OWNER, self.pgsql_data_dir
self.app.pgsql_owner, self.app.pgsql_data_dir
)
super(PgBaseBackup, self).__init__(*args, **kwargs)
@property
def app(self):
if self._app is None:
self._app = self._build_app()
return self._app
def _build_app(self):
return PgSqlApp()
def pre_restore(self):
self.stop_db(context=None)
PgSqlProcess.recreate_wal_archive_dir()
datadir = self.pgsql_data_dir
self.app.stop_db()
LOG.info("Preparing WAL archive dir")
self.app.recreate_wal_archive_dir()
datadir = self.app.pgsql_data_dir
operating_system.remove(datadir, force=True, recursive=True,
as_root=True)
operating_system.create_directory(datadir, user=self.PGSQL_OWNER,
group=self.PGSQL_OWNER, force=True,
as_root=True)
operating_system.create_directory(datadir, user=self.app.pgsql_owner,
group=self.app.pgsql_owner,
force=True, as_root=True)
def post_restore(self):
operating_system.chmod(self.pgsql_data_dir,
operating_system.chmod(self.app.pgsql_data_dir,
FileMode.SET_USR_RWX(),
as_root=True, recursive=True, force=True)
@ -135,12 +143,12 @@ class PgBaseBackup(base.RestoreRunner, PgSqlConfig):
recovery_conf += ("restore_command = '" +
self.pgsql_restore_cmd + "'\n")
recovery_file = os.path.join(self.pgsql_data_dir, 'recovery.conf')
recovery_file = os.path.join(self.app.pgsql_data_dir, 'recovery.conf')
operating_system.write_file(recovery_file, recovery_conf,
codec=stream_codecs.IdentityCodec(),
as_root=True)
operating_system.chown(recovery_file, user=self.PGSQL_OWNER,
group=self.PGSQL_OWNER, as_root=True)
operating_system.chown(recovery_file, user=self.app.pgsql_owner,
group=self.app.pgsql_owner, as_root=True)
class PgBaseBackupIncremental(PgBaseBackup):
@ -149,12 +157,12 @@ class PgBaseBackupIncremental(PgBaseBackup):
super(PgBaseBackupIncremental, self).__init__(*args, **kwargs)
self.content_length = 0
self.incr_restore_cmd = 'sudo -u %s tar -xf - -C %s ' % (
self.PGSQL_OWNER, WAL_ARCHIVE_DIR
self.app.pgsql_owner, WAL_ARCHIVE_DIR
)
self.pgsql_restore_cmd = "cp " + WAL_ARCHIVE_DIR + '/%f "%p"'
def pre_restore(self):
self.stop_db(context=None)
self.app.stop_db()
def post_restore(self):
self.write_recovery_file(restore=True)
@ -185,7 +193,7 @@ class PgBaseBackupIncremental(PgBaseBackup):
cmd = self._incremental_restore_cmd(incr=False)
self.content_length += self._unpack(location, checksum, cmd)
operating_system.chmod(self.pgsql_data_dir,
operating_system.chmod(self.app.pgsql_data_dir,
FileMode.SET_USR_RWX(),
as_root=True, recursive=True, force=True)

View File

@ -18,9 +18,9 @@ from trove.tests.scenario.helpers.sql_helper import SqlHelper
class PostgresqlHelper(SqlHelper):
def __init__(self, expected_override_name, report):
def __init__(self, expected_override_name, report, port=5432):
super(PostgresqlHelper, self).__init__(expected_override_name, report,
'postgresql')
'postgresql', port=port)
@property
def test_schema(self):

View File

@ -57,11 +57,7 @@ from trove.guestagent.datastore.experimental.mongodb import (
from trove.guestagent.datastore.experimental.mongodb import (
system as mongo_system)
from trove.guestagent.datastore.experimental.postgresql import (
manager as pg_manager)
from trove.guestagent.datastore.experimental.postgresql.service import (
config as pg_config)
from trove.guestagent.datastore.experimental.postgresql.service import (
status as pg_status)
service as pg_service)
from trove.guestagent.datastore.experimental.pxc import (
service as pxc_service)
from trove.guestagent.datastore.experimental.redis import service as rservice
@ -296,6 +292,7 @@ class BaseAppTest(object):
super(BaseAppTest.AppTestCase, self).setUp()
self.patch_datastore_manager(manager_name)
self.FAKE_ID = fake_id
util.init_db()
InstanceServiceStatus.create(
instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
@ -3705,35 +3702,19 @@ class MariaDBAppTest(trove_testtools.TestCase):
class PostgresAppTest(BaseAppTest.AppTestCase):
class FakePostgresApp(pg_manager.Manager):
"""Postgresql design is currently different than other datastores.
It does not have an App class, only the Manager, so we fake one.
The fake App just passes the calls onto the Postgres manager.
"""
def restart(self):
super(PostgresAppTest.FakePostgresApp, self).restart(Mock())
def start_db(self):
super(PostgresAppTest.FakePostgresApp, self).start_db(Mock())
def stop_db(self):
super(PostgresAppTest.FakePostgresApp, self).stop_db(Mock())
@patch.object(pg_config.PgSqlConfig, '_find_config_file', return_value='')
def setUp(self, _):
@patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
@patch.object(pg_service.PgSqlApp, '_find_config_file', return_value='')
@patch.object(pg_service.PgSqlApp,
'pgsql_extra_bin_dir', PropertyMock(return_value=''))
def setUp(self, mock_cfg, mock_exec):
super(PostgresAppTest, self).setUp(str(uuid4()), 'postgresql')
self.orig_time_sleep = time.sleep
self.orig_time_time = time.time
time.sleep = Mock()
time.time = Mock(side_effect=faketime)
status = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.pg_status_patcher = patch.object(pg_status.PgSqlAppStatus, 'get',
return_value=status)
self.addCleanup(self.pg_status_patcher.stop)
self.pg_status_patcher.start()
self.postgres = PostgresAppTest.FakePostgresApp()
self.postgres = pg_service.PgSqlApp()
self.postgres.status = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
@property
def app(self):
@ -3749,7 +3730,7 @@ class PostgresAppTest(BaseAppTest.AppTestCase):
@property
def expected_service_candidates(self):
return self.postgres.SERVICE_CANDIDATES
return self.postgres.service_candidates
def tearDown(self):
time.sleep = self.orig_time_sleep