Datastore containerization
Significant changes: * Using docker image to install datastore. * Datastore image is common to different datastores. * Using backup docker image to do backup and restore. * Support MariaDB replication * Set most of the functional jobs as non-voting as nested virtualization is not supported in CI. Change-Id: Ia9c97a63a961eebc336b70d28dc77638144c1834
This commit is contained in:
parent
523d66e8fd
commit
aa1d4d2246
36
.zuul.yaml
36
.zuul.yaml
@ -14,15 +14,14 @@
|
||||
- openstack-tox-pylint
|
||||
- trove-tox-bandit-baseline:
|
||||
voting: false
|
||||
- trove-tempest:
|
||||
- trove-tempest
|
||||
- trove-functional-mysql:
|
||||
voting: false
|
||||
- trove-functional-mysql
|
||||
- trove-scenario-mysql-single
|
||||
- trove-scenario-mysql-multi
|
||||
- trove-scenario-mariadb-single
|
||||
- trove-scenario-postgresql-single:
|
||||
- trove-scenario-mysql-single:
|
||||
voting: false
|
||||
- trove-scenario-postgresql-multi:
|
||||
- trove-scenario-mysql-multi:
|
||||
voting: false
|
||||
- trove-scenario-mariadb-single:
|
||||
voting: false
|
||||
- trove-scenario-mariadb-multi:
|
||||
voting: false
|
||||
@ -34,9 +33,12 @@
|
||||
queue: trove
|
||||
jobs:
|
||||
- openstack-tox-pylint
|
||||
- trove-functional-mysql
|
||||
- trove-scenario-mysql-single
|
||||
- trove-scenario-mysql-multi
|
||||
- trove-functional-mysql:
|
||||
voting: false
|
||||
- trove-scenario-mysql-single:
|
||||
voting: false
|
||||
- trove-scenario-mysql-multi:
|
||||
voting: false
|
||||
experimental:
|
||||
jobs:
|
||||
- trove-grenade
|
||||
@ -145,7 +147,7 @@
|
||||
trove_resize_time_out: 1800
|
||||
trove_test_datastore: 'mysql'
|
||||
trove_test_group: 'mysql'
|
||||
trove_test_datastore_version: '5.7'
|
||||
trove_test_datastore_version: '5.7.29'
|
||||
|
||||
- job:
|
||||
name: trove-functional-mysql-nondev
|
||||
@ -153,11 +155,11 @@
|
||||
vars:
|
||||
devstack_localrc:
|
||||
TROVE_RESIZE_TIME_OUT: 1800
|
||||
TROVE_NON_DEV_IMAGE_URL_MYSQL: https://tarballs.opendev.org/openstack/trove/images/trove-master-mysql-ubuntu-xenial.qcow2
|
||||
TROVE_NON_DEV_IMAGE_URL: https://tarballs.opendev.org/openstack/trove/images/trove-master-mysql-ubuntu-xenial.qcow2
|
||||
trove_resize_time_out: 1800
|
||||
trove_test_datastore: 'mysql'
|
||||
trove_test_group: 'mysql'
|
||||
trove_test_datastore_version: '5.7'
|
||||
trove_test_datastore_version: '5.7.29'
|
||||
|
||||
- job:
|
||||
name: trove-grenade
|
||||
@ -212,7 +214,7 @@
|
||||
vars:
|
||||
trove_test_datastore: mariadb
|
||||
trove_test_group: mariadb-supported-single
|
||||
trove_test_datastore_version: 10.4
|
||||
trove_test_datastore_version: 10.4.12
|
||||
devstack_localrc:
|
||||
TROVE_ENABLE_IMAGE_BUILD: false
|
||||
|
||||
@ -222,7 +224,7 @@
|
||||
vars:
|
||||
trove_test_datastore: mariadb
|
||||
trove_test_group: mariadb-supported-multi
|
||||
trove_test_datastore_version: 10.4
|
||||
trove_test_datastore_version: 10.4.12
|
||||
devstack_localrc:
|
||||
TROVE_ENABLE_IMAGE_BUILD: false
|
||||
|
||||
@ -232,7 +234,7 @@
|
||||
vars:
|
||||
trove_test_datastore: mysql
|
||||
trove_test_group: mysql-supported-single
|
||||
trove_test_datastore_version: 5.7
|
||||
trove_test_datastore_version: 5.7.29
|
||||
|
||||
- job:
|
||||
name: trove-scenario-mysql-multi
|
||||
@ -240,7 +242,7 @@
|
||||
vars:
|
||||
trove_test_datastore: mysql
|
||||
trove_test_group: mysql-supported-multi
|
||||
trove_test_datastore_version: 5.7
|
||||
trove_test_datastore_version: 5.7.29
|
||||
|
||||
- job:
|
||||
name: trove-scenario-percona-multi
|
||||
|
@ -688,10 +688,9 @@ replica_count:
|
||||
type: integer
|
||||
replica_of:
|
||||
description: |
|
||||
ID or name of an existing instance to replicate
|
||||
from.
|
||||
ID or name of an existing instance to replicate from.
|
||||
in: body
|
||||
required: false
|
||||
required: true
|
||||
type: string
|
||||
restore_point:
|
||||
description: |
|
||||
@ -735,9 +734,10 @@ shard_id:
|
||||
type: string
|
||||
slave_of:
|
||||
description: |
|
||||
To detach a replica, set ``slave_of`` to null.
|
||||
To detach a replica, set ``slave_of`` to null. Deprecated in favor of
|
||||
``replica_of``
|
||||
in: body
|
||||
required: true
|
||||
required: false
|
||||
type: string
|
||||
tenant_id:
|
||||
description: |
|
||||
|
@ -1,6 +1,5 @@
|
||||
{
|
||||
"instance": {
|
||||
"replica_of": null,
|
||||
"slave_of": null
|
||||
"replica_of": null
|
||||
}
|
||||
}
|
||||
|
41
backup/Dockerfile
Normal file
41
backup/Dockerfile
Normal file
@ -0,0 +1,41 @@
|
||||
FROM ubuntu:18.04
LABEL maintainer="anlin.kong@gmail.com"

ARG APTOPTS="-y -qq --no-install-recommends --allow-unauthenticated"
ARG PERCONA_XTRABACKUP_VERSION=24
ENV DEBIAN_FRONTEND noninteractive
ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1

# Base tooling needed to add third-party apt repositories below.
RUN apt-get update \
    && apt-get install $APTOPTS gnupg2 lsb-release apt-utils apt-transport-https ca-certificates software-properties-common curl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install percona-xtrabackup for mysql
RUN curl -sSL https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb -o percona-release.deb \
    && dpkg -i percona-release.deb \
    && percona-release enable-only tools release \
    && apt-get update \
    && apt-get install $APTOPTS percona-xtrabackup-${PERCONA_XTRABACKUP_VERSION} \
    && apt-get clean \
    && rm -f percona-release.deb

# Install mariabackup for mariadb
# NOTE: instruction casing fixed ("Run" -> "RUN") for consistency with the
# rest of the file and standard Dockerfile convention.
RUN apt-key adv --fetch-keys 'https://mariadb.org/mariadb_release_signing_key.asc' \
    && add-apt-repository "deb [arch=amd64] http://mirror2.hs-esslingen.de/mariadb/repo/10.4/ubuntu $(lsb_release -cs) main" \
    && apt-get update \
    && apt-get install $APTOPTS mariadb-backup \
    && apt-get clean

# Build dependencies for the Python backup agent's requirements.
RUN apt-get update \
    && apt-get install $APTOPTS build-essential python3-setuptools python3-all python3-all-dev python3-pip libffi-dev libssl-dev libxml2-dev libxslt1-dev libyaml-dev \
    && apt-get clean

COPY . /opt/trove/backup
WORKDIR /opt/trove/backup

RUN pip3 --no-cache-dir install -U -r requirements.txt

# dumb-init runs as PID 1 to forward signals and reap zombie processes.
RUN curl -sSL https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_amd64 -o /usr/local/bin/dumb-init \
    && chmod +x /usr/local/bin/dumb-init

ENTRYPOINT ["dumb-init", "--single-child", "--"]
|
207
backup/drivers/base.py
Normal file
207
backup/drivers/base.py
Normal file
@ -0,0 +1,207 @@
|
||||
# Copyright 2020 Catalyst Cloud
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseRunner(object):
    """Base class for Backup Strategy implementations.

    A runner builds shell pipelines for taking and restoring backups.
    For backups it is used as a context manager: ``__enter__`` spawns the
    backup subprocess, ``read()`` streams its stdout (persisted by a storage
    driver), and ``__exit__`` validates the outcome.  For restores,
    ``restore()`` feeds data loaded from storage into the restore pipeline.
    """

    # Subclass should provide the commands.
    cmd = None  # backup command template (%-interpolated with ctor kwargs)
    restore_cmd = None  # restore command template
    prepare_cmd = None  # post-restore prepare command template

    # NOTE(review): read at class-definition (import) time -- assumes the
    # 'backup-encryption-key' option is registered and parsed before this
    # module is imported; confirm against the CLI entry point.
    encrypt_key = CONF.backup_encryption_key
    # Default restore target; the datastore's data directory.
    default_data_dir = '/var/lib/mysql/data'

    def __init__(self, *args, **kwargs):
        """Build the backup, restore and prepare shell commands.

        Recognized kwargs: ``filename`` (backup id), ``storage`` (storage
        driver instance), ``location``/``checksum`` (source backup when
        restoring), ``restore_location`` (defaults to default_data_dir).
        Any remaining kwargs (e.g. ``lsn``) are %-interpolated into the
        command templates.
        """
        self.process = None
        self.pid = None
        self.base_filename = kwargs.get('filename')
        self.storage = kwargs.pop('storage', None)
        self.location = kwargs.pop('location', '')
        self.checksum = kwargs.pop('checksum', '')

        if 'restore_location' not in kwargs:
            kwargs['restore_location'] = self.default_data_dir
        self.restore_location = kwargs['restore_location']

        self.command = self.cmd % kwargs
        # Restore pipeline: [decrypt |] [gunzip |] restore tool.
        self.restore_command = (self.decrypt_cmd +
                                self.unzip_cmd +
                                (self.restore_cmd % kwargs))
        self.prepare_command = self.prepare_cmd % kwargs

    @property
    def filename(self):
        """Subclasses may overwrite this to declare a format (.tar)."""
        return self.base_filename

    @property
    def manifest(self):
        """Target file name (filename + compression/encryption suffixes)."""
        return "%s%s%s" % (self.filename,
                           self.zip_manifest,
                           self.encrypt_manifest)

    @property
    def zip_cmd(self):
        # Appended to the backup command to compress the stream.
        return ' | gzip'

    @property
    def unzip_cmd(self):
        # Prepended to the restore command to decompress the stream.
        return 'gzip -d -c | '

    @property
    def zip_manifest(self):
        # Suffix advertising the compression used in the manifest name.
        return '.gz'

    @property
    def encrypt_cmd(self):
        # Appended to the backup pipeline when an encryption key is set;
        # empty string otherwise (no encryption stage).
        return (' | openssl enc -aes-256-cbc -md sha512 -pbkdf2 -iter 10000 '
                '-salt -pass pass:%s' %
                self.encrypt_key) if self.encrypt_key else ''

    @property
    def decrypt_cmd(self):
        # Prepended to the restore pipeline when an encryption key is set;
        # must mirror encrypt_cmd's cipher parameters exactly.
        if self.encrypt_key:
            return ('openssl enc -d -aes-256-cbc -md sha512 -pbkdf2 -iter '
                    '10000 -salt -pass pass:%s | '
                    % self.encrypt_key)
        else:
            return ''

    @property
    def encrypt_manifest(self):
        # Suffix advertising encryption in the manifest name.
        return '.enc' if self.encrypt_key else ''

    def _run(self):
        """Spawn the backup pipeline in its own session/process group."""
        LOG.info("Running backup cmd: %s", self.command)
        # os.setsid makes the shell a session leader so the entire pipeline
        # can be signalled as one process group in __exit__.
        self.process = subprocess.Popen(self.command, shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        preexec_fn=os.setsid)
        self.pid = self.process.pid

    def __enter__(self):
        """Start up the process."""
        self.pre_backup()
        self._run()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Clean up everything."""
        if getattr(self, 'process', None):
            try:
                # Send a sigterm to the session leader, so that all
                # child processes are killed and cleaned up on terminate
                os.killpg(self.process.pid, signal.SIGTERM)
                self.process.terminate()
            except OSError:
                # Process may already have exited.
                pass

        if exc_type is not None:
            # Propagate the exception raised inside the with-block.
            return False

        # Anything on stderr is treated as a backup failure.
        try:
            err = self.process.stderr.read()
            if err:
                raise Exception(err)
        except OSError:
            pass

        if not self.check_process():
            raise Exception()

        self.post_backup()

        return True

    def read(self, chunk_size):
        """Read a chunk of the backup stream (the subprocess stdout)."""
        return self.process.stdout.read(chunk_size)

    def get_metadata(self):
        """Hook for subclasses to get metadata from the backup."""
        return {}

    def check_process(self):
        """Hook for subclasses to check process for errors."""
        return True

    def check_restore_process(self):
        """Hook for subclasses to check the restore process for errors."""
        return True

    def pre_backup(self):
        """Hook for subclasses to run commands before backup."""
        pass

    def post_backup(self):
        """Hook for subclasses to run commands after backup."""
        pass

    def pre_restore(self):
        """Hook that is called before the restore command."""
        pass

    def post_restore(self):
        """Hook that is called after the restore command."""
        pass

    def unpack(self, location, checksum, command):
        """Stream the stored backup at *location* into *command*'s stdin.

        :returns: number of bytes fed into the restore pipeline.
        """
        stream = self.storage.load(location, checksum)

        LOG.info('Running restore from stream, command: %s', command)
        self.process = subprocess.Popen(command, shell=True,
                                        stdin=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        content_length = 0
        for chunk in stream:
            self.process.stdin.write(chunk)
            content_length += len(chunk)
        self.process.stdin.close()

        # Anything on stderr is treated as a restore failure.
        try:
            err = self.process.stderr.read()
            if err:
                raise Exception(err)
        except OSError:
            pass

        if not self.check_restore_process():
            raise Exception()

        return content_length

    def run_restore(self):
        # Default restore: a single (non-incremental) unpack.
        return self.unpack(self.location, self.checksum, self.restore_command)

    def restore(self):
        """Restore backup to data directory.

        :returns Restored data size.
        """
        self.pre_restore()
        content_length = self.run_restore()
        self.post_restore()
        return content_length
|
137
backup/drivers/innobackupex.py
Normal file
137
backup/drivers/innobackupex.py
Normal file
@ -0,0 +1,137 @@
|
||||
# Copyright 2020 Catalyst Cloud
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from backup.drivers import mysql_base
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class InnoBackupEx(mysql_base.MySQLBaseRunner):
    """Backup and restore implementation based on innobackupex."""

    backup_log = '/tmp/innobackupex.log'
    prepare_log = '/tmp/prepare.log'
    restore_cmd = ('xbstream -x -C %(restore_location)s --parallel=2'
                   ' 2>/tmp/xbstream_extract.log')
    prepare_cmd = ('innobackupex'
                   ' --defaults-file=%(restore_location)s/backup-my.cnf'
                   ' --ibbackup=xtrabackup'
                   ' --apply-log'
                   ' %(restore_location)s'
                   ' 2>' + prepare_log)

    @property
    def cmd(self):
        """Full shell pipeline streaming an innobackupex backup."""
        pipeline = [
            'innobackupex --stream=xbstream --parallel=2 ',
            self.user_and_pass,
            ' %s' % self.default_data_dir,
            ' 2>',
            self.backup_log,
            self.zip_cmd,
            self.encrypt_cmd,
        ]
        return ''.join(pipeline)

    def check_restore_process(self):
        """Verify the xbstream extraction exited cleanly and logged nothing."""
        LOG.info('Checking return code of xbstream restore process.')
        return_code = self.process.wait()
        if return_code != 0:
            LOG.error('xbstream exited with %s', return_code)
            return False

        # Any non-blank line in the extraction log indicates a failure.
        with open('/tmp/xbstream_extract.log', 'r') as extract_log:
            for raw_line in extract_log:
                if raw_line.strip():
                    LOG.error('xbstream restore failed with: %s',
                              raw_line.rstrip('\n'))
                    return False

        return True

    def post_restore(self):
        """Run and validate the innobackupex prepare step after extraction."""
        LOG.info("Running innobackupex prepare: %s.", self.prepare_command)
        processutils.execute(self.prepare_command, shell=True)

        LOG.info("Checking innobackupex prepare log")
        with open(self.prepare_log, 'r') as prepare_log:
            contents = prepare_log.read()

        if not contents:
            raise Exception("innobackupex prepare log file empty")

        final_line = contents.splitlines()[-1].strip()
        if not re.search('completed OK!', final_line):
            raise Exception(
                "innobackupex prepare did not complete successfully")
|
||||
|
||||
class InnoBackupExIncremental(InnoBackupEx):
    """InnoBackupEx incremental backup."""

    incremental_prep = ('innobackupex'
                        ' --defaults-file=%(restore_location)s/backup-my.cnf'
                        ' --ibbackup=xtrabackup'
                        ' --apply-log'
                        ' --redo-only'
                        ' %(restore_location)s'
                        ' %(incremental_args)s'
                        ' 2>/tmp/innoprepare.log')

    def __init__(self, *args, **kwargs):
        # An incremental backup is meaningless without the parent's LSN.
        if not kwargs.get('lsn'):
            raise AttributeError('lsn attribute missing')
        self.parent_location = kwargs.pop('parent_location', '')
        self.parent_checksum = kwargs.pop('parent_checksum', '')
        self.restore_content_length = 0

        super(InnoBackupExIncremental, self).__init__(*args, **kwargs)

    @property
    def cmd(self):
        """Full shell pipeline streaming an incremental backup."""
        pipeline = [
            'innobackupex --stream=xbstream --incremental'
            ' --incremental-lsn=%(lsn)s ',
            self.user_and_pass,
            ' %s' % self.default_data_dir,
            ' 2>',
            self.backup_log,
            self.zip_cmd,
            self.encrypt_cmd,
        ]
        return ''.join(pipeline)

    def get_metadata(self):
        """Extend the base metadata with parent-backup pointers."""
        meta = super(InnoBackupExIncremental, self).get_metadata()
        meta['parent_location'] = self.parent_location
        meta['parent_checksum'] = self.parent_checksum
        return meta

    def run_restore(self):
        """Run incremental restore.

        First grab all parents and prepare them with '--redo-only'. After
        all backups are restored the super class InnoBackupEx post_restore
        method is called to do the final prepare with '--apply-log'
        """
        LOG.debug('Running incremental restore')
        self.incremental_restore(self.location, self.checksum)
        return self.restore_content_length
|
87
backup/drivers/mariabackup.py
Normal file
87
backup/drivers/mariabackup.py
Normal file
@ -0,0 +1,87 @@
|
||||
# Copyright 2020 Catalyst Cloud
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from backup.drivers import mysql_base
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class MariaBackup(mysql_base.MySQLBaseRunner):
    """Implementation of Backup and Restore using mariabackup."""

    backup_log = '/tmp/mariabackup.log'
    restore_log = '/tmp/mbstream_extract.log'
    restore_cmd = 'mbstream -x -C %(restore_location)s 2>' + restore_log
    prepare_cmd = ''

    @property
    def cmd(self):
        """Full shell pipeline streaming a mariabackup backup."""
        pipeline = [
            'mariabackup --backup --stream=xbstream ',
            self.user_and_pass,
            ' 2>',
            self.backup_log,
            self.zip_cmd,
            self.encrypt_cmd,
        ]
        return ''.join(pipeline)

    def check_restore_process(self):
        """Verify the mbstream extraction exited cleanly."""
        LOG.debug('Checking return code of mbstream restore process.')
        return_code = self.process.wait()
        if return_code == 0:
            return True

        LOG.error('mbstream exited with %s', return_code)
        return False
|
||||
|
||||
|
||||
class MariaBackupIncremental(MariaBackup):
    """Incremental backup and restore using mariabackup."""

    incremental_prep = ('mariabackup --prepare '
                        '--target-dir=%(restore_location)s '
                        '%(incremental_args)s '
                        '2>/tmp/innoprepare.log')

    def __init__(self, *args, **kwargs):
        # An incremental backup is meaningless without the parent's LSN.
        if not kwargs.get('lsn'):
            raise AttributeError('lsn attribute missing')
        self.parent_location = kwargs.pop('parent_location', '')
        self.parent_checksum = kwargs.pop('parent_checksum', '')
        self.restore_content_length = 0

        super(MariaBackupIncremental, self).__init__(*args, **kwargs)

    @property
    def cmd(self):
        """Full shell pipeline streaming an incremental mariabackup."""
        pipeline = [
            'mariabackup --backup --stream=xbstream'
            ' --incremental-lsn=%(lsn)s ',
            self.user_and_pass,
            ' 2>',
            self.backup_log,
            self.zip_cmd,
            self.encrypt_cmd,
        ]
        return ''.join(pipeline)

    def get_metadata(self):
        """Extend the base metadata with parent-backup pointers."""
        meta = super(MariaBackupIncremental, self).get_metadata()
        meta['parent_location'] = self.parent_location
        meta['parent_checksum'] = self.parent_checksum
        return meta

    def run_restore(self):
        """Run incremental restore across the whole backup chain."""
        LOG.debug('Running incremental restore')
        self.incremental_restore(self.location, self.checksum)
        return self.restore_content_length
|
139
backup/drivers/mysql_base.py
Normal file
139
backup/drivers/mysql_base.py
Normal file
@ -0,0 +1,139 @@
|
||||
# Copyright 2020 Catalyst Cloud
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from backup.drivers import base
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MySQLBaseRunner(base.BaseRunner):
    """Shared behaviour for MySQL-family runners (xtrabackup/mariabackup).

    Provides credential handling, backup-log validation, LSN extraction and
    the recursive incremental-restore machinery used by the concrete
    drivers.  Incremental subclasses must define ``incremental_prep``.
    """

    def __init__(self, *args, **kwargs):
        super(MySQLBaseRunner, self).__init__(*args, **kwargs)

    @property
    def user_and_pass(self):
        """Credential/host CLI arguments shared by the backup tools."""
        return ('--user=%(user)s --password=%(password)s --host=%(host)s' %
                {'user': CONF.db_user,
                 'password': CONF.db_password,
                 'host': CONF.db_host})

    @property
    def filename(self):
        """Target object name; backups are streamed in xbstream format."""
        return '%s.xbstream' % self.base_filename

    def check_process(self):
        """Check the backup output for 'completed OK!'."""
        LOG.debug('Checking backup process output.')
        with open(self.backup_log, 'r') as backup_log:
            output = backup_log.read()
            if not output:
                LOG.error("Backup log file %s empty.", self.backup_log)
                return False

            # Both xtrabackup and mariabackup end a successful run with
            # a 'completed OK!' line.
            last_line = output.splitlines()[-1].strip()
            if not re.search('completed OK!', last_line):
                LOG.error("Backup did not complete successfully.")
                return False

        return True

    def get_metadata(self):
        """Extract the incremental checkpoint LSN from the backup log.

        :returns: ``{'lsn': <value>}`` when found, otherwise ``{}``.
        """
        LOG.debug('Getting metadata for backup %s', self.base_filename)
        meta = {}
        lsn = re.compile(r"The latest check point \(for incremental\): "
                         r"'(\d+)'")
        with open(self.backup_log, 'r') as backup_log:
            output = backup_log.read()
            match = lsn.search(output)
            if match:
                meta = {'lsn': match.group(1)}

        LOG.info("Updated metadata for backup %s: %s", self.base_filename,
                 meta)

        return meta

    def incremental_restore_cmd(self, incremental_dir):
        """Return a command for a restore with a incremental location."""
        args = {'restore_location': incremental_dir}
        return (self.decrypt_cmd + self.unzip_cmd + self.restore_cmd % args)

    def incremental_prepare_cmd(self, incremental_dir):
        """Build the prepare command, optionally with --incremental-dir."""
        if incremental_dir is not None:
            incremental_arg = '--incremental-dir=%s' % incremental_dir
        else:
            incremental_arg = ''

        args = {
            'restore_location': self.restore_location,
            'incremental_args': incremental_arg,
        }

        # NOTE(review): incremental_prep is defined by the incremental
        # subclasses only; calling this on a plain runner would raise
        # AttributeError.
        return self.incremental_prep % args

    def incremental_prepare(self, incremental_dir):
        """Run the prepare step for one link of the incremental chain."""
        prepare_cmd = self.incremental_prepare_cmd(incremental_dir)

        LOG.info("Running restore prepare command: %s.", prepare_cmd)
        processutils.execute(prepare_cmd, shell=True)

    def incremental_restore(self, location, checksum):
        """Recursively apply backups from all parents.

        If we are the parent then we restore to the restore_location and
        we apply the logs to the restore_location only.

        Otherwise if we are an incremental we restore to a subfolder to
        prevent stomping on the full restore data. Then we run apply log
        with the '--incremental-dir' flag

        :param location: The source backup location.
        :param checksum: Checksum of the source backup for validation.
        """
        metadata = self.storage.load_metadata(location, checksum)
        incremental_dir = None

        if 'parent_location' in metadata:
            LOG.info("Restoring parent: %(parent_location)s"
                     " checksum: %(parent_checksum)s.", metadata)

            parent_location = metadata['parent_location']
            parent_checksum = metadata['parent_checksum']
            # Restore parents recursively so backup are applied sequentially
            self.incremental_restore(parent_location, parent_checksum)
            # for *this* backup set the incremental_dir
            # just use the checksum for the incremental path as it is
            # sufficiently unique /var/lib/mysql/<checksum>
            incremental_dir = os.path.join('/var/lib/mysql', checksum)
            os.makedirs(incremental_dir)
            command = self.incremental_restore_cmd(incremental_dir)
        else:
            # The parent (full backup) use the same command from InnobackupEx
            # super class and do not set an incremental_dir.
            command = self.restore_command

        self.restore_content_length += self.unpack(location, checksum, command)
        self.incremental_prepare(incremental_dir)

        # Delete after restoring this part of backup
        if incremental_dir:
            shutil.rmtree(incremental_dir)
|
149
backup/main.py
Normal file
149
backup/main.py
Normal file
@ -0,0 +1,149 @@
|
||||
# Copyright 2020 Catalyst Cloud
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import importutils
|
||||
import sys
|
||||
|
||||
# Make the repository root importable so the "backup.*" modules resolve
# when this script is executed directly (e.g. inside the backup container).
topdir = os.path.normpath(
    os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))
sys.path.insert(0, topdir)

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Command-line options for the trove-backup entry point.
cli_opts = [
    cfg.StrOpt('backup-id'),
    cfg.StrOpt(
        'storage-driver',
        default='swift',
        choices=['swift']
    ),
    cfg.StrOpt(
        'driver',
        default='innobackupex',
        # NOTE(review): 'xtrabackup' is accepted here but has no entry in
        # driver_mapping below -- selecting it would raise KeyError in
        # main(); confirm intended support.
        choices=['innobackupex', 'xtrabackup', 'mariabackup']
    ),
    cfg.BoolOpt('backup'),  # True: take a backup; otherwise: restore
    cfg.StrOpt('backup-encryption-key'),
    cfg.StrOpt('db-user'),
    cfg.StrOpt('db-password'),
    cfg.StrOpt('db-host'),
    cfg.StrOpt('os-token'),
    cfg.StrOpt('os-auth-url'),
    cfg.StrOpt('os-tenant-id'),
    cfg.StrOpt('swift-container', default='database_backups'),
    cfg.DictOpt('swift-extra-metadata'),
    cfg.StrOpt('restore-from'),
    cfg.StrOpt('restore-checksum'),
    cfg.BoolOpt('incremental'),
    cfg.StrOpt('parent-location'),
    cfg.StrOpt(
        'parent-checksum',
        help='It is up to the storage driver to decide to validate the '
             'checksum or not. '
    ),
]

# Maps --driver values to runner classes; the '<driver>_inc' variants are
# used for incremental backup/restore.
driver_mapping = {
    'innobackupex': 'backup.drivers.innobackupex.InnoBackupEx',
    'innobackupex_inc': 'backup.drivers.innobackupex.InnoBackupExIncremental',
    'mariabackup': 'backup.drivers.mariabackup.MariaBackup',
    'mariabackup_inc': 'backup.drivers.mariabackup.MariaBackupIncremental',
}
# Maps --storage-driver values to storage backend classes.
storage_mapping = {
    'swift': 'backup.storage.swift.SwiftStorage',
}
|
||||
|
||||
|
||||
def stream_backup_to_storage(runner_cls, storage):
    """Take a backup with *runner_cls* and stream it to *storage*.

    For incremental backups the parent backup's metadata is loaded first and
    passed to the runner so it can compute the delta.

    Exits the process with status 1 on failure so callers (e.g. the backup
    container supervisor) can detect a failed backup; previously the error
    was logged but swallowed and the process exited 0.
    """
    parent_metadata = {}

    if CONF.incremental:
        if not CONF.parent_location:
            LOG.error('--parent-location should be provided for incremental '
                      'backup')
            # Use sys.exit rather than the interactive-only exit() builtin.
            sys.exit(1)

        parent_metadata = storage.load_metadata(CONF.parent_location,
                                                CONF.parent_checksum)
        parent_metadata.update(
            {
                'parent_location': CONF.parent_location,
                'parent_checksum': CONF.parent_checksum
            }
        )

    try:
        with runner_cls(filename=CONF.backup_id, **parent_metadata) as bkup:
            checksum, location = storage.save(
                bkup,
                metadata=CONF.swift_extra_metadata
            )
            LOG.info('Backup successfully, checksum: %s, location: %s',
                     checksum, location)
    except Exception as err:
        LOG.exception('Failed to call stream_backup_to_storage, error: %s',
                      err)
        # Propagate the failure via a non-zero exit code.
        sys.exit(1)
|
||||
|
||||
|
||||
def stream_restore_from_storage(runner_cls, storage):
    """Restore a backup from *storage* using *runner_cls*.

    The LSN is looked up only when the source backup is incremental so the
    runner can rebuild the incremental chain.

    Exits the process with status 1 on failure so callers can detect an
    unsuccessful restore; previously the error was logged but swallowed and
    the process exited 0.
    """
    lsn = ""
    if storage.is_incremental_backup(CONF.restore_from):
        lsn = storage.get_backup_lsn(CONF.restore_from)

    try:
        runner = runner_cls(storage=storage, location=CONF.restore_from,
                            checksum=CONF.restore_checksum, lsn=lsn)
        restore_size = runner.restore()
        LOG.info('Restore successfully, restore_size: %s', restore_size)
    except Exception as err:
        LOG.exception('Failed to call stream_restore_from_storage, error: %s',
                      err)
        # Propagate the failure via a non-zero exit code.
        sys.exit(1)
|
||||
|
||||
|
||||
def main():
    """Entry point: parse CLI options and dispatch a backup or a restore."""
    CONF.register_cli_opts(cli_opts)
    logging.register_options(CONF)
    CONF(sys.argv[1:], project='trove-backup')
    logging.setup(CONF, 'trove-backup')

    runner_cls = importutils.import_class(driver_mapping[CONF.driver])
    storage = importutils.import_class(storage_mapping[CONF.storage_driver])()

    if CONF.backup:
        # Incremental backups use the dedicated '<driver>_inc' runner.
        if CONF.incremental:
            runner_cls = importutils.import_class(
                driver_mapping['%s_inc' % CONF.driver])

        LOG.info('Starting backup database to %s, backup ID %s',
                 CONF.storage_driver, CONF.backup_id)
        stream_backup_to_storage(runner_cls, storage)
    else:
        # For restores, ask the storage driver whether the source backup
        # is incremental and pick the matching runner.
        if storage.is_incremental_backup(CONF.restore_from):
            LOG.debug('Restore from incremental backup')
            runner_cls = importutils.import_class(
                driver_mapping['%s_inc' % CONF.driver])

        LOG.info('Starting restore database from %s, location: %s',
                 CONF.storage_driver, CONF.restore_from)

        stream_restore_from_storage(runner_cls, storage)


if __name__ == '__main__':
    sys.exit(main())
|
6
backup/requirements.txt
Normal file
6
backup/requirements.txt
Normal file
@ -0,0 +1,6 @@
|
||||
oslo.config!=4.3.0,!=4.4.0;python_version>='3.0' # Apache-2.0
|
||||
oslo.log;python_version>='3.0' # Apache-2.0
|
||||
oslo.utils!=3.39.1,!=3.40.0,!=3.40.1;python_version>='3.0' # Apache-2.0
|
||||
oslo.concurrency;python_version>='3.0' # Apache-2.0
|
||||
keystoneauth1 # Apache-2.0
|
||||
python-swiftclient # Apache-2.0
|
48
backup/storage/base.py
Normal file
48
backup/storage/base.py
Normal file
@ -0,0 +1,48 @@
|
||||
# Copyright 2020 Catalyst Cloud
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import abc
|
||||
|
||||
|
||||
class Storage(object):
|
||||
"""Base class for Storage driver implementation."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def save(self, stream, metadata=None, **kwargs):
|
||||
"""Persist information from the stream.
|
||||
|
||||
Should return the new backup checkshum and location.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def load(self, location, backup_checksum, **kwargs):
|
||||
"""Load a stream from the data location.
|
||||
|
||||
Should return an object that provides "read" method.
|
||||
"""
|
||||
|
||||
def load_metadata(self, parent_location, parent_checksum):
|
||||
"""Load metadata for a parent backup.
|
||||
|
||||
It's up to the storage driver to decide how to implement this function.
|
||||
"""
|
||||
return {}
|
||||
|
||||
def is_incremental_backup(self, location):
|
||||
"""Check if the location is an incremental backup."""
|
||||
return False
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_backup_lsn(self, location):
|
||||
"""Get the backup LSN."""
|
294
backup/storage/swift.py
Normal file
294
backup/storage/swift.py
Normal file
@ -0,0 +1,294 @@
|
||||
# Copyright 2020 Catalyst Cloud
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
|
||||
from keystoneauth1 import session
|
||||
from keystoneauth1.identity import v3
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import swiftclient
|
||||
|
||||
from backup.storage import base
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def _get_user_keystone_session(auth_url, token, tenant_id):
|
||||
auth = v3.Token(
|
||||
auth_url=auth_url, token=token,
|
||||
project_domain_name="Default",
|
||||
project_id=tenant_id
|
||||
)
|
||||
return session.Session(auth=auth, verify=False)
|
||||
|
||||
|
||||
def _get_service_client(auth_url, token, tenant_id):
|
||||
sess = _get_user_keystone_session(auth_url, token, tenant_id)
|
||||
return swiftclient.Connection(session=sess)
|
||||
|
||||
|
||||
def _set_attr(original):
|
||||
"""Return a swift friendly header key."""
|
||||
key = original.replace('_', '-')
|
||||
return 'X-Object-Meta-%s' % key
|
||||
|
||||
|
||||
def _get_attr(original):
|
||||
"""Get a friendly name from an object header key."""
|
||||
key = original.replace('-', '_')
|
||||
key = key.replace('x_object_meta_', '')
|
||||
return key
|
||||
|
||||
|
||||
class StreamReader(object):
|
||||
"""Wrap the stream from the backup process and chunk it into segements."""
|
||||
|
||||
def __init__(self, stream, container, filename, max_file_size):
|
||||
self.stream = stream
|
||||
self.container = container
|
||||
self.filename = filename
|
||||
self.max_file_size = max_file_size
|
||||
self.segment_length = 0
|
||||
self.process = None
|
||||
self.file_number = 0
|
||||
self.end_of_file = False
|
||||
self.end_of_segment = False
|
||||
self.segment_checksum = hashlib.md5()
|
||||
|
||||
@property
|
||||
def base_filename(self):
|
||||
"""Filename with extensions removed."""
|
||||
return self.filename.split('.')[0]
|
||||
|
||||
@property
|
||||
def segment(self):
|
||||
return '%s_%08d' % (self.base_filename, self.file_number)
|
||||
|
||||
@property
|
||||
def first_segment(self):
|
||||
return '%s_%08d' % (self.base_filename, 0)
|
||||
|
||||
@property
|
||||
def segment_path(self):
|
||||
return '%s/%s' % (self.container, self.segment)
|
||||
|
||||
def read(self, chunk_size=2 ** 16):
|
||||
if self.end_of_segment:
|
||||
self.segment_length = 0
|
||||
self.segment_checksum = hashlib.md5()
|
||||
self.end_of_segment = False
|
||||
|
||||
# Upload to a new file if we are starting or too large
|
||||
if self.segment_length > (self.max_file_size - chunk_size):
|
||||
self.file_number += 1
|
||||
self.end_of_segment = True
|
||||
return ''
|
||||
|
||||
chunk = self.stream.read(chunk_size)
|
||||
if not chunk:
|
||||
self.end_of_file = True
|
||||
return ''
|
||||
|
||||
self.segment_checksum.update(chunk)
|
||||
self.segment_length += len(chunk)
|
||||
return chunk
|
||||
|
||||
|
||||
class SwiftStorage(base.Storage):
|
||||
def __init__(self):
|
||||
self.client = _get_service_client(CONF.os_auth_url, CONF.os_token,
|
||||
CONF.os_tenant_id)
|
||||
|
||||
def save(self, stream, metadata=None, container='database_backups'):
|
||||
"""Persist data from the stream to swift.
|
||||
|
||||
* Read data from stream, upload to swift
|
||||
* Update the new object metadata, stream provides method to get
|
||||
metadata.
|
||||
|
||||
:returns the new object checkshum and swift full URL.
|
||||
"""
|
||||
filename = stream.manifest
|
||||
LOG.info('Saving %(filename)s to %(container)s in swift.',
|
||||
{'filename': filename, 'container': container})
|
||||
|
||||
# Create the container if it doesn't already exist
|
||||
LOG.debug('Ensuring container %s', container)
|
||||
self.client.put_container(container)
|
||||
|
||||
# Swift Checksum is the checksum of the concatenated segment checksums
|
||||
swift_checksum = hashlib.md5()
|
||||
# Wrap the output of the backup process to segment it for swift
|
||||
stream_reader = StreamReader(stream, container, filename,
|
||||
2 * (1024 ** 3))
|
||||
|
||||
url = self.client.url
|
||||
# Full location where the backup manifest is stored
|
||||
location = "%s/%s/%s" % (url, container, filename)
|
||||
LOG.info('Uploading to %s', location)
|
||||
|
||||
# Information about each segment upload job
|
||||
segment_results = []
|
||||
|
||||
# Read from the stream and write to the container in swift
|
||||
while not stream_reader.end_of_file:
|
||||
LOG.debug('Uploading segment %s.', stream_reader.segment)
|
||||
path = stream_reader.segment_path
|
||||
etag = self.client.put_object(container,
|
||||
stream_reader.segment,
|
||||
stream_reader)
|
||||
|
||||
segment_checksum = stream_reader.segment_checksum.hexdigest()
|
||||
|
||||
# Check each segment MD5 hash against swift etag
|
||||
if etag != segment_checksum:
|
||||
msg = ('Failed to upload data segment to swift. ETAG: %(tag)s '
|
||||
'Segment MD5: %(checksum)s.' %
|
||||
{'tag': etag, 'checksum': segment_checksum})
|
||||
raise Exception(msg)
|
||||
|
||||
segment_results.append({
|
||||
'path': path,
|
||||
'etag': etag,
|
||||
'size_bytes': stream_reader.segment_length
|
||||
})
|
||||
|
||||
swift_checksum.update(segment_checksum.encode())
|
||||
|
||||
# All segments uploaded.
|
||||
num_segments = len(segment_results)
|
||||
LOG.debug('File uploaded in %s segments.', num_segments)
|
||||
|
||||
# An SLO will be generated if the backup was more than one segment in
|
||||
# length.
|
||||
large_object = num_segments > 1
|
||||
|
||||
# Meta data is stored as headers
|
||||
if metadata is None:
|
||||
metadata = {}
|
||||
metadata.update(stream.get_metadata())
|
||||
headers = {}
|
||||
for key, value in metadata.items():
|
||||
headers[_set_attr(key)] = value
|
||||
|
||||
LOG.debug('Metadata headers: %s', headers)
|
||||
if large_object:
|
||||
manifest_data = json.dumps(segment_results)
|
||||
LOG.info('Creating the SLO manifest file, manifest content: %s',
|
||||
manifest_data)
|
||||
# The etag returned from the manifest PUT is the checksum of the
|
||||
# manifest object (which is empty); this is not the checksum we
|
||||
# want.
|
||||
self.client.put_object(container,
|
||||
filename,
|
||||
manifest_data,
|
||||
query_string='multipart-manifest=put')
|
||||
|
||||
# Validation checksum is the Swift Checksum
|
||||
final_swift_checksum = swift_checksum.hexdigest()
|
||||
else:
|
||||
LOG.info('Moving segment %(segment)s to %(filename)s.',
|
||||
{'segment': stream_reader.first_segment,
|
||||
'filename': filename})
|
||||
segment_result = segment_results[0]
|
||||
# Just rename it via a special put copy.
|
||||
headers['X-Copy-From'] = segment_result['path']
|
||||
self.client.put_object(container,
|
||||
filename, '',
|
||||
headers=headers)
|
||||
|
||||
# Delete the old segment file that was copied
|
||||
LOG.debug('Deleting the old segment file %s.',
|
||||
stream_reader.first_segment)
|
||||
self.client.delete_object(container,
|
||||
stream_reader.first_segment)
|
||||
|
||||
final_swift_checksum = segment_result['etag']
|
||||
|
||||
# Validate the object by comparing checksums
|
||||
resp = self.client.head_object(container, filename)
|
||||
# swift returns etag in double quotes
|
||||
# e.g. '"dc3b0827f276d8d78312992cc60c2c3f"'
|
||||
etag = resp['etag'].strip('"')
|
||||
|
||||
# Raise an error and mark backup as failed
|
||||
if etag != final_swift_checksum:
|
||||
msg = ('Failed to upload data to swift. Manifest ETAG: %(tag)s '
|
||||
'Swift MD5: %(checksum)s' %
|
||||
{'tag': etag, 'checksum': final_swift_checksum})
|
||||
raise Exception(msg)
|
||||
|
||||
return (final_swift_checksum, location)
|
||||
|
||||
def _explodeLocation(self, location):
|
||||
storage_url = "/".join(location.split('/')[:-2])
|
||||
container = location.split('/')[-2]
|
||||
filename = location.split('/')[-1]
|
||||
return storage_url, container, filename
|
||||
|
||||
def _verify_checksum(self, etag, checksum):
|
||||
etag_checksum = etag.strip('"')
|
||||
if etag_checksum != checksum:
|
||||
msg = ('Checksum validation failure, actual: %s, expected: %s' %
|
||||
(etag_checksum, checksum))
|
||||
raise Exception(msg)
|
||||
|
||||
def load(self, location, backup_checksum):
|
||||
"""Get object from the location."""
|
||||
storage_url, container, filename = self._explodeLocation(location)
|
||||
|
||||
headers, contents = self.client.get_object(container, filename,
|
||||
resp_chunk_size=2 ** 16)
|
||||
|
||||
if backup_checksum:
|
||||
self._verify_checksum(headers.get('etag', ''), backup_checksum)
|
||||
|
||||
return contents
|
||||
|
||||
def load_metadata(self, parent_location, parent_checksum):
|
||||
"""Load metadata from swift."""
|
||||
if not parent_location:
|
||||
return {}
|
||||
|
||||
_, container, filename = self._explodeLocation(parent_location)
|
||||
headers = self.client.head_object(container, filename)
|
||||
|
||||
if parent_checksum:
|
||||
self._verify_checksum(headers.get('etag', ''), parent_checksum)
|
||||
|
||||
_meta = {}
|
||||
for key, value in headers.items():
|
||||
if key.startswith('x-object-meta'):
|
||||
_meta[_get_attr(key)] = value
|
||||
|
||||
return _meta
|
||||
|
||||
def is_incremental_backup(self, location):
|
||||
"""Check if the location is an incremental backup."""
|
||||
_, container, filename = self._explodeLocation(location)
|
||||
headers = self.client.head_object(container, filename)
|
||||
|
||||
if 'x-object-meta-parent-location' in headers:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_backup_lsn(self, location):
|
||||
"""Get the backup LSN."""
|
||||
_, container, filename = self._explodeLocation(location)
|
||||
headers = self.client.head_object(container, filename)
|
||||
return headers.get('x-object-meta-lsn')
|
@ -362,7 +362,7 @@ function create_subnet_v6 {
|
||||
if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
|
||||
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
|
||||
fi
|
||||
if [ -n $SUBNETPOOL_V6_ID ]; then
|
||||
if [[ -n $SUBNETPOOL_V6_ID ]]; then
|
||||
subnet_params+="--subnet-pool $SUBNETPOOL_V6_ID "
|
||||
else
|
||||
subnet_params+="--subnet-range $FIXED_RANGE_V6 $ipv6_modes} "
|
||||
@ -447,27 +447,26 @@ function create_guest_image {
|
||||
return 0
|
||||
fi
|
||||
|
||||
image_name="trove-datastore-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE}"
|
||||
image_url_var="TROVE_NON_DEV_IMAGE_URL_${TROVE_DATASTORE_TYPE^^}"
|
||||
image_url=`eval echo '$'"$image_url_var"`
|
||||
image_name="trove-guest-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}"
|
||||
mkdir -p $HOME/images
|
||||
image_file=$HOME/images/${image_name}.qcow2
|
||||
|
||||
if [[ -n ${image_url} ]]; then
|
||||
echo "Downloading guest image from ${image_url}"
|
||||
curl -sSL ${image_url} -o ${image_file}
|
||||
if [[ -n ${TROVE_NON_DEV_IMAGE_URL} ]]; then
|
||||
echo "Downloading guest image from ${TROVE_NON_DEV_IMAGE_URL}"
|
||||
curl -sSL ${TROVE_NON_DEV_IMAGE_URL} -o ${image_file}
|
||||
else
|
||||
echo "Starting to create guest image"
|
||||
|
||||
TROVE_BRANCH=${TROVE_BRANCH} $DEST/trove/integration/scripts/trovestack \
|
||||
$DEST/trove/integration/scripts/trovestack \
|
||||
build-image \
|
||||
${TROVE_DATASTORE_TYPE} \
|
||||
${TROVE_IMAGE_OS} \
|
||||
${TROVE_IMAGE_OS_RELEASE} \
|
||||
true
|
||||
true \
|
||||
${TROVE_IMAGE_OS} \
|
||||
${image_file}
|
||||
fi
|
||||
|
||||
if [ ! -f ${image_file} ]; then
|
||||
if [[ ! -f ${image_file} ]]; then
|
||||
echo "Image file was not found at ${image_file}"
|
||||
exit 1
|
||||
fi
|
||||
@ -485,7 +484,7 @@ function create_guest_image {
|
||||
$TROVE_MANAGE datastore_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION
|
||||
|
||||
echo "Add parameter validation rules if available"
|
||||
if [ -f $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json ]; then
|
||||
if [[ -f $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json ]]; then
|
||||
$TROVE_MANAGE db_load_datastore_config_parameters "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" \
|
||||
$DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json
|
||||
fi
|
||||
|
@ -29,15 +29,9 @@ TROVE_LOCAL_API_PASTE_INI=${TROVE_LOCAL_API_PASTE_INI:-${TROVE_LOCAL_CONF_DIR}/a
|
||||
TROVE_LOCAL_POLICY_JSON=${TROVE_LOCAL_POLICY_JSON:-${TROVE_LOCAL_CONF_DIR}/policy.json}
|
||||
|
||||
TROVE_IMAGE_OS=${TROVE_IMAGE_OS:-"ubuntu"}
|
||||
TROVE_IMAGE_OS_RELEASE=${TROVE_IMAGE_OS_RELEASE:-"xenial"}
|
||||
TROVE_IMAGE_OS_RELEASE=${TROVE_IMAGE_OS_RELEASE:-"bionic"}
|
||||
TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"}
|
||||
if [[ "$DISTRO" == "xenial" || "$DISTRO" == "bionic" ]]; then
|
||||
TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.7"}
|
||||
TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.7"}
|
||||
else
|
||||
TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"}
|
||||
TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.6"}
|
||||
fi
|
||||
TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.7.29"}
|
||||
|
||||
# Configuration values listed here for reference
|
||||
TROVE_MAX_ACCEPTED_VOLUME_SIZE=${TROVE_MAX_ACCEPTED_VOLUME_SIZE}
|
||||
@ -46,8 +40,8 @@ TROVE_MAX_VOLUMES_PER_TENANT=${TROVE_MAX_VOLUMES_PER_TENANT}
|
||||
TROVE_AGENT_CALL_LOW_TIMEOUT=${TROVE_AGENT_CALL_LOW_TIMEOUT}
|
||||
TROVE_AGENT_CALL_HIGH_TIMEOUT=${TROVE_AGENT_CALL_HIGH_TIMEOUT:-1200}
|
||||
TROVE_RESIZE_TIME_OUT=${TROVE_RESIZE_TIME_OUT}
|
||||
TROVE_USAGE_TIMEOUT=${TROVE_USAGE_TIMEOUT:-900}
|
||||
TROVE_STATE_CHANGE_WAIT_TIME=${TROVE_STATE_CHANGE_WAIT_TIME}
|
||||
TROVE_USAGE_TIMEOUT=${TROVE_USAGE_TIMEOUT:-1800}
|
||||
TROVE_STATE_CHANGE_WAIT_TIME=${TROVE_STATE_CHANGE_WAIT_TIME:-600}
|
||||
TROVE_COMMAND_PROCESS_TIMEOUT=${TROVE_COMMAND_PROCESS_TIMEOUT:-60}
|
||||
|
||||
# Set up the host gateway
|
||||
@ -90,4 +84,4 @@ CELLSV2_SETUP=singleconductor
|
||||
|
||||
# Enable or disable the Trove guest image build during devstack installation.
|
||||
TROVE_ENABLE_IMAGE_BUILD=${TROVE_ENABLE_IMAGE_BUILD:-"true"}
|
||||
TROVE_NON_DEV_IMAGE_URL_MYSQL=${TROVE_NON_DEV_IMAGE_URL_MYSQL:-""}
|
||||
TROVE_NON_DEV_IMAGE_URL=${TROVE_NON_DEV_IMAGE_URL:-""}
|
||||
|
@ -26,54 +26,42 @@ stored in Glance. This document shows you the steps to build the guest images.
|
||||
periodically built and published in
|
||||
http://tarballs.openstack.org/trove/images/ in Trove upstream CI.
|
||||
|
||||
Additionally, if you install Trove in devstack environment, a MySQL image
|
||||
Additionally, if you install Trove in devstack environment, the guest image
|
||||
is created and registered in Glance automatically, unless it's disabled by
|
||||
setting ``TROVE_ENABLE_IMAGE_BUILD=false`` in devstack local.conf file.
|
||||
|
||||
High Level Overview of a Trove Guest Instance
|
||||
=============================================
|
||||
|
||||
At the most basic level, a Trove Guest Instance is a Nova instance
|
||||
launched by Trove in response to a create command. For most of this
|
||||
document, we will confine ourselves to single instance databases; in
|
||||
other words, without the additional complexity of replication or
|
||||
mirroring. Guest instances and Guest images for replicated and
|
||||
mirrored database instances will be addressed specifically in later
|
||||
sections of this document.
|
||||
At the most basic level, a Trove Guest Instance is a Nova instance launched by
|
||||
Trove in response to a create command. This section describes the various
|
||||
components of a Trove Guest Instance.
|
||||
|
||||
This section describes the various components of a Trove Guest
|
||||
Instance.
|
||||
----------------
|
||||
Operating System
|
||||
----------------
|
||||
|
||||
-----------------------------
|
||||
Operating System and Database
|
||||
-----------------------------
|
||||
The officially supported operating system is Ubuntu, based on which the
|
||||
functional tests are running.
|
||||
|
||||
A Trove Guest Instance contains at least a functioning Operating
|
||||
System and the database software that the instance wishes to provide
|
||||
(as a Service). For example, if your chosen operating system is Ubuntu
|
||||
and you wish to deliver MySQL version 5.7, then your guest instance is
|
||||
a Nova instance running the Ubuntu operating system and will have
|
||||
MySQL version 5.7 installed on it.
|
||||
------
|
||||
Docker
|
||||
------
|
||||
|
||||
Since Vitoria release, all the datastore services are installed by docker
|
||||
container inside the Trove instance, so docker should be installed when
|
||||
building the guest image.
|
||||
|
||||
-----------------
|
||||
Trove Guest Agent
|
||||
-----------------
|
||||
|
||||
Trove supports multiple databases, some of them are relational (RDBMS)
|
||||
and some are non-relational (NoSQL). In order to provide a common
|
||||
management interface to all of these, the Trove Guest Instance has on
|
||||
it a 'Guest Agent'. The Trove Guest Agent is a component of the
|
||||
Trove system that is specific to the database running on that Guest
|
||||
Instance.
|
||||
The guest agent runs inside the Nova instances that are used to run the
|
||||
database engines. The agent listens to the messaging bus for the topic and is
|
||||
responsible for actually translating and executing the commands that are sent
|
||||
to it by the task manager component for the particular datastore.
|
||||
|
||||
The purpose of the Trove Guest Agent is to implement the Trove Guest
|
||||
Agent API for the specific database. This includes such things as the
|
||||
implementation of the database 'start' and 'stop' commands. The Trove
|
||||
Guest Agent API is the common API used by Trove to communicate with
|
||||
any guest database, and the Guest Agent is the implementation of that
|
||||
API for the specific database.
|
||||
|
||||
The Trove Guest Agent runs inside the Trove Guest Instance.
|
||||
Trove guest agent is responsible for datastore docker container management.
|
||||
|
||||
------------------------------------------
|
||||
Injected Configuration for the Guest Agent
|
||||
@ -104,44 +92,45 @@ services(e.g. the message queue).
|
||||
Building Guest Images
|
||||
=====================
|
||||
|
||||
Since Victoria release, a single trove guest image can be used for different
|
||||
datastores, it's unnecessary to maintain different images for differnt
|
||||
datastores.
|
||||
|
||||
-----------------------------
|
||||
Build images using trovestack
|
||||
-----------------------------
|
||||
|
||||
``trovestack`` is the recommended tooling provided by Trove community to build
|
||||
the guest images. Before running ``trovestack`` command, go to the scripts
|
||||
folder:
|
||||
the guest images. Before running ``trovestack`` command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
git clone https://opendev.org/openstack/trove
|
||||
cd trove/integration/scripts
|
||||
|
||||
The trove guest agent image could be created by running the following command:
|
||||
The trove guest image could be created by running the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./trovestack build-image \
|
||||
${datastore_type} \
|
||||
${guest_os} \
|
||||
${guest_os_release} \
|
||||
${dev_mode} \
|
||||
${guest_username} \
|
||||
${imagepath}
|
||||
${output_image_path}
|
||||
|
||||
* Currently, only ``guest_os=ubuntu`` and ``guest_os_release=xenial`` are fully
|
||||
* Currently, only ``guest_os=ubuntu`` and ``guest_os_release=bionic`` are fully
|
||||
tested and supported.
|
||||
|
||||
* Default input values:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
datastore_type=mysql
|
||||
guest_os=ubuntu
|
||||
guest_os_release=xenial
|
||||
guest_os_release=bionic
|
||||
dev_mode=true
|
||||
guest_username=ubuntu
|
||||
imagepath=$HOME/images/trove-${guest_os}-${guest_os_release}-${datastore_type}
|
||||
output_image_path=$HOME/images/trove-guest--${guest_os}-${guest_os_release}-dev
|
||||
|
||||
* ``dev_mode=true`` is mainly for testing purpose for trove developers and it's
|
||||
necessary to build the image on the trove controller host, because the host
|
||||
@ -159,31 +148,27 @@ The trove guest agent image could be created by running the following command:
|
||||
* ``HOST_SCP_USERNAME``: Only used in dev mode, this is the user name used by
|
||||
guest agent to connect to the controller host, e.g. in devstack
|
||||
environment, it should be the ``stack`` user.
|
||||
* ``GUEST_WORKING_DIR``: The place to save the guest image, default value is
|
||||
``$HOME/images``.
|
||||
* ``TROVE_BRANCH``: Only used in dev mode. The branch name of Trove code
|
||||
repository, by default it's master, use other branches as needed such as
|
||||
stable/train.
|
||||
|
||||
For example, in order to build a MySQL image for Ubuntu Xenial operating
|
||||
For example, in order to build a guest image for Ubuntu Bionic operating
|
||||
system in development mode:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./trovestack build-image mysql ubuntu xenial true
|
||||
$ ./trovestack build-image ubuntu bionic true ubuntu
|
||||
|
||||
Once the image build is finished, the cloud administrator needs to register the
|
||||
image in Glance and register a new datastore or version in Trove using
|
||||
``trove-manage`` command, e.g. after building an image for MySQL 5.7.1:
|
||||
``trove-manage`` command, e.g. after building an image for MySQL 5.7.29:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ openstack image create ubuntu-mysql-5.7.1-dev \
|
||||
--public \
|
||||
$ openstack image create trove-guest-ubuntu-bionic \
|
||||
--private \
|
||||
--disk-format qcow2 \
|
||||
--container-format bare \
|
||||
--file ~/images/ubuntu-xenial-mysql.qcow2
|
||||
$ trove-manage datastore_version_update mysql 5.7.1 mysql $image_id "" 1
|
||||
--file ~/images/trove-guest-ubuntu-bionic-dev.qcow2
|
||||
$ trove-manage datastore_version_update mysql 5.7.29 mysql $image_id "" 1
|
||||
$ trove-manage db_load_datastore_config_parameters mysql 5.7.29 ${trove_repo_dir}/trove/templates/mysql/validation-rules.json
|
||||
|
||||
If you see anything error or need help for the image creation, please ask help
|
||||
either in ``#openstack-trove`` IRC channel or sending emails to
|
||||
|
@ -322,7 +322,7 @@ Command examples:
|
||||
# Use 5.7.29 as the default datastore version for 'mysql'
|
||||
trove-manage datastore_update mysql 5.7.29
|
||||
# Register configuration parameters for 5.7.29 version of datastore 'mysql'
|
||||
trove-manage db_load_datastore_config_parameters mysql 5.7.29 $workdir/trove/trove/templates/mysql/validation-rules.json
|
||||
trove-manage db_load_datastore_config_parameters mysql 5.7.29 ${trove_repo_dir}}/trove/templates/mysql/validation-rules.json
|
||||
|
||||
|
||||
Quota Management
|
||||
|
@ -1,3 +0,0 @@
|
||||
These cloudinit scripts will used as userdata on instance create
|
||||
File names should match pattern: service_type.cloudinit
|
||||
For example: mysql.cloudinit
|
@ -1,4 +0,0 @@
|
||||
These conf files are read and used by the guest to provide extra
|
||||
information to the guest. The first example of this is the
|
||||
guest_info.conf which will have the uuid of the instance so that
|
||||
the guest can report back things to the infra.
|
@ -1 +0,0 @@
|
||||
# Guest-specific information injected by the taskmanager
|
@ -1,166 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
|
||||
#=========== RPC Configuration ======================
|
||||
|
||||
# URL representing the messaging driver to use and its full configuration.
|
||||
# If not set, we fall back to the 'rpc_backend' option and driver specific
|
||||
# configuration.
|
||||
#transport_url=<None>
|
||||
|
||||
# The messaging driver to use. Options include rabbit, qpid and zmq.
|
||||
# Default is rabbit. (string value)
|
||||
#rpc_backend=rabbit
|
||||
|
||||
# The default exchange under which topics are scoped. May be
|
||||
# overridden by an exchange name specified in the 'transport_url option.
|
||||
control_exchange = trove
|
||||
|
||||
# ========== Configuration options for Swift ==========
|
||||
|
||||
# The swift_url can be specified directly or fetched from Keystone catalog.
|
||||
|
||||
# To fetch from Keystone, comment out swift_url, and uncomment the others.
|
||||
# swift_url = http://10.0.0.1:8080/v1/AUTH_
|
||||
# Region name of this node. Default value is None.
|
||||
# os_region_name = RegionOne
|
||||
# Service type to use when searching catalog.
|
||||
# swift_service_type = object-store
|
||||
|
||||
|
||||
# ========== Datastore Manager Configurations ==========
|
||||
|
||||
# Datastore manager implementations.
|
||||
# Format: list of 'datastore-type:datastore.manager.implementation.module'
|
||||
# datastore_registry_ext = mysql:trove.guestagent.datastore.mysql.manager.Manager, percona:trove.guestagent.datastore.mysql.manager.Manager
|
||||
|
||||
|
||||
# ========== Default Users / DBs Configuration ==========
|
||||
|
||||
# Permissions to grant "root" user by default
|
||||
root_grant = ALL
|
||||
root_grant_option = True
|
||||
# root_grant = ALTER ROUTINE, CREATE, ALTER, CREATE ROUTINE, CREATE TEMPORARY TABLES, CREATE VIEW, CREATE USER, DELETE, DROP, EVENT, EXECUTE, INDEX, INSERT, LOCK TABLES, PROCESS, REFERENCES, SELECT, SHOW DATABASES, SHOW VIEW, TRIGGER, UPDATE, USAGE
|
||||
# root_grant_option = False
|
||||
|
||||
# Default password Length for root password
|
||||
# default_password_length = 36
|
||||
|
||||
|
||||
# ========== Default Storage Options for backup ==========
|
||||
|
||||
# Default configuration for storage strategy and storage options
|
||||
# for backups
|
||||
|
||||
# For storage to Swift, use the following as defaults:
|
||||
# storage_strategy = SwiftStorage
|
||||
# storage_namespace = trove.common.strategies.storage.swift
|
||||
|
||||
# Default config options for storing backups to swift
|
||||
# backup_swift_container = database_backups
|
||||
# backup_use_gzip_compression = True
|
||||
# backup_use_openssl_encryption = True
|
||||
# backup_aes_cbc_key = "default_aes_cbc_key"
|
||||
# backup_use_snet = False
|
||||
# backup_chunk_size = 65536
|
||||
# backup_segment_max_size = 2147483648
|
||||
|
||||
|
||||
# ========== Sample Logging Configuration ==========
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
# debug = True
|
||||
|
||||
# Directory and path for log files
|
||||
log_dir = /var/log/trove/
|
||||
log_file = logfile.txt
|
||||
log_config_append = /etc/trove/trove-logging-guestagent.conf
|
||||
|
||||
[profiler]
|
||||
# If False fully disable profiling feature.
|
||||
#enabled = False
|
||||
# If False doesn't trace SQL requests.
|
||||
#trace_sqlalchemy = True
|
||||
|
||||
[oslo_messaging_notifications]
|
||||
|
||||
#
|
||||
# From oslo.messaging
|
||||
#
|
||||
|
||||
# The Driver(s) to handle sending notifications. Possible
|
||||
# values are messaging, messagingv2, routing, log, test, noop
|
||||
# (multi valued)
|
||||
# Deprecated group/name - [DEFAULT]/notification_driver
|
||||
#driver =
|
||||
|
||||
# A URL representing the messaging driver to use for
|
||||
# notifications. If not set, we fall back to the same
|
||||
# configuration used for RPC. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/notification_transport_url
|
||||
#transport_url = <None>
|
||||
|
||||
# AMQP topic used for OpenStack notifications. (list value)
|
||||
# Deprecated group/name - [rpc_notifier2]/topics
|
||||
# Deprecated group/name - [DEFAULT]/notification_topics
|
||||
#topics = notifications
|
||||
|
||||
# The maximum number of attempts to re-send a notification
|
||||
# message which failed to be delivered due to a recoverable
|
||||
# error. 0 - No retry, -1 - indefinite (integer value)
|
||||
#retry = -1
|
||||
|
||||
# ========== Datastore Specific Configuration Options ==========
|
||||
|
||||
[mysql]
|
||||
# For mysql, the following are the defaults for backup, and restore:
|
||||
# backup_strategy = InnoBackupEx
|
||||
# backup_namespace = trove.guestagent.strategies.backup.mysql_impl
|
||||
# restore_namespace = trove.guestagent.strategies.restore.mysql_impl
|
||||
# Default configuration for mysql replication
|
||||
# replication_strategy = MysqlBinlogReplication
|
||||
# replication_namespace = trove.guestagent.strategies.replication.mysql_binlog
|
||||
# replication_user = slave_user
|
||||
# replication_password = slave_password
|
||||
|
||||
# Users to ignore for user create/list/delete operations
|
||||
# ignore_users = os_admin
|
||||
|
||||
# Databases to ignore for db create/list/delete operations
|
||||
# ignore_dbs = mysql, information_schema, performance_schema
|
||||
|
||||
[vertica]
|
||||
# For vertica, following are the defaults needed:
|
||||
# mount_point = /var/lib/vertica
|
||||
# readahead_size = 2048
|
||||
# guestagent_strategy = trove.common.strategies.cluster.experimental.vertica.guestagent.VerticaGuestAgentStrategy
|
||||
|
||||
[redis]
|
||||
# For redis, the following are the defaults for backup, and restore:
|
||||
# backup_strategy = RedisBackup
|
||||
# backup_namespace = trove.guestagent.strategies.backup.experimental.redis_impl
|
||||
# restore_namespace = trove.guestagent.strategies.restore.experimental.redis_impl
|
||||
|
||||
[percona]
|
||||
backup_namespace = trove.guestagent.strategies.backup.mysql_impl
|
||||
restore_namespace = trove.guestagent.strategies.restore.mysql_impl
|
||||
|
||||
[couchbase]
|
||||
backup_namespace = trove.guestagent.strategies.backup.experimental.couchbase_impl
|
||||
restore_namespace = trove.guestagent.strategies.restore.experimental.couchbase_impl
|
||||
|
||||
[cassandra]
|
||||
backup_namespace = trove.guestagent.strategies.backup.experimental.cassandra_impl
|
||||
restore_namespace = trove.guestagent.strategies.restore.experimental.cassandra_impl
|
||||
|
||||
[db2]
|
||||
# For db2, the following are the defaults for backup, and restore:
|
||||
# backup_strategy = DB2OfflineBackup
|
||||
# backup_namespace = trove.guestagent.strategies.backup.experimental.db2_impl
|
||||
# restore_namespace = trove.guestagent.strategies.restore.experimental.db2_impl
|
||||
|
||||
[couchdb]
|
||||
#For CouchDB, the following are the defaults for backup and restore:
|
||||
# backup_strategy = CouchDBBackup
|
||||
# backup_namespace = trove.guestagent.strategies.backup.experimental.couchdb_impl
|
||||
# restore_namespace = trove.guestagent.strategies.restore.experimental.couchdb_impl
|
@ -1,19 +0,0 @@
|
||||
---
|
||||
version: '2.0'
|
||||
|
||||
name: trove
|
||||
|
||||
description: Trove Workflows
|
||||
|
||||
workflows:
|
||||
|
||||
backup_create:
|
||||
input: [instance, name, description, incremental]
|
||||
output:
|
||||
status: <% $.message %>
|
||||
|
||||
tasks:
|
||||
backup_create:
|
||||
action: trove.backups_create instance=<% $.instance %> name=<% $.name %> description=<% $.description %> incremental=<% $.incremental %>
|
||||
publish:
|
||||
message: <% 'Backup complete' %>
|
@ -1,311 +0,0 @@
|
||||
[DEFAULT]
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
debug = True
|
||||
|
||||
# Address to bind the API server
|
||||
bind_host = 0.0.0.0
|
||||
|
||||
# Port the bind the API server to
|
||||
bind_port = 8779
|
||||
|
||||
# Number of workers for the API service. The default will
|
||||
# be the number of CPUs available. (integer value)
|
||||
#trove_api_workers=None
|
||||
|
||||
#===================== RPC Configuration =================================
|
||||
|
||||
# URL representing the messaging driver to use and its full configuration.
|
||||
# If not set, we fall back to the 'rpc_backend' option and driver specific
|
||||
# configuration.
|
||||
#transport_url=<None>
|
||||
|
||||
# The messaging driver to use. Options include rabbit, qpid and zmq.
|
||||
# Default is rabbit. (string value)
|
||||
#rpc_backend=rabbit
|
||||
|
||||
# The default exchange under which topics are scoped. May be
|
||||
# overridden by an exchange name specified in the 'transport_url option.
|
||||
control_exchange = trove
|
||||
|
||||
# Maximum line size of message headers to be accepted.
|
||||
# max_header_line may need to be increased when using large tokens
|
||||
# (typically those generated by the Keystone v3 API with big service
|
||||
# catalogs)
|
||||
# max_header_line = 16384
|
||||
|
||||
#DB Api Implementation
|
||||
db_api_implementation = "trove.db.sqlalchemy.api"
|
||||
|
||||
# Configuration options for talking to nova via the novaclient.
|
||||
trove_auth_url = http://0.0.0.0/identity/v2.0
|
||||
#nova_compute_url = http://localhost:8774/v2
|
||||
#cinder_url = http://localhost:8776/v1
|
||||
#swift_url = http://localhost:8080/v1/AUTH_
|
||||
#neutron_url = http://localhost:9696/
|
||||
|
||||
# nova_compute_url, cinder_url, swift_url, and can all be fetched
|
||||
# from Keystone. To fetch from Keystone, comment out nova_compute_url,
|
||||
# cinder_url, swift_url, and and optionally uncomment the lines below.
|
||||
|
||||
# Region name of this node. Used when searching catalog. Default value is None.
|
||||
#os_region_name = RegionOne
|
||||
# Service type to use when searching catalog.
|
||||
#nova_compute_service_type = compute
|
||||
# Service type to use when searching catalog.
|
||||
#cinder_service_type = volumev2
|
||||
# Service type to use when searching catalog.
|
||||
#swift_service_type = object-store
|
||||
# Service type to use when searching catalog.
|
||||
#neutron_service_type = network
|
||||
|
||||
#ip_regex = ^(15.|123.)
|
||||
#black_list_regex = ^10.0.0.
|
||||
|
||||
# Config options for enabling volume service
|
||||
trove_volume_support = True
|
||||
block_device_mapping = vdb
|
||||
device_path = /dev/vdb
|
||||
# Maximum volume size for an instance
|
||||
max_accepted_volume_size = 10
|
||||
max_instances_per_tenant = 5
|
||||
# Maximum volume capacity (in GB) spanning across all trove volumes per tenant
|
||||
max_volumes_per_tenant = 100
|
||||
max_backups_per_tenant = 5
|
||||
volume_time_out=30
|
||||
|
||||
# Config options for rate limits
|
||||
http_get_rate = 200
|
||||
http_post_rate = 200
|
||||
http_put_rate = 200
|
||||
http_delete_rate = 200
|
||||
http_mgmt_post_rate = 200
|
||||
|
||||
# Trove DNS
|
||||
trove_dns_support = False
|
||||
dns_account_id = 123456
|
||||
dns_auth_url = http://127.0.0.1/identity/v2.0
|
||||
dns_username = user
|
||||
dns_passkey = password
|
||||
dns_ttl = 3600
|
||||
dns_domain_name = 'trove.com.'
|
||||
dns_domain_id = 11111111-1111-1111-1111-111111111111
|
||||
dns_driver = trove.dns.designate.driver.DesignateDriver
|
||||
dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory
|
||||
dns_endpoint_url = http://127.0.0.1/v1/
|
||||
dns_service_type = dns
|
||||
|
||||
# Neutron
|
||||
network_driver = trove.network.nova.NovaNetwork
|
||||
management_networks =
|
||||
|
||||
|
||||
# Taskmanager queue name
|
||||
taskmanager_queue = taskmanager
|
||||
|
||||
# Auth
|
||||
admin_roles = admin
|
||||
|
||||
# Guest related conf
|
||||
agent_heartbeat_time = 10
|
||||
agent_call_low_timeout = 5
|
||||
agent_call_high_timeout = 150
|
||||
|
||||
# Reboot time out for instances
|
||||
reboot_time_out = 60
|
||||
|
||||
# Trove api-paste file name
|
||||
api_paste_config = api-paste.ini
|
||||
|
||||
|
||||
# ============ Notification System configuration ===========================
|
||||
|
||||
# Sets the notification driver used by oslo.messaging. Options include
|
||||
# messaging, messagingv2, log and routing. Default is 'noop'
|
||||
# notification_driver=noop
|
||||
|
||||
# Topics used for OpenStack notifications, list value. Default is 'notifications'.
|
||||
# notification_topics=notifications
|
||||
|
||||
# ============ Logging information =============================
|
||||
#log_dir = /integration/report
|
||||
#log_file = trove-api.log
|
||||
|
||||
|
||||
[database]
|
||||
|
||||
# SQLAlchemy connection string for the reference implementation
|
||||
# registry server. Any valid SQLAlchemy connection string is fine.
|
||||
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
|
||||
# connection = sqlite:///trove_test.sqlite
|
||||
connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove
|
||||
#connection = postgresql://trove:trove@localhost/trove
|
||||
|
||||
# Period in seconds after which SQLAlchemy should reestablish its connection
|
||||
# to the database.
|
||||
#
|
||||
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
|
||||
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
|
||||
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
|
||||
# before MySQL can drop the connection.
|
||||
idle_timeout = 3600
|
||||
|
||||
|
||||
|
||||
# ============ SSL configuration (and enablement) =============================
|
||||
# In order to enable SSL for the trove api server, uncomment
|
||||
# the cert_file and key_file - and of course have those files
|
||||
# accessible. The existence of those setting and files will
|
||||
# enable SSL.
|
||||
|
||||
[profiler]
|
||||
# If False fully disable profiling feature.
|
||||
#enabled = False
|
||||
# If False doesn't trace SQL requests.
|
||||
#trace_sqlalchemy = True
|
||||
|
||||
[ssl]
|
||||
|
||||
#cert_file = /path/to/server.crt
|
||||
#key_file = /path/to/server.key
|
||||
#optional:
|
||||
#ca_file = /path/to/ca_file
|
||||
|
||||
[oslo_messaging_notifications]
|
||||
|
||||
#
|
||||
# From oslo.messaging
|
||||
#
|
||||
|
||||
# The Driver(s) to handle sending notifications. Possible
|
||||
# values are messaging, messagingv2, routing, log, test, noop
|
||||
# (multi valued)
|
||||
# Deprecated group/name - [DEFAULT]/notification_driver
|
||||
#driver =
|
||||
|
||||
# A URL representing the messaging driver to use for
|
||||
# notifications. If not set, we fall back to the same
|
||||
# configuration used for RPC. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/notification_transport_url
|
||||
#transport_url = <None>
|
||||
|
||||
# AMQP topic used for OpenStack notifications. (list value)
|
||||
# Deprecated group/name - [rpc_notifier2]/topics
|
||||
# Deprecated group/name - [DEFAULT]/notification_topics
|
||||
#topics = notifications
|
||||
|
||||
# The maximum number of attempts to re-send a notification
|
||||
# message which failed to be delivered due to a recoverable
|
||||
# error. 0 - No retry, -1 - indefinite (integer value)
|
||||
#retry = -1
|
||||
|
||||
[mysql]
|
||||
root_on_create = False
|
||||
# Format (single port or port range): A, B-C
|
||||
# where C greater than B
|
||||
tcp_ports = 3306
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
# Users to ignore for user create/list/delete operations
|
||||
ignore_users = os_admin, root
|
||||
ignore_dbs = mysql, information_schema, performance_schema
|
||||
|
||||
|
||||
[redis]
|
||||
tcp_ports = 6379, 16379
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[cassandra]
|
||||
tcp_ports = 7000, 7001, 9042, 9160
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[couchbase]
|
||||
tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[mongodb]
|
||||
tcp_ports = 2500, 27017, 27019
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
num_config_servers_per_cluster = 1
|
||||
num_query_routers_per_cluster = 1
|
||||
|
||||
[vertica]
|
||||
tcp_ports = 5433, 5434, 22, 5444, 5450, 4803
|
||||
udp_ports = 5433, 4803, 4804, 6453
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
cluster_support = True
|
||||
cluster_member_count = 3
|
||||
api_strategy = trove.common.strategies.cluster.experimental.vertica.api.VerticaAPIStrategy
|
||||
|
||||
|
||||
# ============ CORS configuration =============================
|
||||
|
||||
[cors]
|
||||
|
||||
#
|
||||
# From oslo.middleware.cors
|
||||
#
|
||||
|
||||
# Indicate whether this resource may be shared with the domain received in the
|
||||
# requests "origin" header. (list value)
|
||||
#allowed_origin = <None>
|
||||
|
||||
# Indicate that the actual request can include user credentials (boolean value)
|
||||
#allow_credentials = true
|
||||
|
||||
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
|
||||
# Headers. (list value)
|
||||
#expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID
|
||||
|
||||
# Maximum cache age of CORS preflight requests. (integer value)
|
||||
#max_age = 3600
|
||||
|
||||
# Indicate which methods can be used during the actual request. (list value)
|
||||
#allow_methods = GET,PUT,POST,DELETE,PATCH
|
||||
|
||||
# Indicate which header field names may be used during the actual request.
|
||||
# (list value)
|
||||
#allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID
|
||||
|
||||
|
||||
[cors.subdomain]
|
||||
|
||||
#
|
||||
# From oslo.middleware.cors
|
||||
#
|
||||
|
||||
# Indicate whether this resource may be shared with the domain received in the
|
||||
# requests "origin" header. (list value)
|
||||
#allowed_origin = <None>
|
||||
|
||||
# Indicate that the actual request can include user credentials (boolean value)
|
||||
#allow_credentials = true
|
||||
|
||||
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
|
||||
# Headers. (list value)
|
||||
#expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID
|
||||
|
||||
# Maximum cache age of CORS preflight requests. (integer value)
|
||||
#max_age = 3600
|
||||
|
||||
# Indicate which methods can be used during the actual request. (list value)
|
||||
#allow_methods = GET,PUT,POST,DELETE,PATCH
|
||||
|
||||
# Indicate which header field names may be used during the actual request.
|
||||
# (list value)
|
||||
#allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID
|
||||
|
||||
[oslo_middleware]
|
||||
|
||||
#
|
||||
# From oslo.middleware
|
||||
#
|
||||
|
||||
# Whether the application is behind a proxy or not. This determines if the
|
||||
# middleware should parse the headers or not. (boolean value)
|
||||
#enable_proxy_headers_parsing = false
|
@ -1,12 +0,0 @@
|
||||
{
|
||||
"devstack":null,
|
||||
"glance":null,
|
||||
"horizon":null,
|
||||
"keystone":null,
|
||||
"nova":null,
|
||||
"python_openstackclient":null,
|
||||
"python_novaclient":null,
|
||||
"trove":null,
|
||||
"python_troveclient":null,
|
||||
"tempest":null
|
||||
}
|
@ -1,16 +0,0 @@
|
||||
============
|
||||
apt-conf-dir
|
||||
============
|
||||
|
||||
This element overrides the default apt.conf.d directory for APT based systems.
|
||||
|
||||
Environment Variables
|
||||
---------------------
|
||||
|
||||
DIB_APT_CONF_DIR:
|
||||
:Required: No
|
||||
:Default: None
|
||||
:Description: To override `DIB_APT_CONF_DIR`, set it to the path to your
|
||||
apt.conf.d. The new apt.conf.d will take effect at build time
|
||||
and run time.
|
||||
:Example: ``DIB_APT_CONF_DIR=/etc/apt/apt.conf``
|
@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Override the default /etc/apt/apt.conf.d directory with $DIB_APT_CONF_DIR
|
||||
|
||||
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
|
||||
set -x
|
||||
fi
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
# exit directly if DIB_APT_CONF_DIR is not defined properly
|
||||
if [ -z "${DIB_APT_CONF_DIR:-}" ] ; then
|
||||
echo "DIB_APT_CONF_DIR is not set - no apt.conf.d will be copied in"
|
||||
exit 0
|
||||
elif [ ! -d "$DIB_APT_CONF_DIR" ] ; then
|
||||
echo "$DIB_APT_CONF_DIR is not a valid apt.conf.d directory."
|
||||
echo "You should assign a proper apt.conf.d directory in DIB_APT_CONF_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# copy the apt.conf to cloudimg
|
||||
sudo cp -L -f -R $DIB_APT_CONF_DIR $TMP_MOUNT_PATH/etc/apt
|
@ -4,3 +4,4 @@ pkg-map
|
||||
source-repositories
|
||||
svc-map
|
||||
pip-and-virtualenv
|
||||
ubuntu-docker
|
||||
|
@ -1,34 +0,0 @@
|
||||
# sometimes the primary key server is unavailable and we should try an
|
||||
# alternate. see
|
||||
# https://bugs.launchpad.net/percona-server/+bug/907789. Disable
|
||||
# shell errexit so we can interrogate the exit code and take action
|
||||
# based on the exit code. We will reenable it later.
|
||||
#
|
||||
# NOTE(zhaochao): we still have this problem from time to time, so it's
|
||||
# better use more reliable keyservers and just retry on that(for now, 3
|
||||
# tries should be fine).
|
||||
# According to:
|
||||
# [1] https://www.gnupg.org/faq/gnupg-faq.html#new_user_default_keyserver
|
||||
# [2] https://sks-keyservers.net/overview-of-pools.php
|
||||
# we'll just the primary suggested pool: pool.sks-keyservers.net.
|
||||
function get_key_robust() {
|
||||
KEY=$1
|
||||
set +e
|
||||
|
||||
tries=1
|
||||
while [ $tries -le 3 ]; do
|
||||
if [ $tries -eq 3 ]; then
|
||||
set -e
|
||||
fi
|
||||
|
||||
echo "Importing the key, try: $tries"
|
||||
# Behind a firewall should use the port 80 instead of the default port 11371
|
||||
apt-key adv --keyserver hkp://pool.sks-keyservers.net:80 --recv-keys ${KEY} && break
|
||||
|
||||
tries=$((tries+1))
|
||||
done
|
||||
|
||||
set -e
|
||||
}
|
||||
|
||||
export -f get_key_robust
|
@ -0,0 +1,51 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
|
||||
set -x
|
||||
fi
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
DEV_MODE=${DEV_MODE:-"true"}
|
||||
SCRIPTDIR=$(dirname $0)
|
||||
GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"}
|
||||
GUEST_VENV=/opt/guest-agent-venv
|
||||
|
||||
for folder in "/var/lib/trove" "/etc/trove" "/etc/trove/certs" "/etc/trove/conf.d" "/var/log/trove" "/opt/trove-guestagent"; do
|
||||
mkdir -p ${folder}
|
||||
chown -R ${GUEST_USERNAME}:root ${folder}
|
||||
done
|
||||
|
||||
install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.logrotate /etc/logrotate.d/guest-agent
|
||||
|
||||
# Create a virtual environment (with dependencies installed) for guest agent service
|
||||
${DIB_PYTHON} -m virtualenv ${GUEST_VENV}
|
||||
${GUEST_VENV}/bin/pip install pip --upgrade
|
||||
${GUEST_VENV}/bin/pip install -U -c /opt/upper-constraints.txt /opt/guest-agent
|
||||
chown -R ${GUEST_USERNAME}:root ${GUEST_VENV}
|
||||
|
||||
if [[ ${DEV_MODE} == "true" ]]; then
|
||||
[[ -n "${HOST_SCP_USERNAME}" ]] || die "HOST_SCP_USERNAME needs to be set to the trovestack host user"
|
||||
[[ -n "${ESCAPED_PATH_TROVE}" ]] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host"
|
||||
|
||||
sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${SCRIPTDIR}/guest-agent-dev.service > /etc/systemd/system/guest-agent.service
|
||||
else
|
||||
# Link the trove-guestagent out to /usr/local/bin where the startup scripts look for
|
||||
ln -s ${GUEST_VENV}/bin/trove-guestagent /usr/local/bin/guest-agent || true
|
||||
|
||||
case "$DIB_INIT_SYSTEM" in
|
||||
systemd)
|
||||
sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g" ${SCRIPTDIR}/guest-agent.service > /etc/systemd/system/guest-agent.service
|
||||
;;
|
||||
upstart)
|
||||
install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.conf /etc/init/guest-agent.conf
|
||||
;;
|
||||
sysv)
|
||||
install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.init /etc/init.d/guest-agent.init
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported init system"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
fi
|
@ -1,45 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
|
||||
set -x
|
||||
fi
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
SCRIPTDIR=$(dirname $0)
|
||||
GUEST_VENV=/opt/guest-agent-venv
|
||||
GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"}
|
||||
|
||||
# Create a virtual environment for guest agent
|
||||
${DIB_PYTHON} -m virtualenv ${GUEST_VENV}
|
||||
${GUEST_VENV}/bin/pip install pip --upgrade
|
||||
${GUEST_VENV}/bin/pip install -U -c /opt/upper-constraints.txt /opt/guest-agent
|
||||
chown -R ${GUEST_USERNAME}:root ${GUEST_VENV}
|
||||
|
||||
# Link the trove-guestagent out to /usr/local/bin where the startup scripts look for
|
||||
ln -s ${GUEST_VENV}/bin/trove-guestagent /usr/local/bin/guest-agent || true
|
||||
|
||||
for folder in "/var/lib/trove" "/etc/trove" "/etc/trove/certs" "/etc/trove/conf.d" "/var/log/trove"; do
|
||||
mkdir -p ${folder}
|
||||
chown -R ${GUEST_USERNAME}:root ${folder}
|
||||
done
|
||||
|
||||
install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.logrotate /etc/logrotate.d/guest-agent
|
||||
|
||||
case "$DIB_INIT_SYSTEM" in
|
||||
systemd)
|
||||
mkdir -p /usr/lib/systemd/system
|
||||
touch /usr/lib/systemd/system/guest-agent.service
|
||||
sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g" ${SCRIPTDIR}/guest-agent.service > /usr/lib/systemd/system/guest-agent.service
|
||||
;;
|
||||
upstart)
|
||||
install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.conf /etc/init/guest-agent.conf
|
||||
;;
|
||||
sysv)
|
||||
install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.init /etc/init.d/guest-agent.init
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported init system"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
@ -0,0 +1,31 @@
|
||||
[Unit]
|
||||
Description=OpenStack Trove Guest Agent Service for Development
|
||||
After=syslog.target network.target
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=GUEST_USERNAME
|
||||
Group=GUEST_USERNAME
|
||||
|
||||
# This script is only for testing purpose for dev_mode=true, the controller
|
||||
# IP address should be defined in /etc/trove/controller.conf, e.g.
|
||||
# CONTROLLER=192.168.32.151
|
||||
EnvironmentFile=/etc/trove/controller.conf
|
||||
|
||||
# If ~/trove-installed does not exist, copy the trove source from
|
||||
# the user's development environment, then touch the sentinel file
|
||||
ExecStartPre=/bin/bash -c "test -e /home/GUEST_USERNAME/trove-installed || sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -az --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:PATH_TROVE/ /home/GUEST_USERNAME/trove && touch /home/GUEST_USERNAME/trove-installed"
|
||||
|
||||
ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove /var/log/trove/ /home/GUEST_USERNAME/trove"
|
||||
|
||||
# Take care of the changes in requirements.txt
|
||||
ExecStartPre=/bin/bash -c "sudo /opt/guest-agent-venv/bin/pip install -r /home/GUEST_USERNAME/trove/requirements.txt -c /opt/upper-constraints.txt"
|
||||
|
||||
# Start guest-agent.service in virtual environment
|
||||
ExecStart=/bin/bash -c "/opt/guest-agent-venv/bin/python /home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-dir=/etc/trove/conf.d"
|
||||
|
||||
TimeoutSec=300
|
||||
Restart=on-failure
|
@ -1,15 +1,16 @@
|
||||
[Unit]
|
||||
Description=OpenStack Trove Guest Agent
|
||||
Description=OpenStack Trove Guest Agent Service
|
||||
After=network.target syslog.service
|
||||
Wants=syslog.service
|
||||
|
||||
[Service]
|
||||
User=GUEST_USERNAME
|
||||
Group=GUEST_USERNAME
|
||||
ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove/conf.d"
|
||||
ExecStart=/usr/local/bin/guest-agent --config-dir=/etc/trove/conf.d
|
||||
KillMode=mixed
|
||||
Restart=always
|
||||
|
||||
ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove /var/log/trove/"
|
||||
ExecStart=/usr/local/bin/guest-agent --config-dir=/etc/trove/conf.d
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
@ -1,53 +1,15 @@
|
||||
guest-agent:
|
||||
installtype: package
|
||||
build-essential:
|
||||
installtype: source
|
||||
python3-all:
|
||||
python3-all-dev:
|
||||
python3-pip:
|
||||
python3-sqlalchemy:
|
||||
libxml2-dev:
|
||||
libxslt1-dev:
|
||||
libffi-dev:
|
||||
installtype: source
|
||||
libssl-dev:
|
||||
installtype: source
|
||||
python-dev:
|
||||
installtype: source
|
||||
|
||||
acl:
|
||||
acpid:
|
||||
apparmor:
|
||||
apparmor-utils:
|
||||
apt-transport-https:
|
||||
at:
|
||||
bash-completion:
|
||||
cloud-guest-utils:
|
||||
cloud-init:
|
||||
cron:
|
||||
curl:
|
||||
dbus:
|
||||
dkms:
|
||||
dmeventd:
|
||||
ethtool:
|
||||
gawk:
|
||||
ifenslave:
|
||||
ifupdown:
|
||||
iptables:
|
||||
iputils-tracepath:
|
||||
irqbalance:
|
||||
isc-dhcp-client:
|
||||
less:
|
||||
logrotate:
|
||||
lsof:
|
||||
net-tools:
|
||||
netbase:
|
||||
netcat-openbsd:
|
||||
open-vm-tools:
|
||||
arch: i386, amd64
|
||||
libyaml-dev:
|
||||
openssh-client:
|
||||
openssh-server:
|
||||
pollinate:
|
||||
psmisc:
|
||||
rsyslog:
|
||||
socat:
|
||||
tcpdump:
|
||||
ubuntu-cloudimage-keyring:
|
||||
ureadahead:
|
||||
uuid-runtime:
|
||||
vim-tiny:
|
||||
vlan:
|
||||
rsync:
|
||||
|
@ -1,8 +0,0 @@
|
||||
This element clears out /etc/resolv.conf and prevents dhclient from populating
|
||||
it with data from DHCP. This means that DNS resolution will not work from the
|
||||
guest. This is OK because all outbound connections from the guest will
|
||||
be based using raw IP addresses.
|
||||
|
||||
In addition we remove dns from the nsswitch.conf hosts setting.
|
||||
|
||||
This means that the guest never waits for DNS timeouts to occur.
|
@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
echo "" > /etc/resolv.conf
|
||||
echo "" > /etc/resolv.conf.ORIG
|
||||
if [ -d /etc/dhcp/dhclient-enter-hooks.d ]; then
|
||||
# Debian/Ubuntu
|
||||
echo "#!/bin/sh
|
||||
make_resolv_conf() { : ; }" > /etc/dhcp/dhclient-enter-hooks.d/noresolvconf
|
||||
chmod +x /etc/dhcp/dhclient-enter-hooks.d/noresolvconf
|
||||
rm -f /etc/dhcp/dhclient-enter-hooks.d/resolvconf
|
||||
else
|
||||
# RHEL/CentOS/Fedora
|
||||
echo "#!/bin/sh
|
||||
make_resolv_conf() { : ; }" > /etc/dhclient-enter-hooks
|
||||
chmod +x /etc/dhclient-enter-hooks
|
||||
fi
|
||||
|
||||
if [ -e /etc/nsswitch.conf ]; then
|
||||
sed -i -e "/hosts:/ s/dns//g" /etc/nsswitch.conf
|
||||
fi
|
@ -0,0 +1 @@
|
||||
ubuntu-guest
|
19
integration/scripts/files/elements/ubuntu-docker/install.d/21-docker
Executable file
19
integration/scripts/files/elements/ubuntu-docker/install.d/21-docker
Executable file
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
|
||||
set -x
|
||||
fi
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"}
|
||||
|
||||
echo "Installing docker"
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${DIB_RELEASE} stable"
|
||||
apt-get update
|
||||
apt-get install -y -qq docker-ce >/dev/null
|
||||
|
||||
echo "Adding ${GUEST_USERNAME} user to docker group"
|
||||
usermod -aG docker ${GUEST_USERNAME}
|
@ -1,34 +0,0 @@
|
||||
# sometimes the primary key server is unavailable and we should try an
|
||||
# alternate. see
|
||||
# https://bugs.launchpad.net/percona-server/+bug/907789. Disable
|
||||
# shell errexit so we can interrogate the exit code and take action
|
||||
# based on the exit code. We will reenable it later.
|
||||
#
|
||||
# NOTE(zhaochao): we still have this problem from time to time, so it's
|
||||
# better use more reliable keyservers and just retry on that(for now, 3
|
||||
# tries should be fine).
|
||||
# According to:
|
||||
# [1] https://www.gnupg.org/faq/gnupg-faq.html#new_user_default_keyserver
|
||||
# [2] https://sks-keyservers.net/overview-of-pools.php
|
||||
# we'll just the primary suggested pool: pool.sks-keyservers.net.
|
||||
function get_key_robust() {
|
||||
KEY=$1
|
||||
set +e
|
||||
|
||||
tries=1
|
||||
while [ $tries -le 3 ]; do
|
||||
if [ $tries -eq 3 ]; then
|
||||
set -e
|
||||
fi
|
||||
|
||||
echo "Importing the key, try: $tries"
|
||||
# Behind a firewall should use the port 80 instead of the default port 11371
|
||||
apt-key adv --keyserver hkp://pool.sks-keyservers.net:80 --recv-keys ${KEY} && break
|
||||
|
||||
tries=$((tries+1))
|
||||
done
|
||||
|
||||
set -e
|
||||
}
|
||||
|
||||
export -f get_key_robust
|
17
integration/scripts/files/elements/ubuntu-guest/extra-data.d/11-ssh-key-dev
Executable file
17
integration/scripts/files/elements/ubuntu-guest/extra-data.d/11-ssh-key-dev
Executable file
@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
|
||||
set -x
|
||||
fi
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
source $_LIB/die
|
||||
|
||||
[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set"
|
||||
|
||||
# Guest agent needs to ssh into the controller to download code in dev mode.
|
||||
if [[ ${DEV_MODE} == "true" && -e ${SSH_DIR}/id_rsa ]]; then
|
||||
sudo -Hiu ${HOST_SCP_USERNAME} dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa
|
||||
sudo -Hiu ${HOST_SCP_USERNAME} dd if=${SSH_DIR}/id_rsa.pub of=${TMP_HOOKS_PATH}/id_rsa.pub
|
||||
fi
|
@ -1,28 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
# CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER
|
||||
# PURPOSE: Setup the requirements file for use by 15-reddwarf-dep
|
||||
|
||||
source $_LIB/die
|
||||
|
||||
TROVE_BRANCH=${TROVE_BRANCH:-'master'}
|
||||
REQUIREMENTS_FILE=${TROVESTACK_SCRIPTS}/../../requirements.txt
|
||||
|
||||
[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set"
|
||||
[ -e ${REQUIREMENTS_FILE} ] || die "Requirements not found"
|
||||
[ -n "$HOST_USERNAME" ] || die "HOST_USERNAME not set"
|
||||
|
||||
sudo -Hiu ${HOST_USERNAME} dd if=${REQUIREMENTS_FILE} of=${TMP_HOOKS_PATH}/requirements.txt
|
||||
|
||||
UC_FILE=upper-constraints.txt
|
||||
UC_DIR=$(pwd)
|
||||
UC_BRANCH=${TROVE_BRANCH##stable/}
|
||||
|
||||
curl -L -o "${UC_DIR}/${UC_FILE}" "https://releases.openstack.org/constraints/upper/${UC_BRANCH}"
|
||||
if [ -f "${UC_DIR}/${UC_FILE}" ]; then
|
||||
sudo -Hiu ${HOST_USERNAME} dd if="${UC_DIR}/${UC_FILE}" of=${TMP_HOOKS_PATH}/${UC_FILE}
|
||||
rm -f "${UC_DIR}/${UC_FILE}"
|
||||
fi
|
@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
# CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER
|
||||
# PURPOSE: creates the SSH key on the host if it doesn't exist. Then this copies the keys over to a staging area where
|
||||
# they will be duplicated in the guest VM.
|
||||
# This process allows the host to log into the guest but more importantly the guest phones home to get the trove
|
||||
# source
|
||||
|
||||
source $_LIB/die
|
||||
|
||||
[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set"
|
||||
[ -n "${HOST_USERNAME}" ] || die "HOST_USERNAME needs to be set to the user for the current user on the host"
|
||||
|
||||
if [ `whoami` = "root" ]; then
|
||||
die "This should not be run as root"
|
||||
fi
|
||||
|
||||
# Guest agent needs to ssh into the controller to download code in dev mode.
|
||||
if [ -e ${SSH_DIR}/id_rsa ]; then
|
||||
sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa
|
||||
sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa.pub of=${TMP_HOOKS_PATH}/id_rsa.pub
|
||||
else
|
||||
die "SSH keys must exist"
|
||||
fi
|
@ -1,10 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: Install basic services and applications
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get --allow-unauthenticated -y install ntp apparmor-utils
|
22
integration/scripts/files/elements/ubuntu-guest/install.d/12-ssh-key-dev
Executable file
22
integration/scripts/files/elements/ubuntu-guest/install.d/12-ssh-key-dev
Executable file
@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
|
||||
set -x
|
||||
fi
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
GUEST_SSH_DIR="/home/${GUEST_USERNAME}/.ssh"
|
||||
TMP_HOOKS_DIR="/tmp/in_target.d"
|
||||
|
||||
if [ ! -e ${GUEST_SSH_DIR} ]; then
|
||||
# this method worked more reliable in vmware fusion over doing sudo -Hiu ${GUEST_USERNAME}
|
||||
mkdir ${GUEST_SSH_DIR}
|
||||
chown -R ${GUEST_USERNAME}:${GUEST_USERNAME} ${GUEST_SSH_DIR}
|
||||
fi
|
||||
|
||||
if [[ ${DEV_MODE} == "true" && -e "${TMP_HOOKS_DIR}/id_rsa" ]]; then
|
||||
sudo -Hiu ${GUEST_USERNAME} dd of=${GUEST_SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub
|
||||
sudo -Hiu ${GUEST_USERNAME} dd of=${GUEST_SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa
|
||||
sudo -Hiu ${GUEST_USERNAME} chmod 600 ${GUEST_SSH_DIR}/id_rsa
|
||||
fi
|
@ -1,37 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: Install trove guest python dependencies - see trovestack functions_qemu
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get --allow-unauthenticated -y install \
|
||||
libxml2-dev libxslt1-dev libffi-dev libssl-dev libyaml-dev \
|
||||
python3-pip python3-sqlalchemy python3-setuptools
|
||||
|
||||
# Install python 3.7, some python lib (e.g. oslo.concurrency>4.0.0) requries
|
||||
# Python 3.7
|
||||
add-apt-repository --yes ppa:deadsnakes/ppa
|
||||
apt update
|
||||
apt install -y python3.7 python3.7-dev
|
||||
|
||||
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 10
|
||||
python3.5 -m pip install pip==9.0.3
|
||||
python3.5 -m pip install -U wheel setuptools
|
||||
|
||||
TMP_HOOKS_DIR="/tmp/in_target.d"
|
||||
|
||||
UPPER_CONSTRAINTS=
|
||||
if [ -f ${TMP_HOOKS_DIR}/upper-constraints.txt ]; then
|
||||
UPPER_CONSTRAINTS=" -c ${TMP_HOOKS_DIR}/upper-constraints.txt"
|
||||
fi
|
||||
|
||||
python3.7 -m pip install pip==9.0.3
|
||||
python3.7 -m pip install -U wheel setuptools
|
||||
python3.7 -m pip install --upgrade -r ${TMP_HOOKS_DIR}/requirements.txt ${UPPER_CONSTRAINTS}
|
||||
|
||||
echo "diagnostic pip freeze output follows"
|
||||
python3.7 -m pip freeze
|
||||
echo "diagnostic pip freeze output above"
|
@ -1,18 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: Add the guest image user that will own the trove agent source...if the user does not already exist
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
if ! id -u ${GUEST_USERNAME} >/dev/null 2>&1; then
|
||||
echo "Adding ${GUEST_USERNAME} user"
|
||||
useradd -G sudo -m ${GUEST_USERNAME} -s /bin/bash
|
||||
chown ${GUEST_USERNAME}:${GUEST_USERNAME} /home/${GUEST_USERNAME}
|
||||
passwd ${GUEST_USERNAME} <<_EOF_
|
||||
${GUEST_USERNAME}
|
||||
${GUEST_USERNAME}
|
||||
_EOF_
|
||||
fi
|
||||
|
@ -1,25 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# PURPOSE: take "staged" ssh keys (see extra-data.d/62-ssh-key) and put them in the GUEST_USERS home directory
|
||||
# In future, this should be removed and use Nova keypair to inject ssh keys.
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
SSH_DIR="/home/${GUEST_USERNAME}/.ssh"
|
||||
TMP_HOOKS_DIR="/tmp/in_target.d"
|
||||
|
||||
if [ ! -e ${SSH_DIR} ]; then
|
||||
# this method worked more reliable in vmware fusion over doing sudo -Hiu ${GUEST_USERNAME}
|
||||
mkdir ${SSH_DIR}
|
||||
chown ${GUEST_USERNAME}:${GUEST_USERNAME} ${SSH_DIR}
|
||||
fi
|
||||
|
||||
if [ -e "${TMP_HOOKS_DIR}/id_rsa" ]; then
|
||||
sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub
|
||||
sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa
|
||||
sudo -Hiu ${GUEST_USERNAME} chmod 600 ${SSH_DIR}/id_rsa
|
||||
else
|
||||
echo "SSH Keys were not staged by host"
|
||||
exit -1
|
||||
fi
|
@ -1,8 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Regenerate host keys now. XXX: Really should be a cloud-init task, should get
|
||||
# that working.
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
dpkg-reconfigure openssh-server
|
@ -1,11 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: Delete contents of apt cache on guest (saves image disk space)
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
apt-get clean
|
||||
|
||||
|
@ -1,7 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Install baseline packages and tools.
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
apt-get --allow-unauthenticated install -y language-pack-en python-software-properties software-properties-common
|
@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
# Install baseline packages and tools.
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
DEBIAN_FRONTEND=noninteractive apt-get --allow-unauthenticated install -y -qq software-properties-common apt-transport-https ca-certificates ntp >/dev/null
|
@ -1,3 +0,0 @@
|
||||
Sets up a MariaDB server install in the image.
|
||||
|
||||
TODO: auto-tune settings based on host resources or metadata service.
|
@ -1,11 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
#CONTEXT: chroot on host
|
||||
#PURPOSE: Allows mysqld to create temporary files when restoring backups
|
||||
|
||||
cat <<EOF >>/etc/apparmor.d/local/usr.sbin.mysqld
|
||||
/tmp/ rw,
|
||||
/tmp/** rwk,
|
||||
EOF
|
@ -1,12 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
#CONTEXT: chroot on host
|
||||
#PURPOSE: Allows mysqld to create temporary files when restoring backups
|
||||
|
||||
mkdir -p /etc/apparmor.d/local/
|
||||
cat <<EOF >>/etc/apparmor.d/local/usr.sbin.mysqld
|
||||
/tmp/ rw,
|
||||
/tmp/** rwk,
|
||||
EOF
|
@ -1,25 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
cat > "/etc/sysctl.d/10-postgresql-performance.conf" << _EOF_
|
||||
# See 'http://www.postgresql.org/docs/9.6/static/kernel-resources.html'
|
||||
# for best practices.
|
||||
# It is recommended to disable memory overcommit,
|
||||
# but the Python interpreter may require it on smaller flavors.
|
||||
# We therefore stick with the heuristic overcommit setting.
|
||||
vm.overcommit_memory=0
|
||||
_EOF_
|
||||
|
||||
apt-get --allow-unauthenticated -y install libpq-dev postgresql-12 postgresql-server-dev-12 postgresql-client-12
|
||||
|
||||
pgsql_conf=/etc/postgresql/12/main/postgresql.conf
|
||||
sed -i "/listen_addresses/c listen_addresses = '*'" ${pgsql_conf}
|
||||
|
||||
systemctl restart postgresql
|
||||
|
||||
# Install the native Python client.
|
||||
pip3 install psycopg2
|
@ -1,14 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
[ -n "${DIB_RELEASE}" ] || die "RELEASE must be set to a valid Ubuntu release (e.g. trusty)"
|
||||
|
||||
cat <<EOL > /etc/apt/sources.list.d/postgresql.list
|
||||
deb http://apt.postgresql.org/pub/repos/apt/ ${DIB_RELEASE}-pgdg main
|
||||
EOL
|
||||
|
||||
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
|
||||
|
||||
apt-get update
|
@ -1 +0,0 @@
|
||||
ubuntu-guest
|
@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
# CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER
|
||||
# PURPOSE: stages the bootstrap file and upstart conf file while replacing variables so that guest image is properly
|
||||
# configured
|
||||
|
||||
source $_LIB/die
|
||||
|
||||
[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set"
|
||||
|
||||
[ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image"
|
||||
[ -n "${HOST_SCP_USERNAME}" ] || die "HOST_SCP_USERNAME needs to be set to the user for the host instance"
|
||||
[ -n "${ESCAPED_PATH_TROVE}" ] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host"
|
||||
[ -n "${TROVESTACK_SCRIPTS}" ] || die "TROVESTACK_SCRIPTS needs to be set to the trove/integration/scripts dir"
|
||||
[ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir"
|
||||
|
||||
sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${TROVESTACK_SCRIPTS}/files/trove-guest.systemd.conf > ${TMP_HOOKS_PATH}/trove-guest.service
|
||||
|
@ -1,8 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: take "staged" trove-guest.conf file and put it in the init directory on guest image
|
||||
|
||||
dd if=/tmp/in_target.d/trove-guest.service of=/etc/systemd/system/trove-guest.service
|
||||
|
||||
systemctl enable trove-guest.service
|
@ -1,12 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: configure trove-guest service to use system store of trusted certificates
|
||||
|
||||
GUEST_UNIT_DROPINS="/etc/systemd/system/trove-guest.service.d"
|
||||
|
||||
mkdir -v -p ${GUEST_UNIT_DROPINS}
|
||||
cat <<EOF > ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf
|
||||
[Service]
|
||||
Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs
|
||||
EOF
|
@ -1,29 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
# The HWE stack must be installed for nested virtualization on ppc64el. This
|
||||
# environment variable is set automatically by trovestack, but it may also be
|
||||
# set by the user when manually invoking disk-image-create.
|
||||
|
||||
case "$DIB_USE_HWE_KERNEL" in
|
||||
true|True|TRUE|yes|Yes|YES)
|
||||
DIB_USE_HWE_KERNEL=true
|
||||
;;
|
||||
*)
|
||||
DIB_USE_HWE_KERNEL=false
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$DIB_USE_HWE_KERNEL" == "true" ]; then
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
PKG_ARCH=$(dpkg --print-architecture)
|
||||
|
||||
case "$PKG_ARCH" in
|
||||
amd64|arm64|ppc64el|s390x)
|
||||
apt-get --allow-unauthenticated install -y linux-generic-hwe-16.04
|
||||
;;
|
||||
esac
|
||||
fi
|
@ -1,90 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: Install basic services and applications
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get -y purge acpid\
|
||||
apport\
|
||||
apport-symptoms\
|
||||
apt-transport-https\
|
||||
aptitude\
|
||||
at\
|
||||
bash-completion\
|
||||
bc\
|
||||
bind9-host\
|
||||
bsdmainutils\
|
||||
busybox-static\
|
||||
byobu\
|
||||
command-not-found\
|
||||
command-not-found-data\
|
||||
curl\
|
||||
dbus\
|
||||
dmidecode\
|
||||
dosfstools\
|
||||
ed\
|
||||
fonts-ubuntu-font-family-console\
|
||||
friendly-recovery\
|
||||
ftp\
|
||||
fuse\
|
||||
geoip-database\
|
||||
groff-base\
|
||||
hdparm\
|
||||
info\
|
||||
install-info\
|
||||
iptables\
|
||||
iputils-tracepath\
|
||||
irqbalance\
|
||||
language-selector-common\
|
||||
libaccountsservice0\
|
||||
libevent-2.0-5\
|
||||
libgeoip1\
|
||||
libnfnetlink0\
|
||||
libpcap0.8\
|
||||
libpci3\
|
||||
libpipeline1\
|
||||
libpolkit-gobject-1-0\
|
||||
libsasl2-modules\
|
||||
libusb-1.0-0\
|
||||
lshw\
|
||||
lsof\
|
||||
ltrace\
|
||||
man-db\
|
||||
mlocate\
|
||||
mtr-tiny\
|
||||
nano\
|
||||
ntfs-3g\
|
||||
parted\
|
||||
patch\
|
||||
plymouth-theme-ubuntu-text\
|
||||
popularity-contest\
|
||||
powermgmt-base\
|
||||
ppp\
|
||||
screen\
|
||||
shared-mime-info\
|
||||
strace\
|
||||
tcpdump\
|
||||
telnet\
|
||||
time\
|
||||
tmux\
|
||||
ubuntu-standard\
|
||||
ufw\
|
||||
update-manager-core\
|
||||
update-notifier-common\
|
||||
usbutils\
|
||||
uuid-runtime\
|
||||
|
||||
# The following packages cannot be removed as they cause cloud-init to be
|
||||
# uninstalled in Ubuntu 14.04
|
||||
# gir1.2-glib-2.0
|
||||
# libdbus-glib-1-2
|
||||
# libgirepository-1.0-1
|
||||
# python-chardet
|
||||
# python-serial
|
||||
# xz-utils
|
||||
|
||||
apt-get -y autoremove
|
||||
|
@ -1 +0,0 @@
|
||||
ubuntu-mariadb
|
@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: Install controller base required packages
|
||||
# Refer to https://mariadb.com/kb/en/library/installing-mariadb-deb-files
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# These GPG key IDs are used to fetch keys from a keyserver on Ubuntu & Debian
|
||||
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
|
||||
curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup |
|
||||
bash -s -- --mariadb-server-version="mariadb-10.4" --skip-key-import --skip-maxscale
|
||||
|
||||
apt-get install -y -qq apt-transport-https ca-certificates gnupg2
|
||||
|
||||
# NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html
|
||||
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
|
||||
dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
|
||||
|
||||
# Disable password prompt
|
||||
debconf-set-selections <<< "mariadb-server mysql-server/root_password password ''"
|
||||
debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password ''"
|
||||
|
||||
apt-get update -qq
|
||||
apt-get install -y -qq --allow-unauthenticated mariadb-server mariadb-client galera-4 libmariadb3 mariadb-backup mariadb-common
|
||||
|
||||
cat <<EOF >/etc/mysql/conf.d/no_perf_schema.cnf
|
||||
[mysqld]
|
||||
performance_schema = off
|
||||
EOF
|
||||
|
||||
chown mysql:mysql /etc/mysql/my.cnf
|
||||
rm -f /etc/init.d/mysql
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl enable mariadb
|
@ -1 +0,0 @@
|
||||
ubuntu-mysql
|
@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# CONTEXT: GUEST during CONSTRUCTION as ROOT
|
||||
# PURPOSE: Install controller base required packages
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
apt-get --allow-unauthenticated -y install mysql-client mysql-server gnupg2
|
||||
|
||||
# NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html
|
||||
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
|
||||
dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
|
||||
apt-get update
|
||||
|
||||
# Xenial provides mysql 5.7 which requires percona-xtrabackup-24
|
||||
PXB_VERSION_OVERRIDE=24
|
||||
apt-get --allow-unauthenticated -y install percona-xtrabackup-${PXB_VERSION_OVERRIDE}
|
||||
|
||||
cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
|
||||
[mysqld]
|
||||
performance_schema = off
|
||||
show_compatibility_56 = on
|
||||
_EOF_
|
||||
|
||||
mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf
|
||||
chown mysql:mysql /etc/mysql/my.cnf
|
||||
cat >/etc/mysql/my.cnf <<_EOF_
|
||||
[mysql]
|
||||
!includedir /etc/mysql/conf.d/
|
||||
_EOF_
|
||||
|
||||
if [ -e /etc/init/mysql.conf ]; then
|
||||
rm -f /etc/init/mysql.conf
|
||||
fi
|
||||
|
||||
systemctl enable mysql
|
@ -1 +0,0 @@
|
||||
ubuntu-postgresql
|
@ -3,44 +3,36 @@
|
||||
# Additional functions that would mostly just pertain to a Ubuntu + Qemu setup
|
||||
#
|
||||
|
||||
function build_vm() {
|
||||
exclaim "Actually building the image, this can take up to 15 minutes"
|
||||
rm -rf ~/.cache/image-create
|
||||
function build_guest_image() {
|
||||
exclaim "Actually building the image, params: $@"
|
||||
|
||||
local datastore_type=$1
|
||||
local guest_os=$2
|
||||
local guest_release=$3
|
||||
local dev_mode=$4
|
||||
local guest_username=$5
|
||||
local image_output=$6
|
||||
local guest_os=$1
|
||||
local guest_release=$2
|
||||
local dev_mode=$3
|
||||
local guest_username=$4
|
||||
local image_output=$5
|
||||
|
||||
local elementes="base vm"
|
||||
local trove_elements_path=${PATH_TROVE}/integration/scripts/files/elements
|
||||
local GUEST_IMAGETYPE=${GUEST_IMAGETYPE:-"qcow2"}
|
||||
local GUEST_IMAGESIZE=${GUEST_IMAGESIZE:-4}
|
||||
local GUEST_IMAGESIZE=${GUEST_IMAGESIZE:-3}
|
||||
local GUEST_CACHEDIR=${GUEST_CACHEDIR:-"$HOME/.cache/image-create"}
|
||||
rm -rf ${GUEST_CACHEDIR}
|
||||
local working_dir=$(dirname ${image_output})
|
||||
|
||||
export GUEST_USERNAME=${guest_username}
|
||||
export HOST_SCP_USERNAME=${HOST_SCP_USERNAME:-$(whoami)}
|
||||
export ESCAPED_PATH_TROVE=$(echo ${PATH_TROVE} | sed 's/\//\\\//g')
|
||||
export DEV_MODE=${dev_mode,,}
|
||||
|
||||
# In dev mode, the trove guest agent needs to download trove code from
|
||||
# trove-taskmanager host during service initialization.
|
||||
if [[ "${dev_mode,,}" == "true" ]]; then
|
||||
export PATH_TROVE=${PATH_TROVE}
|
||||
export ESCAPED_PATH_TROVE=$(echo ${PATH_TROVE} | sed 's/\//\\\//g')
|
||||
export GUEST_LOGDIR=${GUEST_LOGDIR:-"/var/log/trove/"}
|
||||
export ESCAPED_GUEST_LOGDIR=$(echo ${GUEST_LOGDIR} | sed 's/\//\\\//g')
|
||||
export TROVESTACK_SCRIPTS=${TROVESTACK_SCRIPTS}
|
||||
export HOST_SCP_USERNAME=${HOST_SCP_USERNAME:-$(whoami)}
|
||||
export HOST_USERNAME=${HOST_SCP_USERNAME}
|
||||
if [[ "${DEV_MODE}" == "true" ]]; then
|
||||
export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"}
|
||||
export DEST=${DEST:-'/opt/stack'}
|
||||
export TROVE_BRANCH=${TROVE_BRANCH:-'master'}
|
||||
manage_ssh_keys
|
||||
fi
|
||||
|
||||
# For system-wide installs, DIB will automatically find the elements, so we only check local path
|
||||
if [ "${DIB_LOCAL_ELEMENTS_PATH}" ]; then
|
||||
if [[ "${DIB_LOCAL_ELEMENTS_PATH}" ]]; then
|
||||
export ELEMENTS_PATH=${trove_elements_path}:${DIB_LOCAL_ELEMENTS_PATH}
|
||||
else
|
||||
export ELEMENTS_PATH=${trove_elements_path}
|
||||
@ -51,35 +43,25 @@ function build_vm() {
|
||||
|
||||
# https://cloud-images.ubuntu.com/releases is more stable than the daily
|
||||
# builds (https://cloud-images.ubuntu.com/xenial/current/),
|
||||
# e.g. sometimes SHA256SUMS file is missing in the daily builds
|
||||
declare -A releasemapping=( ["xenial"]="16.04" ["bionic"]="18.04")
|
||||
# e.g. sometimes SHA256SUMS file is missing in the daily builds website.
|
||||
# Ref: diskimage_builder/elements/ubuntu/root.d/10-cache-ubuntu-tarball
|
||||
declare -A image_file_mapping=( ["xenial"]="ubuntu-16.04-server-cloudimg-amd64-root.tar.gz" ["bionic"]="ubuntu-18.04-server-cloudimg-amd64.squashfs" )
|
||||
export DIB_CLOUD_IMAGES="https://cloud-images.ubuntu.com/releases/${DIB_RELEASE}/release/"
|
||||
export BASE_IMAGE_FILE="ubuntu-${releasemapping[${DIB_RELEASE}]}-server-cloudimg-amd64-root.tar.gz"
|
||||
export BASE_IMAGE_FILE=${image_file_mapping[${DIB_RELEASE}]}
|
||||
|
||||
TEMP=$(mktemp -d ${working_dir}/diskimage-create.XXXXXXX)
|
||||
pushd $TEMP > /dev/null
|
||||
|
||||
elementes="$elementes ${guest_os}"
|
||||
|
||||
if [[ "${dev_mode,,}" == "false" ]]; then
|
||||
elementes="$elementes pip-and-virtualenv"
|
||||
elementes="$elementes pip-cache"
|
||||
elementes="$elementes guest-agent"
|
||||
else
|
||||
# Install guest agent dependencies, user, etc.
|
||||
elementes="$elementes ${guest_os}-guest"
|
||||
# Install guest agent service
|
||||
elementes="$elementes ${guest_os}-${guest_release}-guest"
|
||||
fi
|
||||
|
||||
elementes="$elementes ${guest_os}-${datastore_type}"
|
||||
elementes="$elementes ${guest_os}-${guest_release}-${datastore_type}"
|
||||
elementes="$elementes ${guest_os}-docker"
|
||||
|
||||
# Build the image
|
||||
disk-image-create -x \
|
||||
-a amd64 \
|
||||
-o ${image_output} \
|
||||
-t ${GUEST_IMAGETYPE} \
|
||||
--image-size ${GUEST_IMAGESIZE} \
|
||||
--image-cache ${GUEST_CACHEDIR} \
|
||||
$elementes
|
||||
@ -91,25 +73,6 @@ function build_vm() {
|
||||
exclaim "Image ${image_output} was built successfully."
|
||||
}
|
||||
|
||||
function build_guest_image() {
|
||||
exclaim "Params for build_guest_image function: $@"
|
||||
|
||||
local datastore_type=${1:-"mysql"}
|
||||
local guest_os=${2:-"ubuntu"}
|
||||
local guest_release=${3:-"xenial"}
|
||||
local dev_mode=${4:-"true"}
|
||||
local guest_username=${5:-"ubuntu"}
|
||||
local output=$6
|
||||
|
||||
VALID_SERVICES='mysql percona mariadb redis cassandra couchbase mongodb postgresql couchdb vertica db2 pxc'
|
||||
if ! [[ " $VALID_SERVICES " =~ " $datastore_type " ]]; then
|
||||
exclaim "You did not pass in a valid datastore type. Valid types are:" $VALID_SERVICES
|
||||
exit 1
|
||||
fi
|
||||
|
||||
build_vm ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} ${output}
|
||||
}
|
||||
|
||||
function clean_instances() {
|
||||
LIST=`virsh -q list|awk '{print $1}'`
|
||||
for i in $LIST; do sudo virsh destroy $i; done
|
||||
@ -117,6 +80,8 @@ function clean_instances() {
|
||||
|
||||
# In dev mode, guest agent needs to ssh into the controller to download code.
|
||||
function manage_ssh_keys() {
|
||||
SSH_DIR=${SSH_DIR:-"$HOME/.ssh"}
|
||||
|
||||
if [ -d ${SSH_DIR} ]; then
|
||||
echo "${SSH_DIR} already exists"
|
||||
else
|
||||
|
@ -1,3 +0,0 @@
|
||||
[[post-config|\$CINDER_CONF]]
|
||||
[DEFAULT]
|
||||
notification_driver = messagingv2
|
@ -1,3 +0,0 @@
|
||||
[[post-config|\$NOVA_CONF]]
|
||||
[DEFAULT]
|
||||
instance_usage_audit = True
|
@ -1,3 +0,0 @@
|
||||
[[post-config|\$CEILOMETER_CONF]]
|
||||
[notification]
|
||||
store_events = True
|
@ -1,42 +0,0 @@
|
||||
#
|
||||
# Files in this directory are automatically added to the devstack
|
||||
# local.conf file, between a specific set of tags.
|
||||
#
|
||||
# Filenames must end with '.rc' to be recognized; sample.rc is
|
||||
# ignored.
|
||||
#
|
||||
# A '\' is required in front of any devstack variables since all
|
||||
# .rc files are parsed first (using eval).
|
||||
#
|
||||
# Meta section headings must be included in each file, such as:
|
||||
# [[local|localrc]]
|
||||
# as the order of inserting the files is not guaranteed.
|
||||
#
|
||||
# All files are inherently included by default - to exclude a file,
|
||||
# add a variable 'FILENAME_IN_UPPERCASE_MINUS_RC=false' in trovestack.rc
|
||||
# For Example: USING_VAGRANT=false (for the using_vagrant.rc file).
|
||||
#
|
||||
# Symbolic links are followed, so additional files can be loaded
|
||||
# by placing them in an external directory and linking it in
|
||||
# local.conf.d (this should allow complete flexibility in setting
|
||||
# up testing options).
|
||||
# For Example:
|
||||
# cd /path/to/trove/integration/scripts/local.conf.d
|
||||
# ln -s $HOME/local.conf.d local.conf.d
|
||||
# cp /path/to/my_conf.rc $HOME/local.conf.d
|
||||
|
||||
|
||||
[[local|localrc]]
|
||||
# Put regular devstack variables under this meta section heading.
|
||||
# This section is written out to a file and sourced by devstack,
|
||||
# so it can contain logic as well.
|
||||
|
||||
# The following section types should only contain ini file style
|
||||
# section headings and name=value pairs
|
||||
[[post-config|\$TROVE_CONF]]
|
||||
|
||||
[[post-config|\$TROVE_TASKMANAGER_CONF]]
|
||||
|
||||
[[post-config|\$TROVE_CONDUCTOR_CONF]]
|
||||
|
||||
[[post-config|\$TROVE_API_PASTE_INI]]
|
@ -1,24 +0,0 @@
|
||||
[[post-config|\$TROVE_CONF]]
|
||||
[profiler]
|
||||
enabled = $ENABLE_PROFILER
|
||||
trace_sqlalchemy = $PROFILER_TRACE_SQL
|
||||
|
||||
[[post-config|\$TROVE_TASKMANAGER_CONF]]
|
||||
[profiler]
|
||||
enabled = $ENABLE_PROFILER
|
||||
trace_sqlalchemy = $PROFILER_TRACE_SQL
|
||||
|
||||
[[post-config|\$TROVE_CONDUCTOR_CONF]]
|
||||
[profiler]
|
||||
enabled = $ENABLE_PROFILER
|
||||
trace_sqlalchemy = $PROFILER_TRACE_SQL
|
||||
|
||||
[[post-config|\$TROVE_GUESTAGENT_CONF]]
|
||||
[profiler]
|
||||
enabled = $ENABLE_PROFILER
|
||||
trace_sqlalchemy = $PROFILER_TRACE_SQL
|
||||
|
||||
[[post-config|\$TROVE_API_PASTE_INI]]
|
||||
[filter:osprofiler]
|
||||
enabled = $ENABLE_PROFILER
|
||||
hmac_keys = $PROFILER_HMAC_KEYS
|
@ -1,4 +0,0 @@
|
||||
[[local|localrc]]
|
||||
|
||||
# force kvm as the libvirt type.
|
||||
LIBVIRT_TYPE=kvm
|
@ -1,3 +0,0 @@
|
||||
[[local|localrc]]
|
||||
|
||||
KEYSTONE_TOKEN_FORMAT=UUID
|
@ -1,9 +0,0 @@
|
||||
[[local|localrc]]
|
||||
|
||||
# This is similar to code found at
|
||||
# https://github.com/bcwaldon/vagrant_devstack/blob/master/Vagrantfile
|
||||
# and seems to make instances ping'able in VirtualBox.
|
||||
FLAT_INTERFACE=eth1
|
||||
PUBLIC_INTERFACE=eth1
|
||||
FLOATING_RANGE=`ip_chunk eth0 1`.`ip_chunk eth0 2`.`ip_chunk eth0 3`.128/28
|
||||
HOST_IP=`ip_chunk eth0 1`.`ip_chunk eth0 2`.`ip_chunk eth0 3`.`ip_chunk eth0 4`
|
@ -1,37 +0,0 @@
|
||||
$TROVE_PRESENT_TAG
|
||||
# Set some arguments for devstack.
|
||||
#
|
||||
# Note: This file contains autogenerated parts.
|
||||
# All lines are removed from between the tag/end of tag
|
||||
# markers (lines with '$MARKER_TOKEN' at beginning and end) and
|
||||
# are replaced by trovestack.
|
||||
# Edits to these sections will not persist.
|
||||
#
|
||||
# See the '$USER_OPTS_TAG' section
|
||||
# for ways to insert user args into this file.
|
||||
#
|
||||
|
||||
#
|
||||
# This section is for things that belong in localrc
|
||||
# It comes from $DEFAULT_LOCALRC
|
||||
#
|
||||
[[local|localrc]]
|
||||
|
||||
$LOCALRC_OPTS_TAG
|
||||
$LOCALRC_OPTS_TAG_END
|
||||
|
||||
#
|
||||
# User options here were inserted from the file USER_LOCAL_CONF
|
||||
# (defaults to $USERHOME/.$LOCAL_CONF)
|
||||
#
|
||||
|
||||
$USER_OPTS_TAG
|
||||
$USER_OPTS_TAG_END
|
||||
|
||||
#
|
||||
# Additional options here were inserted by trovestack
|
||||
# automatically from files in $LOCAL_CONF_D
|
||||
#
|
||||
|
||||
$ADD_OPTS_TAG
|
||||
$ADD_OPTS_TAG_END
|
@ -124,7 +124,7 @@ if is_fedora; then
|
||||
else
|
||||
PKG_INSTALL_OPTS="DEBIAN_FRONTEND=noninteractive"
|
||||
PKG_MGR=apt-get
|
||||
PKG_GET_ARGS="-y --allow-unauthenticated --force-yes"
|
||||
PKG_GET_ARGS="-y --allow-unauthenticated --force-yes -qq"
|
||||
fi
|
||||
PKG_INSTALL_ARG="install"
|
||||
PKG_UPDATE_ARG="update"
|
||||
@ -522,57 +522,15 @@ function set_bin_path() {
|
||||
|
||||
function cmd_set_datastore() {
|
||||
local IMAGEID=$1
|
||||
local DATASTORE_TYPE=$2
|
||||
|
||||
# rd_manage datastore_update <datastore_name> <default_version>
|
||||
rd_manage datastore_update "$DATASTORE_TYPE" ""
|
||||
PACKAGES=${PACKAGES:-""}
|
||||
|
||||
if [ "$DATASTORE_TYPE" == "mysql" ]; then
|
||||
VERSION="5.7"
|
||||
elif [ "$DATASTORE_TYPE" == "percona" ]; then
|
||||
PACKAGES=${PACKAGES:-"percona-server-server-5.6"}
|
||||
VERSION="5.6"
|
||||
elif [ "$DATASTORE_TYPE" == "pxc" ]; then
|
||||
PACKAGES=${PACKAGES:-"percona-xtradb-cluster-server-5.6"}
|
||||
VERSION="5.6"
|
||||
elif [ "$DATASTORE_TYPE" == "mariadb" ]; then
|
||||
VERSION="10.4"
|
||||
elif [ "$DATASTORE_TYPE" == "mongodb" ]; then
|
||||
PACKAGES=${PACKAGES:-"mongodb-org"}
|
||||
VERSION="3.2"
|
||||
elif [ "$DATASTORE_TYPE" == "redis" ]; then
|
||||
PACKAGES=${PACKAGES:-""}
|
||||
VERSION="3.2.6"
|
||||
elif [ "$DATASTORE_TYPE" == "cassandra" ]; then
|
||||
PACKAGES=${PACKAGES:-"cassandra"}
|
||||
VERSION="2.1.0"
|
||||
elif [ "$DATASTORE_TYPE" == "couchbase" ]; then
|
||||
PACKAGES=${PACKAGES:-"couchbase-server"}
|
||||
VERSION="2.2.0"
|
||||
elif [ "$DATASTORE_TYPE" == "postgresql" ]; then
|
||||
VERSION="9.6"
|
||||
elif [ "$DATASTORE_TYPE" == "couchdb" ]; then
|
||||
PACKAGES=${PACKAGES:-"couchdb"}
|
||||
VERSION="1.6.1"
|
||||
elif [ "$DATASTORE_TYPE" == "vertica" ]; then
|
||||
PACKAGES=${PACKAGES:-"vertica"}
|
||||
VERSION="9.0.1"
|
||||
elif [ "$DATASTORE_TYPE" == "db2" ]; then
|
||||
PACKAGES=${PACKAGES:-""}
|
||||
VERSION="11.1"
|
||||
else
|
||||
echo "Unrecognized datastore type. ($DATASTORE_TYPE)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rd_manage datastore_update "$datastore" ""
|
||||
# trove-manage datastore_version_update <datastore_name> <version_name> <datastore_manager> <image_id> <packages> <active>
|
||||
rd_manage datastore_version_update "$DATASTORE_TYPE" "$VERSION" "$DATASTORE_TYPE" $IMAGEID "$PACKAGES" 1
|
||||
rd_manage datastore_update "$DATASTORE_TYPE" "$VERSION"
|
||||
rd_manage datastore_version_update "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" "${DATASTORE_TYPE}" $IMAGEID "" 1
|
||||
rd_manage datastore_update "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
|
||||
|
||||
if [ -f "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json ]; then
|
||||
if [[ -f "$PATH_TROVE"/trove/templates/${DATASTORE_TYPE}/validation-rules.json ]]; then
|
||||
# add the configuration parameters to the database for the kick-start datastore
|
||||
rd_manage db_load_datastore_config_parameters "$DATASTORE_TYPE" "$VERSION" "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json
|
||||
rd_manage db_load_datastore_config_parameters "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" "$PATH_TROVE"/trove/templates/${DATASTORE_TYPE}/validation-rules.json
|
||||
fi
|
||||
}
|
||||
|
||||
@ -627,8 +585,8 @@ function install_test_packages() {
|
||||
DATASTORE_TYPE=$1
|
||||
|
||||
sudo -H $HTTP_PROXY pip install openstack.nose_plugin proboscis pexpect
|
||||
if [ "$DATASTORE_TYPE" = "couchbase" ]; then
|
||||
if [ "$DISTRO" == "ubuntu" ]; then
|
||||
if [[ "$DATASTORE_TYPE" = "couchbase" ]]; then
|
||||
if [[ "$DISTRO" == "ubuntu" ]]; then
|
||||
# Install Couchbase SDK for scenario tests.
|
||||
sudo -H $HTTP_PROXY curl http://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
|
||||
echo "deb http://packages.couchbase.com/ubuntu trusty trusty/main" | sudo tee /etc/apt/sources.list.d/couchbase-csdk.list
|
||||
@ -649,12 +607,12 @@ function mod_confs() {
|
||||
TROVE_REPORT_DIR=${TROVE_REPORT_DIR:=$TROVESTACK_SCRIPTS/../report/}
|
||||
|
||||
EXTRA_CONF=$TROVESTACK_SCRIPTS/conf/test.extra.conf
|
||||
if [ -e $EXTRA_CONF ]; then
|
||||
if [[ -e $EXTRA_CONF ]]; then
|
||||
cat $EXTRA_CONF >> $TEST_CONF
|
||||
fi
|
||||
# Append datastore specific configuration file
|
||||
DATASTORE_CONF=$TROVESTACK_SCRIPTS/conf/$DATASTORE_TYPE.conf
|
||||
if [ ! -f $DATASTORE_CONF ]; then
|
||||
if [[ ! -f $DATASTORE_CONF ]]; then
|
||||
exclaim "Datastore configuration file ${DATASTORE_CONF} not found"
|
||||
exit 1
|
||||
fi
|
||||
@ -695,14 +653,14 @@ function mod_confs() {
|
||||
sed -i "/%shared_network_subnet%/d" $TEST_CONF
|
||||
fi
|
||||
|
||||
if [ "$DATASTORE_TYPE" = "vertica" ]; then
|
||||
if [[ "$DATASTORE_TYPE" = "vertica" ]]; then
|
||||
# Vertica needs more time than mysql for its boot/start/stop operations.
|
||||
setup_cluster_configs cluster_member_count 3
|
||||
elif [ "$DATASTORE_TYPE" = "pxc" ]; then
|
||||
elif [[ "$DATASTORE_TYPE" = "pxc" ]]; then
|
||||
setup_cluster_configs min_cluster_member_count 2
|
||||
elif [ "$DATASTORE_TYPE" = "cassandra" ]; then
|
||||
elif [[ "$DATASTORE_TYPE" = "cassandra" ]]; then
|
||||
setup_cluster_configs cluster_member_count 2
|
||||
elif [ "$DATASTORE_TYPE" = "mongodb" ]; then
|
||||
elif [[ "$DATASTORE_TYPE" = "mongodb" ]]; then
|
||||
setup_cluster_configs cluster_member_count 2
|
||||
# Decrease the number of required config servers per cluster to save resources.
|
||||
iniset $TROVE_CONF $DATASTORE_TYPE num_config_servers_per_cluster 1
|
||||
@ -747,7 +705,7 @@ function cmd_test_init() {
|
||||
local DATASTORE_TYPE=$1
|
||||
local DATASTORE_VERSION=$2
|
||||
|
||||
if [ -z "${DATASTORE_TYPE}" ]; then
|
||||
if [[ -z "${DATASTORE_TYPE}" ]]; then
|
||||
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
|
||||
exit 1
|
||||
fi
|
||||
@ -768,73 +726,60 @@ function cmd_test_init() {
|
||||
pip3 install -U git+https://opendev.org/openstack/python-troveclient@master#egg=python-troveclient
|
||||
}
|
||||
|
||||
# Build trove guest image
|
||||
function cmd_build_image() {
|
||||
exclaim "Params for cmd_build_image function: $@"
|
||||
|
||||
local IMAGE_DATASTORE_TYPE=${1:-'mysql'}
|
||||
local IMAGE_GUEST_OS=${2:-'ubuntu'}
|
||||
local IMAGE_GUEST_RELEASE=${3:-'xenial'}
|
||||
local DEV_MODE=${4:-'true'}
|
||||
local guest_username=${5:-'ubuntu'}
|
||||
local output=$6
|
||||
local image_guest_os=${1:-'ubuntu'}
|
||||
local image_guest_release=${2:-'bionic'}
|
||||
local dev_mode=${3:-'true'}
|
||||
local guest_username=${4:-'ubuntu'}
|
||||
local output=$5
|
||||
|
||||
if [[ -z "$output" ]]; then
|
||||
image_name="trove-datastore-${IMAGE_GUEST_OS}-${IMAGE_GUEST_RELEASE}-${IMAGE_DATASTORE_TYPE}"
|
||||
image_folder=$HOME/images
|
||||
output="${image_folder}/${image_name}"
|
||||
image_name="trove-guest-${image_guest_os}-${image_guest_release}"
|
||||
if [[ ${dev_mode} == "true" ]]; then
|
||||
image_name="${image_name}-dev"
|
||||
fi
|
||||
image_folder=$HOME/images
|
||||
output="${image_folder}/${image_name}.qcow2"
|
||||
fi
|
||||
|
||||
# Always rebuild the image.
|
||||
sudo rm -f $output
|
||||
sudo mkdir -p $(dirname $output); sudo chmod 777 -R $(dirname $output)
|
||||
sudo rm -rf ${output}
|
||||
sudo mkdir -p $(dirname ${output}); sudo chmod 777 -R $(dirname ${output})
|
||||
|
||||
echo "Ensuring we have all packages needed to build image."
|
||||
sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS update
|
||||
sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS install qemu git kpartx debootstrap
|
||||
sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS install qemu git kpartx debootstrap squashfs-tools
|
||||
sudo -H $HTTP_PROXY pip install diskimage-builder
|
||||
|
||||
exclaim "Use diskimage-builder to actually build the Trove Guest Agent Image."
|
||||
build_guest_image $IMAGE_DATASTORE_TYPE $IMAGE_GUEST_OS $IMAGE_GUEST_RELEASE $DEV_MODE ${guest_username} $output
|
||||
build_guest_image ${image_guest_os} ${image_guest_release} ${dev_mode} ${guest_username} ${output}
|
||||
}
|
||||
|
||||
# Build guest image and upload to Glance, register the datastore and configuration parameters.
|
||||
# We could skip the image build and upload by:
|
||||
# 1. MYSQL_IMAGE_ID is passed, or
|
||||
# 2. There is an image in Glance contains the datastore name
|
||||
function cmd_build_and_upload_image() {
|
||||
local datastore_type=$1
|
||||
local guest_os=${2:-"ubuntu"}
|
||||
local guest_release=${3:-"xenial"}
|
||||
local dev_mode=${4:-"true"}
|
||||
local guest_username=${5:-"ubuntu"}
|
||||
local output_dir=${6:-"$HOME/images"}
|
||||
local guest_os=${1:-"ubuntu"}
|
||||
local guest_release=${2:-"bionic"}
|
||||
local dev_mode=${3:-"true"}
|
||||
local guest_username=${4:-"ubuntu"}
|
||||
local output_dir=${5:-"$HOME/images"}
|
||||
|
||||
if [ -z "${datastore_type}" ]; then
|
||||
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
image_var="${datastore_type^^}_IMAGE_ID"
|
||||
glance_imageid=`eval echo '$'"$image_var"`
|
||||
|
||||
if [[ -z $glance_imageid ]]; then
|
||||
# Find the first image id with the name contains datastore_type.
|
||||
glance_imageid=$(openstack $CLOUD_ADMIN_ARG image list | grep "$datastore_type" | awk 'NR==1 {print}' | awk '{print $2}')
|
||||
|
||||
if [[ -z $glance_imageid ]]; then
|
||||
name=trove-guest-${guest_os}-${guest_release}
|
||||
glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image list --name $name -f value -c ID)
|
||||
if [[ -z ${glance_imageid} ]]; then
|
||||
mkdir -p ${output_dir}
|
||||
name=trove-datastore-${guest_os}-${guest_release}-${datastore_type}
|
||||
output=${output_dir}/$name.qcow2
|
||||
cmd_build_image ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} $output
|
||||
output=${output_dir}/${name}
|
||||
cmd_build_image ${guest_os} ${guest_release} ${dev_mode} ${guest_username} ${output}
|
||||
|
||||
glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image create $name --public --disk-format qcow2 --container-format bare --file $output --property hw_rng_model='virtio' -c id -f value)
|
||||
glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image create ${name} --public --disk-format qcow2 --container-format bare --file ${output} --property hw_rng_model='virtio' --tag trove -c id -f value)
|
||||
[[ -z "$glance_imageid" ]] && echo "Glance upload failed!" && exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
exclaim "Using Glance image ID: $glance_imageid"
|
||||
|
||||
exclaim "Updating Datastores"
|
||||
cmd_set_datastore "${glance_imageid}" "${datastore_type}"
|
||||
cmd_set_datastore "${glance_imageid}"
|
||||
}
|
||||
|
||||
|
||||
@ -991,11 +936,11 @@ function cmd_stop() {
|
||||
|
||||
function cmd_int_tests() {
|
||||
exclaim "Running Trove Integration Tests..."
|
||||
if [ ! $USAGE_ENDPOINT ]; then
|
||||
if [[ ! $USAGE_ENDPOINT ]]; then
|
||||
export USAGE_ENDPOINT=trove.tests.util.usage.FakeVerifier
|
||||
fi
|
||||
cd $TROVESTACK_SCRIPTS
|
||||
if [ $# -lt 1 ]; then
|
||||
if [[ $# -lt 1 ]]; then
|
||||
args="--group=mysql"
|
||||
else
|
||||
args="$@"
|
||||
@ -1203,7 +1148,7 @@ function cmd_kick_start() {
|
||||
local DATASTORE_TYPE=$1
|
||||
local DATASTORE_VERSION=$2
|
||||
|
||||
if [ -z "${DATASTORE_TYPE}" ]; then
|
||||
if [[ -z "${DATASTORE_TYPE}" ]]; then
|
||||
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
|
||||
exit 1
|
||||
fi
|
||||
@ -1220,10 +1165,13 @@ function cmd_kick_start() {
|
||||
function cmd_gate_tests() {
|
||||
local DATASTORE_TYPE=${1:-'mysql'}
|
||||
local TEST_GROUP=${2:-${DATASTORE_TYPE}}
|
||||
local DATASTORE_VERSION=${3:-'5.7'}
|
||||
local DATASTORE_VERSION=${3:-'5.7.29'}
|
||||
local HOST_SCP_USERNAME=${4:-$(whoami)}
|
||||
local GUEST_USERNAME=${5:-'ubuntu'}
|
||||
|
||||
export DATASTORE_TYPE=${DATASTORE_TYPE}
|
||||
export DATASTORE_VERSION=${DATASTORE_VERSION}
|
||||
|
||||
exclaim "Running cmd_gate_tests ..."
|
||||
|
||||
export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/gate-tests-report/}
|
||||
@ -1238,7 +1186,7 @@ function cmd_gate_tests() {
|
||||
cd $TROVESTACK_SCRIPTS
|
||||
|
||||
# Build and upload guest image, register datastore version.
|
||||
cmd_build_and_upload_image ${DATASTORE_TYPE}
|
||||
cmd_build_and_upload_image
|
||||
|
||||
cmd_kick_start "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
|
||||
|
||||
|
@ -25,6 +25,7 @@ deprecation==2.0
|
||||
diskimage-builder==1.1.2
|
||||
doc8==0.6.0
|
||||
docutils==0.14
|
||||
docker==4.2.0
|
||||
dogpile.cache==0.6.5
|
||||
dulwich==0.19.0
|
||||
enum34===1.0.4
|
||||
|
@ -7,12 +7,11 @@
|
||||
- name: Build Trove guest image
|
||||
shell: >-
|
||||
./trovestack build-image \
|
||||
{{ datastore_type }} \
|
||||
{{ guest_os }} \
|
||||
{{ guest_os_release }} \
|
||||
{{ dev_mode }} \
|
||||
{{ guest_username }} \
|
||||
{{ ansible_user_dir }}/images/trove-{{ branch }}-{{ datastore_type }}-{{ guest_os }}-{{ guest_os_release }}{{ image_suffix }}
|
||||
{{ ansible_user_dir }}/images/trove-{{ branch }}-guest-{{ guest_os }}-{{ guest_os_release }}{{ image_suffix }}.qcow2
|
||||
args:
|
||||
chdir: "{{ ansible_user_dir }}/src/opendev.org/openstack/trove/integration/scripts"
|
||||
tags:
|
||||
|
@ -48,3 +48,4 @@ xmltodict>=0.10.1 # MIT
|
||||
cryptography>=2.1.4 # BSD/Apache-2.0
|
||||
oslo.policy>=1.30.0 # Apache-2.0
|
||||
diskimage-builder!=1.6.0,!=1.7.0,!=1.7.1,>=1.1.2 # Apache-2.0
|
||||
docker>=4.2.0 # Apache-2.0
|
||||
|
@ -1,5 +1,5 @@
|
||||
devstack_base_dir: /opt/stack
|
||||
trove_test_datastore: 'mysql'
|
||||
trove_test_group: 'mysql'
|
||||
trove_test_datastore_version: '5.7'
|
||||
trove_test_datastore_version: '5.7.29'
|
||||
trove_resize_time_out: ''
|
||||
|
@ -1497,6 +1497,12 @@
|
||||
"Instance of 'FreshInstance' has no 'get_replication_master_snapshot' member",
|
||||
"Manager._create_replication_slave"
|
||||
],
|
||||
[
|
||||
"trove/taskmanager/manager.py",
|
||||
"E1136",
|
||||
"Value 'snapshot' is unsubscriptable",
|
||||
"Manager._create_replication_slave"
|
||||
],
|
||||
[
|
||||
"trove/taskmanager/manager.py",
|
||||
"E1101",
|
||||
|
6
tox.ini
6
tox.ini
@ -53,9 +53,7 @@ ignore-path = .venv,.tox,.git,dist,doc,*egg-info,tools,etc,build,*.po,*.pot,inte
|
||||
|
||||
[flake8]
|
||||
show-source = True
|
||||
# H301 is ignored on purpose.
|
||||
# The rest of the ignores are TODOs.
|
||||
ignore = E402,E731,F601,F821,H301,H404,H405,H501,W503,W504,W605
|
||||
ignore = E125,E129,E402,E731,F601,F821,H301,H306,H404,H405,H501,W503,W504,W605
|
||||
enable-extensions = H203,H106
|
||||
builtins = _
|
||||
# add *.yaml for playbooks/trove-devstack-base.yaml, as it will be matched by
|
||||
@ -68,7 +66,7 @@ import_exceptions = trove.common.i18n
|
||||
|
||||
[flake8:local-plugins]
|
||||
extension =
|
||||
T103= checks:check_raised_localized_exceptions
|
||||
# T103= checks:check_raised_localized_exceptions
|
||||
T104 = checks:check_no_basestring
|
||||
T105 = checks:no_translate_logs
|
||||
N335 = checks:assert_raises_regexp
|
||||
|
@ -268,7 +268,7 @@ class Backup(object):
|
||||
try:
|
||||
cls.delete(context, child.id)
|
||||
except exception.NotFound:
|
||||
LOG.exception("Backup %s cannot be found.", backup_id)
|
||||
LOG.warning("Backup %s cannot be found.", backup_id)
|
||||
|
||||
def _delete_resources():
|
||||
backup = cls.get_by_id(context, backup_id)
|
||||
|
@ -12,7 +12,6 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg as openstack_cfg
|
||||
@ -23,6 +22,7 @@ from trove.common import cfg
|
||||
from trove.common import debug_utils
|
||||
from trove.common.i18n import _
|
||||
from trove.guestagent import api as guest_api
|
||||
from trove.guestagent.common import operating_system
|
||||
|
||||
CONF = cfg.CONF
|
||||
# The guest_id opt definition must match the one in common/cfg.py
|
||||
@ -31,9 +31,18 @@ CONF.register_opts([openstack_cfg.StrOpt('guest_id', default=None,
|
||||
openstack_cfg.StrOpt('instance_rpc_encr_key',
|
||||
help=('Key (OpenSSL aes_cbc) for '
|
||||
'instance RPC encryption.'))])
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def main():
|
||||
log_levels = [
|
||||
'docker=WARN',
|
||||
]
|
||||
default_log_levels = logging.get_default_log_levels()
|
||||
default_log_levels.extend(log_levels)
|
||||
logging.set_defaults(default_log_levels=default_log_levels)
|
||||
logging.register_options(CONF)
|
||||
|
||||
cfg.parse_args(sys.argv)
|
||||
logging.setup(CONF, None)
|
||||
debug_utils.setup()
|
||||
@ -50,6 +59,11 @@ def main():
|
||||
"was not injected into the guest or not read by guestagent"))
|
||||
raise RuntimeError(msg)
|
||||
|
||||
# Create user and group for running docker container.
|
||||
LOG.info('Creating user and group for database service')
|
||||
uid = cfg.get_configuration_property('database_service_uid')
|
||||
operating_system.create_user('database', uid)
|
||||
|
||||
# rpc module must be loaded after decision about thread monkeypatching
|
||||
# because if thread module is not monkeypatched we can't use eventlet
|
||||
# executor from oslo_messaging library.
|
||||
|
@ -175,10 +175,10 @@ common_opts = [
|
||||
help="Maximum time (in seconds) to wait for Guest Agent "
|
||||
"'quick' requests (such as retrieving a list of "
|
||||
"users or databases)."),
|
||||
cfg.IntOpt('agent_call_high_timeout', default=60 * 5,
|
||||
cfg.IntOpt('agent_call_high_timeout', default=60 * 3,
|
||||
help="Maximum time (in seconds) to wait for Guest Agent 'slow' "
|
||||
"requests (such as restarting the database)."),
|
||||
cfg.IntOpt('agent_replication_snapshot_timeout', default=36000,
|
||||
cfg.IntOpt('agent_replication_snapshot_timeout', default=60 * 30,
|
||||
help='Maximum time (in seconds) to wait for taking a Guest '
|
||||
'Agent replication snapshot.'),
|
||||
cfg.IntOpt('command_process_timeout', default=30,
|
||||
@ -186,8 +186,9 @@ common_opts = [
|
||||
'commands to complete.'),
|
||||
# The guest_id opt definition must match the one in cmd/guest.py
|
||||
cfg.StrOpt('guest_id', default=None, help="ID of the Guest Instance."),
|
||||
cfg.IntOpt('state_change_wait_time', default=60 * 10,
|
||||
help='Maximum time (in seconds) to wait for a state change.'),
|
||||
cfg.IntOpt('state_change_wait_time', default=180,
|
||||
help='Maximum time (in seconds) to wait for database state '
|
||||
'change.'),
|
||||
cfg.IntOpt('state_change_poll_time', default=3,
|
||||
help='Interval between state change poll requests (seconds).'),
|
||||
cfg.IntOpt('agent_heartbeat_time', default=10,
|
||||
@ -293,9 +294,11 @@ common_opts = [
|
||||
help='The region this service is located.'),
|
||||
cfg.StrOpt('backup_runner',
|
||||
default='trove.guestagent.backup.backup_types.InnoBackupEx',
|
||||
help='Runner to use for backups.'),
|
||||
help='Runner to use for backups.',
|
||||
deprecated_for_removal=True),
|
||||
cfg.DictOpt('backup_runner_options', default={},
|
||||
help='Additional options to be passed to the backup runner.'),
|
||||
help='Additional options to be passed to the backup runner.',
|
||||
deprecated_for_removal=True),
|
||||
cfg.BoolOpt('verify_swift_checksum_on_restore', default=True,
|
||||
help='Enable verification of Swift checksum before starting '
|
||||
'restore. Makes sure the checksum of original backup matches '
|
||||
@ -304,11 +307,12 @@ common_opts = [
|
||||
help='Require the replica volume size to be greater than '
|
||||
'or equal to the size of the master volume '
|
||||
'during replica creation.'),
|
||||
cfg.StrOpt('storage_strategy', default='SwiftStorage',
|
||||
cfg.StrOpt('storage_strategy', default='swift',
|
||||
help="Default strategy to store backups."),
|
||||
cfg.StrOpt('storage_namespace',
|
||||
default='trove.common.strategies.storage.swift',
|
||||
help='Namespace to load the default storage strategy from.'),
|
||||
help='Namespace to load the default storage strategy from.',
|
||||
deprecated_for_removal=True),
|
||||
cfg.StrOpt('backup_swift_container', default='database_backups',
|
||||
help='Swift container to put backups in.'),
|
||||
cfg.BoolOpt('backup_use_gzip_compression', default=True,
|
||||
@ -429,15 +433,12 @@ common_opts = [
|
||||
cfg.IntOpt('usage_timeout', default=60 * 30,
|
||||
help='Maximum time (in seconds) to wait for a Guest to become '
|
||||
'active.'),
|
||||
cfg.IntOpt('restore_usage_timeout', default=36000,
|
||||
cfg.IntOpt('restore_usage_timeout', default=60 * 60,
|
||||
help='Maximum time (in seconds) to wait for a Guest instance '
|
||||
'restored from a backup to become active.'),
|
||||
cfg.IntOpt('cluster_usage_timeout', default=36000,
|
||||
help='Maximum time (in seconds) to wait for a cluster to '
|
||||
'become active.'),
|
||||
cfg.IntOpt('timeout_wait_for_service', default=120,
|
||||
help='Maximum time (in seconds) to wait for a service to '
|
||||
'become alive.'),
|
||||
cfg.StrOpt('module_aes_cbc_key', default='module_aes_cbc_key',
|
||||
help='OpenSSL aes_cbc key for module encryption.'),
|
||||
cfg.ListOpt('module_types', default=['ping', 'new_relic_license'],
|
||||
@ -466,6 +467,10 @@ common_opts = [
|
||||
help='Key (OpenSSL aes_cbc) to encrypt instance keys in DB.'),
|
||||
cfg.StrOpt('instance_rpc_encr_key',
|
||||
help='Key (OpenSSL aes_cbc) for instance RPC encryption.'),
|
||||
cfg.StrOpt('database_service_uid', default='1001',
|
||||
help='The UID(GID) of database service user.'),
|
||||
cfg.StrOpt('backup_docker_image', default='openstacktrove/db-backup:1.0.0',
|
||||
help='The docker image used for backup and restore.'),
|
||||
]
|
||||
|
||||
|
||||
@ -544,7 +549,7 @@ mysql_opts = [
|
||||
help='List of UDP ports and/or port ranges to open '
|
||||
'in the security group (only applicable '
|
||||
'if trove_security_groups_support is True).'),
|
||||
cfg.StrOpt('backup_strategy', default='InnoBackupEx',
|
||||
cfg.StrOpt('backup_strategy', default='innobackupex',
|
||||
help='Default strategy to perform backups.',
|
||||
deprecated_name='backup_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
@ -564,28 +569,10 @@ mysql_opts = [
|
||||
cfg.IntOpt('usage_timeout', default=400,
|
||||
help='Maximum time (in seconds) to wait for a Guest to become '
|
||||
'active.'),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.mysql_impl',
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.mysql_impl',
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.BoolOpt('volume_support', default=True,
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
help='Device path for volume if volume support is enabled.'),
|
||||
cfg.DictOpt('backup_incremental_strategy',
|
||||
default={'InnoBackupEx': 'InnoBackupExIncremental'},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental backup, the runner will use the default full '
|
||||
'backup.',
|
||||
deprecated_name='backup_incremental_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('root_controller',
|
||||
default='trove.extensions.common.service.DefaultRootController',
|
||||
help='Root controller implementation for mysql.'),
|
||||
@ -611,6 +598,10 @@ mysql_opts = [
|
||||
help='Character length of generated passwords.',
|
||||
deprecated_name='default_password_length',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt(
|
||||
'docker_image', default='mysql',
|
||||
help='Database docker image.'
|
||||
)
|
||||
]
|
||||
|
||||
# Percona
|
||||
@ -653,28 +644,10 @@ percona_opts = [
|
||||
cfg.IntOpt('usage_timeout', default=450,
|
||||
help='Maximum time (in seconds) to wait for a Guest to become '
|
||||
'active.'),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.mysql_impl',
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.mysql_impl',
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.BoolOpt('volume_support', default=True,
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
help='Device path for volume if volume support is enabled.'),
|
||||
cfg.DictOpt('backup_incremental_strategy',
|
||||
default={'InnoBackupEx': 'InnoBackupExIncremental'},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental backup, the runner will use the default full '
|
||||
'backup.',
|
||||
deprecated_name='backup_incremental_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('root_controller',
|
||||
default='trove.extensions.common.service.DefaultRootController',
|
||||
help='Root controller implementation for percona.'),
|
||||
@ -739,22 +712,10 @@ pxc_opts = [
|
||||
cfg.IntOpt('usage_timeout', default=450,
|
||||
help='Maximum time (in seconds) to wait for a Guest to become '
|
||||
'active.'),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.mysql_impl',
|
||||
help='Namespace to load backup strategies from.'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.mysql_impl',
|
||||
help='Namespace to load restore strategies from.'),
|
||||
cfg.BoolOpt('volume_support', default=True,
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
help='Device path for volume if volume support is enabled.'),
|
||||
cfg.DictOpt('backup_incremental_strategy',
|
||||
default={'InnoBackupEx': 'InnoBackupExIncremental'},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental backup, the runner will use the default full '
|
||||
'backup.'),
|
||||
cfg.ListOpt('ignore_users', default=['os_admin', 'root', 'clusterrepuser'],
|
||||
help='Users to exclude when listing users.'),
|
||||
cfg.ListOpt('ignore_dbs',
|
||||
@ -818,12 +779,6 @@ redis_opts = [
|
||||
help='Default strategy to perform backups.',
|
||||
deprecated_name='backup_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.DictOpt('backup_incremental_strategy', default={},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental, the runner will use the default full backup.',
|
||||
deprecated_name='backup_incremental_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('replication_strategy', default='RedisSyncReplication',
|
||||
help='Default strategy for replication.'),
|
||||
cfg.StrOpt('replication_namespace',
|
||||
@ -837,18 +792,6 @@ redis_opts = [
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
help='Device path for volume if volume support is enabled.'),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default="trove.guestagent.strategies.backup.experimental."
|
||||
"redis_impl",
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default="trove.guestagent.strategies.restore.experimental."
|
||||
"redis_impl",
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.BoolOpt('cluster_support', default=True,
|
||||
help='Enable clusters to be created and managed.'),
|
||||
cfg.StrOpt('api_strategy',
|
||||
@ -893,12 +836,6 @@ cassandra_opts = [
|
||||
help='List of UDP ports and/or port ranges to open '
|
||||
'in the security group (only applicable '
|
||||
'if trove_security_groups_support is True).'),
|
||||
cfg.DictOpt('backup_incremental_strategy', default={},
|
||||
help='Incremental strategy based on the default backup '
|
||||
'strategy. For strategies that do not implement incremental '
|
||||
'backups, the runner performs full backup instead.',
|
||||
deprecated_name='backup_incremental_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('backup_strategy', default="NodetoolSnapshot",
|
||||
help='Default strategy to perform backups.',
|
||||
deprecated_name='backup_strategy',
|
||||
@ -912,18 +849,6 @@ cassandra_opts = [
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
help='Device path for volume if volume support is enabled.'),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default="trove.guestagent.strategies.backup.experimental."
|
||||
"cassandra_impl",
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default="trove.guestagent.strategies.restore.experimental."
|
||||
"cassandra_impl",
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('root_controller',
|
||||
default='trove.extensions.common.service.DefaultRootController',
|
||||
help='Root controller implementation for Cassandra.'),
|
||||
@ -1002,12 +927,6 @@ couchbase_opts = [
|
||||
help='Default strategy to perform backups.',
|
||||
deprecated_name='backup_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.DictOpt('backup_incremental_strategy', default={},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental, the runner will use the default full backup.',
|
||||
deprecated_name='backup_incremental_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('replication_strategy', default=None,
|
||||
help='Default strategy for replication.'),
|
||||
cfg.StrOpt('mount_point', default='/var/lib/couchbase',
|
||||
@ -1018,18 +937,6 @@ couchbase_opts = [
|
||||
'service during instance-create. The generated password for '
|
||||
'the root user is immediately returned in the response of '
|
||||
"instance-create as the 'password' field."),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.experimental.'
|
||||
'couchbase_impl',
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.experimental.'
|
||||
'couchbase_impl',
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.BoolOpt('volume_support', default=True,
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
@ -1066,12 +973,6 @@ mongodb_opts = [
|
||||
help='Default strategy to perform backups.',
|
||||
deprecated_name='backup_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.DictOpt('backup_incremental_strategy', default={},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental, the runner will use the default full backup.',
|
||||
deprecated_name='backup_incremental_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('replication_strategy', default=None,
|
||||
help='Default strategy for replication.'),
|
||||
cfg.StrOpt('mount_point', default='/var/lib/mongodb',
|
||||
@ -1109,18 +1010,6 @@ mongodb_opts = [
|
||||
'mongodb.guestagent.MongoDbGuestAgentStrategy',
|
||||
help='Class that implements datastore-specific Guest Agent API '
|
||||
'logic.'),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.experimental.'
|
||||
'mongo_impl',
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.experimental.'
|
||||
'mongo_impl',
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.PortOpt('mongodb_port', default=27017,
|
||||
help='Port for mongod and mongos instances.'),
|
||||
cfg.PortOpt('configsvr_port', default=27019,
|
||||
@ -1164,11 +1053,6 @@ postgresql_opts = [
|
||||
help='The TCP port the server listens on.'),
|
||||
cfg.StrOpt('backup_strategy', default='PgBaseBackup',
|
||||
help='Default strategy to perform backups.'),
|
||||
cfg.DictOpt('backup_incremental_strategy',
|
||||
default={'PgBaseBackup': 'PgBaseBackupIncremental'},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental, the runner will use the default full backup.'),
|
||||
cfg.StrOpt('replication_strategy',
|
||||
default='PostgresqlReplicationStreaming',
|
||||
help='Default strategy for replication.'),
|
||||
@ -1188,14 +1072,6 @@ postgresql_opts = [
|
||||
'service during instance-create. The generated password for '
|
||||
'the root user is immediately returned in the response of '
|
||||
"instance-create as the 'password' field."),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.experimental.'
|
||||
'postgresql_impl',
|
||||
help='Namespace to load backup strategies from.'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.experimental.'
|
||||
'postgresql_impl',
|
||||
help='Namespace to load restore strategies from.'),
|
||||
cfg.BoolOpt('volume_support', default=True,
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb'),
|
||||
@ -1248,16 +1124,6 @@ couchdb_opts = [
|
||||
help='Default strategy to perform backups.'),
|
||||
cfg.StrOpt('replication_strategy', default=None,
|
||||
help='Default strategy for replication.'),
|
||||
cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies'
|
||||
'.backup.experimental.couchdb_impl',
|
||||
help='Namespace to load backup strategies from.'),
|
||||
cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies'
|
||||
'.restore.experimental.couchdb_impl',
|
||||
help='Namespace to load restore strategies from.'),
|
||||
cfg.DictOpt('backup_incremental_strategy', default={},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental, the runner will use the default full backup.'),
|
||||
cfg.BoolOpt('root_on_create', default=False,
|
||||
help='Enable the automatic creation of the root user for the '
|
||||
'service during instance-create. The generated password for '
|
||||
@ -1303,10 +1169,6 @@ vertica_opts = [
|
||||
'if trove_security_groups_support is True).'),
|
||||
cfg.StrOpt('backup_strategy', default=None,
|
||||
help='Default strategy to perform backups.'),
|
||||
cfg.DictOpt('backup_incremental_strategy', default={},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental, the runner will use the default full backup.'),
|
||||
cfg.StrOpt('replication_strategy', default=None,
|
||||
help='Default strategy for replication.'),
|
||||
cfg.StrOpt('mount_point', default='/var/lib/vertica',
|
||||
@ -1317,9 +1179,11 @@ vertica_opts = [
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
help='Device path for volume if volume support is enabled.'),
|
||||
cfg.StrOpt('backup_namespace', default=None,
|
||||
help='Namespace to load backup strategies from.'),
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_for_removal=True),
|
||||
cfg.StrOpt('restore_namespace', default=None,
|
||||
help='Namespace to load restore strategies from.'),
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_for_removal=True),
|
||||
cfg.IntOpt('readahead_size', default=2048,
|
||||
help='Size(MB) to be set as readahead_size for data volume'),
|
||||
cfg.BoolOpt('cluster_support', default=True,
|
||||
@ -1387,22 +1251,6 @@ db2_opts = [
|
||||
'service during instance-create. The generated password for '
|
||||
'the root user is immediately returned in the response of '
|
||||
"instance-create as the 'password' field."),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.experimental.'
|
||||
'db2_impl',
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.experimental.'
|
||||
'db2_impl',
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.DictOpt('backup_incremental_strategy', default={},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental, the runner will use the default full backup.'),
|
||||
cfg.ListOpt('ignore_users', default=['PUBLIC', 'DB2INST1']),
|
||||
cfg.StrOpt('root_controller',
|
||||
default='trove.extensions.common.service.DefaultRootController',
|
||||
@ -1432,21 +1280,14 @@ mariadb_opts = [
|
||||
help='List of UDP ports and/or port ranges to open '
|
||||
'in the security group (only applicable '
|
||||
'if trove_security_groups_support is True).'),
|
||||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.experimental'
|
||||
'.mariadb_impl',
|
||||
help='Namespace to load backup strategies from.',
|
||||
deprecated_name='backup_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('backup_strategy', default='MariaBackup',
|
||||
cfg.StrOpt('backup_strategy', default='mariabackup',
|
||||
help='Default strategy to perform backups.',
|
||||
deprecated_name='backup_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('replication_strategy', default='MariaDBGTIDReplication',
|
||||
help='Default strategy for replication.'),
|
||||
cfg.StrOpt('replication_namespace',
|
||||
default='trove.guestagent.strategies.replication.experimental'
|
||||
'.mariadb_gtid',
|
||||
default='trove.guestagent.strategies.replication.mariadb_gtid',
|
||||
help='Namespace to load replication strategies from.'),
|
||||
cfg.StrOpt('mount_point', default='/var/lib/mysql',
|
||||
help="Filesystem path for mounting "
|
||||
@ -1459,25 +1300,10 @@ mariadb_opts = [
|
||||
cfg.IntOpt('usage_timeout', default=400,
|
||||
help='Maximum time (in seconds) to wait for a Guest to become '
|
||||
'active.'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.experimental'
|
||||
'.mariadb_impl',
|
||||
help='Namespace to load restore strategies from.',
|
||||
deprecated_name='restore_namespace',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.BoolOpt('volume_support', default=True,
|
||||
help='Whether to provision a Cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb',
|
||||
help='Device path for volume if volume support is enabled.'),
|
||||
cfg.DictOpt('backup_incremental_strategy',
|
||||
default={'MariaBackup':
|
||||
'MariaBackupIncremental'},
|
||||
help='Incremental Backup Runner based on the default '
|
||||
'strategy. For strategies that do not implement an '
|
||||
'incremental backup, the runner will use the default full '
|
||||
'backup.',
|
||||
deprecated_name='backup_incremental_strategy',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('root_controller',
|
||||
default='trove.extensions.common.service.DefaultRootController',
|
||||
help='Root controller implementation for mysql.'),
|
||||
@ -1521,6 +1347,10 @@ mariadb_opts = [
|
||||
help='Character length of generated passwords.',
|
||||
deprecated_name='default_password_length',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt(
|
||||
'docker_image', default='mariadb',
|
||||
help='Database docker image.'
|
||||
)
|
||||
]
|
||||
|
||||
# RPC version groups
|
||||
|
@ -307,7 +307,7 @@ class VolumeAttachmentsNotFound(NotFound):
|
||||
|
||||
class VolumeCreationFailure(TroveError):
|
||||
|
||||
message = _("Failed to create a volume in Nova.")
|
||||
message = _("Failed to create volume.")
|
||||
|
||||
|
||||
class VolumeSizeNotSpecified(BadRequest):
|
||||
@ -341,6 +341,16 @@ class ReplicationSlaveAttachError(TroveError):
|
||||
message = _("Exception encountered attaching slave to new replica source.")
|
||||
|
||||
|
||||
class SlaveOperationNotSupported(TroveError):
|
||||
message = _("The '%(operation)s' operation is not supported for slaves in "
|
||||
"replication.")
|
||||
|
||||
|
||||
class UnableToDetermineLastMasterGTID(TroveError):
|
||||
message = _("Unable to determine last GTID executed on master "
|
||||
"(from file %(binlog_file)s).")
|
||||
|
||||
|
||||
class TaskManagerError(TroveError):
|
||||
|
||||
message = _("An error occurred communicating with the task manager: "
|
||||
@ -688,9 +698,3 @@ class LogAccessForbidden(Forbidden):
|
||||
class LogsNotAvailable(Forbidden):
|
||||
|
||||
message = _("Log actions are not supported.")
|
||||
|
||||
|
||||
class SlaveOperationNotSupported(TroveError):
|
||||
|
||||
message = _("The '%(operation)s' operation is not supported for slaves in "
|
||||
"replication.")
|
||||
|
@ -1,141 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
-*- rnc -*-
|
||||
RELAX NG Compact Syntax Grammar for the
|
||||
Atom Format Specification Version 11
|
||||
-->
|
||||
<grammar xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:s="http://www.ascc.net/xml/schematron" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<choice>
|
||||
<ref name="atomLink"/>
|
||||
</choice>
|
||||
</start>
|
||||
<!-- Common attributes -->
|
||||
<define name="atomCommonAttributes">
|
||||
<optional>
|
||||
<attribute name="xml:base">
|
||||
<ref name="atomUri"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="xml:lang">
|
||||
<ref name="atomLanguageTag"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<ref name="undefinedAttribute"/>
|
||||
</zeroOrMore>
|
||||
</define>
|
||||
<!-- atom:link -->
|
||||
<define name="atomLink">
|
||||
<element name="atom:link">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<attribute name="href">
|
||||
<ref name="atomUri"/>
|
||||
</attribute>
|
||||
<optional>
|
||||
<attribute name="rel">
|
||||
<choice>
|
||||
<ref name="atomNCName"/>
|
||||
<ref name="atomUri"/>
|
||||
</choice>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="type">
|
||||
<ref name="atomMediaType"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="hreflang">
|
||||
<ref name="atomLanguageTag"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="title"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="length"/>
|
||||
</optional>
|
||||
<ref name="undefinedContent"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- Low-level simple types -->
|
||||
<define name="atomNCName">
|
||||
<data type="string">
|
||||
<param name="minLength">1</param>
|
||||
<param name="pattern">[^:]*</param>
|
||||
</data>
|
||||
</define>
|
||||
<!-- Whatever a media type is, it contains at least one slash -->
|
||||
<define name="atomMediaType">
|
||||
<data type="string">
|
||||
<param name="pattern">.+/.+</param>
|
||||
</data>
|
||||
</define>
|
||||
<!-- As defined in RFC 3066 -->
|
||||
<define name="atomLanguageTag">
|
||||
<data type="string">
|
||||
<param name="pattern">[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*</param>
|
||||
</data>
|
||||
</define>
|
||||
<!--
|
||||
Unconstrained; it's not entirely clear how IRI fit into
|
||||
xsd:anyURI so let's not try to constrain it here
|
||||
-->
|
||||
<define name="atomUri">
|
||||
<text/>
|
||||
</define>
|
||||
<!-- Other Extensibility -->
|
||||
<define name="undefinedAttribute">
|
||||
<attribute>
|
||||
<anyName>
|
||||
<except>
|
||||
<name>xml:base</name>
|
||||
<name>xml:lang</name>
|
||||
<nsName ns=""/>
|
||||
</except>
|
||||
</anyName>
|
||||
</attribute>
|
||||
</define>
|
||||
<define name="undefinedContent">
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<text/>
|
||||
<ref name="anyForeignElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</define>
|
||||
<define name="anyElement">
|
||||
<element>
|
||||
<anyName/>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
<text/>
|
||||
<ref name="anyElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
<define name="anyForeignElement">
|
||||
<element>
|
||||
<anyName>
|
||||
<except>
|
||||
<nsName ns="http://www.w3.org/2005/Atom"/>
|
||||
</except>
|
||||
</anyName>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
<text/>
|
||||
<ref name="anyElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
</grammar>
|
@ -1,597 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
-*- rnc -*-
|
||||
RELAX NG Compact Syntax Grammar for the
|
||||
Atom Format Specification Version 11
|
||||
-->
|
||||
<grammar xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:s="http://www.ascc.net/xml/schematron" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<choice>
|
||||
<ref name="atomFeed"/>
|
||||
<ref name="atomEntry"/>
|
||||
</choice>
|
||||
</start>
|
||||
<!-- Common attributes -->
|
||||
<define name="atomCommonAttributes">
|
||||
<optional>
|
||||
<attribute name="xml:base">
|
||||
<ref name="atomUri"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="xml:lang">
|
||||
<ref name="atomLanguageTag"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<ref name="undefinedAttribute"/>
|
||||
</zeroOrMore>
|
||||
</define>
|
||||
<!-- Text Constructs -->
|
||||
<define name="atomPlainTextConstruct">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<optional>
|
||||
<attribute name="type">
|
||||
<choice>
|
||||
<value>text</value>
|
||||
<value>html</value>
|
||||
</choice>
|
||||
</attribute>
|
||||
</optional>
|
||||
<text/>
|
||||
</define>
|
||||
<define name="atomXHTMLTextConstruct">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<attribute name="type">
|
||||
<value>xhtml</value>
|
||||
</attribute>
|
||||
<ref name="xhtmlDiv"/>
|
||||
</define>
|
||||
<define name="atomTextConstruct">
|
||||
<choice>
|
||||
<ref name="atomPlainTextConstruct"/>
|
||||
<ref name="atomXHTMLTextConstruct"/>
|
||||
</choice>
|
||||
</define>
|
||||
<!-- Person Construct -->
|
||||
<define name="atomPersonConstruct">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<interleave>
|
||||
<element name="atom:name">
|
||||
<text/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="atom:uri">
|
||||
<ref name="atomUri"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="atom:email">
|
||||
<ref name="atomEmailAddress"/>
|
||||
</element>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<ref name="extensionElement"/>
|
||||
</zeroOrMore>
|
||||
</interleave>
|
||||
</define>
|
||||
<!-- Date Construct -->
|
||||
<define name="atomDateConstruct">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<data type="dateTime"/>
|
||||
</define>
|
||||
<!-- atom:feed -->
|
||||
<define name="atomFeed">
|
||||
<element name="atom:feed">
|
||||
<s:rule context="atom:feed">
|
||||
<s:assert test="atom:author or not(atom:entry[not(atom:author)])">An atom:feed must have an atom:author unless all of its atom:entry children have an atom:author.</s:assert>
|
||||
</s:rule>
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<interleave>
|
||||
<zeroOrMore>
|
||||
<ref name="atomAuthor"/>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<ref name="atomCategory"/>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<ref name="atomContributor"/>
|
||||
</zeroOrMore>
|
||||
<optional>
|
||||
<ref name="atomGenerator"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomIcon"/>
|
||||
</optional>
|
||||
<ref name="atomId"/>
|
||||
<zeroOrMore>
|
||||
<ref name="atomLink"/>
|
||||
</zeroOrMore>
|
||||
<optional>
|
||||
<ref name="atomLogo"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomRights"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomSubtitle"/>
|
||||
</optional>
|
||||
<ref name="atomTitle"/>
|
||||
<ref name="atomUpdated"/>
|
||||
<zeroOrMore>
|
||||
<ref name="extensionElement"/>
|
||||
</zeroOrMore>
|
||||
</interleave>
|
||||
<zeroOrMore>
|
||||
<ref name="atomEntry"/>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:entry -->
|
||||
<define name="atomEntry">
|
||||
<element name="atom:entry">
|
||||
<s:rule context="atom:entry">
|
||||
<s:assert test="atom:link[@rel='alternate'] or atom:link[not(@rel)] or atom:content">An atom:entry must have at least one atom:link element with a rel attribute of 'alternate' or an atom:content.</s:assert>
|
||||
</s:rule>
|
||||
<s:rule context="atom:entry">
|
||||
<s:assert test="atom:author or ../atom:author or atom:source/atom:author">An atom:entry must have an atom:author if its feed does not.</s:assert>
|
||||
</s:rule>
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<interleave>
|
||||
<zeroOrMore>
|
||||
<ref name="atomAuthor"/>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<ref name="atomCategory"/>
|
||||
</zeroOrMore>
|
||||
<optional>
|
||||
<ref name="atomContent"/>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<ref name="atomContributor"/>
|
||||
</zeroOrMore>
|
||||
<ref name="atomId"/>
|
||||
<zeroOrMore>
|
||||
<ref name="atomLink"/>
|
||||
</zeroOrMore>
|
||||
<optional>
|
||||
<ref name="atomPublished"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomRights"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomSource"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomSummary"/>
|
||||
</optional>
|
||||
<ref name="atomTitle"/>
|
||||
<ref name="atomUpdated"/>
|
||||
<zeroOrMore>
|
||||
<ref name="extensionElement"/>
|
||||
</zeroOrMore>
|
||||
</interleave>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:content -->
|
||||
<define name="atomInlineTextContent">
|
||||
<element name="atom:content">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<optional>
|
||||
<attribute name="type">
|
||||
<choice>
|
||||
<value>text</value>
|
||||
<value>html</value>
|
||||
</choice>
|
||||
</attribute>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<text/>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
<define name="atomInlineXHTMLContent">
|
||||
<element name="atom:content">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<attribute name="type">
|
||||
<value>xhtml</value>
|
||||
</attribute>
|
||||
<ref name="xhtmlDiv"/>
|
||||
</element>
|
||||
</define>
|
||||
<define name="atomInlineOtherContent">
|
||||
<element name="atom:content">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<optional>
|
||||
<attribute name="type">
|
||||
<ref name="atomMediaType"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<text/>
|
||||
<ref name="anyElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
<define name="atomOutOfLineContent">
|
||||
<element name="atom:content">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<optional>
|
||||
<attribute name="type">
|
||||
<ref name="atomMediaType"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<attribute name="src">
|
||||
<ref name="atomUri"/>
|
||||
</attribute>
|
||||
<empty/>
|
||||
</element>
|
||||
</define>
|
||||
<define name="atomContent">
|
||||
<choice>
|
||||
<ref name="atomInlineTextContent"/>
|
||||
<ref name="atomInlineXHTMLContent"/>
|
||||
<ref name="atomInlineOtherContent"/>
|
||||
<ref name="atomOutOfLineContent"/>
|
||||
</choice>
|
||||
</define>
|
||||
<!-- atom:author -->
|
||||
<define name="atomAuthor">
|
||||
<element name="atom:author">
|
||||
<ref name="atomPersonConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:category -->
|
||||
<define name="atomCategory">
|
||||
<element name="atom:category">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<attribute name="term"/>
|
||||
<optional>
|
||||
<attribute name="scheme">
|
||||
<ref name="atomUri"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="label"/>
|
||||
</optional>
|
||||
<ref name="undefinedContent"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:contributor -->
|
||||
<define name="atomContributor">
|
||||
<element name="atom:contributor">
|
||||
<ref name="atomPersonConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:generator -->
|
||||
<define name="atomGenerator">
|
||||
<element name="atom:generator">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<optional>
|
||||
<attribute name="uri">
|
||||
<ref name="atomUri"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="version"/>
|
||||
</optional>
|
||||
<text/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:icon -->
|
||||
<define name="atomIcon">
|
||||
<element name="atom:icon">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<ref name="atomUri"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:id -->
|
||||
<define name="atomId">
|
||||
<element name="atom:id">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<ref name="atomUri"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:logo -->
|
||||
<define name="atomLogo">
|
||||
<element name="atom:logo">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<ref name="atomUri"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:link -->
|
||||
<define name="atomLink">
|
||||
<element name="atom:link">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<attribute name="href">
|
||||
<ref name="atomUri"/>
|
||||
</attribute>
|
||||
<optional>
|
||||
<attribute name="rel">
|
||||
<choice>
|
||||
<ref name="atomNCName"/>
|
||||
<ref name="atomUri"/>
|
||||
</choice>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="type">
|
||||
<ref name="atomMediaType"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="hreflang">
|
||||
<ref name="atomLanguageTag"/>
|
||||
</attribute>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="title"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<attribute name="length"/>
|
||||
</optional>
|
||||
<ref name="undefinedContent"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:published -->
|
||||
<define name="atomPublished">
|
||||
<element name="atom:published">
|
||||
<ref name="atomDateConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:rights -->
|
||||
<define name="atomRights">
|
||||
<element name="atom:rights">
|
||||
<ref name="atomTextConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:source -->
|
||||
<define name="atomSource">
|
||||
<element name="atom:source">
|
||||
<ref name="atomCommonAttributes"/>
|
||||
<interleave>
|
||||
<zeroOrMore>
|
||||
<ref name="atomAuthor"/>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<ref name="atomCategory"/>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<ref name="atomContributor"/>
|
||||
</zeroOrMore>
|
||||
<optional>
|
||||
<ref name="atomGenerator"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomIcon"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomId"/>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<ref name="atomLink"/>
|
||||
</zeroOrMore>
|
||||
<optional>
|
||||
<ref name="atomLogo"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomRights"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomSubtitle"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomTitle"/>
|
||||
</optional>
|
||||
<optional>
|
||||
<ref name="atomUpdated"/>
|
||||
</optional>
|
||||
<zeroOrMore>
|
||||
<ref name="extensionElement"/>
|
||||
</zeroOrMore>
|
||||
</interleave>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:subtitle -->
|
||||
<define name="atomSubtitle">
|
||||
<element name="atom:subtitle">
|
||||
<ref name="atomTextConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:summary -->
|
||||
<define name="atomSummary">
|
||||
<element name="atom:summary">
|
||||
<ref name="atomTextConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:title -->
|
||||
<define name="atomTitle">
|
||||
<element name="atom:title">
|
||||
<ref name="atomTextConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- atom:updated -->
|
||||
<define name="atomUpdated">
|
||||
<element name="atom:updated">
|
||||
<ref name="atomDateConstruct"/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- Low-level simple types -->
|
||||
<define name="atomNCName">
|
||||
<data type="string">
|
||||
<param name="minLength">1</param>
|
||||
<param name="pattern">[^:]*</param>
|
||||
</data>
|
||||
</define>
|
||||
<!-- Whatever a media type is, it contains at least one slash -->
|
||||
<define name="atomMediaType">
|
||||
<data type="string">
|
||||
<param name="pattern">.+/.+</param>
|
||||
</data>
|
||||
</define>
|
||||
<!-- As defined in RFC 3066 -->
|
||||
<define name="atomLanguageTag">
|
||||
<data type="string">
|
||||
<param name="pattern">[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*</param>
|
||||
</data>
|
||||
</define>
|
||||
<!--
|
||||
Unconstrained; it's not entirely clear how IRI fit into
|
||||
xsd:anyURI so let's not try to constrain it here
|
||||
-->
|
||||
<define name="atomUri">
|
||||
<text/>
|
||||
</define>
|
||||
<!-- Whatever an email address is, it contains at least one @ -->
|
||||
<define name="atomEmailAddress">
|
||||
<data type="string">
|
||||
<param name="pattern">.+@.+</param>
|
||||
</data>
|
||||
</define>
|
||||
<!-- Simple Extension -->
|
||||
<define name="simpleExtensionElement">
|
||||
<element>
|
||||
<anyName>
|
||||
<except>
|
||||
<nsName ns="http://www.w3.org/2005/Atom"/>
|
||||
</except>
|
||||
</anyName>
|
||||
<text/>
|
||||
</element>
|
||||
</define>
|
||||
<!-- Structured Extension -->
|
||||
<define name="structuredExtensionElement">
|
||||
<element>
|
||||
<anyName>
|
||||
<except>
|
||||
<nsName ns="http://www.w3.org/2005/Atom"/>
|
||||
</except>
|
||||
</anyName>
|
||||
<choice>
|
||||
<group>
|
||||
<oneOrMore>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
</oneOrMore>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<text/>
|
||||
<ref name="anyElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</group>
|
||||
<group>
|
||||
<zeroOrMore>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
</zeroOrMore>
|
||||
<group>
|
||||
<optional>
|
||||
<text/>
|
||||
</optional>
|
||||
<oneOrMore>
|
||||
<ref name="anyElement"/>
|
||||
</oneOrMore>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<text/>
|
||||
<ref name="anyElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</group>
|
||||
</group>
|
||||
</choice>
|
||||
</element>
|
||||
</define>
|
||||
<!-- Other Extensibility -->
|
||||
<define name="extensionElement">
|
||||
<choice>
|
||||
<ref name="simpleExtensionElement"/>
|
||||
<ref name="structuredExtensionElement"/>
|
||||
</choice>
|
||||
</define>
|
||||
<define name="undefinedAttribute">
|
||||
<attribute>
|
||||
<anyName>
|
||||
<except>
|
||||
<name>xml:base</name>
|
||||
<name>xml:lang</name>
|
||||
<nsName ns=""/>
|
||||
</except>
|
||||
</anyName>
|
||||
</attribute>
|
||||
</define>
|
||||
<define name="undefinedContent">
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<text/>
|
||||
<ref name="anyForeignElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</define>
|
||||
<define name="anyElement">
|
||||
<element>
|
||||
<anyName/>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
<text/>
|
||||
<ref name="anyElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
<define name="anyForeignElement">
|
||||
<element>
|
||||
<anyName>
|
||||
<except>
|
||||
<nsName ns="http://www.w3.org/2005/Atom"/>
|
||||
</except>
|
||||
</anyName>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
<text/>
|
||||
<ref name="anyElement"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
<!-- XHTML -->
|
||||
<define name="anyXHTML">
|
||||
<element>
|
||||
<nsName ns="http://www.w3.org/1999/xhtml"/>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
<text/>
|
||||
<ref name="anyXHTML"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
<define name="xhtmlDiv">
|
||||
<element name="xhtml:div">
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
<text/>
|
||||
<ref name="anyXHTML"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
</grammar>
|
@ -1,28 +0,0 @@
|
||||
<element name="limits" ns="http://docs.openstack.org/common/api/v1.0"
|
||||
xmlns="http://relaxng.org/ns/structure/1.0">
|
||||
<element name="rates">
|
||||
<zeroOrMore>
|
||||
<element name="rate">
|
||||
<attribute name="uri"> <text/> </attribute>
|
||||
<attribute name="regex"> <text/> </attribute>
|
||||
<zeroOrMore>
|
||||
<element name="limit">
|
||||
<attribute name="value"> <text/> </attribute>
|
||||
<attribute name="verb"> <text/> </attribute>
|
||||
<attribute name="remaining"> <text/> </attribute>
|
||||
<attribute name="unit"> <text/> </attribute>
|
||||
<attribute name="next-available"> <text/> </attribute>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
<element name="absolute">
|
||||
<zeroOrMore>
|
||||
<element name="limit">
|
||||
<attribute name="name"> <text/> </attribute>
|
||||
<attribute name="value"> <text/> </attribute>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</element>
|
@ -185,7 +185,7 @@ class MethodInspector(object):
|
||||
|
||||
|
||||
def build_polling_task(retriever, condition=lambda value: value,
|
||||
sleep_time=1, time_out=0):
|
||||
sleep_time=1, time_out=0, initial_delay=0):
|
||||
"""Run a function in a loop with backoff on error.
|
||||
|
||||
The condition function runs based on the retriever function result.
|
||||
@ -197,7 +197,8 @@ def build_polling_task(retriever, condition=lambda value: value,
|
||||
raise loopingcall.LoopingCallDone(retvalue=obj)
|
||||
|
||||
call = loopingcall.BackOffLoopingCall(f=poll_and_check)
|
||||
return call.start(initial_delay=0, starting_interval=sleep_time,
|
||||
return call.start(initial_delay=initial_delay,
|
||||
starting_interval=sleep_time,
|
||||
max_interval=30, timeout=time_out)
|
||||
|
||||
|
||||
@ -210,7 +211,7 @@ def wait_for_task(polling_task):
|
||||
|
||||
|
||||
def poll_until(retriever, condition=lambda value: value,
|
||||
sleep_time=3, time_out=0):
|
||||
sleep_time=3, time_out=0, initial_delay=0):
|
||||
"""Retrieves object until it passes condition, then returns it.
|
||||
|
||||
If time_out_limit is passed in, PollTimeOut will be raised once that
|
||||
@ -218,7 +219,8 @@ def poll_until(retriever, condition=lambda value: value,
|
||||
|
||||
"""
|
||||
task = build_polling_task(retriever, condition=condition,
|
||||
sleep_time=sleep_time, time_out=time_out)
|
||||
sleep_time=sleep_time, time_out=time_out,
|
||||
initial_delay=initial_delay)
|
||||
return wait_for_task(task)
|
||||
|
||||
|
||||
|
@ -218,8 +218,6 @@ class ConfigurationsController(wsgi.Controller):
|
||||
def _refresh_on_all_instances(self, context, configuration_id):
|
||||
"""Refresh a configuration group on all single instances.
|
||||
"""
|
||||
LOG.debug("Re-applying configuration group '%s' to all instances.",
|
||||
configuration_id)
|
||||
single_instances = instances_models.DBInstance.find_all(
|
||||
tenant_id=context.project_id,
|
||||
configuration_id=configuration_id,
|
||||
@ -228,8 +226,8 @@ class ConfigurationsController(wsgi.Controller):
|
||||
|
||||
config = models.Configuration(context, configuration_id)
|
||||
for dbinstance in single_instances:
|
||||
LOG.debug("Re-applying configuration to instance: %s",
|
||||
dbinstance.id)
|
||||
LOG.info("Re-applying configuration %s to instance: %s",
|
||||
configuration_id, dbinstance.id)
|
||||
instance = instances_models.Instance.load(context, dbinstance.id)
|
||||
instance.update_configuration(config)
|
||||
|
||||
|
@ -314,7 +314,7 @@ class API(object):
|
||||
device_path='/dev/vdb', mount_point='/mnt/volume',
|
||||
backup_info=None, config_contents=None, root_password=None,
|
||||
overrides=None, cluster_config=None, snapshot=None,
|
||||
modules=None):
|
||||
modules=None, ds_version=None):
|
||||
"""Make an asynchronous call to prepare the guest
|
||||
as a database container optionally includes a backup id for restores
|
||||
"""
|
||||
@ -335,7 +335,8 @@ class API(object):
|
||||
device_path=device_path, mount_point=mount_point,
|
||||
backup_info=backup_info, config_contents=config_contents,
|
||||
root_password=root_password, overrides=overrides,
|
||||
cluster_config=cluster_config, snapshot=snapshot, modules=modules)
|
||||
cluster_config=cluster_config, snapshot=snapshot, modules=modules,
|
||||
ds_version=ds_version)
|
||||
|
||||
def _create_guest_queue(self):
|
||||
"""Call to construct, start and immediately stop rpc server in order
|
||||
@ -409,15 +410,14 @@ class API(object):
|
||||
self._call("reset_configuration", self.agent_high_timeout,
|
||||
version=version, configuration=configuration)
|
||||
|
||||
def stop_db(self, do_not_start_on_reboot=False):
|
||||
def stop_db(self):
|
||||
"""Stop the database server."""
|
||||
LOG.debug("Sending the call to stop the database process "
|
||||
"on the Guest.")
|
||||
version = self.API_BASE_VERSION
|
||||
|
||||
self._call("stop_db", self.agent_high_timeout,
|
||||
version=version,
|
||||
do_not_start_on_reboot=do_not_start_on_reboot)
|
||||
self._call("stop_db", self.agent_low_timeout,
|
||||
version=version)
|
||||
|
||||
def upgrade(self, instance_version, location, metadata=None):
|
||||
"""Make an asynchronous call to self upgrade the guest agent."""
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user