Merge "Change deployment model schema"

Jenkins 2016-03-31 02:57:50 +00:00 committed by Gerrit Code Review
commit 974928a4d9
8 changed files with 408 additions and 7 deletions

View File

@@ -23,7 +23,7 @@ Source of documentation. Latest version of documentation_.
specs
~~~~~~
~~~~~
Specs are detailed descriptions of proposed changes to the project.
Usually they answer what should change in the project, why and how it should change, and who is going to work on the change.

View File

@@ -292,6 +292,15 @@ class Connection(object):
    def deployment_create(self, values):
        deployment = models.Deployment()
        try:
            # TODO(rpromyshlennikov): remove after credentials refactoring
            values.setdefault(
                "credentials",
                [
                    ["openstack",
                     {"admin": values.get("admin"),
                      "users": values.get("users", [])}]
                ]
            )
            deployment.update(values)
            deployment.save()
        except db_exc.DBDuplicateEntry:
@@ -319,6 +328,15 @@ class Connection(object):
        values.pop("uuid", None)
        with session.begin():
            dpl = self._deployment_get(deployment, session=session)
            # TODO(rpromyshlennikov): remove after credentials refactoring
            values.setdefault(
                "credentials",
                [
                    ["openstack",
                     {"admin": values.get("admin"),
                      "users": values.get("users", [])}]
                ]
            )
            dpl.update(values)
        return dpl
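
Note on the backward-compatibility shim above: the `setdefault` call wraps legacy `admin`/`users` inputs into the new `credentials` layout only when the caller did not pass `credentials` explicitly. A minimal standalone sketch of that behaviour (plain Python, no Rally imports; the sample endpoint dict is made up for illustration):

# Illustrative only: mirrors the setdefault logic in deployment_create/update.
values = {
    "admin": {"username": "admin", "password": "secret"},  # made-up endpoint
    "users": [],
}

values.setdefault(
    "credentials",
    [
        ["openstack",
         {"admin": values.get("admin"),
          "users": values.get("users", [])}]
    ]
)

print(values["credentials"])
# [['openstack', {'admin': {'username': 'admin', 'password': 'secret'}, 'users': []}]]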

View File

@@ -61,7 +61,7 @@ Information for developers
DB migration in Rally is implemented via package *alembic*.
It is highly recommended to get familiar with it's documnetation
It is highly recommended to get familiar with it's documentation
available by the link_ before proceeding.
.. _link: https://alembic.readthedocs.org
@@ -73,7 +73,17 @@ create new DB revision and migration script with the following command
    alembic --config rally/common/db/sqlalchemy/alembic.ini revision -m <Message>

or

.. code-block:: shell

    alembic --config rally/common/db/sqlalchemy/alembic.ini revision --autogenerate -m <Message>
It will generate a migration script -- a file named `<UUID>_<Message>.py`
located in `rally/common/db/sqlalchemy/migrations/versions`.
Running alembic with the ``--autogenerate`` parameter does part of the routine work
for the developer: for example, it emits SQLite-compatible batch expressions for migrations.
The generated script should then be reviewed, edited if necessary
and added to the Rally source tree.
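
For reference, the skeleton produced by ``alembic revision`` looks roughly like the following. This is a sketch based on alembic's default template; the revision IDs, message, column name and table contents are placeholders, not part of this commit:

"""add example column

Revision ID: abcdef123456
Revises: 3177d36ea270
Create Date: <generated by alembic>

"""

# revision identifiers, used by Alembic.
revision = "abcdef123456"        # placeholder
down_revision = "3177d36ea270"   # placeholder parent revision
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa


def upgrade():
    # With render_as_batch=True (see env.py below), --autogenerate emits
    # batch operations, which SQLite needs for ALTER-like changes.
    with op.batch_alter_table("deployments") as batch_op:
        batch_op.add_column(sa.Column("example", sa.Text(), nullable=True))


def downgrade():
    with op.batch_alter_table("deployments") as batch_op:
        batch_op.drop_column("example")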

View File

@@ -38,6 +38,7 @@ def run_migrations_online():
    engine = api.get_engine()
    with engine.connect() as connection:
        context.configure(connection=connection,
                          render_as_batch=True,
                          target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()

View File

@@ -0,0 +1,100 @@
# Copyright (c) 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Merge credentials from users and admin
Revision ID: 3177d36ea270
Revises: ca3626f62937
Create Date: 2016-03-01 16:01:38.747048
"""
# revision identifiers, used by Alembic.
revision = "3177d36ea270"
down_revision = "ca3626f62937"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
deployments_helper = sa.Table(
"deployments",
sa.MetaData(),
sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("admin", sa.types.PickleType, nullable=True),
sa.Column("users", sa.types.PickleType, default=[], nullable=False),
sa.Column("credentials", sa.types.PickleType, nullable=True),
)
def upgrade():
with op.batch_alter_table("deployments", schema=None) as batch_op:
batch_op.add_column(
sa.Column("credentials", sa.PickleType(), nullable=True))
connection = op.get_bind()
for deployment in connection.execute(deployments_helper.select()):
creds = [
["openstack",
{
"admin": deployment.admin,
"users": deployment.users
}]
]
connection.execute(
deployments_helper.update().where(
deployments_helper.c.id == deployment.id).values(
credentials=creds))
with op.batch_alter_table("deployments", schema=None) as batch_op:
batch_op.alter_column("credentials",
existing_type=sa.PickleType,
existing_nullable=True,
nullable=False)
batch_op.drop_column("admin")
batch_op.drop_column("users")
def downgrade():
with op.batch_alter_table("deployments", schema=None) as batch_op:
batch_op.add_column(
sa.Column("users", sa.PickleType(),
nullable=True))
batch_op.add_column(
sa.Column("admin", sa.PickleType(), nullable=True))
connection = op.get_bind()
for deployment in connection.execute(deployments_helper.select()):
admin = None
users = []
for credentials in deployment.credentials:
if credentials[0] == "openstack":
admin = credentials[1].get("admin")
users = credentials[1].get("users", [])
connection.execute(
deployments_helper.update().where(
deployments_helper.c.id == deployment.id).values(
admin=admin, users=users))
with op.batch_alter_table("deployments", schema=None) as batch_op:
batch_op.alter_column("users",
existing_type=sa.PickleType,
existing_nullable=True,
nullable=False)
batch_op.drop_column("credentials")

View File

@@ -80,11 +80,7 @@ class Deployment(BASE, RallyBase):
        nullable=False,
    )

    # NOTE(boris-42): This is pickled rally.object.Endpoint object
    admin = sa.Column(types.PickleType, nullable=True)
    # NOTE(boris-42): This is list of pickled rally.object.Endpoint objects
    users = sa.Column(types.PickleType, default=[], nullable=False)

    credentials = sa.Column(types.PickleType, default=[], nullable=False)

    status = sa.Column(
        sa.Enum(*consts.DeployStatus, name="enum_deploy_status"),
@@ -100,6 +96,24 @@ class Deployment(BASE, RallyBase):
        foreign_keys=parent_uuid,
    )

    # TODO(rpromyshlennikov): remove admin after credentials refactoring
    @property
    def admin(self):
        return self.credentials[0][1]["admin"]

    @admin.setter
    def admin(self, value):
        pass

    # TODO(rpromyshlennikov): remove users after credentials refactoring
    @property
    def users(self):
        return self.credentials[0][1]["users"]

    @users.setter
    def users(self, value):
        pass


class Resource(BASE, RallyBase):
    """Represent a resource of a deployment."""

View File

@@ -21,6 +21,7 @@ import pprint
import alembic
import mock
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
import six
import sqlalchemy as sa
@@ -28,6 +29,7 @@ import rally
from rally.common import db
from rally.common.db.sqlalchemy import api
from rally.common.db.sqlalchemy import models
from tests.unit.common.db import test_migrations_base as base
from tests.unit import test as rtest
@@ -202,3 +204,62 @@ class MigrationTestCase(rtest.DBTestCase,
        self.assertEqual("remove_table", action)
        self.assertIsInstance(object, sa.Table)
        self.assertEqual("workers", object.name)


class MigrationWalkTestCase(rtest.DBTestCase, base.BaseWalkMigrationMixin):
    """Test case covers upgrade and downgrade methods in migrations."""

    snake_walk = True
    downgrade = True

    def setUp(self):
        super(MigrationWalkTestCase, self).setUp()
        self.engine = api.get_engine()

    def assertColumnExists(self, engine, table, column):
        t = db_utils.get_table(engine, table)
        self.assertIn(column, t.c)

    def assertColumnsExists(self, engine, table, columns):
        for column in columns:
            self.assertColumnExists(engine, table, column)

    def assertColumnCount(self, engine, table, columns):
        t = db_utils.get_table(engine, table)
        self.assertEqual(len(t.columns), len(columns))

    def assertColumnNotExists(self, engine, table, column):
        t = db_utils.get_table(engine, table)
        self.assertNotIn(column, t.c)

    def assertIndexExists(self, engine, table, index):
        t = db_utils.get_table(engine, table)
        index_names = [idx.name for idx in t.indexes]
        self.assertIn(index, index_names)

    def assertColumnType(self, engine, table, column, sqltype):
        t = db_utils.get_table(engine, table)
        col = getattr(t.c, column)
        self.assertIsInstance(col.type, sqltype)

    def assertIndexMembers(self, engine, table, index, members):
        self.assertIndexExists(engine, table, index)
        t = db_utils.get_table(engine, table)
        index_columns = None
        for idx in t.indexes:
            if idx.name == index:
                index_columns = idx.columns.keys()
                break
        self.assertEqual(sorted(members), sorted(index_columns))

    def test_walk_versions(self):
        self.walk_versions(self.engine, self.snake_walk, self.downgrade)

    def _check_3177d36ea270(self, engine, data):
        self.assertEqual(
            "3177d36ea270", api.get_backend().schema_revision(engine=engine))
        self.assertColumnExists(engine, "deployments", "credentials")
        self.assertColumnNotExists(engine, "deployments", "admin")
        self.assertColumnNotExists(engine, "deployments", "users")
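
The walk machinery (see test_migrations_base below) also lets a test seed or inspect data before a revision is applied, via ``_pre_upgrade_<revision>`` hooks whose return value is handed to the matching ``_check_<revision>`` as its ``data`` argument. A hypothetical hook for this revision could look like the following; the method body is illustrative and not part of this commit:

    def _pre_upgrade_3177d36ea270(self, engine):
        # Hypothetical hook: runs against the schema *before* the revision,
        # so the legacy columns must still be present here. A real hook
        # would typically also insert legacy rows through
        # db_utils.get_table(engine, "deployments").
        self.assertColumnExists(engine, "deployments", "admin")
        self.assertColumnExists(engine, "deployments", "users")
        # Whatever is returned here is passed to _check_3177d36ea270
        # as its `data` argument.
        return {"seeded": False}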

View File

@@ -0,0 +1,197 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Ripped off from Murano's test_migrations.py
#
# There is ongoing work to extract similar code to oslo incubator. Once it is
# extracted we'll be able to remove this file and use oslo.
import io
import os
from alembic import command
from alembic import config as alembic_config
from alembic import migration
from alembic import script as alembic_script
from oslo_config import cfg
import rally.common.db.sqlalchemy.api
from rally.common.i18n import _LE
from rally.common import logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseWalkMigrationMixin(object):

    ALEMBIC_CONFIG = alembic_config.Config(
        os.path.join(os.path.dirname(rally.common.db.sqlalchemy.api.__file__),
                     "alembic.ini")
    )

    ALEMBIC_CONFIG.rally_config = CONF

    def _configure(self, engine):
        """Configure database connection.

        For each type of repository we should do some of configure steps.
        For migrate_repo we should set under version control our database.
        For alembic we should configure database settings. For this goal we
        should use oslo.config and openstack.common.db.sqlalchemy.session with
        database functionality (reset default settings and session cleanup).
        """
        CONF.set_override("connection", str(engine.url), group="database")

    def _alembic_command(self, alembic_command, engine, *args, **kwargs):
        """Call alembic command.

        Most of alembic command return data into output.
        We should redefine this setting for getting info.
        """
        self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
        CONF.set_override("connection", str(engine.url), group="database")
        getattr(command, alembic_command)(*args, **kwargs)
        res = buf.getvalue().strip()
        LOG.debug("Alembic command `{command}` returns: {result}".format(
            command=alembic_command, result=res))
        return res

    def _up_and_down_versions(self):
        """Get revisions versions.

        Since alembic version has a random algorithm of generation
        (SA-migrate has an ordered autoincrement naming) we should store
        a tuple of versions (version for upgrade and version for downgrade)
        for successful testing of migrations in up>down>up mode.
        """
        env = alembic_script.ScriptDirectory.from_config(self.ALEMBIC_CONFIG)
        versions = []
        for rev in env.walk_revisions():
            if (rev.revision ==
                    rally.common.db.sqlalchemy.api.INITIAL_REVISION_UUID):
                # NOTE(rpromyshlennikov): we skip initial migration here
                continue
            versions.append((rev.revision, rev.down_revision or "-1"))
        versions.reverse()
        return versions

    def walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        """Walk through versions.

        Determine latest version script from the repo, then
        upgrade from 1 through to the latest, with no data
        in the databases. This just checks that the schema itself
        upgrades successfully.
        """
        self._configure(engine)
        up_and_down_versions = self._up_and_down_versions()
        for ver_up, ver_down in up_and_down_versions:
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, ver_up, with_data=True)
            if snake_walk:
                downgraded = self._migrate_down(engine,
                                                ver_down,
                                                with_data=True,
                                                next_version=ver_up)
                if downgraded:
                    self._migrate_up(engine, ver_up)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            up_and_down_versions.reverse()
            for ver_up, ver_down in up_and_down_versions:
                # downgrade -> upgrade -> downgrade
                downgraded = self._migrate_down(engine,
                                                ver_down, next_version=ver_up)
                if snake_walk and downgraded:
                    self._migrate_up(engine, ver_up)
                    self._migrate_down(engine, ver_down, next_version=ver_up)

    def _get_version_from_db(self, engine):
        """Return latest version for each type of migrate repo from db."""
        conn = engine.connect()
        try:
            context = migration.MigrationContext.configure(conn)
            version = context.get_current_revision() or "-1"
        finally:
            conn.close()
        return version

    def _migrate(self, engine, version, cmd):
        """Base method for manipulation with migrate repo.

        It will upgrade or downgrade the actual database.
        """
        self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version)

    def _migrate_down(self, engine, version, with_data=False,
                      next_version=None):
        """Migrate down to a provided version of the db."""
        try:
            self._migrate(engine, version, "downgrade")
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False
        self.assertEqual(version, self._get_version_from_db(engine))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%s" % next_version, None)
            if post_downgrade:
                post_downgrade(engine)

        return True

    def _migrate_up(self, engine, version, with_data=False):
        """Migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        check_version = version
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%s" % check_version, None)
                if pre_upgrade:
                    data = pre_upgrade(engine)
            self._migrate(engine, version, "upgrade")
            self.assertEqual(version, self._get_version_from_db(engine))
            if with_data:
                check = getattr(self, "_check_%s" % check_version, None)
                if check:
                    check(engine, data)
        except Exception:
            LOG.error(_LE("Failed to migrate to version {ver} on engine {eng}")
                      .format(ver=version, eng=engine))
            raise
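
To summarize the ordering that ``walk_versions`` produces when both ``snake_walk`` and ``downgrade`` are enabled, here is a small self-contained sketch (plain Python; the revision IDs are placeholders, and it assumes every downgrade succeeds) that prints the sequence of alembic operations the mixin would perform:

def snake_walk_plan(versions):
    """Reproduce the up->down->up, then down->up->down ordering of
    BaseWalkMigrationMixin.walk_versions (schema checks omitted)."""
    plan = []
    for ver_up, ver_down in versions:
        # upgrade -> downgrade -> upgrade
        plan += [("upgrade", ver_up),
                 ("downgrade", ver_down),
                 ("upgrade", ver_up)]
    for ver_up, ver_down in reversed(versions):
        # downgrade -> upgrade -> downgrade
        plan += [("downgrade", ver_down),
                 ("upgrade", ver_up),
                 ("downgrade", ver_down)]
    return plan


# Two placeholder revisions: rev_a has no parent ("-1"), rev_b revises rev_a.
for step in snake_walk_plan([("rev_a", "-1"), ("rev_b", "rev_a")]):
    print(step)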