Merge "Switch Designate to Alembic database migrations"

Zuul 2022-08-30 05:11:25 +00:00 committed by Gerrit Code Review
commit 218e11ea20
69 changed files with 1779 additions and 1840 deletions

View File

@ -13,41 +13,76 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from io import StringIO
import os
import sys
from migrate.versioning import api as versioning_api
from alembic import command as alembic_command
from alembic.config import Config
from oslo_config import cfg
from oslo_log import log as logging
from designate.manage import base
from designate.sqlalchemy import utils
LOG = logging.getLogger(__name__)
REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'storage', 'impl_sqlalchemy',
'migrate_repo'))
cfg.CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy',
group='storage:sqlalchemy')
CONF = cfg.CONF
INIT_VERSION = 69
def get_manager():
return utils.get_migration_manager(
REPOSITORY, CONF['storage:sqlalchemy'].connection, INIT_VERSION)
LOG = logging.getLogger(__name__)
class DatabaseCommands(base.Commands):
def version(self):
current = get_manager().version()
latest = versioning_api.version(repository=REPOSITORY).value
def _get_alembic_config(self, db_url=None, stringio_buffer=sys.stdout):
alembic_dir = os.path.join(os.path.dirname(__file__),
os.pardir, 'storage/impl_sqlalchemy')
alembic_cfg = Config(os.path.join(alembic_dir, 'alembic.ini'),
stdout=stringio_buffer)
alembic_cfg.set_main_option(
'script_location', 'designate.storage.impl_sqlalchemy:alembic')
if db_url:
alembic_cfg.set_main_option('sqlalchemy.url', db_url)
else:
alembic_cfg.set_main_option('sqlalchemy.url',
CONF['storage:sqlalchemy'].connection)
return alembic_cfg
def current(self, db_url=None, stringio_buffer=sys.stdout):
alembic_command.current(
self._get_alembic_config(db_url=db_url,
stringio_buffer=stringio_buffer))
def heads(self, db_url=None, stringio_buffer=sys.stdout):
alembic_command.heads(
self._get_alembic_config(db_url=db_url,
stringio_buffer=stringio_buffer))
def history(self, db_url=None, stringio_buffer=sys.stdout):
alembic_command.history(
self._get_alembic_config(db_url=db_url,
stringio_buffer=stringio_buffer))
def version(self, db_url=None):
# Using StringIO buffers here to keep the command output as close as
# possible to what it was before the migration to alembic.
current_buffer = StringIO()
latest_buffer = StringIO()
alembic_command.current(
self._get_alembic_config(db_url=db_url,
stringio_buffer=current_buffer))
current = current_buffer.getvalue().replace('\n', ' ')
current_buffer.close()
alembic_command.heads(
self._get_alembic_config(db_url=db_url,
stringio_buffer=latest_buffer))
latest = latest_buffer.getvalue().replace('\n', ' ')
latest_buffer.close()
print("Current: %s Latest: %s" % (current, latest))
def sync(self):
get_manager().upgrade(None)
def sync(self, db_url=None, stringio_buffer=sys.stdout):
alembic_command.upgrade(
self._get_alembic_config(db_url=db_url,
stringio_buffer=stringio_buffer), 'head')
@base.args('revision', nargs='?')
def upgrade(self, revision):
get_manager().upgrade(revision)
@base.args('revision', default='head', nargs='?',
help='The revision identifier to upgrade to.')
def upgrade(self, revision, db_url=None, stringio_buffer=sys.stdout):
alembic_command.upgrade(
self._get_alembic_config(
db_url=db_url, stringio_buffer=stringio_buffer), revision)
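For reference, a minimal sketch of driving the new alembic-backed commands from Python. This is illustrative only: the module path designate.manage.database and the db_url are assumptions, and in practice these methods are reached through the designate-manage database CLI entry points rather than by instantiating the class directly.

from io import StringIO

from designate.manage.database import DatabaseCommands  # assumed module path

commands = DatabaseCommands()

# Print "Current: <revision> Latest: <head>" for the configured database.
# db_url is optional and falls back to CONF['storage:sqlalchemy'].connection.
commands.version(db_url='sqlite:////tmp/designate.sqlite')  # placeholder URL

# Upgrade to the newest revision, capturing alembic's output in a buffer
# instead of writing it to sys.stdout.
output = StringIO()
commands.sync(db_url='sqlite:////tmp/designate.sqlite', stringio_buffer=output)
print(output.getvalue())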

View File

@ -0,0 +1,105 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
# for all available tokens
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python-dateutil library that can be
# installed by adding `alembic[tz]` to the pip requirements
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url =
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

View File

@ -0,0 +1,5 @@
Please use the "designate-manage database" command for database management.
Developers adding new migrations can run 'alembic revision -m "<migration title>"'
from the designate/storage/impl_sqlalchemy directory where the alembic.ini file
is located.
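The same step can also be done through alembic's Python API; a rough sketch follows, mirroring the Config wiring used by the manage command above (the revision message is a hypothetical placeholder):

from alembic import command as alembic_command
from alembic.config import Config

# Point alembic at the ini file and migration scripts shipped with designate.
cfg = Config('designate/storage/impl_sqlalchemy/alembic.ini')
cfg.set_main_option('script_location',
                    'designate.storage.impl_sqlalchemy:alembic')

# Generate a new, empty revision file under alembic/versions/.
# The message below is only a placeholder.
alembic_command.revision(cfg, message='add example table')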

View File

@ -0,0 +1,83 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata,
transactional_ddl=True, transaction_per_migration=True
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
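run_migrations_offline() is what enables alembic's offline (SQL-generation) runs; a hedged sketch of emitting the upgrade SQL without touching a live database, reusing the same Config wiring as the manage command (the URL is a placeholder used only for dialect selection):

from alembic import command as alembic_command
from alembic.config import Config

cfg = Config('designate/storage/impl_sqlalchemy/alembic.ini')
cfg.set_main_option('script_location',
                    'designate.storage.impl_sqlalchemy:alembic')
# Placeholder URL; offline mode only needs it to pick the SQL dialect.
cfg.set_main_option('sqlalchemy.url', 'mysql+pymysql://user:pass@db/designate')

# sql=True triggers offline mode, so run_migrations_offline() is used and
# the upgrade statements are printed instead of executed.
alembic_command.upgrade(cfg, 'head', sql=True)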

View File

@ -0,0 +1,43 @@
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import op
from oslo_log import log as logging
import sqlalchemy as sa
LOG = logging.getLogger(__name__)
def is_migration_needed(equivalent_revision):
metadata = sa.MetaData(bind=op.get_bind())
sa.MetaData.reflect(metadata)
if 'migrate_version' not in metadata.tables.keys():
return True
version_sql = sa.text("SELECT version FROM migrate_version;")
legacy_db_rev = None
try:
legacy_db_rev = op.get_bind().execute(version_sql).scalar_one_or_none()
except Exception as e:
LOG.debug("Unable to query the database for the legacy revision "
"number. Assuming there is no legacy migration revision "
"or the migration is running in offline mode. Error: %s",
str(e))
# Check if this migration was already run by the legacy sqlalchemy-migrate
# migrations.
if legacy_db_rev and int(legacy_db_rev) >= equivalent_revision:
return False
return True
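A minimal sketch of the guard pattern that the revision modules below follow, assuming a hypothetical future revision whose legacy sqlalchemy-migrate equivalent would have been number 104; every identifier in the sketch is a placeholder:

from alembic import op
import sqlalchemy as sa

from designate.storage.impl_sqlalchemy.alembic import legacy_utils

# Placeholder revision identifiers.
revision = '0123456789ab'
down_revision = 'ba9876543210'
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Skip the migration when the legacy migrate_version table already
    # records an equal or newer sqlalchemy-migrate revision.
    if not legacy_utils.is_migration_needed(104):
        return
    # Hypothetical schema change.
    op.add_column('zones', sa.Column('example_flag', sa.Boolean,
                                     default=False))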

View File

@ -1,10 +1,3 @@
#!/usr/bin/env python
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -17,7 +10,23 @@
# License for the specific language governing permissions and limitations
# under the License.
from migrate.versioning.shell import main
"""${message}
if __name__ == '__main__':
main(debug='False')
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}

View File

@ -0,0 +1,95 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_zone_attributes
Revision ID: 0bcf910ea823
Revises: a69b45715cc1
Create Date: 2022-07-29 21:36:15.117658
"""
from alembic import op
from oslo_utils import timeutils
import sqlalchemy as sa
from designate.sqlalchemy.types import UUID
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
from designate import utils
# revision identifiers, used by Alembic.
revision = '0bcf910ea823'
down_revision = 'a69b45715cc1'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 085_add_zone_attributes
# Note from original migration file:
# Move zone masters to their own table, and allow for abstract keys in the
# attributes table
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(85):
return
metadata = sa.MetaData()
zone_masters_table = op.create_table(
'zone_masters', metadata,
sa.Column('id', UUID, default=utils.generate_uuid, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('host', sa.String(32), nullable=False),
sa.Column('port', sa.Integer, nullable=False),
sa.Column('zone_id', UUID, nullable=False),
sa.UniqueConstraint('host', 'port', 'zone_id', name='unique_masters'),
sa.ForeignKeyConstraint(['zone_id'], ['zones.id'], ondelete='CASCADE'))
zone_attr_sql = sa.text(
'SELECT id, version, created_at, updated_at, value, zone_id FROM '
'zone_attributes WHERE \'key\' = \'master\';')
masters = op.get_bind().execute(zone_attr_sql).fetchall()
masters_input = []
for master in masters:
host, port = utils.split_host_port(
master['value'])
masters_input.append({
'id': master['id'],
'version': master['version'],
'created_at': master['created_at'],
'updated_at': master['updated_at'],
'zone_id': master['zone_id'],
'host': host,
'port': port
})
op.bulk_insert(zone_masters_table, masters_input)
master_delete_sql = sa.text(
'DELETE FROM zone_attributes WHERE \'key\' = \'master\';')
op.get_bind().execute(master_delete_sql)
with op.batch_alter_table('zone_attributes') as batch_op:
batch_op.alter_column('key', type_=sa.String(50),
existing_type=sa.Enum)

View File

@ -0,0 +1,48 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""fix_service_charset
Revision ID: 15b34ff3ecb8
Revises: 304d41c3847a
Create Date: 2022-08-01 16:53:34.612019
"""
from alembic import op
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = '15b34ff3ecb8'
down_revision = '304d41c3847a'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 098_fix_service_charset
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(98):
return
current_bind = op.get_bind()
if current_bind.dialect.name != 'mysql':
return
op.execute('SET foreign_key_checks = 0;')
op.execute('ALTER TABLE service_statuses CONVERT TO CHARACTER SET utf8;')
op.execute('SET foreign_key_checks = 1;')
op.execute('ALTER DATABASE DEFAULT CHARACTER SET utf8;')

View File

@ -0,0 +1,59 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_services
Revision ID: 304d41c3847a
Revises: d04819112169
Create Date: 2022-08-01 16:41:55.139558
"""
from alembic import op
import sqlalchemy as sa
from designate.sqlalchemy.types import UUID
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
from designate import utils
# revision identifiers, used by Alembic.
revision = '304d41c3847a'
down_revision = 'd04819112169'
branch_labels = None
depends_on = None
SERVICE_STATES = ["UP", "DOWN", "WARNING"]
# Equivalent to legacy sqlalchemy-migrate revision 097_add_services
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(97):
return
metadata = sa.MetaData()
op.create_table(
'service_statuses', metadata,
sa.Column('id', UUID, default=utils.generate_uuid, primary_key=True),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('service_name', sa.String(40), nullable=False),
sa.Column('hostname', sa.String(255), nullable=False),
sa.Column('heartbeated_at', sa.DateTime, nullable=True),
sa.Column('status', sa.Enum(name='service_statuses_enum',
*SERVICE_STATES), nullable=False),
sa.Column('stats', sa.Text, nullable=False),
sa.Column('capabilities', sa.Text, nullable=False))

View File

@ -0,0 +1,46 @@
# Copyright 2016 Rackspace
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_rrset_indexes_for_filtering_perf
Revision ID: 7977deaa5167
Revises: 15b34ff3ecb8
Create Date: 2022-08-01 17:13:01.429689
"""
from alembic import op
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = '7977deaa5167'
down_revision = '15b34ff3ecb8'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision
# 099_add_rrset_indexes_for_filtering_perf
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(99):
return
op.create_index('rrset_updated_at', 'recordsets', ['updated_at'])
op.create_index('rrset_zoneid', 'recordsets', ['zone_id'])
op.create_index('rrset_type', 'recordsets', ['type'])
op.create_index('rrset_ttl', 'recordsets', ['ttl'])
op.create_index('rrset_tenant_id', 'recordsets', ['tenant_id'])

View File

@ -0,0 +1,149 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""domain_to_zone_rename
Revision ID: 867a331ce1fc
Revises: c9f427f7180a
Create Date: 2022-07-29 18:41:19.427853
"""
from alembic import op
import sqlalchemy as sa
from designate.sqlalchemy.types import UUID
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = '867a331ce1fc'
down_revision = 'c9f427f7180a'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 080_domain_to_zone_rename
# Notes from the original migration file:
# This migration removes all references to domain from our Database.
# We rename the domains and domain_attribute tables, and rename any columns
# that had "domain" in the name.
# There is a follow on patch to recreate the FKs for the newly renamed
# tables as the lib we use doesn't seem to like creating FKs on renamed
# tables until after the migration is complete.
def _drop_foreign_key(fk_def):
table = fk_def[0].table
col = fk_def[0]
ref_col = fk_def[1]
# We need a naming convention to find unnamed foreign keys on sqlite
naming_convention = {
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s"}
# Use .copy() to avoid the set changing during the for operation
# We need to search for the foreign keys as they may be named differently
# between different dialects (mysql, sqlite, etc.)
for fk in table.foreign_keys.copy():
# We must use batch mode because the unit tests use sqlite
with op.batch_alter_table(
table.name, naming_convention=naming_convention) as batch_op:
# Check if the fk is the one we want
if fk.column == col and fk.parent == ref_col:
batch_op.drop_constraint(fk.constraint.name,
type_='foreignkey')
# Check if the fk is the one we want (sometimes it seems the parent
# / col is switched)
if fk.parent == col and fk.column == ref_col:
batch_op.drop_constraint(fk.constraint.name,
type_='foreignkey')
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(80):
return
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"}
metadata = sa.MetaData(naming_convention=convention)
metadata.bind = op.get_bind()
# Get all the tables
domains_table = sa.Table('domains', metadata, autoload=True)
domain_attrib_table = sa.Table('domain_attributes', metadata,
autoload=True)
recordsets_table = sa.Table('recordsets', metadata, autoload=True)
records_table = sa.Table('records', metadata, autoload=True)
ztr_table = sa.Table('zone_transfer_requests', metadata, autoload=True)
zta_table = sa.Table('zone_transfer_accepts', metadata, autoload=True)
# Remove the affected FKs
# Define FKs
fks = [
[domains_table.c.id, domains_table.c.parent_domain_id],
[domain_attrib_table.c.domain_id, domains_table.c.id],
[recordsets_table.c.domain_id, domains_table.c.id],
[records_table.c.domain_id, domains_table.c.id],
[ztr_table.c.domain_id, domains_table.c.id],
[zta_table.c.domain_id, domains_table.c.id]
]
# Drop FKs
for fk in fks:
_drop_foreign_key(fk)
with op.batch_alter_table('domains') as batch_op:
batch_op.alter_column('parent_domain_id',
new_column_name='parent_zone_id',
existing_type=UUID)
op.rename_table('domains', 'zones')
with op.batch_alter_table('domain_attributes') as batch_op:
batch_op.alter_column('domain_id', new_column_name='zone_id',
existing_type=UUID)
op.rename_table('domain_attributes', 'zone_attributes')
with op.batch_alter_table('recordsets') as batch_op:
batch_op.alter_column('domain_id', new_column_name='zone_id',
existing_type=UUID)
batch_op.alter_column('domain_shard', new_column_name='zone_shard',
existing_type=sa.SmallInteger)
with op.batch_alter_table('records') as batch_op:
batch_op.alter_column('domain_id', new_column_name='zone_id',
existing_type=UUID)
batch_op.alter_column('domain_shard', new_column_name='zone_shard',
existing_type=sa.SmallInteger)
with op.batch_alter_table('zone_transfer_requests') as batch_op:
batch_op.alter_column('domain_id', new_column_name='zone_id',
existing_type=UUID)
with op.batch_alter_table('zone_transfer_accepts') as batch_op:
batch_op.alter_column('domain_id', new_column_name='zone_id',
existing_type=UUID)
with op.batch_alter_table('zone_tasks') as batch_op:
batch_op.alter_column('domain_id', new_column_name='zone_id',
existing_type=UUID)

View File

@ -0,0 +1,48 @@
# Copyright 2018 Canonical Ltd.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""support_caa_records
Revision ID: 91eb1eb7c882
Revises: b8999fd10721
Create Date: 2022-08-01 17:32:21.386556
"""
from alembic import op
import sqlalchemy as sa
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = '91eb1eb7c882'
down_revision = 'b8999fd10721'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 102_support_caa_records
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(102):
return
RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
'PTR', 'SSHFP', 'SOA', 'NAPTR', 'CAA']
with op.batch_alter_table('recordsets') as batch_op:
batch_op.alter_column('type', type_=sa.Enum(name='record_types',
*RECORD_TYPES),
existing_type=sa.Enum, existing_nullable=False)

View File

@ -0,0 +1,60 @@
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""unique_service_status
Revision ID: 93a00a815f07
Revises: 7977deaa5167
Create Date: 2022-08-01 17:17:44.572964
"""
import sys
from alembic import op
from oslo_log import log as logging
import sqlalchemy as sa
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = '93a00a815f07'
down_revision = '7977deaa5167'
branch_labels = None
depends_on = None
LOG = logging.getLogger()
# Equivalent to legacy sqlalchemy-migrate revision 100_unique_service_status
EXPLANATION = """
You need to manually remove duplicate entries from the database.
The error message was:
%s
"""
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(100):
return
try:
with op.batch_alter_table('service_statuses') as batch_op:
batch_op.create_unique_constraint('unique_service_status',
['service_name', 'hostname'])
except sa.exc.IntegrityError as e:
LOG.error(EXPLANATION, e)
# Use sys.exit so we don't blow up with a huge trace
sys.exit(1)

View File

@ -0,0 +1,45 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_delayed_notify_column
Revision ID: a69b45715cc1
Revises: f9f969f9d85e
Create Date: 2022-07-29 21:30:12.127816
"""
from alembic import op
import sqlalchemy as sa
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = 'a69b45715cc1'
down_revision = 'f9f969f9d85e'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision
# 084_add_delayed_notify_column
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(84):
return
op.add_column('zones',
sa.Column('delayed_notify', sa.Boolean, default=False))
op.create_index('delayed_notify', 'zones', ['delayed_notify'])

View File

@ -0,0 +1,48 @@
# Copyright 2018 Canonical Ltd.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""support_naptr_records
Revision ID: b8999fd10721
Revises: 93a00a815f07
Create Date: 2022-08-01 17:25:33.058845
"""
from alembic import op
import sqlalchemy as sa
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = 'b8999fd10721'
down_revision = '93a00a815f07'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 101_support_naptr_records
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(101):
return
RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
'PTR', 'SSHFP', 'SOA', 'NAPTR']
with op.batch_alter_table('recordsets') as batch_op:
batch_op.alter_column('type', type_=sa.Enum(name='record_types',
*RECORD_TYPES),
existing_type=sa.Enum, existing_nullable=False)

View File

@ -0,0 +1,65 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""unique_ns_record
Revision ID: bfcfc4a07487
Revises: d9a1883e93e9
Create Date: 2022-07-29 21:05:19.276173
"""
import sys
from alembic import op
from oslo_log import log as logging
import sqlalchemy as sa
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = 'bfcfc4a07487'
down_revision = 'd9a1883e93e9'
branch_labels = None
depends_on = None
LOG = logging.getLogger()
# Equivalent to legacy sqlalchemy-migrate revision 082_unique_ns_record
# Note from the original migration script:
# Add Unique constraint on ('pool_id', 'hostname') in the pool_ns_records
# table. Bug #1517389
EXPLANATION = """
You need to manually remove duplicate entries from the database.
The error message was:
%s
"""
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(82):
return
try:
with op.batch_alter_table('pool_ns_records') as batch_op:
batch_op.create_unique_constraint('unique_ns_name',
['pool_id', 'hostname'])
except sa.exc.IntegrityError as e:
LOG.error(EXPLANATION, e)
# Use sys.exit so we don't blow up with a huge trace
sys.exit(1)

View File

@ -0,0 +1,340 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""liberty
Revision ID: c9f427f7180a
Revises:
Create Date: 2022-07-28 23:06:40.731452
"""
from alembic import op
from oslo_config import cfg
from oslo_utils import timeutils
import sqlalchemy as sa
from designate.conf import central
from designate.sqlalchemy.types import UUID
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = 'c9f427f7180a'
down_revision = None
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 070_liberty
CONF = cfg.CONF
central.register_opts(CONF)
ACTIONS = ('CREATE', 'DELETE', 'UPDATE', 'NONE')
POOL_PROVISIONERS = ('UNMANAGED',)
RECORD_TYPES = ('A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS', 'PTR',
'SSHFP', 'SOA')
RESOURCE_STATUSES = ('ACTIVE', 'PENDING', 'DELETED', 'ERROR')
TASK_STATUSES = ('ACTIVE', 'PENDING', 'DELETED', 'ERROR', 'COMPLETE')
TSIG_ALGORITHMS = ('hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256',
'hmac-sha384', 'hmac-sha512')
TSIG_SCOPES = ('POOL', 'ZONE')
ZONE_ATTRIBUTE_KEYS = ('master',)
ZONE_TASK_TYPES = ('IMPORT', 'EXPORT')
ZONE_TYPES = ('PRIMARY', 'SECONDARY')
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(70):
return
metadata = sa.MetaData()
pools_table = op.create_table(
'pools', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('description', sa.Unicode(160), nullable=True),
sa.Column('tenant_id', sa.String(36), nullable=True),
sa.Column('provisioner', sa.Enum(name='pool_provisioner',
*POOL_PROVISIONERS),
nullable=False, server_default='UNMANAGED'),
sa.UniqueConstraint('name', name='unique_pool_name'))
op.bulk_insert(
pools_table,
[{'id': CONF['service:central'].default_pool_id,
'name': 'default',
'version': 1}])
op.create_table(
'pool_ns_records', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('pool_id', UUID, nullable=False),
sa.Column('priority', sa.Integer, nullable=False),
sa.Column('hostname', sa.String(255), nullable=False),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'))
op.create_table(
'pool_attributes', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('key', sa.String(255), nullable=False),
sa.Column('value', sa.String(255), nullable=False),
sa.Column('pool_id', UUID, nullable=False),
sa.UniqueConstraint('pool_id', 'key', 'value',
name='unique_pool_attribute'),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'))
op.create_table(
'domains', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('version', sa.Integer, nullable=False),
sa.Column('tenant_id', sa.String(36), default=None, nullable=True),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('email', sa.String(255), nullable=False),
sa.Column('ttl', sa.Integer, default=CONF.default_ttl, nullable=False),
sa.Column('refresh', sa.Integer, nullable=False),
sa.Column('retry', sa.Integer, nullable=False),
sa.Column('expire', sa.Integer, nullable=False),
sa.Column('minimum', sa.Integer, nullable=False),
sa.Column('parent_domain_id', UUID, default=None, nullable=True),
sa.Column('serial', sa.Integer, nullable=False, server_default='1'),
sa.Column('deleted', sa.CHAR(32), nullable=False, default='0',
server_default='0'),
sa.Column('deleted_at', sa.DateTime, nullable=True, default=None),
sa.Column('description', sa.Unicode(160), nullable=True),
sa.Column('status', sa.Enum(name='domains_resource_statuses',
*RESOURCE_STATUSES),
nullable=False, server_default='PENDING', default='PENDING'),
sa.Column('action', sa.Enum(name='domain_actions', *ACTIONS),
default='CREATE', server_default='CREATE', nullable=False),
sa.Column('pool_id', UUID, default=None, nullable=True),
sa.Column('reverse_name', sa.String(255), nullable=False,
server_default=''),
sa.Column("type", sa.Enum(name='type', *ZONE_TYPES),
server_default='PRIMARY', default='PRIMARY'),
sa.Column('transferred_at', sa.DateTime, default=None),
sa.Column('shard', sa.SmallInteger, nullable=False),
sa.UniqueConstraint('name', 'deleted', 'pool_id',
name='unique_domain_name'),
sa.ForeignKeyConstraint(['parent_domain_id'],
['domains.id'],
ondelete='SET NULL'),
sa.Index('zone_deleted', 'deleted'),
sa.Index('zone_tenant_deleted', 'tenant_id', 'deleted'),
sa.Index('reverse_name_deleted', 'reverse_name', 'deleted'),
sa.Index('zone_created_at', 'created_at'))
op.create_table(
'domain_attributes', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('key', sa.Enum(name='key', *ZONE_ATTRIBUTE_KEYS)),
sa.Column('value', sa.String(255), nullable=False),
sa.Column('domain_id', UUID, nullable=False),
sa.UniqueConstraint('key', 'value', 'domain_id',
name='unique_attributes'),
sa.ForeignKeyConstraint(['domain_id'], ['domains.id'],
ondelete='CASCADE'))
op.create_table(
'recordsets', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('domain_shard', sa.SmallInteger, nullable=False),
sa.Column('tenant_id', sa.String(36), default=None, nullable=True),
sa.Column('domain_id', UUID, nullable=False),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('type', sa.Enum(name='record_types', *RECORD_TYPES),
nullable=False),
sa.Column('ttl', sa.Integer, default=None, nullable=True),
sa.Column('description', sa.Unicode(160), nullable=True),
sa.Column('reverse_name', sa.String(255), nullable=False,
server_default=''),
sa.UniqueConstraint('domain_id', 'name', 'type',
name='unique_recordset'),
sa.ForeignKeyConstraint(['domain_id'], ['domains.id'],
ondelete='CASCADE'),
sa.Index('rrset_type_domainid', 'type', 'domain_id'),
sa.Index('recordset_type_name', 'type', 'name'),
sa.Index('reverse_name_dom_id', 'reverse_name', 'domain_id'),
sa.Index('recordset_created_at', 'created_at'))
op.create_table(
'records', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('data', sa.Text, nullable=False),
sa.Column('domain_id', UUID, nullable=False),
sa.Column('managed', sa.Boolean, default=False),
sa.Column('managed_resource_type', sa.Unicode(50), default=None,
nullable=True),
sa.Column('managed_resource_id', UUID, default=None, nullable=True),
sa.Column('managed_plugin_type', sa.Unicode(50), default=None,
nullable=True),
sa.Column('managed_plugin_name', sa.Unicode(50), default=None,
nullable=True),
sa.Column('hash', sa.String(32), nullable=False),
sa.Column('description', sa.Unicode(160), nullable=True),
sa.Column('status', sa.Enum(name='record_resource_statuses',
*RESOURCE_STATUSES),
server_default='PENDING', default='PENDING', nullable=False),
sa.Column('tenant_id', sa.String(36), default=None, nullable=True),
sa.Column('recordset_id', UUID, nullable=False),
sa.Column('managed_tenant_id', sa.Unicode(36), default=None,
nullable=True),
sa.Column('managed_resource_region', sa.Unicode(100), default=None,
nullable=True),
sa.Column('managed_extra', sa.Unicode(100), default=None,
nullable=True),
sa.Column('action', sa.Enum(name='record_actions', *ACTIONS),
default='CREATE', server_default='CREATE', nullable=False),
sa.Column('serial', sa.Integer, server_default='1', nullable=False),
sa.Column('domain_shard', sa.SmallInteger, nullable=False),
sa.UniqueConstraint('hash', name='unique_record'),
sa.ForeignKeyConstraint(['domain_id'], ['domains.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['recordset_id'], ['recordsets.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['domain_id'], ['domains.id'],
ondelete='CASCADE',
name='fkey_records_domain_id'),
sa.ForeignKeyConstraint(['recordset_id'], ['recordsets.id'],
ondelete='CASCADE',
name='fkey_records_recordset_id'),
sa.Index('records_tenant', 'tenant_id'),
sa.Index('record_created_at', 'created_at'),
sa.Index('update_status_index', 'status', 'domain_id', 'tenant_id',
'created_at', 'serial'))
op.create_table(
'quotas', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('tenant_id', sa.String(36), nullable=False),
sa.Column('resource', sa.String(32), nullable=False),
sa.Column('hard_limit', sa.Integer, nullable=False),
sa.UniqueConstraint('tenant_id', 'resource', name='unique_quota'))
op.create_table(
'tsigkeys', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('algorithm',
sa.Enum(name='tsig_algorithms', *TSIG_ALGORITHMS),
nullable=False),
sa.Column('secret', sa.String(255), nullable=False),
sa.Column('scope', sa.Enum(name='tsig_scopes', *TSIG_SCOPES),
nullable=False, server_default='POOL'),
sa.Column('resource_id', UUID, nullable=False),
sa.UniqueConstraint('name', name='unique_tsigkey_name'))
op.create_table(
'tlds', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('description', sa.Unicode(160), nullable=True),
sa.UniqueConstraint('name', name='unique_tld_name'))
op.create_table(
'zone_transfer_requests', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('domain_id', UUID, nullable=False),
sa.Column("key", sa.String(255), nullable=False),
sa.Column("description", sa.String(255)),
sa.Column("tenant_id", sa.String(36), default=None, nullable=False),
sa.Column("target_tenant_id", sa.String(36), default=None,
nullable=True),
sa.Column("status",
sa.Enum(name='zone_transfer_requests_resource_statuses',
*TASK_STATUSES),
nullable=False, server_default='ACTIVE', default='ACTIVE'),
sa.ForeignKeyConstraint(['domain_id'], ['domains.id'],
ondelete='CASCADE'))
op.create_table(
'zone_transfer_accepts', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('domain_id', UUID, nullable=False),
sa.Column('zone_transfer_request_id', UUID, nullable=False),
sa.Column("tenant_id", sa.String(36), default=None, nullable=False),
sa.Column("status",
sa.Enum(name='zone_transfer_accepts_resource_statuses',
*TASK_STATUSES),
nullable=False, server_default='ACTIVE', default='ACTIVE'),
sa.ForeignKeyConstraint(['domain_id'], ['domains.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['zone_transfer_request_id'],
['zone_transfer_requests.id'],
ondelete='CASCADE'))
op.create_table(
'zone_tasks', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('tenant_id', sa.String(36), default=None, nullable=True),
sa.Column('domain_id', UUID, nullable=True),
sa.Column('task_type', sa.Enum(name='task_types', *ZONE_TASK_TYPES),
nullable=True),
sa.Column('message', sa.String(160), nullable=True),
sa.Column('status', sa.Enum(name='zone_tasks_resource_statuses',
*TASK_STATUSES),
nullable=False, server_default='ACTIVE', default='ACTIVE'),
sa.Column('location', sa.String(160), nullable=True))
op.create_table(
'blacklists', metadata,
sa.Column('id', UUID, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('updated_at', sa.DateTime),
sa.Column('created_at', sa.DateTime),
sa.Column('pattern', sa.String(255), nullable=False),
sa.Column('description', sa.Unicode(160), nullable=True),
sa.UniqueConstraint('pattern', name='pattern'))

View File

@ -0,0 +1,121 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""new_pools_tables
Revision ID: d04819112169
Revises: 0bcf910ea823
Create Date: 2022-08-01 16:20:17.440784
"""
from alembic import op
from oslo_utils import timeutils
import sqlalchemy as sa
from designate.sqlalchemy.types import UUID
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
from designate import utils
# revision identifiers, used by Alembic.
revision = 'd04819112169'
down_revision = '0bcf910ea823'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 086_new_pools_tables
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(86):
return
metadata = sa.MetaData()
op.create_table(
'pool_nameservers', metadata,
sa.Column('id', UUID, default=utils.generate_uuid, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('pool_id', UUID, nullable=False),
sa.Column('host', sa.String(255), nullable=False),
sa.Column('port', sa.Integer, nullable=False),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
sa.UniqueConstraint('pool_id', 'host', 'port',
name='unique_pool_host_port'))
op.create_table(
'pool_targets', metadata,
sa.Column('id', UUID, default=utils.generate_uuid, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('pool_id', UUID, nullable=False),
sa.Column('type', sa.String(50), nullable=False),
sa.Column('tsigkey_id', UUID, nullable=True),
sa.Column('description', sa.Unicode(160), nullable=True),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'))
op.create_table(
'pool_target_masters', metadata,
sa.Column('id', UUID, default=utils.generate_uuid, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('pool_target_id', UUID, nullable=False),
sa.Column('host', sa.String(255), nullable=False),
sa.Column('port', sa.Integer, nullable=False),
sa.ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'],
ondelete='CASCADE'),
sa.UniqueConstraint('pool_target_id', 'host', 'port',
name='unique_pool_target_host_port'))
op.create_table(
'pool_target_options', metadata,
sa.Column('id', UUID, default=utils.generate_uuid, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('pool_target_id', UUID, nullable=False),
sa.Column('key', sa.String(255), nullable=False),
sa.Column('value', sa.String(255), nullable=False),
sa.ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'],
ondelete='CASCADE'),
sa.UniqueConstraint('pool_target_id', 'key',
name='unique_pool_target_key'))
op.create_table(
'pool_also_notifies', metadata,
sa.Column('id', UUID, default=utils.generate_uuid, primary_key=True),
sa.Column('version', sa.Integer, default=1, nullable=False),
sa.Column('created_at', sa.DateTime,
default=lambda: timeutils.utcnow()),
sa.Column('updated_at', sa.DateTime,
onupdate=lambda: timeutils.utcnow()),
sa.Column('pool_id', UUID, nullable=False),
sa.Column('host', sa.String(255), nullable=False),
sa.Column('port', sa.Integer, nullable=False),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
sa.UniqueConstraint('pool_id', 'host', 'port',
name='unique_pool_also_notifies_pool0host0port'))

View File

@ -0,0 +1,64 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_FKs
Revision ID: d9a1883e93e9
Revises: 867a331ce1fc
Create Date: 2022-07-29 20:41:51.855014
"""
from alembic import op
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = 'd9a1883e93e9'
down_revision = '867a331ce1fc'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 081_add_FKs
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(81):
return
# We must use batch mode because the unit tests use sqlite
with op.batch_alter_table('zones') as batch_op:
batch_op.create_foreign_key('fk_zones_id_parent_zone_id', 'zones',
['parent_zone_id'], ['id'],
ondelete='SET NULL')
with op.batch_alter_table('zone_attributes') as batch_op:
batch_op.create_foreign_key('fk_zone_attributes_zones_id_zone_id',
'zones', ['zone_id'], ['id'],
ondelete='CASCADE')
with op.batch_alter_table('recordsets') as batch_op:
batch_op.create_foreign_key('fk_recordsets_zones_id_zone_id', 'zones',
['zone_id'], ['id'], ondelete='CASCADE')
with op.batch_alter_table('records') as batch_op:
batch_op.create_foreign_key('fk_records_zones_id_zone_id', 'zones',
['zone_id'], ['id'], ondelete='CASCADE')
with op.batch_alter_table('zone_transfer_requests') as batch_op:
batch_op.create_foreign_key('fk_ztr_zones_id_zone_id', 'zones',
['zone_id'], ['id'], ondelete='CASCADE')
with op.batch_alter_table('zone_transfer_accepts') as batch_op:
batch_op.create_foreign_key('fk_zta_zones_id_zone_id', 'zones',
['zone_id'], ['id'], ondelete='CASCADE')
with op.batch_alter_table('zone_tasks') as batch_op:
batch_op.create_foreign_key('fk_zone_tasks_zones_id_zone_id', 'zones',
['zone_id'], ['id'], ondelete='CASCADE')

View File

@ -0,0 +1,48 @@
# Copyright 2021 Cloudification GmbH
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""support_cert_records
Revision ID: e5e2199ed76e
Revises: 91eb1eb7c882
Create Date: 2022-08-01 17:34:45.569101
"""
from alembic import op
import sqlalchemy as sa
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = 'e5e2199ed76e'
down_revision = '91eb1eb7c882'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision 103_support_cert_records
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(103):
return
RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
'PTR', 'SSHFP', 'SOA', 'NAPTR', 'CAA', 'CERT']
with op.batch_alter_table('recordsets') as batch_op:
batch_op.alter_column('type', type_=sa.Enum(name='record_types',
*RECORD_TYPES),
existing_type=sa.Enum, existing_nullable=False)

View File

@ -0,0 +1,61 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""change_managed_column_types
Revision ID: f9f969f9d85e
Revises: bfcfc4a07487
Create Date: 2022-07-29 21:18:35.403634
"""
from alembic import op
import sqlalchemy as sa
from designate.storage.impl_sqlalchemy.alembic import legacy_utils
# revision identifiers, used by Alembic.
revision = 'f9f969f9d85e'
down_revision = 'bfcfc4a07487'
branch_labels = None
depends_on = None
# Equivalent to legacy sqlalchemy-migrate revision
# 083_change_managed_column_types
def upgrade() -> None:
# Check if the equivalent legacy migration has already run
if not legacy_utils.is_migration_needed(83):
return
with op.batch_alter_table('records') as batch_op:
batch_op.alter_column('managed_extra', type_=sa.String(100),
existing_type=sa.Unicode(100),
existing_nullable=True)
batch_op.alter_column('managed_plugin_type', type_=sa.String(50),
existing_type=sa.Unicode(50),
existing_nullable=True)
batch_op.alter_column('managed_plugin_name', type_=sa.String(50),
existing_type=sa.Unicode(50),
existing_nullable=True)
batch_op.alter_column('managed_resource_type', type_=sa.String(50),
existing_type=sa.Unicode(50),
existing_nullable=True)
batch_op.alter_column('managed_resource_region', type_=sa.String(100),
existing_type=sa.Unicode(100),
existing_nullable=True)
batch_op.alter_column('managed_tenant_id', type_=sa.String(36),
existing_type=sa.Unicode(36),
existing_nullable=True)

View File

@ -1,4 +0,0 @@
This is a database migration repository for the project Designate.
More information at
http://code.google.com/p/sqlalchemy-migrate/

View File

@ -1,25 +0,0 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=Designate
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering=False

View File

@ -1,44 +0,0 @@
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Various conveniences used for migration scripts
"""
from oslo_log import log as logging
from sqlalchemy.schema import Table as SqlaTable
LOG = logging.getLogger(__name__)
def create_tables(tables):
for table in tables:
LOG.debug("Creating table %s", table)
table.create()
def drop_tables(tables):
for table in tables:
LOG.debug("Dropping table %s", table)
table.drop()
def Table(*args, **kwargs):
if 'mysql_engine' not in kwargs:
kwargs['mysql_engine'] = 'INNODB'
return SqlaTable(*args, **kwargs)

View File

@ -1,400 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from sqlalchemy import (Table, MetaData, Column, String, Text, Integer,
SmallInteger, CHAR, DateTime, Enum, Boolean, Unicode,
UniqueConstraint, ForeignKeyConstraint, Index)
from oslo_config import cfg
from oslo_utils import timeutils
from designate.conf import central
from designate.sqlalchemy.types import UUID
CONF = cfg.CONF
central.register_opts(CONF)
RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED', 'ERROR']
RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS', 'PTR',
'SSHFP', 'SOA']
TASK_STATUSES = ['ACTIVE', 'PENDING', 'DELETED', 'ERROR', 'COMPLETE']
TSIG_ALGORITHMS = ['hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256',
'hmac-sha384', 'hmac-sha512']
TSIG_SCOPES = ['POOL', 'ZONE']
POOL_PROVISIONERS = ['UNMANAGED']
ACTIONS = ['CREATE', 'DELETE', 'UPDATE', 'NONE']
ZONE_ATTRIBUTE_KEYS = ('master',)
ZONE_TYPES = ('PRIMARY', 'SECONDARY',)
ZONE_TASK_TYPES = ['IMPORT', 'EXPORT']
metadata = MetaData()
quotas = Table('quotas', metadata,
Column('id', UUID, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('tenant_id', String(36), nullable=False),
Column('resource', String(32), nullable=False),
Column('hard_limit', Integer(), nullable=False),
UniqueConstraint('tenant_id', 'resource', name='unique_quota'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
tlds = Table('tlds', metadata,
Column('id', UUID, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('name', String(255), nullable=False),
Column('description', Unicode(160), nullable=True),
UniqueConstraint('name', name='unique_tld_name'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
domains = Table('domains', metadata,
Column('id', UUID, primary_key=True),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('version', Integer(), nullable=False),
Column('tenant_id', String(36), default=None, nullable=True),
Column('name', String(255), nullable=False),
Column('email', String(255), nullable=False),
Column('ttl', Integer, default=CONF.default_ttl, nullable=False),
Column('refresh', Integer, nullable=False),
Column('retry', Integer, nullable=False),
Column('expire', Integer, nullable=False),
Column('minimum', Integer, nullable=False),
Column('parent_domain_id', UUID, default=None, nullable=True),
Column('serial', Integer, nullable=False, server_default='1'),
Column('deleted', CHAR(32), nullable=False, default='0',
server_default='0'),
Column('deleted_at', DateTime, nullable=True, default=None),
Column('description', Unicode(160), nullable=True),
Column('status', Enum(name='domains_resource_statuses',
*RESOURCE_STATUSES),
nullable=False, server_default='PENDING', default='PENDING'),
Column('action', Enum(name='domain_actions', *ACTIONS),
default='CREATE', server_default='CREATE', nullable=False),
Column('pool_id', UUID, default=None, nullable=True),
Column('reverse_name', String(255), nullable=False, server_default=''),
Column("type", Enum(name='type', *ZONE_TYPES),
server_default='PRIMARY', default='PRIMARY'),
Column('transferred_at', DateTime, default=None),
Column('shard', SmallInteger(), nullable=False),
UniqueConstraint('name', 'deleted', 'pool_id', name='unique_domain_name'),
ForeignKeyConstraint(['parent_domain_id'],
['domains.id'],
ondelete='SET NULL'),
Index('zone_deleted', 'deleted'),
Index('zone_tenant_deleted', 'tenant_id', 'deleted'),
Index('reverse_name_deleted', 'reverse_name', 'deleted'),
Index('zone_created_at', 'created_at'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
domain_attributes = Table('domain_attributes', metadata,
Column('id', UUID(), primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('key', Enum(name='key', *ZONE_ATTRIBUTE_KEYS)),
Column('value', String(255), nullable=False),
Column('domain_id', UUID(), nullable=False),
UniqueConstraint('key', 'value', 'domain_id', name='unique_attributes'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
mysql_engine='INNODB',
mysql_charset='utf8'
)
recordsets = Table('recordsets', metadata,
Column('id', UUID, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('domain_shard', SmallInteger(), nullable=False),
Column('tenant_id', String(36), default=None, nullable=True),
Column('domain_id', UUID, nullable=False),
Column('name', String(255), nullable=False),
Column('type', Enum(name='record_types', *RECORD_TYPES), nullable=False),
Column('ttl', Integer, default=None, nullable=True),
Column('description', Unicode(160), nullable=True),
Column('reverse_name', String(255), nullable=False, server_default=''),
UniqueConstraint('domain_id', 'name', 'type', name='unique_recordset'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
Index('rrset_type_domainid', 'type', 'domain_id'),
Index('recordset_type_name', 'type', 'name'),
Index('reverse_name_dom_id', 'reverse_name', 'domain_id'),
Index('recordset_created_at', 'created_at'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
records = Table('records', metadata,
Column('id', UUID, primary_key=True),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('version', Integer(), default=1, nullable=False),
Column('data', Text, nullable=False),
Column('domain_id', UUID, nullable=False),
Column('managed', Boolean, default=False),
Column('managed_resource_type', Unicode(50), default=None, nullable=True),
Column('managed_resource_id', UUID, default=None, nullable=True),
Column('managed_plugin_type', Unicode(50), default=None, nullable=True),
Column('managed_plugin_name', Unicode(50), default=None, nullable=True),
Column('hash', String(32), nullable=False),
Column('description', Unicode(160), nullable=True),
Column('status', Enum(name='record_resource_statuses', *RESOURCE_STATUSES),
server_default='PENDING', default='PENDING', nullable=False),
Column('tenant_id', String(36), default=None, nullable=True),
Column('recordset_id', UUID, nullable=False),
Column('managed_tenant_id', Unicode(36), default=None, nullable=True),
Column('managed_resource_region', Unicode(100), default=None,
nullable=True),
Column('managed_extra', Unicode(100), default=None, nullable=True),
Column('action', Enum(name='record_actions', *ACTIONS),
default='CREATE', server_default='CREATE', nullable=False),
Column('serial', Integer(), server_default='1', nullable=False),
Column('domain_shard', SmallInteger(), nullable=False),
UniqueConstraint('hash', name='unique_record'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
ForeignKeyConstraint(['recordset_id'], ['recordsets.id'],
ondelete='CASCADE'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE',
name='fkey_records_domain_id'),
ForeignKeyConstraint(['recordset_id'], ['recordsets.id'],
ondelete='CASCADE', name='fkey_records_recordset_id'),
Index('records_tenant', 'tenant_id'),
Index('record_created_at', 'created_at'),
Index('update_status_index', 'status', 'domain_id', 'tenant_id',
'created_at', 'serial'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
tsigkeys = Table('tsigkeys', metadata,
Column('id', UUID, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('name', String(255), nullable=False),
Column('algorithm', Enum(name='tsig_algorithms', *TSIG_ALGORITHMS),
nullable=False),
Column('secret', String(255), nullable=False),
Column('scope', Enum(name='tsig_scopes', *TSIG_SCOPES), nullable=False,
server_default='POOL'),
Column('resource_id', UUID, nullable=False),
UniqueConstraint('name', name='unique_tsigkey_name'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
blacklists = Table('blacklists', metadata,
Column('id', UUID, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('updated_at', DateTime),
Column('created_at', DateTime),
Column('pattern', String(255), nullable=False),
Column('description', Unicode(160), nullable=True),
UniqueConstraint('pattern', name='pattern'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
pools = Table('pools', metadata,
Column('id', UUID, primary_key=True),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('version', Integer(), default=1, nullable=False),
Column('name', String(50), nullable=False),
Column('description', Unicode(160), nullable=True),
Column('tenant_id', String(36), nullable=True),
Column('provisioner', Enum(name='pool_provisioner', *POOL_PROVISIONERS),
nullable=False, server_default='UNMANAGED'),
UniqueConstraint('name', name='unique_pool_name'),
mysql_engine='INNODB',
mysql_charset='utf8'
)
pool_attributes = Table('pool_attributes', metadata,
Column('id', UUID(), primary_key=True),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('version', Integer(), default=1, nullable=False),
Column('key', String(255), nullable=False),
Column('value', String(255), nullable=False),
Column('pool_id', UUID(), nullable=False),
UniqueConstraint('pool_id', 'key', 'value', name='unique_pool_attribute'),
ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
mysql_engine='INNODB',
mysql_charset='utf8'
)
pool_ns_records = Table('pool_ns_records', metadata,
Column('id', UUID(), primary_key=True),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('version', Integer(), default=1, nullable=False),
Column('pool_id', UUID(), nullable=False),
Column('priority', Integer(), nullable=False),
Column('hostname', String(255), nullable=False),
ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
mysql_engine='INNODB',
mysql_charset='utf8')
zone_transfer_requests = Table('zone_transfer_requests', metadata,
Column('id', UUID, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('domain_id', UUID, nullable=False),
Column("key", String(255), nullable=False),
Column("description", String(255)),
Column("tenant_id", String(36), default=None, nullable=False),
Column("target_tenant_id", String(36), default=None, nullable=True),
Column("status", Enum(name='zone_transfer_requests_resource_statuses',
*TASK_STATUSES),
nullable=False, server_default='ACTIVE',
default='ACTIVE'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
zone_transfer_accepts = Table('zone_transfer_accepts', metadata,
Column('id', UUID, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('domain_id', UUID, nullable=False),
Column('zone_transfer_request_id', UUID, nullable=False),
Column("tenant_id", String(36), default=None, nullable=False),
Column("status", Enum(name='zone_transfer_accepts_resource_statuses',
*TASK_STATUSES),
nullable=False, server_default='ACTIVE',
default='ACTIVE'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
ForeignKeyConstraint(
['zone_transfer_request_id'],
['zone_transfer_requests.id'],
ondelete='CASCADE'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
zone_tasks = Table('zone_tasks', metadata,
Column('id', UUID(), primary_key=True),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('version', Integer(), default=1, nullable=False),
Column('tenant_id', String(36), default=None, nullable=True),
Column('domain_id', UUID(), nullable=True),
Column('task_type', Enum(name='task_types', *ZONE_TASK_TYPES),
nullable=True),
Column('message', String(160), nullable=True),
Column('status', Enum(name='zone_tasks_resource_statuses', *TASK_STATUSES),
nullable=False, server_default='ACTIVE',
default='ACTIVE'),
Column('location', String(160), nullable=True),
mysql_engine='INNODB',
mysql_charset='utf8')
def default_shard(context, id_col):
return int(context.current_parameters[id_col][0:3], 16)
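    # e.g. an id beginning with 'a1b' lands in shard int('a1b', 16) == 2587,
    # so shard values span 0x000-0xfff (0-4095).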
def upgrade(migrate_engine):
metadata.bind = migrate_engine
default_pool_id = CONF['service:central'].default_pool_id
with migrate_engine.begin() as conn:
if migrate_engine.name == "mysql":
conn.execute("SET foreign_key_checks = 0;")
pools.create()
pool_ns_records.create()
pool_attributes.create()
domains.create()
domain_attributes.create()
recordsets.create()
records.create()
quotas.create()
tsigkeys.create()
tlds.create()
zone_transfer_requests.create()
zone_transfer_accepts.create()
zone_tasks.create()
blacklists.create()
pools.insert().execute(
id=default_pool_id,
name='default',
version=1
)
if migrate_engine.name == "mysql":
conn.execute("SET foreign_key_checks = 1;")

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,119 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Graham Hayes <graham.hayes@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from sqlalchemy.schema import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint, \
ForeignKeyConstraint, PathNotFoundError
# This migration removes all references to domain from our Database.
# We rename the domains and domain_attribute tables, and rename any columns
# that had "domain" in the name.as
# There is a follow on patch to recreate the FKs for the newly renamed
# tables as the lib we use doesn't seem to like creating FKs on renamed
# tables until after the migration is complete.
meta = MetaData()
def drop_foreign_key(fk_def):
table = fk_def[0].table
col = fk_def[0]
ref_col = fk_def[1]
# Use .copy() to avoid the set changing during the for operation
for fk in table.foreign_keys.copy():
# Check if the fk is the one we want
if fk.column == col and fk.parent == ref_col:
fkc = ForeignKeyConstraint([fk.column], [fk.parent],
name=fk.constraint.name)
fkc.drop()
# Check if the fk is the one we want (sometimes it seems the parent
# / col is switched
if fk.parent == col and fk.column == ref_col:
fkc = ForeignKeyConstraint([fk.parent], [fk.column],
name=fk.constraint.name)
fkc.drop()
def drop_unique_constraint(uc_def):
uc = UniqueConstraint(*uc_def[2], table=uc_def[0], name=uc_def[1])
try:
uc.drop()
except PathNotFoundError:
pass
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Get all the tables
domains_table = Table('domains', meta, autoload=True)
domain_attrib_table = Table('domain_attributes', meta, autoload=True)
recordsets_table = Table('recordsets', meta, autoload=True)
records_table = Table('records', meta, autoload=True)
ztr_table = Table('zone_transfer_requests', meta, autoload=True)
zta_table = Table('zone_transfer_accepts', meta, autoload=True)
zt_table = Table('zone_tasks', meta, autoload=True)
# Remove the affected FKs
# Define FKs
fks = [
[domains_table.c.id, domains_table.c.parent_domain_id],
[domain_attrib_table.c.domain_id,
domains_table.c.id],
[recordsets_table.c.domain_id, domains_table.c.id],
[records_table.c.domain_id, domains_table.c.id],
[ztr_table.c.domain_id, domains_table.c.id],
[zta_table.c.domain_id, domains_table.c.id]
]
# Drop FKs
for fk in fks:
drop_foreign_key(fk)
# Change the table structures
# Domains Table changes
domains_table.c.parent_domain_id.alter(name='parent_zone_id')
domains_table.rename('zones')
# Domain Attributes
domain_attrib_table.c.domain_id.alter(name='zone_id')
domain_attrib_table.rename('zone_attributes')
# Recordsets
recordsets_table.c.domain_id.alter(name='zone_id')
recordsets_table.c.domain_shard.alter(name='zone_shard')
# Records
records_table.c.domain_id.alter(name="zone_id")
records_table.c.domain_shard.alter(name="zone_shard")
# Zone Transfer Requests
ztr_table.c.domain_id.alter(name='zone_id')
# Zone Transfer Requests
zta_table.c.domain_id.alter(name='zone_id')
# Zone Tasks
zt_table.c.domain_id.alter(name='zone_id')
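For comparison with the Alembic approach this commit adopts, a minimal sketch of how the same renames might be expressed as Alembic operations (illustrative only; the table and column names come from the file above, and the revision plumbing is omitted):

from alembic import op


def upgrade() -> None:
    # Rename the tables, then the zone-related columns, mirroring the
    # sqlalchemy-migrate logic above.
    op.rename_table('domains', 'zones')
    op.rename_table('domain_attributes', 'zone_attributes')
    with op.batch_alter_table('recordsets') as batch_op:
        batch_op.alter_column('domain_id', new_column_name='zone_id')
        batch_op.alter_column('domain_shard', new_column_name='zone_shard')
    with op.batch_alter_table('records') as batch_op:
        batch_op.alter_column('domain_id', new_column_name='zone_id')
        batch_op.alter_column('domain_shard', new_column_name='zone_shard')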

View File

@ -1,62 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Graham Hayes <graham.hayes@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from migrate.changeset.constraint import ForeignKeyConstraint
from sqlalchemy.schema import MetaData, Table
# This migration adds back the FKs removed in migration 80, as sqlalchemy
# migrate seems to need to wait to add FKs to renamed tables.
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Get all the tables
zones_table = Table('zones', meta, autoload=True)
zone_attrib_table = Table('zone_attributes', meta, autoload=True)
recordsets_table = Table('recordsets', meta, autoload=True)
records_table = Table('records', meta, autoload=True)
ztr_table = Table('zone_transfer_requests', meta, autoload=True)
zta_table = Table('zone_transfer_accepts', meta, autoload=True)
zt_table = Table('zone_tasks', meta, autoload=True)
# Create new FKs
fks = []
fks.append(ForeignKeyConstraint([zones_table.c.parent_zone_id],
[zones_table.c.id], ondelete='SET NULL'))
fks.append(ForeignKeyConstraint([zone_attrib_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([recordsets_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([records_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([ztr_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([zta_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([zt_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
for fk in fks:
fk.create()
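Under Alembic, the same constraints could be re-created with op.create_foreign_key; a brief sketch for a few of the tables above (constraint names are hypothetical, the referenced tables and columns follow the file above):

from alembic import op


def upgrade() -> None:
    # Recreate the zone-related foreign keys dropped before the renames.
    op.create_foreign_key('fk_zones_parent_zone_id', 'zones', 'zones',
                          ['parent_zone_id'], ['id'], ondelete='SET NULL')
    op.create_foreign_key('fk_recordsets_zone_id', 'recordsets', 'zones',
                          ['zone_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key('fk_records_zone_id', 'records', 'zones',
                          ['zone_id'], ['id'], ondelete='CASCADE')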

View File

@ -1,62 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add Unique constraint on ('pool_id', 'hostname') in the pool_ns_records
table Bug #1517389"""
import sys
from migrate.changeset.constraint import UniqueConstraint
from oslo_log import log as logging
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy import exc
from sqlalchemy.schema import MetaData, Table
LOG = logging.getLogger()
meta = MetaData()
CONSTRAINT_NAME = "unique_ns_name"
explanation = """
You need to manually remove duplicate entries from the database.
The error message was:
%s
"""
def upgrade(migrate_engine):
meta.bind = migrate_engine
pool_ns_records_table = Table('pool_ns_records', meta, autoload=True)
# Only apply it if it's not there (It's been backported to L)
insp = Inspector.from_engine(migrate_engine)
unique_constraints = insp.get_unique_constraints('pool_ns_records')
unique_constraint_names = [i['name'] for i in unique_constraints]
if CONSTRAINT_NAME not in unique_constraint_names:
# We define the constraint here if not it shows in the list above.
constraint = UniqueConstraint('pool_id', 'hostname',
name=CONSTRAINT_NAME,
table=pool_ns_records_table)
try:
constraint.create()
except exc.IntegrityError as e:
LOG.error(explanation, e)
# Use sys.exit so we don't blow up with a huge trace
sys.exit(1)
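The error path above asks the operator to remove duplicate entries by hand. A rough sketch, assuming direct SQLAlchemy (1.4-style) access to the same database, of how the offending (pool_id, hostname) pairs could be listed first (the helper name is illustrative):

import sqlalchemy as sa


def find_duplicate_pool_ns_records(engine):
    # List (pool_id, hostname) pairs that appear more than once and would
    # block creation of the unique_ns_name constraint.
    metadata = sa.MetaData()
    table = sa.Table('pool_ns_records', metadata, autoload_with=engine)
    query = (
        sa.select(table.c.pool_id, table.c.hostname,
                  sa.func.count().label('copies'))
        .group_by(table.c.pool_id, table.c.hostname)
        .having(sa.func.count() > 1)
    )
    with engine.connect() as conn:
        return conn.execute(query).fetchall()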

View File

@ -1,39 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Switch managed_* column types from Unicode to String
Bug #276448
"""
from oslo_log import log as logging
from sqlalchemy.schema import MetaData, Table
from sqlalchemy import String
LOG = logging.getLogger()
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records = Table('records', meta, autoload=True)
records.columns.managed_extra.alter(type=String(100))
records.columns.managed_plugin_type.alter(type=String(50))
records.columns.managed_plugin_name.alter(type=String(50))
records.columns.managed_resource_type.alter(type=String(50))
records.columns.managed_resource_region.alter(type=String(100))
records.columns.managed_tenant_id.alter(type=String(36))

View File

@ -1,35 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from oslo_log import log as logging
from sqlalchemy import Boolean
from sqlalchemy.schema import Column, MetaData, Table, Index
LOG = logging.getLogger(__name__)
meta = MetaData()
def upgrade(migrate_engine):
LOG.info("Adding boolean column delayed_notify to table 'zones'")
meta.bind = migrate_engine
zones_table = Table('zones', meta, autoload=True)
col = Column('delayed_notify', Boolean(), default=False)
col.create(zones_table)
index = Index('delayed_notify', zones_table.c.delayed_notify)
index.create(migrate_engine)

View File

@ -1,97 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Move zone masters to their own table, and allow for abstract keys in the
attributes table"""
from migrate.changeset.constraint import UniqueConstraint
from oslo_log import log as logging
from oslo_utils import timeutils
from sqlalchemy import DateTime, Integer, String, select
from sqlalchemy.schema import MetaData, Table, Column, ForeignKeyConstraint
from designate.sqlalchemy.types import UUID
from designate import utils
LOG = logging.getLogger()
meta = MetaData()
zone_masters_table = Table('zone_masters', meta,
Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('host', String(32), nullable=False),
Column('port', Integer(), nullable=False),
Column('zone_id', UUID(), nullable=False),
UniqueConstraint('host', 'port', 'zone_id', name='unique_masters'),
ForeignKeyConstraint(['zone_id'], ['zones.id'], ondelete='CASCADE'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
zone_attibutes_table = Table('zone_attributes', meta, autoload=True)
connection = migrate_engine.connect()
transaction = connection.begin()
try:
zone_masters_table.create()
masters = select(
[
zone_attibutes_table.c.id,
zone_attibutes_table.c.version,
zone_attibutes_table.c.created_at,
zone_attibutes_table.c.updated_at,
zone_attibutes_table.c.value,
zone_attibutes_table.c.zone_id
]
).where(
zone_attibutes_table.c.key == 'master'
).execute().fetchall()
masters_input = []
for master in masters:
host, port = utils.split_host_port(
master[zone_attibutes_table.c.value])
masters_input.append({
'id': master[zone_attibutes_table.c.id],
'version': master[zone_attibutes_table.c.version],
'created_at': master[zone_attibutes_table.c.created_at],
'updated_at': master[zone_attibutes_table.c.updated_at],
'zone_id': master[zone_attibutes_table.c.zone_id],
'host': host,
'port': port
})
zone_attibutes_table.insert(masters_input)
zone_attibutes_table.delete().where(
zone_attibutes_table.c.key == 'master')
zone_attibutes_table.c.key.alter(type=String(50))
transaction.commit()
except Exception:
transaction.rollback()
raise
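The loop above splits each stored attribute value into a host and port via utils.split_host_port, whose implementation is not shown in this commit. A minimal sketch of that split, assuming the values are plain 'host:port' strings and a DNS-style default port of 53 (both assumptions of this sketch, not taken from the source):

def split_host_port_sketch(value, default_port=53):
    # '192.0.2.1:5354' -> ('192.0.2.1', 5354); '192.0.2.1' -> ('192.0.2.1', 53)
    # Handles only simple host:port values; IPv6 literals would need more care.
    host, _, port = value.partition(':')
    return host, int(port) if port else default_port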

View File

@ -1,127 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from sqlalchemy import (Integer, String, Unicode, DateTime,
ForeignKeyConstraint, UniqueConstraint)
from sqlalchemy.schema import Table, Column, MetaData
from designate.sqlalchemy.types import UUID
from designate import utils
meta = MetaData()
pool_nameservers = Table('pool_nameservers', meta,
Column('id', UUID, default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('pool_id', UUID(), nullable=False),
Column('host', String(255), nullable=False),
Column('port', Integer(), nullable=False),
ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
UniqueConstraint('pool_id', 'host', 'port', name='unique_pool_host_port'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
pool_targets = Table('pool_targets', meta,
Column('id', UUID, default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('pool_id', UUID(), nullable=False),
Column('type', String(50), nullable=False),
Column('tsigkey_id', UUID(), nullable=True),
Column('description', Unicode(160), nullable=True),
ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
pool_target_masters = Table('pool_target_masters', meta,
Column('id', UUID, default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('pool_target_id', UUID(), nullable=False),
Column('host', String(255), nullable=False),
Column('port', Integer(), nullable=False),
ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'],
ondelete='CASCADE'),
UniqueConstraint('pool_target_id', 'host', 'port',
name='unique_pool_target_host_port'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
pool_target_options = Table('pool_target_options', meta,
Column('id', UUID, default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('pool_target_id', UUID(), nullable=False),
Column('key', String(255), nullable=False),
Column('value', String(255), nullable=False),
ForeignKeyConstraint(['pool_target_id'], ['pool_targets.id'],
ondelete='CASCADE'),
UniqueConstraint('pool_target_id', 'key', name='unique_pool_target_key'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
pool_also_notifies = Table('pool_also_notifies', meta,
Column('id', UUID, default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('pool_id', UUID(), nullable=False),
Column('host', String(255), nullable=False),
Column('port', Integer(), nullable=False),
ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'),
UniqueConstraint('pool_id', 'host', 'port',
name='unique_pool_also_notifies_pool0host0port'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Load the pool_attributes_table table schema for relations
Table('pools', meta, autoload=True)
pool_nameservers.create(checkfirst=True)
pool_targets.create(checkfirst=True)
pool_target_options.create(checkfirst=True)
pool_target_masters.create(checkfirst=True)
pool_also_notifies.create(checkfirst=True)

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,26 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Mitaka backports.
# Do not use this number for new Newton work. New Newton work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass

View File

@ -1,53 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add Service Status tables"""
from oslo_log import log as logging
from sqlalchemy.schema import Table, Column, MetaData
from sqlalchemy import String, DateTime, Enum, Text
from designate.sqlalchemy.types import UUID
from designate import utils
LOG = logging.getLogger()
meta = MetaData()
SERVICE_STATES = [
"UP", "DOWN", "WARNING"
]
def upgrade(migrate_engine):
meta.bind = migrate_engine
status_enum = Enum(name='service_statuses_enum', metadata=meta,
*SERVICE_STATES)
status_enum.create(checkfirst=True)
service_status_table = Table('service_statuses', meta,
Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('service_name', String(40), nullable=False),
Column('hostname', String(255), nullable=False),
Column('heartbeated_at', DateTime, nullable=True),
Column('status', status_enum, nullable=False),
Column('stats', Text, nullable=False),
Column('capabilities', Text, nullable=False),
)
service_status_table.create(checkfirst=True)

View File

@ -1,34 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import MetaData
meta = MetaData()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name != "mysql":
return
migrate_engine.execute("SET foreign_key_checks = 0;")
migrate_engine.execute(
"ALTER TABLE service_statuses CONVERT TO CHARACTER SET utf8;")
migrate_engine.execute("SET foreign_key_checks = 1;")
migrate_engine.execute(
"ALTER DATABASE %s DEFAULT CHARACTER SET utf8;"
% migrate_engine.url.database)

View File

@ -1,36 +0,0 @@
# Copyright 2016 Rackspace
#
# Author: James Li <james.li@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging
from sqlalchemy.schema import MetaData, Table, Index

LOG = logging.getLogger(__name__)

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    recordsets_table = Table('recordsets', meta, autoload=True)
    Index('rrset_updated_at', recordsets_table.c.updated_at
          ).create(migrate_engine)
    Index('rrset_zoneid', recordsets_table.c.zone_id
          ).create(migrate_engine)
    Index('rrset_type', recordsets_table.c.type).create(migrate_engine)
    Index('rrset_ttl', recordsets_table.c.ttl).create(migrate_engine)
    Index('rrset_tenant_id', recordsets_table.c.tenant_id
          ).create(migrate_engine)

View File

@ -1,47 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add Unique constraint on ('service_name', 'hostname') in the
service_statuses table for bug #1768824"""
import sys
from migrate.changeset.constraint import UniqueConstraint
from oslo_log import log as logging
from sqlalchemy import exc
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import Table
LOG = logging.getLogger()
EXPLANATION = """
You need to manually remove duplicate entries from the database.
The error message was:
%s
"""
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
service_statuses_table = Table('service_statuses', meta, autoload=True)
# Add UniqueConstraint based on service_name and hostname.
constraint = UniqueConstraint('service_name', 'hostname',
table=service_statuses_table,
name="unique_service_status")
try:
constraint.create()
except exc.IntegrityError as e:
LOG.error(EXPLANATION, e)
# Use sys.exit so we don't blow up with a huge trace
sys.exit(1)
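
The EXPLANATION above tells operators to remove duplicate rows by hand before
re-running this migration. As a hedged illustration only (the connection URL
and driver are placeholders, not anything this change prescribes), the
offending (service_name, hostname) pairs could be located like this:

    # Illustrative only; not part of the migration above.
    from sqlalchemy import create_engine, text

    engine = create_engine('mysql+pymysql://user:password@localhost/designate')
    with engine.connect() as conn:
        duplicates = conn.execute(text(
            "SELECT service_name, hostname, COUNT(*) AS copies "
            "FROM service_statuses "
            "GROUP BY service_name, hostname "
            "HAVING COUNT(*) > 1"))
        for service_name, hostname, copies in duplicates:
            # Each row printed here must be reduced to a single entry before
            # the unique constraint can be created.
            print(service_name, hostname, copies)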

View File

@ -1,29 +0,0 @@
# Copyright 2018 Canonical Ltd.
#
# Author: Tytus Kurek <tytus.kurek@canonical.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData, Table, Enum

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
                    'PTR', 'SSHFP', 'SOA', 'NAPTR']

    records_table = Table('recordsets', meta, autoload=True)
    records_table.columns.type.alter(name='type', type=Enum(*RECORD_TYPES))

View File

@ -1,29 +0,0 @@
# Copyright 2018 Canonical Ltd.
#
# Author: Tytus Kurek <tytus.kurek@canonical.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData, Table, Enum

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
                    'PTR', 'SSHFP', 'SOA', 'NAPTR', 'CAA']

    records_table = Table('recordsets', meta, autoload=True)
    records_table.columns.type.alter(name='type', type=Enum(*RECORD_TYPES))

View File

@ -1,29 +0,0 @@
# Copyright 2021 Cloudification GmbH
#
# Author: cloudification <contact@cloudification.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData, Table, Enum

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
                    'PTR', 'SSHFP', 'SOA', 'NAPTR', 'CAA', 'CERT']

    records_table = Table('recordsets', meta, autoload=True)
    records_table.columns.type.alter(name='type', type=Enum(*RECORD_TYPES))

View File

@ -33,7 +33,6 @@ from designate.common import constants
import designate.conf
from designate.context import DesignateContext
from designate import exceptions
from designate.manage import database as manage_database
from designate import objects
from designate import policy
from designate import storage
@ -359,15 +358,8 @@ class TestCase(base.BaseTestCase):
        self._disable_osprofiler()

        # The database fixture needs to be set up here (as opposed to isolated
        # in a storage test case) because many tests end up using storage.
        REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                  '..', 'storage',
                                                  'impl_sqlalchemy',
                                                  'migrate_repo'))
        self.db_fixture = self.useFixture(
            fixtures.DatabaseFixture.get_fixture(
                REPOSITORY, manage_database.INIT_VERSION))
            fixtures.DatabaseFixture.get_fixture())

        if os.getenv('DESIGNATE_SQL_DEBUG', "False").lower() in _TRUE_VALUES:
            connection_debug = 50

View File

@ -33,12 +33,12 @@ from oslo_log import log as logging
from oslo_utils import importutils
import tooz.coordination

from designate.manage import database as db_commands
from designate import network_api
from designate.network_api import fake as fake_network_api
from designate import policy
from designate import rpc
import designate.service
from designate.sqlalchemy import utils as sqlalchemy_utils
import designate.utils

"""Test fixtures
@ -117,14 +117,13 @@ class PolicyFixture(fixtures.Fixture):
class DatabaseFixture(fixtures.Fixture):

    fixtures = {}
    fixture = None

    @staticmethod
    def get_fixture(repo_path, init_version=None):
        if repo_path not in DatabaseFixture.fixtures:
            DatabaseFixture.fixtures[repo_path] = DatabaseFixture(
                repo_path, init_version)
        return DatabaseFixture.fixtures[repo_path]
    def get_fixture():
        if not DatabaseFixture.fixture:
            DatabaseFixture.fixture = DatabaseFixture()
        return DatabaseFixture.fixture

    def _mktemp(self):
        """Create temporary database file
@ -142,7 +141,7 @@ class DatabaseFixture(fixtures.Fixture):
                                        dir=tmp_dir)
        return path

    def __init__(self, repo_path, init_version=None):
    def __init__(self):
        super(DatabaseFixture, self).__init__()

        # Create the Golden DB
@ -150,9 +149,8 @@ class DatabaseFixture(fixtures.Fixture):
        self.golden_url = 'sqlite:///%s' % self.golden_db

        # Migrate the Golden DB
        manager = sqlalchemy_utils.get_migration_manager(
            repo_path, self.golden_url, init_version)
        manager.upgrade(None)
        db_cmds = db_commands.DatabaseCommands()
        db_cmds.upgrade('head', db_url=self.golden_url)

        # Prepare the Working Copy DB
        self.working_copy = self._mktemp()

View File

@ -0,0 +1,62 @@
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from io import StringIO
from unittest import mock

from designate.manage import database
from designate import tests as designate_tests


class TestManageDatabase(designate_tests.TestCase):
    def setUp(self):
        super(TestManageDatabase, self).setUp()
        self.stdlog = designate_tests.fixtures.StandardLogging()
        self.useFixture(self.stdlog)

        self.db_cmds = database.DatabaseCommands()

    def test_current(self):
        cmd_output = StringIO()
        self.db_cmds.current(stringio_buffer=cmd_output)
        self.assertIn('head', cmd_output.getvalue())

    def test_heads(self):
        cmd_output = StringIO()
        self.db_cmds.heads(stringio_buffer=cmd_output)
        self.assertIn('head', cmd_output.getvalue())

    def test_history(self):
        cmd_output = StringIO()
        self.db_cmds.history(stringio_buffer=cmd_output)
        self.assertIn('head', cmd_output.getvalue())

    def test_version(self):
        with mock.patch('sys.stdout', new=StringIO()) as cmd_output:
            self.db_cmds.version()
            self.assertIn('head', cmd_output.getvalue())

    def test_sync(self):
        cmd_output = StringIO()
        self.db_cmds.sync(stringio_buffer=cmd_output)
        # The test framework has already run the migrations, so this command
        # should produce no output.
        self.assertEqual('', cmd_output.getvalue())

    def test_upgrade(self):
        cmd_output = StringIO()
        self.db_cmds.upgrade('head', stringio_buffer=cmd_output)
        # The test framework has already run the migrations, so this command
        # should produce no output.
        self.assertEqual('', cmd_output.getvalue())

View File

@ -31,7 +31,6 @@ class SqlalchemyStorageTest(StorageTestCase, TestCase):
    def test_schema_table_names(self):
        table_names = [
            u'blacklists',
            u'migrate_version',
            u'pool_also_notifies',
            u'pool_attributes',
            u'pool_nameservers',
@ -54,10 +53,32 @@ class SqlalchemyStorageTest(StorageTestCase, TestCase):
            u'zones'
        ]
        inspector = self.storage.get_inspector()
        self.assertEqual(table_names, inspector.get_table_names())
        actual_table_names = inspector.get_table_names()

        # We have transitioned database schema migration tools.
        # They use different tracking tables. Accommodate the fact that one
        # or both may exist in this test.
        migration_table_found = False
        if ('migrate_version' in actual_table_names or
                'alembic_version' in actual_table_names):
            migration_table_found = True
        self.assertTrue(migration_table_found,
                        'A DB migration table was not found.')

        try:
            actual_table_names.remove('migrate_version')
        except ValueError:
            pass
        try:
            actual_table_names.remove('alembic_version')
        except ValueError:
            pass

        self.assertEqual(table_names, actual_table_names)

    def test_schema_table_indexes(self):
        indexes_t = self.storage.engine.execute("SELECT * FROM sqlite_master WHERE type = 'index';")  # noqa
        indexes_t = self.storage.engine.execute(
            "SELECT * FROM sqlite_master WHERE type = 'index';")

        indexes = {}  # table name -> index names -> cmd
        for _, index_name, table_name, num, cmd in indexes_t:

View File

@ -0,0 +1,56 @@
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import oslotest.base

from designate.storage.impl_sqlalchemy.alembic import legacy_utils


class TestLegacyUtils(oslotest.base.BaseTestCase):
    @mock.patch('sqlalchemy.MetaData')
    @mock.patch('alembic.op.get_bind')
    def test_is_migration_needed(self, mock_get_bind, mock_metadata):
        mock_bind = mock.MagicMock()
        mock_get_bind.return_value = mock_bind
        mock_execute = mock.MagicMock()
        mock_bind.execute.return_value = mock_execute

        mock_metadata_obj = mock.MagicMock()
        mock_metadata_obj.tables.keys.side_effect = [
            [], ['migrate_version'], ['migrate_version'],
            ['migrate_version'], ['migrate_version'],
            ['migrate_version'], ['migrate_version']]
        mock_metadata.return_value = mock_metadata_obj
        mock_execute.scalar_one_or_none.side_effect = [
            None, '79', '80', '81', Exception('boom')]

        # No existing migrate_version table
        self.assertTrue(legacy_utils.is_migration_needed(2022))

        # DB revision None
        self.assertTrue(legacy_utils.is_migration_needed(80))

        # DB revision 79
        self.assertTrue(legacy_utils.is_migration_needed(80))

        # DB revision 80
        self.assertFalse(legacy_utils.is_migration_needed(80))

        # DB revision 81
        self.assertFalse(legacy_utils.is_migration_needed(80))

        # DB revision query exception (no table, etc.)
        self.assertTrue(legacy_utils.is_migration_needed(80))
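
The comments above spell out the contract this test expects from
legacy_utils.is_migration_needed. Purely as a hedged sketch (this is not the
shipped legacy_utils module; it only mirrors the calls the mocks exercise),
such a helper might look like:

    # Hedged sketch of the behaviour exercised above; not the shipped module.
    import sqlalchemy as sa
    from alembic import op


    def is_migration_needed(equivalent_revision):
        bind = op.get_bind()
        metadata = sa.MetaData()
        metadata.reflect(bind=bind)
        # A database with no sqlalchemy-migrate bookkeeping table needs the
        # equivalent Alembic migration applied.
        if 'migrate_version' not in metadata.tables.keys():
            return True
        try:
            result = bind.execute(
                sa.text('SELECT version FROM migrate_version'))
            db_revision = result.scalar_one_or_none()
        except Exception:
            # If the legacy revision cannot be read, assume the migration
            # is needed.
            return True
        if db_revision is None or int(db_revision) < equivalent_revision:
            return True
        return False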

View File

@ -11,8 +11,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from migrate.changeset.constraint import UniqueConstraint
from oslo_upgradecheck import upgradecheck
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import Table
@ -52,22 +52,13 @@ class TestDuplicateServiceStatus(tests.TestCase):
        self.assertEqual(upgradecheck.Code.SUCCESS,
                         checks._duplicate_service_status().code)

    def test_failure(self):
        # Drop unique constraint so we can test error cases
        constraint = UniqueConstraint('service_name', 'hostname',
                                      table=self.service_statuses_table,
                                      name="unique_service_status")
        constraint.drop()
        fake_record = {'id': '1',
                       'service_name': 'worker',
                       'hostname': 'localhost',
                       'status': 'UP',
                       'stats': '',
                       'capabilities': '',
                       }
        self.service_statuses_table.insert().execute(fake_record)
        fake_record['id'] = '2'
        self.service_statuses_table.insert().execute(fake_record)
    @mock.patch('designate.sqlalchemy.session.get_engine')
    def test_failure(self, mock_get_engine):
        mock_engine = mock.MagicMock()
        mock_execute = mock.MagicMock()
        mock_engine.execute.return_value = mock_execute
        mock_execute.fetchall.return_value = [(2,)]
        mock_get_engine.return_value = mock_engine

        checks = status.Checks()
        self.assertEqual(upgradecheck.Code.FAILURE,

View File

@ -0,0 +1,7 @@
---
other:
  - |
    Designate will now use Alembic migrations for database schema updates.
    This transition is handled automatically on upgrade.
    The sqlalchemy-migrate package has been removed from the Designate
    requirements and the alembic package has been added.
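
For reference, the "upgrade to head" step the note refers to can also be
driven programmatically through the manage database commands introduced by
this change, as the test fixture above does. A minimal sketch (the SQLite URL
is only an example value):

    # Minimal sketch: upgrade a Designate database to the newest Alembic
    # revision using the manage database commands. Example URL only.
    from designate.manage import database

    db_cmds = database.DatabaseCommands()
    db_cmds.upgrade('head', db_url='sqlite:////tmp/designate-example.sqlite')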

View File

@ -5,6 +5,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
alembic>=1.6.5 # MIT
eventlet>=0.26.1 # MIT
Flask!=0.11,>=0.10 # BSD
greenlet>=0.4.15 # MIT
@ -34,7 +35,6 @@ python-neutronclient>=6.7.0 # Apache-2.0
requests>=2.23.0 # Apache-2.0
tenacity>=6.0.0 # Apache-2.0
SQLAlchemy>=1.2.19 # MIT
sqlalchemy-migrate>=0.11.0 # Apache-2.0
stevedore>=1.20.0 # Apache-2.0
WebOb>=1.7.1 # MIT
dnspython>=1.16.0 # http://www.dnspython.org/LICENSE