db: Remove old sqlalchemy-migrate migrations

These are no longer used and no longer necessary. Remove them.

Change-Id: Ibd7d46aefb3794dd342c8afb40dbb8d4ce00a976
Implements: blueprint remove-sqlalchemy-migrate
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
Stephen Finucane 2020-10-29 16:22:57 +00:00
parent 7fb274f6db
commit b28b0e29d2
57 changed files with 0 additions and 4240 deletions
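The files removed below made up the legacy sqlalchemy-migrate repository; schema management in Glance goes through the alembic-based migrations instead (normally driven via glance-manage db sync). A minimal sketch of driving an alembic upgrade programmatically, assuming an alembic.ini that points at Glance's alembic environment (the path is a placeholder, not something this commit adds):

# Sketch only: 'alembic.ini' is a placeholder path.
from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')   # script_location must point at the alembic environment
command.upgrade(cfg, 'head')  # apply every alembic revision up to the newest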

@@ -1,4 +0,0 @@
This is a database migration repository.
More information at
https://opendev.org/x/sqlalchemy-migrate/

@@ -1,21 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate.versioning.shell import main
# This should probably be a console script entry point.
if __name__ == '__main__':
main(debug='False', repository='.')
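This wrapper simply handed control to sqlalchemy-migrate's shell; the same operations can be driven from Python through its versioning API. A rough sketch, with a placeholder database URL rather than Glance's real configuration:

# Sketch only: the sqlite URL is a placeholder.
from migrate.versioning.api import upgrade, version_control

db_url = 'sqlite:///glance.db'
version_control(db_url, repository='.')  # create the migrate_version tracking table
upgrade(db_url, repository='.')          # run every numbered script in this repository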

@@ -1,20 +0,0 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=Glance Migrations
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
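The version_table named above is what sqlalchemy-migrate consults to decide which scripts still need to run. It can be inspected through the same API (again a sketch with a placeholder URL):

# Sketch only: reads the migrate_version table defined in this config.
from migrate.versioning.api import db_version

print(db_version('sqlite:///glance.db', repository='.'))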

@@ -1,55 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (Column, MetaData, Table)
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, Integer, String, Text, create_tables) # noqa
def define_images_table(meta):
images = Table('images',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('type', String(30)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public',
Boolean(),
nullable=False,
default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_images_table(meta)]
create_tables(tables)

@@ -1,78 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (
Column, ForeignKey, Index, MetaData, Table, UniqueConstraint)
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, Integer, String, Text, create_tables,
from_migration_import) # noqa
def define_image_properties_table(meta):
(define_images_table,) = from_migration_import(
'001_add_images_table', ['define_images_table'])
images = define_images_table(meta) # noqa
# NOTE(dperaza) DB2: specify the UniqueConstraint option when creating the
# table will cause an index being created to specify the index
# name and skip the step of creating another index with the same columns.
# The index name is needed so it can be dropped and re-created later on.
constr_kwargs = {}
if meta.bind.name == 'ibm_db_sa':
constr_kwargs['name'] = 'ix_image_properties_image_id_key'
image_properties = Table('image_properties',
meta,
Column('id',
Integer(),
primary_key=True,
nullable=False),
Column('image_id',
Integer(),
ForeignKey('images.id'),
nullable=False,
index=True),
Column('key', String(255), nullable=False),
Column('value', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
UniqueConstraint('image_id', 'key',
**constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
if meta.bind.name != 'ibm_db_sa':
Index('ix_image_properties_image_id_key',
image_properties.c.image_id,
image_properties.c.key)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_image_properties_table(meta)]
create_tables(tables)

@@ -1,109 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Table, and_, select
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public',
Boolean(),
nullable=False,
default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
mysql_engine='InnoDB',
extend_existing=True)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
(define_images_table,) = from_migration_import(
'001_add_images_table', ['define_images_table'])
(define_image_properties_table,) = from_migration_import(
'002_add_image_properties_table', ['define_image_properties_table'])
conn = migrate_engine.connect()
images = define_images_table(meta)
image_properties = define_image_properties_table(meta)
# Steps to take, in this order:
# 1) Move the existing type column from Image into
# ImageProperty for all image records that have a non-NULL
# type column
# 2) Drop the type column in images
# 3) Add the new columns to images
# The below wackiness correlates to the following ANSI SQL:
# SELECT images.* FROM images
# LEFT JOIN image_properties
# ON images.id = image_properties.image_id
# AND image_properties.key = 'type'
# WHERE image_properties.image_id IS NULL
# AND images.type IS NOT NULL
#
# which returns all the images that have a type set
# but that DO NOT yet have an image_property record
# with key of type.
from_stmt = [
images.outerjoin(image_properties,
and_(images.c.id == image_properties.c.image_id,
image_properties.c.key == 'type'))
]
and_stmt = and_(image_properties.c.image_id == None,
images.c.type != None)
sel = select([images], from_obj=from_stmt).where(and_stmt)
image_records = conn.execute(sel).fetchall()
property_insert = image_properties.insert()
for record in image_records:
conn.execute(property_insert,
image_id=record.id,
key='type',
created_at=record.created_at,
deleted=False,
value=record.type)
conn.close()
disk_format = Column('disk_format', String(20))
disk_format.create(images)
container_format = Column('container_format', String(20))
container_format.create(images)
images.columns['type'].drop()
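The select([images], from_obj=...) call above is the legacy SQLAlchemy spelling of the LEFT JOIN described in the comment. For reference, an equivalent sketch in SQLAlchemy 1.4+ style (not part of the original script):

# Sketch: the same "images with a type but no 'type' property" query, 1.4+ style.
from sqlalchemy import and_, select

stmt = (
    select(images)
    .select_from(
        images.outerjoin(
            image_properties,
            and_(images.c.id == image_properties.c.image_id,
                 image_properties.c.key == 'type')))
    .where(and_(image_properties.c.image_id.is_(None),
                images.c.type.isnot(None)))
)
image_records = conn.execute(stmt).fetchall()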

@@ -1,61 +0,0 @@
-- Move type column from base images table
-- to be records in image_properties table
CREATE TEMPORARY TABLE tmp_type_records (id INTEGER NOT NULL, type VARCHAR(30) NOT NULL);
INSERT INTO tmp_type_records
SELECT id, type
FROM images
WHERE type IS NOT NULL;
REPLACE INTO image_properties
(image_id, key, value, created_at, deleted)
SELECT id, 'type', type, date('now'), 0
FROM tmp_type_records;
DROP TABLE tmp_type_records;
-- Make changes to the base images table
CREATE TEMPORARY TABLE images_backup (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted
FROM images;
DROP TABLE images;
CREATE TABLE images (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX ix_images_is_public ON images (is_public);
INSERT INTO images (id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted)
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted
FROM images_backup;
DROP TABLE images_backup;

@@ -1,74 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Table
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public',
Boolean(),
nullable=False,
default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
Column('checksum', String(32)),
mysql_engine='InnoDB',
extend_existing=True)
return images
def get_image_properties_table(meta):
"""
No changes to the image properties table from 002...
"""
(define_image_properties_table,) = from_migration_import(
'002_add_image_properties_table', ['define_image_properties_table'])
image_properties = define_image_properties_table(meta)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
checksum = Column('checksum', String(32))
checksum.create(images)

@@ -1,74 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Table
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, BigInteger, Integer, String,
Text, from_migration_import) # noqa
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', BigInteger()),
Column('status', String(30), nullable=False),
Column('is_public',
Boolean(),
nullable=False,
default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
mysql_engine='InnoDB',
extend_existing=True)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# No changes to SQLite stores are necessary, since
# there is no BIG INTEGER type in SQLite. Unfortunately,
# running the Python 005_size_big_integer.py migration script
# on a SQLite datastore results in an error in the sa-migrate
# code that does the workarounds for SQLite not having
# ALTER TABLE MODIFY COLUMN ability
dialect = migrate_engine.url.get_dialect().name
if not dialect.startswith('sqlite'):
(get_images_table,) = from_migration_import(
'003_add_disk_format', ['get_images_table'])
images = get_images_table(meta)
images.columns['size'].alter(type=BigInteger())

@@ -1,59 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData
from glance.db.sqlalchemy.schema import from_migration_import
def get_images_table(meta):
"""
No changes to the images table from 004...
"""
(get_images_table,) = from_migration_import(
'004_add_checksum', ['get_images_table'])
images = get_images_table(meta)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
(get_image_properties_table,) = from_migration_import(
'004_add_checksum', ['get_image_properties_table'])
image_properties = get_image_properties_table(meta)
if migrate_engine.name == "ibm_db_sa":
# NOTE(dperaza) ibm db2 does not allow ALTER INDEX so we will drop
# the index, rename the column, then re-create the index
sql_commands = [
"""ALTER TABLE image_properties DROP UNIQUE
ix_image_properties_image_id_key;""",
"""ALTER TABLE image_properties RENAME COLUMN \"key\" to name;""",
"""ALTER TABLE image_properties ADD CONSTRAINT
ix_image_properties_image_id_name UNIQUE(image_id, name);""",
]
for command in sql_commands:
meta.bind.execute(command)
else:
index = Index('ix_image_properties_image_id_key',
image_properties.c.image_id,
image_properties.c.key)
index.rename('ix_image_properties_image_id_name')
image_properties = get_image_properties_table(meta)
image_properties.columns['key'].alter(name="name")

@@ -1,11 +0,0 @@
--
-- This file is necessary because MySQL does not support
-- renaming indexes.
--
DROP INDEX ix_image_properties_image_id_key ON image_properties;
-- Rename the `key` column to `name`
ALTER TABLE image_properties
CHANGE COLUMN `key` name VARCHAR(255) NOT NULL;
CREATE UNIQUE INDEX ix_image_properties_image_id_name ON image_properties (image_id, name);

@@ -1,44 +0,0 @@
--
-- This is necessary because SQLite does not support
-- RENAME INDEX or ALTER TABLE CHANGE COLUMN.
--
CREATE TEMPORARY TABLE image_properties_backup (
id INTEGER NOT NULL,
image_id INTEGER NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO image_properties_backup
SELECT id, image_id, key, value, created_at, updated_at, deleted_at, deleted
FROM image_properties;
DROP TABLE image_properties;
CREATE TABLE image_properties (
id INTEGER NOT NULL,
image_id INTEGER NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
UNIQUE (image_id, name),
FOREIGN KEY(image_id) REFERENCES images (id)
);
CREATE INDEX ix_image_properties_name ON image_properties (name);
CREATE INDEX ix_image_properties_deleted ON image_properties (deleted);
INSERT INTO image_properties (id, image_id, name, value, created_at, updated_at, deleted_at, deleted)
SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted
FROM image_properties_backup;
DROP TABLE image_properties_backup;

@@ -1,65 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Table
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, BigInteger, Integer, String,
Text) # noqa
def get_images_table(meta):
"""
Returns the Table object for the images table that corresponds to
the images table definition of this version.
"""
images = Table('images',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', BigInteger()),
Column('status', String(30), nullable=False),
Column('is_public',
Boolean(),
nullable=False,
default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
Column('checksum', String(32)),
Column('owner', String(255)),
mysql_engine='InnoDB',
extend_existing=True)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
owner = Column('owner', String(255))
owner.create(images)

@@ -1,80 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, ForeignKey, Index, MetaData, Table
from sqlalchemy import UniqueConstraint
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, Integer, String, create_tables,
from_migration_import) # noqa
def get_images_table(meta):
"""
No changes to the images table from 007...
"""
(get_images_table,) = from_migration_import(
'007_add_owner', ['get_images_table'])
images = get_images_table(meta)
return images
def get_image_members_table(meta):
images = get_images_table(meta) # noqa
image_members = Table('image_members',
meta,
Column('id',
Integer(),
primary_key=True,
nullable=False),
Column('image_id',
Integer(),
ForeignKey('images.id'),
nullable=False,
index=True),
Column('member', String(255), nullable=False),
Column('can_share',
Boolean(),
nullable=False,
default=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
UniqueConstraint('image_id', 'member'),
mysql_charset='utf8',
mysql_engine='InnoDB',
extend_existing=True)
# DB2: an index has already been created for the UniqueConstraint option
# specified on the Table() statement above.
if meta.bind.name != "ibm_db_sa":
Index('ix_image_members_image_id_member', image_members.c.image_id,
image_members.c.member)
return image_members
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [get_image_members_table(meta)]
create_tables(tables)

@@ -1,69 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Table
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, Integer, String, Text) # noqa
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public',
Boolean(),
nullable=False,
default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
Column('checksum', String(32)),
Column('owner', String(255)),
Column('min_disk', Integer(), default=0),
Column('min_ram', Integer(), default=0),
mysql_engine='InnoDB',
extend_existing=True)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
min_disk = Column('min_disk', Integer(), default=0)
min_disk.create(images)
min_ram = Column('min_ram', Integer(), default=0)
min_ram.create(images)

@@ -1,43 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from glance.db.sqlalchemy.schema import from_migration_import
def get_images_table(meta):
"""
No changes to the images table from 008...
"""
(get_images_table,) = from_migration_import(
'008_add_image_members_table', ['get_images_table'])
images = get_images_table(meta)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images_table = get_images_table(meta)
# set updated_at to created_at if equal to None
conn = migrate_engine.connect()
conn.execute(
images_table.update(
images_table.c.updated_at == None,
{images_table.c.updated_at: images_table.c.created_at}))
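The update above uses the old positional (whereclause, values) call form. The same backfill in SQLAlchemy 1.4+ style would look roughly like this (a sketch, not part of the original script):

# Sketch: set updated_at = created_at wherever updated_at is NULL.
from sqlalchemy import update

stmt = (
    update(images_table)
    .where(images_table.c.updated_at.is_(None))
    .values(updated_at=images_table.c.created_at)
)
conn.execute(stmt)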

@@ -1,26 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
meta = sqlalchemy.MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
images = sqlalchemy.Table('images', meta, autoload=True)
images.c.min_disk.alter(nullable=False)
images.c.min_ram.alter(nullable=False)

@@ -1,59 +0,0 @@
CREATE TEMPORARY TABLE images_backup (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER,
min_ram INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images;
DROP TABLE images;
CREATE TABLE images (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX ix_images_is_public ON images (is_public);
INSERT INTO images
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images_backup;
DROP TABLE images_backup;

@@ -1,355 +0,0 @@
# Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
While SQLAlchemy/sqlalchemy-migrate should abstract this correctly,
there are known issues with these libraries so SQLite and non-SQLite
migrations must be done separately.
"""
import uuid
import migrate
import sqlalchemy
and_ = sqlalchemy.and_
or_ = sqlalchemy.or_
def upgrade(migrate_engine):
"""
Call the correct dialect-specific upgrade.
"""
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
t_images = _get_table('images', meta)
t_image_members = _get_table('image_members', meta)
t_image_properties = _get_table('image_properties', meta)
dialect = migrate_engine.url.get_dialect().name
if dialect == "sqlite":
_upgrade_sqlite(meta, t_images, t_image_members, t_image_properties)
_update_all_ids_to_uuids(t_images, t_image_members, t_image_properties)
elif dialect == "ibm_db_sa":
_upgrade_db2(meta, t_images, t_image_members, t_image_properties)
_update_all_ids_to_uuids(t_images, t_image_members, t_image_properties)
_add_db2_constraints(meta)
else:
_upgrade_other(t_images, t_image_members, t_image_properties, dialect)
def _upgrade_sqlite(meta, t_images, t_image_members, t_image_properties):
"""
Upgrade 011 -> 012 with special SQLite-compatible logic.
"""
sql_commands = [
"""CREATE TABLE images_backup (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);""",
"""INSERT INTO images_backup
SELECT * FROM images;""",
"""CREATE TABLE image_members_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);""",
"""INSERT INTO image_members_backup
SELECT * FROM image_members;""",
"""CREATE TABLE image_properties_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
UNIQUE (image_id, name),
FOREIGN KEY(image_id) REFERENCES images (id)
);""",
"""INSERT INTO image_properties_backup
SELECT * FROM image_properties;""",
]
for command in sql_commands:
meta.bind.execute(command)
_sqlite_table_swap(meta, t_image_members, t_image_properties, t_images)
def _upgrade_db2(meta, t_images, t_image_members, t_image_properties):
"""
Upgrade for DB2.
"""
t_images.c.id.alter(sqlalchemy.String(36), primary_key=True)
image_members_backup = sqlalchemy.Table(
'image_members_backup',
meta,
sqlalchemy.Column('id',
sqlalchemy.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
sqlalchemy.String(36),
nullable=False,
index=True),
sqlalchemy.Column('member',
sqlalchemy.String(255),
nullable=False),
sqlalchemy.Column('can_share',
sqlalchemy.Boolean(),
nullable=False,
default=False),
sqlalchemy.Column('created_at',
sqlalchemy.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
sqlalchemy.DateTime()),
sqlalchemy.Column('deleted_at',
sqlalchemy.DateTime()),
sqlalchemy.Column('deleted',
sqlalchemy.Boolean(),
nullable=False,
default=False,
index=True),
sqlalchemy.UniqueConstraint('image_id', 'member'),
extend_existing=True)
image_properties_backup = sqlalchemy.Table(
'image_properties_backup',
meta,
sqlalchemy.Column('id',
sqlalchemy.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
sqlalchemy.String(36),
nullable=False,
index=True),
sqlalchemy.Column('name',
sqlalchemy.String(255),
nullable=False),
sqlalchemy.Column('value',
sqlalchemy.Text()),
sqlalchemy.Column('created_at',
sqlalchemy.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
sqlalchemy.DateTime()),
sqlalchemy.Column('deleted_at',
sqlalchemy.DateTime()),
sqlalchemy.Column('deleted',
sqlalchemy.Boolean(),
nullable=False,
default=False,
index=True),
sqlalchemy.UniqueConstraint(
'image_id', 'name',
name='ix_image_properties_image_id_name'),
extend_existing=True)
image_members_backup.create()
image_properties_backup.create()
sql_commands = [
"""INSERT INTO image_members_backup
SELECT * FROM image_members;""",
"""INSERT INTO image_properties_backup
SELECT * FROM image_properties;""",
]
for command in sql_commands:
meta.bind.execute(command)
t_image_members.drop()
t_image_properties.drop()
image_members_backup.rename(name='image_members')
image_properties_backup.rename(name='image_properties')
def _add_db2_constraints(meta):
# Create the foreign keys
sql_commands = [
"""ALTER TABLE image_members ADD CONSTRAINT member_image_id
FOREIGN KEY (image_id)
REFERENCES images (id);""",
"""ALTER TABLE image_properties ADD CONSTRAINT property_image_id
FOREIGN KEY (image_id)
REFERENCES images (id);""",
]
for command in sql_commands:
meta.bind.execute(command)
def _upgrade_other(t_images, t_image_members, t_image_properties, dialect):
"""
Upgrade 011 -> 012 with logic for non-SQLite databases.
"""
foreign_keys = _get_foreign_keys(t_images,
t_image_members,
t_image_properties, dialect)
for fk in foreign_keys:
fk.drop()
t_images.c.id.alter(sqlalchemy.String(36), primary_key=True)
t_image_members.c.image_id.alter(sqlalchemy.String(36))
t_image_properties.c.image_id.alter(sqlalchemy.String(36))
_update_all_ids_to_uuids(t_images, t_image_members, t_image_properties)
for fk in foreign_keys:
fk.create()
def _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images):
t_image_members.drop()
t_image_properties.drop()
t_images.drop()
meta.bind.execute("ALTER TABLE images_backup "
"RENAME TO images")
meta.bind.execute("ALTER TABLE image_members_backup "
"RENAME TO image_members")
meta.bind.execute("ALTER TABLE image_properties_backup "
"RENAME TO image_properties")
meta.bind.execute("""CREATE INDEX ix_image_properties_deleted
ON image_properties (deleted);""")
meta.bind.execute("""CREATE INDEX ix_image_properties_name
ON image_properties (name);""")
def _get_table(table_name, metadata):
"""Return a sqlalchemy Table definition with associated metadata."""
return sqlalchemy.Table(table_name, metadata, autoload=True)
def _get_foreign_keys(t_images, t_image_members, t_image_properties, dialect):
"""Retrieve and return foreign keys for members/properties tables."""
foreign_keys = []
if t_image_members.foreign_keys:
img_members_fk_name = list(t_image_members.foreign_keys)[0].name
if dialect == 'mysql':
fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id],
[t_images.c.id],
name=img_members_fk_name)
else:
fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id],
[t_images.c.id])
foreign_keys.append(fk1)
if t_image_properties.foreign_keys:
img_properties_fk_name = list(t_image_properties.foreign_keys)[0].name
if dialect == 'mysql':
fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id],
[t_images.c.id],
name=img_properties_fk_name)
else:
fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id],
[t_images.c.id])
foreign_keys.append(fk2)
return foreign_keys
def _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties):
"""Transition from INTEGER id to VARCHAR(36) id."""
images = list(t_images.select().execute())
for image in images:
old_id = image["id"]
new_id = str(uuid.uuid4())
t_images.update().where(
t_images.c.id == old_id).values(id=new_id).execute()
t_image_members.update().where(
t_image_members.c.image_id == old_id).values(
image_id=new_id).execute()
t_image_properties.update().where(
t_image_properties.c.image_id == old_id).values(
image_id=new_id).execute()
t_image_properties.update().where(
and_(or_(t_image_properties.c.name == 'kernel_id',
t_image_properties.c.name == 'ramdisk_id'),
t_image_properties.c.value == old_id)).values(
value=new_id).execute()
def _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties):
"""Transition from VARCHAR(36) id to INTEGER id."""
images = list(t_images.select().execute())
new_id = 1
for image in images:
old_id = image["id"]
t_images.update().where(
t_images.c.id == old_id).values(
id=str(new_id)).execute()
t_image_members.update().where(
t_image_members.c.image_id == old_id).values(
image_id=str(new_id)).execute()
t_image_properties.update().where(
t_image_properties.c.image_id == old_id).values(
image_id=str(new_id)).execute()
t_image_properties.update().where(
and_(or_(t_image_properties.c.name == 'kernel_id',
t_image_properties.c.name == 'ramdisk_id'),
t_image_properties.c.value == old_id)).values(
value=str(new_id)).execute()
new_id += 1

@@ -1,28 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, Boolean
meta = MetaData()
protected = Column('protected', Boolean, default=False)
def upgrade(migrate_engine):
meta.bind = migrate_engine
images = Table('images', meta, autoload=True)
images.create_column(protected)

@@ -1,66 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import schema
from glance.db.sqlalchemy import schema as glance_schema
def define_image_tags_table(meta):
# Load the images table so the foreign key can be set up properly
schema.Table('images', meta, autoload=True)
image_tags = schema.Table('image_tags',
meta,
schema.Column('id',
glance_schema.Integer(),
primary_key=True,
nullable=False),
schema.Column('image_id',
glance_schema.String(36),
schema.ForeignKey('images.id'),
nullable=False),
schema.Column('value',
glance_schema.String(255),
nullable=False),
schema.Column('created_at',
glance_schema.DateTime(),
nullable=False),
schema.Column('updated_at',
glance_schema.DateTime()),
schema.Column('deleted_at',
glance_schema.DateTime()),
schema.Column('deleted',
glance_schema.Boolean(),
nullable=False,
default=False),
mysql_engine='InnoDB',
mysql_charset='utf8')
schema.Index('ix_image_tags_image_id',
image_tags.c.image_id)
schema.Index('ix_image_tags_image_id_tag_value',
image_tags.c.image_id,
image_tags.c.value)
return image_tags
def upgrade(migrate_engine):
meta = schema.MetaData()
meta.bind = migrate_engine
tables = [define_image_tags_table(meta)]
glance_schema.create_tables(tables)

@@ -1,176 +0,0 @@
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import encodeutils
import six.moves.urllib.parse as urlparse
import sqlalchemy
from glance.common import exception
from glance.i18n import _, _LE
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
migrate_location_credentials(migrate_engine, to_quoted=True)
def migrate_location_credentials(migrate_engine, to_quoted):
"""
Migrate location credentials for swift uri's between the quoted
and unquoted forms.
:param migrate_engine: The configured db engine
:param to_quoted: If True, migrate location credentials from
unquoted to quoted form. If False, do the
reverse.
"""
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
images_table = sqlalchemy.Table('images', meta, autoload=True)
images = list(images_table.select(images_table.c.location.startswith(
'swift')).execute())
for image in images:
try:
fixed_uri = legacy_parse_uri(image['location'], to_quoted)
images_table.update().where(
images_table.c.id == image['id']).values(
location=fixed_uri).execute()
except exception.BadStoreUri as e:
reason = encodeutils.exception_to_unicode(e)
msg = _LE("Invalid store uri for image: %(image_id)s. "
"Details: %(reason)s") % {'image_id': image.id,
'reason': reason}
LOG.exception(msg)
raise
def legacy_parse_uri(uri, to_quote):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python. It also deals with the peculiarity that new-style
Swift URIs have where a username can contain a ':', like so:
swift://account:user:pass@authurl.com/container/obj
If to_quoted is True, the uri is assumed to have credentials that
have not been quoted, and the resulting uri will contain quoted
credentials.
If to_quoted is False, the uri is assumed to have credentials that
have been quoted, and the resulting uri will contain credentials
that have not been quoted.
"""
# Make sure that URIs that contain multiple schemes, such as:
# swift://user:pass@http://authurl.com/v1/container/obj
# are immediately rejected.
if uri.count('://') != 1:
reason = _("URI cannot contain more than one occurrence of a scheme."
"If you have specified a URI like "
"swift://user:pass@http://authurl.com/v1/container/obj"
", you need to change it to use the swift+http:// scheme, "
"like so: "
"swift+http://user:pass@authurl.com/v1/container/obj")
raise exception.BadStoreUri(message=reason)
pieces = urlparse.urlparse(uri)
if pieces.scheme not in ('swift', 'swift+http', 'swift+https'):
raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" %
pieces.scheme)
scheme = pieces.scheme
netloc = pieces.netloc
path = pieces.path.lstrip('/')
if netloc != '':
# > Python 2.6.1
if '@' in netloc:
creds, netloc = netloc.split('@')
else:
creds = None
else:
# Python 2.6.1 compat
# see lp659445 and Python issue7904
if '@' in path:
creds, path = path.split('@')
else:
creds = None
netloc = path[0:path.find('/')].strip('/')
path = path[path.find('/'):].strip('/')
if creds:
cred_parts = creds.split(':')
# User can be account:user, in which case cred_parts[0:2] will be
# the account and user. Combine them into a single username of
# account:user
if to_quote:
if len(cred_parts) == 1:
reason = (_("Badly formed credentials '%(creds)s' in Swift "
"URI") % {'creds': creds})
raise exception.BadStoreUri(message=reason)
elif len(cred_parts) == 3:
user = ':'.join(cred_parts[0:2])
else:
user = cred_parts[0]
key = cred_parts[-1]
user = user
key = key
else:
if len(cred_parts) != 2:
reason = (_("Badly formed credentials in Swift URI."))
raise exception.BadStoreUri(message=reason)
user, key = cred_parts
user = urlparse.unquote(user)
key = urlparse.unquote(key)
else:
user = None
key = None
path_parts = path.split('/')
try:
obj = path_parts.pop()
container = path_parts.pop()
if not netloc.startswith('http'):
# push hostname back into the remaining to build full authurl
path_parts.insert(0, netloc)
auth_or_store_url = '/'.join(path_parts)
except IndexError:
reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri}
raise exception.BadStoreUri(message=reason)
if auth_or_store_url.startswith('http://'):
auth_or_store_url = auth_or_store_url[len('http://'):]
elif auth_or_store_url.startswith('https://'):
auth_or_store_url = auth_or_store_url[len('https://'):]
credstring = ''
if user and key:
if to_quote:
quote_user = urlparse.quote(user)
quote_key = urlparse.quote(key)
else:
quote_user = user
quote_key = key
credstring = '%s:%s@' % (quote_user, quote_key)
auth_or_store_url = auth_or_store_url.strip('/')
container = container.strip('/')
obj = obj.strip('/')
return '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url,
container, obj)
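To make the quoting behaviour concrete, this is roughly what legacy_parse_uri does to one unquoted location (the URI is invented for illustration):

# Sketch: 'account:user' is rebuilt as a single username and percent-quoted.
uri = 'swift://account:user:secret@auth.example.com/v1/container/obj'
fixed = legacy_parse_uri(uri, to_quote=True)
# fixed == 'swift://account%3Auser:secret@auth.example.com/v1/container/obj'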

@@ -1,28 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, String
meta = MetaData()
status = Column('status', String(20), default="pending")
def upgrade(migrate_engine):
meta.bind = migrate_engine
image_members = Table('image_members', meta, autoload=True)
image_members.create_column(status)

@@ -1,237 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This migration handles migrating encrypted image location values from
the unquoted form to the quoted form.
If 'metadata_encryption_key' is specified in the config then this
migration performs the following steps for every entry in the images table:
1. Decrypt the location value with the metadata_encryption_key
2. Changes the value to its quoted form
3. Encrypts the new value with the metadata_encryption_key
4. Inserts the new value back into the row
Fixes bug #1081043
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import six.moves.urllib.parse as urlparse
import sqlalchemy
from glance.common import crypt
from glance.common import exception
from glance.i18n import _, _LE, _LI, _LW
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
def upgrade(migrate_engine):
migrate_location_credentials(migrate_engine, to_quoted=True)
def migrate_location_credentials(migrate_engine, to_quoted):
"""
Migrate location credentials for encrypted swift uri's between the
quoted and unquoted forms.
:param migrate_engine: The configured db engine
:param to_quoted: If True, migrate location credentials from
unquoted to quoted form. If False, do the
reverse.
"""
if not CONF.metadata_encryption_key:
msg = _LI("'metadata_encryption_key' was not specified in the config"
" file or a config file was not specified. This means that"
" this migration is a NOOP.")
LOG.info(msg)
return
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
images_table = sqlalchemy.Table('images', meta, autoload=True)
images = list(images_table.select().execute())
for image in images:
try:
fixed_uri = fix_uri_credentials(image['location'], to_quoted)
images_table.update().where(
images_table.c.id == image['id']).values(
location=fixed_uri).execute()
except exception.Invalid:
msg = _LW("Failed to decrypt location value for image"
" %(image_id)s") % {'image_id': image['id']}
LOG.warn(msg)
except exception.BadStoreUri as e:
reason = encodeutils.exception_to_unicode(e)
msg = _LE("Invalid store uri for image: %(image_id)s. "
"Details: %(reason)s") % {'image_id': image.id,
'reason': reason}
LOG.exception(msg)
raise
def decrypt_location(uri):
return crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri)
def encrypt_location(uri):
return crypt.urlsafe_encrypt(CONF.metadata_encryption_key, uri, 64)
def fix_uri_credentials(uri, to_quoted):
"""
Fix the given uri's embedded credentials by round-tripping with
StoreLocation.
If to_quoted is True, the uri is assumed to have credentials that
have not been quoted, and the resulting uri will contain quoted
credentials.
If to_quoted is False, the uri is assumed to have credentials that
have been quoted, and the resulting uri will contain credentials
that have not been quoted.
"""
if not uri:
return
try:
decrypted_uri = decrypt_location(uri)
# NOTE (ameade): If a uri is not encrypted or incorrectly encoded then we
# raise an exception.
except (TypeError, ValueError) as e:
raise exception.Invalid(str(e))
return legacy_parse_uri(decrypted_uri, to_quoted)
def legacy_parse_uri(uri, to_quote):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python. It also deals with the peculiarity that new-style
Swift URIs have where a username can contain a ':', like so:
swift://account:user:pass@authurl.com/container/obj
If to_quoted is True, the uri is assumed to have credentials that
have not been quoted, and the resulting uri will contain quoted
credentials.
If to_quoted is False, the uri is assumed to have credentials that
have been quoted, and the resulting uri will contain credentials
that have not been quoted.
"""
# Make sure that URIs that contain multiple schemes, such as:
# swift://user:pass@http://authurl.com/v1/container/obj
# are immediately rejected.
if uri.count('://') != 1:
reason = _("URI cannot contain more than one occurrence of a scheme."
"If you have specified a URI like "
"swift://user:pass@http://authurl.com/v1/container/obj"
", you need to change it to use the swift+http:// scheme, "
"like so: "
"swift+http://user:pass@authurl.com/v1/container/obj")
raise exception.BadStoreUri(message=reason)
pieces = urlparse.urlparse(uri)
if pieces.scheme not in ('swift', 'swift+http', 'swift+https'):
raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" %
pieces.scheme)
scheme = pieces.scheme
netloc = pieces.netloc
path = pieces.path.lstrip('/')
if netloc != '':
# > Python 2.6.1
if '@' in netloc:
creds, netloc = netloc.split('@')
else:
creds = None
else:
# Python 2.6.1 compat
# see lp659445 and Python issue7904
if '@' in path:
creds, path = path.split('@')
else:
creds = None
netloc = path[0:path.find('/')].strip('/')
path = path[path.find('/'):].strip('/')
if creds:
cred_parts = creds.split(':')
# User can be account:user, in which case cred_parts[0:2] will be
# the account and user. Combine them into a single username of
# account:user
if to_quote:
if len(cred_parts) == 1:
reason = (_("Badly formed credentials '%(creds)s' in Swift "
"URI") % {'creds': creds})
raise exception.BadStoreUri(message=reason)
elif len(cred_parts) == 3:
user = ':'.join(cred_parts[0:2])
else:
user = cred_parts[0]
key = cred_parts[-1]
user = user
key = key
else:
if len(cred_parts) != 2:
reason = (_("Badly formed credentials in Swift URI."))
raise exception.BadStoreUri(message=reason)
user, key = cred_parts
user = urlparse.unquote(user)
key = urlparse.unquote(key)
else:
user = None
key = None
path_parts = path.split('/')
try:
obj = path_parts.pop()
container = path_parts.pop()
if not netloc.startswith('http'):
# push hostname back into the remaining to build full authurl
path_parts.insert(0, netloc)
auth_or_store_url = '/'.join(path_parts)
except IndexError:
reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri}
raise exception.BadStoreUri(message=reason)
if auth_or_store_url.startswith('http://'):
auth_or_store_url = auth_or_store_url[len('http://'):]
elif auth_or_store_url.startswith('https://'):
auth_or_store_url = auth_or_store_url[len('https://'):]
credstring = ''
if user and key:
if to_quote:
quote_user = urlparse.quote(user)
quote_key = urlparse.quote(key)
else:
quote_user = user
quote_key = key
credstring = '%s:%s@' % (quote_user, quote_key)
auth_or_store_url = auth_or_store_url.strip('/')
container = container.strip('/')
obj = obj.strip('/')
uri = '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url,
container, obj)
return encrypt_location(uri)
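Unlike the earlier quote-credentials migration above, legacy_parse_uri here ends by re-encrypting the rebuilt URI, so the per-row flow in fix_uri_credentials is decrypt, re-quote, encrypt (a sketch, assuming metadata_encryption_key is configured; encrypted_value stands for one images.location value):

# Sketch: what fix_uri_credentials does for a single encrypted location value.
plaintext_uri = decrypt_location(encrypted_value)           # TypeError/ValueError -> exception.Invalid
new_value = legacy_parse_uri(plaintext_uri, to_quote=True)  # re-quotes creds, then encrypt_location()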

@@ -1,57 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
# NOTE(bcwaldon): load the images table for the ForeignKey below
sqlalchemy.Table('images', meta, autoload=True)
image_locations_table = sqlalchemy.Table(
'image_locations', meta,
sqlalchemy.Column('id',
schema.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
schema.String(36),
sqlalchemy.ForeignKey('images.id'),
nullable=False,
index=True),
sqlalchemy.Column('value',
schema.Text(),
nullable=False),
sqlalchemy.Column('created_at',
schema.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
schema.DateTime()),
sqlalchemy.Column('deleted_at',
schema.DateTime()),
sqlalchemy.Column('deleted',
schema.Boolean(),
nullable=False,
default=False,
index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
schema.create_tables([image_locations_table])

@@ -1,44 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def get_images_table(meta):
return sqlalchemy.Table('images', meta, autoload=True)
def get_image_locations_table(meta):
return sqlalchemy.Table('image_locations', meta, autoload=True)
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
images_table = get_images_table(meta)
image_locations_table = get_image_locations_table(meta)
image_records = images_table.select().execute().fetchall()
for image in image_records:
if image.location is not None:
values = {
'image_id': image.id,
'value': image.location,
'created_at': image.created_at,
'updated_at': image.updated_at,
'deleted': image.deleted,
'deleted_at': image.deleted_at,
}
image_locations_table.insert(values=values).execute()

View File

@ -1,26 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def get_images_table(meta):
return sqlalchemy.Table('images', meta, autoload=True)
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
images_table = get_images_table(meta)
images_table.columns['location'].drop()

View File

@ -1,31 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
tables = ['image_locations']
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "mysql":
d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';")
for row in d.fetchall():
table_name = row[0]
if table_name in tables:
migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" %
table_name)

View File

@ -1,61 +0,0 @@
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint
from oslo_db import exception as db_exception
from sqlalchemy import MetaData, Table
from sqlalchemy.exc import OperationalError, ProgrammingError
NEW_KEYNAME = 'image_members_image_id_member_deleted_at_key'
ORIGINAL_KEYNAME_RE = re.compile('image_members_image_id.*_key')
def upgrade(migrate_engine):
image_members = _get_image_members_table(migrate_engine)
if migrate_engine.name in ('mysql', 'postgresql'):
try:
UniqueConstraint('image_id',
name=_get_original_keyname(migrate_engine.name),
table=image_members).drop()
except (OperationalError, ProgrammingError, db_exception.DBError):
UniqueConstraint('image_id',
name=_infer_original_keyname(image_members),
table=image_members).drop()
UniqueConstraint('image_id',
'member',
'deleted_at',
name=NEW_KEYNAME,
table=image_members).create()
def _get_image_members_table(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
return Table('image_members', meta, autoload=True)
def _get_original_keyname(db):
return {'mysql': 'image_id',
'postgresql': 'image_members_image_id_member_key'}[db]
def _infer_original_keyname(table):
for i in table.indexes:
if ORIGINAL_KEYNAME_RE.match(i.name):
return i.name

View File

@ -1,19 +0,0 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def upgrade(migrate_engine):
pass

View File

@ -1,19 +0,0 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def upgrade(migrate_engine):
pass

View File

@ -1,19 +0,0 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def upgrade(migrate_engine):
pass

View File

@ -1,32 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
image_locations_table = sqlalchemy.Table('image_locations',
meta,
autoload=True)
meta_data = sqlalchemy.Column('meta_data',
schema.PickleType(),
default={})
meta_data.create(image_locations_table)

View File

@ -1,28 +0,0 @@
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Index
INDEX_NAME = 'checksum_image_idx'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = Table('images', meta, autoload=True)
index = Index(INDEX_NAME, images.c.checksum)
index.create(migrate_engine)

View File

@ -1,28 +0,0 @@
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Index
INDEX_NAME = 'owner_image_idx'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = Table('images', meta, autoload=True)
index = Index(INDEX_NAME, images.c.owner)
index.create(migrate_engine)

View File

@ -1,45 +0,0 @@
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pickle
import sqlalchemy
from sqlalchemy import Table, Column # noqa
from glance.db.sqlalchemy import models
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
image_locations = Table('image_locations', meta, autoload=True)
new_meta_data = Column('storage_meta_data', models.JSONEncodedDict,
default={})
new_meta_data.create(image_locations)
noe = pickle.dumps({})
s = sqlalchemy.sql.select([image_locations]).where(
image_locations.c.meta_data != noe)
conn = migrate_engine.connect()
res = conn.execute(s)
for row in res:
meta_data = row['meta_data']
x = pickle.loads(meta_data)
if x != {}:
stmt = image_locations.update().where(
image_locations.c.id == row['id']).values(storage_meta_data=x)
conn.execute(stmt)
conn.close()
image_locations.columns['meta_data'].drop()
image_locations.columns['storage_meta_data'].alter(name='meta_data')
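The migration above replaces the pickled meta_data column with the JSONEncodedDict type from glance.db.sqlalchemy.models. That type is, roughly, a standard SQLAlchemy TypeDecorator over Text that serializes dicts to JSON; a simplified sketch of the pattern (not the actual Glance definition) for readers unfamiliar with it:

import json

from sqlalchemy import Text
from sqlalchemy.types import TypeDecorator


class JSONEncodedDict(TypeDecorator):
    """Store a dict as a JSON-encoded text column."""

    impl = Text

    def process_bind_param(self, value, dialect):
        # dict -> JSON string on the way into the database
        return json.dumps(value) if value is not None else None

    def process_result_value(self, value, dialect):
        # JSON string -> dict on the way out
        return json.loads(value) if value is not None else None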

View File

@ -1,58 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (Column, MetaData, Table, Index)
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, String, Text, create_tables) # noqa
def define_tasks_table(meta):
tasks = Table('tasks',
meta,
Column('id', String(36), primary_key=True, nullable=False),
Column('type', String(30), nullable=False),
Column('status', String(30), nullable=False),
Column('owner', String(255), nullable=False),
Column('input', Text()), # json blob
Column('result', Text()), # json blob
Column('message', Text()),
Column('expires_at', DateTime(), nullable=True),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
Index('ix_tasks_type', tasks.c.type)
Index('ix_tasks_status', tasks.c.status)
Index('ix_tasks_owner', tasks.c.owner)
Index('ix_tasks_deleted', tasks.c.deleted)
Index('ix_tasks_updated_at', tasks.c.updated_at)
return tasks
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_tasks_table(meta)]
create_tables(tables)

View File

@ -1,75 +0,0 @@
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy import sql
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
image_locations = Table('image_locations', meta, autoload=True)
if migrate_engine.name == "ibm_db_sa":
il = orm.aliased(image_locations)
# NOTE(wenchma): Get all duplicated rows.
qry = (sql.select([il.c.id])
.where(il.c.id > (sql.select([func.min(image_locations.c.id)])
.where(image_locations.c.image_id == il.c.image_id)
.where(image_locations.c.value == il.c.value)
.where(image_locations.c.meta_data == il.c.meta_data)
.where(image_locations.c.deleted == False)))
.where(il.c.deleted == False)
.execute()
)
for row in qry:
stmt = (image_locations.delete()
.where(image_locations.c.id == row[0])
.where(image_locations.c.deleted == False))
stmt.execute()
else:
session = orm.sessionmaker(bind=migrate_engine)()
# NOTE(flaper87): Let's group by
# image_id, location and metadata.
grp = [image_locations.c.image_id,
image_locations.c.value,
image_locations.c.meta_data]
# NOTE(flaper87): Get all duplicated rows
qry = (session.query(*grp)
.filter(image_locations.c.deleted == False)
.group_by(*grp)
.having(func.count() > 1))
for row in qry:
# NOTE(flaper87): Not the fastest way to do it, but
# the best available one, since sqlalchemy
# has a bug around delete + limit.
s = (sql.select([image_locations.c.id])
.where(image_locations.c.image_id == row[0])
.where(image_locations.c.value == row[1])
.where(image_locations.c.meta_data == row[2])
.where(image_locations.c.deleted == False)
.limit(1).execute())
stmt = (image_locations.delete()
.where(image_locations.c.id == s.first()[0]))
stmt.execute()
session.close()

View File

@ -1,65 +0,0 @@
# Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (Column, ForeignKey, MetaData, Table)
from glance.db.sqlalchemy.schema import (
String, Text, create_tables) # noqa
TASKS_MIGRATE_COLUMNS = ['input', 'message', 'result']
def define_task_info_table(meta):
Table('tasks', meta, autoload=True)
# NOTE(nikhil): input and result are stored as text in the DB.
# SQLAlchemy marshals the data to/from JSON using the custom type
# JSONEncodedDict. It uses simplejson underneath.
task_info = Table('task_info',
meta,
Column('task_id', String(36),
ForeignKey('tasks.id'),
primary_key=True,
nullable=False),
Column('input', Text()),
Column('result', Text()),
Column('message', Text()),
mysql_engine='InnoDB',
mysql_charset='utf8')
return task_info
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_task_info_table(meta)]
create_tables(tables)
tasks_table = Table('tasks', meta, autoload=True)
task_info_table = Table('task_info', meta, autoload=True)
tasks = tasks_table.select().execute().fetchall()
for task in tasks:
values = {
'task_id': task.id,
'input': task.input,
'result': task.result,
'message': task.message,
}
task_info_table.insert(values=values).execute()
for col_name in TASKS_MIGRATE_COLUMNS:
tasks_table.columns[col_name].drop()

View File

@ -1,41 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import sqlalchemy
from glance.db.sqlalchemy import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
images_table = sqlalchemy.Table('images', meta, autoload=True)
image_locations_table = sqlalchemy.Table('image_locations', meta,
autoload=True)
# Create 'status' column for image_locations table
status = sqlalchemy.Column('status', schema.String(30),
server_default='active', nullable=False)
status.create(image_locations_table)
# Set 'status' column initial value for image_locations table
mapping = {'active': 'active', 'pending_delete': 'pending_delete',
'deleted': 'deleted', 'killed': 'deleted'}
for src, dst in six.iteritems(mapping):
subq = sqlalchemy.sql.select([images_table.c.id]).where(
images_table.c.status == src)
image_locations_table.update(values={'status': dst}).where(
image_locations_table.c.image_id.in_(subq)).execute()

View File

@ -1,26 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
images = sqlalchemy.Table('images', meta, autoload=True)
virtual_size = sqlalchemy.Column('virtual_size',
sqlalchemy.BigInteger)
images.create_column(virtual_size)

View File

@ -1,208 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy.schema import (
Column, ForeignKey, Index, MetaData, Table, UniqueConstraint) # noqa
from glance.common import timeutils
from glance.db.sqlalchemy.schema import (
Boolean, DateTime, Integer, String, Text, create_tables) # noqa
RESOURCE_TYPES = [u'OS::Glance::Image', u'OS::Cinder::Volume',
u'OS::Nova::Flavor', u'OS::Nova::Aggregate',
u'OS::Nova::Server']
def _get_metadef_resource_types_table(meta):
return sqlalchemy.Table('metadef_resource_types', meta, autoload=True)
def _populate_resource_types(resource_types_table):
now = timeutils.utcnow()
for resource_type in RESOURCE_TYPES:
values = {
'name': resource_type,
'protected': True,
'created_at': now,
'updated_at': now
}
resource_types_table.insert(values=values).execute()
def define_metadef_namespaces_table(meta):
# NOTE: For DB2, if a UniqueConstraint is used when creating a table,
# an index will automatically be created. So, for DB2, specify the
# index name up front. If not DB2, then create the Index.
_constr_kwargs = {}
if meta.bind.name == 'ibm_db_sa':
_constr_kwargs['name'] = 'ix_namespaces_namespace'
namespaces = Table('metadef_namespaces',
meta,
Column('id', Integer(), primary_key=True,
nullable=False),
Column('namespace', String(80), nullable=False),
Column('display_name', String(80)),
Column('description', Text()),
Column('visibility', String(32)),
Column('protected', Boolean()),
Column('owner', String(255), nullable=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
UniqueConstraint('namespace', **_constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
if meta.bind.name != 'ibm_db_sa':
Index('ix_namespaces_namespace', namespaces.c.namespace)
return namespaces
def define_metadef_objects_table(meta):
_constr_kwargs = {}
if meta.bind.name == 'ibm_db_sa':
_constr_kwargs['name'] = 'ix_objects_namespace_id_name'
objects = Table('metadef_objects',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('namespace_id', Integer(),
ForeignKey('metadef_namespaces.id'),
nullable=False),
Column('name', String(80), nullable=False),
Column('description', Text()),
Column('required', Text()),
Column('schema', Text(), nullable=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
UniqueConstraint('namespace_id', 'name',
**_constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
if meta.bind.name != 'ibm_db_sa':
Index('ix_objects_namespace_id_name',
objects.c.namespace_id,
objects.c.name)
return objects
def define_metadef_properties_table(meta):
_constr_kwargs = {}
if meta.bind.name == 'ibm_db_sa':
_constr_kwargs['name'] = 'ix_metadef_properties_namespace_id_name'
metadef_properties = Table(
'metadef_properties',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('namespace_id', Integer(), ForeignKey('metadef_namespaces.id'),
nullable=False),
Column('name', String(80), nullable=False),
Column('schema', Text(), nullable=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
UniqueConstraint('namespace_id', 'name', **_constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
if meta.bind.name != 'ibm_db_sa':
Index('ix_metadef_properties_namespace_id_name',
metadef_properties.c.namespace_id,
metadef_properties.c.name)
return metadef_properties
def define_metadef_resource_types_table(meta):
_constr_kwargs = {}
if meta.bind.name == 'ibm_db_sa':
_constr_kwargs['name'] = 'ix_metadef_resource_types_name'
metadef_res_types = Table(
'metadef_resource_types',
meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(80), nullable=False),
Column('protected', Boolean(), nullable=False, default=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
UniqueConstraint('name', **_constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
if meta.bind.name != 'ibm_db_sa':
Index('ix_metadef_resource_types_name',
metadef_res_types.c.name)
return metadef_res_types
def define_metadef_namespace_resource_types_table(meta):
_constr_kwargs = {}
if meta.bind.name == 'ibm_db_sa':
_constr_kwargs['name'] = 'ix_metadef_ns_res_types_res_type_id_ns_id'
metadef_associations = Table(
'metadef_namespace_resource_types',
meta,
Column('resource_type_id', Integer(),
ForeignKey('metadef_resource_types.id'),
primary_key=True, nullable=False),
Column('namespace_id', Integer(),
ForeignKey('metadef_namespaces.id'),
primary_key=True, nullable=False),
Column('properties_target', String(80)),
Column('prefix', String(80)),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
UniqueConstraint('resource_type_id', 'namespace_id',
**_constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
if meta.bind.name != 'ibm_db_sa':
Index('ix_metadef_ns_res_types_res_type_id_ns_id',
metadef_associations.c.resource_type_id,
metadef_associations.c.namespace_id)
return metadef_associations
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_metadef_namespaces_table(meta),
define_metadef_objects_table(meta),
define_metadef_properties_table(meta),
define_metadef_resource_types_table(meta),
define_metadef_namespace_resource_types_table(meta)]
create_tables(tables)
resource_types_table = _get_metadef_resource_types_table(meta)
_populate_resource_types(resource_types_table)

View File

@ -1,25 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_objects.c.schema.alter(name='json_schema')
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_properties.c.schema.alter(name='json_schema')

View File

@ -1,84 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import Table, Index, UniqueConstraint
from sqlalchemy.schema import (AddConstraint, DropConstraint,
ForeignKeyConstraint)
from sqlalchemy import sql
from sqlalchemy import update
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
if migrate_engine.name not in ['mysql', 'postgresql']:
return
image_properties = Table('image_properties', meta, autoload=True)
image_members = Table('image_members', meta, autoload=True)
images = Table('images', meta, autoload=True)
# We have to ensure that we don't have null values since we are going
# to set nullable=False
migrate_engine.execute(
update(image_members)
.where(image_members.c.status == sql.expression.null())
.values(status='pending'))
migrate_engine.execute(
update(images)
.where(images.c.protected == sql.expression.null())
.values(protected=sql.expression.false()))
image_members.c.status.alter(nullable=False, server_default='pending')
images.c.protected.alter(
nullable=False, server_default=sql.expression.false())
if migrate_engine.name == 'postgresql':
Index('ix_image_properties_image_id_name',
image_properties.c.image_id,
image_properties.c.name).drop()
# We have different names of this constraint in different versions of
# postgresql. Since we have only one constraint on this table, we can
# get it in the following way.
name = migrate_engine.execute(
"""SELECT conname
FROM pg_constraint
WHERE conrelid =
(SELECT oid
FROM pg_class
WHERE relname LIKE 'image_properties')
AND contype = 'u';""").scalar()
constraint = UniqueConstraint(image_properties.c.image_id,
image_properties.c.name,
name='%s' % name)
migrate_engine.execute(DropConstraint(constraint))
constraint = UniqueConstraint(image_properties.c.image_id,
image_properties.c.name,
name='ix_image_properties_image_id_name')
migrate_engine.execute(AddConstraint(constraint))
images.c.id.alter(server_default=None)
if migrate_engine.name == 'mysql':
constraint = UniqueConstraint(image_properties.c.image_id,
image_properties.c.name,
name='image_id')
migrate_engine.execute(DropConstraint(constraint))
image_locations = Table('image_locations', meta, autoload=True)
if len(image_locations.foreign_keys) == 0:
migrate_engine.execute(AddConstraint(ForeignKeyConstraint(
[image_locations.c.image_id], [images.c.id])))

View File

@ -1,159 +0,0 @@
UPDATE images SET protected = 0 WHERE protected is NULL;
UPDATE image_members SET status = 'pending' WHERE status is NULL;
CREATE TEMPORARY TABLE images_backup (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER,
min_ram INTEGER,
protected BOOLEAN NOT NULL DEFAULT 0,
virtual_size INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size
FROM images;
DROP TABLE images;
CREATE TABLE images (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
protected BOOLEAN NOT NULL DEFAULT 0,
virtual_size INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX ix_images_is_public ON images (is_public);
CREATE INDEX owner_image_idx ON images (owner);
CREATE INDEX checksum_image_idx ON images (checksum);
INSERT INTO images
SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size
FROM images_backup;
DROP TABLE images_backup;
CREATE TEMPORARY TABLE image_members_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
status VARCHAR(20) NOT NULL DEFAULT 'pending',
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);
INSERT INTO image_members_backup
SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status
FROM image_members;
DROP TABLE image_members;
CREATE TABLE image_members (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
status VARCHAR(20) NOT NULL DEFAULT 'pending',
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id),
CONSTRAINT image_members_image_id_member_deleted_at_key UNIQUE (image_id, member, deleted_at)
);
CREATE INDEX ix_image_members_deleted ON image_members (deleted);
CREATE INDEX ix_image_members_image_id ON image_members (image_id);
CREATE INDEX ix_image_members_image_id_member ON image_members (image_id, member);
INSERT INTO image_members
SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status
FROM image_members_backup;
DROP TABLE image_members_backup;
CREATE TEMPORARY TABLE image_properties_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO image_properties_backup
SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted
FROM image_properties;
DROP TABLE image_properties;
CREATE TABLE image_properties (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id),
CONSTRAINT ix_image_properties_image_id_name UNIQUE (image_id, name)
);
CREATE INDEX ix_image_properties_deleted ON image_properties (deleted);
CREATE INDEX ix_image_properties_image_id ON image_properties (image_id);
INSERT INTO image_properties (id, image_id, name, value, created_at, updated_at, deleted_at, deleted)
SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted
FROM image_properties_backup;
DROP TABLE image_properties_backup;

View File

@ -1,51 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (
Column, Index, MetaData, Table, UniqueConstraint) # noqa
from glance.db.sqlalchemy.schema import (
DateTime, Integer, String, create_tables) # noqa
def define_metadef_tags_table(meta):
_constr_kwargs = {}
metadef_tags = Table('metadef_tags',
meta,
Column('id', Integer(), primary_key=True,
nullable=False),
Column('namespace_id', Integer(),
nullable=False),
Column('name', String(80), nullable=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
UniqueConstraint('namespace_id', 'name',
**_constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=False)
if meta.bind.name != 'ibm_db_sa':
Index('ix_tags_namespace_id_name',
metadef_tags.c.namespace_id,
metadef_tags.c.name)
return metadef_tags
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_metadef_tags_table(meta)]
create_tables(tables)

View File

@ -1,196 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy
from sqlalchemy import inspect
from sqlalchemy import (Table, Index, UniqueConstraint)
from sqlalchemy.schema import (DropConstraint)
def _change_db2_unique_constraint(operation_type, constraint_name, *columns):
constraint = migrate.UniqueConstraint(*columns, name=constraint_name)
operation = getattr(constraint, operation_type)
operation()
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
inspector = inspect(migrate_engine)
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_ns_res_types = Table('metadef_namespace_resource_types',
meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
constraints = [('ix_namespaces_namespace',
[metadef_namespaces.c.namespace]),
('ix_objects_namespace_id_name',
[metadef_objects.c.namespace_id,
metadef_objects.c.name]),
('ix_metadef_properties_namespace_id_name',
[metadef_properties.c.namespace_id,
metadef_properties.c.name])]
metadef_tags_constraints = inspector.get_unique_constraints('metadef_tags')
for constraint in metadef_tags_constraints:
if set(constraint['column_names']) == set(['namespace_id', 'name']):
constraints.append((constraint['name'],
[metadef_tags.c.namespace_id,
metadef_tags.c.name]))
if meta.bind.name == "ibm_db_sa":
# For db2, the following constraints need to be dropped first,
# otherwise indexes like ix_metadef_ns_res_types_namespace_id
# will fail to create. These constraints will be added back at
# the end. It should not affect the original logic for other
# database backends.
for (constraint_name, cols) in constraints:
_change_db2_unique_constraint('drop', constraint_name, *cols)
else:
Index('ix_namespaces_namespace', metadef_namespaces.c.namespace).drop()
Index('ix_objects_namespace_id_name', metadef_objects.c.namespace_id,
metadef_objects.c.name).drop()
Index('ix_metadef_properties_namespace_id_name',
metadef_properties.c.namespace_id,
metadef_properties.c.name).drop()
fkc = migrate.ForeignKeyConstraint([metadef_tags.c.namespace_id],
[metadef_namespaces.c.id])
fkc.create()
# The `migrate` module removes the unique constraint after adding a
# foreign key to the table in sqlite.
# The reason for this is that it isn't possible to add a foreign key
# constraint to an existing table in sqlite. Instead, the table has to be
# recreated with the needed foreign key constraint in its declaration. The
# migrate package provides that, but unfortunately it recreates the table
# without constraints. Create the unique constraint manually.
if migrate_engine.name == 'sqlite' and len(
inspector.get_unique_constraints('metadef_tags')) == 0:
uc = migrate.UniqueConstraint(metadef_tags.c.namespace_id,
metadef_tags.c.name)
uc.create()
if meta.bind.name != "ibm_db_sa":
Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id,
metadef_tags.c.name).drop()
Index('ix_metadef_tags_name', metadef_tags.c.name).create()
Index('ix_metadef_tags_namespace_id', metadef_tags.c.namespace_id,
metadef_tags.c.name).create()
if migrate_engine.name == 'mysql':
# We need to drop some foreign keys first because unique constraints
# that we want to delete depend on them. So drop the fk and recreate
# it again after unique constraint deletion.
fkc = migrate.ForeignKeyConstraint([metadef_properties.c.namespace_id],
[metadef_namespaces.c.id],
name='metadef_properties_ibfk_1')
fkc.drop()
constraint = UniqueConstraint(metadef_properties.c.namespace_id,
metadef_properties.c.name,
name='namespace_id')
migrate_engine.execute(DropConstraint(constraint))
fkc.create()
fkc = migrate.ForeignKeyConstraint([metadef_objects.c.namespace_id],
[metadef_namespaces.c.id],
name='metadef_objects_ibfk_1')
fkc.drop()
constraint = UniqueConstraint(metadef_objects.c.namespace_id,
metadef_objects.c.name,
name='namespace_id')
migrate_engine.execute(DropConstraint(constraint))
fkc.create()
constraint = UniqueConstraint(metadef_ns_res_types.c.resource_type_id,
metadef_ns_res_types.c.namespace_id,
name='resource_type_id')
migrate_engine.execute(DropConstraint(constraint))
constraint = UniqueConstraint(metadef_namespaces.c.namespace,
name='namespace')
migrate_engine.execute(DropConstraint(constraint))
constraint = UniqueConstraint(metadef_resource_types.c.name,
name='name')
migrate_engine.execute(DropConstraint(constraint))
if migrate_engine.name == 'postgresql':
met_obj_index_name = (
inspector.get_unique_constraints('metadef_objects')[0]['name'])
constraint = UniqueConstraint(
metadef_objects.c.namespace_id,
metadef_objects.c.name,
name=met_obj_index_name)
migrate_engine.execute(DropConstraint(constraint))
met_prop_index_name = (
inspector.get_unique_constraints('metadef_properties')[0]['name'])
constraint = UniqueConstraint(
metadef_properties.c.namespace_id,
metadef_properties.c.name,
name=met_prop_index_name)
migrate_engine.execute(DropConstraint(constraint))
metadef_namespaces_name = (
inspector.get_unique_constraints(
'metadef_namespaces')[0]['name'])
constraint = UniqueConstraint(
metadef_namespaces.c.namespace,
name=metadef_namespaces_name)
migrate_engine.execute(DropConstraint(constraint))
metadef_resource_types_name = (inspector.get_unique_constraints(
'metadef_resource_types')[0]['name'])
constraint = UniqueConstraint(
metadef_resource_types.c.name,
name=metadef_resource_types_name)
migrate_engine.execute(DropConstraint(constraint))
constraint = UniqueConstraint(
metadef_tags.c.namespace_id,
metadef_tags.c.name,
name='metadef_tags_namespace_id_name_key')
migrate_engine.execute(DropConstraint(constraint))
Index('ix_metadef_ns_res_types_namespace_id',
metadef_ns_res_types.c.namespace_id).create()
Index('ix_metadef_namespaces_namespace',
metadef_namespaces.c.namespace).create()
Index('ix_metadef_namespaces_owner', metadef_namespaces.c.owner).create()
Index('ix_metadef_objects_name', metadef_objects.c.name).create()
Index('ix_metadef_objects_namespace_id',
metadef_objects.c.namespace_id).create()
Index('ix_metadef_properties_name', metadef_properties.c.name).create()
Index('ix_metadef_properties_namespace_id',
metadef_properties.c.namespace_id).create()
if meta.bind.name == "ibm_db_sa":
# For db2, add these constraints back. It should not affect the
# original logic for other database backends.
for (constraint_name, cols) in constraints:
_change_db2_unique_constraint('create', constraint_name, *cols)
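Several of the migrations above discover constraint names through SQLAlchemy's reflection API instead of hard-coding them (the inspector.get_unique_constraints() calls). A small sketch, assuming a throw-away in-memory SQLite database, of the structure that call returns:

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        UniqueConstraint, create_engine, inspect)

engine = create_engine('sqlite://')   # in-memory database, discarded afterwards
meta = MetaData()
Table('metadef_tags', meta,
      Column('id', Integer, primary_key=True),
      Column('namespace_id', Integer),
      Column('name', String(80)),
      UniqueConstraint('namespace_id', 'name',
                       name='uq_metadef_tags_namespace_id_name'))
meta.create_all(engine)

inspector = inspect(engine)
print(inspector.get_unique_constraints('metadef_tags'))
# e.g. [{'name': 'uq_metadef_tags_namespace_id_name',
#        'column_names': ['namespace_id', 'name']}]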

View File

@ -1,24 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import (Table, Index)
def upgrade(migrate_engine):
if migrate_engine.name == 'mysql':
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
metadef_tags = Table('metadef_tags', meta, autoload=True)
Index('namespace_id', metadef_tags.c.namespace_id,
metadef_tags.c.name).drop()

View File

@ -1,212 +0,0 @@
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table)
from glance.db.sqlalchemy.schema import (
BigInteger, Boolean, DateTime, Integer, Numeric, String, Text,
create_tables) # noqa
def define_artifacts_table(meta):
artifacts = Table('artifacts',
meta,
Column('id', String(36), primary_key=True,
nullable=False),
Column('name', String(255), nullable=False),
Column('type_name', String(255), nullable=False),
Column('type_version_prefix', BigInteger(),
nullable=False),
Column('type_version_suffix', String(255)),
Column('type_version_meta', String(255)),
Column('version_prefix', BigInteger(), nullable=False),
Column('version_suffix', String(255)),
Column('version_meta', String(255)),
Column('description', Text()),
Column('visibility', String(32), nullable=False),
Column('state', String(32), nullable=False),
Column('owner', String(255), nullable=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(),
nullable=False),
Column('deleted_at', DateTime()),
Column('published_at', DateTime()),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
Index('ix_artifact_name_and_version', artifacts.c.name,
artifacts.c.version_prefix, artifacts.c.version_suffix)
Index('ix_artifact_type', artifacts.c.type_name,
artifacts.c.type_version_prefix, artifacts.c.type_version_suffix)
Index('ix_artifact_state', artifacts.c.state)
Index('ix_artifact_owner', artifacts.c.owner)
Index('ix_artifact_visibility', artifacts.c.visibility)
return artifacts
def define_artifact_tags_table(meta):
artifact_tags = Table('artifact_tags',
meta,
Column('id', String(36), primary_key=True,
nullable=False),
Column('artifact_id', String(36),
ForeignKey('artifacts.id'), nullable=False),
Column('value', String(255), nullable=False),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
Index('ix_artifact_tags_artifact_id', artifact_tags.c.artifact_id)
Index('ix_artifact_tags_artifact_id_tag_value',
artifact_tags.c.artifact_id, artifact_tags.c.value)
return artifact_tags
def define_artifact_dependencies_table(meta):
artifact_dependencies = Table('artifact_dependencies',
meta,
Column('id', String(36), primary_key=True,
nullable=False),
Column('artifact_source', String(36),
ForeignKey('artifacts.id'),
nullable=False),
Column('artifact_dest', String(36),
ForeignKey('artifacts.id'),
nullable=False),
Column('artifact_origin', String(36),
ForeignKey('artifacts.id'),
nullable=False),
Column('is_direct', Boolean(),
nullable=False),
Column('position', Integer()),
Column('name', String(36)),
Column('created_at', DateTime(),
nullable=False),
Column('updated_at', DateTime(),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
Index('ix_artifact_dependencies_source_id',
artifact_dependencies.c.artifact_source)
Index('ix_artifact_dependencies_dest_id',
artifact_dependencies.c.artifact_dest)
Index('ix_artifact_dependencies_origin_id',
artifact_dependencies.c.artifact_origin)
Index('ix_artifact_dependencies_direct_dependencies',
artifact_dependencies.c.artifact_source,
artifact_dependencies.c.is_direct)
return artifact_dependencies
def define_artifact_blobs_table(meta):
artifact_blobs = Table('artifact_blobs',
meta,
Column('id', String(36), primary_key=True,
nullable=False),
Column('artifact_id', String(36),
ForeignKey('artifacts.id'),
nullable=False),
Column('size', BigInteger(), nullable=False),
Column('checksum', String(32)),
Column('name', String(255), nullable=False),
Column('item_key', String(329)),
Column('position', Integer()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
Index('ix_artifact_blobs_artifact_id',
artifact_blobs.c.artifact_id)
Index('ix_artifact_blobs_name',
artifact_blobs.c.name)
return artifact_blobs
def define_artifact_properties_table(meta):
artifact_properties = Table('artifact_properties',
meta,
Column('id', String(36),
primary_key=True,
nullable=False),
Column('artifact_id', String(36),
ForeignKey('artifacts.id'),
nullable=False),
Column('name', String(255),
nullable=False),
Column('string_value', String(255)),
Column('int_value', Integer()),
Column('numeric_value', Numeric()),
Column('bool_value', Boolean()),
Column('text_value', Text()),
Column('created_at', DateTime(),
nullable=False),
Column('updated_at', DateTime(),
nullable=False),
Column('position', Integer()),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
Index('ix_artifact_properties_artifact_id',
artifact_properties.c.artifact_id)
Index('ix_artifact_properties_name', artifact_properties.c.name)
return artifact_properties
def define_artifact_blob_locations_table(meta):
artifact_blob_locations = Table('artifact_blob_locations',
meta,
Column('id', String(36),
primary_key=True,
nullable=False),
Column('blob_id', String(36),
ForeignKey('artifact_blobs.id'),
nullable=False),
Column('value', Text(), nullable=False),
Column('created_at', DateTime(),
nullable=False),
Column('updated_at', DateTime(),
nullable=False),
Column('position', Integer()),
Column('status', String(36),
nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
Index('ix_artifact_blob_locations_blob_id',
artifact_blob_locations.c.blob_id)
return artifact_blob_locations
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_artifacts_table(meta),
define_artifact_tags_table(meta),
define_artifact_properties_table(meta),
define_artifact_blobs_table(meta),
define_artifact_blob_locations_table(meta),
define_artifact_dependencies_table(meta)]
create_tables(tables)

View File

@ -1,442 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy
from sqlalchemy import (func, Index, inspect, orm, String, Table, type_coerce)
# The _upgrade..._get_duplicates() helpers are separate functions to
# accommodate sqlite, which locks the database against updates for as long
# as db_recs is active.
# In addition, sqlite doesn't support the 'concat' function between
# Strings and Integers, so the updating of records is also adjusted.
def _upgrade_metadef_namespaces_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_namespaces.c.id),
metadef_namespaces.c.namespace)
.group_by(metadef_namespaces.c.namespace)
.having(func.count(metadef_namespaces.c.namespace) > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace': row[1]})
session.close()
return dbrecs
def _upgrade_metadef_objects_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_objects = Table('metadef_objects', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_objects.c.id),
metadef_objects.c.namespace_id,
metadef_objects.c.name)
.group_by(metadef_objects.c.namespace_id,
metadef_objects.c.name)
.having(func.count() > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]})
session.close()
return dbrecs
def _upgrade_metadef_properties_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_properties = Table('metadef_properties', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_properties.c.id),
metadef_properties.c.namespace_id,
metadef_properties.c.name)
.group_by(metadef_properties.c.namespace_id,
metadef_properties.c.name)
.having(func.count() > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]})
session.close()
return dbrecs
def _upgrade_metadef_tags_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_tags = Table('metadef_tags', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_tags.c.id),
metadef_tags.c.namespace_id,
metadef_tags.c.name)
.group_by(metadef_tags.c.namespace_id,
metadef_tags.c.name)
.having(func.count() > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]})
session.close()
return dbrecs
def _upgrade_metadef_resource_types_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_resource_types.c.id),
metadef_resource_types.c.name)
.group_by(metadef_resource_types.c.name)
.having(func.count(metadef_resource_types.c.name) > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'name': row[1]})
session.close()
return dbrecs
def _upgrade_data(migrate_engine):
# Rename duplicates to be unique.
meta = sqlalchemy.schema.MetaData(migrate_engine)
# ORM tables
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
# Fix duplicate metadef_namespaces
# Update the non-first record(s) with a unique namespace value
dbrecs = _upgrade_metadef_namespaces_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_namespaces.update()
.where(metadef_namespaces.c.id > row['id'])
.where(metadef_namespaces.c.namespace == row['namespace'])
)
if migrate_engine.name == 'sqlite':
s = (s.values(namespace=(row['namespace'] + '-DUPL-' +
type_coerce(metadef_namespaces.c.id,
String)),
display_name=(row['namespace'] + '-DUPL-' +
type_coerce(metadef_namespaces.c.id,
String))))
else:
s = s.values(namespace=func.concat(row['namespace'],
'-DUPL-',
metadef_namespaces.c.id),
display_name=func.concat(row['namespace'],
'-DUPL-',
metadef_namespaces.c.id))
s.execute()
# Fix duplicate metadef_objects
dbrecs = _upgrade_metadef_objects_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_objects.update()
.where(metadef_objects.c.id > row['id'])
.where(metadef_objects.c.namespace_id == row['namespace_id'])
.where(metadef_objects.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-'
+ type_coerce(metadef_objects.c.id, String))))
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_objects.c.id))
s.execute()
# Fix duplicate metadef_properties
dbrecs = _upgrade_metadef_properties_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_properties.update()
.where(metadef_properties.c.id > row['id'])
.where(metadef_properties.c.namespace_id == row['namespace_id'])
.where(metadef_properties.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-' +
type_coerce(metadef_properties.c.id, String)))
)
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_properties.c.id))
s.execute()
# Fix duplicate metadef_tags
dbrecs = _upgrade_metadef_tags_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_tags.update()
.where(metadef_tags.c.id > row['id'])
.where(metadef_tags.c.namespace_id == row['namespace_id'])
.where(metadef_tags.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-' +
type_coerce(metadef_tags.c.id, String)))
)
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_tags.c.id))
s.execute()
# Fix duplicate metadef_resource_types
dbrecs = _upgrade_metadef_resource_types_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_resource_types.update()
.where(metadef_resource_types.c.id > row['id'])
.where(metadef_resource_types.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-' +
type_coerce(metadef_resource_types.c.id,
String)))
)
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_resource_types.c.id))
s.execute()
def _update_sqlite_namespace_id_name_constraint(metadef, metadef_namespaces,
new_constraint_name,
new_fk_name):
migrate.UniqueConstraint(
metadef.c.namespace_id, metadef.c.name).drop()
migrate.UniqueConstraint(
metadef.c.namespace_id, metadef.c.name,
name=new_constraint_name).create()
migrate.ForeignKeyConstraint(
[metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=new_fk_name).create()
def _drop_unique_constraint_if_exists(inspector, table_name, metadef):
name = _get_unique_constraint_name(inspector,
table_name,
['namespace_id', 'name'])
if name:
migrate.UniqueConstraint(metadef.c.namespace_id,
metadef.c.name,
name=name).drop()
def _drop_index_with_fk_constraint(metadef, metadef_namespaces,
index_name,
fk_old_name, fk_new_name):
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_old_name)
fkc.drop()
if index_name:
Index(index_name, metadef.c.namespace_id).drop()
# Re-create the fk under a name that is consistent across all databases
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_new_name)
fkc.create()
def _get_unique_constraint_name(inspector, table_name, columns):
constraints = inspector.get_unique_constraints(table_name)
for constraint in constraints:
if set(constraint['column_names']) == set(columns):
return constraint['name']
return None
def _get_fk_constraint_name(inspector, table_name, columns):
constraints = inspector.get_foreign_keys(table_name)
for constraint in constraints:
if set(constraint['constrained_columns']) == set(columns):
return constraint['name']
return None
def upgrade(migrate_engine):
_upgrade_data(migrate_engine)
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
inspector = inspect(migrate_engine)
# ORM tables
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
metadef_ns_res_types = Table('metadef_namespace_resource_types',
meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
# Drop the bad, non-unique indices.
if migrate_engine.name == 'sqlite':
# For sqlite:
# Only after the unique constraints have been added should the indices
# be dropped. If done the other way around, sqlite complains during
# constraint adding/dropping that the index does not / already exists.
# Note: on sqlite, _get_unique_constraint_name and _get_fk_constraint_name
# return None even for constraints that do in fact exist. Also,
# get_index_names returns names, but those names cannot be used with
# the Index(name, ...).drop() command, so sqlite gets its own section.
# Objects
_update_sqlite_namespace_id_name_constraint(
metadef_objects, metadef_namespaces,
'uq_metadef_objects_namespace_id_name',
'metadef_objects_fk_1')
# Properties
_update_sqlite_namespace_id_name_constraint(
metadef_properties, metadef_namespaces,
'uq_metadef_properties_namespace_id_name',
'metadef_properties_fk_1')
# Tags
_update_sqlite_namespace_id_name_constraint(
metadef_tags, metadef_namespaces,
'uq_metadef_tags_namespace_id_name',
'metadef_tags_fk_1')
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace).drop()
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').create()
# ResourceTypes
migrate.UniqueConstraint(
metadef_resource_types.c.name).drop()
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').create()
# Now drop the bad indices
Index('ix_metadef_objects_namespace_id',
metadef_objects.c.namespace_id,
metadef_objects.c.name).drop()
Index('ix_metadef_properties_namespace_id',
metadef_properties.c.namespace_id,
metadef_properties.c.name).drop()
Index('ix_metadef_tags_namespace_id',
metadef_tags.c.namespace_id,
metadef_tags.c.name).drop()
else:
# First drop the bad non-unique indices.
# To do that (for mysql), we must first drop the foreign key constraints
# BY NAME and only then drop the bad indices.
# Finally, re-create the foreign key constraints with a consistent name.
# DB2 still has unique constraints, but they are badly named; drop them
# here and they will be recreated in the final step.
# (A post-upgrade verification sketch follows this function.)
name = _get_unique_constraint_name(inspector, 'metadef_namespaces',
['namespace'])
if name:
migrate.UniqueConstraint(metadef_namespaces.c.namespace,
name=name).drop()
_drop_unique_constraint_if_exists(inspector, 'metadef_objects',
metadef_objects)
_drop_unique_constraint_if_exists(inspector, 'metadef_properties',
metadef_properties)
_drop_unique_constraint_if_exists(inspector, 'metadef_tags',
metadef_tags)
name = _get_unique_constraint_name(inspector, 'metadef_resource_types',
['name'])
if name:
migrate.UniqueConstraint(metadef_resource_types.c.name,
name=name).drop()
# Objects
_drop_index_with_fk_constraint(
metadef_objects, metadef_namespaces,
'ix_metadef_objects_namespace_id',
_get_fk_constraint_name(
inspector, 'metadef_objects', ['namespace_id']),
'metadef_objects_fk_1')
# Properties
_drop_index_with_fk_constraint(
metadef_properties, metadef_namespaces,
'ix_metadef_properties_namespace_id',
_get_fk_constraint_name(
inspector, 'metadef_properties', ['namespace_id']),
'metadef_properties_fk_1')
# Tags
_drop_index_with_fk_constraint(
metadef_tags, metadef_namespaces,
'ix_metadef_tags_namespace_id',
_get_fk_constraint_name(
inspector, 'metadef_tags', ['namespace_id']),
'metadef_tags_fk_1')
# Drop Others without fk constraints.
Index('ix_metadef_namespaces_namespace',
metadef_namespaces.c.namespace).drop()
# The next two indexes don't exist in ibm_db_sa, but drop them everywhere else.
if migrate_engine.name != 'ibm_db_sa':
Index('ix_metadef_resource_types_name',
metadef_resource_types.c.name).drop()
# Not needed due to primary key on same columns
Index('ix_metadef_ns_res_types_res_type_id_ns_id',
metadef_ns_res_types.c.resource_type_id,
metadef_ns_res_types.c.namespace_id).drop()
# Now, add back the dropped indexes as unique constraints
if migrate_engine.name != 'sqlite':
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').create()
# Objects
migrate.UniqueConstraint(
metadef_objects.c.namespace_id,
metadef_objects.c.name,
name='uq_metadef_objects_namespace_id_name').create()
# Properties
migrate.UniqueConstraint(
metadef_properties.c.namespace_id,
metadef_properties.c.name,
name='uq_metadef_properties_namespace_id_name').create()
# Tags
migrate.UniqueConstraint(
metadef_tags.c.namespace_id,
metadef_tags.c.name,
name='uq_metadef_tags_namespace_id_name').create()
# Resource Types
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').create()
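# NOTE: A post-upgrade verification sketch (not part of the original
# migration): after this upgrade, every backend should expose the same
# unique-constraint names. The table and constraint names come from the
# constants used above; the engine is whatever the deployment uses.
# (Some backends report unique constraints via get_indexes() instead, so
# treat a "missing" result as a prompt to look closer, not a hard failure.)
def _example_check_renamed_constraints(engine):
    from sqlalchemy import inspect

    expected = {
        'metadef_namespaces': 'uq_metadef_namespaces_namespace',
        'metadef_objects': 'uq_metadef_objects_namespace_id_name',
        'metadef_properties': 'uq_metadef_properties_namespace_id_name',
        'metadef_tags': 'uq_metadef_tags_namespace_id_name',
        'metadef_resource_types': 'uq_metadef_resource_types_name',
    }
    inspector = inspect(engine)
    missing = {}
    for table, name in expected.items():
        names = {c['name'] for c in inspector.get_unique_constraints(table)}
        if name not in names:
            missing[table] = name
    return missing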

View File

@ -1,29 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Index
CREATED_AT_INDEX = 'created_at_image_idx'
UPDATED_AT_INDEX = 'updated_at_image_idx'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = Table('images', meta, autoload=True)
created_index = Index(CREATED_AT_INDEX, images.c.created_at)
created_index.create(migrate_engine)
updated_index = Index(UPDATED_AT_INDEX, images.c.updated_at)
updated_index.create(migrate_engine)

View File

@ -1,26 +0,0 @@
# Copyright (c) 2016 Hewlett Packard Enterprise Software, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
resource_types_table = Table('metadef_resource_types', meta, autoload=True)
resource_types_table.update(values={'name': 'OS::Nova::Server'}).where(
resource_types_table.c.name == 'OS::Nova::Instance').execute()

View File

@ -1,51 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Enum, Index, MetaData, Table, select, not_, and_
from sqlalchemy.engine import reflection
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
images = Table('images', meta, autoload=True)
enum = Enum('private', 'public', 'shared', 'community', metadata=meta,
name='image_visibility')
enum.create()
images.create_column(Column('visibility', enum, nullable=False,
server_default='shared'))
visibility_index = Index('visibility_image_idx', images.c.visibility)
visibility_index.create(migrate_engine)
images.update(values={'visibility': 'public'}).where(
images.c.is_public).execute()
image_members = Table('image_members', meta, autoload=True)
# NOTE(dharinic): Mark all the non-public images as 'private' first
images.update().values(visibility='private').where(
not_(images.c.is_public)).execute()
# NOTE(dharinic): Of the images marked 'private' above, flag as 'shared'
# those that have non-deleted image_members entries
images.update().values(visibility='shared').where(and_(
images.c.visibility == 'private', images.c.id.in_(select(
[image_members.c.image_id]).distinct().where(
not_(image_members.c.deleted))))).execute()
insp = reflection.Inspector.from_engine(migrate_engine)
for index in insp.get_indexes('images'):
if 'ix_images_is_public' == index['name']:
Index('ix_images_is_public', images.c.is_public).drop()
break
images.c.is_public.drop()
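# NOTE: A sanity-check sketch (not part of the original migration), written
# against SQLAlchemy 1.4+ syntax rather than the legacy style used above.
# It reports how many images ended up in each visibility bucket after the
# backfill; the engine is whatever the deployment uses.
def _example_visibility_counts(engine):
    from sqlalchemy import MetaData, Table, func, select

    meta = MetaData()
    images = Table('images', meta, autoload_with=engine)
    stmt = (select(images.c.visibility, func.count())
            .group_by(images.c.visibility))
    with engine.connect() as conn:
        # e.g. {'public': 12, 'private': 3, 'shared': 5}
        return dict(conn.execute(stmt).all())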

View File

@ -1,162 +0,0 @@
CREATE TEMPORARY TABLE images_backup (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
protected BOOLEAN DEFAULT 0 NOT NULL,
virtual_size INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1)),
CHECK (protected IN (0, 1))
);
INSERT INTO images_backup
SELECT id,
name,
size,
status,
is_public,
created_at,
updated_at,
deleted_at,
deleted,
disk_format,
container_format,
checksum,
owner,
min_disk,
min_ram,
protected,
virtual_size
FROM images;
DROP TABLE images;
CREATE TABLE images (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
protected BOOLEAN DEFAULT 0 NOT NULL,
virtual_size INTEGER,
visibility VARCHAR(9) DEFAULT 'shared' NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (protected IN (0, 1)),
CONSTRAINT image_visibility CHECK (visibility IN ('private', 'public', 'shared', 'community'))
);
CREATE INDEX checksum_image_idx ON images (checksum);
CREATE INDEX visibility_image_idx ON images (visibility);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX owner_image_idx ON images (owner);
CREATE INDEX created_at_image_idx ON images (created_at);
CREATE INDEX updated_at_image_idx ON images (updated_at);
-- Copy over all the 'public' rows
INSERT INTO images (
id,
name,
size,
status,
created_at,
updated_at,
deleted_at,
deleted,
disk_format,
container_format,
checksum,
owner,
min_disk,
min_ram,
protected,
virtual_size
)
SELECT id,
name,
size,
status,
created_at,
updated_at,
deleted_at,
deleted,
disk_format,
container_format,
checksum,
owner,
min_disk,
min_ram,
protected,
virtual_size
FROM images_backup
WHERE is_public=1;
UPDATE images SET visibility='public';
-- Now copy over the 'private' rows
INSERT INTO images (
id,
name,
size,
status,
created_at,
updated_at,
deleted_at,
deleted,
disk_format,
container_format,
checksum,
owner,
min_disk,
min_ram,
protected,
virtual_size
)
SELECT id,
name,
size,
status,
created_at,
updated_at,
deleted_at,
deleted,
disk_format,
container_format,
checksum,
owner,
min_disk,
min_ram,
protected,
virtual_size
FROM images_backup
WHERE is_public=0;
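-- NOTE: the rows copied above (is_public=0) arrive with the column default
-- of 'shared'; mark them all 'private' first, then flip back to 'shared'
-- only those images that still have non-deleted members.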
UPDATE images SET visibility='private' WHERE visibility='shared';
UPDATE images SET visibility='shared' WHERE visibility='private' AND id IN (SELECT DISTINCT image_id FROM image_members WHERE deleted != 1);
DROP TABLE images_backup;

View File

@ -20,8 +20,6 @@ Various conveniences used for migration scripts
from oslo_log import log as logging
import sqlalchemy.types
from glance.i18n import _LI
LOG = logging.getLogger(__name__)
@ -60,56 +58,3 @@ def PickleType():
def Numeric():
return sqlalchemy.types.Numeric()
def from_migration_import(module_name, fromlist):
"""
Import items from a migration module and return them
:param module_name: name of migration module to import from
(ex: 001_add_images_table)
:param fromlist: list of items to import (ex: define_images_table)
:returns: list of the requested items from the module
This bit of ugliness warrants an explanation:
As you're writing migrations, you'll frequently want to refer to
tables defined in previous migrations.
In the interest of not repeating yourself, you need a way of importing
that table into a 'future' migration.
However, tables are bound to metadata, so what you need to import is
really a table factory, which you can late-bind to your current
metadata object.
Moreover, migrations begin with a number (001...), which means they
aren't valid Python identifiers. This means we can't perform a
'normal' import on them (Python would raise a SyntaxError). Instead, we
need to use __import__ magic to bring the table-factory into our
namespace.
Example Usage:
(define_images_table,) = from_migration_import(
'001_add_images_table', ['define_images_table'])
images = define_images_table(meta)
# Refer to images table
"""
module_path = 'glance.db.sqlalchemy.migrate_repo.versions.%s' % module_name
module = __import__(module_path, globals(), locals(), fromlist, 0)
return [getattr(module, item) for item in fromlist]
def create_tables(tables):
for table in tables:
LOG.info(_LI("creating table %(table)s"), {'table': table})
table.create()
def drop_tables(tables):
for table in tables:
LOG.info(_LI("dropping table %(table)s"), {'table': table})
table.drop()
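# NOTE: Not part of the original module: a sketch of the pattern the
# from_migration_import() docstring describes, as it would appear in a later
# migration script. The import path follows the convention used elsewhere in
# these migrations and is an assumption, as are the migration/table names.

import sqlalchemy

from glance.db.sqlalchemy.schema import (
    create_tables, drop_tables, from_migration_import)


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    # Late-bind the images table factory defined in migration 001 to this
    # migration's metadata, then create the table.
    (define_images_table,) = from_migration_import(
        '001_add_images_table', ['define_images_table'])
    create_tables([define_images_table(meta)])


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    (define_images_table,) = from_migration_import(
        '001_add_images_table', ['define_images_table'])
    drop_tables([define_images_table(meta)])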