From 8e825c4024f9feb18ca5e2416484ef4096c3bb06 Mon Sep 17 00:00:00 2001
From: Hengqing Hu
Date: Wed, 8 Feb 2012 00:02:54 +0800
Subject: [PATCH] Make database downgrade work

Fixes bug #854905

Use sqlalchemy reflection whenever possible:
http://sqlalchemy-migrate.readthedocs.org/en/latest/versioning.html

Work around the sqlalchemy-migrate sqlite 'bool column not deletable' issue:
http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=143
Add the following SQL scripts for sqlite:
  002_sqlite_downgrade.sql
  015_sqlite_downgrade.sql
  033_sqlite_downgrade.sql
  050_sqlite_downgrade.sql
  068_sqlite_downgrade.sql

Work around the sqlalchemy-migrate sqlite 'table with foreign key column not
deletable' issue:
http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=94
Add the following SQL scripts for sqlite:
  003_sqlite_downgrade.sql
  006_sqlite_downgrade.sql
  007_sqlite_downgrade.sql
  012_sqlite_upgrade.sql
  013_sqlite_downgrade.sql
  020_sqlite_downgrade.sql
  030_sqlite_downgrade.sql
  038_sqlite_downgrade.sql
  042_sqlite_downgrade.sql
  053_sqlite_downgrade.sql
  067_sqlite_downgrade.sql

Work around the sqlalchemy-migrate 'migrate drops engine reference' issue:
http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=72

Add the following SQL script so a long primary key works with a utf-8 mysql
table:
  072_mysql_upgrade.sql

Add the following SQL script for postgresql:
  002_postgresql_downgrade.sql

Add snake-walk test cases for database migration, based on the Glance
migration tests.

Change-Id: Ib454ecb4662bbf47736c1b12d9a4f969f180ceb6
---
.../migrate_repo/versions/001_austin.py | 1059 +++++++++-------- .../migrate_repo/versions/002_bexar.py | 364 +++--- .../versions/002_postgresql_downgrade.sql | 20 + .../versions/002_sqlite_downgrade.sql | 388 ++++++ .../versions/003_add_label_to_networks.py | 38 +- .../versions/003_sqlite_downgrade.sql | 111 ++ .../versions/004_add_zone_tables.py | 63 +- .../versions/005_add_instance_metadata.py | 85 +- .../006_add_provider_data_to_volumes.py | 62 +- .../versions/006_sqlite_downgrade.sql | 113 ++ .../versions/007_add_ipv6_to_fixed_ips.py | 101 +- .../versions/007_sqlite_downgrade.sql | 79 ++ .../versions/008_add_instance_types.py | 50 +- .../versions/009_add_instance_migrations.py | 62 +- .../versions/010_add_os_type_to_instances.py | 23 +- .../versions/011_live_migration.py | 90 +- .../versions/012_add_ipv6_flatmanager.py | 174 +-- .../versions/012_sqlite_upgrade.sql | 195 +++ .../versions/013_add_flavors_to_migrations.py | 31 +- .../versions/013_sqlite_downgrade.sql | 69 ++ .../014_add_instance_type_id_to_instances.py | 49 +- .../015_add_auto_assign_to_floating_ips.py | 20 +- .../versions/015_sqlite_downgrade.sql | 62 + .../versions/016_make_quotas_key_and_value.py | 24 +- .../017_make_instance_type_id_an_integer.py | 11 +- .../018_rename_server_management_url.py | 14 +- .../019_add_volume_snapshot_support.py | 71 +- .../020_add_snapshot_id_to_volumes.py | 31 +- .../versions/020_sqlite_downgrade.sql | 119 ++ .../versions/021_rename_image_ids.py | 12 +- .../versions/022_set_engine_mysql_innodb.py | 5 +- .../versions/023_add_vm_mode_to_instances.py | 21 +- .../versions/024_add_block_device_mapping.py | 102 +- .../versions/025_add_uuid_to_instances.py | 18 +- .../versions/026_add_agent_table.py | 93 +- .../027_add_provider_firewall_rules.py | 70 +- .../028_add_instance_type_extra_specs.py | 64 +- .../versions/029_add_zone_weight_offsets.py | 25 +- .../migrate_repo/versions/030_multi_nic.py | 102 +- .../versions/030_sqlite_downgrade.sql | 377 ++++++ .../031_fk_fixed_ips_virtual_interface_id.py | 11 +-
.../versions/032_add_root_device_name.py | 31 +- .../migrate_repo/versions/033_ha_network.py | 14 +- .../versions/033_sqlite_downgrade.sql | 193 +++ .../034_change_instance_id_in_migrations.py | 20 +- .../versions/035_secondary_dns.py | 11 +- .../036_change_flavor_id_in_migrations.py | 41 +- .../versions/037_instances_drop_admin_pass.py | 29 +- .../038_add_uuid_to_virtual_interfaces.py | 19 +- .../versions/038_sqlite_downgrade.sql | 63 + .../versions/039_add_instances_accessip.py | 37 +- .../versions/040_add_uuid_to_networks.py | 18 +- .../041_add_config_drive_to_instances.py | 20 +- .../042_add_volume_types_and_extradata.py | 151 +-- .../versions/042_sqlite_downgrade.sql | 129 ++ .../migrate_repo/versions/043_add_vsa_data.py | 76 +- .../versions/044_update_instance_states.py | 23 +- .../versions/045_add_network_priority.py | 21 +- .../versions/046_add_instance_swap.py | 37 +- .../047_remove_instances_fk_from_vif.py | 5 +- .../versions/047_sqlite_downgrade.sql | 1 - .../versions/048_add_zone_name.py | 16 +- .../versions/049_add_instances_progress.py | 20 +- .../050_add_disk_config_to_instances.py | 26 +- .../versions/050_sqlite_downgrade.sql | 207 ++++ .../051_add_vcpu_weight_to_instance_types.py | 18 +- .../versions/052_kill_export_devices.py | 50 +- ...connection_info_to_block_device_mapping.py | 15 +- .../versions/053_sqlite_downgrade.sql | 87 ++ .../versions/054_add_bw_usage_data_cache.py | 40 +- .../versions/055_convert_flavor_id_to_str.py | 21 +- .../versions/056_add_s3_images.py | 44 +- .../versions/057_add_sm_driver_tables.py | 146 +-- .../versions/058_rename_managed_disk.py | 11 +- .../059_split_rxtx_quota_into_network.py | 25 +- .../versions/059_sqlite_downgrade.sql | 137 +-- .../060_remove_network_fk_from_vif.py | 3 +- .../versions/060_sqlite_downgrade.sql | 1 - .../061_add_index_to_instance_uuid.py | 4 +- .../062_add_instance_info_cache_table.py | 50 +- .../versions/063_add_instance_faults_table.py | 53 +- ...instance_id_to_uuid_in_instance_actions.py | 19 +- .../065_add_index_to_instance_project_id.py | 4 +- .../066_preload_instance_info_cache_table.py | 6 +- ...7_add_pool_and_interface_to_floating_ip.py | 15 +- .../versions/067_sqlite_downgrade.sql | 69 ++ .../versions/068_add_instance_attribute.py | 20 +- .../versions/068_sqlite_downgrade.sql | 219 ++++ .../versions/069_block_migration.py | 31 +- .../versions/070_sqlite_downgrade.sql | 2 +- .../versions/070_untie_nova_network_models.py | 3 +- .../versions/071_add_host_aggregate_tables.py | 132 +- .../versions/072_add_dns_table.py | 64 +- .../versions/072_mysql_upgrade.sql | 13 + .../migrate_repo/versions/073_add_capacity.py | 26 +- .../versions/074_change_flavor_local_gb.py | 22 +- ...75_convert_bw_usage_to_store_network_id.py | 103 +- .../versions/076_remove_unique_constraints.py | 4 +- .../versions/077_convert_to_utf8.py | 3 +- .../versions/078_add_rpc_info_to_zones.py | 34 +- .../versions/078_sqlite_downgrade.sql | 2 +- .../079_add_zone_name_to_instances.py | 9 +- nova/tests/test_migrations.conf | 9 + nova/tests/test_migrations.py | 224 ++++ run_tests.sh | 13 + 105 files changed, 5139 insertions(+), 2298 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql create mode 100644 
nova/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql create mode 100644 nova/tests/test_migrations.conf create mode 100644 nova/tests/test_migrations.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py index 19b2f9678cee..9107cde91a7d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -22,497 +22,562 @@ from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String from sqlalchemy import Table, Text from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -auth_tokens = Table('auth_tokens', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('token_hash', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('user_id', Integer()), - Column('server_manageent_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('storage_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('cdn_management_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -export_devices = Table('export_devices', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('shelf_id', Integer()), - Column('blade_id', Integer()), - Column('volume_id', - Integer(), - ForeignKey('volumes.id'), - nullable=True), - ) - - -fixed_ips = Table('fixed_ips', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('address', - String(length=255, convert_unicode=False, assert_unicode=None, 
- unicode_error=None, _warn_on_bytestring=False)), - Column('network_id', - Integer(), - ForeignKey('networks.id'), - nullable=True), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=True), - Column('allocated', Boolean(create_constraint=True, name=None)), - Column('leased', Boolean(create_constraint=True, name=None)), - Column('reserved', Boolean(create_constraint=True, name=None)), - ) - - -floating_ips = Table('floating_ips', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('fixed_ip_id', - Integer(), - ForeignKey('fixed_ips.id'), - nullable=True), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -instances = Table('instances', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('internal_id', Integer()), - Column('admin_pass', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('image_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('kernel_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('ramdisk_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('server_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('launch_index', Integer()), - Column('key_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('key_data', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('state', Integer()), - Column('state_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('memory_mb', Integer()), - Column('vcpus', Integer()), - Column('local_gb', Integer()), - Column('hostname', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_type', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, 
_warn_on_bytestring=False)), - Column('user_data', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('reservation_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('mac_address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('scheduled_at', DateTime(timezone=False)), - Column('launched_at', DateTime(timezone=False)), - Column('terminated_at', DateTime(timezone=False)), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -key_pairs = Table('key_pairs', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('fingerprint', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('public_key', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -networks = Table('networks', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('injected', Boolean(create_constraint=True, name=None)), - Column('cidr', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('netmask', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('bridge', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('gateway', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('broadcast', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('dns', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vlan', Integer()), - Column('vpn_public_address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vpn_public_port', Integer()), - Column('vpn_private_address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('dhcp_start', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - 
unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -projects = Table('projects', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_manager', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('users.id')), - ) - - -quotas = Table('quotas', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instances', Integer()), - Column('cores', Integer()), - Column('volumes', Integer()), - Column('gigabytes', Integer()), - Column('floating_ips', Integer()), - ) - - -security_groups = Table('security_groups', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -security_group_inst_assoc = Table('security_group_instance_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('security_group_id', - Integer(), - ForeignKey('security_groups.id')), - Column('instance_id', Integer(), ForeignKey('instances.id')), - ) - - -security_group_rules = Table('security_group_rules', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('parent_group_id', - Integer(), - ForeignKey('security_groups.id')), - Column('protocol', - String(length=255, convert_unicode=False, 
assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('from_port', Integer()), - Column('to_port', Integer()), - Column('cidr', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('group_id', - Integer(), - ForeignKey('security_groups.id')), - ) - - -services = Table('services', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('binary', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('topic', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('report_count', Integer(), nullable=False), - Column('disabled', Boolean(create_constraint=True, name=None)), - ) - - -users = Table('users', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('access_key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('secret_key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('is_admin', Boolean(create_constraint=True, name=None)), - ) - - -user_project_association = Table('user_project_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('users.id'), - primary_key=True, - nullable=False), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('projects.id'), - primary_key=True, - nullable=False), - ) - - -user_project_role_association = Table('user_project_role_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('role', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, 
_warn_on_bytestring=False), - primary_key=True, - nullable=False), - ForeignKeyConstraint(['user_id', - 'project_id'], - ['user_project_association.user_id', - 'user_project_association.project_id']), - ) - - -user_role_association = Table('user_role_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('users.id'), - primary_key=True, - nullable=False), - Column('role', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - ) - - -volumes = Table('volumes', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('ec2_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('size', Integer()), - Column('availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=True), - Column('mountpoint', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('attach_time', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('attach_status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('scheduled_at', DateTime(timezone=False)), - Column('launched_at', DateTime(timezone=False)), - Column('terminated_at', DateTime(timezone=False)), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + fixed_ips = Table('fixed_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('network_id', + Integer(), + ForeignKey('networks.id'), + nullable=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('allocated', Boolean(create_constraint=True, name=None)), + Column('leased', Boolean(create_constraint=True, name=None)), + Column('reserved', Boolean(create_constraint=True, name=None)), + ) + + floating_ips = Table('floating_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fixed_ip_id', + Integer(), + ForeignKey('fixed_ips.id'), + nullable=True), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + instances = Table('instances', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('internal_id', Integer()), + 
Column('admin_pass', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('image_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('kernel_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('ramdisk_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('server_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('launch_index', Integer()), + Column('key_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('key_data', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('state', Integer()), + Column('state_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('memory_mb', Integer()), + Column('vcpus', Integer()), + Column('local_gb', Integer()), + Column('hostname', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_data', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('reservation_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('mac_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + key_pairs = Table('key_pairs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fingerprint', + String(length=255, convert_unicode=False, + assert_unicode=None, + 
unicode_error=None, _warn_on_bytestring=False)), + Column('public_key', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + networks = Table('networks', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('injected', Boolean(create_constraint=True, name=None)), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('netmask', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('bridge', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('gateway', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('broadcast', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dns', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vlan', Integer()), + Column('vpn_public_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vpn_public_port', Integer()), + Column('vpn_private_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dhcp_start', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + projects = Table('projects', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_manager', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id')), + ) + + quotas = Table('quotas', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instances', Integer()), + 
Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + ) + + security_groups = Table('security_groups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + security_group_inst_assoc = Table('security_group_instance_association', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('security_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('instance_id', Integer(), ForeignKey('instances.id')), + ) + + security_group_rules = Table('security_group_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('parent_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('protocol', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('group_id', + Integer(), + ForeignKey('security_groups.id')), + ) + + services = Table('services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('binary', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('topic', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('report_count', Integer(), nullable=False), + Column('disabled', Boolean(create_constraint=True, name=None)), + ) + + users = Table('users', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, + assert_unicode=None, + 
unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('access_key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('secret_key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('is_admin', Boolean(create_constraint=True, name=None)), + ) + + user_project_association = Table('user_project_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id'), + primary_key=True, + nullable=False), + ) + + user_project_role_association = Table('user_project_role_association', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']), + ) + + user_role_association = Table('user_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ) + + volumes = Table('volumes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('ec2_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + 
assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size', Integer()), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('mountpoint', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_time', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) tables = [auth_tokens, instances, key_pairs, networks, fixed_ips, floating_ips, quotas, security_groups, security_group_inst_assoc, security_group_rules, services, users, projects, user_project_association, user_project_role_association, user_role_association, volumes, export_devices] + for table in tables: try: table.create() @@ -525,10 +590,38 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. 
- for table in (auth_tokens, export_devices, fixed_ips, floating_ips, - instances, key_pairs, networks, - projects, quotas, security_groups, security_group_inst_assoc, - security_group_rules, services, users, - user_project_association, user_project_role_association, - user_role_association, volumes): + meta = MetaData() + meta.bind = migrate_engine + + auth_tokens = Table('auth_tokens', meta, autoload=True) + export_devices = Table('export_devices', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + floating_ips = Table('floating_ips', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + key_pairs = Table('key_pairs', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + projects = Table('projects', meta, autoload=True) + quotas = Table('quotas', meta, autoload=True) + security_groups = Table('security_groups', meta, autoload=True) + security_group_inst_assoc = Table('security_group_instance_association', + meta, autoload=True) + security_group_rules = Table('security_group_rules', meta, autoload=True) + services = Table('services', meta, autoload=True) + users = Table('users', meta, autoload=True) + user_project_association = Table('user_project_association', meta, + autoload=True) + user_project_role_association = Table('user_project_role_association', + meta, + autoload=True) + user_role_association = Table('user_role_association', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + # table order matters, don't change + for table in (auth_tokens, export_devices, floating_ips, fixed_ips, + key_pairs, networks, + quotas, security_group_inst_assoc, + security_group_rules, security_groups, services, + user_project_role_association, user_project_association, + user_role_association, + projects, users, volumes, instances): table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py index 7389572d9467..7fcc7da5bab6 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -20,194 +20,139 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey from sqlalchemy import Integer, MetaData, String, Table, Text from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. 
-instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -services = Table('services', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -networks = Table('networks', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -# -# New Tables -# -certificates = Table('certificates', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('file_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -consoles = Table('consoles', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('instance_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_id', Integer()), - Column('password', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('port', Integer(), nullable=True), - Column('pool_id', - Integer(), - ForeignKey('console_pools.id')), - ) - - -console_pools = Table('console_pools', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('username', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('password', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('console_type', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('public_hostname', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('compute_host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -instance_actions = Table('instance_actions', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - 
Column('instance_id', - Integer(), - ForeignKey('instances.id')), - Column('action', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('error', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -iscsi_targets = Table('iscsi_targets', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('target_num', Integer()), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('volume_id', - Integer(), - ForeignKey('volumes.id'), - nullable=True), - ) - - -# -# Tables to alter -# -auth_tokens = Table('auth_tokens', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('token_hash', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('user_id', Integer()), - Column('server_manageent_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('storage_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('cdn_management_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -instances_availability_zone = Column( - 'availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - - -instances_locked = Column('locked', - Boolean(create_constraint=True, name=None)) - - -networks_cidr_v6 = Column( - 'cidr_v6', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - -networks_ra_server = Column( - 'ra_server', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - - -services_availability_zone = Column( - 'availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + instances = Table('instances', meta, autoload=True) + services = Table('services', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + auth_tokens = Table('auth_tokens', meta, autoload=True) + + # + # New Tables + # + certificates = Table('certificates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('file_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + consoles = Table('consoles', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', Integer()), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('port', Integer(), nullable=True), + Column('pool_id', + Integer(), + ForeignKey('console_pools.id')), + ) + + console_pools = Table('console_pools', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('console_type', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_hostname', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('compute_host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + instance_actions = Table('instance_actions', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, 
nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id')), + Column('action', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('error', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + tables = [certificates, console_pools, consoles, instance_actions, iscsi_targets] for table in tables: @@ -225,8 +170,67 @@ def upgrade(migrate_engine): unicode_error=None, _warn_on_bytestring=False)) + # + # New Columns + # + instances_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + instances_locked = Column('locked', + Boolean(create_constraint=True, name=None)) + + networks_cidr_v6 = Column( + 'cidr_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + networks_ra_server = Column( + 'ra_server', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + services_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + instances.create_column(instances_availability_zone) instances.create_column(instances_locked) networks.create_column(networks_cidr_v6) networks.create_column(networks_ra_server) services.create_column(services_availability_zone) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + instances = Table('instances', meta, autoload=True) + services = Table('services', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + auth_tokens = Table('auth_tokens', meta, autoload=True) + + certificates = Table('certificates', meta, autoload=True) + consoles = Table('consoles', meta, autoload=True) + console_pools = Table('console_pools', meta, autoload=True) + instance_actions = Table('instance_actions', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + + # table order matters, don't change + tables = [certificates, consoles, console_pools, instance_actions, + iscsi_targets] + for table in tables: + table.drop() + + auth_tokens.c.user_id.alter(type=Integer()) + + instances.drop_column('availability_zone') + instances.drop_column('locked') + networks.drop_column('cidr_v6') + networks.drop_column('ra_server') + services.drop_column('availability_zone') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql new file mode 100644 index 000000000000..cf5c1a20854a --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql 
@@ -0,0 +1,20 @@ +BEGIN; + + DROP TABLE certificates; + DROP TABLE consoles; + DROP TABLE console_pools; + DROP TABLE instance_actions; + DROP TABLE iscsi_targets; + + ALTER TABLE auth_tokens ADD COLUMN user_id_backup INTEGER; + UPDATE auth_tokens SET user_id_backup = CAST(user_id AS INTEGER); + ALTER TABLE auth_tokens DROP COLUMN user_id; + ALTER TABLE auth_tokens RENAME COLUMN user_id_backup TO user_id; + + ALTER TABLE instances DROP COLUMN availability_zone; + ALTER TABLE instances DROP COLUMN locked; + ALTER TABLE networks DROP COLUMN cidr_v6; + ALTER TABLE networks DROP COLUMN ra_server; + ALTER TABLE services DROP COLUMN availability_zone; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql new file mode 100644 index 000000000000..8c6a5becaac9 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql @@ -0,0 +1,388 @@ +BEGIN TRANSACTION; + + DROP TABLE certificates; + + DROP TABLE console_pools; + + DROP TABLE consoles; + + DROP TABLE instance_actions; + + DROP TABLE iscsi_targets; + + CREATE TEMPORARY TABLE auth_tokens_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + token_hash VARCHAR(255) NOT NULL, + user_id VARCHAR(255), + server_manageent_url VARCHAR(255), + storage_url VARCHAR(255), + cdn_management_url VARCHAR(255), + PRIMARY KEY (token_hash), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO auth_tokens_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + token_hash, + user_id, + server_manageent_url, + storage_url, + cdn_management_url + FROM auth_tokens; + + DROP TABLE auth_tokens; + + CREATE TABLE auth_tokens ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + token_hash VARCHAR(255) NOT NULL, + user_id INTEGER, + server_manageent_url VARCHAR(255), + storage_url VARCHAR(255), + cdn_management_url VARCHAR(255), + PRIMARY KEY (token_hash), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO auth_tokens + SELECT created_at, + updated_at, + deleted_at, + deleted, + token_hash, + user_id, + server_manageent_url, + storage_url, + cdn_management_url + FROM auth_tokens_backup; + + DROP TABLE auth_tokens_backup; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_id VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + instance_type VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + mac_address VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_id, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + 
vcpus, + local_gb, + hostname, + host, + instance_type, + user_data, + reservation_id, + mac_address, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_id VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + instance_type VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + mac_address VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_id, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + instance_type, + user_data, + reservation_id, + mac_address, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description + FROM instances_backup; + + DROP TABLE instances_backup; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host + FROM networks_backup; + + DROP TABLE networks_backup; + + CREATE 
TEMPORARY TABLE services_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + host VARCHAR(255), + binary VARCHAR(255), + topic VARCHAR(255), + report_count INTEGER NOT NULL, + disabled BOOLEAN, + availability_zone VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (disabled IN (0, 1)) + ); + + INSERT INTO services_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + host, + binary, + topic, + report_count, + disabled, + availability_zone + FROM services; + + DROP TABLE services; + + CREATE TABLE services ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + host VARCHAR(255), + binary VARCHAR(255), + topic VARCHAR(255), + report_count INTEGER NOT NULL, + disabled BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (disabled IN (0, 1)) + ); + + INSERT INTO services + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + host, + binary, + topic, + report_count, + disabled + FROM services_backup; + + DROP TABLE services_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py index 85a45639c695..668b77f0ffc4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -17,30 +17,26 @@ from sqlalchemy import Column, Integer, MetaData, String, Table -meta = MetaData() - -networks = Table('networks', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -# -# New Tables -# - - -# -# Tables to alter -# - -networks_label = Column( - 'label', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks_label = Column( + 'label', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) networks.create_column(networks_label) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.drop_column('label') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql new file mode 100644 index 000000000000..01601cac07bd --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql @@ -0,0 +1,111 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + label VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server, + label + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server + FROM networks_backup; + + DROP TABLE networks_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py index 88517bcd9dae..359e2c0bf846 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py @@ -17,43 +17,50 @@ from sqlalchemy import Boolean, Column, DateTime, Integer from sqlalchemy import MetaData, String, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# -# New Tables -# -zones = Table('zones', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - 
Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('api_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('username', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('password', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -# -# Tables to alter -# - -# (none currently) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + # + # New Tables + # + zones = Table('zones', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('api_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + for table in (zones, ): try: table.create() except Exception: LOG.info(repr(table)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + for table in (zones, ): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py index a5c25b876ad5..286bf9cb272a 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py @@ -19,54 +19,39 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer from sqlalchemy import MetaData, String, Table from nova import log as logging - -meta = MetaData() LOG = logging.getLogger(__name__) -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. 
-instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -quotas = Table('quotas', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -# -# New Tables -# - -instance_metadata_table = Table('instance_metadata', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=False), - Column('key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('value', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - - -# -# New columns -# -quota_metadata_items = Column('metadata_items', Integer()) - - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + quotas = Table('quotas', meta, autoload=True) + + instance_metadata_table = Table('instance_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + for table in (instance_metadata_table, ): try: table.create() @@ -75,4 +60,22 @@ def upgrade(migrate_engine): LOG.exception('Exception while creating table') raise + quota_metadata_items = Column('metadata_items', Integer()) quotas.create_column(quota_metadata_items) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + quotas = Table('quotas', meta, autoload=True) + + instance_metadata_table = Table('instance_metadata', meta, autoload=True) + + for table in (instance_metadata_table, ): + table.drop() + + quotas.drop_column('metadata_items') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py index 4627d333248c..df2be9df44c2 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py @@ -17,52 +17,38 @@ from sqlalchemy import Column, Integer, MetaData, String, Table -meta = MetaData() +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. 
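The module-level stub tables being deleted in these hunks are replaced by runtime reflection inside upgrade()/downgrade(). A minimal sketch of the resulting shape, using a placeholder 'example' table and 'note' column that are not part of this patch:

from sqlalchemy import Column, MetaData, String, Table


def upgrade(migrate_engine):
    # Bind the metadata to the engine supplied by the migration framework
    # and reflect the live table instead of redeclaring a stub at module
    # level.
    meta = MetaData()
    meta.bind = migrate_engine
    example = Table('example', meta, autoload=True)

    # Columns are declared inside the function so no state is shared
    # between upgrade and downgrade runs.  create_column()/drop_column()
    # are added to Table by sqlalchemy-migrate's changeset extension,
    # which is loaded when the migrate framework runs these scripts.
    example.create_column(Column('note', String(length=255)))


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    example = Table('example', meta, autoload=True)
    example.drop_column('note')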
-# -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) + volumes = Table('volumes', meta, autoload=True) + # Add columns to existing tables + volumes_provider_location = Column('provider_location', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) -# -# New Tables -# -# None - -# -# Tables to alter -# -# None - -# -# Columns to add to existing tables -# - -volumes_provider_location = Column('provider_location', + volumes_provider_auth = Column('provider_auth', String(length=256, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)) - -volumes_provider_auth = Column('provider_auth', - String(length=256, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - # Add columns to existing tables volumes.create_column(volumes_provider_location) volumes.create_column(volumes_provider_auth) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + volumes.drop_column('provider_location') + volumes.drop_column('provider_auth') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql new file mode 100644 index 000000000000..f55c284379a5 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql @@ -0,0 +1,113 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO volumes + SELECT 
created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description + FROM volumes_backup; + + DROP TABLE volumes_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py index 6f2668040fa3..d84fa1734001 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py @@ -15,71 +15,56 @@ from sqlalchemy import Column, Integer, MetaData, String, Table -meta = MetaData() - -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -# -fixed_ips = Table( - "fixed_ips", - meta, - Column( - "id", - Integer(), - primary_key=True, - nullable=False)) - -# -# New Tables -# -# None - -# -# Tables to alter -# -# None - -# -# Columns to add to existing tables -# - -fixed_ips_addressV6 = Column( - "addressV6", - String( - length=255, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)) - - -fixed_ips_netmaskV6 = Column( - "netmaskV6", - String( - length=3, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)) - - -fixed_ips_gatewayV6 = Column( - "gatewayV6", - String( - length=255, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + fixed_ips = Table('fixed_ips', meta, autoload=True) + + # + # New Columns + # + fixed_ips_addressV6 = Column( + "addressV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_netmaskV6 = Column( + "netmaskV6", + String( + length=3, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_gatewayV6 = Column( + "gatewayV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) # Add columns to existing tables fixed_ips.create_column(fixed_ips_addressV6) fixed_ips.create_column(fixed_ips_netmaskV6) fixed_ips.create_column(fixed_ips_gatewayV6) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + + fixed_ips.drop_column('addressV6') + fixed_ips.drop_column('netmaskV6') + fixed_ips.drop_column('gatewayV6') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql new file mode 100644 index 000000000000..44d34769820f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql @@ -0,0 +1,79 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN DEFAULT FALSE, + leased BOOLEAN DEFAULT FALSE, + reserved BOOLEAN DEFAULT FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT 
NULL, + addressV6 VARCHAR(255), + netmaskV6 VARCHAR(3), + gatewayV6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (leased IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (deleted IN (0, 1)), + CHECK (reserved IN (0, 1)) + ); + + INSERT INTO fixed_ips_backup + SELECT id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted, + addressV6, + netmaskV6, + gatewayV6 + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN DEFAULT FALSE, + leased BOOLEAN DEFAULT FALSE, + reserved BOOLEAN DEFAULT FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + CHECK (leased IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (deleted IN (0, 1)), + CHECK (reserved IN (0, 1)) + ); + + INSERT INTO fixed_ips + SELECT id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py index 5ffa3d313c70..b9ffee239d92 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py @@ -17,37 +17,36 @@ from sqlalchemy import Boolean, Column, DateTime, Integer from sqlalchemy import MetaData, String, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# -# New Tables -# -instance_types = Table('instance_types', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True), - Column('id', Integer(), primary_key=True, nullable=False), - Column('memory_mb', Integer(), nullable=False), - Column('vcpus', Integer(), nullable=False), - Column('local_gb', Integer(), nullable=False), - Column('flavorid', Integer(), nullable=False, unique=True), - Column('swap', Integer(), nullable=False, default=0), - Column('rxtx_quota', Integer(), nullable=False, default=0), - Column('rxtx_cap', Integer(), nullable=False, default=0)) - - def upgrade(migrate_engine): # Upgrade operations go here # Don't create your own engine; bind migrate_engine # to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + instance_types = Table('instance_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('id', Integer(), primary_key=True, nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('vcpus', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('flavorid', Integer(), nullable=False, unique=True), + Column('swap', Integer(), nullable=False, default=0), + 
Column('rxtx_quota', Integer(), nullable=False, default=0), + Column('rxtx_cap', Integer(), nullable=False, default=0)) try: instance_types.create() except Exception: @@ -79,5 +78,8 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. - for table in (instance_types): + meta = MetaData() + meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) + for table in (instance_types, ): table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py index 26ec067f028f..a0ae130d12df 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py @@ -13,44 +13,41 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * +# under the License. from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer from sqlalchemy import MetaData, String, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# - -migrations = Table('migrations', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('source_compute', String(255)), - Column('dest_compute', String(255)), - Column('dest_host', String(255)), - Column('instance_id', Integer, ForeignKey('instances.id'), - nullable=True), - Column('status', String(255)), - ) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + # + # New Tables + # + migrations = Table('migrations', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('source_compute', String(255)), + Column('dest_compute', String(255)), + Column('dest_host', String(255)), + Column('instance_id', Integer, ForeignKey('instances.id'), + nullable=True), + Column('status', String(255)), + ) + for table in (migrations, ): try: table.create() @@ -58,3 +55,16 @@ def upgrade(migrate_engine): LOG.info(repr(table)) LOG.exception('Exception while creating table') raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + migrations = Table('migrations', meta, autoload=True) + + for table in (migrations, ): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py index 49c09f8062c4..da01940bd39d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py @@ -16,24 +16,20 @@ from sqlalchemy import Column, Integer, MetaData, String, Table -meta = MetaData() - -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -instances_os_type = Column('os_type', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + + instances_os_type = Column('os_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) instances.create_column(instances_os_type) migrate_engine.execute(instances.update()\ .where(instances.c.os_type == None)\ @@ -41,6 +37,9 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + instances.drop_column('os_type') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py index f2fcecd84abe..8f3df2d6e0a6 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py @@ -20,58 +20,42 @@ from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData from sqlalchemy import Table, Text from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# - -compute_nodes = Table('compute_nodes', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('service_id', Integer(), nullable=False), - - Column('vcpus', Integer(), nullable=False), - Column('memory_mb', Integer(), nullable=False), - Column('local_gb', Integer(), nullable=False), - Column('vcpus_used', Integer(), nullable=False), - Column('memory_mb_used', Integer(), nullable=False), - Column('local_gb_used', Integer(), nullable=False), - Column('hypervisor_type', - Text(convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=False), - Column('hypervisor_version', Integer(), nullable=False), - Column('cpu_info', - Text(convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=False), - ) - - -# -# Tables to alter -# -instances_launched_on = Column( - 'launched_on', - Text(convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + + compute_nodes = Table('compute_nodes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('service_id', Integer(), nullable=False), + + Column('vcpus', Integer(), nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('vcpus_used', Integer(), nullable=False), + Column('memory_mb_used', Integer(), nullable=False), + Column('local_gb_used', Integer(), nullable=False), + Column('hypervisor_type', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('hypervisor_version', Integer(), nullable=False), + Column('cpu_info', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + try: compute_nodes.create() except Exception: @@ -80,4 +64,22 @@ def upgrade(migrate_engine): meta.drop_all(tables=[compute_nodes]) raise + instances_launched_on = Column( + 'launched_on', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) instances.create_column(instances_launched_on) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + compute_nodes = Table('compute_nodes', meta, autoload=True) + + compute_nodes.drop() + + instances.drop_column('launched_on') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py index 30ae74b510f6..a626d2c7dbd1 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py @@ -16,135 +16,75 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer from sqlalchemy import MetaData, String, Table -meta = MetaData() - -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. 
-# -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# Tables to alter -# -networks = Table('networks', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('injected', Boolean(create_constraint=True, name=None)), - Column('cidr', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('netmask', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('bridge', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('gateway', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('broadcast', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('dns', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vlan', Integer()), - Column('vpn_public_address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vpn_public_port', Integer()), - Column('vpn_private_address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('dhcp_start', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('cidr_v6', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('ra_server', String(length=255, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)), - Column( - 'label', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - -fixed_ips = Table('fixed_ips', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('network_id', - Integer(), - ForeignKey('networks.id'), - nullable=True), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=True), - Column('allocated', Boolean(create_constraint=True, name=None)), - Column('leased', Boolean(create_constraint=True, name=None)), - Column('reserved', Boolean(create_constraint=True, name=None)), - Column("addressV6", String(length=255, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)), - Column("netmaskV6", String(length=3, - convert_unicode=False, - 
assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)), - Column("gatewayV6", String(length=255, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)), - ) -# -# New Tables -# -# None - -# -# Columns to add to existing tables -# -networks_netmask_v6 = Column( - 'netmask_v6', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # load tables for fk + instances = Table('instances', meta, autoload=True) + + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + # Alter column name networks.c.ra_server.alter(name='gateway_v6') # Add new column to existing table + networks_netmask_v6 = Column( + 'netmask_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) networks.create_column(networks_netmask_v6) # drop existing columns from table fixed_ips.c.addressV6.drop() fixed_ips.c.netmaskV6.drop() fixed_ips.c.gatewayV6.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + + networks.c.gateway_v6.alter(name='ra_server') + networks.drop_column('netmask_v6') + + fixed_ips_addressV6 = Column( + "addressV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_netmaskV6 = Column( + "netmaskV6", + String( + length=3, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_gatewayV6 = Column( + "gatewayV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + for column in (fixed_ips_addressV6, + fixed_ips_netmaskV6, + fixed_ips_gatewayV6): + fixed_ips.create_column(column) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql new file mode 100644 index 000000000000..0779f50e8a90 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql @@ -0,0 +1,195 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + label VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server, + label + FROM networks; + + 
DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server AS gateway_v6, + label, + NULL AS netmask_v6 + FROM networks_backup; + + DROP TABLE networks_backup; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + addressV6 VARCHAR(255), + netmaskV6 VARCHAR(3), + gatewayV6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + addressV6, + netmaskV6, + gatewayV6 + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py index 7246839b71d4..d8735ec7df9c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py @@ -13,34 +13,31 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * +# under the License. 
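The dialect-specific SQLite scripts in this patch (the 012 upgrade above and the various NNN_sqlite_downgrade.sql files) all share one shape, because SQLite cannot drop a column, or the CHECK constraint created for a Boolean column, in place: copy the rows into a temporary table, drop the original table, recreate it without the unwanted column, and copy the rows back. Reduced to a placeholder 'example' table whose names are not taken from this patch, and shown as a Python string constant only so it sits next to the Python migrations (the real scripts ship as standalone .sql files), the skeleton is:

# Shared skeleton of the SQLite rebuild workaround; table and column
# names below are placeholders.
SQLITE_DROP_COLUMN_SKELETON = """
BEGIN TRANSACTION;
    CREATE TEMPORARY TABLE example_backup (
        id INTEGER NOT NULL,
        name VARCHAR(255),
        PRIMARY KEY (id)
    );
    INSERT INTO example_backup SELECT id, name FROM example;
    DROP TABLE example;
    CREATE TABLE example (
        id INTEGER NOT NULL,
        name VARCHAR(255),
        PRIMARY KEY (id)
    );
    INSERT INTO example SELECT id, name FROM example_backup;
    DROP TABLE example_backup;
COMMIT;
"""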
from sqlalchemy import Column, Integer, MetaData, Table -meta = MetaData() - -migrations = Table('migrations', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# Tables to alter -# -# - -old_flavor_id = Column('old_flavor_id', Integer()) -new_flavor_id = Column('new_flavor_id', Integer()) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + + old_flavor_id = Column('old_flavor_id', Integer()) + new_flavor_id = Column('new_flavor_id', Integer()) + migrations.create_column(old_flavor_id) migrations.create_column(new_flavor_id) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - migrations.drop_column(old_flavor_id) - migrations.drop_column(new_flavor_id) + + migrations = Table('migrations', meta, autoload=True) + + migrations.drop_column('old_flavor_id') + migrations.drop_column('new_flavor_id') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql new file mode 100644 index 000000000000..fbba364beab6 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql @@ -0,0 +1,69 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE migrations_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + source_compute VARCHAR(255), + dest_compute VARCHAR(255), + dest_host VARCHAR(255), + instance_id INTEGER, + status VARCHAR(255), + old_flavor_id INTEGER, + new_flavor_id INTEGER, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO migrations_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + source_compute, + dest_compute, + dest_host, + instance_id, + status, + old_flavor_id, + new_flavor_id + FROM migrations; + + DROP TABLE migrations; + + CREATE TABLE migrations ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + source_compute VARCHAR(255), + dest_compute VARCHAR(255), + dest_host VARCHAR(255), + instance_id INTEGER, + status VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO migrations + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + source_compute, + dest_compute, + dest_host, + instance_id, + status + FROM migrations_backup; + + DROP TABLE migrations_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py index e5e37733837e..b363caca5e4d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py @@ -15,37 +15,22 @@ # under the License. 
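The new NNN_<dialect>_<upgrade|downgrade>.sql files above rely on sqlalchemy-migrate preferring a dialect-specific SQL script over the generic Python script for a given version and direction. Roughly, and only as an illustration of that selection rule (this is not migrate's actual implementation, and the directory path in the usage comment is just the repo path from this patch):

import os


def pick_script(versions_dir, version, dialect, operation):
    # Prefer e.g. 013_sqlite_downgrade.sql over the generic
    # 013_add_flavors_to_migrations.py when running against sqlite.
    sql_name = '%03d_%s_%s.sql' % (version, dialect, operation)
    if os.path.exists(os.path.join(versions_dir, sql_name)):
        return sql_name
    # Fall back to the version's Python change script.
    for name in sorted(os.listdir(versions_dir)):
        if name.startswith('%03d_' % version) and name.endswith('.py'):
            return name
    raise RuntimeError('no script found for version %d' % version)


# e.g. pick_script('nova/db/sqlalchemy/migrate_repo/versions', 13,
#                  'sqlite', 'downgrade') -> '013_sqlite_downgrade.sql'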
from sqlalchemy import Column, Integer, MetaData, String, Table -#from nova import log as logging - -meta = MetaData() - -c_instance_type = Column('instance_type', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - -c_instance_type_id = Column('instance_type_id', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - -instance_types = Table('instance_types', meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True)) def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instance_types = Table('instance_types', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + + c_instance_type_id = Column('instance_type_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) instances.create_column(c_instance_type_id) @@ -63,17 +48,25 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instance_types = Table('instance_types', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + c_instance_type = Column('instance_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) instances.create_column(c_instance_type) + type_names = {} recs = migrate_engine.execute(instance_types.select()) for row in recs: - type_id = row[0] - type_name = row[1] + type_names[row[0]] = row[1] + + for type_id, type_name in type_names.iteritems(): migrate_engine.execute(instances.update()\ .where(instances.c.instance_type_id == type_id)\ .values(instance_type=type_name)) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py index 375760c84e08..51db850665ea 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py @@ -17,19 +17,19 @@ from sqlalchemy import Boolean, Column, MetaData, Table -meta = MetaData() - -c_auto_assigned = Column('auto_assigned', Boolean, default=False) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine - - floating_ips = Table('floating_ips', - meta, - autoload=True, - autoload_with=migrate_engine) - + floating_ips = Table('floating_ips', meta, autoload=True) + c_auto_assigned = Column('auto_assigned', Boolean, default=False) floating_ips.create_column(c_auto_assigned) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + floating_ips = Table('floating_ips', meta, autoload=True) + floating_ips.drop_column('auto_assigned') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql new file mode 100644 index 000000000000..c599ef2b355a --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql @@ -0,0 +1,62 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (auto_assigned IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned + FROM floating_ips; + + DROP TABLE floating_ips; + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host + FROM floating_ips_backup; + + DROP TABLE floating_ips_backup; +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index 56b287171e26..2d3134e65fcd 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -19,8 +19,6 @@ from sqlalchemy import MetaData, String, Table from nova import utils -meta = MetaData() - resources = [ 'instances', 'cores', @@ -31,7 +29,7 @@ resources = [ ] -def old_style_quotas_table(name): +def old_style_quotas_table(meta, name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), @@ -53,7 +51,7 @@ def old_style_quotas_table(name): ) -def new_style_quotas_table(name): +def new_style_quotas_table(meta, name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), @@ -75,8 +73,8 @@ def new_style_quotas_table(name): ) -def quotas_table(migrate_engine, name='quotas'): - return Table(name, meta, autoload=True, autoload_with=migrate_engine) +def quotas_table(meta, name='quotas'): + return Table(name, meta, autoload=True) def _assert_no_duplicate_project_ids(quotas): @@ -177,12 +175,13 @@ def convert_backward(migrate_engine, old_quotas, new_quotas): def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine - old_quotas = quotas_table(migrate_engine) + old_quotas = quotas_table(meta) assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas) - new_quotas = new_style_quotas_table('quotas_new') + new_quotas = new_style_quotas_table(meta, 'quotas_new') new_quotas.create() convert_forward(migrate_engine, old_quotas, new_quotas) old_quotas.drop() @@ -190,18 +189,19 @@ def upgrade(migrate_engine): # clear metadata to work around this: # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 meta.clear() - new_quotas = quotas_table(migrate_engine, 'quotas_new') + new_quotas = quotas_table(meta, 'quotas_new') new_quotas.rename('quotas') def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() meta.bind = migrate_engine - new_quotas = quotas_table(migrate_engine) + new_quotas = quotas_table(meta) assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas) - old_quotas = old_style_quotas_table('quotas_old') + old_quotas = old_style_quotas_table(meta, 'quotas_old') old_quotas.create() convert_backward(migrate_engine, old_quotas, new_quotas) new_quotas.drop() @@ -209,5 +209,5 @@ def downgrade(migrate_engine): # clear metadata to work around this: # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 meta.clear() - old_quotas = quotas_table(migrate_engine, 'quotas_old') + old_quotas = quotas_table(meta, 'quotas_old') old_quotas.rename('quotas') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py index eba8ad5ab30e..90757396cedd 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py @@ -18,14 +18,14 @@ from sqlalchemy import Column, Integer, MetaData, String, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + + instances = Table('instances', meta, autoload=True) types = {} for instance in migrate_engine.execute(instances.select()): @@ -56,9 +56,10 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + + instances = Table('instances', meta, autoload=True) integer_column = instances.c.instance_type_id string_column = Column('instance_type_id_str', diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py index 73c76f666998..59ead97ada40 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -16,26 +16,20 @@ from sqlalchemy import MetaData, Table -meta = MetaData() - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine - - tokens = Table('auth_tokens', meta, autoload=True, - autoload_with=migrate_engine) - + tokens = Table('auth_tokens', meta, autoload=True) c_manageent = tokens.c.server_manageent_url c_manageent.alter(name='server_management_url') def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - - tokens = Table('auth_tokens', meta, autoload=True, - autoload_with=migrate_engine) - + tokens = Table('auth_tokens', meta, autoload=True) c_management = tokens.c.server_management_url c_management.alter(name='server_manageent_url') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py index 67ca04c775ba..b459f3832089 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -20,45 +20,51 @@ from sqlalchemy import Integer, DateTime, Boolean, String from nova import log as logging - -meta = MetaData() LOG = logging.getLogger(__name__) -snapshots = Table('snapshots', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('volume_id', Integer(), nullable=False), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('progress', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('volume_size', Integer()), - Column('scheduled_at', DateTime(timezone=False)), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) try: snapshots.create() except Exception: @@ -70,4 +76,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + snapshots = Table('snapshots', meta, autoload=True) snapshots.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py index dad2a1cd7805..c5a632ca08a9 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py @@ -18,28 +18,23 @@ from sqlalchemy import Column, Table, MetaData, Integer -meta = MetaData() - - -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -# -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Column -# - -snapshot_id = Column('snapshot_id', Integer()) - - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + volumes = Table('volumes', meta, autoload=True) + + snapshot_id = Column('snapshot_id', Integer()) # Add columns to existing tables volumes.create_column(snapshot_id) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + volumes.drop_column('snapshot_id') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql new file mode 100644 index 000000000000..97b94660453c --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql @@ -0,0 +1,119 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth + FROM volumes_backup; + + DROP TABLE volumes_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py index 4d1089d7f36c..64b539ed65c6 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py @@ -17,24 +17,22 @@ from sqlalchemy import MetaData, Table -meta = MetaData() - - def upgrade(migrate_engine): 
# Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instances = Table('instances', meta, autoload=True) image_id_column = instances.c.image_id image_id_column.alter(name='image_ref') def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + + instances = Table('instances', meta, autoload=True) image_ref_column = instances.c.image_ref image_ref_column.alter(name='image_id') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py b/nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py index f9fb64d59ff1..2c10b790a687 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py @@ -16,12 +16,11 @@ from sqlalchemy import MetaData -meta = MetaData() - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine if migrate_engine.name == "mysql": migrate_engine.execute("ALTER TABLE auth_tokens Engine=InnoDB") @@ -62,4 +61,4 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): - meta.bind = migrate_engine + pass diff --git a/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py index 59b552a6d9e9..ee607dd92407 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py @@ -16,30 +16,27 @@ from sqlalchemy import Column, MetaData, String, Table -meta = MetaData() - -instances_vm_mode = Column('vm_mode', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instances = Table('instances', meta, autoload=True) + instances_vm_mode = Column('vm_mode', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) instances.create_column(instances_vm_mode) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instances = Table('instances', meta, autoload=True) instances.drop_column('vm_mode') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py index c64cb9c3527a..202ea634e150 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py @@ -18,62 +18,57 @@ from sqlalchemy import DateTime, Boolean, Integer, String from sqlalchemy import ForeignKey from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -snapshots = Table('snapshots', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -block_device_mapping = Table('block_device_mapping', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, autoincrement=True), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=False), - Column('device_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=False), - Column('delete_on_termination', - Boolean(create_constraint=True, name=None), - default=False), - Column('virtual_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True), - Column('snapshot_id', - Integer(), - ForeignKey('snapshots.id'), - nullable=True), - Column('volume_id', Integer(), ForeignKey('volumes.id'), - nullable=True), - Column('volume_size', Integer(), nullable=True), - Column('no_device', - Boolean(create_constraint=True, name=None), - nullable=True), - ) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + + # + # New Tables + # + block_device_mapping = Table('block_device_mapping', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, autoincrement=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('device_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('delete_on_termination', + Boolean(create_constraint=True, name=None), + default=False), + Column('virtual_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True), + Column('snapshot_id', + Integer(), + ForeignKey('snapshots.id'), + nullable=True), + Column('volume_id', Integer(), ForeignKey('volumes.id'), + nullable=True), + Column('volume_size', Integer(), nullable=True), + Column('no_device', + Boolean(create_constraint=True, name=None), + nullable=True), + ) try: block_device_mapping.create() except Exception: @@ -85,4 +80,13 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + + block_device_mapping = Table('block_device_mapping', meta, autoload=True) block_device_mapping.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py index 27f30d5368e5..3aaa6d79e2ed 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py @@ -19,15 +19,13 @@ from sqlalchemy import Column, Integer, MetaData, String, Table from nova import utils -meta = MetaData() - -instances = Table("instances", meta, - Column("id", Integer(), primary_key=True, nullable=False)) -uuid_column = Column("uuid", String(36)) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + uuid_column = Column("uuid", String(36)) instances.create_column(uuid_column) rows = migrate_engine.execute(instances.select()) @@ -39,5 +37,9 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances.drop_column(uuid_column) + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('uuid') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py index a3a065d56c73..4665c75ae678 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py @@ -18,59 +18,72 @@ from sqlalchemy import MetaData, String, Table from nova import log as logging 
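As in the 024 hunks above (and 026/028 that follow), tables referenced by a new table are reflected first so its ForeignKey targets resolve, and the downgrade simply reflects and drops the new table. Roughly, with illustrative 'parents'/'children' names rather than Nova tables:

    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine

        # load tables for fk
        parents = Table('parents', meta, autoload=True)

        children = Table('children', meta,
                Column('id', Integer(), primary_key=True, nullable=False),
                Column('parent_id', Integer(), ForeignKey('parents.id'),
                       nullable=False))
        children.create()


    def downgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine

        # Reflect the referenced table too, then drop only the new table.
        Table('parents', meta, autoload=True)
        children = Table('children', meta, autoload=True)
        children.drop()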
-meta = MetaData() LOG = logging.getLogger(__name__) -# -# New Tables -# -builds = Table('agent_builds', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('hypervisor', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('os', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('architecture', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('version', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('md5hash', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -# -# New Column -# - -architecture = Column('architecture', String(length=255)) - - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + builds = Table('agent_builds', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('hypervisor', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('os', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('architecture', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('version', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('md5hash', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) for table in (builds, ): try: table.create() except Exception: LOG.info(repr(table)) - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instances = Table('instances', meta, autoload=True) + + # + # New Columns + # + architecture = Column('architecture', String(length=255)) # Add columns to existing tables instances.create_column(architecture) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + builds = Table('agent_builds', meta, autoload=True) + for table in (builds, ): + table.drop() + + instances = Table('instances', meta, autoload=True) + instances.drop_column('architecture') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py index 1477796bb2a5..ed82ede9d1e6 100644 --- 
a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py @@ -16,56 +16,38 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table from nova import log as logging - -meta = MetaData() LOG = logging.getLogger(__name__) -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -services = Table('services', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -networks = Table('networks', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -# -# New Tables -# -provider_fw_rules = Table('provider_fw_rules', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('protocol', - String(length=5, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('from_port', Integer()), - Column('to_port', Integer()), - Column('cidr', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + provider_fw_rules = Table('provider_fw_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('protocol', + String(length=5, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) for table in (provider_fw_rules,): try: table.create() @@ -73,3 +55,11 @@ def upgrade(migrate_engine): LOG.info(repr(table)) LOG.exception('Exception while creating table') raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + provider_fw_rules = Table('provider_fw_rules', meta, autoload=True) + for table in (provider_fw_rules,): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py index edc7e5107364..29c3ab13cb5c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py @@ -18,41 +18,40 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer from sqlalchemy import MetaData, String, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# Just for the ForeignKey and 
column creation to succeed, these are not the -# actual definitions of instances or services. -instance_types = Table('instance_types', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# - -instance_type_extra_specs_table = Table('instance_type_extra_specs', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('instance_type_id', - Integer(), - ForeignKey('instance_types.id'), - nullable=False), - Column('key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('value', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + instance_types = Table('instance_types', meta, autoload=True) + + # + # New Tables + # + instance_type_extra_specs_table = Table('instance_type_extra_specs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_type_id', + Integer(), + ForeignKey('instance_types.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + for table in (instance_type_extra_specs_table, ): try: table.create() @@ -64,5 +63,14 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. 
+ meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instance_types = Table('instance_types', meta, autoload=True) + + instance_type_extra_specs_table = Table('instance_type_extra_specs', + meta, + autoload=True) for table in (instance_type_extra_specs_table, ): table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py index c2be4f442a23..80eb836c06be 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py @@ -14,25 +14,28 @@ from sqlalchemy import Column, Float, Integer, MetaData, Table -meta = MetaData() - -zones = Table('zones', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -weight_offset = Column('weight_offset', Float(), default=0.0) -weight_scale = Column('weight_scale', Float(), default=1.0) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + zones = Table('zones', meta, autoload=True) + + # + # New Columns + # + weight_offset = Column('weight_offset', Float(), default=0.0) + weight_scale = Column('weight_scale', Float(), default=1.0) + zones.create_column(weight_offset) zones.create_column(weight_scale) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - zones.drop_column(weight_offset) - zones.drop_column(weight_scale) + zones = Table('zones', meta, autoload=True) + + zones.drop_column('weight_offset') + zones.drop_column('weight_scale') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py index 1c5ce8b55dc6..db58ef76ecf8 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py @@ -13,52 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. 
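The 030 downgrade below re-adds instances.mac_address and backfills it from virtual_interfaces; the 014 and 036 downgrades above use the same select-then-update shape. A compact sketch of that backfill, with illustrative 'things'/'labels' tables and the old-style positional select() signature these migrations already use:

    from sqlalchemy import Column, MetaData, String, Table, select


    def backfill_label(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine

        things = Table('things', meta, autoload=True)
        labels = Table('labels', meta, autoload=True)

        things.create_column(Column('label', String(255)))

        # Read the matching rows first, then issue the updates, so the
        # updates are not interleaved with an open result cursor.
        rows = list(migrate_engine.execute(
            select([things.c.id, labels.c.name],
                   labels.c.thing_id == things.c.id)))
        for thing_id, name in rows:
            migrate_engine.execute(things.update()
                    .where(things.c.id == thing_id)
                    .values(label=name))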
-from sqlalchemy import * -from migrate import * +from sqlalchemy import select, Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table from nova import log as logging from nova import utils LOG = logging.getLogger(__name__) -meta = MetaData() - -# virtual interface table to add to DB -virtual_interfaces = Table('virtual_interfaces', meta, - Column('created_at', DateTime(timezone=False), - default=utils.utcnow()), - Column('updated_at', DateTime(timezone=False), - onupdate=utils.utcnow()), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('address', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True), - Column('network_id', - Integer(), - ForeignKey('networks.id')), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=False), - mysql_engine='InnoDB') - - -# bridge_interface column to add to networks table -interface = Column('bridge_interface', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False)) - - -# virtual interface id column to add to fixed_ips table -# foreignkey added in next migration -virtual_interface_id = Column('virtual_interface_id', - Integer()) def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine # grab tables and (column for dropping later) @@ -67,6 +33,13 @@ def upgrade(migrate_engine): fixed_ips = Table('fixed_ips', meta, autoload=True) c = instances.columns['mac_address'] + interface = Column('bridge_interface', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)) + + virtual_interface_id = Column('virtual_interface_id', + Integer()) # add interface column to networks table # values will have to be set manually before running nova try: @@ -75,6 +48,31 @@ def upgrade(migrate_engine): LOG.error(_("interface column not added to networks table")) raise + # + # New Tables + # + virtual_interfaces = Table('virtual_interfaces', meta, + Column('created_at', DateTime(timezone=False), + default=utils.utcnow()), + Column('updated_at', DateTime(timezone=False), + onupdate=utils.utcnow()), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('network_id', + Integer(), + ForeignKey('networks.id')), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + mysql_engine='InnoDB') + # create virtual_interfaces table try: virtual_interfaces.create() @@ -120,5 +118,29 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): - LOG.error(_("Can't downgrade without losing data")) - raise Exception + meta = MetaData() + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + mac_address = Column('mac_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + 
unicode_error=None, _warn_on_bytestring=False)) + + instances.create_column(mac_address) + + s = select([instances.c.id, virtual_interfaces.c.address], + virtual_interfaces.c.instance_id == instances.c.id) + + for row in s.execute(): + u = instances.update().values(mac_address=row['address']).\ + where(instances.c.id == row['id']) + + networks.drop_column('bridge_interface') + virtual_interfaces.drop() + fixed_ips.drop_column('virtual_interface_id') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql new file mode 100644 index 000000000000..2486e6d2db6b --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql @@ -0,0 +1,377 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + PRIMARY KEY (id), + CHECK (locked IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + mac_address VARCHAR(255), + PRIMARY KEY (id), + CHECK (locked IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + 
updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + NULL AS mac_address + FROM instances_backup; + + DROP TABLE instances_backup; + + UPDATE instances SET mac_address=(SELECT address + FROM virtual_interfaces + WHERE virtual_interfaces.instance_id = instances.id); + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6 + FROM networks_backup; + + DROP TABLE networks_backup; + + DROP TABLE virtual_interfaces; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + virtual_interface_id INTEGER, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, + updated_at, + deleted_at, + 
deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + virtual_interface_id + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py index d6f89254ec32..962903f9ff8b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py @@ -13,16 +13,16 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import MetaData, Table +from migrate import ForeignKeyConstraint from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name @@ -41,9 +41,14 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name + # grab tables + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + # drop foreignkey if not sqlite try: if not dialect.startswith('sqlite'): diff --git a/nova/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py b/nova/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py index 6b98b9890da7..f12070c57095 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py @@ -15,33 +15,28 @@ from sqlalchemy import Column, Integer, MetaData, Table, String -meta = MetaData() - - -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Column -# -root_device_name = Column( - 'root_device_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + root_device_name = Column( + 'root_device_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) instances.create_column(root_device_name) def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + instances.drop_column('root_device_name') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py b/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py index 3a5f7eba8f43..becc353f68f3 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py @@ -17,28 +17,26 @@ from sqlalchemy import Column, Table, MetaData, Boolean, String -meta = MetaData() - -fixed_ips_host = Column('host', String(255)) - -networks_multi_host = Column('multi_host', Boolean, default=False) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + fixed_ips_host = Column('host', String(255)) fixed_ips = Table('fixed_ips', meta, autoload=True) fixed_ips.create_column(fixed_ips_host) + networks_multi_host = Column('multi_host', Boolean, default=False) networks = Table('networks', meta, autoload=True) networks.create_column(networks_multi_host) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine fixed_ips = Table('fixed_ips', meta, autoload=True) - fixed_ips.drop_column(fixed_ips_host) + fixed_ips.drop_column('host') networks = Table('networks', meta, autoload=True) - networks.drop_column(networks_multi_host) + networks.drop_column('multi_host') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql new file mode 100644 index 000000000000..34188d866296 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql @@ -0,0 +1,193 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + host VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, + address, + virtual_interface_id, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted, + host + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips + SELECT id, + address, + virtual_interface_id, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, 
+ deleted_at, + deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + multi_host BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)), + CHECK (multi_host IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface, + multi_host + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface + FROM networks_backup; + + DROP TABLE networks_backup; +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py index b4a3be3dbe40..9cf004301b72 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py @@ -13,26 +13,17 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * +# under the License. 
from sqlalchemy import Column, Integer, String, MetaData, Table -meta = MetaData() - - -# -# Tables to alter -# -# - -instance_id = Column('instance_id', Integer()) -instance_uuid = Column('instance_uuid', String(255)) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + migrations = Table('migrations', meta, autoload=True) + instance_uuid = Column('instance_uuid', String(255)) migrations.create_column(instance_uuid) if migrate_engine.name == "mysql": @@ -46,7 +37,10 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + migrations = Table('migrations', meta, autoload=True) migrations.c.instance_uuid.drop() + instance_id = Column('instance_id', Integer()) migrations.create_column(instance_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py b/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py index 9aa4d35314cb..c03e5be6625c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py @@ -17,22 +17,23 @@ from sqlalchemy import Column, Table, MetaData, String -meta = MetaData() - -dns2 = Column('dns2', String(255)) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine networks = Table('networks', meta, autoload=True) + networks.c.dns.alter(name='dns1') + dns2 = Column('dns2', String(255)) networks.create_column(dns2) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine networks = Table('networks', meta, autoload=True) + networks.c.dns1.alter(name='dns') - networks.drop_column(dns2) + networks.drop_column('dns2') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py index dfbd4ba325ad..38f83fc01929 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py @@ -13,29 +13,20 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * +# under the License. 
from sqlalchemy import Column, Integer, MetaData, Table -meta = MetaData() - - -# -# Tables to alter -# -# - -old_flavor_id = Column('old_flavor_id', Integer()) -new_flavor_id = Column('new_flavor_id', Integer()) -old_instance_type_id = Column('old_instance_type_id', Integer()) -new_instance_type_id = Column('new_instance_type_id', Integer()) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) migrations = Table('migrations', meta, autoload=True) + + old_instance_type_id = Column('old_instance_type_id', Integer()) + new_instance_type_id = Column('new_instance_type_id', Integer()) migrations.create_column(old_instance_type_id) migrations.create_column(new_instance_type_id) @@ -57,20 +48,32 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) migrations = Table('migrations', meta, autoload=True) + + old_flavor_id = Column('old_flavor_id', Integer()) + new_flavor_id = Column('new_flavor_id', Integer()) + migrations.create_column(old_flavor_id) migrations.create_column(new_flavor_id) # Convert instance_type_id to flavor_id + itypes = {} for instance_type in migrate_engine.execute(instance_types.select()): + itypes[instance_type.flavorid] = instance_type.id + + for instance_type_flavorid in itypes.keys(): migrate_engine.execute(migrations.update()\ - .where(migrations.c.old_instance_type_id == instance_type.id)\ - .values(old_flavor_id=instance_type.flavorid)) + .where(migrations.c.old_instance_type_id == + itypes[instance_type_flavorid])\ + .values(old_flavor_id=instance_type_flavorid)) migrate_engine.execute(migrations.update()\ - .where(migrations.c.new_instance_type_id == instance_type.id)\ - .values(new_flavor_id=instance_type.flavorid)) + .where(migrations.c.new_instance_type_id == + itypes[instance_type_flavorid])\ + .values(new_flavor_id=instance_type_flavorid)) migrations.c.old_instance_type_id.drop() migrations.c.new_instance_type_id.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py b/nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py index b957666c29a5..c8a1a19274ef 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py @@ -14,24 +14,29 @@ from sqlalchemy import Column, MetaData, Table, String -meta = MetaData() - -admin_pass = Column( - 'admin_pass', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + + instances = Table('instances', meta, autoload=True) + instances.drop_column('admin_pass') def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + + instances = Table('instances', meta, autoload=True) + + # + # New Columns + # + admin_pass = Column( + 'admin_pass', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + instances.create_column(admin_pass) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py 
b/nova/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py index 0f542cbece91..64acaa030565 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py @@ -19,16 +19,13 @@ from sqlalchemy import Column, Integer, MetaData, String, Table from nova import utils -meta = MetaData() - -virtual_interfaces = Table("virtual_interfaces", meta, - Column("id", Integer(), primary_key=True, - nullable=False)) -uuid_column = Column("uuid", String(36)) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + uuid_column = Column('uuid', String(36)) virtual_interfaces.create_column(uuid_column) rows = migrate_engine.execute(virtual_interfaces.select()) @@ -40,5 +37,9 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - virtual_interfaces.drop_column(uuid_column) + + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + virtual_interfaces.drop_column('uuid') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql new file mode 100644 index 000000000000..0ac66e7e01ba --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql @@ -0,0 +1,63 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py b/nova/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py index 39f0dd6cef7e..8c8961cd33a7 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py @@ -14,35 +14,36 @@ from sqlalchemy import Column, Integer, MetaData, Table, String -meta = MetaData() - -accessIPv4 = Column( - 'access_ip_v4', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - -accessIPv6 = Column( - 'access_ip_v6', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, 
_warn_on_bytestring=False), - nullable=True) - -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + accessIPv4 = Column( + 'access_ip_v4', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + accessIPv6 = Column( + 'access_ip_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) instances.create_column(accessIPv4) instances.create_column(accessIPv6) def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + instances.drop_column('access_ip_v4') instances.drop_column('access_ip_v6') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py index 38c543d51f0a..60191e1d4c23 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py @@ -19,15 +19,13 @@ from sqlalchemy import Column, Integer, MetaData, String, Table from nova import utils -meta = MetaData() - -networks = Table("networks", meta, - Column("id", Integer(), primary_key=True, nullable=False)) -uuid_column = Column("uuid", String(36)) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + uuid_column = Column("uuid", String(36)) networks.create_column(uuid_column) rows = migrate_engine.execute(networks.select()) @@ -39,5 +37,9 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - networks.drop_column(uuid_column) + + networks = Table('networks', meta, autoload=True) + + networks.drop_column('uuid') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py index b647c9eeb8f0..f85c4a0d7433 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py @@ -17,20 +17,20 @@ from sqlalchemy import Column, Integer, MetaData, String, Table -meta = MetaData() - -instances = Table("instances", meta, - Column("id", Integer(), primary_key=True, nullable=False)) - -# matches the size of an image_ref -config_drive_column = Column("config_drive", String(255), nullable=True) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + instances = Table("instances", meta, autoload=True) + + config_drive_column = Column("config_drive", String(255), nullable=True) instances.create_column(config_drive_column) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances.drop_column(config_drive_column) + + instances = Table("instances", meta, autoload=True) + + instances.drop_column('config_drive') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py index 
3512df4517d2..98ff83748525 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py @@ -20,83 +20,72 @@ from sqlalchemy import Boolean, ForeignKey from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of tables . -# - -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -volume_type_id = Column('volume_type_id', Integer(), nullable=True) - - -# New Tables -# - -volume_types = Table('volume_types', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True)) - -volume_type_extra_specs_table = Table('volume_type_extra_specs', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('volume_type_id', - Integer(), - ForeignKey('volume_types.id'), - nullable=False), - Column('key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('value', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - - -volume_metadata_table = Table('volume_metadata', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('volume_id', - Integer(), - ForeignKey('volumes.id'), - nullable=False), - Column('key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('value', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - - -new_tables = (volume_types, - volume_type_extra_specs_table, - volume_metadata_table) - -# -# Tables to alter -# - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + volumes = Table('volumes', meta, autoload=True) + + # + # New Tables + # + volume_types = Table('volume_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True)) + + volume_type_extra_specs_table = Table('volume_type_extra_specs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), 
+ Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_type_id', + Integer(), + ForeignKey('volume_types.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + volume_metadata_table = Table('volume_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + new_tables = (volume_types, + volume_type_extra_specs_table, + volume_metadata_table) + for table in new_tables: try: table.create() @@ -105,13 +94,29 @@ def upgrade(migrate_engine): LOG.exception('Exception while creating table') raise + # + # New Columns + # + volume_type_id = Column('volume_type_id', Integer(), nullable=True) volumes.create_column(volume_type_id) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - volumes.drop_column(volume_type_id) + volumes = Table('volumes', meta, autoload=True) - for table in new_tables: + volumes.drop_column('volume_type_id') + + volume_types = Table('volume_types', meta, autoload=True) + volume_type_extra_specs_table = Table('volume_type_extra_specs', + meta, + autoload=True) + volume_metadata_table = Table('volume_metadata', meta, autoload=True) + + # table order matters, don't change + for table in (volume_type_extra_specs_table, + volume_types, + volume_metadata_table): table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql new file mode 100644 index 000000000000..8fa39663a23e --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql @@ -0,0 +1,129 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id INTEGER, + volume_type_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + 
provider_auth, + snapshot_id, + volume_type_id + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id + FROM volumes_backup; + + DROP TABLE volumes_backup; + + DROP TABLE volume_type_extra_specs; + + DROP TABLE volume_types; + + DROP TABLE volume_metadata; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py index 9ae05bda0ada..441e41c390c4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py @@ -20,48 +20,52 @@ from sqlalchemy import Boolean from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# -# New Tables -# - -virtual_storage_arrays = Table('virtual_storage_arrays', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_type_id', Integer(), nullable=False), - Column('image_ref', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vc_count', Integer(), nullable=False), - Column('vol_count', Integer(), nullable=False), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + virtual_storage_arrays = Table('virtual_storage_arrays', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type_id', Integer(), nullable=False), + Column('image_ref', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vc_count', Integer(), nullable=False), + Column('vol_count', Integer(), nullable=False), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + try: virtual_storage_arrays.create() except Exception: @@ -71,6 +75,10 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + virtual_storage_arrays = Table('virtual_storage_arrays', + meta, + autoload=True) virtual_storage_arrays.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py index 0484743acbe4..81da7b16d1ce 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py @@ -20,16 +20,6 @@ from nova.compute import task_states from nova.compute import vm_states -meta = MetaData() - - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - _upgrade_translations = { "stopping": { "state_description": vm_states.ACTIVE, @@ -92,10 +82,10 @@ _downgrade_translations = { def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instance_table = Table('instances', meta, autoload=True) c_state = instance_table.c.state c_state.alter(name='power_state') @@ -103,6 +93,11 @@ def upgrade(migrate_engine): c_vm_state = instance_table.c.state_description c_vm_state.alter(name='vm_state') + c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) instance_table.create_column(c_task_state) for old_state, values in _upgrade_translations.iteritems(): @@ -113,10 +108,10 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instance_table = Table('instances', meta, autoload=True) c_task_state = 
instance_table.c.task_state diff --git a/nova/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py index 6d709e958f34..5d4e1e6257e1 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py @@ -13,26 +13,21 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Integer, MetaData, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -networks = Table('networks', meta, - Column("id", Integer(), primary_key=True, nullable=False)) - -# Add priority column to networks table -priority = Column('priority', Integer()) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + networks = Table('networks', meta, autoload=True) + + priority = Column('priority', Integer()) try: networks.create_column(priority) except Exception: @@ -41,5 +36,9 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - networks.drop_column(priority) + + networks = Table('networks', meta, autoload=True) + + networks.drop_column('priority') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py b/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py index 63e7bc4f96e6..3ee1c4e7ee5b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py @@ -14,35 +14,36 @@ from sqlalchemy import Column, Integer, MetaData, Table, String -meta = MetaData() - -default_local_device = Column( - 'default_local_device', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - -default_swap_device = Column( - 'default_swap_device', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + default_local_device = Column( + 'default_local_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + default_swap_device = Column( + 'default_swap_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) instances.create_column(default_local_device) instances.create_column(default_swap_device) def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. 
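Migration 044 above pairs two operations that recur throughout the series: renaming a reflected column in place with alter(name=...) and rewriting row values with UPDATE statements built from the reflected table. The sketch below is hypothetical (servers_demo, its state columns, and the translation map are placeholders), but shows the same combination:

    import migrate.changeset  # noqa -- provides alter() on reflected columns
    from sqlalchemy import MetaData, Table


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine

        servers = Table('servers_demo', meta, autoload=True)

        # rename a column in place on the live table
        servers.c.state.alter(name='power_state')

        # rewrite old values with one bulk UPDATE per translation
        translations = {'stopping': 'active', 'terminating': 'deleting'}
        for old_value, new_value in translations.items():
            migrate_engine.execute(
                servers.update()
                       .where(servers.c.task_state == old_value)
                       .values(task_state=new_value))
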
+ meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + instances.drop_column('default_swap_device') instances.drop_column('default_local_device') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py b/nova/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py index abb37cd22de8..a750f956b05d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py @@ -17,14 +17,13 @@ from migrate import ForeignKeyConstraint from nova import log as logging - -meta = MetaData() LOG = logging.getLogger(__name__) def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): @@ -38,7 +37,6 @@ def upgrade(migrate_engine): ForeignKeyConstraint(columns=[vifs.c.instance_id], refcolumns=[instances.c.id], name=fkey_name).drop() - except Exception: LOG.error(_("foreign key constraint couldn't be removed")) raise @@ -46,6 +44,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): diff --git a/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql index cf9afbb091eb..9bc3ee8d4c01 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql +++ b/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql @@ -1,4 +1,3 @@ -COMMIT; BEGIN TRANSACTION; CREATE TEMPORARY TABLE virtual_interfaces_backup ( created_at DATETIME, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py b/nova/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py index 70caf9f00ab8..e313fc7dee88 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py @@ -14,22 +14,20 @@ from sqlalchemy import Column, Integer, MetaData, String, Table -meta = MetaData() - -zones = Table('zones', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -name = Column('name', String(255)) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + zones = Table('zones', meta, autoload=True) + name = Column('name', String(255)) zones.create_column(name) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - zones.drop_column(name) + zones = Table('zones', meta, autoload=True) + + zones.drop_column('name') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py b/nova/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py index cf675f8206a4..f8d805e8b261 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py @@ -14,24 +14,20 @@ # License for the specific language governing permissions and limitations # under the License. 
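Migration 047 above drops a foreign key through migrate's ForeignKeyConstraint helper, guarded by a dialect check, because SQLite cannot drop the constraint in place and is handled by a hand-written SQL script instead. A sketch of that guard with placeholder tables (parents_demo/children_demo); on real backends the existing constraint name may also need to be supplied:

    from migrate import ForeignKeyConstraint
    from sqlalchemy import MetaData, Table


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine

        # SQLite cannot drop a foreign key from an existing table, so the real
        # migrations skip this path and rely on a *_sqlite_*.sql rebuild script.
        dialect = migrate_engine.url.get_dialect().name
        if dialect.startswith('sqlite'):
            return

        parents = Table('parents_demo', meta, autoload=True)
        children = Table('children_demo', meta, autoload=True)

        # some backends (e.g. MySQL) need the existing constraint name passed
        # explicitly via name=...
        ForeignKeyConstraint(columns=[children.c.parent_id],
                             refcolumns=[parents.c.id]).drop()
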
-from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Integer, MetaData, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -instances = Table('instances', meta, - Column("id", Integer(), primary_key=True, nullable=False)) - -# Add progress column to instances table -progress = Column('progress', Integer()) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + + progress = Column('progress', Integer()) try: instances.create_column(progress) except Exception: @@ -40,5 +36,9 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances.drop_column(progress) + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('progress') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py index 72e49f2ad3be..d4a2fcc13d76 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py @@ -14,26 +14,24 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import Column, Integer, MetaData, Table, Boolean - -meta = MetaData() - -# temporary table for creating the new columns - -instances = Table("instances", meta, - Column("id", Integer(), primary_key=True, nullable=False)) - -# The new column - -managed_disk = Column("managed_disk", Boolean(create_constraint=False, - name=None)) +from sqlalchemy import Boolean, Column, Integer, MetaData, Table def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + managed_disk = Column("managed_disk", Boolean(create_constraint=False, + name=None)) instances.create_column(managed_disk) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances.drop_column(managed_disk) + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('managed_disk') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql new file mode 100644 index 000000000000..8db7087bc088 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql @@ -0,0 +1,207 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 
VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + managed_disk BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (managed_disk IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + managed_disk + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress + FROM instances_backup; + + DROP TABLE instances_backup; +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py index 23560d98b49a..a338319933bd 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py @@ -14,19 +14,21 @@ from sqlalchemy import Column, Integer, MetaData, Table -meta = 
MetaData() - -instance_types = Table("instance_types", meta, Column("id", Integer(), - primary_key=True, nullable=False)) - -vcpu_weight = Column("vcpu_weight", Integer()) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + + vcpu_weight = Column("vcpu_weight", Integer()) instance_types.create_column(vcpu_weight) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_types.drop_column(vcpu_weight) + + instance_types = Table('instance_types', meta, autoload=True) + + instance_types.drop_column('vcpu_weight') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py b/nova/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py index 0d9833168cf5..4558dd9fab78 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py @@ -18,32 +18,33 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer from sqlalchemy import MetaData, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# Table definition -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False) -) - -export_devices = Table('export_devices', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('shelf_id', Integer()), - Column('blade_id', Integer()), - Column('volume_id', - Integer(), - ForeignKey('volumes.id'), - nullable=True), - ) - - def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + # + # New Tables + # + export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + try: export_devices.create() except Exception: @@ -53,5 +54,12 @@ def downgrade(migrate_engine): def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + export_devices = Table('export_devices', meta, autoload=True) + export_devices.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py index 55ef30f4b9ed..8fe13991849b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py @@ -13,23 +13,26 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * +# under the License. 
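Migration 052 above runs in the opposite direction from most of the series: upgrade() drops the obsolete table and downgrade() recreates it. Because the recreated table carries a ForeignKey into volumes, the referenced table is reflected into the same MetaData first so the reference can resolve. A placeholder sketch of that shape (volumes_demo/export_demo are not Nova tables):

    from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Integer,
                            MetaData, Table)


    def downgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine

        # reflect the referenced table first so ForeignKey('volumes_demo.id')
        # resolves when the dropped table is recreated
        Table('volumes_demo', meta, autoload=True)

        export_demo = Table('export_demo', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('volume_id', Integer(), ForeignKey('volumes_demo.id'),
                   nullable=True),
        )
        export_demo.create()


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine

        export_demo = Table('export_demo', meta, autoload=True)
        export_demo.drop()
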
from sqlalchemy import Column, MetaData, Table, Text -meta = MetaData() - -new_column = Column('connection_info', Text()) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + table = Table('block_device_mapping', meta, autoload=True) + + new_column = Column('connection_info', Text()) + table.create_column(new_column) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + table = Table('block_device_mapping', meta, autoload=True) + table.c.connection_info.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql new file mode 100644 index 000000000000..84439976367b --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql @@ -0,0 +1,87 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping; + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py index 742ae4598b5d..cd585a81e80b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py @@ -20,31 +20,33 @@ from sqlalchemy import Integer, BigInteger, DateTime, Boolean, String from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -bw_cache = Table('bw_usage_cache', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', 
DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('instance_id', Integer(), nullable=False), - Column('network_label', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('start_period', DateTime(timezone=False), nullable=False), - Column('last_refreshed', DateTime(timezone=False)), - Column('bw_in', BigInteger()), - Column('bw_out', BigInteger())) - - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + bw_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('network_label', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('start_period', DateTime(timezone=False), nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger())) try: bw_cache.create() except Exception: @@ -55,6 +57,8 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): - meta.bind = migrate_engine # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + bw_cache = Table('bw_usage_cache', meta, autoload=True) bw_cache.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py b/nova/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py index 45009c230d16..91c39edb07d4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py @@ -17,25 +17,19 @@ import migrate import migrate.changeset -import sqlalchemy +from sqlalchemy import Column, Integer, String, MetaData, Table from nova import log as logging - LOG = logging.getLogger(__name__) -meta = sqlalchemy.MetaData() - - -def _get_table(): - return sqlalchemy.Table('instance_types', meta, autoload=True) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_types = _get_table() + instance_types = Table('instance_types', meta, autoload=True) - string_column = sqlalchemy.Column('flavorid_str', sqlalchemy.String(255)) + string_column = Column('flavorid_str', String(255)) string_column.create(instance_types) @@ -74,11 +68,12 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_types = _get_table() - integer_column = sqlalchemy.Column('flavorid_int', - sqlalchemy.Integer()) + instance_types = Table('instance_types', meta, autoload=True) + + integer_column = Column('flavorid_int', Integer()) integer_column.create(instance_types) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py b/nova/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py index 647378161249..2c30c7986bcd 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py @@ -15,35 +15,35 @@ # License for the specific 
language governing permissions and limitations # under the License. -import sqlalchemy - +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table from nova import log as logging - -meta = sqlalchemy.MetaData() LOG = logging.getLogger(__name__) -s3_images = sqlalchemy.Table('s3_images', meta, - sqlalchemy.Column('created_at', - sqlalchemy.DateTime(timezone=False)), - sqlalchemy.Column('updated_at', - sqlalchemy.DateTime(timezone=False)), - sqlalchemy.Column('deleted_at', - sqlalchemy.DateTime(timezone=False)), - sqlalchemy.Column('deleted', - sqlalchemy.Boolean(create_constraint=True, name=None)), - sqlalchemy.Column('id', sqlalchemy.Integer(), - primary_key=True, - nullable=False, - autoincrement=True), - sqlalchemy.Column('uuid', sqlalchemy.String(36), - nullable=False)) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + s3_images = Table('s3_images', meta, + Column('created_at', + DateTime(timezone=False)), + Column('updated_at', + DateTime(timezone=False)), + Column('deleted_at', + DateTime(timezone=False)), + Column('deleted', + Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, + nullable=False, + autoincrement=True), + Column('uuid', String(36), + nullable=False)) try: s3_images.create() except Exception: @@ -53,6 +53,8 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + s3_images = Table('s3_images', meta, autoload=True) s3_images.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py index a4f00f6b5433..7f1e4f297716 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py @@ -12,87 +12,82 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table from nova import log as logging - -meta = MetaData() LOG = logging.getLogger(__name__) -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. 
-# -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# -flavors = Table('sm_flavors', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('label', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - -backend = Table('sm_backend_config', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'), - nullable=False), - Column('sr_uuid', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('sr_type', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('config_params', - String(length=2047, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)), - ) - -sm_vol = Table('sm_volume', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), ForeignKey('volumes.id'), - primary_key=True, nullable=False), - Column('backend_id', Integer(), ForeignKey('sm_backend_config.id'), - nullable=False), - Column('vdi_uuid', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) -# -# Tables to alter -# - -# (none currently) - - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + # + # New Tables + # + flavors = Table('sm_flavors', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('label', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + backend = Table('sm_backend_config', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'), + nullable=False), + Column('sr_uuid', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('sr_type', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('config_params', + String(length=2047, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)), + ) + + sm_vol = Table('sm_volume', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), ForeignKey('volumes.id'), + primary_key=True, nullable=False), + Column('backend_id', Integer(), + ForeignKey('sm_backend_config.id'), + nullable=False), + Column('vdi_uuid', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) for table in (flavors, backend, sm_vol): try: table.create() @@ -101,7 +96,16 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + flavors = Table('sm_flavors', meta, autoload=True) + backend = Table('sm_backend_config', meta, autoload=True) + sm_vol = Table('sm_volume', meta, autoload=True) + for table in (flavors, backend, sm_vol): try: table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py b/nova/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py index 6c51371e34f7..e12cabddd015 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py @@ -17,24 +17,21 @@ from sqlalchemy import MetaData, Table -meta = MetaData() - - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instances = Table('instances', meta, autoload=True) managed_disk = instances.c.managed_disk managed_disk.alter(name='auto_disk_config') def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + instances = Table('instances', meta, autoload=True) image_ref_column = instances.c.auto_disk_config image_ref_column.alter(name='managed_disk') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py b/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py index e7750e24ba10..58f6d69e08df 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py @@ -14,23 +14,15 @@ from sqlalchemy import Column, Integer, Float, MetaData, Table -meta = MetaData() - - -def _get_table(table_name): - return Table(table_name, meta, autoload=True) - -rxtx_base = Column('rxtx_base', Integer) -rxtx_factor = Column('rxtx_factor', Float, default=1) -rxtx_quota = Column('rxtx_quota', Integer) -rxtx_cap = Column('rxtx_cap', Integer) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_types = _get_table('instance_types') - networks = _get_table('networks') + instance_types = Table('instance_types', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + rxtx_base = Column('rxtx_base', Integer) + rxtx_factor = Column('rxtx_factor', Float, default=1) instance_types.create_column(rxtx_factor) networks.create_column(rxtx_base) @@ -47,10 +39,13 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_types = _get_table('instance_types') - networks = _get_table('networks') + instance_types = Table('instance_types', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + rxtx_quota = Column('rxtx_quota', Integer) + rxtx_cap = Column('rxtx_cap', Integer) instance_types.create_column(rxtx_quota) instance_types.create_column(rxtx_cap) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql index f4fac8447f04..ecf45c599ba0 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql +++ b/nova/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql @@ -11,13 +11,13 @@ BEGIN TRANSACTION; vcpus INTEGER NOT NULL, local_gb INTEGER NOT NULL, swap INTEGER NOT NULL, - rxtx_cap INTEGER, - rxtx_quota INTEGER, + rxtx_quota INTEGER NOT NULL, + rxtx_cap INTEGER NOT NULL, vcpu_weight INTEGER, flavorid VARCHAR(255), PRIMARY KEY (id), - UNIQUE (flavorid), CHECK (deleted IN (0, 1)), + UNIQUE (flavorid), UNIQUE (name) ); @@ -32,11 +32,11 @@ BEGIN TRANSACTION; vcpus, local_gb, swap, + 0 as rxtx_quota, COALESCE(rxtx_factor, 1) * COALESCE ((SELECT MIN(rxtx_base) FROM networks WHERE rxtx_base > 0), 1) as rxtx_cap, - 0 as rxtx_cap, vcpu_weight, flavorid FROM instance_types; @@ -53,8 +53,8 @@ BEGIN TRANSACTION; vcpus INTEGER NOT NULL, local_gb INTEGER NOT NULL, swap INTEGER NOT NULL, + rxtx_quota INTEGER NOT NULL, rxtx_cap INTEGER NOT NULL, - rxtx_factor INTEGER NOT NULL, vcpu_weight INTEGER, flavorid VARCHAR(255), PRIMARY KEY (id), @@ 
-67,68 +67,71 @@ BEGIN TRANSACTION; DROP TABLE instance_types_backup; CREATE TABLE networks_backup ( - created_at datetime DEFAULT NULL, - updated_at datetime DEFAULT NULL, - deleted_at datetime DEFAULT NULL, - deleted tinyint(1) DEFAULT NULL, - id int(11) NOT NULL, - injected tinyint(1) DEFAULT NULL, - cidr varchar(255) DEFAULT NULL, - netmask varchar(255) DEFAULT NULL, - bridge varchar(255) DEFAULT NULL, - gateway varchar(255) DEFAULT NULL, - broadcast varchar(255) DEFAULT NULL, - dns1 varchar(255) DEFAULT NULL, - vlan int(11) DEFAULT NULL, - vpn_public_address varchar(255) DEFAULT NULL, - vpn_public_port int(11) DEFAULT NULL, - vpn_private_address varchar(255) DEFAULT NULL, - dhcp_start varchar(255) DEFAULT NULL, - project_id varchar(255) DEFAULT NULL, - host varchar(255) DEFAULT NULL, - cidr_v6 varchar(255) DEFAULT NULL, - gateway_v6 varchar(255) DEFAULT NULL, - label varchar(255) DEFAULT NULL, - netmask_v6 varchar(255) DEFAULT NULL, - bridge_interface varchar(255) DEFAULT NULL, - multi_host tinyint(1) DEFAULT NULL, - dns2 varchar(255) DEFAULT NULL, - uuid varchar(36) DEFAULT NULL, - priority int(11) DEFAULT NULL, - PRIMARY KEY (`id`) - ); + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns1 VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + multi_host BOOLEAN, + dns2 VARCHAR(255), + uuid VARCHAR(36), + priority INTEGER, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)), + CHECK (multi_host IN (0, 1)) + ); - INSERT INTO networks_backup - SELECT created_at, - updated_at, - deleted_at, - deleted, - id, - injected, - cidr, - netmask, - bridge, - gateway, - broadcast, - dns1, - vlan, - vpn_public_address, - vpn_public_port, - vpn_private_address, - dhcp_start, - project_id, - host, - cidr_v6, - gateway_v6, - label, - netmask_v6, - bridge_interface, - multi_host, - dns2, - uuid, - priority - FROM networks; + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns1, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface, + multi_host, + dns2, + uuid, + priority + FROM networks; - DROP TABLE networks; - ALTER TABLE networks_backup RENAME TO networks; + DROP TABLE networks; + ALTER TABLE networks_backup RENAME TO networks; COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py b/nova/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py index d91d3f99ac07..7515baa012aa 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py @@ -17,13 +17,13 @@ from migrate import ForeignKeyConstraint from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): @@ -45,6 +45,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): diff --git a/nova/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql index c804e34624c8..2c0919f1dd98 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql +++ b/nova/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql @@ -1,4 +1,3 @@ -COMMIT; BEGIN TRANSACTION; CREATE TEMPORARY TABLE virtual_interfaces_backup ( created_at DATETIME, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py index 0ec06b66d27c..1a369bffec34 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py @@ -14,16 +14,16 @@ from sqlalchemy import Index, MetaData, Table -meta = MetaData() - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) Index('uuid', instances.c.uuid, unique=True).create(migrate_engine) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) Index('uuid', instances.c.uuid, unique=True).drop(migrate_engine) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py b/nova/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py index 54a02f50fdf4..cbfd8ea5a592 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py @@ -13,39 +13,40 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import * -from migrate import * +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table, Text from nova import log as logging from nova import utils -meta = MetaData() LOG = logging.getLogger(__name__) -# instance info cache table to add to DB -instance_info_caches = Table('instance_info_caches', meta, - Column('created_at', DateTime(timezone=False), - default=utils.utcnow()), - Column('updated_at', DateTime(timezone=False), - onupdate=utils.utcnow()), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True), - Column('network_info', Text()), - Column('instance_id', String(36), - ForeignKey('instances.uuid'), - nullable=False, - unique=True), - mysql_engine='InnoDB') - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - # load instances for fk + # load tables for fk instances = Table('instances', meta, autoload=True) + # + # New Tables + # + instance_info_caches = Table('instance_info_caches', meta, + Column('created_at', DateTime(timezone=False), + default=utils.utcnow()), + Column('updated_at', DateTime(timezone=False), + onupdate=utils.utcnow()), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True), + Column('network_info', Text()), + Column('instance_id', String(36), + ForeignKey('instances.uuid'), + nullable=False, + unique=True), + mysql_engine='InnoDB') # create instance_info_caches table try: instance_info_caches.create() @@ -55,6 +56,13 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + instance_info_caches = Table('instance_info_caches', meta, autoload=True) try: instance_info_caches.drop() except Exception: diff --git a/nova/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py b/nova/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py index c76dcc9c3588..03ec099714b2 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py @@ -17,41 +17,35 @@ from sqlalchemy import Boolean, Column, DateTime, Integer, ForeignKey from sqlalchemy import MetaData, String, Table, Text from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# -# New Tables -# -instance_faults = Table('instance_faults', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None), - default=False), - Column('id', Integer(), primary_key=True, nullable=False), - Column('instance_uuid', String(36, ForeignKey('instances.uuid'))), - Column('code', Integer(), nullable=False), - Column('message', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('details', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -# -# Tables to alter -# - -# (none currently) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + instance_faults = Table('instance_faults', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None), + default=False), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_uuid', String(36, ForeignKey('instances.uuid'))), + Column('code', Integer(), nullable=False), + Column('message', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('details', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) try: instance_faults.create() except Exception: @@ -60,4 +54,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + instance_faults = Table('instance_faults', meta, autoload=True) instance_faults.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py b/nova/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py index a31c50d88ed2..a06443c0666b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py @@ -15,26 +15,22 @@ # License for the specific language governing permissions and limitations # under the License. -import sqlalchemy -from sqlalchemy import select, Column, ForeignKey, Integer, String +from sqlalchemy import select, Column, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table from migrate import ForeignKeyConstraint from nova import log as logging LOG = logging.getLogger(__name__) -meta = sqlalchemy.MetaData() - - -def _get_table(name): - return sqlalchemy.Table(name, meta, autoload=True) def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name - instance_actions = _get_table('instance_actions') - instances = _get_table('instances') + instance_actions = Table('instance_actions', meta, autoload=True) + instances = Table('instances', meta, autoload=True) uuid_column = Column('instance_uuid', String(36)) uuid_column.create(instance_actions) @@ -64,9 +60,10 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instance_actions = _get_table('instance_actions') - instances = _get_table('instances') + instance_actions = Table('instance_actions', meta, autoload=True) + instances = Table('instances', meta, autoload=True) id_column = Column('instance_id', Integer, ForeignKey('instances.id')) id_column.create(instance_actions) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py b/nova/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py index 7aaefca87cfb..9b27f39dcc6d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py @@ -14,10 +14,9 @@ from sqlalchemy import Index, MetaData, Table -meta = MetaData() - def upgrade(migrate_engine): + meta = 
MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) index = Index('project_id', instances.c.project_id) @@ -25,6 +24,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) index = Index('project_id', instances.c.project_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py b/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py index 9764d787c90d..15b6672882c4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py @@ -15,18 +15,17 @@ import json -from sqlalchemy import * -from migrate import * +from sqlalchemy import select, MetaData, Table from nova import ipv6 from nova import log as logging from nova import utils -meta = MetaData() LOG = logging.getLogger(__name__) def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine # grab tables instance_info_caches = Table('instance_info_caches', meta, autoload=True) @@ -235,6 +234,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # facepalm + meta = MetaData() meta.bind = migrate_engine instance_info_caches = Table('instance_info_caches', meta, autoload=True) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py b/nova/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py index 205d7e03440d..6ba930db839a 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py @@ -13,26 +13,24 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * +# under the License. 
-from sqlalchemy import Column, MetaData, Table, String +from sqlalchemy import Column, MetaData, String, Table from nova import flags - flags.DECLARE('default_floating_pool', 'nova.network.manager') flags.DECLARE('public_interface', 'nova.network.linux_net') FLAGS = flags.FLAGS -meta = MetaData() - -pool_column = Column('pool', String(255)) -interface_column = Column('interface', String(255)) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine table = Table('floating_ips', meta, autoload=True) + + pool_column = Column('pool', String(255)) + interface_column = Column('interface', String(255)) table.create_column(pool_column) table.create_column(interface_column) table.update().values(pool=FLAGS.default_floating_pool, @@ -40,6 +38,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine table = Table('floating_ips', meta, autoload=True) table.c.pool.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql new file mode 100644 index 000000000000..3cd12cbdc2f0 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql @@ -0,0 +1,69 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned, + pool, + interface + FROM floating_ips; + + DROP TABLE floating_ips; + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned + FROM floating_ips_backup; + + DROP TABLE floating_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py b/nova/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py index 00e2b19947e2..a65aff8b426b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py @@ -12,27 +12,25 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import MetaData from sqlalchemy import Boolean -from sqlalchemy import Column, Table - -meta = MetaData() - -shutdown_terminate = Column( - 'shutdown_terminate', Boolean(), default=True) -disable_terminate = Column( - 'disable_terminate', Boolean(), default=False) +from sqlalchemy import Column, MetaData, Table def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) + shutdown_terminate = Column( + 'shutdown_terminate', Boolean(), default=True) + disable_terminate = Column( + 'disable_terminate', Boolean(), default=False) instances.create_column(shutdown_terminate) instances.create_column(disable_terminate) def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) - instances.drop_column(shutdown_terminate) - instances.drop_column(disable_terminate) + instances.drop_column('shutdown_terminate') + instances.drop_column('disable_terminate') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql new file mode 100644 index 000000000000..a7700f6fab7b --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql @@ -0,0 +1,219 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + shutdown_terminate BOOLEAN, + disable_terminate BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)), + CHECK (shutdown_terminate IN (0, 1)), + CHECK (disable_terminate IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config, + shutdown_terminate, + disable_terminate + 
FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)) + ); + + CREATE INDEX project_id ON instances (project_id); + CREATE UNIQUE INDEX uuid ON instances (uuid); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config + FROM instances_backup; + + DROP TABLE instances_backup; +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py index cd5dcba1d5da..14a3db35dcb7 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py @@ -19,26 +19,22 @@ from sqlalchemy import Column, Integer, MetaData, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# Add disk_available_least column to compute_nodes table. -# Thinking about qcow2 image support, both compressed and virtual disk size -# has to be considered. -# disk_available stores "total disk size - used disk(compressed disk size)", -# while disk_available_least stores -# "total disk size - used disk(virtual disk size)". -# virtual disk size is used for kvm block migration. 
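As a quick illustration of the comment above (not part of the patch; the numbers are made up): with a 100 GB disk hosting a qcow2 image that is 12 GB compressed but 40 GB virtual, the two capacity figures differ as follows.

    total_gb, compressed_gb, virtual_gb = 100, 12, 40
    disk_available = total_gb - compressed_gb        # 88: total disk size - used disk (compressed disk size)
    disk_available_least = total_gb - virtual_gb     # 60: total disk size - used disk (virtual disk size),
                                                     #     the value kvm block migration has to rely on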
- -compute_nodes = Table('compute_nodes', meta, - Column('id', Integer(), primary_key=True, nullable=False)) - -disk_available_least = Column('disk_available_least', Integer(), default=0) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + + disk_available_least = Column('disk_available_least', Integer(), default=0) + compute_nodes = Table('compute_nodes', meta, autoload=True) + # Add disk_available_least column to compute_nodes table. + # Thinking about qcow2 image support, both compressed and virtual disk size + # has to be considered. + # disk_available stores "total disk size - used disk(compressed disk size)" + # while disk_available_least stores + # "total disk size - used disk(virtual disk size)". + # virtual disk size is used for kvm block migration. try: compute_nodes.create_column(disk_available_least) except Exception: @@ -47,5 +43,8 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - compute_nodes.drop_column(disk_available_least) + + compute_nodes = Table('compute_nodes', meta, autoload=True) + compute_nodes.drop_column('disk_available_least') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql index 25270a686ffc..5837603c8663 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql +++ b/nova/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql @@ -64,7 +64,7 @@ BEGIN TRANSACTION; PRIMARY KEY (id), FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id), FOREIGN KEY(network_id) REFERENCES networks (id), - FOREIGN KEY(instance_id) REFERENCES instanced (id) + FOREIGN KEY(instance_id) REFERENCES instances (id) ); CREATE TABLE floating_ips ( diff --git a/nova/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py b/nova/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py index 4f0530765db9..b5e1473426ae 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py @@ -17,13 +17,13 @@ from migrate import ForeignKeyConstraint from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata + meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): @@ -71,6 +71,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. 
+ meta = MetaData() meta.bind = migrate_engine dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): diff --git a/nova/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py index c2dfdb7be04c..99fe642d79f2 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py @@ -17,71 +17,75 @@ from sqlalchemy import MetaData, Column, ForeignKey, Table from nova import log as logging - -meta = MetaData() LOG = logging.getLogger(__name__) -aggregates = Table('aggregates', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), - primary_key=True, nullable=False, autoincrement=True), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True), - Column('operational_state', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=False), - Column('availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=False), - ) - -hosts = Table('aggregate_hosts', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True), - Column('aggregate_id', Integer(), ForeignKey('aggregates.id'), - nullable=False), - ) - -metadata = Table('aggregate_metadata', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('aggregate_id', - Integer(), - ForeignKey('aggregates.id'), - nullable=False), - Column('key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=False), - Column('value', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=False)) - - -tables = (aggregates, hosts, metadata) - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + # + # New Tables + # + aggregates = Table('aggregates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, nullable=False, autoincrement=True), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('operational_state', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + 
Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + + hosts = Table('aggregate_hosts', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('aggregate_id', Integer(), ForeignKey('aggregates.id'), + nullable=False), + ) + + metadata = Table('aggregate_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('aggregate_id', + Integer(), + ForeignKey('aggregates.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False)) + tables = (aggregates, hosts, metadata) for table in tables: try: table.create() @@ -90,8 +94,14 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - for table in tables: + + aggregates = Table('aggregates', meta, autoload=True) + hosts = Table('aggregate_hosts', meta, autoload=True) + metadata = Table('aggregate_metadata', meta, autoload=True) + # table order matters, don't change + for table in (hosts, metadata, aggregates): try: table.drop() except Exception: diff --git a/nova/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py b/nova/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py index 67cdf9243ed9..fbb3fbea8c9c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py @@ -17,40 +17,43 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey from sqlalchemy import MetaData, String, Table from nova import log as logging -meta = MetaData() LOG = logging.getLogger(__name__) -# -# New Tables -# -dns_domains = Table('dns_domains', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('domain', - String(length=512, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, nullable=False), - Column('scope', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('projects.id')) - ) - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - # load instances for fk - instances = Table('projects', meta, autoload=True) 
+ # load tables for fk + projects = Table('projects', meta, autoload=True) + # + # New Tables + # + dns_domains = Table('dns_domains', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('domain', + String(length=512, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, nullable=False), + Column('scope', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id')) + ) # create dns_domains table try: dns_domains.create() @@ -60,6 +63,13 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + projects = Table('projects', meta, autoload=True) + + dns_domains = Table('dns_domains', meta, autoload=True) try: dns_domains.drop() except Exception: diff --git a/nova/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql new file mode 100644 index 000000000000..d11e82147886 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql @@ -0,0 +1,13 @@ +CREATE TABLE dns_domains ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + domain VARCHAR(512) CHARACTER SET latin1 NOT NULL, + scope VARCHAR(255), + availability_zone VARCHAR(255), + project_id VARCHAR(255), + PRIMARY KEY (domain), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(project_id) REFERENCES projects (id) +); diff --git a/nova/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py b/nova/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py index 2b5dd09ad42a..5dfe910cac2c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py @@ -12,19 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import Table, Column, MetaData, Integer +from sqlalchemy import Column, Integer, MetaData, Table from nova import log as logging -new_columns = [ - Column('free_ram_mb', Integer()), - Column('free_disk_gb', Integer()), - Column('current_workload', Integer()), - Column('running_vms', Integer()), - ] - - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata @@ -32,6 +24,15 @@ def upgrade(migrate_engine): meta.bind = migrate_engine compute_nodes = Table('compute_nodes', meta, autoload=True) + # + # New Columns + # + new_columns = [ + Column('free_ram_mb', Integer()), + Column('free_disk_gb', Integer()), + Column('current_workload', Integer()), + Column('running_vms', Integer()), + ] for column in new_columns: compute_nodes.create_column(column) @@ -41,5 +42,8 @@ def downgrade(migrate_engine): meta.bind = migrate_engine compute_nodes = Table('compute_nodes', meta, autoload=True) - for column in new_columns: - compute_notes.drop_column(column) + for column in ('free_ram_mb', + 'free_disk_gb', + 'current_workload', + 'running_vms'): + compute_nodes.drop_column(column) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py b/nova/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py index 0ec64693272a..91cef2694b20 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py @@ -15,21 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -import sqlalchemy -from sqlalchemy import select, Column, Integer +from sqlalchemy import select, Column, Integer, MetaData, Table from nova import exception from nova import flags - FLAGS = flags.FLAGS -meta = sqlalchemy.MetaData() - - -def _get_table(name): - return sqlalchemy.Table(name, meta, autoload=True) - def upgrade_libvirt(instances, instance_types): # Update instance_types first @@ -73,8 +65,9 @@ def check_instance_presence(migrate_engine, instances_table): def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = _get_table('instances') + instances = Table('instances', meta, autoload=True) data_present = check_instance_presence(migrate_engine, instances) @@ -83,7 +76,7 @@ def upgrade(migrate_engine): "connection_type to run migration migration") raise exception.Error(msg) - instance_types = _get_table('instance_types') + instance_types = Table('instance_types', meta, autoload=True) for table in (instances, instance_types): root_gb = Column('root_gb', Integer) @@ -108,12 +101,13 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - instances = _get_table('instances') - instance_types = _get_table('instance_types') + instances = Table('instances', meta, autoload=True) + instance_types = Table('instance_types', meta, autoload=True) for table in (instances, instance_types): - local_gb = Column('root_gb', Integer) + local_gb = Column('local_gb', Integer) local_gb.create(table) try: diff --git a/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py b/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py index c7a40b0ff67a..b275524ab55a 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py @@ -15,43 +15,40 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import * -from migrate import * +from sqlalchemy import and_, select +from sqlalchemy import BigInteger, Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table from nova import utils -meta = MetaData() - - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - bw_usage_cache = Table('bw_usage_cache', meta, - Column('id', Integer, primary_key=True), - Column('network_label', String(255)), - Column('instance_id', Integer, nullable=False), - Column('start_period', DateTime, nullable=False), - Column('last_refreshed', DateTime), - Column('bw_in', BigInteger), - Column('bw_out', BigInteger), - Column('created_at', DateTime(timezone=False), - default=utils.utcnow()), - Column('updated_at', DateTime(timezone=False), - onupdate=utils.utcnow()), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, - name=None))) vifs = Table('virtual_interfaces', meta, autoload=True) networks = Table('networks', meta, autoload=True) - mac_column = Column('mac', String(255)) - try: - bw_usage_cache.create_column(mac_column) - except Exception: - # NOTE(jkoelker) passing here since this migration was broken - # at one point - pass + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('network_label', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('start_period', DateTime(timezone=False), + nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + mac_column = Column('mac', String(255)) + bw_usage_cache.create_column(mac_column) bw_usage_cache.update()\ .values(mac=select([vifs.c.address])\ @@ -65,40 +62,36 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - bw_usage_cache = Table('bw_usage_cache', meta, - Column('id', Integer, primary_key=True), - Column('mac', String(255)), - Column('instance_id', Integer, nullable=False), - Column('start_period', DateTime, nullable=False), - Column('last_refreshed', DateTime), - Column('bw_in', BigInteger), - Column('bw_out', BigInteger), - Column('created_at', DateTime(timezone=False), - default=utils.utcnow()), - Column('updated_at', DateTime(timezone=False), - onupdate=utils.utcnow()), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, - name=None))) vifs = Table('virtual_interfaces', meta, autoload=True) network = Table('networks', meta, autoload=True) - network_label_column = Column('network_label', String(255)) + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('mac', String(255)), + Column('start_period', DateTime(timezone=False), + nullable=False), + Column('last_refreshed', 
DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + + network_label_column = Column('network_label', String(255)) bw_usage_cache.create_column(network_label_column) - try: - bw_usage_cache.update()\ - .values(network_label=select([network.c.label])\ - .where(and_( - network.c.id == vifs.c.network_id, - vifs.c.address == bw_usage_cache.c.mac, - bw_usage_cache.c.instance_id == vifs.c.instance_id))\ - .as_scalar()).execute() - except Exception: - # NOTE(jkoelker) passing here since this migration was broken - # at one point - pass + bw_usage_cache.update()\ + .values(network_label=select([network.c.label])\ + .where(and_( + network.c.id == vifs.c.network_id, + vifs.c.address == bw_usage_cache.c.mac, + bw_usage_cache.c.instance_id == vifs.c.instance_id))\ + .as_scalar()).execute() bw_usage_cache.c.mac.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py b/nova/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py index ef8b5d8e8aae..ab409617214e 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py @@ -18,8 +18,6 @@ from sqlalchemy import MetaData, Table from migrate.changeset.constraint import UniqueConstraint -meta = MetaData() - def _get_constraint_names(engine_name): @@ -42,6 +40,7 @@ def _get_constraint_names(engine_name): def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine c_names = _get_constraint_names(migrate_engine.name) @@ -62,6 +61,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine c_names = _get_constraint_names(migrate_engine.name) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py b/nova/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py index 776fbde0885b..4c08e2f0dc61 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py @@ -17,10 +17,9 @@ from sqlalchemy import MetaData -meta = MetaData() - def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine # NOTE (ironcamel): The only table we are not converting to utf8 here is diff --git a/nova/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py index 5f1000afe1fd..e4043f84dec8 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py @@ -12,23 +12,22 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import * - -meta = MetaData() - -zones = Table('zones', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -is_parent = Column('is_parent', Boolean(), default=False) -rpc_host = Column('rpc_host', String(255)) -rpc_port = Column('rpc_port', Integer()) -rpc_virtual_host = Column('rpc_virtual_host', String(255)) +from sqlalchemy import Boolean, Column +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine + zones = Table('zones', meta, autoload=True) + + is_parent = Column('is_parent', Boolean(), default=False) + rpc_host = Column('rpc_host', String(255)) + rpc_port = Column('rpc_port', Integer()) + rpc_virtual_host = Column('rpc_virtual_host', String(255)) + zones.create_column(is_parent) zones.create_column(rpc_host) zones.create_column(rpc_port) @@ -36,9 +35,12 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine - zones.drop_column(rpc_virtual_host) - zones.drop_column(rpc_port) - zones.drop_column(rpc_host) - zones.drop_column(is_parent) + zones = Table('zones', meta, autoload=True) + + zones.drop_column('rpc_virtual_host') + zones.drop_column('rpc_port') + zones.drop_column('rpc_host') + zones.drop_column('is_parent') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql index cf18c1c297da..80061af78b9c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql +++ b/nova/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql @@ -7,7 +7,7 @@ BEGIN TRANSACTION; deleted BOOLEAN, id INTEGER NOT NULL, name VARCHAR(255), - api_url VARVHAR(255), + api_url VARCHAR(255), username VARCHAR(255), password VARCHAR(255), weight_offset FLOAT, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py index bff1b685bc39..2b22b94a0202 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py @@ -12,12 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * - -meta = MetaData() +from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) zone_name = Column('zone_name', String(255)) @@ -25,7 +24,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): + meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) - zone_name = Column('zone_name', String(255)) - instances.drop_column(zone_name) + instances.drop_column('zone_name') diff --git a/nova/tests/test_migrations.conf b/nova/tests/test_migrations.conf new file mode 100644 index 000000000000..774f1499406e --- /dev/null +++ b/nova/tests/test_migrations.conf @@ -0,0 +1,9 @@ +[DEFAULT] +# Set up any number of migration data stores you want, one +# The "name" used in the test is the config variable key. 
+#sqlite=sqlite:///test_migrations.db +sqlite=sqlite:// +#mysql=mysql://root:@localhost/test_migrations +#postgresql=postgresql://user:pass@localhost/test_migrations +[walk_style] +snake_walk=yes diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py new file mode 100644 index 000000000000..b0caea22081e --- /dev/null +++ b/nova/tests/test_migrations.py @@ -0,0 +1,224 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for database migrations. This test case reads the configuration +file test_migrations.conf for database connection settings +to use in the tests. For each connection found in the config file, +the test case runs a series of test cases to ensure that migrations work +properly both upgrading and downgrading, and that no data loss occurs +if possible. +""" + +import ConfigParser +import commands +import os +import unittest +import urlparse + +import migrate.versioning.api as migration_api +from migrate.versioning.repository import Repository +from sqlalchemy import create_engine + +from nova import log as logging +from nova import test + +LOG = logging.getLogger('nova.tests.test_migrations') + + +class TestMigrations(unittest.TestCase): + """Test sqlalchemy-migrate migrations""" + + TEST_DATABASES = {} + # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable + # to override the location of the config file for migration testing + CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF', + os.path.join('test_migrations.conf')) + REPOSITORY_PATH = os.path.abspath(os.path.join('..', 'db', 'sqlalchemy', + 'migrate_repo')) + REPOSITORY = Repository(REPOSITORY_PATH) + + def __init__(self, *args, **kwargs): + super(TestMigrations, self).__init__(*args, **kwargs) + + def setUp(self): + super(TestMigrations, self).setUp() + + # Load test databases from the config file. Only do this + # once. No need to re-run this on each test... + LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH) + if not TestMigrations.TEST_DATABASES: + if os.path.exists(TestMigrations.CONFIG_FILE_PATH): + cp = ConfigParser.RawConfigParser() + try: + cp.read(TestMigrations.CONFIG_FILE_PATH) + defaults = cp.defaults() + for key, value in defaults.items(): + TestMigrations.TEST_DATABASES[key] = value + self.snake_walk = cp.getboolean('walk_style', 'snake_walk') + except ConfigParser.ParsingError, e: + self.fail("Failed to read test_migrations.conf config " + "file. Got error: %s" % e) + else: + self.fail("Failed to find test_migrations.conf config " + "file.") + + self.engines = {} + for key, value in TestMigrations.TEST_DATABASES.items(): + self.engines[key] = create_engine(value) + + # We start each test case with a completely blank slate. 
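As a rough standalone sketch (not part of the patch, and assuming the same ConfigParser/create_engine approach used in setUp above), this is how the [DEFAULT] entries of test_migrations.conf end up as named SQLAlchemy engines:

    import ConfigParser
    from sqlalchemy import create_engine

    cp = ConfigParser.RawConfigParser()
    cp.read('test_migrations.conf')
    # each [DEFAULT] key becomes one named engine, e.g. {'sqlite': <Engine sqlite://>};
    # uncommenting the mysql/postgresql lines in the sample config adds those backends
    engines = dict((name, create_engine(url)) for name, url in cp.defaults().items())
    snake_walk = cp.getboolean('walk_style', 'snake_walk')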
+ self._reset_databases() + + def tearDown(self): + super(TestMigrations, self).tearDown() + + # We destroy the test data store between each test case, + # and recreate it, which ensures that we have no side-effects + # from the tests + self._reset_databases() + + def _reset_databases(self): + def execute_cmd(cmd=None): + status, output = commands.getstatusoutput(cmd) + LOG.debug(output) + self.assertEqual(0, status) + for key, engine in self.engines.items(): + conn_string = TestMigrations.TEST_DATABASES[key] + conn_pieces = urlparse.urlparse(conn_string) + if conn_string.startswith('sqlite'): + # We can just delete the SQLite database, which is + # the easiest and cleanest solution + db_path = conn_pieces.path.strip('/') + if os.path.exists(db_path): + os.unlink(db_path) + # No need to recreate the SQLite DB. SQLite will + # create it for us if it's not there... + elif conn_string.startswith('mysql'): + # We can execute the MySQL client to destroy and re-create + # the MYSQL database, which is easier and less error-prone + # than using SQLAlchemy to do this via MetaData...trust me. + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + if auth_pieces[1].strip(): + password = "-p%s" % auth_pieces[1] + sql = ("drop database if exists %(database)s; " + "create database %(database)s;") % locals() + cmd = ("mysql -u%(user)s %(password)s -h%(host)s " + "-e\"%(sql)s\"") % locals() + execute_cmd(cmd) + elif conn_string.startswith('postgresql'): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + if auth_pieces[1].strip(): + password = auth_pieces[1] + cmd = ("touch ~/.pgpass;" + "chmod 0600 ~/.pgpass;" + "sed -i -e" + "'1{s/^.*$/\*:\*:\*:%(user)s:%(password)s/};" + "1!d' ~/.pgpass") % locals() + execute_cmd(cmd) + sql = ("UPDATE pg_catalog.pg_database SET datallowconn=false " + "WHERE datname='%(database)s';") % locals() + cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() + execute_cmd(cmd) + sql = ("SELECT pg_catalog.pg_terminate_backend(procpid) " + "FROM pg_catalog.pg_stat_activity " + "WHERE datname='%(database)s';") % locals() + cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() + execute_cmd(cmd) + sql = ("drop database if exists %(database)s;") % locals() + cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() + execute_cmd(cmd) + sql = ("create database %(database)s;") % locals() + cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() + execute_cmd(cmd) + + def test_walk_versions(self): + """ + Walks all version scripts for each tested database, ensuring + that there are no errors in the version scripts for each engine + """ + for key, engine in self.engines.items(): + self._walk_versions(engine, self.snake_walk) + + def _walk_versions(self, engine=None, snake_walk=False): + # Determine latest version script from the repo, then + # upgrade from 1 through to the latest, with no data + # in the databases. This just checks that the schema itself + # upgrades successfully. 
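For orientation (illustration only, not part of the patch), with snake_walk enabled the walk that follows visits repository versions in the order sketched below, where latest stands for TestMigrations.REPOSITORY.latest:

    # on the way up, each version is exercised as upgrade -> downgrade -> upgrade;
    # on the way back down, each step is downgrade -> upgrade -> downgrade
    latest = 3  # hypothetical latest revision in the migrate repository
    steps = []
    for version in range(1, latest + 1):
        steps += [('upgrade', version), ('downgrade', version - 1), ('upgrade', version)]
    for version in reversed(range(0, latest)):
        steps += [('downgrade', version), ('upgrade', version + 1), ('downgrade', version)]
    print(steps)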
+ + # Place the database under version control + migration_api.version_control(engine, TestMigrations.REPOSITORY) + self.assertEqual(0, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + + LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest) + + for version in xrange(1, TestMigrations.REPOSITORY.latest + 1): + # upgrade -> downgrade -> upgrade + migration_api.upgrade(engine, TestMigrations.REPOSITORY, version) + self.assertEqual(version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + if snake_walk: + migration_api.downgrade(engine, + TestMigrations.REPOSITORY, + version - 1) + self.assertEqual(version - 1, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + migration_api.upgrade(engine, + TestMigrations.REPOSITORY, + version) + self.assertEqual(version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + + # Now walk it back down to 0 from the latest, testing + # the downgrade paths. + for version in reversed( + xrange(0, TestMigrations.REPOSITORY.latest)): + # downgrade -> upgrade -> downgrade + migration_api.downgrade(engine, TestMigrations.REPOSITORY, version) + self.assertEqual(version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + if snake_walk: + migration_api.upgrade(engine, + TestMigrations.REPOSITORY, + version + 1) + self.assertEqual(version + 1, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + migration_api.downgrade(engine, + TestMigrations.REPOSITORY, + version) + self.assertEqual(version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) diff --git a/run_tests.sh b/run_tests.sh index 133b84515ee6..9964802cf334 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -34,6 +34,8 @@ function process_option { -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; + -m|--patch-migrate) patch_migrate=1;; + -w|--no-patch-migrate) patch_migrate=0;; -f|--force) force=1;; -p|--pep8) just_pep8=1;; -P|--no-pep8) no_pep8=1;; @@ -59,6 +61,7 @@ no_pep8=0 just_hacking=0 coverage=0 recreate_db=1 +patch_migrate=1 for arg in "$@"; do process_option $arg @@ -185,6 +188,16 @@ if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi +# Workaround for sqlalchemy-migrate issue 72 +# see: http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=72 +if [ $patch_migrate -eq 1 ]; then + pyver=python`python -c 'import sys; print(".".join(map(str, sys.version_info[:2])))'` + target=${venv}/lib/${pyver}/site-packages/migrate/versioning/util/__init__.py + if [ -f $target ]; then + sed -i -e '/^\s\+finally:$/ {N; /^\(\s\+finally:\n\s\+if isinstance(engine, Engine)\):$/ {s//\1 and engine is not url:/}}' $target + fi +fi + run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite,