Merge "Add better status to baremetal deployments."
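Summary of the change: deployment status tracking moves off the short-lived bm_deployments rows and onto the bm_nodes table itself. The deploy helper now records task_state transitions (DEPLOYING, then DEPLOYFAIL or DEPLOYDONE) and validates incoming requests against a per-node deploy_key. The state names referenced in the diff suggest simple string constants; a hedged sketch of what nova/virt/baremetal/baremetal_states.py plausibly provides (only the constant names appear in the diff, the string values here are illustrative):

```python
# Inferred sketch of the constants the worker below writes to bm_nodes;
# the names come from this diff, the values are illustrative guesses.
DEPLOYING = 'deploying'        # set when the worker picks a node off the queue
DEPLOYFAIL = 'deploy failed'   # set when deploy() raises
DEPLOYDONE = 'deploy complete' # set when deploy() returns normally
```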
@@ -47,6 +47,7 @@ from nova import config
 from nova import context as nova_context
 from nova.openstack.common import log as logging
 from nova import utils
+from nova.virt.baremetal import baremetal_states
 from nova.virt.baremetal import db
 
 
@@ -234,22 +235,27 @@ class Worker(threading.Thread):
         while not self.stop:
             try:
                 # Set timeout to check self.stop periodically
-                (deployment_id, params) = QUEUE.get(block=True,
+                (node_id, params) = QUEUE.get(block=True,
                                                     timeout=self.queue_timeout)
             except Queue.Empty:
                 pass
             else:
                 # Requests comes here from BareMetalDeploy.post()
-                LOG.info("start deployment: %s, %s", deployment_id, params)
+                LOG.info(_('start deployment for node %(node_id)s, '
+                           'params %(params)s') % locals())
+                context = nova_context.get_admin_context()
                 try:
+                    db.bm_node_update(context, node_id,
+                          {'task_state': baremetal_states.DEPLOYING})
                     deploy(**params)
                 except Exception:
-                    LOG.exception('deployment %s failed' % deployment_id)
+                    LOG.error(_('deployment to node %s failed') % node_id)
+                    db.bm_node_update(context, node_id,
+                          {'task_state': baremetal_states.DEPLOYFAIL})
                 else:
-                    LOG.info("deployment %s done", deployment_id)
-                finally:
-                    context = nova_context.get_admin_context()
-                    db.bm_deployment_destroy(context, deployment_id)
+                    LOG.info(_('deployment to node %s done') % node_id)
+                    db.bm_node_update(context, node_id,
+                          {'task_state': baremetal_states.DEPLOYDONE})
 
 
 class BareMetalDeploy(object):
@@ -276,8 +282,8 @@ class BareMetalDeploy(object):
         x = inpt.read(length)
         q = dict(cgi.parse_qsl(x))
         try:
-            deployment_id = q['i']
-            deployment_key = q['k']
+            node_id = q['i']
+            deploy_key = q['k']
             address = q['a']
             port = q.get('p', '3260')
             iqn = q['n']
@@ -287,9 +293,9 @@ class BareMetalDeploy(object):
             return "parameter '%s' is not defined" % e
 
         context = nova_context.get_admin_context()
-        d = db.bm_deployment_get(context, deployment_id)
+        d = db.bm_node_get(context, node_id)
 
-        if d['key'] != deployment_key:
+        if d['deploy_key'] != deploy_key:
             start_response('400 Bad Request', [('Content-type', 'text/plain')])
             return 'key is not match'
 
@@ -306,8 +312,8 @@ class BareMetalDeploy(object):
         if not self.worker.isAlive():
             self.worker = Worker()
             self.worker.start()
-        LOG.info("request is queued: %s, %s", deployment_id, params)
-        QUEUE.put((deployment_id, params))
+        LOG.info("request is queued: node %s, params %s", node_id, params)
+        QUEUE.put((node_id, params))
         # Requests go to Worker.run()
         start_response('200 OK', [('Content-type', 'text/plain')])
        return ''
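That concludes the deploy-helper changes. For context, BareMetalDeploy.post() above parses a form-encoded body sent by the deploy ramdisk. A hedged sketch of an equivalent client request follows; the parameter keys are taken from the handler, while the URL, port and values are illustrative, and this hunk elides further keys (the parse continues past 'n'):

```python
# Illustrative Python 2 client mirroring the parameters the handler reads;
# the diff shows 'i', 'k', 'a', 'p' and 'n', with additional keys elided.
import urllib
import urllib2

body = urllib.urlencode({
    'i': '123',          # node_id, looked up via db.bm_node_get()
    'k': 'alnum',        # must equal the node's stored deploy_key
    'a': '192.0.2.10',   # iSCSI target address
    'p': '3260',         # iSCSI port; the handler defaults this to 3260
    'n': 'iqn.fake',     # iSCSI IQN
})
# Endpoint is an assumption; the helper's listen address is not in this hunk.
urllib2.urlopen('http://deploy-helper.example:10000/', body).read()
```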
|   | ||||
@@ -22,10 +22,11 @@ import sys
 import tempfile
 import time
 
+import mox
 
 from nova import test
 
 from nova.tests.baremetal.db import base as bm_db_base
 
+from nova.virt.baremetal import db as bm_db
 
 TOPDIR = os.path.normpath(os.path.join(
                             os.path.dirname(os.path.abspath(__file__)),
@@ -93,11 +94,19 @@ class WorkerTestCase(bm_db_base.BMDBTestCase):
             history.append(params)
 
         self.stubs.Set(bmdh, 'deploy', fake_deploy)
+        self.mox.StubOutWithMock(bm_db, 'bm_node_update')
+        # update is called twice inside Worker.run
+        for i in range(6):
+            bm_db.bm_node_update(mox.IgnoreArg(), mox.IgnoreArg(),
+                                        mox.IgnoreArg())
+        self.mox.ReplayAll()
+
         params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
         for (dep_id, params) in enumerate(params_list):
             bmdh.QUEUE.put((dep_id, params))
         self.wait_queue_empty(1)
         self.assertEqual(params_list, history)
+        self.mox.VerifyAll()
 
     def test_run_with_failing_deploy(self):
         """Check a worker keeps on running even if deploy() raises
@@ -111,11 +120,19 @@ class WorkerTestCase(bm_db_base.BMDBTestCase):
             raise Exception('test')
 
         self.stubs.Set(bmdh, 'deploy', fake_deploy)
+        self.mox.StubOutWithMock(bm_db, 'bm_node_update')
+        # update is called twice inside Worker.run
+        for i in range(6):
+            bm_db.bm_node_update(mox.IgnoreArg(), mox.IgnoreArg(),
+                                        mox.IgnoreArg())
+        self.mox.ReplayAll()
+
         params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
         for (dep_id, params) in enumerate(params_list):
             bmdh.QUEUE.put((dep_id, params))
         self.wait_queue_empty(1)
         self.assertEqual(params_list, history)
+        self.mox.VerifyAll()
 
 
 class PhysicalWorkTestCase(test.TestCase):
@@ -175,6 +192,8 @@ class PhysicalWorkTestCase(test.TestCase):
         bmdh.deploy(address, port, iqn, lun, image_path, pxe_config_path,
                     root_mb, swap_mb)
 
+        self.mox.VerifyAll()
+
     def test_always_logout_iscsi(self):
         """logout_iscsi() must be called once login_iscsi() is called."""
         address = '127.0.0.1'
|   | ||||
@@ -21,6 +21,8 @@
 
 import os
 
+import mox
+
 from oslo.config import cfg
 from testtools import matchers
 
@@ -67,7 +69,6 @@ class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
         self.instance = utils.get_test_instance()
         self.test_network_info = utils.get_test_network_info(),
         self.node_info = bm_db_utils.new_bm_node(
-                id=123,
                 service_host='test_host',
                 cpus=4,
                 memory_mb=2048,
@@ -421,7 +422,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
         self.driver.destroy_images(self.context, self.node, self.instance)
         self.mox.VerifyAll()
 
-    def test_activate_bootloader(self):
+    def test_activate_bootloader_passes_details(self):
         self._create_node()
         macs = [nic['address'] for nic in self.nic_info]
         macs.append(self.node_info['prov_mac_address'])
@@ -441,7 +442,6 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
         self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
         self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
         self.mox.StubOutWithMock(bm_utils, 'random_alnum')
-        self.mox.StubOutWithMock(db, 'bm_deployment_create')
         self.mox.StubOutWithMock(pxe, 'build_pxe_config')
         self.mox.StubOutWithMock(bm_utils, 'write_to_file')
         self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
@@ -449,68 +449,73 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
         pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
         pxe.get_partition_sizes(self.instance).AndReturn((0, 0))
         bm_utils.random_alnum(32).AndReturn('alnum')
-        db.bm_deployment_create(
-                self.context, 'alnum', image_path, pxe_path, 0, 0).\
-                        AndReturn(1234)
         pxe.build_pxe_config(
-                1234, 'alnum', iqn, 'aaaa', 'bbbb', 'cccc', 'dddd').\
-                        AndReturn(pxe_config)
+                self.node['id'], 'alnum', iqn,
+                'aaaa', 'bbbb', 'cccc', 'dddd').AndReturn(pxe_config)
         bm_utils.write_to_file(pxe_path, pxe_config)
         for mac in macs:
             bm_utils.create_link_without_raise(
                     pxe_path, pxe.get_pxe_mac_path(mac))
 
         self.mox.ReplayAll()
 
-        self.driver.activate_bootloader(
-                self.context, self.node, self.instance)
+        self.driver.activate_bootloader(self.context, self.node, self.instance)
 
         self.mox.VerifyAll()
 
-    def test_deactivate_bootloader(self):
+    def test_activate_and_deactivate_bootloader(self):
         self._create_node()
-        macs = [nic['address'] for nic in self.nic_info]
-        macs.append(self.node_info['prov_mac_address'])
-        macs.sort()
-        image_info = {
-                'deploy_kernel': [None, 'aaaa'],
-                'deploy_ramdisk': [None, 'bbbb'],
-                'kernel': [None, 'cccc'],
-                'ramdisk': [None, 'dddd'],
+        extra_specs = {
+                'deploy_kernel_id': 'eeee',
+                'deploy_ramdisk_id': 'ffff',
             }
+        self.instance['extra_specs'] = extra_specs
         self.instance['uuid'] = 'fake-uuid'
-        pxe_path = pxe.get_pxe_config_file_path(self.instance)
 
+        self.mox.StubOutWithMock(bm_utils, 'write_to_file')
+        self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
         self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
         self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
-        self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
-        self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
 
-        pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
-        for uuid, path in [image_info[label] for label in image_info]:
-            bm_utils.unlink_without_raise(path)
-        bm_utils.unlink_without_raise(pxe_path)
-        self.driver._collect_mac_addresses(self.context, self.node).\
-                AndReturn(macs)
-        for mac in macs:
-            bm_utils.unlink_without_raise(pxe.get_pxe_mac_path(mac))
-        bm_utils.rmtree_without_raise(
-                os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
+        # create the config file
+        bm_utils.write_to_file(mox.StrContains('fake-uuid'),
+                               mox.StrContains(CONF.baremetal.tftp_root))
+        # unlink and link the 3 interfaces
+        for i in range(3):
+            bm_utils.unlink_without_raise(mox.Or(
+                    mox.StrContains('fake-uuid'),
+                    mox.StrContains(CONF.baremetal.tftp_root)))
+            bm_utils.create_link_without_raise(
+                    mox.StrContains('fake-uuid'),
+                    mox.StrContains(CONF.baremetal.tftp_root))
+        # unlink all 3 interfaces, 4 images, and the config file
+        for i in range(8):
+            bm_utils.unlink_without_raise(mox.Or(
+                    mox.StrContains('fake-uuid'),
+                    mox.StrContains(CONF.baremetal.tftp_root)))
+        bm_utils.rmtree_without_raise(mox.StrContains('fake-uuid'))
 
         self.mox.ReplayAll()
 
-        self.driver.deactivate_bootloader(
-            self.context, self.node, self.instance)
+        # activate and deactivate the bootloader
+        # and check the deployment task_state in the database
+        row = db.bm_node_get(self.context, 1)
+        self.assertTrue(row['deploy_key'] is None)
+
+        self.driver.activate_bootloader(self.context, self.node,
+                                            self.instance)
+        row = db.bm_node_get(self.context, 1)
+        self.assertTrue(row['deploy_key'] is not None)
+
+        self.driver.deactivate_bootloader(self.context, self.node,
+                                            self.instance)
+        row = db.bm_node_get(self.context, 1)
+        self.assertTrue(row['deploy_key'] is None)
 
         self.mox.VerifyAll()
 
     def test_deactivate_bootloader_for_nonexistent_instance(self):
         self._create_node()
         macs = [nic['address'] for nic in self.nic_info]
         macs.append(self.node_info['prov_mac_address'])
         macs.sort()
         image_info = {
                 'deploy_kernel': [None, 'aaaa'],
                 'deploy_ramdisk': [None, 'bbbb'],
                 'kernel': [None, 'cccc'],
                 'ramdisk': [None, 'dddd'],
             }
         self.instance['uuid'] = 'fake-uuid'
         pxe_path = pxe.get_pxe_config_file_path(self.instance)
 
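The rewritten PXE test above asserts a round-trip on the new bm_nodes.deploy_key column: activate_bootloader() is expected to generate and persist a key, and deactivate_bootloader() to clear it. A hedged condensation of that lifecycle (the helper names appear in this diff; the bodies here are illustrative and omit the PXE config and symlink work the real driver methods do):

```python
# Illustrative reduction of the lifecycle the test asserts; the real driver
# methods also write the PXE config and manage the per-MAC symlinks.
def activate_bootloader(context, node, instance):
    deploy_key = bm_utils.random_alnum(32)   # mocked as 'alnum' in the tests
    db.bm_node_update(context, node['id'], {'deploy_key': deploy_key})
    # ... build and write PXE config, create MAC symlinks ...

def deactivate_bootloader(context, node, instance):
    db.bm_node_update(context, node['id'], {'deploy_key': None})
    # ... unlink images, config file and MAC symlinks ...
```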
|   | ||||
							
								
								
									
										9
									
								
								nova/tests/test_baremetal_migrations.conf
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										9
									
								
								nova/tests/test_baremetal_migrations.conf
									
									
									
									
									
										Normal file
									
								
@@ -0,0 +1,9 @@
+[DEFAULT]
+# Set up any number of migration data stores you want, one
+# The "name" used in the test is the config variable key.
+#sqlite=sqlite:///test_migrations.db
+sqlite=sqlite://
+#mysql=mysql://root:@localhost/test_migrations
+#postgresql=postgresql://user:pass@localhost/test_migrations
+[walk_style]
+snake_walk=yes
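This new conf file drives the migration tests: every key in [DEFAULT] becomes a database under test (here only in-memory sqlite is enabled; the commented entries show the MySQL and PostgreSQL forms), and [walk_style] controls snake-walking. A sketch of how the test case consumes it, mirroring the ConfigParser code in BaseMigrationTestCase.setUp() below; the getboolean() call is an assumption, since the snake_walk read is elided from this diff:

```python
# Mirrors the config loading in BaseMigrationTestCase.setUp() below;
# the getboolean() line is an assumption about elided code.
import ConfigParser

cp = ConfigParser.RawConfigParser()
cp.read('nova/tests/test_baremetal_migrations.conf')
test_databases = dict(cp.defaults())                    # {'sqlite': 'sqlite://'}
snake_walk = cp.getboolean('walk_style', 'snake_walk')  # True ('yes')
```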
@@ -47,30 +47,37 @@ import datetime
 import netaddr
 import os
 import sqlalchemy
+import sqlalchemy.exc
 import urlparse
 import uuid
 
 from migrate.versioning import repository
 
 import nova.db.migration as migration
 import nova.db.sqlalchemy.migrate_repo
 from nova.db.sqlalchemy.migration import versioning_api as migration_api
 from nova.openstack.common import log as logging
 from nova.openstack.common import timeutils
 from nova import test
+import nova.virt.baremetal.db.sqlalchemy.migrate_repo
 
 
 LOG = logging.getLogger(__name__)
 
 
 def _get_connect_string(backend,
-                        user="openstack_citest",
-                        passwd="openstack_citest",
-                        database="openstack_citest"):
+        user=None,
+        passwd=None,
+        database=None):
     """
     Try to get a connection with a very specific set of values, if we get
     these then we'll run the tests, otherwise they are skipped
     """
+    if not user:
+        user = "openstack_citest"
+    if not passwd:
+        passwd = "openstack_citest"
+    if not database:
+        database = "openstack_citest"
+
     if backend == "postgres":
         backend = "postgresql+psycopg2"
     elif backend == "mysql":
@@ -120,32 +127,66 @@ def get_table(engine, name):
     return sqlalchemy.Table(name, metadata, autoload=True)
 
 
+def get_mysql_connection_info(conn_pieces):
+    database = conn_pieces.path.strip('/')
+    loc_pieces = conn_pieces.netloc.split('@')
+    host = loc_pieces[1]
+    auth_pieces = loc_pieces[0].split(':')
+    user = auth_pieces[0]
+    password = ""
+    if len(auth_pieces) > 1:
+        if auth_pieces[1].strip():
+            password = "-p\"%s\"" % auth_pieces[1]
+
+    return (user, password, database, host)
+
+
+def get_pgsql_connection_info(conn_pieces):
+    database = conn_pieces.path.strip('/')
+    loc_pieces = conn_pieces.netloc.split('@')
+    host = loc_pieces[1]
+
+    auth_pieces = loc_pieces[0].split(':')
+    user = auth_pieces[0]
+    password = ""
+    if len(auth_pieces) > 1:
+        password = auth_pieces[1].strip()
+
+    return (user, password, database, host)
+
+
 class BaseMigrationTestCase(test.TestCase):
     """Base class fort testing migrations and migration utils."""
 
-    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+    def __init__(self, *args, **kwargs):
+        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
+
+        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                        'test_migrations.conf')
-    # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
-    # to override the location of the config file for migration testing
-    CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
-                                      DEFAULT_CONFIG_FILE)
-    MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
-    REPOSITORY = repository.Repository(
-                                os.path.abspath(os.path.dirname(MIGRATE_FILE)))
+        # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+        # to override the location of the config file for migration testing
+        self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
+                                      self.DEFAULT_CONFIG_FILE)
+        self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
+        self.REPOSITORY = repository.Repository(
+                        os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+        self.INIT_VERSION = 0
+
+        self.snake_walk = False
+        self.test_databases = {}
+        self.migration = None
+        self.migration_api = None
 
     def setUp(self):
         super(BaseMigrationTestCase, self).setUp()
 
         self.snake_walk = False
         self.test_databases = {}
 
         # Load test databases from the config file. Only do this
         # once. No need to re-run this on each test...
-        LOG.debug('config_path is %s' % BaseMigrationTestCase.CONFIG_FILE_PATH)
-        if os.path.exists(BaseMigrationTestCase.CONFIG_FILE_PATH):
+        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
+        if os.path.exists(self.CONFIG_FILE_PATH):
             cp = ConfigParser.RawConfigParser()
             try:
-                cp.read(BaseMigrationTestCase.CONFIG_FILE_PATH)
+                cp.read(self.CONFIG_FILE_PATH)
                 defaults = cp.defaults()
                 for key, value in defaults.items():
                     self.test_databases[key] = value
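The new get_mysql_connection_info()/get_pgsql_connection_info() helpers factor out the URL parsing that the MySQL and PostgreSQL reset paths previously duplicated inline (see the removals further down). An example of what they return for a typical connection string, computed directly from the code above (values illustrative):

```python
# Applying the helpers above to a sample SQLAlchemy URL; assumes
# get_mysql_connection_info from this diff is in scope.
import urlparse

pieces = urlparse.urlparse('mysql://root:secret@localhost/test_migrations')
user, password, database, host = get_mysql_connection_info(pieces)
# user == 'root', host == 'localhost', database == 'test_migrations',
# password == '-p"secret"' (pre-quoted for the mysql command line; the
# pgsql helper, by contrast, returns the bare password).
```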
@@ -192,34 +233,20 @@ class BaseMigrationTestCase(test.TestCase):
                 # We can execute the MySQL client to destroy and re-create
                 # the MYSQL database, which is easier and less error-prone
                 # than using SQLAlchemy to do this via MetaData...trust me.
-                database = conn_pieces.path.strip('/')
-                loc_pieces = conn_pieces.netloc.split('@')
-                host = loc_pieces[1]
-                auth_pieces = loc_pieces[0].split(':')
-                user = auth_pieces[0]
-                password = ""
-                if len(auth_pieces) > 1:
-                    if auth_pieces[1].strip():
-                        password = "-p\"%s\"" % auth_pieces[1]
+                (user, password, database, host) = \
+                        get_mysql_connection_info(conn_pieces)
                 sql = ("drop database if exists %(database)s; "
-                       "create database %(database)s;") % locals()
+                        "create database %(database)s;") % locals()
                 cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                        "-e \"%(sql)s\"") % locals()
                 execute_cmd(cmd)
             elif conn_string.startswith('postgresql'):
-                database = conn_pieces.path.strip('/')
-                loc_pieces = conn_pieces.netloc.split('@')
-                host = loc_pieces[1]
-
-                auth_pieces = loc_pieces[0].split(':')
-                user = auth_pieces[0]
-                password = ""
-                if len(auth_pieces) > 1:
-                    password = auth_pieces[1].strip()
                 # note(krtaylor): File creation problems with tests in
                 # venv using .pgpass authentication, changed to
                 # PGPASSWORD environment variable which is no longer
                 # planned to be deprecated
+                (user, password, database, host) = \
+                        get_pgsql_connection_info(conn_pieces)
                 os.environ['PGPASSWORD'] = password
                 os.environ['PGUSER'] = user
                 # note(boris-42): We must create and drop database, we can't
@@ -236,18 +263,6 @@ class BaseMigrationTestCase(test.TestCase):
                 os.unsetenv('PGPASSWORD')
                 os.unsetenv('PGUSER')
 
-
-class TestMigrations(BaseMigrationTestCase):
-    """Test sqlalchemy-migrate migrations."""
-
-    def test_walk_versions(self):
-        """
-        Walks all version scripts for each tested database, ensuring
-        that there are no errors in the version scripts for each engine
-        """
-        for key, engine in self.engines.items():
-            self._walk_versions(engine, self.snake_walk)
-
     def test_mysql_connect_fail(self):
         """
         Test that we can trigger a mysql connection failure and we fail
@@ -256,16 +271,18 @@ class TestMigrations(BaseMigrationTestCase):
         if _is_backend_avail('mysql', user="openstack_cifail"):
             self.fail("Shouldn't have connected")
 
-    def test_mysql_opportunistically(self):
+    def _test_mysql_opportunistically(self, database=None):
         # Test that table creation on mysql only builds InnoDB tables
         if not _is_backend_avail('mysql'):
             self.skipTest("mysql not available")
         # add this to the global lists to make reset work with it, it's removed
         # automatically in tearDown so no need to clean it up here.
-        connect_string = _get_connect_string("mysql")
+        connect_string = _get_connect_string("mysql", database=database)
+        (user, password, database, host) = \
+                get_mysql_connection_info(urlparse.urlparse(connect_string))
         engine = sqlalchemy.create_engine(connect_string)
-        self.engines["mysqlcitest"] = engine
-        self.test_databases["mysqlcitest"] = connect_string
+        self.engines[database] = engine
+        self.test_databases[database] = connect_string
 
         # build a fully populated mysql database with all the tables
         self._reset_databases()
@@ -275,14 +292,16 @@ class TestMigrations(BaseMigrationTestCase):
         # sanity check
         total = connection.execute("SELECT count(*) "
                                    "from information_schema.TABLES "
-                                   "where TABLE_SCHEMA='openstack_citest'")
+                                   "where TABLE_SCHEMA='%(database)s'" %
+                                   locals())
         self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
 
         noninnodb = connection.execute("SELECT count(*) "
                                        "from information_schema.TABLES "
-                                       "where TABLE_SCHEMA='openstack_citest' "
+                                       "where TABLE_SCHEMA='%(database)s' "
                                        "and ENGINE!='InnoDB' "
-                                       "and TABLE_NAME!='migrate_version'")
+                                       "and TABLE_NAME!='migrate_version'" %
+                                       locals())
         count = noninnodb.scalar()
         self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
         connection.close()
@@ -295,16 +314,18 @@ class TestMigrations(BaseMigrationTestCase):
         if _is_backend_avail('postgresql', user="openstack_cifail"):
             self.fail("Shouldn't have connected")
 
-    def test_postgresql_opportunistically(self):
+    def _test_postgresql_opportunistically(self, database=None):
         # Test postgresql database migration walk
         if not _is_backend_avail('postgres'):
             self.skipTest("postgresql not available")
         # add this to the global lists to make reset work with it, it's removed
         # automatically in tearDown so no need to clean it up here.
-        connect_string = _get_connect_string("postgres")
+        connect_string = _get_connect_string("postgres", database=database)
         engine = sqlalchemy.create_engine(connect_string)
-        self.engines["postgresqlcitest"] = engine
-        self.test_databases["postgresqlcitest"] = connect_string
+        (user, password, database, host) = \
+                get_mysql_connection_info(urlparse.urlparse(connect_string))
+        self.engines[database] = engine
+        self.test_databases[database] = connect_string
 
         # build a fully populated postgresql database with all the tables
         self._reset_databases()
@@ -317,19 +338,21 @@ class TestMigrations(BaseMigrationTestCase):
         # upgrades successfully.
 
         # Place the database under version control
-        migration_api.version_control(engine, TestMigrations.REPOSITORY,
-                                     migration.INIT_VERSION)
-        self.assertEqual(migration.INIT_VERSION,
-                migration_api.db_version(engine,
-                                         TestMigrations.REPOSITORY))
+        self.migration_api.version_control(engine,
+                self.REPOSITORY,
+                self.INIT_VERSION)
+        self.assertEqual(self.INIT_VERSION,
+                self.migration_api.db_version(engine,
+                                         self.REPOSITORY))
 
-        migration_api.upgrade(engine, TestMigrations.REPOSITORY,
-                              migration.INIT_VERSION + 1)
+        self.migration_api.upgrade(engine,
+                self.REPOSITORY,
+                self.INIT_VERSION + 1)
 
-        LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
+        LOG.debug('latest version is %s' % self.REPOSITORY.latest)
 
-        for version in xrange(migration.INIT_VERSION + 2,
-                               TestMigrations.REPOSITORY.latest + 1):
+        for version in xrange(self.INIT_VERSION + 2,
+                              self.REPOSITORY.latest + 1):
             # upgrade -> downgrade -> upgrade
             self._migrate_up(engine, version, with_data=True)
             if snake_walk:
@@ -340,8 +363,8 @@ class TestMigrations(BaseMigrationTestCase):
             # Now walk it back down to 0 from the latest, testing
             # the downgrade paths.
             for version in reversed(
-                xrange(migration.INIT_VERSION + 2,
-                       TestMigrations.REPOSITORY.latest + 1)):
+                xrange(self.INIT_VERSION + 2,
+                       self.REPOSITORY.latest + 1)):
                 # downgrade -> upgrade -> downgrade
                 self._migrate_down(engine, version)
                 if snake_walk:
@@ -349,12 +372,12 @@ class TestMigrations(BaseMigrationTestCase):
                     self._migrate_down(engine, version)
 
     def _migrate_down(self, engine, version):
-        migration_api.downgrade(engine,
-                                TestMigrations.REPOSITORY,
+        self.migration_api.downgrade(engine,
+                                self.REPOSITORY,
                                 version)
         self.assertEqual(version,
-                         migration_api.db_version(engine,
-                                                  TestMigrations.REPOSITORY))
+                         self.migration_api.db_version(engine,
+                                                  self.REPOSITORY))
 
     def _migrate_up(self, engine, version, with_data=False):
         """migrate up to a new version of the db.
@@ -372,13 +395,13 @@ class TestMigrations(BaseMigrationTestCase):
                 if prerun:
                     data = prerun(engine)
 
-                migration_api.upgrade(engine,
-                                          TestMigrations.REPOSITORY,
-                                          version)
+                self.migration_api.upgrade(engine,
+                                      self.REPOSITORY,
+                                      version)
                 self.assertEqual(
                     version,
-                    migration_api.db_version(engine,
-                                             TestMigrations.REPOSITORY))
+                    self.migration_api.db_version(engine,
+                                             self.REPOSITORY))
 
             if with_data:
                 check = getattr(self, "_check_%d" % version, None)
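_migrate_up() resolves optional per-version hooks by name, as the prerun/check getattr calls above show: _prerun_&lt;N&gt;(engine) seeds data before upgrading to version N, and _check_&lt;N&gt;(engine, data) validates it afterwards when with_data=True. A minimal sketch of the convention on a test-case class (the version number and table are illustrative):

```python
# Hook-pair sketch for an imaginary migration 042; _migrate_up() discovers
# these by name, e.g. getattr(self, "_check_%d" % version, None).
def _prerun_042(self, engine):
    table = get_table(engine, 'example')
    data = [{'id': 1, 'name': 'seed-row'}]
    engine.execute(table.insert(), data)   # seed before the upgrade
    return data                            # handed to _check_042 afterwards

def _check_042(self, engine, data):
    table = get_table(engine, 'example')
    row = engine.execute(table.select()).fetchone()
    self.assertEqual(data[0]['name'], row['name'])
```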
@@ -389,6 +412,50 @@ class TestMigrations(BaseMigrationTestCase):
                       (version, engine))
             raise
 
+
+class TestNovaMigrations(BaseMigrationTestCase):
+    """Test sqlalchemy-migrate migrations."""
+
+    def __init__(self, *args, **kwargs):
+        super(TestNovaMigrations, self).__init__(*args, **kwargs)
+
+        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+                                       'test_migrations.conf')
+        # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+        # to override the location of the config file for migration testing
+        self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
+                                      self.DEFAULT_CONFIG_FILE)
+        self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
+        self.REPOSITORY = repository.Repository(
+                        os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+
+    def setUp(self):
+        super(TestNovaMigrations, self).setUp()
+
+        if self.migration is None:
+            self.migration = __import__('nova.db.migration',
+                    globals(), locals(), ['INIT_VERSION'], -1)
+            self.INIT_VERSION = self.migration.INIT_VERSION
+        if self.migration_api is None:
+            temp = __import__('nova.db.sqlalchemy.migration',
+                    globals(), locals(), ['versioning_api'], -1)
+            self.migration_api = temp.versioning_api
+
+    def tearDown(self):
+        super(TestNovaMigrations, self).tearDown()
+
+    def test_walk_versions(self):
+        for key, engine in self.engines.items():
+            self._walk_versions(engine, self.snake_walk)
+
+    def test_mysql_opportunistically(self):
+        self._test_mysql_opportunistically(
+                database='openstack_citest')
+
+    def test_postgresql_opportunistically(self):
+        self._test_postgresql_opportunistically(
+                database='openstack_citest')
+
     def _prerun_134(self, engine):
         now = timeutils.utcnow()
         data = [{
@@ -792,3 +859,60 @@ class TestMigrations(BaseMigrationTestCase):
     def _check_156(self, engine, data):
         # recheck the 149 data
         self._check_149(engine, data)
+
+
+class TestBaremetalMigrations(BaseMigrationTestCase):
+    """Test sqlalchemy-migrate migrations."""
+
+    def __init__(self, *args, **kwargs):
+        super(TestBaremetalMigrations, self).__init__(*args, **kwargs)
+
+        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+                                       'test_baremetal_migrations.conf')
+        # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+        # to override the location of the config file for migration testing
+        self.CONFIG_FILE_PATH = os.environ.get(
+                'BAREMETAL_TEST_MIGRATIONS_CONF',
+                self.DEFAULT_CONFIG_FILE)
+        self.MIGRATE_FILE = \
+                nova.virt.baremetal.db.sqlalchemy.migrate_repo.__file__
+        self.REPOSITORY = repository.Repository(
+                        os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+
+    def setUp(self):
+        super(TestBaremetalMigrations, self).setUp()
+
+        if self.migration is None:
+            self.migration = __import__('nova.virt.baremetal.db.migration',
+                    globals(), locals(), ['INIT_VERSION'], -1)
+            self.INIT_VERSION = self.migration.INIT_VERSION
+        if self.migration_api is None:
+            temp = __import__('nova.virt.baremetal.db.sqlalchemy.migration',
+                    globals(), locals(), ['versioning_api'], -1)
+            self.migration_api = temp.versioning_api
+
+    def tearDown(self):
+        super(TestBaremetalMigrations, self).tearDown()
+
+    def test_walk_versions(self):
+        for key, engine in self.engines.items():
+            self._walk_versions(engine, self.snake_walk)
+
+    def test_mysql_opportunistically(self):
+        self._test_mysql_opportunistically(
+                database='openstack_baremetal_citest')
+
+    def test_postgresql_opportunistically(self):
+        self._test_postgresql_opportunistically(
+                database='openstack_baremetal_citest')
+
+    def _prerun_002(self, engine):
+        data = [{'id': 1, 'key': 'fake-key', 'image_path': '/dev/null',
+                 'pxe_config_path': '/dev/null/', 'root_mb': 0, 'swap_mb': 0}]
+        table = get_table(engine, 'bm_deployments')
+        engine.execute(table.insert(), data)
+        return data
+
+    def _check_002(self, engine, data):
+        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
+                          get_table, engine, 'bm_deployments')
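The _prerun_002/_check_002 pair above implies that baremetal migration 002 drops the bm_deployments table after it is seeded, consistent with the move of deployment state onto bm_nodes. A hedged sketch of what such a sqlalchemy-migrate script could look like; the real 002 script ships in nova/virt/baremetal/db/sqlalchemy/migrate_repo and is not shown in this diff, so this is illustrative only:

```python
# Hypothetical sqlalchemy-migrate script consistent with _check_002 above;
# not the shipped 002 migration, which may also alter bm_nodes.
from sqlalchemy import MetaData, Table

def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    bm_deployments = Table('bm_deployments', meta, autoload=True)
    # Per-deployment rows are retired; state now lives on bm_nodes.
    bm_deployments.drop()
```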
|   | ||||