Merge from trunk
@@ -6,6 +6,7 @@ keys
 networks
 nova.sqlite
 CA/cacert.pem
+CA/crl.pem
 CA/index.txt*
 CA/openssl.cnf
 CA/serial*

.mailmap (2 changes)
@@ -27,5 +27,7 @@
 <vishvananda@gmail.com> <root@ubuntu>
 <sleepsonthefloor@gmail.com> <root@tonbuntu>
 <rlane@wikimedia.org> <laner@controller>
+<rconradharris@gmail.com> <rick.harris@rackspace.com>
 <corywright@gmail.com> <cory.wright@rackspace.com>
 <ant@openstack.org> <amesserl@rackspace.com>
+<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>

Authors (2 changes)
@@ -3,6 +3,7 @@ Anne Gentle <anne@openstack.org>
 Anthony Young <sleepsonthefloor@gmail.com>
 Antony Messerli <ant@openstack.org>
 Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
+Chiradeep Vittal <chiradeep@cloud.com>
 Chris Behrens <cbehrens@codestud.com>
 Chmouel Boudjnah <chmouel@chmouel.com>
 Cory Wright <corywright@gmail.com>
@@ -27,6 +28,7 @@ Michael Gundlach <michael.gundlach@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
+Rick Harris <rconradharris@gmail.com>
 Ryan Lane <rlane@wikimedia.org>
 Ryan Lucio <rlucio@internap.com>
 Salvatore Orlando <salvatore.orlando@eu.citrix.com>

bin/nova-api-paste (new executable file, 109 lines)
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+"""Starter script for Nova API."""
+
+import gettext
+import logging
+import os
+import sys
+
+from paste import deploy
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import flags
+from nova import wsgi
+
+LOG = logging.getLogger('nova.api')
+LOG.setLevel(logging.DEBUG)
+LOG.addHandler(logging.StreamHandler())
+
+FLAGS = flags.FLAGS
+
+API_ENDPOINTS = ['ec2', 'openstack']
+
+
+def load_configuration(paste_config):
+    """Load the paste configuration from the config file and return it."""
+    config = None
+    # Try each known name to get the global DEFAULTS, which will give ports
+    for name in API_ENDPOINTS:
+        try:
+            config = deploy.appconfig("config:%s" % paste_config, name=name)
+        except LookupError:
+            pass
+        if config:
+            verbose = config.get('verbose', None)
+            if verbose:
+                FLAGS.verbose = int(verbose) == 1
+                if FLAGS.verbose:
+                    logging.getLogger().setLevel(logging.DEBUG)
+            return config
+    LOG.debug(_("Paste config at %s has no section for known apis"),
+              paste_config)
+    print _("Paste config at %s has no section for any known apis") % \
+          paste_config
+    sys.exit(1)
+
+
+def launch_api(paste_config_file, section, server, port, host):
+    """Launch an api server from the specified port and IP."""
+    LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
+    app = deploy.loadapp('config:%s' % paste_config_file, name=section)
+    server.start(app, int(port), host)
+
+
+def run_app(paste_config_file):
+    LOG.debug(_("Using paste.deploy config at: %s"), configfile)
+    config = load_configuration(paste_config_file)
+    LOG.debug(_("Configuration: %r"), config)
+    server = wsgi.Server()
+    ip = config.get('host', '0.0.0.0')
+    for api in API_ENDPOINTS:
+        port = config.get("%s_port" % api, None)
+        if not port:
+            continue
+        host = config.get("%s_host" % api, ip)
+        launch_api(configfile, api, server, port, host)
+    LOG.debug(_("All api servers launched, now waiting"))
+    server.wait()
+
+
+if __name__ == '__main__':
+    FLAGS(sys.argv)
+    configfiles = ['/etc/nova/nova-api.conf']
+    if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+        configfiles.insert(0,
+                       os.path.join(possible_topdir, 'etc', 'nova-api.conf'))
+    for configfile in configfiles:
+        if os.path.exists(configfile):
+            run_app(configfile)
+            break
+        else:
+            LOG.debug(_("Skipping missing configuration: %s"), configfile)
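Note: bin/nova-api-paste prefers <topdir>/etc/nova-api.conf and falls back to /etc/nova/nova-api.conf, starting one WSGI server per endpoint whose <name>_port key appears in the paste config globals. Assuming such a config is already in place, running the script directly should be enough (illustrative only, not verified on this branch):

    ./bin/nova-api-paste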
@@ -53,6 +53,7 @@
   CLI interface for nova management.
 """

+import datetime
 import gettext
 import logging
 import os
@@ -452,6 +453,52 @@ class NetworkCommands(object):
                                     int(network_size), int(vlan_start),
                                     int(vpn_start))

+
+class ServiceCommands(object):
+    """Enable and disable running services"""
+
+    def list(self, host=None, service=None):
+        """Show a list of all running services. Filter by host & service name.
+        args: [host] [service]"""
+        ctxt = context.get_admin_context()
+        now = datetime.datetime.utcnow()
+        services = db.service_get_all(ctxt)
+        if host:
+            services = [s for s in services if s['host'] == host]
+        if service:
+            services = [s for s in services if s['binary'] == service]
+        for svc in services:
+            delta = now - (svc['updated_at'] or svc['created_at'])
+            alive = (delta.seconds <= 15)
+            art = (alive and ":-)") or "XXX"
+            active = 'enabled'
+            if svc['disabled']:
+                active = 'disabled'
+            print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
+                                              active, art,
+                                              svc['updated_at'])
+
+    def enable(self, host, service):
+        """Enable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': False})
+
+    def disable(self, host, service):
+        """Disable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': True})
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
@@ -459,7 +506,8 @@ CATEGORIES = [
     ('shell', ShellCommands),
     ('vpn', VpnCommands),
     ('floating', FloatingIpCommands),
-    ('network', NetworkCommands)]
+    ('network', NetworkCommands),
+    ('service', ServiceCommands)]


 def lazy_match(name, key_value_tuples):
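Note: registering ServiceCommands under the 'service' category gives nova-manage subcommands along these lines (a sketch inferred from the method signatures above, not verified output):

    nova-manage service list [host] [service]
    nova-manage service enable <host> <service>
    nova-manage service disable <host> <service>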
@@ -23,12 +23,9 @@ import base64
 import boto
 import httplib

-from nova import flags
 from boto.ec2.regioninfo import RegionInfo


-FLAGS = flags.FLAGS
-
 DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
 DEFAULT_REGION = 'nova'

@@ -199,8 +196,8 @@ class NovaAdminClient(object):
             self,
             clc_url=DEFAULT_CLC_URL,
             region=DEFAULT_REGION,
-            access_key=FLAGS.aws_access_key_id,
-            secret_key=FLAGS.aws_secret_access_key,
+            access_key=None,
+            secret_key=None,
             **kwargs):
         parts = self.split_clc_url(clc_url)

@@ -212,6 +212,8 @@ DEFINE_list('region_list',
 DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
 DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
 DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
+DEFINE_integer('glance_port', 9292, 'glance port')
+DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
 DEFINE_integer('s3_port', 3333, 's3 port')
 DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
 DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
@@ -239,6 +241,7 @@ DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
 DEFINE_integer('cc_port', 8773, 'cloud controller port')
 DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')

+DEFINE_string('default_project', 'openstack', 'default project for openstack')
 DEFINE_string('default_image', 'ami-11111',
               'default image to use, testing only')
 DEFINE_string('default_instance_type', 'm1.small',
@@ -260,6 +263,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
 DEFINE_string('sql_connection',
               'sqlite:///$state_path/nova.sqlite',
               'connection string for sql database')
+DEFINE_string('sql_idle_timeout',
+              '3600',
+              'timeout for idle sql database connections')
+DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
+DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')

 DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
               'Manager for compute')
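Note: like the existing DEFINE_* options, the new glance_* and sql_* flags can presumably be overridden on a service's command line or flagfile; the values below are purely illustrative:

    nova-compute --glance_host=192.168.1.10 --glance_port=9292 --sql_max_retries=5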
@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
     def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest running instances."""
         instance_ref = db.instance_get(context, instance_id)
+        if instance_ref['availability_zone'] and context.is_admin:
+            zone, _x, host = instance_ref['availability_zone'].partition(':')
+            service = db.service_get_by_args(context.elevated(), host,
+                                             'nova-compute')
+            if not self.service_is_up(service):
+                raise driver.WillNotSchedule("Host %s is not alive" % host)
+
+            # TODO(vish): this probably belongs in the manager, if we
+            #             can generalize this somehow
+            now = datetime.datetime.utcnow()
+            db.instance_update(context, instance_id, {'host': host,
+                                                      'scheduled_at': now})
+            return host
         results = db.service_get_all_compute_sorted(context)
         for result in results:
             (service, instance_cores) = result
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
     def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest volumes."""
         volume_ref = db.volume_get(context, volume_id)
+        if (':' in volume_ref['availability_zone']) and context.is_admin:
+            zone, _x, host = volume_ref['availability_zone'].partition(':')
+            service = db.service_get_by_args(context.elevated(), host,
+                                             'nova-volume')
+            if not self.service_is_up(service):
+                raise driver.WillNotSchedule("Host %s not available" % host)
+
+            # TODO(vish): this probably belongs in the manager, if we
+            #             can generalize this somehow
+            now = datetime.datetime.utcnow()
+            db.volume_update(context, volume_id, {'host': host,
+                                                  'scheduled_at': now})
+            return host
         results = db.service_get_all_volume_sorted(context)
         for result in results:
             (service, volume_gigabytes) = result
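Note: these scheduler changes let an admin pin an instance or volume to a specific host by encoding it in the availability zone as zone:host (e.g. nova:host1, as exercised by the tests below); if the named nova-compute or nova-volume service is not alive, WillNotSchedule is raised. A minimal sketch of the parsing the code relies on, with a hypothetical value:

    zone, _x, host = 'nova:host1'.partition(':')  # host == 'host1'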
							
								
								
									
nova/tests/hyperv_unittest.py (new file, 71 lines)
@@ -0,0 +1,71 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+#    Copyright 2010 Cloud.com, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Tests For Hyper-V driver
+"""
+
+import random
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import test
+from nova.auth import manager
+from nova.virt import hyperv
+
+FLAGS = flags.FLAGS
+FLAGS.connection_type = 'hyperv'
+
+
+class HyperVTestCase(test.TestCase):
+    """Test cases for the Hyper-V driver"""
+    def setUp(self):
+        super(HyperVTestCase, self).setUp()
+        self.manager = manager.AuthManager()
+        self.user = self.manager.create_user('fake', 'fake', 'fake',
+                                             admin=True)
+        self.project = self.manager.create_project('fake', 'fake', 'fake')
+        self.context = context.RequestContext(self.user, self.project)
+
+    def test_create_destroy(self):
+        """Create a VM and destroy it"""
+        instance = {'internal_id': random.randint(1, 1000000),
+                     'memory_mb': '1024',
+                     'mac_address': '02:12:34:46:56:67',
+                     'vcpus': 2,
+                     'project_id': 'fake',
+                     'instance_type': 'm1.small'}
+        instance_ref = db.instance_create(self.context, instance)
+
+        conn = hyperv.get_connection(False)
+        conn._create_vm(instance_ref)  # pylint: disable-msg=W0212
+        found = [n  for n in conn.list_instances()
+                      if n == instance_ref['name']]
+        self.assertTrue(len(found) == 1)
+        info = conn.get_info(instance_ref['name'])
+        #Unfortunately since the vm is not running at this point,
+        #we cannot obtain memory information from get_info
+        self.assertEquals(info['num_cpu'], instance_ref['vcpus'])
+
+        conn.destroy(instance_ref)
+        found = [n  for n in conn.list_instances()
+                      if n == instance_ref['name']]
+        self.assertTrue(len(found) == 0)
+
+    def tearDown(self):
+        super(HyperVTestCase, self).tearDown()
+        self.manager.delete_project(self.project)
+        self.manager.delete_user(self.user)
@@ -106,7 +106,7 @@ class CloudTestCase(test.TestCase):
         self.cloud.allocate_address(self.context)
         inst = db.instance_create(self.context, {'host': FLAGS.host})
         fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
-        ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+        ec2_id = cloud.id_to_ec2_id(inst['id'])
         self.cloud.associate_address(self.context,
                                      instance_id=ec2_id,
                                      public_ip=address)
@@ -127,9 +127,9 @@
         result = self.cloud.describe_volumes(self.context)
         self.assertEqual(len(result['volumeSet']), 2)
         result = self.cloud.describe_volumes(self.context,
-                                             volume_id=[vol2['ec2_id']])
+                                             volume_id=[vol2['id']])
         self.assertEqual(len(result['volumeSet']), 1)
-        self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id'])
+        self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id'])
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])

@@ -296,7 +296,7 @@

     def test_update_of_instance_display_fields(self):
         inst = db.instance_create(self.context, {})
-        ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+        ec2_id = cloud.id_to_ec2_id(inst['id'])
         self.cloud.update_instance(self.context, ec2_id,
                                    display_name='c00l 1m4g3')
         inst = db.instance_get(self.context, inst['id'])
@@ -22,6 +22,7 @@ Tests For Compute
 import datetime
 import logging

+from nova import compute
 from nova import context
 from nova import db
 from nova import exception
@@ -29,7 +30,6 @@ from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
-from nova.compute import api as compute_api


 FLAGS = flags.FLAGS
@@ -44,7 +44,7 @@ class ComputeTestCase(test.TestCase):
                    stub_network=True,
                    network_manager='nova.network.manager.FlatManager')
         self.compute = utils.import_object(FLAGS.compute_manager)
-        self.compute_api = compute_api.ComputeAPI()
+        self.compute_api = compute.API()
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake')
         self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -72,7 +72,7 @@ class ComputeTestCase(test.TestCase):
         """Verify that an instance cannot be created without a display_name."""
         cases = [dict(), dict(display_name=None)]
         for instance in cases:
-            ref = self.compute_api.create_instances(self.context,
+            ref = self.compute_api.create(self.context,
                 FLAGS.default_instance_type, None, **instance)
             try:
                 self.assertNotEqual(ref[0].display_name, None)
@@ -80,13 +80,13 @@ class ComputeTestCase(test.TestCase):
                 db.instance_destroy(self.context, ref[0]['id'])

     def test_create_instance_associates_security_groups(self):
-        """Make sure create_instances associates security groups"""
+        """Make sure create associates security groups"""
         values = {'name': 'default',
                   'description': 'default',
                   'user_id': self.user.id,
                   'project_id': self.project.id}
         group = db.security_group_create(self.context, values)
-        ref = self.compute_api.create_instances(self.context,
+        ref = self.compute_api.create(self.context,
             FLAGS.default_instance_type, None, security_group=['default'])
         try:
             self.assertEqual(len(ref[0]['security_groups']), 1)
@@ -151,6 +151,14 @@ class ComputeTestCase(test.TestCase):
         self.compute.reboot_instance(self.context, instance_id)
         self.compute.terminate_instance(self.context, instance_id)

+    def test_snapshot(self):
+        """Ensure instance can be snapshotted"""
+        instance_id = self._create_instance()
+        name = "myfakesnapshot"
+        self.compute.run_instance(self.context, instance_id)
+        self.compute.snapshot_instance(self.context, instance_id, name)
+        self.compute.terminate_instance(self.context, instance_id)
+
     def test_console_output(self):
         """Make sure we can get console output from instance"""
         instance_id = self._create_instance()
@@ -19,6 +19,8 @@
 Tests For Scheduler
 """

+import datetime
+
 from nova import context
 from nova import db
 from nova import flags
@@ -95,7 +97,7 @@ class SimpleDriverTestCase(test.TestCase):
         self.manager.delete_user(self.user)
         self.manager.delete_project(self.project)

-    def _create_instance(self):
+    def _create_instance(self, **kwargs):
         """Create a test instance"""
         inst = {}
         inst['image_id'] = 'ami-test'
@@ -106,6 +108,7 @@ class SimpleDriverTestCase(test.TestCase):
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
         inst['vcpus'] = 1
+        inst['availability_zone'] = kwargs.get('availability_zone', None)
         return db.instance_create(self.context, inst)['id']

     def _create_volume(self):
@@ -114,9 +117,33 @@ class SimpleDriverTestCase(test.TestCase):
         vol['image_id'] = 'ami-test'
         vol['reservation_id'] = 'r-fakeres'
         vol['size'] = 1
+        vol['availability_zone'] = 'test'
         return db.volume_create(self.context, vol)['id']

-    def test_hosts_are_up(self):
+    def test_doesnt_report_disabled_hosts_as_up(self):
+        """Ensures driver doesn't find hosts before they are enabled"""
+        # NOTE(vish): constructing service without create method
+        #             because we are going to use it without queue
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        compute2 = service.Service('host2',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute2.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+        db.service_update(self.context, s1['id'], {'disabled': True})
+        db.service_update(self.context, s2['id'], {'disabled': True})
+        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+        self.assertEqual(0, len(hosts))
+        compute1.kill()
+        compute2.kill()
+
+    def test_reports_enabled_hosts_as_up(self):
         """Ensures driver can find the hosts that are up"""
         # NOTE(vish): constructing service without create method
         #             because we are going to use it without queue
@@ -131,7 +158,7 @@ class SimpleDriverTestCase(test.TestCase):
                                    FLAGS.compute_manager)
         compute2.start()
         hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
-        self.assertEqual(len(hosts), 2)
+        self.assertEqual(2, len(hosts))
         compute1.kill()
         compute2.kill()

@@ -158,6 +185,63 @@ class SimpleDriverTestCase(test.TestCase):
         compute1.kill()
         compute2.kill()

+    def test_specific_host_gets_instance(self):
+        """Ensures if you set availability_zone it launches on that zone"""
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        compute2 = service.Service('host2',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute2.start()
+        instance_id1 = self._create_instance()
+        compute1.run_instance(self.context, instance_id1)
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        host = self.scheduler.driver.schedule_run_instance(self.context,
+                                                           instance_id2)
+        self.assertEqual('host1', host)
+        compute1.terminate_instance(self.context, instance_id1)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+        compute2.kill()
+
+    def test_wont_schedule_if_specified_host_is_down(self):
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        now = datetime.datetime.utcnow()
+        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+        past = now - delta
+        db.service_update(self.context, s1['id'], {'updated_at': past})
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        self.assertRaises(driver.WillNotSchedule,
+                          self.scheduler.driver.schedule_run_instance,
+                          self.context,
+                          instance_id2)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+
+    def test_will_schedule_on_disabled_host_if_specified(self):
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        db.service_update(self.context, s1['id'], {'disabled': True})
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        host = self.scheduler.driver.schedule_run_instance(self.context,
+                                                           instance_id2)
+        self.assertEqual('host1', host)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+
     def test_too_many_cores(self):
         """Ensures we don't go over max cores"""
         compute1 = service.Service('host1',
@@ -29,9 +29,9 @@ from nova.auth import manager
 from nova.compute import instance_types
 from nova.compute import power_state
 from nova.virt import xenapi_conn
-from nova.virt.xenapi import fake
+from nova.virt.xenapi import fake as xenapi_fake
 from nova.virt.xenapi import volume_utils
-from nova.tests.db import fakes
+from nova.tests.db import fakes as db_fakes
 from nova.tests.xenapi import stubs

 FLAGS = flags.FLAGS
@@ -47,9 +47,9 @@ class XenAPIVolumeTestCase(test.TestCase):
         FLAGS.target_host = '127.0.0.1'
         FLAGS.xenapi_connection_url = 'test_url'
         FLAGS.xenapi_connection_password = 'test_pass'
-        fakes.stub_out_db_instance_api(self.stubs)
+        db_fakes.stub_out_db_instance_api(self.stubs)
         stubs.stub_out_get_target(self.stubs)
-        fake.reset()
+        xenapi_fake.reset()
         self.values = {'name': 1, 'id': 1,
                   'project_id': 'fake',
                   'user_id': 'fake',
@@ -79,11 +79,11 @@ class XenAPIVolumeTestCase(test.TestCase):
         helper = volume_utils.VolumeHelper
         helper.XenAPI = session.get_imported_xenapi()
         vol = self._create_volume()
-        info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc')
-        label = 'SR-%s' % vol['ec2_id']
+        info = helper.parse_volume_info(vol['id'], '/dev/sdc')
+        label = 'SR-%s' % vol['id']
         description = 'Test-SR'
         sr_ref = helper.create_iscsi_storage(session, info, label, description)
-        srs = fake.get_all('SR')
+        srs = xenapi_fake.get_all('SR')
         self.assertEqual(sr_ref, srs[0])
         db.volume_destroy(context.get_admin_context(), vol['id'])

@@ -97,7 +97,7 @@ class XenAPIVolumeTestCase(test.TestCase):
         # oops, wrong mount point!
         self.assertRaises(volume_utils.StorageError,
                           helper.parse_volume_info,
-                          vol['ec2_id'],
+                          vol['id'],
                           '/dev/sd')
         db.volume_destroy(context.get_admin_context(), vol['id'])

@@ -107,17 +107,16 @@ class XenAPIVolumeTestCase(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         volume = self._create_volume()
         instance = db.instance_create(self.values)
-        fake.create_vm(instance.name, 'Running')
-        result = conn.attach_volume(instance.name, volume['ec2_id'],
-                                    '/dev/sdc')
+        xenapi_fake.create_vm(instance.name, 'Running')
+        result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')

         def check():
             # check that the VM has a VBD attached to it
             # Get XenAPI reference for the VM
-            vms = fake.get_all('VM')
+            vms = xenapi_fake.get_all('VM')
             # Get XenAPI record for VBD
-            vbds = fake.get_all('VBD')
-            vbd = fake.get_record('VBD', vbds[0])
+            vbds = xenapi_fake.get_all('VBD')
+            vbd = xenapi_fake.get_record('VBD', vbds[0])
             vm_ref = vbd['VM']
             self.assertEqual(vm_ref, vms[0])

@@ -130,11 +129,11 @@ class XenAPIVolumeTestCase(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         volume = self._create_volume()
         instance = db.instance_create(self.values)
-        fake.create_vm(instance.name, 'Running')
+        xenapi_fake.create_vm(instance.name, 'Running')
         self.assertRaises(Exception,
                           conn.attach_volume,
                           instance.name,
-                          volume['ec2_id'],
+                          volume['id'],
                           '/dev/sdc')

     def tearDown(self):
@@ -156,41 +155,70 @@ class XenAPIVMTestCase(test.TestCase):
         self.stubs = stubout.StubOutForTesting()
         FLAGS.xenapi_connection_url = 'test_url'
         FLAGS.xenapi_connection_password = 'test_pass'
-        fake.reset()
-        fakes.stub_out_db_instance_api(self.stubs)
-        fake.create_network('fake', FLAGS.flat_network_bridge)
+        xenapi_fake.reset()
+        db_fakes.stub_out_db_instance_api(self.stubs)
+        xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        self.conn = xenapi_conn.get_connection(False)

     def test_list_instances_0(self):
-        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
-        conn = xenapi_conn.get_connection(False)
-        instances = conn.list_instances()
+        instances = self.conn.list_instances()
         self.assertEquals(instances, [])

-    def test_spawn(self):
-        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
-        values = {'name': 1, 'id': 1,
-                  'project_id': self.project.id,
-                  'user_id': self.user.id,
-                  'image_id': 1,
-                  'kernel_id': 2,
-                  'ramdisk_id': 3,
-                  'instance_type': 'm1.large',
-                  'mac_address': 'aa:bb:cc:dd:ee:ff',
-                  }
-        conn = xenapi_conn.get_connection(False)
-        instance = db.instance_create(values)
-        conn.spawn(instance)
+    def test_get_diagnostics(self):
+        instance = self._create_instance()
+        self.conn.get_diagnostics(instance)
+
+    def test_instance_snapshot(self):
+        stubs.stubout_instance_snapshot(self.stubs)
+        instance = self._create_instance()
+
+        name = "MySnapshot"
+        template_vm_ref = self.conn.snapshot(instance, name)
+
+        def ensure_vm_was_torn_down():
+            vm_labels = []
+            for vm_ref in xenapi_fake.get_all('VM'):
+                vm_rec = xenapi_fake.get_record('VM', vm_ref)
+                if not vm_rec["is_control_domain"]:
+                    vm_labels.append(vm_rec["name_label"])
+
+            self.assertEquals(vm_labels, [1])
+
+        def ensure_vbd_was_torn_down():
+            vbd_labels = []
+            for vbd_ref in xenapi_fake.get_all('VBD'):
+                vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
+                vbd_labels.append(vbd_rec["vm_name_label"])
+
+            self.assertEquals(vbd_labels, [1])
+
+        def ensure_vdi_was_torn_down():
+            for vdi_ref in xenapi_fake.get_all('VDI'):
+                vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+                name_label = vdi_rec["name_label"]
+                self.assert_(not name_label.endswith('snapshot'))

         def check():
-            instances = conn.list_instances()
+            ensure_vm_was_torn_down()
+            ensure_vbd_was_torn_down()
+            ensure_vdi_was_torn_down()
+
+        check()
+
+    def test_spawn(self):
+        instance = self._create_instance()
+
+        def check():
+            instances = self.conn.list_instances()
             self.assertEquals(instances, [1])

             # Get Nova record for VM
-            vm_info = conn.get_info(1)
+            vm_info = self.conn.get_info(1)

             # Get XenAPI record for VM
-            vms = fake.get_all('VM')
-            vm = fake.get_record('VM', vms[0])
+            vms = xenapi_fake.get_all('VM')
+            vm = xenapi_fake.get_record('VM', vms[0])

             # Check that m1.large above turned into the right thing.
             instance_type = instance_types.INSTANCE_TYPES['m1.large']
@@ -218,3 +246,19 @@ class XenAPIVMTestCase(test.TestCase):
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
         self.stubs.UnsetAll()
+
+    def _create_instance(self):
+        """Creates and spawns a test instance"""
+        values = {
+            'name': 1,
+            'id': 1,
+            'project_id': self.project.id,
+            'user_id': self.user.id,
+            'image_id': 1,
+            'kernel_id': 2,
+            'ramdisk_id': 3,
+            'instance_type': 'm1.large',
+            'mac_address': 'aa:bb:cc:dd:ee:ff'}
+        instance = db.instance_create(values)
+        self.conn.spawn(instance)
+        return instance
@@ -19,6 +19,54 @@
 from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake
 from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
+
+
+def stubout_instance_snapshot(stubs):
+    @classmethod
+    def fake_fetch_image(cls, session, instance_id, image, user, project,
+                         type):
+        # Stubout wait_for_task
+        def fake_wait_for_task(self, id, task):
+            class FakeEvent:
+
+                def send(self, value):
+                    self.rv = value
+
+                def wait(self):
+                    return self.rv
+
+            done = FakeEvent()
+            self._poll_task(id, task, done)
+            rv = done.wait()
+            return rv
+
+        stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
+                  fake_wait_for_task)
+
+        from nova.virt.xenapi.fake import create_vdi
+        name_label = "instance-%s" % instance_id
+        #TODO: create fake SR record
+        sr_ref = "fakesr"
+        vdi_ref = create_vdi(name_label=name_label, read_only=False,
+                             sr_ref=sr_ref, sharable=False)
+        vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+        vdi_uuid = vdi_rec['uuid']
+        return vdi_uuid
+
+    stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
+
+    def fake_parse_xmlrpc_value(val):
+        return val
+
+    stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
+
+    def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+                              original_parent_uuid):
+        #TODO(sirp): Should we actually fake out the data here
+        return "fakeparent"
+
+    stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
+
+
 def stubout_session(stubs, cls):
@@ -63,6 +111,24 @@ class FakeSessionForVMTests(fake.SessionBase):
         vm['is_a_template'] = False
         vm['is_control_domain'] = False

+    def VM_snapshot(self, session_ref, vm_ref, label):
+        status = "Running"
+        template_vm_ref = fake.create_vm(label, status, is_a_template=True,
+            is_control_domain=False)
+
+        sr_ref = "fakesr"
+        template_vdi_ref = fake.create_vdi(label, read_only=True,
+            sr_ref=sr_ref, sharable=False)
+
+        template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
+        return template_vm_ref
+
+    def VDI_destroy(self, session_ref, vdi_ref):
+        fake.destroy_vdi(vdi_ref)
+
+    def VM_destroy(self, session_ref, vm_ref):
+        fake.destroy_vm(vm_ref)
+

 class FakeSessionForVolumeTests(fake.SessionBase):
     """ Stubs out a XenAPISession for Volume tests """

run_tests.py (new file, 68 lines)
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+
+import os
+import unittest
+import sys
+
+from nose import config
+from nose import result
+from nose import core
+
+
+class NovaTestResult(result.TextTestResult):
+    def __init__(self, *args, **kw):
+        result.TextTestResult.__init__(self, *args, **kw)
+        self._last_case = None
+
+    def getDescription(self, test):
+        return str(test)
+
+    def startTest(self, test):
+        unittest.TestResult.startTest(self, test)
+        current_case = test.test.__class__.__name__
+
+        if self.showAll:
+            if current_case != self._last_case:
+                self.stream.writeln(current_case)
+                self._last_case = current_case
+
+            self.stream.write(
+                '    %s' % str(test.test._testMethodName).ljust(60))
+            self.stream.flush()
+
+
+class NovaTestRunner(core.TextTestRunner):
+    def _makeResult(self):
+        return NovaTestResult(self.stream,
+                              self.descriptions,
+                              self.verbosity,
+                              self.config)
+
+
+if __name__ == '__main__':
+    c = config.Config(stream=sys.stdout,
+                      env=os.environ,
+                      verbosity=3)
+
+    runner = NovaTestRunner(stream=c.stream,
+                            verbosity=c.verbosity,
+                            config=c)
+    sys.exit(not core.run(config=c, testRunner=runner))
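Note: the new nose-based runner should be invocable directly from a source checkout (illustrative):

    python run_tests.py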